/* Node in a singly-linked list of display "levels" (rows) that are free
 * for reuse by the text pileup viewer.  `level` is the row index;
 * `cnt` is a small countdown before a freed row may be reused -- it is
 * set to TV_GAP when a node is released (see mp_free) and decremented
 * once per pileup position (see tview_func), so a row stays visually
 * empty for TV_GAP columns before a new read is placed on it.
 * 28+4 bits pack both fields into a single 32-bit word. */
11 typedef struct __freenode_t {
12 uint32_t level:28, cnt:4;
13 struct __freenode_t *next;
14 } freenode_t, *freenode_p;
/* Strict-weak ordering for ks_introsort(node, ...): nodes with the
 * smaller reuse countdown sort first; ties are broken by lower row
 * number, so the soonest-reusable, topmost rows come off the free list
 * first.  NOTE: evaluates each argument more than once -- harmless for
 * the plain pointer arguments ksort passes. */
16 #define freenode_lt(a,b) ((a)->cnt < (b)->cnt || ((a)->cnt == (b)->cnt && (a)->level < (b)->level))
/* Instantiate ksort's introsort for freenode_p under freenode_lt;
 * this generates the ks_introsort(node, n, a) call used in tview_func
 * to re-sort the free-node list each position. */
17 KSORT_INIT(node, freenode_p, freenode_lt)
19 /* Memory pool, similar to the one in bam_pileup.c */
/* Allocate an empty, zero-initialized node pool.
 * Returns NULL on allocation failure (callers here do not check).
 * NOTE(review): the function's brace lines are not visible in this
 * extract (original numbering jumps 25 -> 27 -> 29). */
25 static mempool_t *mp_init()
27 return (mempool_t*)calloc(1, sizeof(mempool_t));
/* Free every node still cached in the pool's buffer, then the buffer
 * and the pool itself.  Nodes handed out by mp_alloc and not yet
 * returned via mp_free are NOT freed here -- the caller must return
 * them first (bam_lplbuf_destroy asserts mp->cnt == 0).
 * NOTE(review): the braces and the declaration of `k` are not visible
 * in this extract (original lines 30-31 and 34 are missing). */
29 static void mp_destroy(mempool_t *mp)
32 for (k = 0; k < mp->n; ++k) free(mp->buf[k]);
33 free(mp->buf); free(mp);
/* Hand out one zeroed node: pop from the pool's stack when available,
 * otherwise calloc a fresh one.  Returns NULL only if calloc fails.
 * NOTE(review): original line 37 is missing from this extract --
 * presumably it increments mp->cnt (the outstanding-node counter that
 * mp_free decrements and bam_lplbuf_destroy asserts is zero); confirm
 * against the full source.  Braces are also not visible. */
35 static inline freenode_t *mp_alloc(mempool_t *mp)
38 if (mp->n == 0) return (freenode_t*)calloc(1, sizeof(freenode_t));
39 else return mp->buf[--mp->n];
/* Return node p to the pool for later reuse: unlink it, restart its
 * reuse countdown (cnt = TV_GAP), and grow the pool's pointer stack as
 * needed (doubling, starting at 256).
 * NOTE(review): `mp->buf = realloc(mp->buf, ...)` overwrites the only
 * copy of the old pointer -- on OOM the previous buffer leaks and the
 * subsequent store dereferences NULL; a temp-pointer check would be
 * safer.  The line that actually pushes p (presumably
 * mp->buf[mp->n++] = p) and the closing braces fall in the missing
 * original lines 47-50 and are not visible here. */
41 static inline void mp_free(mempool_t *mp, freenode_t *p)
43 --mp->cnt; p->next = 0; p->cnt = TV_GAP;
44 if (mp->n == mp->max) {
45 mp->max = mp->max? mp->max<<1 : 256;
46 mp->buf = (freenode_t**)realloc(mp->buf, sizeof(freenode_t*) * mp->max);
/* State for the "levelled" pileup buffer: wraps a plain pileup buffer
 * and assigns each read a stable display row (level).
 *   max        -- allocated capacity of cur_level[]/pre_level[]
 *   n_cur/n_pre -- number of live levels at the current/previous position
 *   max_level  -- highest row number handed out so far
 *   cur_level/pre_level -- per-read row assignments (int arrays)
 *   aux        -- scratch array of node pointers for re-sorting the
 *                 free list; head/tail -- the free-node list itself
 * NOTE(review): several member declarations (e.g. func, user_data,
 * plbuf, mp, n_nodes, m_aux, referenced by the functions below) fall in
 * missing original lines 55 and 57-62 and are not visible here. */
52 struct __bam_lplbuf_t {
53 int max, n_cur, n_pre;
54 int max_level, *cur_level, *pre_level;
56 freenode_t **aux, *head, *tail;
/* Reset the buffer for reuse on a new region: reset the underlying
 * plain pileup buffer, walk the free-node list returning every node
 * except the sentinel tail to the pool, and clear the level counts.
 * NOTE(review): the loop body that frees each node and advances p
 * (original lines 68-71), the declaration of p, and other resets
 * (e.g. max_level) are in lines missing from this extract. */
63 void bam_lplbuf_reset(bam_lplbuf_t *buf)
66 bam_plbuf_reset(buf->plbuf);
67 for (p = buf->head; p->next;) {
72 buf->head = buf->tail;
74 buf->n_cur = buf->n_pre = 0;
/* Pileup callback driven by the underlying plbuf once per reference
 * position.  Assigns each read a stable display row ("level") so a
 * text viewer can draw overlapping reads on separate lines, writes the
 * row into pl[i].level, forwards the annotated pileup to the user
 * callback, then maintains the free-row list for the next position.
 *
 * NOTE(review): this extract is missing many interior lines (original
 * numbering is non-contiguous), including closing braces, the
 * declarations of i/l/p/max_level, and several branch bodies.  The
 * comments below describe only what the visible lines do. */
78 static int tview_func(uint32_t tid, uint32_t pos, int n, const bam_pileup1_t *pl, void *data)
80 bam_lplbuf_t *tv = (bam_lplbuf_t*)data;
83 // allocate memory if necessary
84 if (tv->max < n) { // enlarge
/* NOTE(review): realloc results assigned straight back -- on OOM the
 * old arrays leak and later indexing dereferences NULL. */
87 tv->cur_level = (int*)realloc(tv->cur_level, sizeof(int) * tv->max);
88 tv->pre_level = (int*)realloc(tv->pre_level, sizeof(int) * tv->max);
/* Age every free row: tick down each node's reuse countdown. */
92 for (p = tv->head; p->next; p = p->next)
93 if (p->cnt > 0) --p->cnt;
94 // calculate cur_level[]
96 for (i = l = 0; i < n; ++i) {
97 const bam_pileup1_t *p = pl + i;
/* New read (is_head path, per the missing guard): reuse the head free
 * row if its countdown has expired, else open a brand-new row. */
99 if (tv->head->next && tv->head->cnt == 0) { // then take a free slot
100 freenode_t *p = tv->head->next;
101 tv->cur_level[i] = tv->head->level;
102 mp_free(tv->mp, tv->head);
105 } else tv->cur_level[i] = ++tv->max_level;
/* Continuing read: carry its row over from the previous position. */
107 tv->cur_level[i] = tv->pre_level[l++];
108 if (p->is_tail) { // then return a free slot
/* Read ends here: record its row on the sentinel tail and append a
 * fresh sentinel, effectively pushing the row onto the free list. */
109 tv->tail->level = tv->cur_level[i];
110 tv->tail->next = mp_alloc(tv->mp);
111 tv->tail = tv->tail->next;
115 if (tv->cur_level[i] > max_level) max_level = tv->cur_level[i];
/* pl is const in the callback signature; cast away const to publish
 * the computed row to the caller's pileup records. */
116 ((bam_pileup1_t*)p)->level = tv->cur_level[i];
118 assert(l == tv->n_pre);
119 tv->func(tid, pos, n, pl, tv->user_data);
120 // sort the linked list
/* Rebuild the free list in freenode_lt order via the aux scratch
 * array, dropping rows above max_level (no longer on screen). */
123 if (tv->n_nodes + 1 > tv->m_aux) { // enlarge
124 tv->m_aux = tv->n_nodes + 1;
125 kroundup32(tv->m_aux);
126 tv->aux = (freenode_t**)realloc(tv->aux, sizeof(void*) * tv->m_aux);
128 for (p = tv->head, i = l = 0; p->next;) {
129 if (p->level > max_level) { // then discard this entry
138 tv->aux[i] = tv->tail; // add a proper tail for the loop below
141 ks_introsort(node, tv->n_nodes, tv->aux);
/* Relink the sorted nodes; aux[n_nodes] is the sentinel tail set above. */
142 for (i = 0; i < tv->n_nodes; ++i) tv->aux[i]->next = tv->aux[i+1];
143 tv->head = tv->aux[0];
144 } else tv->head = tv->tail;
147 tv->max_level = max_level;
/* NOTE(review): hard-codes sizeof(int) == 4 for these int arrays;
 * sizeof(int) * tv->n_cur would be portable. */
148 memcpy(tv->pre_level, tv->cur_level, tv->n_cur * 4);
149 // squeeze out terminated levels
/* Compact pre_level[] so that next position's carried-over rows line
 * up with its surviving (non-tail) reads. */
150 for (i = l = 0; i < n; ++i) {
151 const bam_pileup1_t *p = pl + i;
153 tv->pre_level[l++] = tv->pre_level[i];
/* Debug dump (presumably under an #ifdef in the missing lines):
 * 1-based position then each read's head/tail marks and row. */
157 fprintf(pysamerr, "%d\t", pos+1);
158 for (i = 0; i < n; ++i) {
159 const bam_pileup1_t *p = pl + i;
160 if (p->is_head) fprintf(pysamerr, "^");
161 if (p->is_tail) fprintf(pysamerr, "$");
162 fprintf(pysamerr, "%d,", p->level);
164 fprintf(pysamerr, "\n");
/* Create a levelled pileup buffer.  `func` is the user callback that
 * will receive pileups with .level filled in; `data` is passed through
 * to it.  Internally installs tview_func as the callback of a plain
 * pileup buffer.  Ownership: caller frees with bam_lplbuf_destroy.
 * NOTE(review): the lines that set tv->mp (pool creation -- mp_alloc
 * below requires it) and tv->func = func fall in missing original
 * lines 170-175 and are not visible here; calloc's result is also not
 * checked. */
169 bam_lplbuf_t *bam_lplbuf_init(bam_pileup_f func, void *data)
172 tv = (bam_lplbuf_t*)calloc(1, sizeof(bam_lplbuf_t));
/* head == tail: the free list starts as a single sentinel node. */
174 tv->head = tv->tail = mp_alloc(tv->mp);
176 tv->user_data = data;
177 tv->plbuf = bam_plbuf_init(tview_func, tv);
178 return (bam_lplbuf_t*)tv;
/* Tear down a buffer created by bam_lplbuf_init: free the level
 * arrays, the wrapped plain pileup buffer, and every node left on the
 * free list, then verify the pool has no outstanding nodes before
 * destroying it.
 * NOTE(review): the declarations of p/q, the loop body that saves
 * q = p->next, the freeing of tv->aux, mp_destroy, and free(tv) fall
 * in original lines missing from this extract (182-183, 186, 188,
 * 190-191, 193+). */
181 void bam_lplbuf_destroy(bam_lplbuf_t *tv)
184 free(tv->cur_level); free(tv->pre_level);
185 bam_plbuf_destroy(tv->plbuf);
187 for (p = tv->head; p->next;) {
189 mp_free(tv->mp, p); p = q;
/* All nodes must have been returned to the pool by now. */
192 assert(tv->mp->cnt == 0);
/* Feed one alignment into the buffer; thin wrapper that forwards to
 * the underlying plain pileup buffer (which will eventually invoke
 * tview_func, then the user callback).  Returns bam_plbuf_push's
 * result.  NOTE(review): the function's brace lines (original 198 and
 * 200) are not visible in this extract. */
197 int bam_lplbuf_push(const bam1_t *b, bam_lplbuf_t *tv)
199 return bam_plbuf_push(b, tv->plbuf);