// Sentinel CIGAR-walk state: k == -1 marks a read whose CIGAR has not been
// resolved yet (see resolve_cigar2, which checks s->k == -1).
13 static cstate_t g_cstate_null = { -1, 0, 0, 0 };
/* Node of the singly linked list of alignments buffered by the pileup engine.
 * NOTE(review): the other fields (the bam1_t copy `b`, `beg`, `end`, and the
 * cstate_t `s` used elsewhere in this file) are not visible in this excerpt. */
15 typedef struct __linkbuf_t {
19 struct __linkbuf_t *next;
22 /* --- BEGIN: Memory pool */
/* Allocate a zero-initialized memory pool used to recycle lbnode_t nodes.
 * NOTE(review): the calloc result is used without a NULL check in the visible
 * lines — confirm the elided lines do not add one. */
29 static mempool_t *mp_init()
32 mp = (mempool_t*)calloc(1, sizeof(mempool_t));
/* Tear down the pool: for every parked node, release the bam record's data
 * buffer (each pooled node owns its own b.data allocation).
 * NOTE(review): the frees of the node itself, mp->buf and mp are in elided
 * lines of this excerpt. */
35 static void mp_destroy(mempool_t *mp)
38 for (k = 0; k < mp->n; ++k) {
39 free(mp->buf[k]->b.data);
/* Get a node: pop a recycled one off the pool stack, or calloc a fresh,
 * zeroed node when the pool is empty. */
45 static inline lbnode_t *mp_alloc(mempool_t *mp)
48 if (mp->n == 0) return (lbnode_t*)calloc(1, sizeof(lbnode_t));
49 else return mp->buf[--mp->n];
/* Return node p to the pool for reuse, growing the pool's pointer array
 * (doubling, starting at 256) when it is full.
 * NOTE(review): `mp->buf = realloc(mp->buf, ...)` overwrites the pointer
 * directly — on realloc failure the old array leaks and NULL is stored.
 * NOTE(review): the line that actually pushes p onto mp->buf is elided here. */
51 static inline void mp_free(mempool_t *mp, lbnode_t *p)
53 --mp->cnt; p->next = 0; // clear lbnode_t::next here
54 if (mp->n == mp->max) {
55 mp->max = mp->max? mp->max<<1 : 256;
56 mp->buf = (lbnode_t**)realloc(mp->buf, sizeof(lbnode_t*) * mp->max);
61 /* --- END: Memory pool */
63 /* --- BEGIN: Auxiliary functions */
65 /* s->k: the index of the CIGAR operator that has just been processed.
66 s->x: the reference coordinate of the start of s->k
67 s->y: the query coordinate of the start of s->k
/* Fill one pileup record *p for reference position pos, advancing the cached
 * CIGAR-walk state *s (s->k = current CIGAR op index, s->x = reference start
 * of that op, s->y = query start of that op) so that the op at s->k covers
 * pos. Sets p->qpos, p->is_del, p->indel, p->is_refskip, p->is_head and
 * p->is_tail. Uses _cop/_cln to split a packed CIGAR word into op and length.
 * NOTE(review): this excerpt is truncated — the binding of `b`, the local
 * declarations (k, l3) and the return statement are in elided lines. */
69 static inline int resolve_cigar2(bam_pileup1_t *p, uint32_t pos, cstate_t *s)
71 #define _cop(c) ((c)&BAM_CIGAR_MASK)
72 #define _cln(c) ((c)>>BAM_CIGAR_SHIFT)
75 bam1_core_t *c = &b->core;
76 uint32_t *cigar = bam1_cigar(b);
78 // determine the current CIGAR operation
79 // fprintf(pysamerr, "%s\tpos=%d\tend=%d\t(%d,%d,%d)\n", bam1_qname(b), pos, s->end, s->k, s->x, s->y);
80 if (s->k == -1) { // never processed
82 if (c->n_cigar == 1) { // just one operation, save a loop
83 if (_cop(cigar[0]) == BAM_CMATCH) s->k = 0, s->x = c->pos, s->y = 0;
84 } else { // find the first match or deletion
85 for (k = 0, s->x = c->pos, s->y = 0; k < c->n_cigar; ++k) {
86 int op = _cop(cigar[k]);
87 int l = _cln(cigar[k]);
88 if (op == BAM_CMATCH || op == BAM_CDEL) break;
89 else if (op == BAM_CREF_SKIP) s->x += l;
90 else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) s->y += l;
92 assert(k < c->n_cigar);
95 } else { // the read has been processed before
96 int op, l = _cln(cigar[s->k]);
97 if (pos - s->x >= l) { // jump to the next operation
98 assert(s->k < c->n_cigar); // otherwise a bug: this function should not be called in this case
99 op = _cop(cigar[s->k+1]);
100 if (op == BAM_CMATCH || op == BAM_CDEL || op == BAM_CREF_SKIP) { // jump to the next without a loop
101 if (_cop(cigar[s->k]) == BAM_CMATCH) s->y += l;
104 } else { // find the next M/D/N
105 if (_cop(cigar[s->k]) == BAM_CMATCH) s->y += l;
107 for (k = s->k + 1; k < c->n_cigar; ++k) {
108 op = _cop(cigar[k]), l = _cln(cigar[k]);
109 if (op == BAM_CMATCH || op == BAM_CDEL || op == BAM_CREF_SKIP) break;
110 else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) s->y += l;
114 assert(s->k < c->n_cigar); // otherwise a bug
115 } // else, do nothing
117 { // collect pileup information
119 op = _cop(cigar[s->k]); l = _cln(cigar[s->k]);
120 p->is_del = p->indel = p->is_refskip = 0;
121 if (s->x + l - 1 == pos && s->k + 1 < c->n_cigar) { // peek the next operation
122 int op2 = _cop(cigar[s->k+1]);
123 int l2 = _cln(cigar[s->k+1]);
124 if (op2 == BAM_CDEL) p->indel = -(int)l2;
125 else if (op2 == BAM_CINS) p->indel = l2;
126 else if (op2 == BAM_CPAD && s->k + 2 < c->n_cigar) { // not working for adjacent padding
128 for (k = s->k + 2; k < c->n_cigar; ++k) {
129 op2 = _cop(cigar[k]); l2 = _cln(cigar[k]);
130 if (op2 == BAM_CINS) l3 += l2;
131 else if (op2 == BAM_CDEL || op2 == BAM_CMATCH || op2 == BAM_CREF_SKIP) break;
133 if (l3 > 0) p->indel = l3;
136 if (op == BAM_CMATCH) {
137 p->qpos = s->y + (pos - s->x);
138 } else if (op == BAM_CDEL || op == BAM_CREF_SKIP) {
139 p->is_del = 1; p->qpos = s->y; // FIXME: distinguish D and N!!!!!
140 p->is_refskip = (op == BAM_CREF_SKIP);
141 } // cannot be other operations; otherwise a bug
142 p->is_head = (pos == c->pos); p->is_tail = (pos == s->end);
147 /* --- END: Auxiliary functions */
// Interior of the pileup iterator struct (its header line is not visible in
// this excerpt). head/tail/dummy form the buffered-read linked list (tail is
// a sentinel node, dummy a scratch predecessor used while sweeping the list);
// (tid,pos) is the position being emitted, (max_tid,max_pos) the furthest
// position pushed so far; is_eof/flag_mask/max_plp/error/maxcnt control
// termination, read filtering, pileup-array capacity, error latching and the
// per-position depth cap.
155 lbnode_t *head, *tail, *dummy;
156 int32_t tid, pos, max_tid, max_pos;
157 int is_eof, flag_mask, max_plp, error, maxcnt;
159 // for the "auto" interface only
/* Create a pileup iterator. func/data are the read-fetching callback and its
 * context for the bam_plp_auto() interface (pass 0,0 for the push interface).
 * Allocates the memory pool, the sentinel tail node and the dummy node, and
 * starts with max_tid/max_pos at -1 (nothing pushed yet).
 * NOTE(review): calloc result unchecked in the visible lines; the assignments
 * of func/data and the return are in elided lines. */
165 bam_plp_t bam_plp_init(bam_plp_auto_f func, void *data)
168 iter = calloc(1, sizeof(struct __bam_plp_t));
169 iter->mp = mp_init();
170 iter->head = iter->tail = mp_alloc(iter->mp);
171 iter->dummy = mp_alloc(iter->mp);
172 iter->max_tid = iter->max_pos = -1;
173 iter->flag_mask = BAM_DEF_MASK;
178 iter->b = bam_init1();
/* Free a pileup iterator: park the dummy and the sentinel head node back in
 * the pool, warn (but continue) if the pool's live-node count is nonzero —
 * i.e. some pushed reads were never consumed — then destroy the pool and the
 * scratch bam record owned by the auto interface.
 * NOTE(review): the frees of iter->plp and iter itself are in elided lines. */
183 void bam_plp_destroy(bam_plp_t iter)
185 mp_free(iter->mp, iter->dummy);
186 mp_free(iter->mp, iter->head);
187 if (iter->mp->cnt != 0)
188 fprintf(pysamerr, "[bam_plp_destroy] memory leak: %d. Continue anyway.\n", iter->mp->cnt);
189 mp_destroy(iter->mp);
190 if (iter->b) bam_destroy1(iter->b);
/* Emit the next pileup column, or return 0 when no column can be produced
 * yet (more reads must be pushed) or the iterator is finished/errored.
 * On success *_tid/*_pos give the column's coordinates and *_n_plp its depth;
 * on error *_n_plp is set to -1. A column at iter->pos is complete once EOF
 * was signalled or a read starting beyond (max_tid,max_pos) guarantees no
 * future read can overlap it. Each sweep walks the buffered-read list with
 * `dummy` as predecessor: reads ending at or before pos are recycled to the
 * pool, reads covering pos are resolved into iter->plp (array doubled on
 * demand, starting at 256), then (tid,pos) advances — to the next reference,
 * to the next read start, or contiguously by one.
 * NOTE(review): several lines are elided (local declarations of n_plp/p/q,
 * the error latch on unsorted input, the final return). */
195 const bam_pileup1_t *bam_plp_next(bam_plp_t iter, int *_tid, int *_pos, int *_n_plp)
197 if (iter->error) { *_n_plp = -1; return 0; }
199 if (iter->is_eof && iter->head->next == 0) return 0;
200 while (iter->is_eof || iter->max_tid > iter->tid || (iter->max_tid == iter->tid && iter->max_pos > iter->pos)) {
203 // write iter->plp at iter->pos
204 iter->dummy->next = iter->head;
205 for (p = iter->head, q = iter->dummy; p->next; q = p, p = p->next) {
206 if (p->b.core.tid < iter->tid || (p->b.core.tid == iter->tid && p->end <= iter->pos)) { // then remove
207 q->next = p->next; mp_free(iter->mp, p); p = q;
208 } else if (p->b.core.tid == iter->tid && p->beg <= iter->pos) { // here: p->end > pos; then add to pileup
209 if (n_plp == iter->max_plp) { // then double the capacity
210 iter->max_plp = iter->max_plp? iter->max_plp<<1 : 256;
211 iter->plp = (bam_pileup1_t*)realloc(iter->plp, sizeof(bam_pileup1_t) * iter->max_plp);
213 iter->plp[n_plp].b = &p->b;
214 if (resolve_cigar2(iter->plp + n_plp, iter->pos, &p->s)) ++n_plp; // actually always true...
217 iter->head = iter->dummy->next; // dummy->next may be changed
218 *_n_plp = n_plp; *_tid = iter->tid; *_pos = iter->pos;
219 // update iter->tid and iter->pos
220 if (iter->head->next) {
221 if (iter->tid > iter->head->b.core.tid) {
222 fprintf(pysamerr, "[%s] unsorted input. Pileup aborts.\n", __func__);
228 if (iter->tid < iter->head->b.core.tid) { // come to a new reference sequence
229 iter->tid = iter->head->b.core.tid; iter->pos = iter->head->beg; // jump to the next reference
230 } else if (iter->pos < iter->head->beg) { // here: tid == head->b.core.tid
231 iter->pos = iter->head->beg; // jump to the next position
232 } else ++iter->pos; // scan contiguously
234 if (n_plp) return iter->plp;
235 if (iter->is_eof && iter->head->next == 0) break;
240 int bam_plp_push(bam_plp_t iter, const bam1_t *b)
242 if (iter->error) return -1;
244 if (b->core.tid < 0) return 0;
245 if (b->core.flag & iter->flag_mask) return 0;
246 if (iter->tid == b->core.tid && iter->pos == b->core.pos && iter->mp->cnt > iter->maxcnt) return 0;
247 bam_copy1(&iter->tail->b, b);
248 iter->tail->beg = b->core.pos; iter->tail->end = bam_calend(&b->core, bam1_cigar(b));
249 iter->tail->s = g_cstate_null; iter->tail->s.end = iter->tail->end - 1; // initialize cstate_t
250 if (b->core.tid < iter->max_tid) {
251 fprintf(pysamerr, "[bam_pileup_core] the input is not sorted (chromosomes out of order)\n");
255 if ((b->core.tid == iter->max_tid) && (iter->tail->beg < iter->max_pos)) {
256 fprintf(pysamerr, "[bam_pileup_core] the input is not sorted (reads out of order)\n");
260 iter->max_tid = b->core.tid; iter->max_pos = iter->tail->beg;
261 if (iter->tail->end > iter->pos || iter->tail->b.core.tid > iter->tid) {
262 iter->tail->next = mp_alloc(iter->mp);
263 iter->tail = iter->tail->next;
265 } else iter->is_eof = 1;
/* Pull-style wrapper around next/push: return the next pileup column,
 * fetching alignments via the iterator's callback as needed. Returns 0 at
 * true EOF or on error (with *_n_plp = -1 when no callback is set or the
 * iterator already errored). When the callback is exhausted, pushes 0 to
 * flag EOF and drains the remaining columns.
 * NOTE(review): the error path inside the push-failure branch and the final
 * return are in elided lines of this excerpt. */
269 const bam_pileup1_t *bam_plp_auto(bam_plp_t iter, int *_tid, int *_pos, int *_n_plp)
271 const bam_pileup1_t *plp;
272 if (iter->func == 0 || iter->error) { *_n_plp = -1; return 0; }
273 if ((plp = bam_plp_next(iter, _tid, _pos, _n_plp)) != 0) return plp;
274 else { // no pileup line can be obtained; read alignments
276 if (iter->is_eof) return 0;
277 while (iter->func(iter->data, iter->b) >= 0) {
278 if (bam_plp_push(iter, iter->b) < 0) {
282 if ((plp = bam_plp_next(iter, _tid, _pos, _n_plp)) != 0) return plp;
283 // otherwise no pileup line can be returned; read the next alignment.
285 bam_plp_push(iter, 0);
286 if ((plp = bam_plp_next(iter, _tid, _pos, _n_plp)) != 0) return plp;
/* Reset the iterator so it can be reused from the start: rewind the position
 * and high-water marks, recycle every buffered read back to the pool, and
 * collapse the list to the lone sentinel tail node.
 * NOTE(review): the loop-advance line (saving p->next before mp_free) is
 * elided here; is_eof/error resets, if any, are also not visible. */
291 void bam_plp_reset(bam_plp_t iter)
294 iter->max_tid = iter->max_pos = -1;
295 iter->tid = iter->pos = 0;
297 for (p = iter->head; p->next;) {
299 mp_free(iter->mp, p);
302 iter->head = iter->tail;
/* Set the FLAG filter used by bam_plp_push: a negative mask restores the
 * default; otherwise unmapped reads are always excluded in addition to the
 * caller's bits. */
305 void bam_plp_set_mask(bam_plp_t iter, int mask)
307 iter->flag_mask = mask < 0? BAM_DEF_MASK : (BAM_FUNMAP | mask);
/* Set the per-position depth cap consulted by bam_plp_push.
 * NOTE(review): no validation visible — a non-positive maxcnt is stored
 * as-is. */
310 void bam_plp_set_maxcnt(bam_plp_t iter, int maxcnt)
312 iter->maxcnt = maxcnt;
/* Convenience driver: stream every alignment from fp through a plbuf pileup
 * buffer, invoking func(tid, pos, n_plp, plp, func_data) for each column,
 * then flush with a NULL push and destroy the buffer.
 * NOTE(review): the allocation of the scratch bam1_t `b`, its destruction,
 * and the return value are in elided lines; the final bam_read1 return code
 * `ret` is apparently what gets returned — confirm against the full file. */
319 int bam_pileup_file(bamFile fp, int mask, bam_pileup_f func, void *func_data)
325 buf = bam_plbuf_init(func, func_data);
326 bam_plbuf_set_mask(buf, mask);
327 while ((ret = bam_read1(fp, b)) >= 0)
328 bam_plbuf_push(b, buf);
329 bam_plbuf_push(0, buf);
330 bam_plbuf_destroy(buf);
/* Thin wrapper: forward the FLAG mask to the underlying plp iterator. */
335 void bam_plbuf_set_mask(bam_plbuf_t *buf, int mask)
337 bam_plp_set_mask(buf->iter, mask);
/* Thin wrapper: reset the underlying plp iterator for reuse. */
340 void bam_plbuf_reset(bam_plbuf_t *buf)
342 bam_plp_reset(buf->iter);
/* Create a callback-style pileup buffer wrapping a push-mode plp iterator
 * (bam_plp_init with no auto callback). func is invoked per column by
 * bam_plbuf_push.
 * NOTE(review): calloc unchecked in the visible lines; the assignments of
 * buf->func/buf->data and the return are elided. */
345 bam_plbuf_t *bam_plbuf_init(bam_pileup_f func, void *data)
348 buf = calloc(1, sizeof(bam_plbuf_t));
349 buf->iter = bam_plp_init(0, 0);
/* Destroy the wrapped iterator (the free of buf itself is in an elided
 * line of this excerpt). */
355 void bam_plbuf_destroy(bam_plbuf_t *buf)
357 bam_plp_destroy(buf->iter);
/* Push one alignment (or 0 for EOF) and fire buf->func for every pileup
 * column that becomes complete as a result. Propagates a negative push
 * return code to the caller.
 * NOTE(review): the final return statement is in an elided line. */
361 int bam_plbuf_push(const bam1_t *b, bam_plbuf_t *buf)
363 int ret, n_plp, tid, pos;
364 const bam_pileup1_t *plp;
365 ret = bam_plp_push(buf->iter, b);
366 if (ret < 0) return ret;
367 while ((plp = bam_plp_next(buf->iter, &tid, &pos, &n_plp)) != 0)
368 buf->func(tid, pos, n_plp, plp, buf->data);
/* Multi-sample pileup state: one plp iterator per input, with per-input
 * current column pointers. NOTE(review): the other fields used below
 * (n, min, pos[], n_plp[], iter[]) are in elided lines of this excerpt. */
376 struct __bam_mplp_t {
381 const bam_pileup1_t **plp;
/* Create a multi-iterator over n inputs, all sharing the same fetch callback
 * but each with its own data[i] context. Positions are packed as
 * (uint64_t)tid<<32 | pos, initialized to the UINT64_MAX sentinel so every
 * input is advanced on the first bam_mplp_auto call.
 * NOTE(review): `calloc(n, 8)` hard-codes sizeof(uint64_t) — would be safer
 * as sizeof(*iter->pos); calloc results are unchecked in the visible lines,
 * and the assignment of iter->n and the return are elided. */
384 bam_mplp_t bam_mplp_init(int n, bam_plp_auto_f func, void **data)
388 iter = calloc(1, sizeof(struct __bam_mplp_t));
389 iter->pos = calloc(n, 8);
390 iter->n_plp = calloc(n, sizeof(int));
391 iter->plp = calloc(n, sizeof(void*));
392 iter->iter = calloc(n, sizeof(void*));
394 iter->min = (uint64_t)-1;
395 for (i = 0; i < n; ++i) {
396 iter->iter[i] = bam_plp_init(func, data[i]);
397 iter->pos[i] = iter->min;
/* Apply the same depth cap to every per-input iterator (pokes the struct
 * field directly rather than calling bam_plp_set_maxcnt). */
402 void bam_mplp_set_maxcnt(bam_mplp_t iter, int maxcnt)
405 for (i = 0; i < iter->n; ++i)
406 iter->iter[i]->maxcnt = maxcnt;
/* Destroy every per-input iterator and the four parallel arrays.
 * NOTE(review): the free of iter itself is in an elided line. */
409 void bam_mplp_destroy(bam_mplp_t iter)
412 for (i = 0; i < iter->n; ++i) bam_plp_destroy(iter->iter[i]);
413 free(iter->iter); free(iter->pos); free(iter->n_plp); free(iter->plp);
/* Advance all inputs in lock-step to the smallest uncovered (tid,pos):
 * pull a new column from every input currently sitting at iter->min, take
 * the minimum of the packed positions as the next column, and expose each
 * input's depth/records in n_plp[i]/plp[i] (zeroed for inputs not at the
 * minimum). Returns 0 when all inputs are exhausted (new_min stays at the
 * UINT64_MAX sentinel).
 * NOTE(review): this function is truncated at the end of the excerpt — the
 * update of iter->min to new_min and the return value are not visible.
 * The packing assumes tid >= 0 and pos fits in 32 bits. */
417 int bam_mplp_auto(bam_mplp_t iter, int *_tid, int *_pos, int *n_plp, const bam_pileup1_t **plp)
420 uint64_t new_min = (uint64_t)-1;
421 for (i = 0; i < iter->n; ++i) {
422 if (iter->pos[i] == iter->min) {
424 iter->plp[i] = bam_plp_auto(iter->iter[i], &tid, &pos, &iter->n_plp[i]);
425 iter->pos[i] = (uint64_t)tid<<32 | pos;
427 if (iter->plp[i] && iter->pos[i] < new_min) new_min = iter->pos[i];
430 if (new_min == (uint64_t)-1) return 0;
431 *_tid = new_min>>32; *_pos = (uint32_t)new_min;
432 for (i = 0; i < iter->n; ++i) {
433 if (iter->pos[i] == iter->min) { // FIXME: valgrind reports "uninitialised value(s) at this line"
434 n_plp[i] = iter->n_plp[i], plp[i] = iter->plp[i];
436 } else n_plp[i] = 0, plp[i] = 0;