// String-keyed hash set of read-group (RG) IDs; generates khash_t(rg) and kh_* ops.
11 KHASH_SET_INIT_STR(rg)
// Instantiate klib's introsort for uint32_t (used below to sort encoded indel types).
14 KSORT_INIT_GENERIC(uint32_t)
// Bias added to signed indel lengths so they sort correctly as unsigned 32-bit values.
16 #define MINUS_CONST 0x10000000
// Half-width of the reference window examined on each side of a candidate indel.
17 #define INDEL_WINDOW_SIZE 50
/*
 * Parse @RG lines from a SAM header text and insert into a hash set the ID of
 * every read group whose PL (platform) field occurs as a substring of `list`.
 *
 *   _hash  - existing khash_t(rg)* (opaque void*), or NULL to create one
 *   hdtext - SAM header text to scan for "@RG\t" records
 *   list   - platform filter string; an RG is kept iff its PL value is found in it
 *
 * Returns the (possibly newly created) hash as void*; returns the input/new hash
 * unchanged when hdtext/list is NULL or no @RG record is present.
 *
 * NOTE(review): this extract omits intermediate source lines (see gaps in the
 * embedded line numbers, e.g. 20, 27, 32-33); comments describe only the code visible here.
 */
19 void *bcf_call_add_rg(void *_hash, const char *hdtext, const char *list)
21 const char *s, *p, *q, *r, *t;
23 if (list == 0 || hdtext == 0) return _hash;
24 if (_hash == 0) _hash = kh_init(rg);
25 hash = (khash_t(rg)*)_hash;
26 if ((s = strstr(hdtext, "@RG\t")) == 0) return hash;
// For the current @RG record: locate the following record and the ID:/PL: tags.
28 t = strstr(s + 4, "@RG\t"); // the next @RG
29 if ((p = strstr(s, "\tID:")) != 0) p += 4;
30 if ((q = strstr(s, "\tPL:")) != 0) q += 4;
// Both tags must exist and belong to THIS record (i.e. precede the next @RG).
31 if (p && q && (t == 0 || (p < t && q < t))) { // ID and PL are both present
// Measure the tag values (terminated by tab/newline/NUL) and size a scratch buffer.
34 for (r = p; *r && *r != '\t' && *r != '\n'; ++r); lp = r - p;
35 for (r = q; *r && *r != '\t' && *r != '\n'; ++r); lq = r - q;
36 x = calloc((lp > lq? lp : lq) + 1, 1);
// Copy the PL value into x and test it against the platform list.
37 for (r = q; *r && *r != '\t' && *r != '\n'; ++r) x[r-q] = *r;
38 if (strstr(list, x)) { // insert ID to the hash table
// Reuse x for the ID value; insert only if not already present.
// NOTE(review): kh_put takes ownership of x here — presumably the omitted
// else-branch frees x; confirm against the full source.
41 for (r = p; *r && *r != '\t' && *r != '\n'; ++r) x[r-p] = *r;
43 k = kh_get(rg, hash, x);
44 if (k == kh_end(hash)) k = kh_put(rg, hash, x, &ret);
/*
 * Destroy a read-group hash created by bcf_call_add_rg(): free every stored
 * key string (they were heap-allocated at insertion). NULL is a no-op.
 * NOTE(review): the kh_destroy() call itself is on a line omitted from this
 * extract — confirm against the full source.
 */
53 void bcf_call_del_rghash(void *_hash)
56 khash_t(rg) *hash = (khash_t(rg)*)_hash;
57 if (hash == 0) return;
// Walk all buckets; only occupied ones (kh_exist) hold a valid key pointer.
58 for (k = kh_begin(hash); k < kh_end(hash); ++k)
59 if (kh_exist(hash, k))
60 free((char*)kh_key(hash, k));
/*
 * Map a reference (template) coordinate to the corresponding query (read)
 * coordinate by walking the CIGAR.
 *
 *   c       - alignment core (gives start pos and n_cigar)
 *   cigar   - packed CIGAR operations
 *   tpos    - target reference position to translate
 *   is_left - when tpos falls inside a deletion/ref-skip, choose the left (1)
 *             or right (0) flanking reference coordinate
 *   _tpos   - out: the reference coordinate actually matched
 *
 * Returns the query offset y at (or nearest to) tpos.
 * NOTE(review): several interior lines (incl. the *_tpos assignment in the
 * match branch and the final return) are omitted from this extract.
 */
64 static int tpos2qpos(const bam1_core_t *c, const uint32_t *cigar, int32_t tpos, int is_left, int32_t *_tpos)
// x tracks the reference coordinate, y the query coordinate, as ops are consumed.
66 int k, x = c->pos, y = 0, last_y = 0;
68 for (k = 0; k < c->n_cigar; ++k) {
69 int op = cigar[k] & BAM_CIGAR_MASK;
70 int l = cigar[k] >> BAM_CIGAR_SHIFT;
71 if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
// tpos before the alignment start: clamp to the current query offset.
72 if (c->pos > tpos) return y;
// tpos inside this M/=/X run: offset linearly within the run.
75 return y + (tpos - x);
// Insertions/soft clips consume query only; deletions/ref-skips consume reference only.
79 } else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
80 else if (op == BAM_CDEL || op == BAM_CREF_SKIP) {
// tpos inside a gap: report the left or right gap boundary as the matched ref pos.
82 *_tpos = is_left? x : x + l;
91 // FIXME: check if the inserted sequence is consistent with the homopolymer run
92 // l is the relative gap length and l_run is the length of the homopolymer on the reference
// Estimate a sequencing/context quality for an indel of length l: the minimum of
// an affine gap penalty (openQ + extQ per extra base) and a homopolymer-run
// penalty (tandemQ scaled by |l|/l_run; effectively disabled for runs < 3).
93 static inline int est_seqQ(const bcf_callaux_t *bca, int l, int l_run)
96 q = bca->openQ + bca->extQ * (abs(l) - 1);
97 qh = l_run >= 3? (int)(bca->tandemQ * (double)abs(l) / l_run + .499) : 1000;
98 return q < qh? q : qh;
/*
 * Estimate the length of the "indel region": how far right of `pos` the
 * reference continues to match a periodic repetition of the indel sequence
 * (the inserted bases in ins4, 2-bit encoded, or the deleted reference bases
 * when ins4 is NULL). Scoring: +1 per matching base, -10 per mismatch; the
 * scan stops once the running score goes negative, and the best-scoring
 * prefix end (max_i) defines the region.
 * NOTE(review): the return statement is on a line omitted from this extract —
 * presumably `max_i - pos`; confirm against the full source.
 */
101 static inline int est_indelreg(int pos, const char *ref, int l, char *ins4)
103 int i, j, max = 0, max_i = pos, score = 0;
105 for (i = pos + 1, j = 0; ref[i]; ++i, ++j) {
// Compare ref[i] against the j-th base (mod l) of the indel sequence.
106 if (ins4) score += (toupper(ref[i]) != "ACGTN"[(int)ins4[j%l]])? -10 : 1;
107 else score += (toupper(ref[i]) != toupper(ref[pos+1+j%l]))? -10 : 1;
108 if (score < 0) break;
109 if (max < score) max = score, max_i = i;
/*
 * Core INDEL-candidate preparation for BCF calling. Given pileups for n
 * samples at reference position `pos`, it: (1) filters reads by read group,
 * (2) enumerates distinct indel lengths ("types"), (3) builds per-sample
 * consensus sequences around pos, (4) builds a consensus for each insertion,
 * (5) realigns every read against the reference modified by each indel type
 * (kpa_glocal, twice with different error models), and (6) converts the
 * alignment scores into per-read indelQ/seqQ packed into p->aux, plus
 * bca->indel_types[]/bca->inscns for the caller.
 *
 * Returns 0 if at least one read supports a non-reference indel type, -1 to
 * skip the position (no indel, no reads after filtering, or insufficient
 * support per bca->min_frac/min_support).
 *
 * NOTE(review): this extract omits many interior lines (gaps in the embedded
 * line numbers); comments below describe only the visible code.
 */
114 int bcf_call_gap_prep(int n, int *n_plp, bam_pileup1_t **plp, int pos, bcf_callaux_t *bca, const char *ref,
117 int i, s, j, k, t, n_types, *types, max_rd_len, left, right, max_ins, *score1, *score2, max_ref2;
118 int N, K, l_run, ref_type, n_alt;
119 char *inscns = 0, *ref2, *query, **ref_sample;
120 khash_t(rg) *hash = (khash_t(rg)*)rghash;
121 if (ref == 0 || bca == 0) return -1;
122 // mark filtered reads
// Pass 1: a read survives only if its RG aux tag is present in the rghash set.
125 for (s = N = 0; s < n; ++s) {
126 for (i = 0; i < n_plp[s]; ++i) {
127 bam_pileup1_t *p = plp[s] + i;
128 const uint8_t *rg = bam_aux_get(p->b, "RG");
129 p->aux = 1; // filtered by default
131 khint_t k = kh_get(rg, hash, (const char*)(rg + 1));
132 if (k != kh_end(hash)) p->aux = 0, ++N; // not filtered
136 if (N == 0) return -1; // no reads left
138 // determine if there is a gap
139 for (s = N = 0; s < n; ++s) {
140 for (i = 0; i < n_plp[s]; ++i)
141 if (plp[s][i].indel != 0) break;
142 if (i < n_plp[s]) break;
144 if (s == n) return -1; // there is no indel at this position.
145 for (s = N = 0; s < n; ++s) N += n_plp[s]; // N is the total number of reads
146 { // find out how many types of indels are present
147 int m, n_alt = 0, n_tot = 0;
// Encode each read's indel length biased by MINUS_CONST so negative lengths
// (deletions) sort below 0 (reference) and insertions as unsigned ints.
149 aux = calloc(N + 1, 4);
151 aux[m++] = MINUS_CONST; // zero indel is always a type
152 for (s = 0; s < n; ++s) {
153 for (i = 0; i < n_plp[s]; ++i) {
154 const bam_pileup1_t *p = plp[s] + i;
155 if (rghash == 0 || p->aux == 0) {
159 aux[m++] = MINUS_CONST + p->indel;
// Track the longest query length for sizing the realignment buffers later.
162 j = bam_cigar2qlen(&p->b->core, bam1_cigar(p->b));
163 if (j > max_rd_len) max_rd_len = j;
166 ks_introsort(uint32_t, m, aux);
167 // squeeze out identical types
168 for (i = 1, n_types = 1; i < m; ++i)
169 if (aux[i] != aux[i-1]) ++n_types;
// Skip positions without enough alternate support (fraction and absolute count).
170 if (n_types == 1 || (double)n_alt / n_tot < bca->min_frac || n_alt < bca->min_support) { // then skip
171 free(aux); return -1;
175 if (bam_verbose >= 2)
176 fprintf(pysamerr, "[%s] excessive INDEL alleles at position %d. Skip the position.\n", __func__, pos + 1);
// Collect the distinct (de-biased) indel lengths, sorted ascending.
179 types = (int*)calloc(n_types, sizeof(int));
181 types[t++] = aux[0] - MINUS_CONST;
182 for (i = 1; i < m; ++i)
183 if (aux[i] != aux[i-1])
184 types[t++] = aux[i] - MINUS_CONST;
186 for (t = 0; t < n_types; ++t)
187 if (types[t] == 0) break;
188 ref_type = t; // the index of the reference type (0)
190 { // calculate left and right boundary
191 left = pos > INDEL_WINDOW_SIZE? pos - INDEL_WINDOW_SIZE : 0;
// Extend the right edge by the longest deletion (types[0] < 0 when a deletion exists).
192 right = pos + INDEL_WINDOW_SIZE;
193 if (types[0] < 0) right -= types[0];
194 // in case the alignments stand out the reference
195 for (i = pos; i < right; ++i)
196 if (ref[i] == 0) break;
199 /* The following block fixes a long-existing flaw in the INDEL
200 * calling model: the interference of nearby SNPs. However, it also
201 * reduces the power because sometimes, substitutions caused by
202 * indels are not distinguishable from true mutations. Multiple
203 * sequence realignment helps to increase the power.
205 { // construct per-sample consensus
206 int L = right - left + 1, max_i, max2_i;
207 uint32_t *cns, max, max2;
209 ref_sample = calloc(n, sizeof(void*));
// ref0[] is the window's reference in nt16 encoding (bam_nt16_table).
212 for (i = 0; i < right - left; ++i)
213 ref0[i] = bam_nt16_table[(int)ref[i+left]];
214 for (s = 0; s < n; ++s) {
215 r = ref_sample[s] = calloc(L, 1);
216 memset(cns, 0, sizeof(int) * L);
217 // collect ref and non-ref counts
// cns[i] packs two counters: low 16 bits = reads matching ref0, high 16 bits = mismatches.
218 for (i = 0; i < n_plp[s]; ++i) {
219 bam_pileup1_t *p = plp[s] + i;
221 uint32_t *cigar = bam1_cigar(b);
222 uint8_t *seq = bam1_seq(b);
223 int x = b->core.pos, y = 0;
224 for (k = 0; k < b->core.n_cigar; ++k) {
225 int op = cigar[k]&0xf;
226 int j, l = cigar[k]>>4;
227 if (op == BAM_CMATCH || op == BAM_CEQUAL || op == BAM_CDIFF) {
228 for (j = 0; j < l; ++j)
229 if (x + j >= left && x + j < right)
230 cns[x+j-left] += (bam1_seqi(seq, y+j) == ref0[x+j-left])? 1 : 0x10000;
232 } else if (op == BAM_CDEL || op == BAM_CREF_SKIP) x += l;
233 else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
236 // determine the consensus
// Start from the reference, then mask (set to ambiguity code 15 = N) the one or
// two positions with the most mismatches, unless >=70% of reads there match ref.
237 for (i = 0; i < right - left; ++i) r[i] = ref0[i];
238 max = max2 = 0; max_i = max2_i = -1;
239 for (i = 0; i < right - left; ++i) {
240 if (cns[i]>>16 >= max>>16) max2 = max, max2_i = max_i, max = cns[i], max_i = i;
241 else if (cns[i]>>16 >= max2>>16) max2 = cns[i], max2_i = i;
243 if ((double)(max&0xffff) / ((max&0xffff) + (max>>16)) >= 0.7) max_i = -1;
244 if ((double)(max2&0xffff) / ((max2&0xffff) + (max2>>16)) >= 0.7) max2_i = -1;
245 if (max_i >= 0) r[max_i] = 15;
246 if (max2_i >= 0) r[max2_i] = 15;
247 // for (i = 0; i < right - left; ++i) fputc("=ACMGRSVTWYHKDBN"[(int)r[i]], pysamerr); fputc('\n', pysamerr);
249 free(ref0); free(cns);
251 { // the length of the homopolymer run around the current position
252 int c = bam_nt16_table[(int)ref[pos + 1]];
253 if (c == 15) l_run = 1;
// Scan right from pos+2 and left from pos for bases equal to c; the omitted
// lines presumably accumulate both extents into l_run.
255 for (i = pos + 2; ref[i]; ++i)
256 if (bam_nt16_table[(int)ref[i]] != c) break;
258 for (i = pos; i >= 0; --i)
259 if (bam_nt16_table[(int)ref[i]] != c) break;
263 // construct the consensus sequence
264 max_ins = types[n_types - 1]; // max_ins is at least 0
// For each insertion type, vote per position over the inserted read bases
// (2-bit A/C/G/T counts in inscns_aux).
266 int *inscns_aux = calloc(4 * n_types * max_ins, sizeof(int));
267 // count the number of occurrences of each base at each position for each type of insertion
268 for (t = 0; t < n_types; ++t) {
270 for (s = 0; s < n; ++s) {
271 for (i = 0; i < n_plp[s]; ++i) {
272 bam_pileup1_t *p = plp[s] + i;
273 if (p->indel == types[t]) {
274 uint8_t *seq = bam1_seq(p->b);
275 for (k = 1; k <= p->indel; ++k) {
276 int c = bam_nt16_nt4_table[bam1_seqi(seq, p->qpos + k)];
277 if (c < 4) ++inscns_aux[(t*max_ins+(k-1))*4 + c];
284 // use the majority rule to construct the consensus
285 inscns = calloc(n_types * max_ins, 1);
286 for (t = 0; t < n_types; ++t) {
287 for (j = 0; j < types[t]; ++j) {
288 int max = 0, max_k = -1, *ia = &inscns_aux[(t*max_ins+j)*4];
289 for (k = 0; k < 4; ++k)
291 max = ia[k], max_k = k;
292 inscns[t*max_ins + j] = max? max_k : 4;
297 // compute the likelihood given each type of indel for each read
// ref2 holds the per-sample consensus with one indel type applied; sized for the
// window plus the largest insertion or deletion on either side.
298 max_ref2 = right - left + 2 + 2 * (max_ins > -types[0]? max_ins : -types[0]);
299 ref2 = calloc(max_ref2, 1);
300 query = calloc(right - left + max_rd_len + max_ins + 2, 1);
301 score1 = calloc(N * n_types, sizeof(int));
302 score2 = calloc(N * n_types, sizeof(int));
304 for (t = 0; t < n_types; ++t) {
// Two kpa_glocal parameter sets: apf1 (looser error model) and apf2 (stricter);
// band width scales with the indel length.
306 kpa_par_t apf1 = { 1e-4, 1e-2, 10 }, apf2 = { 1e-6, 1e-3, 10 };
307 apf1.bw = apf2.bw = abs(types[t]) + 3;
// Record the indel-region estimate for this type (largest wins in bca->indelreg).
309 if (types[t] == 0) ir = 0;
310 else if (types[t] > 0) ir = est_indelreg(pos, ref, types[t], &inscns[t*max_ins]);
311 else ir = est_indelreg(pos, ref, -types[t], 0);
312 if (ir > bca->indelreg) bca->indelreg = ir;
313 // fprintf(pysamerr, "%d, %d, %d\n", pos, types[t], ir);
315 for (s = K = 0; s < n; ++s) {
// Build ref2 in nt4 encoding: consensus up to pos, then the insertion consensus
// (or skip deleted bases), then the consensus to the right edge; pad with 4 (N).
317 for (k = 0, j = left; j <= pos; ++j)
318 ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
319 if (types[t] <= 0) j += -types[t];
320 else for (l = 0; l < types[t]; ++l)
321 ref2[k++] = inscns[t*max_ins + l];
322 for (; j < right && ref[j]; ++j)
323 ref2[k++] = bam_nt16_nt4_table[(int)ref_sample[s][j-left]];
324 for (; k < max_ref2; ++k) ref2[k] = 4;
325 if (j < right) right = j;
326 // align each read to ref2
327 for (i = 0; i < n_plp[s]; ++i, ++K) {
328 bam_pileup1_t *p = plp[s] + i;
329 int qbeg, qend, tbeg, tend, sc, kk;
330 uint8_t *seq = bam1_seq(p->b);
331 uint32_t *cigar = bam1_cigar(p->b);
332 if (p->b->core.flag&4) continue; // unmapped reads
333 // FIXME: the following loop should be better moved outside; nonetheless, realignment should be much slower anyway.
// Reads with reference skips (N ops, i.e. spliced alignments) are excluded.
334 for (kk = 0; kk < p->b->core.n_cigar; ++kk)
335 if ((cigar[kk]&BAM_CIGAR_MASK) == BAM_CREF_SKIP) break;
336 if (kk < p->b->core.n_cigar) continue;
337 // FIXME: the following skips soft clips, but using them may be more sensitive.
338 // determine the start and end of sequences for alignment
339 qbeg = tpos2qpos(&p->b->core, bam1_cigar(p->b), left, 0, &tbeg);
340 qend = tpos2qpos(&p->b->core, bam1_cigar(p->b), right, 1, &tend);
343 tbeg = tbeg - l > left? tbeg - l : left;
345 // write the query sequence
346 for (l = qbeg; l < qend; ++l)
347 query[l - qbeg] = bam_nt16_nt4_table[bam1_seqi(seq, l)];
348 { // do realignment; this is the bottleneck
349 const uint8_t *qual = bam1_qual(p->b), *bq;
// Effective qualities: base quality adjusted by the BAQ-style ZQ tag when present,
// clamped to [7,30].
351 qq = calloc(qend - qbeg, 1);
352 bq = (uint8_t*)bam_aux_get(p->b, "ZQ");
353 if (bq) ++bq; // skip type
354 for (l = qbeg; l < qend; ++l) {
355 qq[l - qbeg] = bq? qual[l] + (bq[l] - 64) : qual[l];
356 if (qq[l - qbeg] > 30) qq[l - qbeg] = 30;
357 if (qq[l - qbeg] < 7) qq[l - qbeg] = 7;
// Glocal alignment of the read against ref2; score packed as sc<<8 | per-base
// normalized score (capped at 255), for score1 (apf1) and score2 (apf2).
359 sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
360 (uint8_t*)query, qend - qbeg, qq, &apf1, 0, 0);
361 l = (int)(100. * sc / (qend - qbeg) + .499); // used for adjusting indelQ below
362 if (l > 255) l = 255;
363 score1[K*n_types + t] = score2[K*n_types + t] = sc<<8 | l;
365 sc = kpa_glocal((uint8_t*)ref2 + tbeg - left, tend - tbeg + abs(types[t]),
366 (uint8_t*)query, qend - qbeg, qq, &apf2, 0, 0);
367 l = (int)(100. * sc / (qend - qbeg) + .499);
368 if (l > 255) l = 255;
369 score2[K*n_types + t] = sc<<8 | l;
// Debug dump of the padded reference and query (presumably under a verbosity
// guard on an omitted line).
374 for (l = 0; l < tend - tbeg + abs(types[t]); ++l)
375 fputc("ACGTN"[(int)ref2[tbeg-left+l]], pysamerr);
376 fputc('\n', pysamerr);
377 for (l = 0; l < qend - qbeg; ++l) fputc("ACGTN"[(int)query[l]], pysamerr);
378 fputc('\n', pysamerr);
379 fprintf(pysamerr, "pos=%d type=%d read=%d:%d name=%s qbeg=%d tbeg=%d score=%d\n", pos, types[t], s, i, bam1_qname(p->b), qbeg, tbeg, sc);
384 free(ref2); free(query);
// Per-read quality assignment: sort the per-type scores (each packed as
// score<<6 | type-index) and derive indelQ from the gap between the best and
// the reference (or second-best) type, for both error models.
387 sc = alloca(n_types * sizeof(int));
388 sumq = alloca(n_types * sizeof(int));
389 memset(sumq, 0, sizeof(int) * n_types);
390 for (s = K = 0; s < n; ++s) {
391 for (i = 0; i < n_plp[s]; ++i, ++K) {
392 bam_pileup1_t *p = plp[s] + i;
393 int *sct = &score1[K*n_types], indelQ1, indelQ2, seqQ, indelQ;
394 for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
395 for (t = 1; t < n_types; ++t) // insertion sort
396 for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
397 tmp = sc[j], sc[j] = sc[j-1], sc[j-1] = tmp;
398 /* errmod_cal() assumes that if the call is wrong, the
399 * likelihoods of other events are equal. This is about
400 * right for substitutions, but is not desired for
401 * indels. To reuse errmod_cal(), I have to make
402 * compromise for multi-allelic indels.
404 if ((sc[0]&0x3f) == ref_type) {
405 indelQ1 = (sc[1]>>14) - (sc[0]>>14);
406 seqQ = est_seqQ(bca, types[sc[1]&0x3f], l_run);
408 for (t = 0; t < n_types; ++t) // look for the reference type
409 if ((sc[t]&0x3f) == ref_type) break;
410 indelQ1 = (sc[t]>>14) - (sc[0]>>14);
411 seqQ = est_seqQ(bca, types[sc[0]&0x3f], l_run);
// Scale indelQ down by the best type's per-base score (low 8 bits of sc>>6).
413 tmp = sc[0]>>6 & 0xff;
414 indelQ1 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ1 + .499); // reduce indelQ
// Repeat the same derivation with the stricter score2 model.
415 sct = &score2[K*n_types];
416 for (t = 0; t < n_types; ++t) sc[t] = sct[t]<<6 | t;
417 for (t = 1; t < n_types; ++t) // insertion sort
418 for (j = t; j > 0 && sc[j] < sc[j-1]; --j)
419 tmp = sc[j], sc[j] = sc[j-1], sc[j-1] = tmp;
420 if ((sc[0]&0x3f) == ref_type) {
421 indelQ2 = (sc[1]>>14) - (sc[0]>>14);
423 for (t = 0; t < n_types; ++t) // look for the reference type
424 if ((sc[t]&0x3f) == ref_type) break;
425 indelQ2 = (sc[t]>>14) - (sc[0]>>14);
427 tmp = sc[0]>>6 & 0xff;
428 indelQ2 = tmp > 111? 0 : (int)((1. - tmp/111.) * indelQ2 + .499);
429 // pick the smaller between indelQ1 and indelQ2
430 indelQ = indelQ1 < indelQ2? indelQ1 : indelQ2;
431 if (indelQ > 255) indelQ = 255;
432 if (seqQ > 255) seqQ = 255;
433 p->aux = (sc[0]&0x3f)<<16 | seqQ<<8 | indelQ; // use 22 bits in total
434 sumq[sc[0]&0x3f] += indelQ < seqQ? indelQ : seqQ;
435 // fprintf(pysamerr, "pos=%d read=%d:%d name=%s call=%d indelQ=%d seqQ=%d\n", pos, s, i, bam1_qname(p->b), types[sc[0]&0x3f], indelQ, seqQ);
438 // determine bca->indel_types[] and bca->inscns
// Rank types by total support (sumq, packed as sum<<6 | type-index, sorted
// descending), force the reference type to slot 0, and export the top 4.
439 bca->maxins = max_ins;
440 bca->inscns = realloc(bca->inscns, bca->maxins * 4);
441 for (t = 0; t < n_types; ++t)
442 sumq[t] = sumq[t]<<6 | t;
443 for (t = 1; t < n_types; ++t) // insertion sort
444 for (j = t; j > 0 && sumq[j] > sumq[j-1]; --j)
445 tmp = sumq[j], sumq[j] = sumq[j-1], sumq[j-1] = tmp;
446 for (t = 0; t < n_types; ++t) // look for the reference type
447 if ((sumq[t]&0x3f) == ref_type) break;
448 if (t) { // then move the reference type to the first
450 for (; t > 0; --t) sumq[t] = sumq[t-1];
453 for (t = 0; t < 4; ++t) bca->indel_types[t] = B2B_INDEL_NULL;
454 for (t = 0; t < 4 && t < n_types; ++t) {
455 bca->indel_types[t] = types[sumq[t]&0x3f];
456 memcpy(&bca->inscns[t * bca->maxins], &inscns[(sumq[t]&0x3f) * max_ins], bca->maxins);
// Re-encode each read's p->aux against the exported top-4 types; j==4 means the
// read's type was dropped. Count reads supporting a non-reference slot.
459 for (s = n_alt = 0; s < n; ++s) {
460 for (i = 0; i < n_plp[s]; ++i) {
461 bam_pileup1_t *p = plp[s] + i;
462 int x = types[p->aux>>16&0x3f];
463 for (j = 0; j < 4; ++j)
464 if (x == bca->indel_types[j]) break;
465 p->aux = j<<16 | (j == 4? 0 : (p->aux&0xffff));
466 if ((p->aux>>16&0x3f) > 0) ++n_alt;
467 // fprintf(pysamerr, "X pos=%d read=%d:%d name=%s call=%d type=%d q=%d seqQ=%d\n", pos, s, i, bam1_qname(p->b), p->aux>>16&63, bca->indel_types[p->aux>>16&63], p->aux&0xff, p->aux>>8&0xff);
471 free(score1); free(score2);
473 for (i = 0; i < n; ++i) free(ref_sample[i]);
475 free(types); free(inscns);
476 return n_alt > 0? 0 : -1;