#
#  makerdsfrombowtie.py
#  ENRAGE
#
#  Created by Ali Mortazavi on 10/20/08.
#
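#
#  converts native (non-SAM) bowtie alignment output into an ERANGE read
#  dataset (RDS) file; when a gene model file is supplied with --RNA, hits to
#  splice-junction references are mapped back to genomic coordinates.
#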

try:
    import psyco
    psyco.full()
except:
    pass

import sys
import string
import optparse
from commoncode import writeLog, getConfigParser, getConfigOption, getConfigBoolOption, getConfigIntOption
import ReadDataset

verstring = "makerdsfrombowtie: version 4.2"
print verstring

def main(argv=None):
    if not argv:
        argv = sys.argv

    usage = "usage: python %prog label infilename outrdsfile [propertyName::propertyValue] [options]"

    parser = getParser(usage)
    (options, args) = parser.parse_args(argv[1:])

    if len(args) < 3:
        print usage
        sys.exit(1)

    label = args[0]
    filename = args[1]
    outdbname = args[2]

    propertyList = []
    for arg in args:
        if "::" in arg:
            (pname, pvalue) = arg.strip().split("::")
            propertyList.append((pname, pvalue))

    makerdsfrombowtie(label, filename, outdbname, options.genedatafilename, options.init,
                      options.doIndex, options.spacer, options.trimReadID, options.forceID,
                      options.flip, options.verbose, options.stripSpace, options.cachePages,
                      propertyList)


def getParser(usage):
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("--RNA", dest="genedatafilename")
    parser.add_option("--append", action="store_false", dest="init")
    parser.add_option("--index", action="store_true", dest="doIndex")
    parser.add_option("--spacer", type="int", dest="spacer")
    parser.add_option("--rawreadID", action="store_false", dest="trimReadID")
    parser.add_option("--forcepair", type="int", dest="forceID")
    parser.add_option("--flip", action="store_true", dest="flip")
    parser.add_option("--verbose", action="store_true", dest="verbose")
    parser.add_option("--strip", action="store_true", dest="stripSpace")
    parser.add_option("--cache", type="int", dest="cachePages")

    configParser = getConfigParser()
    section = "makerdsfrombowtie"
    genedatafilename = getConfigOption(configParser, section, "genedatafilename", None)
    init = getConfigBoolOption(configParser, section, "init", True)
    doIndex = getConfigBoolOption(configParser, section, "doIndex", False)
    spacer = getConfigIntOption(configParser, section, "spacer", 2)
    trimReadID = getConfigBoolOption(configParser, section, "trimReadID", True)
    forceID = getConfigOption(configParser, section, "forceID", None)
    flip = getConfigBoolOption(configParser, section, "flip", False)
    verbose = getConfigBoolOption(configParser, section, "verbose", False)
    stripSpace = getConfigBoolOption(configParser, section, "stripSpace", False)
    cachePages = getConfigIntOption(configParser, section, "cachePages", 100000)

    parser.set_defaults(genedatafilename=genedatafilename, init=init, doIndex=doIndex, spacer=spacer,
                        trimReadID=trimReadID, forceID=forceID, flip=flip, verbose=verbose,
                        stripSpace=stripSpace, cachePages=cachePages)

    return parser


def makerdsfrombowtie(label, filename, outdbname, genedatafilename=None, init=True,
                      doIndex=False, spacer=2, trimReadID=True, forceID=None,
                      flip=False, verbose=False, stripSpace=False, cachePages=100000,
                      propertyList=[]):

    writeLog("%s.log" % outdbname, verstring, string.join(sys.argv[1:]))

    geneDict = {}
    dataType = "DNA"
    if genedatafilename is not None:
        dataType = "RNA"
        genedatafile = open(genedatafilename)
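        # the gene model file is assumed to follow a UCSC knownGene-style
        # layout: name, chrom, strand, ... with the exon count in column 8 and
        # comma-terminated exon starts/ends in columns 9 and 10 (an assumption
        # based on the fields read below); single-exon models are skipped
        # since they contribute no splice junctions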
        for line in genedatafile:
            fields = line.strip().split("\t")
            blockCount = int(fields[7])
            if blockCount < 2:
                continue

            uname = fields[0]
            chrom = fields[1]
            sense = fields[2]
            chromstarts = fields[8][:-1].split(",")
            chromstops = fields[9][:-1].split(",")
            exonLengths = []
            totalLength = 0
            for index in range(blockCount):
                chromstarts[index] = int(chromstarts[index])
                chromstops[index] = int(chromstops[index])
                exonLengths.append(chromstops[index] - chromstarts[index])
                totalLength += exonLengths[index]

            geneDict[uname] = (sense, blockCount, totalLength, chrom, chromstarts, exonLengths)

        genedatafile.close()

    rds = ReadDataset.ReadDataset(outdbname, init, dataType, verbose=True)

    # check that the requested cachePages is larger than the dataset's default cache size
    defaultCacheSize = rds.getDefaultCacheSize()
    if cachePages > defaultCacheSize:
        if init:
            rds.setDBcache(cachePages, default=True)
        else:
            rds.setDBcache(cachePages)

    if not init and doIndex:
        try:
            if rds.hasIndex():
                rds.dropIndex()
        except:
            if verbose:
                print "couldn't drop Index"

    if len(propertyList) > 0:
        rds.insertMetadata(propertyList)

    # make some assumptions based on first read
    infile = open(filename, "r")
    line = infile.readline()
    if stripSpace:
        line = line.replace(" ","")

    fields = line.split()
    readsize = len(fields[5])
    pairedTest = fields[0][-2:]
    forcePair = False
    if forceID is not None:
        forcePair = True
    else:
        forceID = 0

    paired = False
    if pairedTest in ["/1", "/2"] or forcePair:
        print "assuming reads are paired"
        paired = True

    print "read size: %d bp" % readsize
    if init:
        rds.insertMetadata([("readsize", readsize)])
        if paired:
            rds.insertMetadata([("paired", "True")])

    if "bowtie_mapped" not in rds.getMetadata():
        rds.insertMetadata([("bowtie_mapped", "True")])

    if dataType == "RNA" and "spacer" not in rds.getMetadata():
        rds.insertMetadata([("spacer", spacer)])

    infile.close()

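    # for RNA data, hits on splice-junction references are split into left and
    # right halves around the junction; with trim = -4, a kept read must have
    # at least 4 bp on the right exon (see the lefthalf/righthalf checks below)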
    maxBorder = 0
    if dataType == "RNA":
        trim = -4
        maxBorder = readsize + trim

    infile = open(filename, "r")
    prevID = ""
    readList = []
    uInsertList = []
    mInsertList = []
    sInsertList = []
    index = uIndex = mIndex = sIndex = lIndex = 0
    delimiter = "|"
    insertSize = 100000
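    # each line of bowtie's native (non-SAM) output is split on whitespace:
    # field 0 = read ID, 1 = strand, 2 = reference name, 3 = 0-based start,
    # and the last field holds the mismatch descriptors when present;
    # alignments are grouped by read ID and flushed once the ID changes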
    for line in infile:
        lIndex += 1
        if stripSpace:
            line = line.replace(" ","")

        fields = line.strip().split()
        readID = fields[0]
        if trimReadID:
            readID = string.join(readID.split(":")[1:], ":")

        if readID != prevID:
            listlen = len(readList)
            if trimReadID:
                prevID = "%s-%s" % (label, prevID)

            if forcePair:
                prevID += "/%d" % forceID

            if listlen == 1:
                (sense, chrom, start, mismatches) = readList[0]
                if flip:
                    if sense == "+":
                        sense = "-"
                    else:
                        sense = "+"

                if "|" not in chrom:
                    stop = start + readsize
                    uInsertList.append((prevID, chrom, start, stop, sense, 1.0, "", mismatches))
                    uIndex += 1
                elif dataType == "RNA":
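                    # the reference name is a splice-junction entry of the
                    # form model|spliceID|regionStart; use the gene model to
                    # translate the hit back into two genomic half-reads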
                    currentSplice = chrom
                    (model, spliceID, regionStart) = currentSplice.split(delimiter)
                    if model not in geneDict:
                        prevID = readID
                    else:
                        (gsense, blockCount, transLength, chrom, chromstarts, blockSizes) = geneDict[model]
                        spliceID = int(spliceID)
                        rstart = int(start) - spacer
                        lefthalf = maxBorder - rstart
                        if lefthalf < 1 or lefthalf > maxBorder:
                            prevID = readID
                        else:
                            righthalf = readsize - lefthalf
                            startL = int(regionStart) + rstart
                            stopL = startL + lefthalf
                            startR = chromstarts[spliceID + 1]
                            stopR = chromstarts[spliceID + 1] + righthalf
                            sInsertList.append((prevID, chrom, startL, stopL, startR, stopR, sense, 1.0, "", mismatches))
                            sIndex += 1
            elif listlen > 1:
                prevID = "%s::%s" % (prevID, str(listlen))
                mIndex += 1
                # ignore multireads that can also map across splices
                skip = False
                for (sense, chrom, start, mismatches) in readList:
                    if "|" in chrom:
                        skip = True

                if not skip:
                    for (sense, chrom, start, mismatches) in readList:
                        stop = start + readsize
                        if flip:
                            if sense == "+":
                                sense = "-"
                            else:
                                sense = "+"

                        mInsertList.append((prevID, chrom, start, stop, sense, 1.0 / listlen, "", mismatches))
            else:
                prevID = readID

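            # flush accumulated reads to the database in batches so memory
            # use stays bounded on large bowtie files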
            if index % insertSize == 0:
                rds.insertUniqs(uInsertList)
                rds.insertMulti(mInsertList)
                uInsertList = []
                mInsertList = []
                if dataType == "RNA":
                    rds.insertSplices(sInsertList)
                    sInsertList = []

                print ".",
                sys.stdout.flush()

            # start processing new read
            readList = []
            prevID = readID
            index += 1

        # add the new read
        sense = fields[1]
        chrom = fields[2]
        # for eland compat, we are 1-based
        start = int(fields[3]) + 1
        mismatches = ""
        if ":" in fields[-1]:
            mismatches = decodeMismatches(fields[-1], sense)

        readList.append((sense, chrom, start, mismatches))
        if lIndex % 1000000 == 0:
            print "processed %d lines" % lIndex

    print "%d lines processed" % lIndex

    if len(uInsertList) > 0:
        rds.insertUniqs(uInsertList)

    if len(mInsertList) > 0:
        rds.insertMulti(mInsertList)

    if len(sInsertList) > 0:
        rds.insertSplices(sInsertList)

    combString = "%d unique reads" % uIndex
    combString += "\t%d multi reads" % mIndex
    if dataType == "RNA":
        combString += "\t%d spliced reads" % sIndex

    print
    print combString.replace("\t", "\n")

    writeLog("%s.log" % outdbname, verstring, combString)

    if doIndex:
        print "building index...."
        if cachePages > defaultCacheSize:
            rds.setDBcache(cachePages)
            rds.buildIndex(cachePages)
        else:
            rds.buildIndex(defaultCacheSize)


def decodeMismatches(mString, rsense):
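    # convert bowtie mismatch descriptors ("offset:refBase>readBase", comma
    # separated) into eland-style tokens (readBase, 1-based offset, refBase);
    # for minus-strand alignments both bases are complemented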
    complement = {"A": "T",
                  "T": "A",
                  "C": "G",
                  "G": "C",
                  "N": "N"
    }

    output = []
    mismatches = mString.split(",")
    for mismatch in mismatches:
        (pos, change) = mismatch.split(":")
        (genNT, readNT) = change.split(">")
        if rsense == "-":
            readNT = complement[readNT]
            genNT  = complement[genNT]

        elandCompatiblePos = int(pos) + 1
        output.append("%s%d%s" % (readNT, elandCompatiblePos, genNT))

    return string.join(output, ",")


if __name__ == "__main__":
    main(sys.argv)