development release: conversion of ReadDataset to use BAM files
[erange.git] / normalizeExpandedExonic.py
index c677d563e18d3f2544fc3c70caee4148c743bf5f..cf02cb3af4e26d987a971817057c9e790dae874d 100644 (file)
@@ -45,7 +45,10 @@ def main(argv=None):
         except IndexError:
             pass
 
-    normalizeExpandedExonic(genome, hitfile, uniquecountfile, splicecountfile, outfile,
+    RDS = ReadDataset.ReadDataset(hitfile, verbose=True, cache=options.doCache, reportCount=False)
+    uniqcount = RDS.getUniqsCount()
+
+    normalizeExpandedExonic(genome, uniqcount, uniquecountfile, splicecountfile, outfile,
                             candidateLines, acceptedfilename, options.fieldID,
                             options.maxLength, options.doCache, options.extendGenome,
                             options.replaceModels)
@@ -73,7 +76,7 @@ def makeParser(usage=""):
     return parser
 
 
-def normalizeExpandedExonic(genome, hitfile, uniquecountfilename, splicecountfilename,
+def normalizeExpandedExonic(genome, uniqcount, uniquecountfilename, splicecountfilename,
                             outfilename, candidateLines=[], acceptedfilename="",
                             fieldID=0, maxLength=1000000000., doCache=False,
                             extendGenome="", replaceModels=False):
@@ -104,8 +107,6 @@ def normalizeExpandedExonic(genome, hitfile, uniquecountfilename, splicecountfil
     if extendGenome != "":
         hg.extendFeatures(extendGenome, replace=replaceModels)
 
-    RDS = ReadDataset.ReadDataset(hitfile, verbose = True, cache=doCache, reportCount=False)    
-    uniqcount = RDS.getUniqsCount()
     print "%d unique reads" % uniqcount
 
     splicecount = 0
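For reference, a minimal sketch of how a caller drives the refactored function after this change. The ReadDataset calls and the new signature are the ones shown in the hunks above; the file names, genome label, and option values below are hypothetical placeholders, not part of the commit:

    import ReadDataset
    from normalizeExpandedExonic import normalizeExpandedExonic

    hitfile = "aligned_reads.bam"     # hypothetical BAM input
    genome = "hg19"                   # hypothetical genome label

    # The caller now opens the read dataset and extracts the unique-read count ...
    RDS = ReadDataset.ReadDataset(hitfile, verbose=True, cache=False, reportCount=False)
    uniqcount = RDS.getUniqsCount()

    # ... and normalizeExpandedExonic() receives that count instead of the hit file path.
    normalizeExpandedExonic(genome, uniqcount, "unique.count", "splice.count", "normalized.rpkm")

Moving the dataset handling into main() presumably keeps the normalization routine agnostic about whether the unique-read count comes from a BAM-backed dataset or an older RDS file.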