Use the image directory name to detect which type of image analysis directory we have.
diff --git a/htsworkflow/pipelines/runfolder.py b/htsworkflow/pipelines/runfolder.py
index ba99c4c25f31b956c18a7fbc20f548a02fa04bf0..097a61732884a7d2b90983cf01ff8ed61000e295 100644
@@ -136,6 +136,19 @@ class PipelineRun(object):
         tree = ElementTree.parse(filename).getroot()
         self.set_elements(tree)
 
+def load_pipeline_run_xml(pathname):
+    """
+    Load and instantiate a PipelineRun from a run xml file
+
+    :Parameters:
+      - `pathname` : location of a run xml file
+
+    :Returns: initialized PipelineRun object
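+
+    Example, assuming a hypothetical run xml filename::
+
+      run = load_pipeline_run_xml('run_FC12345_2008-01-01.xml')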
+    """
+    tree = ElementTree.parse(pathname).getroot()
+    run = PipelineRun(xml=tree)
+    return run
+
 def get_runs(runfolder):
     """
     Search through a run folder for all the various sub component runs
@@ -168,7 +181,7 @@ def get_runs(runfolder):
                     p.gerald = g
                     runs.append(p)
                 except IOError, e:
-                    print "Ignoring", str(e)
+                    logging.error("Ignoring " + str(e))
 
     datadir = os.path.join(runfolder, 'Data')
 
@@ -178,15 +191,92 @@ def get_runs(runfolder):
     for firecrest_pathname in glob(os.path.join(datadir,"*Firecrest*")):
         logging.info('Found firecrest in ' + datadir)
         image_analysis = firecrest.firecrest(firecrest_pathname)
-        scan_post_image_analysis(runs, runfolder, image_analysis, firecrest_pathname)
+        if image_analysis is None:
+            logging.warn(
+                "%s is an empty or invalid firecrest directory" % (firecrest_pathname,)
+            )
+        else:
+            scan_post_image_analysis(
+                runs, runfolder, image_analysis, firecrest_pathname
+            )
     # scan for IPAR directories
     for ipar_pathname in glob(os.path.join(datadir,"IPAR_*")):
         logging.info('Found ipar directories in ' + datadir)
         image_analysis = ipar.ipar(ipar_pathname)
-        scan_post_image_analysis(runs, runfolder, image_analysis, ipar_pathname)
+        if image_analysis is None:
+            logging.warn(
+                "%s is an empty or invalid IPAR directory" % (ipar_pathname,)
+            )
+        else:
+            scan_post_image_analysis(
+                runs, runfolder, image_analysis, ipar_pathname
+            )
 
     return runs
 
+def get_specific_run(gerald_dir):
+    """
+    Given a gerald directory, construct a PipelineRun out of its parents
+
+    Basically this allows specifying a particular run, instead of using
+    get_runs, which scans a runfolder for the various combinations of
+    firecrest/ipar/bustard/gerald runs.
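+
+    For example, given a layout like::
+
+      <runfolder>/Data/<firecrest or IPAR dir>/<bustard dir>/<gerald dir>
+
+    passing the gerald directory recovers the bustard, image analysis,
+    and runfolder directories from its parents.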
+    """
+    from htsworkflow.pipelines import firecrest
+    from htsworkflow.pipelines import ipar
+    from htsworkflow.pipelines import bustard
+    from htsworkflow.pipelines import gerald
+
+    bustard_dir = os.path.abspath(os.path.join(gerald_dir, '..'))
+    image_dir = os.path.abspath(os.path.join(gerald_dir, '..', '..'))
+
+    runfolder_dir = os.path.abspath(os.path.join(image_dir, '..', '..'))
+
+    logging.info('--- use-run detected options ---')
+    logging.info('runfolder: %s' % (runfolder_dir,))
+    logging.info('image_dir: %s' % (image_dir,))
+    logging.info('bustard_dir: %s' % (bustard_dir,))
+    logging.info('gerald_dir: %s' % (gerald_dir,))
+
+    # find our processed image dir
+    image_run = None
+    # split into parent, and leaf directory
+    # leaf directory should be an IPAR or firecrest directory
+    data_dir, short_image_dir = os.path.split(image_dir)
+    logging.info('data_dir: %s' % (data_dir,))
+    logging.info('short_image_dir: %s' % (short_image_dir,))
+
+    # guess which type of image processing directory we have by looking
+    # in the leaf directory name
+    if re.search('Firecrest', short_image_dir, re.IGNORECASE) is not None:
+        image_run = firecrest.firecrest(image_dir)
+    elif re.search('IPAR', short_image_dir, re.IGNORECASE) is not None:
+        image_run = ipar.ipar(image_dir)
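+    # (e.g. a leaf name containing 'Firecrest' or 'IPAR', the same names
+    # get_runs globs for above)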
+    # if we didn't find a run, report the error and return
+    if image_run is None:
+        msg = '%s does not contain an image processing step' % (image_dir,)
+        logging.error(msg)
+        return None
+
+    # find our base calling
+    base_calling_run = bustard.bustard(bustard_dir)
+    if base_calling_run is None:
+        logging.error('%s does not contain a bustard run' % (bustard_dir,))
+        return None
+
+    # find alignments
+    gerald_run = gerald.gerald(gerald_dir)
+    if gerald_run is None:
+        logging.error('%s does not contain a gerald run' % (gerald_dir,))
+        return None
+
+    p = PipelineRun(runfolder_dir)
+    p.image_analysis = image_run
+    p.bustard = base_calling_run
+    p.gerald = gerald_run
+
+    logging.info('Constructed PipelineRun from %s' % (gerald_dir,))
+    return p
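+
+# A minimal usage sketch (hypothetical paths):
+#
+#   run = get_specific_run(
+#       '/runs/080101_FC12345/Data/C1-36_Firecrest1.9.2/Bustard1.9.2/GERALD')
+#   if run is not None:
+#       run.save()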
 
 def extract_run_parameters(runs):
     """
@@ -195,7 +285,7 @@ def extract_run_parameters(runs):
     for run in runs:
       run.save()
 
-def summarize_mapped_reads(mapped_reads):
+def summarize_mapped_reads(genome_map, mapped_reads):
     """
     Summarize per chromosome reads into a genome count
     But handle spike-in/contamination symlinks separately.
@@ -205,7 +295,7 @@ def summarize_mapped_reads(mapped_reads):
     genome = 'unknown'
     for k, v in mapped_reads.items():
         path, k = os.path.split(k)
-        if len(path) > 0:
+        if len(path) > 0 and path not in genome_map:
             genome = path
             genome_reads += v
         else:
@@ -236,7 +326,7 @@ def summarize_lane(gerald, lane_id):
       report.append('Repeat (0,1,2 mismatches) %d %d %d' % \
                     (mc['R0'], mc['R1'], mc['R2']))
       report.append("Mapped Reads")
-      mapped_reads = summarize_mapped_reads(eland_result.mapped_reads)
+      mapped_reads = summarize_mapped_reads(eland_result.genome_map, eland_result.mapped_reads)
       for name, counts in mapped_reads.items():
         report.append("  %s: %d" % (name, counts))
       report.append('')
@@ -304,7 +394,15 @@ def extract_results(runs, output_base_dir=None):
 
       # tar score files
       score_files = []
-      for f in os.listdir(g.pathname):
+
+      # check for g.pathname/Temp, a new feature of 1.1rc1
+      scores_path = g.pathname
+      scores_path_temp = os.path.join(scores_path, 'Temp')
+      if os.path.isdir(scores_path_temp):
+          scores_path = scores_path_temp
+
+      # hopefully we have a directory that contains s_*_score files
+      for f in os.listdir(scores_path):
           if re.match('.*_score.txt', f):
               score_files.append(f)
 
@@ -312,12 +410,13 @@ def extract_results(runs, output_base_dir=None):
       bzip_cmd = [ 'bzip2', '-9', '-c' ]
       tar_dest_name =os.path.join(cycle_dir, 'scores.tar.bz2')
       tar_dest = open(tar_dest_name, 'w')
-      logging.info("Compressing score files in %s" % (g.pathname,))
+      logging.info("Compressing score files from %s" % (scores_path,))
       logging.info("Running tar: " + " ".join(tar_cmd[:10]))
       logging.info("Running bzip2: " + " ".join(bzip_cmd))
       logging.info("Writing to %s" %(tar_dest_name))
 
-      tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE, shell=False, cwd=g.pathname)
+      tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE, shell=False, 
+                             cwd=scores_path)
       bzip = subprocess.Popen(bzip_cmd, stdin=tar.stdout, stdout=tar_dest)
       tar.wait()
 
@@ -327,6 +426,9 @@ def extract_results(runs, output_base_dir=None):
               source_name = eland_lane.pathname
               path, name = os.path.split(eland_lane.pathname)
               dest_name = os.path.join(cycle_dir, name)
+             logging.info("Saving eland file %s to %s" % \
+                          (source_name, dest_name))
+
               if is_compressed(name):
                 logging.info('Already compressed, Saving to %s' % (dest_name, ))
                 shutil.copy(source_name, dest_name)