Recent IPAR XML config blocks include the runfolder name
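
For context, a minimal sketch of the RunInfo.xml layout that the new _get_flowcell_id_from_runinfo() helper expects, assuming the <RunInfo><Run><Flowcell> nesting implied by its '/RunInfo/Run/Flowcell' lookup; the run id and flowcell value below are hypothetical. Note that the stdlib xml.etree.ElementTree elements have no .xpath() method (that is an lxml.etree feature), so this sketch uses the stdlib findall() equivalent.

    # Sketch only: the RunInfo.xml structure is assumed from the patch's
    # '/RunInfo/Run/Flowcell' path; the id and flowcell values are made up.
    from xml.etree import ElementTree

    runinfo_xml = """<RunInfo>
      <Run Id="110815_SN787_0101_AFC123ABXX" Number="101">
        <Flowcell>FC123ABXX</Flowcell>
      </Run>
    </RunInfo>"""

    root = ElementTree.fromstring(runinfo_xml)
    # findall() with a path relative to the root selects the same node the
    # patch addresses with root.xpath('/RunInfo/Run/Flowcell') under lxml.
    fc_nodes = root.findall('Run/Flowcell')
    assert len(fc_nodes) == 1
    print(fc_nodes[0].text)   # -> FC123ABXX
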
diff --git a/htsworkflow/pipelines/runfolder.py b/htsworkflow/pipelines/runfolder.py
index 0fce8f840f49cd5c6ca3aea92e203051ff40d45f..59fd2131c1b9b810e53b16ef71237831a117e771 100644
--- a/htsworkflow/pipelines/runfolder.py
+++ b/htsworkflow/pipelines/runfolder.py
@@ -55,31 +55,57 @@ class PipelineRun(object):
     def _get_flowcell_id(self):
         # extract flowcell ID
         if self._flowcell_id is None:
-            config_dir = os.path.join(self.pathname, 'Config')
-            flowcell_id_path = os.path.join(config_dir, 'FlowcellId.xml')
-            if os.path.exists(flowcell_id_path):
-                flowcell_id_tree = ElementTree.parse(flowcell_id_path)
-                self._flowcell_id = flowcell_id_tree.findtext('Text')
-            else:
-                path_fields = self.pathname.split('_')
-                if len(path_fields) > 0:
-                    # guessing last element of filename
-                   self._flowcell_id = path_fields[-1]
-                else:
-                   self._flowcell_id = 'unknown'
+            self._flowcell_id = self._get_flowcell_id_from_runinfo()
+        if self._flowcell_id is None:
+            self._flowcell_id = self._get_flowcell_id_from_flowcellid()
+        if self._flowcell_id is None:
+            self._flowcell_id = self._get_flowcell_id_from_path()
+        if self._flowcell_id is None:
+            self._flowcell_id = 'unknown'
 
-                   LOGGER.warning(
-                       "Flowcell id was not found, guessing %s" % (
-                       self._flowcell_id))
+            LOGGER.warning(
+                "Flowcell id was not found, guessing %s" % (
+                    self._flowcell_id))
 
         return self._flowcell_id
     flowcell_id = property(_get_flowcell_id)
 
+    def _get_flowcell_id_from_flowcellid(self):
+        """Extract flowcell id from a Config/FlowcellId.xml file
+        """
+        config_dir = os.path.join(self.pathname, 'Config')
+        flowcell_id_path = os.path.join(config_dir, 'FlowcellId.xml')
+        if os.path.exists(flowcell_id_path):
+            flowcell_id_tree = ElementTree.parse(flowcell_id_path)
+            return flowcell_id_tree.findtext('Text')
+
+    def _get_flowcell_id_from_runinfo(self):
+        """Read RunInfo file for flowcell id
+        """
+        runinfo = os.path.join(self.pathname, 'RunInfo.xml')
+        if os.path.exists(runinfo):
+            tree = ElementTree.parse(runinfo)
+            root = tree.getroot()
+            fc_nodes = root.xpath('/RunInfo/Run/Flowcell')
+            if len(fc_nodes) == 1:
+                return fc_nodes[0].text
+
+
+    def _get_flowcell_id_from_path(self):
+        """Guess a flowcell name from the path
+        """
+        path_fields = self.pathname.split('_')
+        if len(path_fields) > 0:
+            # guessing last element of filename
+            return path_fields[-1]
+
     def _get_runfolder_name(self):
-        if self.gerald is None:
-            return None
-        else:
+        if self.gerald:
             return self.gerald.runfolder_name
+        elif hasattr(self.image_analysis, 'runfolder_name'):
+            return self.image_analysis.runfolder_name
+        else:
+            return None
     runfolder_name = property(_get_runfolder_name)
 
     def get_elements(self):
@@ -177,7 +203,13 @@ def get_runs(runfolder, flowcell_id=None):
     from htsworkflow.pipelines import bustard
     from htsworkflow.pipelines import gerald
 
-    def scan_post_image_analysis(runs, runfolder, datadir, image_analysis, pathname):
+    def scan_post_image_analysis(runs, runfolder, datadir, image_analysis,
+                                 pathname):
+        added = build_aligned_runs(image_analysis, runs, datadir, runfolder)
+        # If we're a multiplexed run, don't look for older run type.
+        if added > 0:
+            return
+
         LOGGER.info("Looking for bustard directories in %s" % (pathname,))
         bustard_dirs = glob(os.path.join(pathname, "Bustard*"))
         # RTA BaseCalls looks enough like Bustard.
@@ -187,9 +219,9 @@ def get_runs(runfolder, flowcell_id=None):
             b = bustard.bustard(bustard_pathname)
             build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname, runfolder)
 
-            build_aligned_runs(image_analysis, runs, b, datadir, runfolder)
 
     def build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname, runfolder):
+        start = len(runs)
         gerald_glob = os.path.join(bustard_pathname, 'GERALD*')
         LOGGER.info("Looking for gerald directories in %s" % (pathname,))
         for gerald_pathname in glob(gerald_glob):
@@ -204,23 +236,27 @@ def get_runs(runfolder, flowcell_id=None):
                 runs.append(p)
             except IOError, e:
                 LOGGER.error("Ignoring " + str(e))
+        return len(runs) - start
 
 
-    def build_aligned_runs(image_analysis, runs, b, datadir, runfolder):
+    def build_aligned_runs(image_analysis, runs, datadir, runfolder):
+        start = len(runs)
         aligned_glob = os.path.join(runfolder, 'Aligned*')
         for aligned in glob(aligned_glob):
             LOGGER.info("Found aligned directory %s" % (aligned,))
             try:
-                g = gerald.HiSeq(aligned)
+                g = gerald.gerald(aligned)
                 p = PipelineRun(runfolder, flowcell_id)
+                bustard_pathname = os.path.join(runfolder, g.runfolder_name)
+
                 p.datadir = datadir
                 p.image_analysis = image_analysis
-                p.bustard = b
+                p.bustard = bustard.bustard(bustard_pathname)
                 p.gerald = g
                 runs.append(p)
             except IOError, e:
                 LOGGER.error("Ignoring " + str(e))
-
+        return len(runs) - start
     datadir = os.path.join(runfolder, 'Data')
 
     LOGGER.info('Searching for runs in ' + datadir)
@@ -340,7 +376,7 @@ def summarize_mapped_reads(genome_map, mapped_reads):
     genome = 'unknown'
     for k, v in mapped_reads.items():
         path, k = os.path.split(k)
-        if len(path) > 0 and not genome_map.has_key(path):
+        if len(path) > 0 and path not in genome_map:
             genome = path
             genome_reads += v
         else:
@@ -350,37 +386,40 @@ def summarize_mapped_reads(genome_map, mapped_reads):
 
 def summarize_lane(gerald, lane_id):
     report = []
-    summary_results = gerald.summary.lane_results
-    for end in range(len(summary_results)):
-      eland_result = gerald.eland_results.results[end][lane_id]
-      report.append("Sample name %s" % (eland_result.sample_name))
-      report.append("Lane id %s end %s" % (eland_result.lane_id, end))
-      if end < len(summary_results) and summary_results[end].has_key(eland_result.lane_id):
-          cluster = summary_results[end][eland_result.lane_id].cluster
-          report.append("Clusters %d +/- %d" % (cluster[0], cluster[1]))
-      report.append("Total Reads: %d" % (eland_result.reads))
-
-      if hasattr(eland_result, 'match_codes'):
-          mc = eland_result.match_codes
-          nm = mc['NM']
-          nm_percent = float(nm) / eland_result.reads * 100
-          qc = mc['QC']
-          qc_percent = float(qc) / eland_result.reads * 100
-
-          report.append("No Match: %d (%2.2g %%)" % (nm, nm_percent))
-          report.append("QC Failed: %d (%2.2g %%)" % (qc, qc_percent))
-          report.append('Unique (0,1,2 mismatches) %d %d %d' % \
-                        (mc['U0'], mc['U1'], mc['U2']))
-          report.append('Repeat (0,1,2 mismatches) %d %d %d' % \
-                        (mc['R0'], mc['R1'], mc['R2']))
-
-      if hasattr(eland_result, 'genome_map'):
-          report.append("Mapped Reads")
-          mapped_reads = summarize_mapped_reads(eland_result.genome_map, eland_result.mapped_reads)
-          for name, counts in mapped_reads.items():
+    lane_results = gerald.summary.lane_results
+    eland_result = gerald.eland_results[lane_id]
+    report.append("Sample name %s" % (eland_result.sample_name))
+    report.append("Lane id %s end %s" % (lane_id.lane, lane_id.read))
+
+    if lane_id.read < len(lane_results) and \
+           lane_id.lane in lane_results[lane_id.read]:
+        summary_results = lane_results[lane_id.read][lane_id.lane]
+        cluster = summary_results.cluster
+        report.append("Clusters %d +/- %d" % (cluster[0], cluster[1]))
+    report.append("Total Reads: %d" % (eland_result.reads))
+
+    if hasattr(eland_result, 'match_codes'):
+        mc = eland_result.match_codes
+        nm = mc['NM']
+        nm_percent = float(nm) / eland_result.reads * 100
+        qc = mc['QC']
+        qc_percent = float(qc) / eland_result.reads * 100
+
+        report.append("No Match: %d (%2.2g %%)" % (nm, nm_percent))
+        report.append("QC Failed: %d (%2.2g %%)" % (qc, qc_percent))
+        report.append('Unique (0,1,2 mismatches) %d %d %d' % \
+                      (mc['U0'], mc['U1'], mc['U2']))
+        report.append('Repeat (0,1,2 mismatches) %d %d %d' % \
+                      (mc['R0'], mc['R1'], mc['R2']))
+
+    if hasattr(eland_result, 'genome_map'):
+        report.append("Mapped Reads")
+        mapped_reads = summarize_mapped_reads(eland_result.genome_map,
+                                              eland_result.mapped_reads)
+        for name, counts in mapped_reads.items():
             report.append("  %s: %d" % (name, counts))
 
-      report.append('')
+        report.append('')
     return report
 
 def summary_report(runs):
@@ -392,14 +431,12 @@ def summary_report(runs):
         # print a run name?
         report.append('Summary for %s' % (run.name,))
        # sort the report
-       eland_keys = run.gerald.eland_results.results[0].keys()
-       eland_keys.sort(alphanum)
-
-       for lane_id in eland_keys:
-            report.extend(summarize_lane(run.gerald, lane_id))
-            report.append('---')
-            report.append('')
-        return os.linesep.join(report)
+       eland_keys = sorted(run.gerald.eland_results.keys())
+    for lane_id in eland_keys:
+        report.extend(summarize_lane(run.gerald, lane_id))
+        report.append('---')
+        report.append('')
+    return os.linesep.join(report)
 
 def is_compressed(filename):
     if os.path.splitext(filename)[1] == ".gz":
@@ -504,9 +541,9 @@ def compress_eland_results(gerald_object, cycle_dir, num_jobs=1):
     # copy & bzip eland files
     bz_commands = []
 
-    for lanes_dictionary in gerald_object.eland_results.results:
-        for eland_lane in lanes_dictionary.values():
-            source_name = eland_lane.pathname
+    for key in gerald_object.eland_results:
+        eland_lane = gerald_object.eland_results[key]
+        for source_name in eland_lane.pathnames:
             if source_name is None:
               LOGGER.info(
                 "Lane ID %s does not have a filename." % (eland_lane.lane_id,))
@@ -535,7 +572,7 @@ def compress_eland_results(gerald_object, cycle_dir, num_jobs=1):
       q.run()
 
 
-def extract_results(runs, output_base_dir=None, site="individual", num_jobs=1, raw_format='qseq'):
+def extract_results(runs, output_base_dir=None, site="individual", num_jobs=1, raw_format=None):
     """
     Iterate over runfolders in runs extracting the most useful information.
       * run parameters (in run-*.xml)
@@ -548,68 +585,75 @@ def extract_results(runs, output_base_dir=None, site="individual", num_jobs=1, raw_format='qseq'):
         output_base_dir = os.getcwd()
 
     for r in runs:
-      result_dir = os.path.join(output_base_dir, r.flowcell_id)
-      LOGGER.info("Using %s as result directory" % (result_dir,))
-      if not os.path.exists(result_dir):
-        os.mkdir(result_dir)
-
-      # create cycle_dir
-      cycle = "C%d-%d" % (r.image_analysis.start, r.image_analysis.stop)
-      LOGGER.info("Filling in %s" % (cycle,))
-      cycle_dir = os.path.join(result_dir, cycle)
-      cycle_dir = os.path.abspath(cycle_dir)
-      if os.path.exists(cycle_dir):
-        LOGGER.error("%s already exists, not overwriting" % (cycle_dir,))
-        continue
-      else:
-        os.mkdir(cycle_dir)
-
-      # save run file
-      r.save(cycle_dir)
-
-      # save illumina flowcell status report
-      save_flowcell_reports(os.path.join(r.image_analysis.pathname, '..'), cycle_dir)
-
-      # save stuff from bustard
-      # grab IVC plot
-      save_ivc_plot(r.bustard, cycle_dir)
-
-      # build base call saving commands
-      if site is not None:
-        lanes = []
-        for lane in range(1, 9):
-          lane_parameters = r.gerald.lanes.get(lane, None)
-          if lane_parameters is not None and lane_parameters.analysis != 'none':
-            lanes.append(lane)
-
-        run_name = srf.pathname_to_run_name(r.pathname)
-        seq_cmds = []
-        LOGGER.info("Raw Format is: %s" % (raw_format, ))
-        if raw_format == 'fastq':
-            rawpath = os.path.join(r.pathname, r.gerald.runfolder_name)
-            LOGGER.info("raw data = %s" % (rawpath,))
-            srf.copy_hiseq_project_fastqs(run_name, rawpath, site, cycle_dir)
-        elif raw_format == 'qseq':
-            seq_cmds = srf.make_qseq_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir)
-        elif raw_format == 'srf':
-            seq_cmds = srf.make_srf_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir, 0)
+        result_dir = os.path.join(output_base_dir, r.flowcell_id)
+        LOGGER.info("Using %s as result directory" % (result_dir,))
+        if not os.path.exists(result_dir):
+            os.mkdir(result_dir)
+
+        # create cycle_dir
+        cycle = "C%d-%d" % (r.image_analysis.start, r.image_analysis.stop)
+        LOGGER.info("Filling in %s" % (cycle,))
+        cycle_dir = os.path.join(result_dir, cycle)
+        cycle_dir = os.path.abspath(cycle_dir)
+        if os.path.exists(cycle_dir):
+            LOGGER.error("%s already exists, not overwriting" % (cycle_dir,))
+            continue
         else:
-            raise ValueError('Unknown --raw-format=%s' % (raw_format))
-        srf.run_commands(r.bustard.pathname, seq_cmds, num_jobs)
+            os.mkdir(cycle_dir)
+
+        # save run file
+        r.save(cycle_dir)
 
-      # save stuff from GERALD
-      # copy stuff out of the main run
-      g = r.gerald
+        # save illumina flowcell status report
+        save_flowcell_reports(os.path.join(r.image_analysis.pathname, '..'),
+                              cycle_dir)
 
-      # save summary file
-      save_summary_file(r, cycle_dir)
+        # save stuff from bustard
+        # grab IVC plot
+        save_ivc_plot(r.bustard, cycle_dir)
 
-      # compress eland result files
-      compress_eland_results(g, cycle_dir, num_jobs)
+        # build base call saving commands
+        if site is not None:
+            save_raw_data(num_jobs, r, site, raw_format, cycle_dir)
 
-      # md5 all the compressed files once we're done
-      md5_commands = srf.make_md5_commands(cycle_dir)
-      srf.run_commands(cycle_dir, md5_commands, num_jobs)
+        # save stuff from GERALD
+        # copy stuff out of the main run
+        g = r.gerald
+
+        # save summary file
+        save_summary_file(r, cycle_dir)
+
+        # compress eland result files
+        compress_eland_results(g, cycle_dir, num_jobs)
+
+        # md5 all the compressed files once we're done
+        md5_commands = srf.make_md5_commands(cycle_dir)
+        srf.run_commands(cycle_dir, md5_commands, num_jobs)
+
+def save_raw_data(num_jobs, r, site, raw_format, cycle_dir):
+    lanes = []
+    for lane in r.gerald.lanes:
+        lane_parameters = r.gerald.lanes.get(lane, None)
+        if lane_parameters is not None:
+            lanes.append(lane)
+
+    run_name = srf.pathname_to_run_name(r.pathname)
+    seq_cmds = []
+    if raw_format is None:
+        raw_format = r.bustard.sequence_format
+
+    LOGGER.info("Raw Format is: %s" % (raw_format, ))
+    if raw_format == 'fastq':
+        rawpath = os.path.join(r.pathname, r.gerald.runfolder_name)
+        LOGGER.info("raw data = %s" % (rawpath,))
+        srf.copy_hiseq_project_fastqs(run_name, rawpath, site, cycle_dir)
+    elif raw_format == 'qseq':
+        seq_cmds = srf.make_qseq_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir)
+    elif raw_format == 'srf':
+        seq_cmds = srf.make_srf_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir, 0)
+    else:
+        raise ValueError('Unknown --raw-format=%s' % (raw_format))
+    srf.run_commands(r.bustard.pathname, seq_cmds, num_jobs)
 
 def rm_list(files, dry_run=True):
     for f in files: