Add the ability to specify a flowcell id instead of depending on autodetect.
authorDiane Trout <diane@caltech.edu>
Thu, 6 Oct 2011 18:58:39 +0000 (11:58 -0700)
committerDiane Trout <diane@caltech.edu>
Thu, 6 Oct 2011 18:58:39 +0000 (11:58 -0700)
This only works if there's one runfolder.

Also I shrank htsw-runfolder's main function by doing a bit of
refactoring

htsworkflow/pipelines/runfolder.py
scripts/htsw-runfolder

index e1b474b6cd70846ee6f702ce01365a5c93db0190..a3e24f4048ce20ad10f9a6f86b6280bda9953bbb 100644 (file)
@@ -13,9 +13,9 @@ import tarfile
 import time
 
 try:
-  from xml.etree import ElementTree
+    from xml.etree import ElementTree
 except ImportError, e:
-  from elementtree import ElementTree
+    from elementtree import ElementTree
 
 EUROPEAN_STRPTIME = "%d-%m-%Y"
 EUROPEAN_DATE_RE = "([0-9]{1,2}-[0-9]{1,2}-[0-9]{4,4})"
@@ -38,13 +38,13 @@ class PipelineRun(object):
     PIPELINE_RUN = 'PipelineRun'
     FLOWCELL_ID = 'FlowcellID'
 
-    def __init__(self, pathname=None, xml=None):
+    def __init__(self, pathname=None, flowcell_id=None, xml=None):
         if pathname is not None:
           self.pathname = os.path.normpath(pathname)
         else:
           self.pathname = None
         self._name = None
-        self._flowcell_id = None
+        self._flowcell_id = flowcell_id
         self.image_analysis = None
         self.bustard = None
         self.gerald = None
@@ -55,23 +55,23 @@ class PipelineRun(object):
     def _get_flowcell_id(self):
         # extract flowcell ID
         if self._flowcell_id is None:
-          config_dir = os.path.join(self.pathname, 'Config')
-          flowcell_id_path = os.path.join(config_dir, 'FlowcellId.xml')
-         if os.path.exists(flowcell_id_path):
-            flowcell_id_tree = ElementTree.parse(flowcell_id_path)
-            self._flowcell_id = flowcell_id_tree.findtext('Text')
-         else:
-            path_fields = self.pathname.split('_')
-            if len(path_fields) > 0:
-              # guessing last element of filename
-              flowcell_id = path_fields[-1]
+            config_dir = os.path.join(self.pathname, 'Config')
+            flowcell_id_path = os.path.join(config_dir, 'FlowcellId.xml')
+            if os.path.exists(flowcell_id_path):
+                flowcell_id_tree = ElementTree.parse(flowcell_id_path)
+                self._flowcell_id = flowcell_id_tree.findtext('Text')
             else:
-              flowcell_id = 'unknown'
+                path_fields = self.pathname.split('_')
+                if len(path_fields) > 0:
+                    # guessing last element of filename
+                    flowcell_id = path_fields[-1]
+                else:
+                    flowcell_id = 'unknown'
 
-           logging.warning(
-             "Flowcell id was not found, guessing %s" % (
-                flowcell_id))
-           self._flowcell_id = flowcell_id
+                logging.warning(
+                  "Flowcell id was not found, guessing %s" % (
+                     flowcell_id))
+                self._flowcell_id = flowcell_id
         return self._flowcell_id
     flowcell_id = property(_get_flowcell_id)
 
@@ -152,7 +152,7 @@ def load_pipeline_run_xml(pathname):
     """
     Load and instantiate a Pipeline run from a run xml file
 
-    :Parameters: 
+    :Parameters:
       - `pathname` : location of an run xml file
 
     :Returns: initialized PipelineRun object
@@ -161,7 +161,7 @@ def load_pipeline_run_xml(pathname):
     run = PipelineRun(xml=tree)
     return run
 
-def get_runs(runfolder):
+def get_runs(runfolder, flowcell_id=None):
     """
     Search through a run folder for all the various sub component runs
     and then return a PipelineRun for each different combination.
@@ -189,7 +189,7 @@ def get_runs(runfolder):
                 logging.info("Found gerald directory %s" % (gerald_pathname,))
                 try:
                     g = gerald.gerald(gerald_pathname)
-                    p = PipelineRun(runfolder)
+                    p = PipelineRun(runfolder, flowcell_id)
                     p.image_analysis = image_analysis
                     p.bustard = b
                     p.gerald = g
@@ -273,7 +273,7 @@ def get_specific_run(gerald_dir):
     elif re.search('Intensities', short_image_dir, re.IGNORECASE) is not None:
         image_run = ipar.ipar(image_dir)
 
-    # if we din't find a run, report the error and return 
+    # if we didn't find a run, report the error and return
     if image_run is None:
         msg = '%s does not contain an image processing step' % (image_dir,)
         logging.error(msg)
index 7e35c9790299feee1317d23a3e99685495f16ef2..e98360add54b8789ea43793de2f2cb948bd999bf 100755 (executable)
@@ -1,31 +1,35 @@
 #!/usr/bin/env python
-"""
-Runfolder.py can generate a xml file capturing all the 'interesting' parameters from a finished pipeline run. (using the -a option). The information currently being captured includes:
+"""htsw-runfolder archives summary information from a runfolder.
+The information currently being captured includes:
 
   * Flowcell ID
   * run dates
   * start/stop cycle numbers
   * Firecrest, bustard, gerald version numbers
   * Eland analysis types, and everything in the eland configuration file.
-  * cluster numbers and other values from the Summary.htm 
-    LaneSpecificParameters table. 
+  * cluster numbers and other values from the Summary.htm
+    LaneSpecificParameters table.
   * How many reads mapped to a genome from an eland file
 
+
 The ELAND "mapped reads" counter will also check for eland squashed file
-that were symlinked from another directory. This is so I can track how 
-many reads landed on the genome of interest and on the spike ins. 
+that were symlinked from another directory. This is so I can track how
+many reads landed on the genome of interest and on the spike ins.
 
 Basically my subdirectories something like:
 
 genomes/hg18
 genomes/hg18/chr*.2bpb <- files for hg18 genome
-genomes/hg18/chr*.vld  
+genomes/hg18/chr*.vld
 genomes/hg18/VATG.fa.2bp <- symlink to genomes/spikeins
-genomes/spikein 
+genomes/spikein
 
-runfolder.py can also spit out a simple summary report (-s option) 
-that contains the per lane post filter cluster numbers and the mapped 
+htsw-runfolder can also spit out a simple summary report (-s option)
+that contains the per lane post filter cluster numbers and the mapped
 read counts. (The report isn't currently very pretty)
+
+In addition if you provide a --site name it will also archive the raw
+reads.
 """
 from glob import glob
 import logging
@@ -36,6 +40,91 @@ import sys
 from htsworkflow.pipelines import runfolder
 from htsworkflow.pipelines.runfolder import ElementTree
 
+
+def main(cmdlist=None):
+    parser = make_parser()
+    opts, args = parser.parse_args(cmdlist)
+
+    logging.basicConfig()
+    if opts.verbose:
+        root_log = logging.getLogger()
+        root_log.setLevel(logging.INFO)
+
+    logging.info('Starting htsworkflow illumina runfolder processing tool.')
+    runs = []
+    runs.extend(load_run_xml_file(parser, args, opts))
+    runs.extend(load_specific_runfolder_analysis(parser, args, opts))
+    runs.extend(load_runfolders(parser, args, opts))
+
+    if len(runs) == 0:
+        parser.error("Please specify some run folders to process")
+
+    command_run = False
+    if opts.summary:
+        print runfolder.summary_report(runs)
+        command_run = True
+    if opts.archive:
+        runfolder.extract_run_parameters(runs)
+        command_run = True
+    if opts.extract_results:
+        command_run = True
+        extract_results(parser, args, opts, runs)
+    if opts.clean:
+        runfolder.clean_runs(runs, opts.dry_run)
+        command_run = True
+
+    if command_run == False:
+        parser.error("No commands provided")
+
+    return 0
+
+
+def load_run_xml_file(parser, args, opts):
+    runs = []
+    if opts.run_xml:
+        # handle ~ shortcut
+        opts.run_xml = os.path.expanduser(opts.run_xml)
+        tree = ElementTree.parse(opts.run_xml).getroot()
+        runs.append(runfolder.PipelineRun(xml=tree))
+    return runs
+
+
+def load_specific_runfolder_analysis(parser, args, opts):
+    # look for manually specified run
+    runs = []
+    if opts.use_run is not None:
+        specific_run = runfolder.get_specific_run(opts.use_run)
+        if specific_run is not None:
+            runs.append(specific_run)
+        else:
+            logging.warn("Couldn't find a run in %s" % (opts.use_run,))
+    return runs
+
+
+def load_runfolders(parser, args, opts):
+    if opts.flowcell_id is not None:
+        if len(args) != 1:
+            parser.error(
+                'Can only force flowcell ID when operating on one run')
+    # scan runfolders for runs
+    runs = []
+    for run_pattern in args:
+        # expand args on our own if needed
+        for run_dir in glob(run_pattern):
+            runs.extend(runfolder.get_runs(run_dir, opts.flowcell_id))
+    return runs
+
+
+def extract_results(parser, args, opts, runs):
+    if opts.dry_run:
+        parser.error("Dry-run is not supported for extract-results")
+    runfolder.extract_results(runs,
+                              opts.output_dir,
+                              opts.site,
+                              opts.max_jobs,
+                              opts.raw_format)
+
+
 def make_parser():
     usage = 'usage: %prog [options] runfolder_root_dir'
     parser = optparse.OptionParser(usage)
@@ -55,15 +144,20 @@ def make_parser():
                         default=False,
                         help='generate run configuration archive')
     commands.add_option('--extract-results', action='store_true',
-           default=False,
-           help='create run-xml summary, compress the eland result files, build srf files and '
-                'copy all that and the Summary.htm file into an archival directory.')
+                        default=False,
+                        help='create run-xml summary, compress the eland '\
+                        'result files, build srf files and copy all that '\
+                        'and the Summary.htm file into an archival '\
+                        'directory.')
     commands.add_option('-c', '--clean', action='store_true', default=False,
-                        help='Clean runfolder, preparing it for long-term storage')
+                        help='Clean runfolder, preparing it for '\
+                             'long-term storage')
     parser.add_option_group(commands)
 
+    parser.add_option('-f', '--flowcell-id', default=None,
+                      help='force a particular flowcell id')
     parser.add_option('-j', '--max-jobs', default=1,
-                      help='sepcify the maximum number of processes to run '
+                      help='specify the maximum number of processes to run '
                            '(used in extract-results)')
     parser.add_option('-o', '--output-dir', default=None,
            help="specify the default output directory for extract results")
@@ -71,80 +165,20 @@ def make_parser():
            default=None,
            help='specify a run_<FlowCell>.xml file for summary reports')
     parser.add_option('--site', default=None,
-                      help='create srf files tagged with the provided site name')
+                      help='create srf files tagged with the provided '\
+                      'site name')
+    parser.add_option('--raw-format', dest="raw_format", default='qseq',
+                      choices=['qseq', 'srf'],
+                      help='Specify which type of raw format to use. '
+                           'Currently supported options: qseq, srf')
     parser.add_option('-u', '--use-run', dest='use_run', default=None,
                       help='Specify which run to use instead of autoscanning '
                            'the runfolder. You do this by providing the final '
                            ' GERALD directory, and it assumes the parent '
                            'directories are the bustard and image processing '
                            'directories.')
-    parser.add_option('--raw-format', dest="raw_format", default='qseq',
-                      choices=['qseq', 'srf'],
-                      help='Specify which type of raw format to use. '
-                           'Currently supported options: qseq, srf')
 
     return parser
 
-def main(cmdlist=None):
-    parser = make_parser()
-    opt, args = parser.parse_args(cmdlist)
-
-    logging.basicConfig()
-    if opt.verbose:
-        root_log = logging.getLogger()
-        root_log.setLevel(logging.INFO)
-
-    logging.info('Starting htsworkflow illumina runfolder processing tool.')
-    runs = []
-    if opt.run_xml:
-        # handle ~ shortcut
-        opt.run_xml = os.path.expanduser(opt.run_xml)
-        tree = ElementTree.parse(opt.run_xml).getroot()
-        runs.append(runfolder.PipelineRun(xml=tree))
-
-    # look for manually specified run
-    if opt.use_run is not None:
-        specific_run = runfolder.get_specific_run(opt.use_run)
-        if specific_run is not None:
-            runs.append(specific_run)
-        else:
-            logging.warn("Couldn't find a run in %s" % (opt.use_run,))
-
-    # scan runfolders for runs
-    for run_pattern in args:
-        # expand args on our own if needed
-        for run_dir in glob(run_pattern):
-            runs.extend(runfolder.get_runs(run_dir))
-
-    if len(runs) > 0:
-        command_run = False
-        if opt.summary:
-            print runfolder.summary_report(runs)
-            command_run = True
-        if opt.archive:
-            runfolder.extract_run_parameters(runs)
-            command_run = True
-        if opt.extract_results:
-            if opt.dry_run:
-                parser.error("Dry-run is not supported for extract-results")
-            runfolder.extract_results(runs,
-                                      opt.output_dir,
-                                      opt.site,
-                                      opt.max_jobs,
-                                      opt.raw_format)
-            command_run = True
-        if opt.clean:
-            runfolder.clean_runs(runs, opt.dry_run)
-            command_run = True
-
-        if command_run == False:
-            print "You need to specify a command." + os.linesep
-            parser.print_help()
-    else:
-        print "You need to specify some run folders to process..." + os.linesep
-        parser.print_help()
-
-    return 0
-
 if __name__ == "__main__":
-  sys.exit(main(sys.argv[1:]))
+    sys.exit(main(sys.argv[1:]))