Add building srf files to runfolder as part of --extract-results
[htsworkflow.git] / scripts / runfolder
index af89015a86e4854d2ab89c982cc03b3fbb0cba9f..2be3ab764589c0d9834dc39c450897469d92c283 100644 (file)
@@ -27,28 +27,57 @@ runfolder.py can also spit out a simple summary report (-s option)
 that contains the per lane post filter cluster numbers and the mapped 
 read counts. (The report isn't currently very pretty)
 """
+from glob import glob
 import logging
 import optparse
+import os
 import sys
 
-from gaworkflow.pipeline import runfolder
-from gaworkflow.pipeline.runfolder import ElementTree
+from htsworkflow.pipelines import runfolder
+from htsworkflow.pipelines.runfolder import ElementTree
         
 def make_parser():
     usage = 'usage: %prog [options] runfolder_root_dir'
     parser = optparse.OptionParser(usage)
+
     parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
                       default=False,
                       help='turn on verbose mode')
-    parser.add_option('-s', '--summary', dest='summary', action='store_true',
-                      default=False,
-                      help='produce summary report')
-    parser.add_option('-a', '--archive', dest='archive', action='store_true',
-                      default=False,
-                      help='generate run configuration archive')
+    parser.add_option('--dry-run', action='store_true', default=False,
+                      help="Don't delete anything (in clean mode)")
+
+    commands = optparse.OptionGroup(parser, 'Commands')
+
+    commands.add_option('-s', '--summary', dest='summary', action='store_true',
+                        default=False,
+                        help='produce summary report')
+    commands.add_option('-a', '--archive', dest='archive', action='store_true',
+                        default=False,
+                        help='generate run configuration archive')
+    commands.add_option('--extract-results', action='store_true',
+           default=False,
+           help='create run-xml summary, compress the eland result files, build srf files and '
+                'copy all that and the Summary.htm file into an archival directory.')
+    commands.add_option('-c', '--clean', action='store_true', default=False,
+                        help='Clean runfolder, preparing it for long-term storage')
+    parser.add_option_group(commands)
+
+    parser.add_option('-j', '--max-jobs', default=1,
+                      help='specify the maximum number of processes to run '
+                           '(used in extract-results)')
+    parser.add_option('-o', '--output-dir', default=None,
+           help="specify the default output directory for extract results")
     parser.add_option('--run-xml', dest='run_xml',
            default=None,
            help='specify a run_<FlowCell>.xml file for summary reports')
+    parser.add_option('--site', default='individual',
+                      help='specify the site name for srf files')
+    parser.add_option('-u', '--use-run', dest='use_run', default=None,
+                      help='Specify which run to use instead of autoscanning '
+                           'the runfolder. You do this by providing the final '
+                           'GERALD directory, and it assumes the parent '
+                           'directories are the bustard and image processing '
+                           'directories.')
 
     return parser
 
@@ -61,18 +90,49 @@ def main(cmdlist=None):
         root_log = logging.getLogger()
         root_log.setLevel(logging.INFO)
 
+    logging.info('Starting htsworkflow illumina runfolder processing tool.')
     runs = []
     if opt.run_xml:
+        # handle ~ shortcut
+        opt.run_xml = os.path.expanduser(opt.run_xml)
         tree = ElementTree.parse(opt.run_xml).getroot()
         runs.append(runfolder.PipelineRun(xml=tree))
-    for run_dir in args:
-        runs.extend(runfolder.get_runs(run_dir))
+
+    # look for manually specified run
+    if opt.use_run is not None:
+        specific_run = runfolder.get_specific_run(opt.use_run)
+        if specific_run is not None:
+            runs.append(specific_run)
+        else:
+            logging.warn("Couldn't find a run in %s" % (opt.use_run,))
+
+    # scan runfolders for runs
+    for run_pattern in args:
+        # expand args on our own if needed
+        for run_dir in glob(run_pattern):
+            runs.extend(runfolder.get_runs(run_dir))
 
     if len(runs) > 0:
+        command_run = False
         if opt.summary:
             print runfolder.summary_report(runs)
+            command_run = True
         if opt.archive:
             runfolder.extract_run_parameters(runs)
+            command_run = True
+        if opt.extract_results:
+            runfolder.extract_results(runs, opt.output_dir, opt.site, opt.max_jobs)
+            command_run = True
+        if opt.clean:
+            runfolder.clean_runs(runs, opt.dry_run)
+            command_run = True
+
+        if command_run == False:
+            print "You need to specify a command."+os.linesep
+            parser.print_help()
+    else:
+        print "You need to specify some run folders to process..."+os.linesep
+        parser.print_help()
 
     return 0