Merge branch 'django1.4'
authorDiane Trout <diane@ghic.org>
Tue, 17 Dec 2013 19:20:03 +0000 (11:20 -0800)
committerDiane Trout <diane@ghic.org>
Tue, 17 Dec 2013 19:20:03 +0000 (11:20 -0800)
There was a merge conflict caused by my qualifying the load_pipeline_run_xml function call.

Conflicts:
htsworkflow/frontend/samples/views.py

1  2 
htsworkflow/pipelines/retrieve_config.py
htsworkflow/pipelines/srf.py
htsworkflow/pipelines/test/test_runfolder_rta180.py

index 29f49bb32f8c3914976326d3b3b329db0021357e,a189f097e3017b8c8aac8b26b3d96ccbb0ec4460..fe2b9428cd01c38775c6a743dc2588b140425833
@@@ -22,7 -22,7 +22,7 @@@ from htsworkflow.util.url import normal
  from htsworkflow.pipelines.genome_mapper import \
       getAvailableGenomes, \
       constructMapperDict
- from htsworkflow.pipelines.runfolder import LANE_LIST
+ from htsworkflow.pipelines import LANE_LIST
  # JSON dictionaries use strings
  LANE_LIST_JSON = [ str(l) for l in LANE_LIST ]
  
@@@ -111,7 -111,7 +111,7 @@@ def format_gerald_header(flowcell_info)
      config += ['Flowcell Notes:']
      config.extend(flowcell_info['notes'].split('\r\n'))
      config += ['']
 -    for lane_number in LANE_LIST_JSON:
 +    for lane_number in sorted(flowcell_info['lane_set']):
          lane_contents = flowcell_info['lane_set'][lane_number]
          for lane_info in lane_contents:
              config += ['Lane%s: %s | %s' % (lane_number,
@@@ -356,8 -356,8 +356,8 @@@ def save_sample_sheet(outstream, option
                              'Operator': format_operator_name}
      out = csv.DictWriter(outstream, sample_sheet_fields)
      out.writerow(dict(((x,x) for x in sample_sheet_fields)))
 -    for lane_number in LANE_LIST:
 -        lane_contents = flowcell_info['lane_set'][str(lane_number)]
 +    for lane_number in sorted(flowcell_info['lane_set']):
 +        lane_contents = flowcell_info['lane_set'][lane_number]
  
          pooled_lane_contents = []
          for library in lane_contents:
index 31aa6d6640a7f7e12dab7d7ae899776820f3a656,5a6c969bd768aa09fab68e005f9ef5caae7ffcea..03b96b8f35b481fddfb7f41100ed0366c2ef473f
@@@ -1,4 -1,3 +1,4 @@@
 +import optparse
  from glob import glob
  import logging
  import os
@@@ -43,7 -42,7 +43,7 @@@ def make_srf_commands(run_name, bustard
    make a subprocess-friendly list of command line arguments to run solexa2srf
    generates files like:
    woldlab:080514_HWI-EAS229_0029_20768AAXX:8.srf
-    site        run name                    lane
+   site        run name                    lane
  
    run_name - most of the file name (run folder name is a good choice)
    lanes - list of integers corresponding to which lanes to process
@@@ -212,47 -211,3 +212,47 @@@ def make_md5_commands(destdir)
  
    return cmd_list
  
 +def main(cmdline=None):
 +    parser = make_parser()
 +    opts, args = parser.parse_args(cmdline)
 +
 +    logging.basicConfig(level = logging.DEBUG)
 +    if not opts.name:
 +        parser.error("Specify run name. Usually runfolder name")
 +    if not opts.destination:
 +        parser.error("Specify where to write sequence files")
 +    if not opts.site_name:
 +        parser.error("Specify site name")
 +    if len(args) != 1:
 +        parser.error("Can only process one directory")
 +
 +    source = args[0]
 +    LOGGER.info("Raw Format is: %s" % (opts.format, ))
 +    seq_cmds = []
 +    if opts.format == 'fastq':
 +        LOGGER.info("raw data = %s" % (source,))
 +        copy_hiseq_project_fastqs(opts.name, source, opts.site_name, opts.destination)
 +    elif opts.format == 'qseq':
 +        seq_cmds = make_qseq_commands(opts.name, source, opts.lanes, opts.site_name, opts.destination)
 +    elif opts.format == 'srf':
 +        seq_cmds = make_srf_commands(opts.name, source, opts.lanes, opts.site_name, opts.destination, 0)
 +    else:
 +        raise ValueError('Unknown --format=%s' % (opts.format))
 +    print seq_cmds
 +    srf.run_commands(args.source, seq_cmds, num_jobs)
 +
 +def make_parser():
 +    parser = optparse.OptionParser()
 +    parser.add_option('-f', '--format', default='fastq',
 +                        help="Format raw data is in")
 +    parser.add_option('-n', '--name', default=None,
 +                        help="Specify run name")
 +    parser.add_option('-d', '--destination', default=None,
 +                        help='specify where to write files  (cycle dir)')
 +    parser.add_option('-s', '--site-name', default=None,
 +                        help="specify site name")
 +    parser.add_option('-l', '--lanes', default="1,2,3,4,5,6,7,8",
 +                        help="what lanes to process, defaults to all")
 +    return parser
 +if __name__ == "__main__":
 +    main()
index eacae8c7f0e9db4aca1ee2573c391aba5a501f3e,0db7857399cd8f9c391a8b8c58fb0b5023fef0f5..2817328dc3a0fdcc415cc17e0c1adda725bebc37
@@@ -12,7 -12,7 +12,7 @@@ from htsworkflow.pipelines import busta
  from htsworkflow.pipelines import gerald
  from htsworkflow.pipelines import runfolder
  from htsworkflow.pipelines.samplekey import SampleKey
- from htsworkflow.pipelines.runfolder import ElementTree
+ from htsworkflow.pipelines import ElementTree
  
  from htsworkflow.pipelines.test.simulate_runfolder import *
  
@@@ -271,15 -271,15 +271,16 @@@ class RunfolderTests(TestCase)
          runs = runfolder.get_runs(self.runfolder_dir)
          self.failUnlessEqual(len(runs), 1)
          name = 'run_207BTAAXY_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-         self.failUnlessEqual(runs[0].name, name)
+         self.failUnlessEqual(runs[0].serialization_filename, name)
  
 +
          r1 = runs[0]
          xml = r1.get_elements()
          xml_str = ElementTree.tostring(xml)
  
          r2 = runfolder.PipelineRun(xml=xml)
-         self.failUnlessEqual(r1.name, r2.name)
+         self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
          self.failIfEqual(r2.image_analysis, None)
          self.failIfEqual(r2.bustard, None)
          self.failIfEqual(r2.gerald, None)