import tarfile
import time
-try:
- from xml.etree import ElementTree
-except ImportError, e:
- from elementtree import ElementTree
+import lxml.etree as ElementTree
LOGGER = logging.getLogger(__name__)
PIPELINE_RUN = 'PipelineRun'
FLOWCELL_ID = 'FlowcellID'
- def __init__(self, pathname=None, xml=None):
+ def __init__(self, pathname=None, flowcell_id=None, xml=None):
if pathname is not None:
self.pathname = os.path.normpath(pathname)
else:
self.pathname = None
self._name = None
- self._flowcell_id = None
+ self._flowcell_id = flowcell_id
+ self.datadir = None
self.image_analysis = None
self.bustard = None
self.gerald = None
def _get_flowcell_id(self):
# extract flowcell ID
if self._flowcell_id is None:
- config_dir = os.path.join(self.pathname, 'Config')
- flowcell_id_path = os.path.join(config_dir, 'FlowcellId.xml')
- if os.path.exists(flowcell_id_path):
- flowcell_id_tree = ElementTree.parse(flowcell_id_path)
- self._flowcell_id = flowcell_id_tree.findtext('Text')
- else:
- path_fields = self.pathname.split('_')
- if len(path_fields) > 0:
- # guessing last element of filename
- flowcell_id = path_fields[-1]
- else:
- flowcell_id = 'unknown'
+ self._flowcell_id = self._get_flowcell_id_from_runinfo()
+ if self._flowcell_id is None:
+ self._flowcell_id = self._get_flowcell_id_from_flowcellid()
+ if self._flowcell_id is None:
+ self._flowcell_id = self._get_flowcell_id_from_path()
+ if self._flowcell_id is None:
+ self._flowcell_id = 'unknown'
+
+ LOGGER.warning(
+ "Flowcell id was not found, guessing %s" % (
+ self._flowcell_id))
- LOGGER.warning(
- "Flowcell id was not found, guessing %s" % (
- flowcell_id))
- self._flowcell_id = flowcell_id
return self._flowcell_id
flowcell_id = property(_get_flowcell_id)
+ def _get_flowcell_id_from_flowcellid(self):
+ """Extract flowcell id from a Config/FlowcellId.xml file
+ """
+ config_dir = os.path.join(self.pathname, 'Config')
+ flowcell_id_path = os.path.join(config_dir, 'FlowcellId.xml')
+ if os.path.exists(flowcell_id_path):
+ flowcell_id_tree = ElementTree.parse(flowcell_id_path)
+ return flowcell_id_tree.findtext('Text')
+
+ def _get_flowcell_id_from_runinfo(self):
+ """Read RunInfo file for flowcell id
+ """
+ runinfo = os.path.join(self.pathname, 'RunInfo.xml')
+ if os.path.exists(runinfo):
+ tree = ElementTree.parse(runinfo)
+ root = tree.getroot()
+ fc_nodes = root.xpath('/RunInfo/Run/Flowcell')
+ if len(fc_nodes) == 1:
+ return fc_nodes[0].text
+
+
+ def _get_flowcell_id_from_path(self):
+ """Guess a flowcell name from the path
+ """
+ path_fields = self.pathname.split('_')
+ if len(path_fields) > 0:
+ # guessing last element of filename
+ return path_fields[-1]
+
def _get_runfolder_name(self):
- if self.gerald is None:
- return None
- else:
+ if self.gerald:
return self.gerald.runfolder_name
+ elif hasattr(self.image_analysis, 'runfolder_name'):
+ return self.image_analysis.runfolder_name
+ else:
+ return None
runfolder_name = property(_get_runfolder_name)
def get_elements(self):
self.bustard = bustard.Bustard(xml=element)
elif tag == gerald.Gerald.GERALD.lower():
self.gerald = gerald.Gerald(xml=element)
+ elif tag == gerald.CASAVA.GERALD.lower():
+ self.gerald = gerald.CASAVA(xml=element)
else:
LOGGER.warn('PipelineRun unrecognized tag %s' % (tag,))
run = PipelineRun(xml=tree)
return run
-def get_runs(runfolder):
+def get_runs(runfolder, flowcell_id=None):
"""
Search through a run folder for all the various sub component runs
and then return a PipelineRun for each different combination.
from htsworkflow.pipelines import bustard
from htsworkflow.pipelines import gerald
- def scan_post_image_analysis(runs, runfolder, image_analysis, pathname):
+ def scan_post_image_analysis(runs, runfolder, datadir, image_analysis,
+ pathname):
+ added = build_aligned_runs(image_analysis, runs, datadir, runfolder)
+ # If we're a multiplexed run, don't look for older run type.
+ if added > 0:
+ return
+
LOGGER.info("Looking for bustard directories in %s" % (pathname,))
bustard_dirs = glob(os.path.join(pathname, "Bustard*"))
# RTA BaseCalls looks enough like Bustard.
for bustard_pathname in bustard_dirs:
LOGGER.info("Found bustard directory %s" % (bustard_pathname,))
b = bustard.bustard(bustard_pathname)
- gerald_glob = os.path.join(bustard_pathname, 'GERALD*')
- LOGGER.info("Looking for gerald directories in %s" % (pathname,))
- for gerald_pathname in glob(gerald_glob):
- LOGGER.info("Found gerald directory %s" % (gerald_pathname,))
- try:
- g = gerald.gerald(gerald_pathname)
- p = PipelineRun(runfolder)
- p.image_analysis = image_analysis
- p.bustard = b
- p.gerald = g
- runs.append(p)
- except IOError, e:
- LOGGER.error("Ignoring " + str(e))
-
+ build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname, runfolder)
+
+
def build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname, runfolder):
    """Append a PipelineRun to `runs` for each GERALD* directory found
    under `bustard_pathname`.

    Returns the number of runs appended. Unreadable gerald directories
    are logged and skipped rather than aborting the scan.
    """
    start = len(runs)
    gerald_glob = os.path.join(bustard_pathname, 'GERALD*')
    LOGGER.info("Looking for gerald directories in %s" % (pathname,))
    for gerald_pathname in glob(gerald_glob):
        LOGGER.info("Found gerald directory %s" % (gerald_pathname,))
        try:
            g = gerald.gerald(gerald_pathname)
            p = PipelineRun(runfolder, flowcell_id)
            p.datadir = datadir
            p.image_analysis = image_analysis
            p.bustard = b
            p.gerald = g
            runs.append(p)
        # Fix: `except IOError, e` is Python-2-only syntax; the `as`
        # form is valid on Python 2.6+ and Python 3.
        except IOError as e:
            LOGGER.error("Ignoring " + str(e))
    return len(runs) - start
def build_aligned_runs(image_analysis, runs, datadir, runfolder):
    """Append a PipelineRun to `runs` for each CASAVA 'Aligned*'
    directory found directly under `runfolder`.

    Returns the number of runs appended. Unreadable aligned directories
    are logged and skipped rather than aborting the scan.
    """
    start = len(runs)
    aligned_glob = os.path.join(runfolder, 'Aligned*')
    for aligned in glob(aligned_glob):
        LOGGER.info("Found aligned directory %s" % (aligned,))
        try:
            g = gerald.gerald(aligned)
            p = PipelineRun(runfolder, flowcell_id)
            # The matching basecall data lives beside the runfolder
            # name recorded by the aligned (gerald) results.
            bustard_pathname = os.path.join(runfolder, g.runfolder_name)

            p.datadir = datadir
            p.image_analysis = image_analysis
            p.bustard = bustard.bustard(bustard_pathname)
            p.gerald = g
            runs.append(p)
        # Fix: `except IOError, e` is Python-2-only syntax; the `as`
        # form is valid on Python 2.6+ and Python 3.
        except IOError as e:
            LOGGER.error("Ignoring " + str(e))
    return len(runs) - start
datadir = os.path.join(runfolder, 'Data')
LOGGER.info('Searching for runs in ' + datadir)
)
else:
scan_post_image_analysis(
- runs, runfolder, image_analysis, firecrest_pathname
+ runs, runfolder, datadir, image_analysis, firecrest_pathname
)
# scan for IPAR directories
ipar_dirs = glob(os.path.join(datadir, "IPAR_*"))
)
else:
scan_post_image_analysis(
- runs, runfolder, image_analysis, ipar_pathname
+ runs, runfolder, datadir, image_analysis, ipar_pathname
)
return runs
genome = 'unknown'
for k, v in mapped_reads.items():
path, k = os.path.split(k)
- if len(path) > 0 and not genome_map.has_key(path):
+ if len(path) > 0 and path not in genome_map:
genome = path
genome_reads += v
else:
def summarize_lane(gerald, lane_id):
    """Build a human-readable report for one lane/read.

    Returns a list of text lines covering sample name, cluster counts,
    total reads, eland match-code breakdowns, and per-genome mapped
    read counts, depending on what the eland result provides.
    """
    report = []
    lane_results = gerald.summary.lane_results
    eland_result = gerald.eland_results[lane_id]
    report.append("Sample name %s" % (eland_result.sample_name))
    report.append("Lane id %s end %s" % (lane_id.lane, lane_id.read))

    # Cluster statistics are only present when the summary covers
    # this read and lane.
    read_results = lane_results[lane_id.read] if lane_id.read < len(lane_results) else {}
    if lane_id.lane in read_results:
        cluster = read_results[lane_id.lane].cluster
        report.append("Clusters %d +/- %d" % (cluster[0], cluster[1]))
    report.append("Total Reads: %d" % (eland_result.reads))

    if hasattr(eland_result, 'match_codes'):
        mc = eland_result.match_codes
        no_match = mc['NM']
        qc_failed = mc['QC']
        total = float(eland_result.reads)
        report.append("No Match: %d (%2.2g %%)" % (no_match, no_match / total * 100))
        report.append("QC Failed: %d (%2.2g %%)" % (qc_failed, qc_failed / total * 100))
        report.append('Unique (0,1,2 mismatches) %d %d %d' %
                      (mc['U0'], mc['U1'], mc['U2']))
        report.append('Repeat (0,1,2 mismatches) %d %d %d' %
                      (mc['R0'], mc['R1'], mc['R2']))

    if hasattr(eland_result, 'genome_map'):
        report.append("Mapped Reads")
        mapped_reads = summarize_mapped_reads(eland_result.genome_map,
                                              eland_result.mapped_reads)
        for name, counts in mapped_reads.items():
            report.append(" %s: %d" % (name, counts))
    report.append('')
    return report
def summary_report(runs):
# print a run name?
report.append('Summary for %s' % (run.name,))
# sort the report
- eland_keys = run.gerald.eland_results.results[0].keys()
- eland_keys.sort(alphanum)
-
- for lane_id in eland_keys:
- report.extend(summarize_lane(run.gerald, lane_id))
- report.append('---')
- report.append('')
- return os.linesep.join(report)
+ eland_keys = sorted(run.gerald.eland_results.keys())
+ for lane_id in eland_keys:
+ report.extend(summarize_lane(run.gerald, lane_id))
+ report.append('---')
+ report.append('')
+ return os.linesep.join(report)
def is_compressed(filename):
if os.path.splitext(filename)[1] == ".gz":
os.chdir(cwd)
def save_summary_file(pipeline, cycle_dir):
    """Copy the run's Summary.htm into `cycle_dir`.

    Looks first in the gerald directory, then in the run's
    Data/Status_Files directory; logs when neither copy exists.
    """
    gerald_object = pipeline.gerald
    gerald_summary = os.path.join(gerald_object.pathname, 'Summary.htm')
    status_files_summary = os.path.join(pipeline.datadir, 'Status_Files', 'Summary.htm')
    if os.path.exists(gerald_summary):
        LOGGER.info('Copying %s to %s' % (gerald_summary, cycle_dir))
        shutil.copy(gerald_summary, cycle_dir)
    elif os.path.exists(status_files_summary):
        LOGGER.info('Copying %s to %s' % (status_files_summary, cycle_dir))
        shutil.copy(status_files_summary, cycle_dir)
    else:
        # Fix: this branch referenced `summary_path`, a name removed by
        # the gerald/status-files rename, so a missing summary raised
        # NameError instead of logging.
        LOGGER.info('Summary file %s was not found' % (gerald_summary,))
# copy & bzip eland files
bz_commands = []
- for lanes_dictionary in gerald_object.eland_results.results:
- for eland_lane in lanes_dictionary.values():
- source_name = eland_lane.pathname
+ for key in gerald_object.eland_results:
+ eland_lane = gerald_object.eland_results[key]
+ for source_name in eland_lane.pathnames:
if source_name is None:
LOGGER.info(
"Lane ID %s does not have a filename." % (eland_lane.lane_id,))
q.run()
-def extract_results(runs, output_base_dir=None, site="individual", num_jobs=1, raw_format='qseq'):
+def extract_results(runs, output_base_dir=None, site="individual", num_jobs=1, raw_format=None):
"""
Iterate over runfolders in runs extracting the most useful information.
* run parameters (in run-*.xml)
output_base_dir = os.getcwd()
for r in runs:
- result_dir = os.path.join(output_base_dir, r.flowcell_id)
- LOGGER.info("Using %s as result directory" % (result_dir,))
- if not os.path.exists(result_dir):
- os.mkdir(result_dir)
-
- # create cycle_dir
- cycle = "C%d-%d" % (r.image_analysis.start, r.image_analysis.stop)
- LOGGER.info("Filling in %s" % (cycle,))
- cycle_dir = os.path.join(result_dir, cycle)
- cycle_dir = os.path.abspath(cycle_dir)
- if os.path.exists(cycle_dir):
- LOGGER.error("%s already exists, not overwriting" % (cycle_dir,))
- continue
- else:
- os.mkdir(cycle_dir)
-
- # save run file
- r.save(cycle_dir)
-
- # save illumina flowcell status report
- save_flowcell_reports(os.path.join(r.image_analysis.pathname, '..'), cycle_dir)
-
- # save stuff from bustard
- # grab IVC plot
- save_ivc_plot(r.bustard, cycle_dir)
-
- # build base call saving commands
- if site is not None:
- lanes = []
- for lane in range(1, 9):
- if r.gerald.lanes[lane].analysis != 'none':
- lanes.append(lane)
-
- run_name = srf.pathname_to_run_name(r.pathname)
- if raw_format == 'qseq':
- seq_cmds = srf.make_qseq_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir)
- elif raw_format == 'srf':
- seq_cmds = srf.make_srf_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir, 0)
+ result_dir = os.path.join(output_base_dir, r.flowcell_id)
+ LOGGER.info("Using %s as result directory" % (result_dir,))
+ if not os.path.exists(result_dir):
+ os.mkdir(result_dir)
+
+ # create cycle_dir
+ cycle = "C%d-%d" % (r.image_analysis.start, r.image_analysis.stop)
+ LOGGER.info("Filling in %s" % (cycle,))
+ cycle_dir = os.path.join(result_dir, cycle)
+ cycle_dir = os.path.abspath(cycle_dir)
+ if os.path.exists(cycle_dir):
+ LOGGER.error("%s already exists, not overwriting" % (cycle_dir,))
+ continue
else:
- raise ValueError('Unknown --raw-format=%s' % (raw_format))
- srf.run_commands(r.bustard.pathname, seq_cmds, num_jobs)
+ os.mkdir(cycle_dir)
+
+ # save run file
+ r.save(cycle_dir)
- # save stuff from GERALD
- # copy stuff out of the main run
- g = r.gerald
+ # save illumina flowcell status report
+ save_flowcell_reports(os.path.join(r.image_analysis.pathname, '..'),
+ cycle_dir)
- # save summary file
- save_summary_file(g, cycle_dir)
+ # save stuff from bustard
+ # grab IVC plot
+ save_ivc_plot(r.bustard, cycle_dir)
- # compress eland result files
- compress_eland_results(g, cycle_dir, num_jobs)
+ # build base call saving commands
+ if site is not None:
+ save_raw_data(num_jobs, r, site, raw_format, cycle_dir)
- # md5 all the compressed files once we're done
- md5_commands = srf.make_md5_commands(cycle_dir)
- srf.run_commands(cycle_dir, md5_commands, num_jobs)
+ # save stuff from GERALD
+ # copy stuff out of the main run
+ g = r.gerald
+
+ # save summary file
+ save_summary_file(r, cycle_dir)
+
+ # compress eland result files
+ compress_eland_results(g, cycle_dir, num_jobs)
+
+ # md5 all the compressed files once we're done
+ md5_commands = srf.make_md5_commands(cycle_dir)
+ srf.run_commands(cycle_dir, md5_commands, num_jobs)
+
def save_raw_data(num_jobs, r, site, raw_format, cycle_dir):
    """Archive the run's raw basecall data into `cycle_dir`.

    Chooses the archival strategy by `raw_format` (falling back to the
    bustard object's reported sequence format): hiseq fastq copies,
    qseq tarring commands, or srf conversion commands, then executes
    any generated commands with `num_jobs` workers.
    """
    # Lanes with recorded parameters are the ones worth saving.
    lanes = [lane for lane in r.gerald.lanes
             if r.gerald.lanes.get(lane, None) is not None]

    run_name = srf.pathname_to_run_name(r.pathname)
    seq_cmds = []
    if raw_format is None:
        raw_format = r.bustard.sequence_format

    LOGGER.info("Raw Format is: %s" % (raw_format, ))
    if raw_format == 'fastq':
        rawpath = os.path.join(r.pathname, r.gerald.runfolder_name)
        LOGGER.info("raw data = %s" % (rawpath,))
        srf.copy_hiseq_project_fastqs(run_name, rawpath, site, cycle_dir)
    elif raw_format == 'qseq':
        seq_cmds = srf.make_qseq_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir)
    elif raw_format == 'srf':
        seq_cmds = srf.make_srf_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir, 0)
    else:
        raise ValueError('Unknown --raw-format=%s' % (raw_format))
    srf.run_commands(r.bustard.pathname, seq_cmds, num_jobs)
def rm_list(files, dry_run=True):
for f in files:
LOGGER.info("Cleaning images")
image_dirs = glob(os.path.join(run.pathname, 'Images', 'L*'))
rm_list(image_dirs, dry_run)
- # cd Data/C1-*_Firecrest*
- LOGGER.info("Cleaning intermediate files")
+ # rm ReadPrep
+ LOGGER.info("Cleaning ReadPrep*")
+ read_prep_dirs = glob(os.path.join(run.pathname, 'ReadPrep*'))
+ rm_list(read_prep_dirs, dry_run)
+ # rm ReadPrep
+ LOGGER.info("Cleaning Thubmnail_images")
+ thumbnail_dirs = glob(os.path.join(run.pathname, 'Thumbnail_Images'))
+ rm_list(thumbnail_dirs, dry_run)
+
# make clean_intermediate
+ logging.info("Cleaning intermediate files")
if os.path.exists(os.path.join(run.image_analysis.pathname, 'Makefile')):
clean_process = subprocess.Popen(['make', 'clean_intermediate'],
cwd=run.image_analysis.pathname,)