VERSION_RE = "([0-9\.]+)"
USER_RE = "([a-zA-Z0-9]+)"
LANES_PER_FLOWCELL = 8
+LANE_LIST = range(1, LANES_PER_FLOWCELL+1)
from htsworkflow.util.alphanum import alphanum
from htsworkflow.util.ethelp import indent, flatten
def scan_post_image_analysis(runs, runfolder, image_analysis, pathname):
logging.info("Looking for bustard directories in %s" % (pathname,))
- bustard_glob = os.path.join(pathname, "Bustard*")
- for bustard_pathname in glob(bustard_glob):
+ bustard_dirs = glob(os.path.join(pathname, "Bustard*"))
+ # RTA BaseCalls looks enough like Bustard.
+ bustard_dirs.extend(glob(os.path.join(pathname, "BaseCalls")))
+ for bustard_pathname in bustard_dirs:
logging.info("Found bustard directory %s" % (bustard_pathname,))
b = bustard.bustard(bustard_pathname)
gerald_glob = os.path.join(bustard_pathname, 'GERALD*')
logging.info('Found firecrest in ' + datadir)
image_analysis = firecrest.firecrest(firecrest_pathname)
if image_analysis is None:
- logging.warn(
+ logging.warn(
"%s is an empty or invalid firecrest directory" % (firecrest_pathname,)
)
- else:
+ else:
scan_post_image_analysis(
runs, runfolder, image_analysis, firecrest_pathname
)
# scan for IPAR directories
- for ipar_pathname in glob(os.path.join(datadir,"IPAR_*")):
+ ipar_dirs = glob(os.path.join(datadir, "IPAR_*"))
+ # The Intensities directory from the RTA software looks a lot like IPAR
+ ipar_dirs.extend(glob(os.path.join(datadir, 'Intensities')))
+ for ipar_pathname in ipar_dirs:
logging.info('Found ipar directories in ' + datadir)
image_analysis = ipar.ipar(ipar_pathname)
if image_analysis is None:
- logging.warn(
+ logging.warn(
"%s is an empty or invalid IPAR directory" %(ipar_pathname,)
)
- else:
+ else:
scan_post_image_analysis(
runs, runfolder, image_analysis, ipar_pathname
)
from htsworkflow.pipelines import bustard
from htsworkflow.pipelines import gerald
+ gerald_dir = os.path.expanduser(gerald_dir)
bustard_dir = os.path.abspath(os.path.join(gerald_dir, '..'))
image_dir = os.path.abspath(os.path.join(gerald_dir, '..', '..'))
image_run = firecrest.firecrest(image_dir)
elif re.search('IPAR', short_image_dir, re.IGNORECASE) is not None:
image_run = ipar.ipar(image_dir)
+ elif re.search('Intensities', short_image_dir, re.IGNORECASE) is not None:
+ image_run = ipar.ipar(image_dir)
+
# if we din't find a run, report the error and return
if image_run is None:
msg = '%s does not contain an image processing step' % (image_dir,)
cluster = summary_results[end][eland_result.lane_id].cluster
report.append("Clusters %d +/- %d" % (cluster[0], cluster[1]))
report.append("Total Reads: %d" % (eland_result.reads))
- mc = eland_result._match_codes
- nm = mc['NM']
- nm_percent = float(nm)/eland_result.reads * 100
- qc = mc['QC']
- qc_percent = float(qc)/eland_result.reads * 100
-
- report.append("No Match: %d (%2.2g %%)" % (nm, nm_percent))
- report.append("QC Failed: %d (%2.2g %%)" % (qc, qc_percent))
- report.append('Unique (0,1,2 mismatches) %d %d %d' % \
- (mc['U0'], mc['U1'], mc['U2']))
- report.append('Repeat (0,1,2 mismatches) %d %d %d' % \
- (mc['R0'], mc['R1'], mc['R2']))
- report.append("Mapped Reads")
- mapped_reads = summarize_mapped_reads(eland_result.genome_map, eland_result.mapped_reads)
- for name, counts in mapped_reads.items():
- report.append(" %s: %d" % (name, counts))
+
+ if hasattr(eland_result, 'match_codes'):
+ mc = eland_result.match_codes
+ nm = mc['NM']
+ nm_percent = float(nm)/eland_result.reads * 100
+ qc = mc['QC']
+ qc_percent = float(qc)/eland_result.reads * 100
+
+ report.append("No Match: %d (%2.2g %%)" % (nm, nm_percent))
+ report.append("QC Failed: %d (%2.2g %%)" % (qc, qc_percent))
+ report.append('Unique (0,1,2 mismatches) %d %d %d' % \
+ (mc['U0'], mc['U1'], mc['U2']))
+ report.append('Repeat (0,1,2 mismatches) %d %d %d' % \
+ (mc['R0'], mc['R1'], mc['R2']))
+
+ if hasattr(eland_result, 'genome_map'):
+ report.append("Mapped Reads")
+ mapped_reads = summarize_mapped_reads(eland_result.genome_map, eland_result.mapped_reads)
+ for name, counts in mapped_reads.items():
+ report.append(" %s: %d" % (name, counts))
+
report.append('')
return report
logging.info("Running bzip2: " + " ".join(bzip_cmd))
logging.info("Writing to %s" %(tar_dest_name))
- tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE, shell=False,
+ env = {'BZIP': '-9'}
+ tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE, shell=False, env=env,
cwd=scores_path)
bzip = subprocess.Popen(bzip_cmd, stdin=tar.stdout, stdout=tar_dest)
tar.wait()
logging.info('Saving to %s' % (dest_name, ))
bzip.wait()
-def clean_runs(runs):
+def rm_list(files, dry_run=True):
+    """Delete every path in files, logging each action.
+
+    Directories are removed recursively via shutil.rmtree; regular
+    files are unlinked. Paths that do not exist are reported with a
+    warning. When dry_run is True (the default) deletions are only
+    logged, nothing is actually removed.
+    """
+    for f in files:
+        if os.path.exists(f):
+            logging.info('deleting %s' % (f,))
+            if not dry_run:
+                # rmtree for directory trees, unlink for plain files
+                if os.path.isdir(f):
+                    shutil.rmtree(f)
+                else:
+                    os.unlink(f)
+        else:
+            logging.warn("%s doesn't exist."% (f,))
+
+def clean_runs(runs, dry_run=True):
    """
    Clean up run folders to optimize for compression.
    """
-    # TODO: implement this.
-    # rm RunLog*.xml
-    # rm pipeline_*.txt
-    # rm gclog.txt
-    # rm NetCopy.log
-    # rm nfn.log
-    # rm Images/L*
-    # cd Data/C1-*_Firecrest*
-    # make clean_intermediate
-
-    pass
+    # runs: iterable of run objects exposing .pathname and
+    # .image_analysis.pathname (see scan_for_runs) — TODO confirm.
+    # dry_run: when True (default), log what would be deleted but
+    # remove nothing; passed straight through to rm_list.
+    if dry_run:
+        logging.info('In dry-run mode')
+
+    for run in runs:
+        logging.info('Cleaning %s' % (run.pathname,))
+        # rm RunLog*.xml
+        runlogs = glob(os.path.join(run.pathname, 'RunLog*xml'))
+        rm_list(runlogs, dry_run)
+        # rm pipeline_*.txt
+        pipeline_logs = glob(os.path.join(run.pathname, 'pipeline*.txt'))
+        rm_list(pipeline_logs, dry_run)
+        # rm gclog.txt?
+        # rm NetCopy.log? Isn't this robocopy?
+        logs = glob(os.path.join(run.pathname, '*.log'))
+        rm_list(logs, dry_run)
+        # rm nfn.log?
+        # Calibration
+        calibration_dir = glob(os.path.join(run.pathname, 'Calibration_*'))
+        rm_list(calibration_dir, dry_run)
+        # rm Images/L*
+        logging.info("Cleaning images")
+        image_dirs = glob(os.path.join(run.pathname, 'Images', 'L*'))
+        rm_list(image_dirs, dry_run)
+        # cd Data/C1-*_Firecrest*
+        logging.info("Cleaning intermediate files")
+        # make clean_intermediate
+        # NOTE: this step runs even in dry-run mode — the Makefile
+        # target is assumed to be safe/idempotent; verify before use.
+        if os.path.exists(os.path.join(run.image_analysis.pathname, 'Makefile')):
+            clean_process = subprocess.Popen(['make', 'clean_intermediate'],
+                                             cwd=run.image_analysis.pathname,)
+            clean_process.wait()
+
+
+