"""
import logging
import os
+from pprint import pformat, pprint
import sys
import types
+from urlparse import urljoin, urlparse
-from htsworkflow.pipelines.sequences import scan_for_sequences
+from htsworkflow.pipelines.sequences import scan_for_sequences, \
+ update_model_sequence_library
+from htsworkflow.pipelines.samplekey import SampleKey
from htsworkflow.pipelines import qseq2fastq
from htsworkflow.pipelines import srf2fastq
-from htsworkflow.util.api import HtswApi
+from htsworkflow.pipelines import desplit_fastq
+from htsworkflow.submission.fastqname import FastqName
+from htsworkflow.util.rdfhelp import get_model, dump_model, load_into_model, \
+ fromTypedNode, \
+ strip_namespace
+from htsworkflow.util.rdfns import *
from htsworkflow.util.conversion import parse_flowcell_id
+from django.conf import settings
+from django.template import Context, loader
+
+import RDF
+
LOGGER = logging.getLogger(__name__)
+
class CondorFastqExtract(object):
- def __init__(self, host, apidata, sequences_path,
+ def __init__(self, host, sequences_path,
log_path='log',
+ model=None,
force=False):
"""Extract fastqs from results archive
log_path (str): where to put condor log files
force (bool): do we force overwriting current files?
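+        host (str): base URL of the htsworkflow instance metadata is pulled from
+        sequences_path (str): root of the directory tree searched for sequence files
+        model: passed to get_model() to obtain the RDF model metadata is stored in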
"""
- self.api = HtswApi(host, apidata)
+ self.host = host
+ self.model = get_model(model)
self.sequences_path = sequences_path
self.log_path = log_path
self.force = force
+ LOGGER.info("CondorFastq host={0}".format(self.host))
+ LOGGER.info("CondorFastq sequences_path={0}".format(self.sequences_path))
+ LOGGER.info("CondorFastq log_path={0}".format(self.log_path))
- def build_fastqs(self, library_result_map ):
+    def create_scripts(self, result_map):
"""
Generate condor scripts to build any needed fastq files
Args:
- library_result_map (list): [(library_id, destination directory), ...]
+ result_map: htsworkflow.submission.results.ResultMap()
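+
+        Writes one condor submit script per source type (srf.condor,
+        qseq.condor, split_fastq.condor) into the current directory.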
"""
- qseq_condor_header = self.get_qseq_condor_header()
- qseq_condor_entries = []
- srf_condor_header = self.get_srf_condor_header()
- srf_condor_entries = []
- lib_db = self.find_archive_sequence_files(library_result_map)
+ template_map = {'srf': 'srf.condor',
+ 'qseq': 'qseq.condor',
+ 'split_fastq': 'split_fastq.condor',
+ }
- needed_targets = self.find_missing_targets(library_result_map, lib_db)
+ env = None
+ pythonpath = os.environ.get('PYTHONPATH', None)
+ if pythonpath is not None:
+ env = "PYTHONPATH=%s" % (pythonpath,)
+ condor_entries = self.build_condor_arguments(result_map)
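+        # each .condor submit script is rendered from a Django template,
+        # which must be findable by the configured template loaders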
+ for script_type in template_map.keys():
+ template = loader.get_template(template_map[script_type])
+ variables = {'python': sys.executable,
+ 'logdir': self.log_path,
+ 'env': env,
+ 'args': condor_entries[script_type],
+ 'root_url': self.host,
+ }
+ context = Context(variables)
+
+ with open(script_type + '.condor','w+') as outstream:
+ outstream.write(template.render(context))
+
+ def build_condor_arguments(self, result_map):
+ condor_entries = {'srf': [],
+ 'qseq': [],
+ 'split_fastq': []}
+
+ conversion_funcs = {'srf': self.condor_srf_to_fastq,
+ 'qseq': self.condor_qseq_to_fastq,
+ 'split_fastq': self.condor_desplit_fastq
+ }
+ sequences = self.find_archive_sequence_files(result_map)
+ needed_targets = self.update_fastq_targets(result_map, sequences)
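+        # needed_targets maps each missing fastq pathname to a dict of
+        # {source file type: [SequenceResult, ...]} candidates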
for target_pathname, available_sources in needed_targets.items():
LOGGER.debug(' target : %s' % (target_pathname,))
LOGGER.debug(' candidate sources: %s' % (available_sources,))
- if available_sources.has_key('qseq'):
- source = available_sources['qseq']
- qseq_condor_entries.append(
- self.condor_qseq_to_fastq(source.path,
- target_pathname,
- source.flowcell)
- )
- elif available_sources.has_key('srf'):
- source = available_sources['srf']
- mid = getattr(source, 'mid_point', None)
- srf_condor_entries.append(
- self.condor_srf_to_fastq(source.path,
- target_pathname,
- source.paired,
- source.flowcell,
- mid)
- )
+ for condor_type in available_sources.keys():
+ conversion = conversion_funcs.get(condor_type, None)
+ if conversion is None:
+ errmsg = "Unrecognized type: {0} for {1}"
+ LOGGER.error(errmsg.format(condor_type,
+ pformat(available_sources)))
+ continue
+ sources = available_sources.get(condor_type, None)
+
+ if sources is not None:
+ condor_entries.setdefault(condor_type, []).append(
+ conversion(sources, target_pathname))
else:
- print " need file", target_pathname
-
- if len(srf_condor_entries) > 0:
- make_submit_script('srf.fastq.condor',
- srf_condor_header,
- srf_condor_entries)
-
- if len(qseq_condor_entries) > 0:
- make_submit_script('qseq.fastq.condor',
- qseq_condor_header,
- qseq_condor_entries)
-
-
- def get_qseq_condor_header(self):
- return """Universe=vanilla
-executable=%(exe)s
-error=%(log)s/qseq2fastq.$(process).out
-output=%(log)s/qseq2fastq.$(process).out
-log=%(log)s/qseq2fastq.log
-
-""" % {'exe': sys.executable,
- 'log': self.log_path }
-
- def get_srf_condor_header(self):
- return """Universe=vanilla
-executable=%(exe)s
-output=%(log)s/srf_pair_fastq.$(process).out
-error=%(log)s/srf_pair_fastq.$(process).out
-log=%(log)s/srf_pair_fastq.log
-environment="PYTHONPATH=%(env)s"
-
-""" % {'exe': sys.executable,
- 'log': self.log_path,
- 'env': os.environ.get('PYTHONPATH', '')
- }
-
- def find_archive_sequence_files(self, library_result_map):
+ LOGGER.warn(" need file %s", target_pathname)
+
+ return condor_entries
+
+ def find_archive_sequence_files(self, result_map):
"""
Find archived sequence files associated with our results.
"""
- LOGGER.debug("Searching for sequence files in: %s" %(self.sequences_path,))
-
- lib_db = {}
- seq_dirs = set()
- candidate_lanes = {}
- for lib_id, result_dir in library_result_map:
- lib_info = self.api.get_library(lib_id)
- lib_info['lanes'] = {}
- lib_db[lib_id] = lib_info
-
- for lane in lib_info['lane_set']:
- lane_key = (lane['flowcell'], lane['lane_number'])
- candidate_lanes[lane_key] = lib_id
- seq_dirs.add(os.path.join(self.sequences_path,
- 'flowcells',
- lane['flowcell']))
- LOGGER.debug("Seq_dirs = %s" %(unicode(seq_dirs)))
- candidate_seq_list = scan_for_sequences(seq_dirs)
-
- # at this point we have too many sequences as scan_for_sequences
- # returns all the sequences in a flowcell directory
- # so lets filter out the extras
-
- for seq in candidate_seq_list:
- lane_key = (seq.flowcell, seq.lane)
- lib_id = candidate_lanes.get(lane_key, None)
- if lib_id is not None:
- lib_info = lib_db[lib_id]
- lib_info['lanes'].setdefault(lane_key, set()).add(seq)
-
- return lib_db
-
- def find_missing_targets(self, library_result_map, lib_db):
+ self.import_libraries(result_map)
+ flowcell_ids = self.find_relevant_flowcell_ids()
+ self.import_sequences(flowcell_ids)
+
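+        # find every result file (srf, qseq, split fastq) along with the
+        # library and flowcell metadata needed to convert it to fastq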
+ query_text = """
+ prefix libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+ prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+ prefix xsd: <http://www.w3.org/2001/XMLSchema#>
+
+ select ?filenode ?filetype ?cycle ?lane_number ?read
+ ?library ?library_id
+ ?flowcell ?flowcell_id ?read_length
+ ?flowcell_type ?flowcell_status
+ where {
+ ?filenode libns:cycle ?cycle ;
+ libns:lane_number ?lane_number ;
+ libns:read ?read ;
+ libns:flowcell ?flowcell ;
+ libns:flowcell_id ?flowcell_id ;
+ libns:library ?library ;
+ libns:library_id ?library_id ;
+ libns:file_type ?filetype ;
+ a libns:IlluminaResult .
+ ?flowcell libns:read_length ?read_length ;
+ libns:flowcell_type ?flowcell_type .
+ OPTIONAL { ?flowcell libns:flowcell_status ?flowcell_status }
+ FILTER(?filetype != libns:sequencer_result)
+ }
"""
- Check if the sequence file exists.
- This requires computing what the sequence name is and checking
- to see if it can be found in the sequence location.
-
- Adds seq.paired flag to sequences listed in lib_db[*]['lanes']
+ LOGGER.debug("find_archive_sequence_files query: %s",
+ query_text)
+ query = RDF.SPARQLQuery(query_text)
+ results = []
+ for r in query.execute(self.model):
+ library_id = fromTypedNode(r['library_id'])
+ if library_id in result_map:
+ seq = SequenceResult(r)
+ LOGGER.debug("Creating sequence result for library %s: %s",
+ library_id,
+ repr(seq))
+ results.append(seq)
+ return results
+
+ def import_libraries(self, result_map):
+ for lib_id in result_map.keys():
+ lib_id_encoded = lib_id.encode('utf-8')
+ liburl = urljoin(self.host, 'library/%s/' % (lib_id_encoded,))
+ library = RDF.Node(RDF.Uri(liburl))
+ self.import_library(library)
+
+ def import_library(self, library):
+ """Import library data into our model if we don't have it already
+ """
+ q = RDF.Statement(library, rdfNS['type'], libraryOntology['Library'])
+        loaded = False
+        if not self.model.contains_statement(q):
+            loaded = True
+            # parse the library's detail page as RDFa into our model
+            load_into_model(self.model, 'rdfa', library)
+        LOGGER.debug("Did we import %s: %s", library.uri, loaded)
+
+ def find_relevant_flowcell_ids(self):
+ """Generate set of flowcell ids that had samples of interest on them
+ """
+ flowcell_query = RDF.SPARQLQuery("""
+prefix libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+
+select distinct ?flowcell ?flowcell_id
+WHERE {
+ ?library a libns:Library ;
+ libns:has_lane ?lane .
+ ?lane libns:flowcell ?flowcell .
+ ?flowcell libns:flowcell_id ?flowcell_id .
+}""")
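+        # collect the id of every flowcell that carried a lane belonging to
+        # a library already loaded into the model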
+ flowcell_ids = set()
+ for r in flowcell_query.execute(self.model):
+ flowcell_ids.add( fromTypedNode(r['flowcell_id']) )
+ imported = False
+ a_lane = self.model.get_target(r['flowcell'],
+ libraryOntology['has_lane'])
+ if a_lane is None:
+ imported = True
+ # we lack information about which lanes were on this flowcell
+ load_into_model(self.model, 'rdfa', r['flowcell'])
+            LOGGER.debug("Did we import %s: %s", r['flowcell'].uri, imported)
+
+ return flowcell_ids
+
+ def import_sequences(self, flowcell_ids):
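+        """Scan the per-flowcell sequence directories and record what is
+        found in the RDF model.
+        """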
+ seq_dirs = []
+ for f in flowcell_ids:
+ seq_dirs.append(os.path.join(self.sequences_path, str(f)))
+ sequences = scan_for_sequences(seq_dirs)
+ for seq in sequences:
+ seq.save_to_model(self.model, self.host)
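+        # link the scanned sequences back to their library metadata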
+ update_model_sequence_library(self.model, self.host)
+
+ def update_fastq_targets(self, result_map, raw_files):
+ """Return list of fastq files that need to be built.
+
+        Also update the model with links between Illumina result files
+        and their target fastq files.
"""
-        fastq_paired_template = '%(lib_id)s_%(flowcell)s_c%(cycle)s_l%(lane)s_r%(read)s.fastq'
-        fastq_single_template = '%(lib_id)s_%(flowcell)s_c%(cycle)s_l%(lane)s.fastq'
# find what targets we're missing
needed_targets = {}
- for lib_id, result_dir in library_result_map:
- lib = lib_db[lib_id]
- lane_dict = make_lane_dict(lib_db, lib_id)
-
- for lane_key, sequences in lib['lanes'].items():
- for seq in sequences:
- seq.paired = lane_dict[seq.flowcell]['paired_end']
- lane_status = lane_dict[seq.flowcell]['status']
-
- if seq.paired and seq.read is None:
- seq.read = 1
- filename_attributes = {
- 'flowcell': seq.flowcell,
- 'lib_id': lib_id,
- 'lane': seq.lane,
- 'read': seq.read,
- 'cycle': seq.cycle
- }
- # skip bad runs
- if lane_status == 'Failed':
- continue
- if seq.flowcell == '30DY0AAXX':
- # 30DY0 only ran for 151 bases instead of 152
- # it is actually 76 1st read, 75 2nd read
- seq.mid_point = 76
-
- # end filters
- if seq.paired:
- target_name = fastq_paired_template % filename_attributes
- else:
- target_name = fastq_single_template % filename_attributes
-
- target_pathname = os.path.join(result_dir, target_name)
- if self.force or not os.path.exists(target_pathname):
- t = needed_targets.setdefault(target_pathname, {})
- t[seq.filetype] = seq
-
+ for seq in raw_files:
+ if not seq.isgood:
+ continue
+ filename_attributes = {
+ 'flowcell': seq.flowcell_id,
+ 'lib_id': seq.library_id,
+ 'lane': seq.lane_number,
+ 'read': seq.read,
+ 'cycle': seq.cycle,
+ 'is_paired': seq.ispaired
+ }
+
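+            # FastqName renders the canonical fastq filename for these attributes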
+ fqName = FastqName(**filename_attributes)
+
+ result_dir = result_map[seq.library_id]
+ target_pathname = os.path.join(result_dir, fqName.filename)
+ if self.force or not os.path.exists(target_pathname):
+ t = needed_targets.setdefault(target_pathname, {})
+ t.setdefault(seq.filetype, []).append(seq)
+ self.add_target_source_links(target_pathname, seq)
return needed_targets
+ def add_target_source_links(self, target, seq):
+ """Add link between target pathname and the 'lane' that produced it
+        (note: lane objects are now post-demultiplexing).
+ """
+ target_uri = 'file://' + target.encode('utf-8')
+ target_node = RDF.Node(RDF.Uri(target_uri))
+ source_stmt = RDF.Statement(target_node, dcNS['source'], seq.filenode)
+ self.model.add_statement(source_stmt)
+
+ def condor_srf_to_fastq(self, sources, target_pathname):
+ if len(sources) > 1:
+ raise ValueError("srf to fastq can only handle one file")
+
+        mid_point = None
+        if sources[0].flowcell_id == '30DY0AAXX':
+            # 30DY0 only ran for 151 bases instead of 152;
+            # it is actually 76 bases for read 1 and 75 for read 2.
+            mid_point = 76
+
+ return {
+ 'sources': [sources[0].path],
+ 'pyscript': srf2fastq.__file__,
+ 'flowcell': sources[0].flowcell_id,
+ 'ispaired': sources[0].ispaired,
+ 'target': target_pathname,
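+            # for paired runs the second read's target is derived by swapping
+            # _r1 for _r2 in the target path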
+ 'target_right': target_pathname.replace('_r1.fastq', '_r2.fastq'),
+ 'mid': mid_point,
+ 'force': self.force,
+ }
+
+ def condor_qseq_to_fastq(self, sources, target_pathname):
+ paths = []
+ for source in sources:
+ paths.append(source.path)
+ paths.sort()
+ return {
+ 'pyscript': qseq2fastq.__file__,
+ 'flowcell': sources[0].flowcell_id,
+ 'target': target_pathname,
+ 'sources': paths,
+ 'ispaired': sources[0].ispaired,
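+            # a single source presumably means a tar archive of qseq files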
+ 'istar': len(sources) == 1,
+ }
+
+ def condor_desplit_fastq(self, sources, target_pathname):
+ paths = []
+ for source in sources:
+ paths.append(source.path)
+ paths.sort()
+ return {
+ 'pyscript': desplit_fastq.__file__,
+ 'target': target_pathname,
+ 'sources': paths,
+ 'ispaired': sources[0].ispaired,
+ }
- def condor_srf_to_fastq(self,
- srf_file,
- target_pathname,
- paired,
- flowcell=None,
- mid=None):
- py = srf2fastq.__file__
- args = [ py, srf_file, '--verbose']
- if paired:
- args.extend(['--left', target_pathname])
- # this is ugly. I did it because I was pregenerating the target
- # names before I tried to figure out what sources could generate
- # those targets, and everything up to this point had been
- # one-to-one. So I couldn't figure out how to pair the
- # target names.
- # With this at least the command will run correctly.
- # however if we rename the default targets, this'll break
- # also I think it'll generate it twice.
- args.extend(['--right',
- target_pathname.replace('_r1.fastq', '_r2.fastq')])
- else:
- args.extend(['--single', target_pathname ])
- if flowcell is not None:
- args.extend(['--flowcell', flowcell])
-
- if mid is not None:
- args.extend(['-m', str(mid)])
-
- if self.force:
- args.extend(['--force'])
-
- script = """arguments="%s"
-queue
-""" % (" ".join(args),)
-
- return script
-
-
- def condor_qseq_to_fastq(self, qseq_file, target_pathname, flowcell=None):
- py = qseq2fastq.__file__
- args = [py, '-i', qseq_file, '-o', target_pathname ]
- if flowcell is not None:
- args.extend(['-f', flowcell])
- script = """arguments="%s"
-queue
-""" % (" ".join(args))
-
- return script
-
-def make_submit_script(target, header, body_list):
- """
- write out a text file
-
- this was intended for condor submit scripts
-
- Args:
- target (str or stream):
- if target is a string, we will open and close the file
- if target is a stream, the caller is responsible.
-
- header (str);
- header to write at the beginning of the file
- body_list (list of strs):
- a list of blocks to add to the file.
- """
- if type(target) in types.StringTypes:
- f = open(target,"w")
- else:
- f = target
- f.write(header)
- for entry in body_list:
- f.write(entry)
- if type(target) in types.StringTypes:
- f.close()
def make_lane_dict(lib_db, lib_id):
"""
result.append((lane['flowcell'], lane))
return dict(result)
+class SequenceResult(object):
+    """Convert one SPARQL query result row from find_archive_sequence_files
+ """
+ def __init__(self, result):
+ self.filenode = result['filenode']
+ self._filetype = result['filetype']
+ self.cycle = fromTypedNode(result['cycle'])
+ self.lane_number = fromTypedNode(result['lane_number'])
+ self.read = fromTypedNode(result['read'])
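+        # a read number that comes back as an untyped string defaults to read 1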
+ if type(self.read) in types.StringTypes:
+ self.read = 1
+ self.library = result['library']
+ self.library_id = fromTypedNode(result['library_id'])
+ self.flowcell = result['flowcell']
+ self.flowcell_id = fromTypedNode(result['flowcell_id'])
+ self.flowcell_type = fromTypedNode(result['flowcell_type'])
+ self.flowcell_status = fromTypedNode(result['flowcell_status'])
+
+ def _is_good(self):
+ """is this sequence / flowcell 'good enough'"""
+ if self.flowcell_status is not None and \
+ self.flowcell_status.lower() == "failed":
+ return False
+ return True
+ isgood = property(_is_good)
+
+    def _get_ispaired(self):
+        return self.flowcell_type.lower() == "paired"
+ ispaired = property(_get_ispaired)
+
+ def _get_filetype(self):
+ return strip_namespace(libraryOntology, self._filetype)
+ filetype = property(_get_filetype)
+
+ def _get_path(self):
+ url = urlparse(str(self.filenode.uri))
+ if url.scheme == 'file':
+ return url.path
+ else:
+ errmsg = u"Unsupported scheme {0} for {1}"
+ raise ValueError(errmsg.format(url.scheme, unicode(url)))
+ path = property(_get_path)
+
+ def __repr__(self):
+ return "SequenceResult({0},{1},{2})".format(
+ str(self.filenode),
+ str(self.library_id),
+ str(self.flowcell_id))