"""
import logging
import os
-from pprint import pformat
+from pprint import pformat, pprint
import sys
import types
+from urlparse import urljoin, urlparse
-from htsworkflow.pipelines.sequences import scan_for_sequences
+from htsworkflow.pipelines.sequences import scan_for_sequences, \
+ update_model_sequence_library
+from htsworkflow.pipelines.samplekey import SampleKey
from htsworkflow.pipelines import qseq2fastq
from htsworkflow.pipelines import srf2fastq
from htsworkflow.pipelines import desplit_fastq
-from htsworkflow.util.api import HtswApi
+from htsworkflow.submission.fastqname import FastqName
+from htsworkflow.util.rdfhelp import get_model, dump_model, load_into_model, \
+ fromTypedNode, \
+ strip_namespace
+from htsworkflow.util.rdfns import *
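+# rdfns' star import supplies the rdfNS, libraryOntology, and dcNS namespaces used below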
from htsworkflow.util.conversion import parse_flowcell_id
from django.conf import settings
from django.template import Context, loader
+import RDF
+
LOGGER = logging.getLogger(__name__)
+
class CondorFastqExtract(object):
- def __init__(self, host, apidata, sequences_path,
+ def __init__(self, host, sequences_path,
log_path='log',
+ model=None,
force=False):
"""Extract fastqs from results archive
log_path (str): where to put condor log files
force (bool): do we force overwriting current files?
"""
- self.api = HtswApi(host, apidata)
+ self.host = host
+ self.model = get_model(model)
self.sequences_path = sequences_path
self.log_path = log_path
self.force = force
+ LOGGER.info("CondorFastq host={0}".format(self.host))
+ LOGGER.info("CondorFastq sequences_path={0}".format(self.sequences_path))
+ LOGGER.info("CondorFastq log_path={0}".format(self.log_path))
- def create_scripts(self, result_map ):
- """
- """
+ def create_scripts(self, result_map):
+ """Generate condor scripts to build any needed fastq files
+ """
template_map = {'srf': 'srf.condor',
'qseq': 'qseq.condor',
- 'split_fastq': 'split_fastq.condor'}
+ 'split_fastq': 'split_fastq.condor',
+ }
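+ # pass the caller's PYTHONPATH through to the condor job environment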
+ env = None
+ pythonpath = os.environ.get('PYTHONPATH', None)
+ if pythonpath is not None:
+ env = "PYTHONPATH=%s" % (pythonpath,)
condor_entries = self.build_condor_arguments(result_map)
for script_type in template_map.keys():
template = loader.get_template(template_map[script_type])
variables = {'python': sys.executable,
'logdir': self.log_path,
- 'env': os.environ.get('PYTHONPATH', None),
+ 'env': env,
'args': condor_entries[script_type],
+ 'root_url': self.host,
}
-
context = Context(variables)
with open(script_type + '.condor','w+') as outstream:
condor_entries = {'srf': [],
'qseq': [],
'split_fastq': []}
+
conversion_funcs = {'srf': self.condor_srf_to_fastq,
'qseq': self.condor_qseq_to_fastq,
'split_fastq': self.condor_desplit_fastq
}
-
- lib_db = self.find_archive_sequence_files(result_map)
- needed_targets = self.find_missing_targets(result_map, lib_db)
+ sequences = self.find_archive_sequence_files(result_map)
+ needed_targets = self.update_fastq_targets(result_map, sequences)
for target_pathname, available_sources in needed_targets.items():
LOGGER.debug(' target : %s' % (target_pathname,))
conversion = conversion_funcs.get(condor_type, None)
if conversion is None:
errmsg = "Unrecognized type: {0} for {1}"
- print errmsg.format(condor_type,
- pformat(available_sources))
+ LOGGER.error(errmsg.format(condor_type,
+ pformat(available_sources)))
continue
sources = available_sources.get(condor_type, None)
condor_entries.setdefault(condor_type, []).append(
conversion(sources, target_pathname))
else:
- print " need file", target_pathname
+ LOGGER.warn(" need file %s", target_pathname)
return condor_entries
"""
Find archived sequence files associated with our results.
"""
- LOGGER.debug("Searching for sequence files in: %s" %(self.sequences_path,))
-
- lib_db = {}
- seq_dirs = set()
- candidate_lanes = {}
- for lib_id in result_map.keys():
- lib_info = self.api.get_library(lib_id)
- lib_info['lanes'] = {}
- lib_db[lib_id] = lib_info
-
- for lane in lib_info['lane_set']:
- lane_key = (lane['flowcell'], lane['lane_number'])
- candidate_lanes[lane_key] = lib_id
- seq_dirs.add(os.path.join(self.sequences_path,
- 'flowcells',
- lane['flowcell']))
- LOGGER.debug("Seq_dirs = %s" %(unicode(seq_dirs)))
- candidate_seq_list = scan_for_sequences(seq_dirs)
-
- # at this point we have too many sequences as scan_for_sequences
- # returns all the sequences in a flowcell directory
- # so lets filter out the extras
-
- for seq in candidate_seq_list:
- lane_key = (seq.flowcell, seq.lane)
- lib_id = candidate_lanes.get(lane_key, None)
- if lib_id is not None:
- lib_info = lib_db[lib_id]
- lib_info['lanes'].setdefault(lane_key, set()).add(seq)
-
- return lib_db
-
- def find_missing_targets(self, result_map, lib_db):
+ self.import_libraries(result_map)
+ flowcell_ids = self.find_relevant_flowcell_ids()
+ self.import_sequences(flowcell_ids)
+
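+ # Find every demultiplexed result file already in the model, along with
+ # the library, flowcell, and read metadata needed to name its fastq
+ # target; raw sequencer_result files are filtered out.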
+ query_text = """
+ prefix libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+ prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+ prefix xsd: <http://www.w3.org/2001/XMLSchema#>
+
+ select ?filenode ?filetype ?cycle ?lane_number ?read
+ ?library ?library_id
+ ?flowcell ?flowcell_id ?read_length
+ ?flowcell_type ?flowcell_status
+ where {
+ ?filenode libns:cycle ?cycle ;
+ libns:lane_number ?lane_number ;
+ libns:read ?read ;
+ libns:flowcell ?flowcell ;
+ libns:flowcell_id ?flowcell_id ;
+ libns:library ?library ;
+ libns:library_id ?library_id ;
+ libns:file_type ?filetype ;
+ a libns:IlluminaResult .
+ ?flowcell libns:read_length ?read_length ;
+ libns:flowcell_type ?flowcell_type .
+ OPTIONAL { ?flowcell libns:flowcell_status ?flowcell_status }
+ FILTER(?filetype != libns:sequencer_result)
+ }
"""
- Check if the sequence file exists.
- This requires computing what the sequence name is and checking
- to see if it can be found in the sequence location.
+ LOGGER.debug("find_archive_sequence_files query: %s",
+ query_text)
+ query = RDF.SPARQLQuery(query_text)
+ results = []
+ for r in query.execute(self.model):
+ library_id = fromTypedNode(r['library_id'])
+ if library_id in result_map:
+ seq = SequenceResult(r)
+ LOGGER.debug("Creating sequence result for library %s: %s",
+ library_id,
+ repr(seq))
+ results.append(seq)
+ return results
+
+ def import_libraries(self, result_map):
+ for lib_id in result_map.keys():
+ lib_id_encoded = lib_id.encode('utf-8')
+ liburl = urljoin(self.host, 'library/%s/' % (lib_id_encoded,))
+ library = RDF.Node(RDF.Uri(liburl))
+ self.import_library(library)
- Adds seq.paired flag to sequences listed in lib_db[*]['lanes']
+ def import_library(self, library):
+ """Import library data into our model if we don't have it already
+ """
+ q = RDF.Statement(library, rdfNS['type'], libraryOntology['Library'])
+ imported = False
+ if not self.model.contains_statement(q):
+ imported = True
+ load_into_model(self.model, 'rdfa', library)
+ LOGGER.debug("Did we import %s: %s", library.uri, imported)
+
+ def find_relevant_flowcell_ids(self):
+ """Generate set of flowcell ids that had samples of interest on them
+ """
+ flowcell_query = RDF.SPARQLQuery("""
+prefix libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+
+select distinct ?flowcell ?flowcell_id
+WHERE {
+ ?library a libns:Library ;
+ libns:has_lane ?lane .
+ ?lane libns:flowcell ?flowcell .
+ ?flowcell libns:flowcell_id ?flowcell_id .
+}""")
+ flowcell_ids = set()
+ for r in flowcell_query.execute(self.model):
+ flowcell_ids.add(fromTypedNode(r['flowcell_id']))
+ imported = False
+ a_lane = self.model.get_target(r['flowcell'],
+ libraryOntology['has_lane'])
+ if a_lane is None:
+ imported = True
+ # we lack information about which lanes were on this flowcell
+ load_into_model(self.model, 'rdfa', r['flowcell'])
+ LOGGER.debug("Did we imported %s: %s" % (r['flowcell'].uri,
+ imported))
+
+ return flowcell_ids
+
+ def import_sequences(self, flowcell_ids):
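+ # each flowcell's sequences live under sequences_path in a directory named by flowcell id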
+ seq_dirs = []
+ for f in flowcell_ids:
+ seq_dirs.append(os.path.join(self.sequences_path, str(f)))
+ sequences = scan_for_sequences(seq_dirs)
+ for seq in sequences:
+ seq.save_to_model(self.model, self.host)
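+ # link the newly saved sequences back to their library records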
+ update_model_sequence_library(self.model, self.host)
+
+ def update_fastq_targets(self, result_map, raw_files):
+ """Return list of fastq files that need to be built.
+
+ Also update the model with links between the illumina result files
+ and their target fastq files.
"""
- fastq_paired_template = '%(lib_id)s_%(flowcell)s_c%(cycle)s_l%(lane)s_r%(read)s.fastq'
- fastq_single_template = '%(lib_id)s_%(flowcell)s_c%(cycle)s_l%(lane)s.fastq'
# find what targets we're missing
needed_targets = {}
- for lib_id in result_map.keys():
- result_dir = result_map[lib_id]
- lib = lib_db[lib_id]
- lane_dict = make_lane_dict(lib_db, lib_id)
-
- for lane_key, sequences in lib['lanes'].items():
- for seq in sequences:
- seq.paired = lane_dict[seq.flowcell]['paired_end']
- lane_status = lane_dict[seq.flowcell]['status']
-
- if seq.paired and seq.read is None:
- seq.read = 1
- filename_attributes = {
- 'flowcell': seq.flowcell,
- 'lib_id': lib_id,
- 'lane': seq.lane,
- 'read': seq.read,
- 'cycle': seq.cycle
- }
- # skip bad runs
- if lane_status == 'Failed':
- continue
- if seq.flowcell == '30DY0AAXX':
- # 30DY0 only ran for 151 bases instead of 152
- # it is actually 76 1st read, 75 2nd read
- seq.mid_point = 76
-
- # end filters
- if seq.paired:
- target_name = fastq_paired_template % \
- filename_attributes
- else:
- target_name = fastq_single_template % \
- filename_attributes
-
- target_pathname = os.path.join(result_dir, target_name)
- if self.force or not os.path.exists(target_pathname):
- t = needed_targets.setdefault(target_pathname, {})
- t.setdefault(seq.filetype, []).append(seq)
-
+ for seq in raw_files:
+ if not seq.isgood:
+ continue
+ filename_attributes = {
+ 'flowcell': seq.flowcell_id,
+ 'lib_id': seq.library_id,
+ 'lane': seq.lane_number,
+ 'read': seq.read,
+ 'cycle': seq.cycle,
+ 'is_paired': seq.ispaired
+ }
+
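+ # FastqName renders the target filename from these attributes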
+ fqName = FastqName(**filename_attributes)
+
+ result_dir = result_map[seq.library_id]
+ target_pathname = os.path.join(result_dir, fqName.filename)
+ if self.force or not os.path.exists(target_pathname):
+ t = needed_targets.setdefault(target_pathname, {})
+ t.setdefault(seq.filetype, []).append(seq)
+ self.add_target_source_links(target_pathname, seq)
return needed_targets
+ def add_target_source_links(self, target, seq):
+ """Add link between target pathname and the 'lane' that produced it
+ (note lane objects are now post demultiplexing.)
+ """
+ target_uri = 'file://' + target.encode('utf-8')
+ target_node = RDF.Node(RDF.Uri(target_uri))
+ source_stmt = RDF.Statement(target_node, dcNS['source'], seq.filenode)
+ self.model.add_statement(source_stmt)
def condor_srf_to_fastq(self, sources, target_pathname):
if len(sources) > 1:
raise ValueError("srf to fastq can only handle one file")
+ mid_point = None
+ if sources[0].flowcell_id == '30DY0AAXX':
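+ # 30DY0 only ran for 151 bases instead of 152;
+ # it is actually 76 for the 1st read, 75 for the 2nd read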
+ mid_point = 76
+
return {
- 'sources': [os.path.abspath(sources[0].path)],
+ 'sources': [sources[0].path],
'pyscript': srf2fastq.__file__,
- 'flowcell': sources[0].flowcell,
- 'ispaired': sources[0].paired,
+ 'flowcell': sources[0].flowcell_id,
+ 'ispaired': sources[0].ispaired,
'target': target_pathname,
'target_right': target_pathname.replace('_r1.fastq', '_r2.fastq'),
- 'mid': getattr(sources[0], 'mid_point', None),
+ 'mid': mid_point,
'force': self.force,
}
paths.sort()
return {
'pyscript': qseq2fastq.__file__,
- 'flowcell': sources[0].flowcell,
+ 'flowcell': sources[0].flowcell_id,
'target': target_pathname,
'sources': paths,
- 'ispaired': sources[0].paired,
+ 'ispaired': sources[0].ispaired,
'istar': len(sources) == 1,
}
'pyscript': desplit_fastq.__file__,
'target': target_pathname,
'sources': paths,
- 'ispaired': sources[0].paired,
+ 'ispaired': sources[0].ispaired,
}
+
def make_lane_dict(lib_db, lib_id):
"""
Convert the lane_set in a lib_db to a dictionary
result.append((lane['flowcell'], lane))
return dict(result)
+class SequenceResult(object):
+ """Convert the sparql query result from find_archive_sequence_files
+ """
+ def __init__(self, result):
+ self.filenode = result['filenode']
+ self._filetype = result['filetype']
+ self.cycle = fromTypedNode(result['cycle'])
+ self.lane_number = fromTypedNode(result['lane_number'])
+ self.read = fromTypedNode(result['read'])
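+ # read can come back as a string literal; treat any such value as read 1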
+ if isinstance(self.read, types.StringTypes):
+ self.read = 1
+ self.library = result['library']
+ self.library_id = fromTypedNode(result['library_id'])
+ self.flowcell = result['flowcell']
+ self.flowcell_id = fromTypedNode(result['flowcell_id'])
+ self.flowcell_type = fromTypedNode(result['flowcell_type'])
+ self.flowcell_status = fromTypedNode(result['flowcell_status'])
+
+ def _is_good(self):
+ """is this sequence / flowcell 'good enough'"""
+ if self.flowcell_status is not None and \
+ self.flowcell_status.lower() == "failed":
+ return False
+ return True
+ isgood = property(_is_good)
+
+ def _get_ispaired(self):
+ return self.flowcell_type.lower() == "paired"
+ ispaired = property(_get_ispaired)
+
+ def _get_filetype(self):
+ return strip_namespace(libraryOntology, self._filetype)
+ filetype = property(_get_filetype)
+
+ def _get_path(self):
+ url = urlparse(str(self.filenode.uri))
+ if url.scheme == 'file':
+ return url.path
+ else:
+ errmsg = u"Unsupported scheme {0} for {1}"
+ raise ValueError(errmsg.format(url.scheme, unicode(url)))
+ path = property(_get_path)
+
+ def __repr__(self):
+ return "SequenceResult({0},{1},{2})".format(
+ str(self.filenode),
+ str(self.library_id),
+ str(self.flowcell_id))