from pprint import pformat, pprint
import sys
import types
-from urlparse import urljoin, urlparse
+from urllib.parse import urljoin, urlparse
from htsworkflow.pipelines.sequences import scan_for_sequences, \
update_model_sequence_library
from htsworkflow.pipelines import qseq2fastq
from htsworkflow.pipelines import srf2fastq
from htsworkflow.pipelines import desplit_fastq
+from htsworkflow.submission.fastqname import FastqName
from htsworkflow.util.rdfhelp import get_model, dump_model, load_into_model, \
fromTypedNode, \
stripNamespace
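# Optional alternative sketch: if this module must still import under Python 2
# during the transition, a guarded import gives the same names on both versions.
try:
    from urllib.parse import urljoin, urlparse   # Python 3
except ImportError:
    from urlparse import urljoin, urlparse       # Python 2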
if pythonpath is not None:
env = "PYTHONPATH=%s" % (pythonpath,)
condor_entries = self.build_condor_arguments(result_map)
- for script_type in template_map.keys():
+ for script_type in list(template_map.keys()):
template = loader.get_template(template_map[script_type])
variables = {'python': sys.executable,
'logdir': self.log_path,
sequences = self.find_archive_sequence_files(result_map)
needed_targets = self.update_fastq_targets(result_map, sequences)
- for target_pathname, available_sources in needed_targets.items():
+ for target_pathname, available_sources in list(needed_targets.items()):
LOGGER.debug(' target : %s' % (target_pathname,))
LOGGER.debug(' candidate sources: %s' % (available_sources,))
- for condor_type in available_sources.keys():
+ for condor_type in list(available_sources.keys()):
conversion = conversion_funcs.get(condor_type, None)
if conversion is None:
errmsg = "Unrecognized type: {0} for {1}"
- print errmsg.format(condor_type,
- pformat(available_sources))
+ LOGGER.error(errmsg.format(condor_type,
+ pformat(available_sources)))
continue
sources = available_sources.get(condor_type, None)
condor_entries.setdefault(condor_type, []).append(
conversion(sources, target_pathname))
else:
- print " need file", target_pathname
+ LOGGER.warning(" need file %s", target_pathname)
return condor_entries
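# Sketch (assumed shape, guessed from the converter modules imported at the top
# of this diff): conversion_funcs maps a sequence file type to a method that
# builds one condor entry for producing the target fastq. The helper names
# below are hypothetical.
conversion_funcs = {
    'qseq': self.condor_qseq_to_fastq,
    'srf': self.condor_srf_to_fastq,
    'split_fastq': self.condor_desplit_fastq,
}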
Find archived sequence files associated with our results.
"""
self.import_libraries(result_map)
- flowcell_ids = self.find_relavant_flowcell_ids()
+ flowcell_ids = self.find_relevant_flowcell_ids()
self.import_sequences(flowcell_ids)
query_text = """
libns:library ?library ;
libns:library_id ?library_id ;
libns:file_type ?filetype ;
- a libns:illumina_result .
+ a libns:IlluminaResult .
?flowcell libns:read_length ?read_length ;
libns:flowcell_type ?flowcell_type .
OPTIONAL { ?flowcell libns:flowcell_status ?flowcell_status }
return results
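# Sketch of how the SPARQL text above is run (the wrapper class name is an
# assumption; it refers to the class whose __init__ reads result['cycle'],
# result['lane_number'], etc. near the end of this diff):
query = RDF.SPARQLQuery(query_text)
results = [SequenceResult(row) for row in query.execute(self.model)]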
def import_libraries(self, result_map):
- for lib_id in result_map.keys():
+ for lib_id in list(result_map.keys()):
- lib_id_encoded = lib_id.encode('utf-8')
- liburl = urljoin(self.host, 'library/%s/' % (lib_id_encoded,))
+ liburl = urljoin(self.host, 'library/%s/' % (lib_id,))
library = RDF.Node(RDF.Uri(liburl))
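# Illustrative only (host and library id are made-up values): urljoin resolves
# the per-library path against self.host to give the RDFa page URL loaded by
# import_library() below.
example = urljoin('https://jumpgate.caltech.edu/', 'library/12345/')
assert example == 'https://jumpgate.caltech.edu/library/12345/'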
def import_library(self, library):
"""Import library data into our model if we don't have it already
"""
- q = RDF.Statement(library, rdfNS['type'], libraryOntology['library'])
+ q = RDF.Statement(library, rdfNS['type'], libraryOntology['Library'])
present = False
if not self.model.contains_statement(q):
present = True
load_into_model(self.model, 'rdfa', library)
- LOGGER.debug("Did we import %s: %s", library, present)
+ LOGGER.debug("Did we import %s: %s", library.uri, present)
- def find_relavant_flowcell_ids(self):
+ def find_relevant_flowcell_ids(self):
"""Generate set of flowcell ids that had samples of interest on them
"""
- flowcell_query =RDF.SPARQLQuery("""
+ flowcell_query = RDF.SPARQLQuery("""
prefix libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
select distinct ?flowcell ?flowcell_id
WHERE {
- ?library a libns:library ;
+ ?library a libns:Library ;
libns:has_lane ?lane .
?lane libns:flowcell ?flowcell .
?flowcell libns:flowcell_id ?flowcell_id .
flowcell_ids = set()
for r in flowcell_query.execute(self.model):
flowcell_ids.add( fromTypedNode(r['flowcell_id']) )
- LOGGER.debug("Flowcells = %s" %(unicode(flowcell_ids)))
- flowcell_test = RDF.Statement(r['flowcell'],
- rdfNS['type'],
- libraryOntology['illumina_flowcell'])
- if not self.model.contains_statement(flowcell_test):
- # we probably lack full information about the flowcell.
+ imported = False
+ a_lane = self.model.get_target(r['flowcell'],
+ libraryOntology['has_lane'])
+ if a_lane is None:
+ imported = True
+ # we lack information about which lanes were on this flowcell
load_into_model(self.model, 'rdfa', r['flowcell'])
+ LOGGER.debug("Did we imported %s: %s" % (r['flowcell'].uri,
+ imported))
+
return flowcell_ids
def import_sequences(self, flowcell_ids):
'lib_id': seq.library_id,
'lane': seq.lane_number,
'read': seq.read,
- 'cycle': seq.cycle
+ 'cycle': seq.cycle,
+ 'is_paired': seq.ispaired
}
- if seq.ispaired:
- target_name = fastq_paired_template % \
- filename_attributes
- else:
- target_name = fastq_single_template % \
- filename_attributes
+ fqName = FastqName(**filename_attributes)
result_dir = result_map[seq.library_id]
- target_pathname = os.path.join(result_dir, target_name)
+ target_pathname = os.path.join(result_dir, fqName.filename)
if self.force or not os.path.exists(target_pathname):
t = needed_targets.setdefault(target_pathname, {})
t.setdefault(seq.filetype, []).append(seq)
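# Sketch of the new naming path (keyword names come from filename_attributes
# above, which may carry more keys than this hunk shows; the values here are
# made up, and the exact layout of the name is whatever FastqName.filename
# returns):
fqname = FastqName(lib_id='12345', lane=3, read=1, cycle=101, is_paired=True)
target_name = fqname.filename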
"""Add link between target pathname and the 'lane' that produced it
(note lane objects are now post demultiplexing.)
"""
target_uri = 'file://' + target
target_node = RDF.Node(RDF.Uri(target_uri))
source_stmt = RDF.Statement(target_node, dcNS['source'], seq.filenode)
self.model.add_statement(source_stmt)
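# The statement added above records provenance as a single triple, roughly
# (paths and node names below are illustrative):
#     <file:///results/12345/example.fastq> dc:source <...seq.filenode...> .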
self.cycle = fromTypedNode(result['cycle'])
self.lane_number = fromTypedNode(result['lane_number'])
self.read = fromTypedNode(result['read'])
- if type(self.read) in types.StringTypes:
+ if isinstance(self.read, str):
self.read = 1
self.library = result['library']
self.library_id = fromTypedNode(result['library_id'])
if url.scheme == 'file':
return url.path
else:
- errmsg = u"Unsupported scheme {0} for {1}"
- raise ValueError(errmsg.format(url.scheme, unicode(url)))
+ errmsg = "Unsupported scheme {0} for {1}"
+ raise ValueError(errmsg.format(url.scheme, str(url)))
path = property(_get_path)
def __repr__(self):