from htsworkflow.pipelines import qseq2fastq
from htsworkflow.pipelines import srf2fastq
from htsworkflow.pipelines import desplit_fastq
+from htsworkflow.submission.fastqname import FastqName
from htsworkflow.util.rdfhelp import get_model, dump_model, load_into_model, \
fromTypedNode, \
- stripNamespace
+ strip_namespace
from htsworkflow.util.rdfns import *
from htsworkflow.util.conversion import parse_flowcell_id
conversion = conversion_funcs.get(condor_type, None)
if conversion is None:
errmsg = "Unrecognized type: {0} for {1}"
- print errmsg.format(condor_type,
- pformat(available_sources))
+ LOGGER.error(errmsg.format(condor_type,
+ pformat(available_sources)))
continue
sources = available_sources.get(condor_type, None)
condor_entries.setdefault(condor_type, []).append(
conversion(sources, target_pathname))
else:
- print " need file", target_pathname
+ LOGGER.warn(" need file %s", target_pathname)
return condor_entries
Find archived sequence files associated with our results.
"""
self.import_libraries(result_map)
- flowcell_ids = self.find_relavant_flowcell_ids()
+ flowcell_ids = self.find_relevant_flowcell_ids()
self.import_sequences(flowcell_ids)
query_text = """
libns:library ?library ;
libns:library_id ?library_id ;
libns:file_type ?filetype ;
- a libns:illumina_result .
+ a libns:IlluminaResult .
?flowcell libns:read_length ?read_length ;
libns:flowcell_type ?flowcell_type .
OPTIONAL { ?flowcell libns:flowcell_status ?flowcell_status }
def import_library(self, library):
    """Import library data into our model if we don't have it already.

    :Parameters:
      - `library`: RDF node naming a library page; loaded via RDFa when
        the model lacks a (library, rdf:type, libns:Library) statement.

    Side effect: may call load_into_model() on self.model.
    """
    q = RDF.Statement(library, rdfNS['type'], libraryOntology['Library'])
    # 'imported' is True only when we actually had to fetch the page;
    # named to match the logging convention used for flowcell imports.
    imported = False
    if not self.model.contains_statement(q):
        imported = True
        load_into_model(self.model, 'rdfa', library)
    LOGGER.debug("Did we import %s: %s", library.uri, imported)
- def find_relavant_flowcell_ids(self):
+ def find_relevant_flowcell_ids(self):
"""Generate set of flowcell ids that had samples of interest on them
"""
- flowcell_query =RDF.SPARQLQuery("""
+ flowcell_query = RDF.SPARQLQuery("""
prefix libns: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
select distinct ?flowcell ?flowcell_id
WHERE {
- ?library a libns:library ;
+ ?library a libns:Library ;
libns:has_lane ?lane .
?lane libns:flowcell ?flowcell .
?flowcell libns:flowcell_id ?flowcell_id .
flowcell_ids = set()
for r in flowcell_query.execute(self.model):
flowcell_ids.add( fromTypedNode(r['flowcell_id']) )
- LOGGER.debug("Flowcells = %s" %(unicode(flowcell_ids)))
- flowcell_test = RDF.Statement(r['flowcell'],
- rdfNS['type'],
- libraryOntology['illumina_flowcell'])
- if not self.model.contains_statement(flowcell_test):
- # we probably lack full information about the flowcell.
+ imported = False
+ a_lane = self.model.get_target(r['flowcell'],
+ libraryOntology['has_lane'])
+ if a_lane is None:
+ imported = True
+ # we lack information about which lanes were on this flowcell
load_into_model(self.model, 'rdfa', r['flowcell'])
+ LOGGER.debug("Did we imported %s: %s" % (r['flowcell'].uri,
+ imported))
+
return flowcell_ids
def import_sequences(self, flowcell_ids):
'lib_id': seq.library_id,
'lane': seq.lane_number,
'read': seq.read,
- 'cycle': seq.cycle
+ 'cycle': seq.cycle,
+ 'is_paired': seq.ispaired
}
- if seq.ispaired:
- target_name = fastq_paired_template % \
- filename_attributes
- else:
- target_name = fastq_single_template % \
- filename_attributes
+ fqName = FastqName(**filename_attributes)
result_dir = result_map[seq.library_id]
- target_pathname = os.path.join(result_dir, target_name)
+ target_pathname = os.path.join(result_dir, fqName.filename)
if self.force or not os.path.exists(target_pathname):
t = needed_targets.setdefault(target_pathname, {})
t.setdefault(seq.filetype, []).append(seq)
"""Add link between target pathname and the 'lane' that produced it
(note lane objects are now post demultiplexing.)
"""
- target_uri = 'file://' + target
+ target_uri = 'file://' + target.encode('utf-8')
target_node = RDF.Node(RDF.Uri(target_uri))
source_stmt = RDF.Statement(target_node, dcNS['source'], seq.filenode)
self.model.add_statement(source_stmt)
ispaired = property(_get_ispaired)
def _get_filetype(self):
    """Return self._filetype with the libraryOntology namespace prefix stripped."""
    return strip_namespace(libraryOntology, self._filetype)
filetype = property(_get_filetype)
def _get_path(self):