from htsworkflow.util.rdfhelp import \
blankOrUri, \
- dafTermOntology, \
dump_model, \
+ fromTypedNode, \
get_model, \
- libraryOntology, \
- owlNS, \
- rdfNS, \
- submissionLog, \
- submissionOntology, \
- toTypedNode, \
- fromTypedNode
+ stripNamespace, \
+ toTypedNode
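+# namespace constants (dafTermOntology, libraryOntology, rdfNS, submissionLog, ...) now come from rdfns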
+from htsworkflow.util.rdfns import *
from htsworkflow.util.hashfile import make_md5sum
from htsworkflow.submission.fastqname import FastqName
from htsworkflow.submission.daf import \
MetadataLookupException, \
+ ModelException, \
get_submission_uri
LOGGER = logging.getLogger(__name__)
def scan_submission_dirs(self, result_map):
"""Examine files in our result directory
"""
- for lib_id, result_dir in result_map.items():
+ for lib_id, result_dir in list(result_map.items()):
LOGGER.info("Importing %s from %s" % (lib_id, result_dir))
try:
self.import_analysis_dir(result_dir, lib_id)
- except MetadataLookupException, e:
+ except MetadataLookupException as e:
LOGGER.error("Skipping %s: %s" % (lib_id, str(e)))
def import_analysis_dir(self, analysis_dir, library_id):
pathname = os.path.abspath(os.path.join(analysis_dir, filename))
self.construct_file_attributes(analysis_dir, libNode, pathname)
+ def analysis_nodes(self, result_map):
+ """Return an iterable of analysis nodes
+ """
+ for result_dir in list(result_map.values()):
+ an_analysis = self.get_submission_node(result_dir)
+ yield an_analysis
+
def construct_file_attributes(self, analysis_dir, libNode, pathname):
"""Looking for the best extension
The 'best' is the longest match
RDF.Statement(fileNode,
rdfNS['type'],
file_type))
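+        # link the file node back to the library it was derived from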
+ self.model.add_statement(
+ RDF.Statement(fileNode,
+ libraryOntology['library'],
+ libNode))
+
LOGGER.debug("Done.")
def make_file_node(self, pathname, submissionNode):
"""
# add file specific information
path, filename = os.path.split(pathname)
- fileNode = RDF.Node(RDF.Uri('file://'+ os.path.abspath(pathname)))
+ pathname = os.path.abspath(pathname)
+ fileNode = RDF.Node(RDF.Uri('file://'+ pathname))
self.model.add_statement(
RDF.Statement(submissionNode,
dafTermOntology['has_file'],
RDF.Statement(fileNode,
dafTermOntology['filename'],
filename))
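+        # also record the file's path relative to the current working directory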
+ self.model.add_statement(
+ RDF.Statement(fileNode,
+ dafTermOntology['relative_path'],
+ os.path.relpath(pathname)))
return fileNode
def add_md5s(self, filename, fileNode, analysis_dir):
libraryOntology['has_mappings'],
dafTermOntology['has_file']))
parser = RDF.Parser(name='rdfa')
- new_statements = parser.parse_as_stream(libNode.uri)
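+        # RDFa parsing of the library page can fail; log the error and skip this library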
+ try:
+ new_statements = parser.parse_as_stream(libNode.uri)
+ except RDF.RedlandError as e:
+ LOGGER.error(e)
+ return
+ LOGGER.debug("Scanning %s", str(libNode.uri))
toadd = []
for s in new_statements:
# always add "collections"
LOGGER.debug("Importing %s" % (lane.uri,))
try:
parser.parse_into_model(self.model, lane.uri)
- except RDF.RedlandError, e:
+ except RDF.RedlandError as e:
LOGGER.error("Error accessing %s" % (lane.uri,))
raise e
self.__view_map = self._get_filename_view_map()
results = []
- for pattern, view in self.__view_map.items():
+ for pattern, view in list(self.__view_map.items()):
if re.match(pattern, filename):
results.append(view)
LOGGER.debug("Found: %s" % (literal_re,))
try:
filename_re = re.compile(literal_re)
- except re.error, e:
+ except re.error as e:
LOGGER.error("Unable to compile: %s" % (literal_re,))
patterns[literal_re] = view_name
return patterns
paired = ['Barcoded Illumina',
'Multiplexing',
'Nextera',
- 'Paired End (non-multiplexed)',]
+ 'Paired End (non-multiplexed)',
+ 'Dual Index Illumina',]
if library_type in single:
return False
elif library_type in paired:
results = []
for record in rdfstream:
d = {}
- for key, value in record.items():
+ for key, value in list(record.items()):
d[key] = fromTypedNode(value)
results.append(d)
return results
+
+
+def list_submissions(model):
+    """Return a generator of submission names in this model.
+ """
+ query_body = """
+ PREFIX subns: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#>
+
+ select distinct ?submission
+ where { ?submission subns:has_submission ?library_dir }
+ """
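+    # any node with at least one has_submission link counts as a submission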
+ query = RDF.SPARQLQuery(query_body)
+ rdfstream = query.execute(model)
+ for row in rdfstream:
+ s = stripNamespace(submissionLog, row['submission'])
+ if s[-1] in ['#', '/', '?']:
+ s = s[:-1]
+ yield s
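
A minimal usage sketch for the new list_submissions() generator (not part of the patch), assuming a Redland model that already holds subns:has_submission statements; get_model() is the rdfhelp helper imported above, and the turtle file path is purely illustrative:

    import RDF
    from htsworkflow.util.rdfhelp import get_model

    model = get_model()  # in-memory Redland model (default arguments assumed)
    # load previously exported submission metadata; the source path is only an example
    RDF.Parser(name='turtle').parse_into_model(model, RDF.Uri('file:///tmp/submissions.ttl'))

    for name in list_submissions(model):  # list_submissions as defined above
        print(name)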