from htsworkflow.util.rdfhelp import \
blankOrUri, \
- dafTermOntology, \
dump_model, \
+ fromTypedNode, \
get_model, \
- libraryOntology, \
- owlNS, \
- rdfNS, \
- submissionLog, \
- submissionOntology, \
- toTypedNode, \
- fromTypedNode
+ stripNamespace, \
+ toTypedNode
+from htsworkflow.util.rdfns import *
from htsworkflow.util.hashfile import make_md5sum
-
+from htsworkflow.submission.fastqname import FastqName
from htsworkflow.submission.daf import \
MetadataLookupException, \
+ ModelException, \
get_submission_uri
LOGGER = logging.getLogger(__name__)
class Submission(object):
- def __init__(self, name, model):
+ def __init__(self, name, model, host):
self.name = name
self.model = model
self.submissionSet = get_submission_uri(self.name)
self.submissionSetNS = RDF.NS(str(self.submissionSet) + '#')
- self.libraryNS = RDF.NS('http://jumpgate.caltech.edu/library/')
+ self.libraryNS = RDF.NS('{0}/library/'.format(host))
self.__view_map = None
def scan_submission_dirs(self, result_map):
"""Examine files in our result directory
"""
- for lib_id, result_dir in result_map.items():
+ for lib_id, result_dir in list(result_map.items()):
LOGGER.info("Importing %s from %s" % (lib_id, result_dir))
try:
self.import_analysis_dir(result_dir, lib_id)
- except MetadataLookupException, e:
+ except MetadataLookupException as e:
LOGGER.error("Skipping %s: %s" % (lib_id, str(e)))
def import_analysis_dir(self, analysis_dir, library_id):
submission_files = os.listdir(analysis_dir)
for filename in submission_files:
- self.construct_file_attributes(analysis_dir, libNode, filename)
+ pathname = os.path.abspath(os.path.join(analysis_dir, filename))
+ self.construct_file_attributes(analysis_dir, libNode, pathname)
+
+ def analysis_nodes(self, result_map):
+ """Return an iterable of analysis nodes
+ """
+ for result_dir in list(result_map.values()):
+ an_analysis = self.get_submission_node(result_dir)
+ yield an_analysis
def construct_file_attributes(self, analysis_dir, libNode, pathname):
"""Looking for the best extension
rdfNS['type'])
if file_classification is None:
errmsg = 'Could not find class for {0}'
- logger.warning(errmsg.format(str(file_type)))
+ LOGGER.warning(errmsg.format(str(file_type)))
return
self.model.add_statement(
an_analysis))
# add file specific information
- fileNode = self.link_file_to_classes(filename,
- an_analysis,
- an_analysis_uri,
- analysis_dir)
+ fileNode = self.make_file_node(pathname, an_analysis)
self.add_md5s(filename, fileNode, analysis_dir)
+ self.add_fastq_metadata(filename, fileNode)
self.model.add_statement(
RDF.Statement(fileNode,
rdfNS['type'],
file_type))
+ self.model.add_statement(
+ RDF.Statement(fileNode,
+ libraryOntology['library'],
+ libNode))
+
LOGGER.debug("Done.")
- def link_file_to_classes(self, filename, submissionNode, submission_uri, analysis_dir):
+ def make_file_node(self, pathname, submissionNode):
+ """Create file node and attach it to its submission.
+ """
# add file specific information
- fileNode = RDF.Node(RDF.Uri(submission_uri + '/' + filename))
+ path, filename = os.path.split(pathname)
+ pathname = os.path.abspath(pathname)
+ fileNode = RDF.Node(RDF.Uri('file://'+ pathname))
self.model.add_statement(
RDF.Statement(submissionNode,
dafTermOntology['has_file'],
RDF.Statement(fileNode,
dafTermOntology['filename'],
filename))
+ self.model.add_statement(
+ RDF.Statement(fileNode,
+ dafTermOntology['relative_path'],
+ os.path.relpath(pathname)))
return fileNode
def add_md5s(self, filename, fileNode, analysis_dir):
self.model.add_statement(
RDF.Statement(fileNode, dafTermOntology['md5sum'], md5))
+ def add_fastq_metadata(self, filename, fileNode):
+ # How should I detect if this is actually a fastq file?
+ try:
+ fqname = FastqName(filename=filename)
+ except ValueError:
+        # currently we just ignore it if the fastq name parser fails
+ return
+
+ terms = [('flowcell', libraryOntology['flowcell_id']),
+ ('lib_id', libraryOntology['library_id']),
+ ('lane', libraryOntology['lane_number']),
+ ('read', libraryOntology['read']),
+ ('cycle', libraryOntology['read_length'])]
+ for file_term, model_term in terms:
+ value = fqname.get(file_term)
+ if value is not None:
+ s = RDF.Statement(fileNode, model_term, toTypedNode(value))
+ self.model.append(s)
+
def _add_library_details_to_model(self, libNode):
# attributes that can have multiple values
set_attributes = set((libraryOntology['has_lane'],
libraryOntology['has_mappings'],
dafTermOntology['has_file']))
parser = RDF.Parser(name='rdfa')
- new_statements = parser.parse_as_stream(libNode.uri)
+ try:
+ new_statements = parser.parse_as_stream(libNode.uri)
+ except RDF.RedlandError as e:
+ LOGGER.error(e)
+ return
+ LOGGER.debug("Scanning %s", str(libNode.uri))
toadd = []
for s in new_statements:
# always add "collections"
LOGGER.debug("Importing %s" % (lane.uri,))
try:
parser.parse_into_model(self.model, lane.uri)
- except RDF.RedlandError, e:
+ except RDF.RedlandError as e:
LOGGER.error("Error accessing %s" % (lane.uri,))
raise e
self.__view_map = self._get_filename_view_map()
results = []
- for pattern, view in self.__view_map.items():
+ for pattern, view in list(self.__view_map.items()):
if re.match(pattern, filename):
results.append(view)
LOGGER.debug("Found: %s" % (literal_re,))
try:
filename_re = re.compile(literal_re)
- except re.error, e:
+ except re.error as e:
LOGGER.error("Unable to compile: %s" % (literal_re,))
patterns[literal_re] = view_name
return patterns
paired = ['Barcoded Illumina',
'Multiplexing',
'Nextera',
- 'Paired End (non-multiplexed)',]
+ 'Paired End (non-multiplexed)',
+ 'Dual Index Illumina',]
if library_type in single:
return False
elif library_type in paired:
query = RDF.SPARQLQuery(str(formatted_query))
rdfstream = query.execute(self.model)
results = []
- for r in rdfstream:
- results.append(r)
+ for record in rdfstream:
+ d = {}
+ for key, value in list(record.items()):
+ d[key] = fromTypedNode(value)
+ results.append(d)
return results
+
+
+def list_submissions(model):
+ """Return generator of submissions in this model.
+ """
+ query_body = """
+ PREFIX subns: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#>
+
+ select distinct ?submission
+ where { ?submission subns:has_submission ?library_dir }
+ """
+ query = RDF.SPARQLQuery(query_body)
+ rdfstream = query.execute(model)
+ for row in rdfstream:
+ s = stripNamespace(submissionLog, row['submission'])
+ if s[-1] in ['#', '/', '?']:
+ s = s[:-1]
+ yield s