Generate manifest files for ENCODE3
[htsworkflow.git] / htsworkflow / submission / submission.py
index 18fa3b2bc9f487b63915ca02a3c060adbfeedf3b..3320f1caf3174e6d5636c4dd20841188771050d9 100644 (file)
@@ -19,21 +19,22 @@ from htsworkflow.util.rdfhelp import \
      toTypedNode, \
      fromTypedNode
 from htsworkflow.util.hashfile import make_md5sum
-
+from htsworkflow.submission.fastqname import FastqName
 from htsworkflow.submission.daf import \
      MetadataLookupException, \
+     ModelException, \
      get_submission_uri
 
 LOGGER = logging.getLogger(__name__)
 
 class Submission(object):
-    def __init__(self, name, model):
+    def __init__(self, name, model, host):
         self.name = name
         self.model = model
 
         self.submissionSet = get_submission_uri(self.name)
         self.submissionSetNS = RDF.NS(str(self.submissionSet) + '#')
-        self.libraryNS = RDF.NS('http://jumpgate.caltech.edu/library/')
+        self.libraryNS = RDF.NS('{0}/library/'.format(host))
 
         self.__view_map = None
 
@@ -57,7 +58,8 @@ class Submission(object):
 
         submission_files = os.listdir(analysis_dir)
         for filename in submission_files:
-            self.construct_file_attributes(analysis_dir, libNode, filename)
+            pathname = os.path.abspath(os.path.join(analysis_dir, filename))
+            self.construct_file_attributes(analysis_dir, libNode, pathname)
 
     def construct_file_attributes(self, analysis_dir, libNode, pathname):
         """Looking for the best extension
@@ -113,20 +115,27 @@ class Submission(object):
                           an_analysis))
 
         # add file specific information
-        fileNode = self.link_file_to_classes(filename,
-                                             an_analysis,
-                                             an_analysis_uri,
-                                             analysis_dir)
+        fileNode = self.make_file_node(pathname, an_analysis)
         self.add_md5s(filename, fileNode, analysis_dir)
+        self.add_fastq_metadata(filename, fileNode)
         self.model.add_statement(
             RDF.Statement(fileNode,
                           rdfNS['type'],
                           file_type))
+        self.model.add_statement(
+            RDF.Statement(fileNode,
+                          libraryOntology['library'],
+                          libNode))
+
         LOGGER.debug("Done.")
 
-    def link_file_to_classes(self, filename, submissionNode, submission_uri, analysis_dir):
+    def make_file_node(self, pathname, submissionNode):
+        """Create file node and attach it to its submission.
+        """
         # add file specific information
-        fileNode = RDF.Node(RDF.Uri(submission_uri + '/' + filename))
+        path, filename = os.path.split(pathname)
+        pathname = os.path.abspath(pathname)
+        fileNode = RDF.Node(RDF.Uri('file://'+ pathname))
         self.model.add_statement(
             RDF.Statement(submissionNode,
                           dafTermOntology['has_file'],
@@ -135,6 +144,10 @@ class Submission(object):
             RDF.Statement(fileNode,
                           dafTermOntology['filename'],
                           filename))
+        self.model.add_statement(
+            RDF.Statement(fileNode,
+                          dafTermOntology['relative_path'],
+                          os.path.relpath(pathname)))
         return fileNode
 
     def add_md5s(self, filename, fileNode, analysis_dir):
@@ -148,13 +161,37 @@ class Submission(object):
             self.model.add_statement(
                 RDF.Statement(fileNode, dafTermOntology['md5sum'], md5))
 
+    def add_fastq_metadata(self, filename, fileNode):
+        # How should I detect if this is actually a fastq file?
+        try:
+            fqname = FastqName(filename=filename)
+        except ValueError:
+            # currently we just ignore it if the fastq name parser fails
+            return
+
+        terms = [('flowcell', libraryOntology['flowcell_id']),
+                 ('lib_id', libraryOntology['library_id']),
+                 ('lane', libraryOntology['lane_number']),
+                 ('read', libraryOntology['read']),
+                 ('cycle', libraryOntology['read_length'])]
+        for file_term, model_term in terms:
+            value = fqname.get(file_term)
+            if value is not None:
+                s = RDF.Statement(fileNode, model_term, toTypedNode(value))
+                self.model.append(s)
+
     def _add_library_details_to_model(self, libNode):
         # attributes that can have multiple values
         set_attributes = set((libraryOntology['has_lane'],
                               libraryOntology['has_mappings'],
                               dafTermOntology['has_file']))
         parser = RDF.Parser(name='rdfa')
-        new_statements = parser.parse_as_stream(libNode.uri)
+        try:
+            new_statements = parser.parse_as_stream(libNode.uri)
+        except RDF.RedlandError as e:
+            LOGGER.error(e)
+            return
+        LOGGER.debug("Scanning %s", str(libNode.uri))
         toadd = []
         for s in new_statements:
             # always add "collections"
@@ -288,7 +325,8 @@ class Submission(object):
         paired = ['Barcoded Illumina',
                   'Multiplexing',
                   'Nextera',
-                  'Paired End (non-multiplexed)',]
+                  'Paired End (non-multiplexed)',
+                  'Dual Index Illumina',]
         if library_type in single:
             return False
         elif library_type in paired:
@@ -306,6 +344,9 @@ class Submission(object):
         query = RDF.SPARQLQuery(str(formatted_query))
         rdfstream = query.execute(self.model)
         results = []
-        for r in rdfstream:
-            results.append(r)
+        for record in rdfstream:
+            d = {}
+            for key, value in record.items():
+                d[key] = fromTypedNode(value)
+            results.append(d)
         return results