Merge branch 'add-condition'
author Diane Trout <diane@caltech.edu>
	Thu, 26 May 2011 17:47:24 +0000 (10:47 -0700)
committer Diane Trout <diane@caltech.edu>
	Thu, 26 May 2011 17:47:24 +0000 (10:47 -0700)
Conflicts:
htsworkflow/frontend/templates/samples/library_detail.html

Resolve the conflict between the library_detail headers, where one
branch added RDFa markup and the other reordered the sample details.

51 files changed:
.gitignore
extra/fix_ob3.py
extra/ucsc_encode_submission/dt-overrides.turtle [new file with mode: 0644]
extra/ucsc_encode_submission/encode_find.py
extra/ucsc_encode_submission/failed-submissions.sparql [new file with mode: 0644]
extra/ucsc_encode_submission/find-lib-by-cell.sparql [new file with mode: 0644]
extra/ucsc_encode_submission/scan_extension.py [new file with mode: 0644]
extra/ucsc_encode_submission/ucsc_gather.py
htsworkflow/automation/copier.py
htsworkflow/automation/solexa.py [new file with mode: 0644]
htsworkflow/automation/spoolwatcher.py
htsworkflow/automation/test/test_runner.py
htsworkflow/automation/test/test_solexa_utils.py [new file with mode: 0644]
htsworkflow/frontend/analysis/main.py
htsworkflow/frontend/analysis/models.py
htsworkflow/frontend/auth.py
htsworkflow/frontend/bcmagic/fixtures/initial_data.json
htsworkflow/frontend/bcmagic/utils.py
htsworkflow/frontend/eland_config/views.py
htsworkflow/frontend/experiments/experiments.py
htsworkflow/frontend/experiments/models.py
htsworkflow/frontend/experiments/tests.py
htsworkflow/frontend/experiments/views.py
htsworkflow/frontend/inventory/fixtures/initial_data.json
htsworkflow/frontend/inventory/views.py
htsworkflow/frontend/manage.py [deleted file]
htsworkflow/frontend/reports/libinfopar.py
htsworkflow/frontend/samples/models.py
htsworkflow/frontend/samples/results.py
htsworkflow/frontend/samples/tests.py
htsworkflow/frontend/samples/urls.py
htsworkflow/frontend/samples/views.py
htsworkflow/frontend/settings.py [deleted file]
htsworkflow/frontend/static/css/app.css
htsworkflow/frontend/templates/admin/experiments/flowcell/change_form.html
htsworkflow/frontend/templates/base.html
htsworkflow/frontend/templates/experiments/flowcell_detail.html [new file with mode: 0644]
htsworkflow/frontend/templates/experiments/flowcell_lane_detail.html [new file with mode: 0644]
htsworkflow/frontend/templates/samples/library_detail.html
htsworkflow/frontend/templates/samples/library_index.html
htsworkflow/frontend/templates/samples/species_detail.html [new file with mode: 0644]
htsworkflow/frontend/urls.py
htsworkflow/pipelines/sequences.py
htsworkflow/settings.py [new file with mode: 0644]
htsworkflow/util/api.py
htsworkflow/util/test/test_validate.py
htsworkflow/util/validate.py
manage.py [new file with mode: 0644]
scripts/htsw-record-runfolder
settings.py [new file with mode: 0644]
test/test_copier.py

index 921a57b3b720e8cb8b78e3edfca3fb695fcf073b..b8c701b93b866250b64bd4489ddb3bbe7051a1c8 100644 (file)
@@ -1,4 +1,5 @@
-*.py[co~]
+*~
+*.py[co]
 .coverage
 *.egg-info
 dist
index 12789019097440580d5ae1fb0c08132572ebb73a..9955f69af624dcf8bfe360b345b2dd94ff490728 100644 (file)
@@ -7,7 +7,7 @@ import re
 import sys
 
 from django.core.management import setup_environ
-from htsworkflow.frontend import settings
+from django.conf import settings
 setup_environ(settings)
 
 import htsworkflow.frontend.samples.models as samples
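
Note that django.conf.settings is lazy, so something must point Django at a
concrete settings module before settings is first used. A minimal sketch of
that assumption (htsworkflow/settings.py is one of the new files in this
merge; the exact module path is an assumption):

    import os
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'htsworkflow.settings')
    from django.conf import settings  # resolved via the environment variable
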
diff --git a/extra/ucsc_encode_submission/dt-overrides.turtle b/extra/ucsc_encode_submission/dt-overrides.turtle
new file mode 100644 (file)
index 0000000..fde9add
--- /dev/null
@@ -0,0 +1,158 @@
+##
+## Override the submission ID to library URN mapping for libraries
+## whose names either lack a library ID string or have the wrong one
+## embedded in them.
+##
+
+@base <file:///home/diane/proj/solexa/htsworkflow/extra/ucsc_encode_submission/no-lib.sparql> .
+@prefix encodeSubmit:<http://jumpgate.caltech.edu/wiki/UCSCSubmissionOntology#> .
+
+# woldlab-hepg2-rnaseq-2009dec
+<http://encodesubmit.ucsc.edu/pipeline/show/805>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/10879> .
+
+# woldlab-hepg2-rnaseq-2009dec-part2
+<http://encodesubmit.ucsc.edu/pipeline/show/810>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/10879> .
+
+# woldlab-hepg2-rnaseq-2009dec-part3
+<http://encodesubmit.ucsc.edu/pipeline/show/869>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/10879> .
+
+# woldlab-rnaseq-GM12878-rep1-stranded-2010Jan15
+<http://encodesubmit.ucsc.edu/pipeline/show/870>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11011> .
+
+# woldlab-hepg2-rnaseq-2010Jan-part4
+<http://encodesubmit.ucsc.edu/pipeline/show/897>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/10879> .
+
+# woldlab-gm12878-directional-rep2-rnaseq-2010Jan06
+<http://encodesubmit.ucsc.edu/pipeline/show/898>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11010> .
+
+# woldlab-K562-directional-rnaseq-rep1-2010Jan6
+<http://encodesubmit.ucsc.edu/pipeline/show/903>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11010> .
+
+# woldlab-K562-directional-rnaseq-rep2-2010jan9
+<http://encodesubmit.ucsc.edu/pipeline/show/904>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11007> .
+
+# woldlab hESC 10886 rep1 2009Jan13
+<http://encodesubmit.ucsc.edu/pipeline/show/1026>
+  encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11286> .
+
+# woldlab 2010Jun15 1x75-Directional-NHEK-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/1483>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11204> .
+
+# woldlab Jun18 1x75-Directional-H1-hESC-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/1626>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11009> .
+
+# woldlab jun 18 1x75-Directional-GM12878-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/1631>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11011> .
+
+# woldlab jun 18  1x75-Directional-GM12878-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/1632>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11010> .
+
+# woldlab jun 18 1x75-Directional-H1-hESC-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/1633>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/10947> .
+
+# woldlab jun 18 1x75-Directional-HeLa-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/1634>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11208> .
+
+# woldlab jun 18 1x75-Directional-HeLa-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/1635>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11207> .
+
+# woldlab jun 18 1x75-Directional-HepG2-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/1636>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11210> .
+
+# woldlab jun 18 1x75-Directional-K562-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/1637>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11008> .
+
+# woldlab jun 18 1x75-Directional-HepG2-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/1638>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11209> .
+
+# woldlab jun 18 1x75-Directional-HUVEC-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/1639>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11206> .
+
+# woldlab jun 18 1x75-Directional-HUVEC-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/1645>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11205> .
+
+# woldlab jun 18 1x75-Directional-K562-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/1646>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11007> .
+
+# woldlab June  2x75-GM12878-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/1856>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/10515> .
+
+# 2010 Jul 9 corrected fastqs
+<http://encodesubmit.ucsc.edu/pipeline/show/1874>
+     encodeSubmit:ignore "1" .
+#    encodeSubmit:library_urn "
+
+# 2010-11-05 Correction 1x75-Directional-GM12878-Rep1.tgz
+<http://encodesubmit.ucsc.edu/pipeline/show/2926>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11010> .
+
+# 1x75-Directional-GM12878-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/2930>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11010> .
+
+# 1x75-Directional-H1-hESC-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/2931>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/10947> .
+
+# 1x75-Directional-H1-hESC-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/2932>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11205> .
+
+# 1x75-Directional-HUVEC-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/2933>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11206> .
+
+# 1x75-Directional-HUVEC-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/2934>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11205> .
+
+# 1x75-Directional-HeLa-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/2935>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11208> .
+
+# 1x75-Directional-HeLa-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/2936>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11207> .
+
+# 1x75-Directional-HepG2-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/2937>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11210> .
+
+# 1x75-Directional-HepG2-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/2938>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11209> .
+
+# 1x75-Directional-K562-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/2939>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11008> .
+
+# 1x75-Directional-K562-Rep2
+<http://encodesubmit.ucsc.edu/pipeline/show/2940>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11007> .
+
+# 1x75-Directional-NHEK-Rep1
+<http://encodesubmit.ucsc.edu/pipeline/show/2941>
+    encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/11204> .
+
index 7c614dc2cd7622e39006d7fb64613df786d057dd..3ac4f0420d435b66439b9d2b8488ae5999e522e2 100644 (file)
@@ -4,7 +4,7 @@ from BeautifulSoup import BeautifulSoup
 from datetime import datetime
 import httplib2
 from operator import attrgetter
-from optparse import OptionParser
+from optparse import OptionParser, OptionGroup
 # python keyring
 import keyring
 import logging
@@ -15,75 +15,323 @@ import RDF
 import sys
 import urllib
 
+from htsworkflow.util import api
+
+DBDIR = os.path.expanduser("~diane/proj/submission")
+
+logger = logging.getLogger("encode_find")
+
 libraryNS = RDF.NS("http://jumpgate.caltech.edu/library/")
 submissionNS = RDF.NS("http://encodesubmit.ucsc.edu/pipeline/show/")
-submitNS = RDF.NS("http://jumpgate.caltech.edu/wiki/EncodeSubmit#")
+submitOntologyNS = RDF.NS("http://jumpgate.caltech.edu/wiki/UCSCSubmissionOntology#")
+ddfNS = RDF.NS("http://encodesubmit.ucsc.edu/pipeline/download_ddf#")
+libOntNS = RDF.NS("http://jumpgate.caltech.edu/wiki/LibraryOntology#")
+
 dublinCoreNS = RDF.NS("http://purl.org/dc/elements/1.1/")
 rdfNS = RDF.NS("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
 rdfsNS= RDF.NS("http://www.w3.org/2000/01/rdf-schema#")
+xsdNS = RDF.NS("http://www.w3.org/2001/XMLSchema#")
 
 LOGIN_URL = 'http://encodesubmit.ucsc.edu/account/login'
 USER_URL = 'http://encodesubmit.ucsc.edu/pipeline/show_user'
-DETAIL_URL = 'http://encodesubmit.ucsc.edu/pipeline/show/{0}'
-LIBRARY_URL = 'http://jumpgate.caltech.edu/library/{0}'
+
 USERNAME = 'detrout'
+CHARSET = 'utf-8'
 
 def main(cmdline=None):
     parser = make_parser()
     opts, args = parser.parse_args(cmdline)
 
-    cookie = login()
-    if cookie is None:
-        print "Failed to login"
+    if opts.verbose:
+        logging.basicConfig(level=logging.INFO)
+
+    htsw_authdata = api.make_auth_from_opts(opts, parser)
+    htswapi = api.HtswApi(opts.host, htsw_authdata)
+    
+    cookie = None
+    model = get_model(opts.load_model)
+    
+    if opts.load_rdf is not None:
+        load_into_model(model, opts.rdf_parser_name, opts.load_rdf)
+        
+    if opts.update:
+        cookie = login(cookie=cookie)
+        load_my_submissions(model, cookie=cookie)
+        load_encode_libraries(model, htswapi)
+
+    if opts.sparql is not None:
+        sparql_query(model, opts.sparql)
+
+    if opts.find_submission_with_no_library:
+        find_submissions_with_no_library(model)
+                
+    if opts.print_rdf:
+        serializer = RDF.Serializer(name=opts.rdf_parser_name)
+        print serializer.serialize_model_to_string(model)
+
 
-    submissions = my_submissions(cookie)
-    for s in submissions:
-        for t in s.triples():
-            print t
-            
 def make_parser():
     parser = OptionParser()
-    return parser
-
+    commands = OptionGroup(parser, "Commands")
+    commands.add_option('--load-model', default=None,
+      help="Load model database")
+    commands.add_option('--load-rdf', default=None,
+      help="load rdf statements into model")
+    commands.add_option('--print-rdf', action="store_true", default=False,
+      help="print ending model state")
+    commands.add_option('--update', action="store_true", default=False,
+      help="Query remote data sources and update our database")
+    #commands.add_option('--update-ucsc-status', default=None,
+    #  help="download status from ucsc, requires filename for extra rules")
+    #commands.add_option('--update-ddfs', action="store_true", default=False,
+    #  help="download ddf information for known submission")
+    #commands.add_option('--update-library', default=None,
+    #  help="download library info from htsw, requires filename for extra rules")
+    parser.add_option_group(commands)
+                      
+    queries = OptionGroup(parser, "Queries")
+    queries.add_option('--sparql', default=None,
+      help="execute arbitrary sparql query")
+    queries.add_option('--find-submission-with-no-library', default=False,
+      action="store_true",
+      help="find submissions with no library ID")    
+    parser.add_option_group(queries)
 
-def login():
-    keys = keyring.get_keyring()
-    password = keys.get_password(LOGIN_URL, USERNAME)
-    credentials = {'login': USERNAME,
-                   'password': password}
-    headers = {'Content-type': 'application/x-www-form-urlencoded'}
-    http = httplib2.Http()
-    response, content = http.request(LOGIN_URL,
-                                     'POST',
-                                     headers=headers,
-                                     body=urllib.urlencode(credentials))
-    logging.debug("Login to {0}, status {1}".format(LOGIN_URL,
-                                                    response['status']))
+    options = OptionGroup(parser, "Options")
+    options.add_option("--rdf-parser-name", default="turtle",
+      help="set rdf file parser type")
+    options.add_option("-v", "--verbose", action="store_true", default=False)
+    parser.add_option_group(options)
     
-    cookie = response.get('set-cookie', None)
-    return cookie
+    api.add_auth_options(parser)
+
+    return parser
 
-def my_submissions(cookie):
+def get_model(model_name=None):
+    if model_name is None:
+        storage = RDF.MemoryStorage()
+    else:
+        storage = RDF.HashStorage(model_name,
+                      options="hash-type='bdb',dir='{0}'".format(DBDIR))
+    model = RDF.Model(storage)
+    return model
+        
+def load_my_submissions(model, cookie=None):
+    if cookie is None:
+        cookie = login()
+        
     soup = get_url_as_soup(USER_URL, 'GET', cookie)
     p = soup.find('table', attrs={'id':'projects'})
     tr = p.findNext('tr')
     # first record is header
     tr = tr.findNext()
-    submissions = []
+    TypeN = rdfsNS['type']
+    NameN = submitOntologyNS['name']
+    SpeciesN = submitOntologyNS['species']
+    LibraryURN = submitOntologyNS['library_urn']
+
     while tr is not None:
         td = tr.findAll('td')
         if td is not None and len(td) > 1:
-            subid = td[0].contents[0].contents[0]
-            species = get_contents(td[2])
+            subUrnText = td[0].contents[0].contents[0].encode(CHARSET)
+            subUrn = submissionNS[subUrnText]
+
+            add_stmt(model, subUrn, TypeN, submitOntologyNS['Submission'])
+                
             name = get_contents(td[4])
+            add_stmt(model, subUrn, NameN, name)
+                
+            species = get_contents(td[2])
+            if species is not None:
+                add_stmt(model, subUrn, SpeciesN, species)
+
+            library_id = get_library_id(name)
+            if library_id is not None:
+                add_submission_to_library_urn(model,
+                                              subUrn,
+                                              LibraryURN,
+                                              library_id)
+
+            add_submission_creation_date(model, subUrn, cookie)
+
+            # grab changing attributes
             status = get_contents(td[6]).strip()
-            date = get_date_contents(td[8])
-            age = get_contents(td[10])
-            submissions.append(
-                Submission(subid, species, name, status, date, age, cookie)
-            )
+            last_mod_datetime = get_date_contents(td[8])
+            last_mod = last_mod_datetime.isoformat()
+
+            update_submission_detail(model, subUrn, status, last_mod, cookie=cookie)
+
+            logging.info("Processed {0}".format( subUrn))
+            
         tr = tr.findNext('tr')
-    return submissions
+
+
+def add_submission_to_library_urn(model, submissionUrn, predicate, library_id):
+    """Add a link from a UCSC submission to woldlab library if needed
+    """
+    libraryUrn = libraryNS[library_id]
+    query = RDF.Statement(submissionUrn, predicate, libraryUrn)
+    if not model.contains_statement(query):
+        link = RDF.Statement(submissionUrn, predicate, libraryNS[library_id])
+        logger.info("Adding Sub -> Lib link: {0}".format(link))
+        model.add_statement(link)
+    else:
+        logger.debug("Found: {0}".format(str(query)))
+
+    
+def find_submissions_with_no_library(model):
+    missing_lib_query = RDF.SPARQLQuery("""
+PREFIX submissionOntology:<{submissionOntology}>
+
+SELECT 
+ ?subid ?name
+WHERE {{
+  ?subid submissionOntology:name ?name
+  OPTIONAL {{ ?subid submissionOntology:library_urn ?libid }}
+  FILTER  (!bound(?libid))
+}}""".format(submissionOntology=submitOntologyNS[''].uri)
+)    
+
+    results = missing_lib_query.execute(model)
+    for row in results:
+        subid = row['subid']
+        name = row['name']
+        print "# {0}".format(name)
+        print "<{0}>".format(subid.uri)
+        print "  encodeSubmit:library_urn <http://jumpgate.caltech.edu/library/> ."
+        print ""
+    
+
+def add_submission_creation_date(model, subUrn, cookie):
+    # in theory the submission page might have more information on it.
+    creationDateN = libOntNS['date']
+    dateTimeType = xsdNS['dateTime']
+    query = RDF.Statement(subUrn, creationDateN, None)
+    creation_dates = list(model.find_statements(query))
+    if len(creation_dates) == 0:
+        logger.info("Getting creation date for: {0}".format(str(subUrn)))
+        soup = get_url_as_soup(str(subUrn.uri), 'GET', cookie)
+        created_label = soup.find(text="Created: ")
+        if created_label:
+            created_date = get_date_contents(created_label.next)
+            created_date_node = RDF.Node(literal=created_date.isoformat(),
+                                         datatype=dateTimeType.uri)
+            add_stmt(model, subUrn, creationDateN, created_date_node)
+    else:
+        logger.debug("Found creation date for: {0}".format(str(subUrn)))
+
+def update_submission_detail(model, subUrn, status, recent_update, cookie):
+    HasStatusN = submitOntologyNS['has_status']
+    StatusN = submitOntologyNS['status']
+    LastModifyN = submitOntologyNS['last_modify_date']
+
+    status_nodes_query = RDF.Statement(subUrn, HasStatusN, None)
+    status_nodes = list(model.find_statements(status_nodes_query))
+
+    if len(status_nodes) == 0:
+        # has no status node, add one
+        logging.info("Adding status node to {0}".format(subUrn))
+        status_blank = RDF.Node()
+        add_stmt(model, subUrn, HasStatusN, status_blank)
+        add_stmt(model, status_blank, rdfsNS['type'], StatusN)
+        add_stmt(model, status_blank, StatusN, status)
+        add_stmt(model, status_blank, LastModifyN, recent_update)
+        update_ddf(model, subUrn, status_blank, cookie=cookie)
+    else:
+        logging.info("Found {0} status blanks".format(len(status_nodes)))
+        for status_statement in status_nodes:
+            status_blank = status_statement.object
+            last_modified_query = RDF.Statement(status_blank, LastModifyN, None)
+            last_mod_nodes = model.find_statements(last_modified_query)
+            for last_mod_statement in last_mod_nodes:
+                last_mod_date = str(last_mod_statement.object)
+                if recent_update == str(last_mod_date):
+                    update_ddf(model, subUrn, status_blank, cookie=cookie)
+                    break
+
+
+    
+def update_ddf(model, subUrn, statusNode, cookie):
+    TypeN = rdfsNS['type']
+    
+    download_ddf_url = str(subUrn).replace('show', 'download_ddf')
+    ddfUrn = RDF.Uri(download_ddf_url)
+    
+    status_is_ddf = RDF.Statement(statusNode, TypeN, ddfNS['ddf'])
+    if not model.contains_statement(status_is_ddf):
+        logging.info('Adding ddf to {0}, {1}'.format(subUrn, statusNode))
+        ddf_text = get_url_as_text(download_ddf_url, 'GET', cookie)
+        add_ddf_statements(model, statusNode, ddf_text)
+        model.add_statement(status_is_ddf)
+
+
+def add_ddf_statements(model, statusNode, ddf_string):
+    """Convert a ddf text file into RDF Statements
+    """
+    ddf_lines = ddf_string.split('\n')
+    # first line is header
+    header = ddf_lines[0].split()
+    attributes = [ ddfNS[x] for x in header ]
+    statements = []
+
+    for ddf_line in ddf_lines[1:]:
+        ddf_line = ddf_line.strip()
+        if len(ddf_line) == 0:
+            continue
+        if ddf_line.startswith("#"):
+            continue
+        
+        ddf_record = ddf_line.split('\t')
+        files = ddf_record[0].split(',')
+        file_attributes = ddf_record[1:]
+
+        for f in files:
+            fileNode = RDF.Node()
+            add_stmt(model, statusNode, submitOntologyNS['has_file'], fileNode)
+            add_stmt(model, fileNode, rdfsNS['type'], ddfNS['file'])
+            add_stmt(model, fileNode, ddfNS['filename'], f)
+
+            for predicate, object in zip( attributes[1:], file_attributes):
+                add_stmt(model, fileNode, predicate, object)
+
+
+def load_encode_libraries(model, htswapi):
+    """Get libraries associated with encode.
+    """
+    encodeUrl = htswapi.root_url + "/library/?affiliations__id__exact=44"
+    rdfaParser = RDF.Parser(name='rdfa')
+    print encodeUrl
+    rdfaParser.parse_into_model(model, encodeUrl)
+    query = RDF.Statement(None, libOntNS['library_id'], None)
+    libraries = model.find_statements(query)
+    for statement in libraries:
+        libraryUrn = statement.subject
+        load_library_detail(model, libraryUrn)
+
+
+def load_library_detail(model, libraryUrn):
+    """Grab detail information from library page
+    """
+    rdfaParser = RDF.Parser(name='rdfa')
+    query = RDF.Statement(libraryUrn, libOntNS['date'], None)
+    results = list(model.find_statements(query))
+    if len(results) == 0:
+        logger.info("Loading {0}".format(str(libraryUrn)))
+        rdfaParser.parse_into_model(model, libraryUrn.uri)
+    elif len(results) == 1:
+        pass # Assuming that a loaded dataset has one record
+    else:
+        logging.warning("Many dates for {0}".format(libraryUrn))
+                        
+def get_library_id(name):
+    """Guess library ID from library name
+    """
+    match = re.search(r"[ -](?P<id>([\d]{5})|(SL[\d]{4}))", name)
+    library_id = None
+    if match is not None:
+        library_id = match.group('id')
+    return library_id
+
 
 def get_contents(element):
     """Return contents or none.
@@ -93,10 +341,11 @@ def get_contents(element):
 
     a = element.find('a')
     if a is not None:
-        return a.contents[0]
-
-    return element.contents[0]
+        return a.contents[0].encode(CHARSET)
 
+    return element.contents[0].encode(CHARSET)
+    
+    
 def get_date_contents(element):
     data = get_contents(element)
     if data:
@@ -104,6 +353,92 @@ def get_date_contents(element):
     else:
         return None
 
+def sparql_query(model, query_filename):
+    """Execute sparql query from file
+    """
+    query_body = open(query_filename,'r').read()
+    query = RDF.SPARQLQuery(query_body)
+    results = query.execute(model)
+    for row in results:
+        for k,v in row.items()[::-1]:
+            print "{0}: {1}".format(k,v)
+        print 
+
+        
+def load_into_model(model, parser_name, filename):
+    if not os.path.exists(filename):
+        raise IOError("Can't find {0}".format(filename))
+    
+    data = open(filename, 'r').read()
+    rdf_parser = RDF.Parser(name=parser_name)
+    ns_uri = submitOntologyNS[''].uri
+    rdf_parser.parse_string_into_model(model, data, ns_uri)
+
+def add_stmt(model, subject, predicate, object):
+    """Convienence create RDF Statement and add to a model
+    """
+    return model.add_statement(
+        RDF.Statement(subject, predicate, object)
+    )
+
+def login(cookie=None):
+    """Login if we don't have a cookie
+    """
+    if cookie is not None:
+        return cookie
+    
+    keys = keyring.get_keyring()
+    password = keys.get_password(LOGIN_URL, USERNAME)
+    credentials = {'login': USERNAME,
+                   'password': password}
+    headers = {'Content-type': 'application/x-www-form-urlencoded'}
+    http = httplib2.Http()
+    response, content = http.request(LOGIN_URL,
+                                     'POST',
+                                     headers=headers,
+                                     body=urllib.urlencode(credentials))
+    logging.debug("Login to {0}, status {1}".format(LOGIN_URL,
+                                                    response['status']))
+    
+    cookie = response.get('set-cookie', None)
+    if cookie is None:
+        raise RuntimeError("Wasn't able to log into: {0}".format(LOGIN_URL))
+    return cookie
+
+                
+def get_url_as_soup(url, method, cookie=None):
+    http = httplib2.Http()
+    headers = {}
+    if cookie is not None:
+        headers['Cookie'] = cookie
+    response, content = http.request(url, method, headers=headers)
+    if response['status'] == '200':
+        soup = BeautifulSoup(content,
+                             fromEncoding="utf-8", # should read from header
+                             convertEntities=BeautifulSoup.HTML_ENTITIES
+                             )
+        return soup
+    else:
+        msg = "error accessing {0}, status {1}"
+        msg = msg.format(url, response['status'])
+        raise httplib2.HttpLib2ErrorWithResponse(msg, response, content)
+
+def get_url_as_text(url, method, cookie=None):
+    http = httplib2.Http()
+    headers = {}
+    if cookie is not None:
+        headers['Cookie'] = cookie
+    response, content = http.request(url, method, headers=headers)
+    if response['status'] == '200':
+        return content
+    else:
+        msg = "error accessing {0}, status {1}"
+        msg = msg.format(url, response['status'])
+        raise httplib2.HttpLib2ErrorWithResponse(msg, response, content)
+    
+################
+#  old stuff
 SUBMISSIONS_LACKING_LIBID = [
     ('1x75-Directional-HeLa-Rep1',    '11208'),
     ('1x75-Directional-HeLa-Rep2',    '11207'),
@@ -116,73 +451,10 @@ SUBMISSIONS_LACKING_LIBID = [
     ('1x75-Directional-K562-Rep1',    '11008'),
     ('1x75-Directional-K562-Rep2',    '11007'),
     ('1x75-Directional-NHEK-Rep1',    '11204'),
+    ('1x75-Directional-GM12878-Rep1', '11011'),
+    ('1x75-Directional-GM12878-Rep2', '11010'),
     ]
 
-class Submission(object):
-    def __init__(self, subid, species, name, status, date, age, cookie=None):
-        self.cookie = cookie
-        self.subid = subid
-        self.species = species
-        self.name = name
-        self.status = status
-        self.date = date
-        self.age = age
-        self._library_id = None
-        self._created_date = None
-
-    def triples(self):
-        subNode = submissionNS[self.subid.encode('utf-8')]
-        dateNode = self.date.strftime("%Y-%m-%d")
-        s = [RDF.Statement(subNode, submitNS['name'],
-                           self.name.encode('utf-8')),
-             RDF.Statement(subNode, submitNS['status'],
-                           self.status.encode('utf-8')),
-             RDF.Statement(subNode, submitNS['last_modify_date'], dateNode),
-             ]
-        if self.species is not None:
-            s.append(RDF.Statement(subNode, submitNS['species'],
-                                   self.species.encode('utf-8')))
-        if self.library_id is not None:
-             libId = libraryNS[self.library_id.encode('utf-8')]
-             s.append(RDF.Statement(subNode, rdfsNS['seeAlso'], libId))
-        
-        return s
-        
-
-    def _get_library_id(self):
-        if self._library_id is None:
-            match = re.search(r"[ -](?P<id>([\d]{5})|(SL[\d]{4}))", self.name)
-            if match is not None:
-                self._library_id = match.group('id')
-            else:
-                for dir_lib_name, lib_id in SUBMISSIONS_LACKING_LIBID:
-                    if dir_lib_name in self.name:
-                        self._library_id = lib_id
-                        break
-            
-        return self._library_id
-    
-    library_id = property(_get_library_id)
-
-    def _get_detail(self):
-        detail = DETAIL_URL.format(self.subid)
-        soup = get_url_as_soup(detail, 'GET', self.cookie)
-
-        created_label = soup.find(text="Created: ")
-        if created_label:
-            self._created_date = get_date_contents(created_label.next)
-            
-    def _get_created_date(self):
-        if self._created_date is None:
-            self._get_detail()
-        return self._created_date
-    created_date = property(_get_created_date)
-    
-    def __unicode__(self):
-        return u"{0}\t{1}\t{2}".format(self.subid, self.library_id, self.name)
-
-    def __repr__(self):
-        return u"<Submission ({0}) '{1}'>".format(self.subid, self.name)
 
 
 def select_by_library_id(submission_list):
@@ -218,7 +490,7 @@ def library_to_freeze(selected_libraries):
     report.append('<tbody>')
     for lib_id in lib_ids:
         report.append('<tr>')
-        lib_url = LIBRARY_URL.format(lib_id)
+        lib_url = libraryNS[lib_id].uri
         report.append('<td><a href="{0}">{1}</a></td>'.format(lib_url, lib_id))
         submissions = selected_libraries[lib_id]
         report.append('<td>{0}</td>'.format(submissions[0].name))
@@ -230,7 +502,8 @@ def library_to_freeze(selected_libraries):
         for d in freezes:
             report.append('<td>')
             for s in batched.get(d, []):
-                subid = '<a href="http://encodesubmit.ucsc.edu/pipeline/show/{0}">{0}</a>'.format(s.subid)
+                show_url = submissionNS[s.subid].uri
+                subid = '<a href="{0}">{1}</a>'.format(show_url, s.subid)
                 report.append("{0}:{1}".format(subid, s.status))
             report.append('</td>')
         else:
@@ -251,24 +524,6 @@ def date_to_freeze(d):
             return name
     else:
         return None
-    
-                
-def get_url_as_soup(url, method, cookie=None):
-    http = httplib2.Http()
-    headers = {}
-    if cookie is not None:
-        headers['Cookie'] = cookie
-    response, content = http.request(url, method, headers=headers)
-    if response['status'] == '200':
-        soup = BeautifulSoup(content,
-                             fromEncoding="utf-8", # should read from header
-                             convertEntities=BeautifulSoup.HTML_ENTITIES
-                             )
-        return soup
-    else:
-        msg = "error accessing {0}, status {1}"
-        msg = msg.format(url, response['status'])
-        e = httplib2.HttpLib2ErrorWithResponse(msg, response, content)
 
 if __name__ == "__main__":
     main()
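
The new get_library_id helper guesses a library ID from a submission name
with a single regular expression; a quick illustration against names seen in
the overrides file above:

    import re

    pattern = r"[ -](?P<id>([\d]{5})|(SL[\d]{4}))"

    match = re.search(pattern, "woldlab hESC 10886 rep1 2009Jan13")
    print match.group('id')                               # 10886

    # names with no embedded ID yield None, which is why the
    # SUBMISSIONS_LACKING_LIBID table still exists
    print re.search(pattern, "1x75-Directional-K562-Rep1")  # None
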
diff --git a/extra/ucsc_encode_submission/failed-submissions.sparql b/extra/ucsc_encode_submission/failed-submissions.sparql
new file mode 100644 (file)
index 0000000..af4af4e
--- /dev/null
@@ -0,0 +1,22 @@
+##
+## Find submissions that are currently "failed"
+##
+
+PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
+PREFIX submitOnt:<http://jumpgate.caltech.edu/wiki/UCSCSubmissionOntology#>
+PREFIX libOntNS:<http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+
+#libraryNS = RDF.NS("http://jumpgate.caltech.edu/library/")
+#submissionNS = RDF.NS("http://encodesubmit.ucsc.edu/pipeline/show/")
+#ddfNS = RDF.NS("http://encodesubmit.ucsc.edu/pipeline/download_ddf#")
+
+SELECT 
+ ?subid ?subname ?liburn ?status
+WHERE {
+  ?subid submitOnt:name ?subname .
+  ?subid submitOnt:library_urn ?liburn .
+  ?subid submitOnt:has_status ?statusNode .
+  ?statusNode submitOnt:status ?status .
+  ?statusNode submitOnt:last_modify_date ?last_modify .
+  FILTER (regex(?status, "failed", "i"))
+} 
diff --git a/extra/ucsc_encode_submission/find-lib-by-cell.sparql b/extra/ucsc_encode_submission/find-lib-by-cell.sparql
new file mode 100644 (file)
index 0000000..1342dac
--- /dev/null
@@ -0,0 +1,14 @@
+# Produce list of submissions associated with a cell/replicate
+
+PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
+PREFIX encodeSubmit:<http://jumpgate.caltech.edu/wiki/UCSCSubmissionOntology#>
+PREFIX libraryOntology:<http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+
+SELECT distinct ?liburn ?cell ?replicate ?subid
+WHERE {
+    ?subid encodeSubmit:library_urn ?liburn ;
+           encodeSubmit:name ?name .
+    ?liburn libraryOntology:cell_line ?cell ;
+            libraryOntology:replicate ?replicate
+}
+ORDER BY ?cell ?replicate ?liburn
diff --git a/extra/ucsc_encode_submission/scan_extension.py b/extra/ucsc_encode_submission/scan_extension.py
new file mode 100644 (file)
index 0000000..f3e45a6
--- /dev/null
@@ -0,0 +1,46 @@
+from optparse import OptionParser
+import os
+import sys
+from pprint import pprint
+
+def main(cmdline=None):
+    parser = make_parser()
+    opts, args = parser.parse_args(cmdline)
+
+    extensions = scan(args)
+    #pprint(extensions)
+    print find_common_suffix(extensions)
+
+def make_parser():
+    parser = OptionParser("%prog: directory [directory...]")
+    return parser
+
+def scan(toscan):
+    index = {}
+    for cur_scan_dir in toscan:
+        for path, dirnames, filenames in os.walk(cur_scan_dir):
+            for filename in filenames:
+                next_index = index
+                for c in filename[::-1]:
+                    next_index = next_index.setdefault(c, {})
+    return index
+
+def find_common_suffix(index, tail=[]):
+    if len(tail) > 0 and len(index) > 1:
+        return "".join(tail[::-1])
+
+    results = []
+    for key, choice in index.items():
+        r = find_common_suffix(choice, tail+[key])
+        if r is not None:
+            results.append(r)
+        
+    if len(results) == 0:
+        return None
+    elif len(results) == 1:
+        return results[0]
+    else:
+        return results
+
+if __name__ == "__main__":
+    main()
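
scan() builds a trie of filename characters read right-to-left, and
find_common_suffix() walks it from the final character until more than one
branch appears. A worked illustration with the trie built by hand, since
scan() itself walks the filesystem:

    # the index scan() would produce for the filenames "a.txt" and "b.txt"
    index = {'t': {'x': {'t': {'.': {'a': {}, 'b': {}}}}}}
    # walks t -> x -> t -> . and stops where 'a'/'b' diverge
    print find_common_suffix(index)   # prints ".txt"
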
index 07bea2c495b9d2d02309ec270cc7a9f1b0f3c1f0..38d8deefc7a8a1b2dc07b3f445534d24fc4fef0d 100755 (executable)
@@ -37,10 +37,7 @@ def main(cmdline=None):
     else:
         logging.basicConfig(level = logging.WARNING )        
     
-    apidata = {'apiid': opts.apiid, 'apikey': opts.apikey }
-
-    if opts.host is None or opts.apiid is None or opts.apikey is None:
-        parser.error("Please specify host url, apiid, apikey")
+    apidata = api.make_auth_from_opts(opts, parser)
 
     if opts.makeddf and opts.daf is None:
         parser.error("Please specify your daf when making ddf files")
@@ -73,22 +70,6 @@ def main(cmdline=None):
 
 
 def make_parser():
-    # Load defaults from the config files
-    config = SafeConfigParser()
-    config.read([os.path.expanduser('~/.htsworkflow.ini'), '/etc/htsworkflow.ini'])
-    
-    sequence_archive = None
-    apiid = None
-    apikey = None
-    apihost = None
-    SECTION = 'sequence_archive'
-    if config.has_section(SECTION):
-        sequence_archive = config.get(SECTION, 'sequence_archive',sequence_archive)
-        sequence_archive = os.path.expanduser(sequence_archive)
-        apiid = config.get(SECTION, 'apiid', apiid)
-        apikey = config.get(SECTION, 'apikey', apikey)
-        apihost = config.get(SECTION, 'host', apihost)
-
     parser = OptionParser()
 
     # commands
@@ -108,20 +89,14 @@ def make_parser():
     parser.add_option('--force', default=False, action="store_true",
                       help="Force regenerating fastqs")
 
-    # configuration options
-    parser.add_option('--apiid', default=apiid, help="Specify API ID")
-    parser.add_option('--apikey', default=apikey, help="Specify API KEY")
-    parser.add_option('--host',  default=apihost,
-                      help="specify HTSWorkflow host",)
-    parser.add_option('--sequence', default=sequence_archive,
-                      help="sequence repository")
-
     # debugging
     parser.add_option('--verbose', default=False, action="store_true",
                       help='verbose logging')
     parser.add_option('--debug', default=False, action="store_true",
                       help='debug logging')
 
+    api.add_auth_options(parser)
+    
     return parser
 
 
@@ -650,6 +625,18 @@ class NameToViewMap(object):
         ma = 'TH1014'
 
         self.patterns = [
+            # for 2011 Feb 18 elements submission
+            ('final_Cufflinks_genes_*gtf',       'GeneDeNovo'),
+            ('final_Cufflinks_transcripts_*gtf', 'TranscriptDeNovo'),
+            ('final_exonFPKM-Cufflinks-0.9.3-GENCODE-v3c-*.gtf',       
+             'ExonsGencV3c'),
+            ('final_GENCODE-v3-Cufflinks-0.9.3.genes-*gtf',          
+             'GeneGencV3c'),
+            ('final_GENCODE-v3-Cufflinks-0.9.3.transcripts-*gtf',    
+             'TranscriptGencV3c'),
+            ('final_TSS-Cufflinks-0.9.3-GENCODE-v3c-*.gtf', 'TSS'),
+            ('final_junctions-*.bed6+3',                    'Junctions'),
+            
             ('*.bai',                   None),
             ('*.splices.bam',           'Splices'),
             ('*.bam',                   self._guess_bam_view),
@@ -776,8 +763,14 @@ class NameToViewMap(object):
 
     def _is_paired(self, lib_id, lib_info):
         """Determine if a library is paired end"""
+        # TODO: encode this information in the library type page.
+        single = (1,3,6)
         if len(lib_info["lane_set"]) == 0:
-            return False
+            # we haven't sequenced anything so guess based on library type
+            if lib_info['library_type_id'] in single:
+                return False
+            else:
+                return True
 
         if not self.lib_paired.has_key(lib_id):
             is_paired = 0
index 0f526948615996c7d80ac8c61834b5da81c798aa..0b1256e509e96d6c297c8eaca4c3ab31ed48dfe1 100644 (file)
@@ -13,14 +13,7 @@ import urlparse
 
 from benderjab import rpc
 
-def runfolder_validate(fname):
-    """
-    Return True if fname looks like a runfolder name
-    """
-    if re.match("^[0-9]{6}_[-A-Za-z0-9_]*$", fname):
-        return True
-    else:
-        return False
+from htsworkflow.automation.solexa import is_runfolder
     
 class rsync(object):
   def __init__(self, sources, dest, pwfile):
@@ -240,7 +233,7 @@ class CopierBot(rpc.XmlRpcBot):
         self.rsync.poll()
         
         # see if we're still copying
-        if runfolder_validate(runDir):
+        if is_runfolder(runDir):
             logging.info("recevied sequencing finshed for %s" % (runDir))
             self.pending.append(runDir)
             self.startCopy()
diff --git a/htsworkflow/automation/solexa.py b/htsworkflow/automation/solexa.py
new file mode 100644 (file)
index 0000000..e2b5f59
--- /dev/null
@@ -0,0 +1,41 @@
+"""Utilities to help process solexa/illumina runfolders
+"""
+import os
+import re
+
+def is_runfolder(name):
+    """
+    Is it a runfolder?
+
+    >>> print is_runfolder('090630_HWUSI-EAS999_0006_30LNFAAXX')
+    True
+    >>> print is_runfolder('hello')
+    False
+    """
+    if re.match("^[0-9]{6}_[-A-Za-z0-9_]*$", name):
+        return True
+    else:
+        return False
+
+def get_top_dir(root, path):
+    """
+    Return the directory in path that is a subdirectory of root.
+    e.g.
+
+    >>> print get_top_dir('/a/b/c', '/a/b/c/d/e/f')
+    d
+    >>> print get_top_dir('/a/b/c/', '/a/b/c/d/e/f')
+    d
+    >>> print get_top_dir('/a/b/c', '/g/e/f')
+    None
+    >>> print get_top_dir('/a/b/c', '/a/b/c')
+    <BLANKLINE>
+    """
+    if path.startswith(root):
+        subpath = path[len(root):]
+        if subpath.startswith('/'):
+            subpath = subpath[1:]
+        return subpath.split(os.path.sep)[0]
+    else:
+        return None
+
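
The docstring examples in this new module double as tests; a minimal way to
exercise them:

    import doctest
    from htsworkflow.automation import solexa

    doctest.testmod(solexa, verbose=True)
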
index 93efe27f96a94478f9262ca0f7d2fb6724911165..af5932e8711d81c51ba3d5eb444eae4884a4d0a6 100644 (file)
@@ -7,6 +7,7 @@ import sys
 import time
 
 from htsworkflow.util import mount
+from htsworkflow.automation.solexa import is_runfolder, get_top_dir
 
 # this uses pyinotify
 import pyinotify
@@ -16,42 +17,6 @@ IN_UNMOUNT = EventsCodes.ALL_FLAGS['IN_UNMOUNT']
 
 from benderjab import rpc
 
-def is_runfolder(name):
-    """
-    Is it a runfolder?
-
-    >>> print is_runfolder('090630_HWUSI-EAS999_0006_30LNFAAXX')
-    True
-    >>> print is_runfolder('hello')
-    False
-    """
-    if re.match("[0-9]{6}_.*", name):
-        return True
-    else:
-        return False
-
-def get_top_dir(root, path):
-    """
-    Return the directory in path that is a subdirectory of root.
-    e.g.
-
-    >>> print get_top_dir('/a/b/c', '/a/b/c/d/e/f')
-    d
-    >>> print get_top_dir('/a/b/c/', '/a/b/c/d/e/f')
-    d
-    >>> print get_top_dir('/a/b/c', '/g/e/f')
-    None
-    >>> print get_top_dir('/a/b/c', '/a/b/c')
-    <BLANKLINE>
-    """
-    if path.startswith(root):
-        subpath = path[len(root):]
-        if subpath.startswith('/'):
-            subpath = subpath[1:]
-        return subpath.split(os.path.sep)[0]
-    else:
-        return None
-
 class WatcherEvent(object):
     """
     Track information about a file event
@@ -78,6 +43,7 @@ class Handler(pyinotify.ProcessEvent):
         self.last_event = {}
         self.watchmanager = watchmanager
         self.bot = bot
+        self.log = bot.log
         if completion_files is not None:
             completion_files = [ x.lower() for x in completion_files ]
         self.completion_files = completion_files
@@ -96,7 +62,7 @@ class Handler(pyinotify.ProcessEvent):
                     runfolder = os.path.join(watch_path, target)
 
                     if not is_runfolder(target):
-                        logging.debug("Skipping %s, not a runfolder" % (target,))
+                        self.log.debug("Skipping %s, not a runfolder" % (target,))
                         continue
                     
                     # grab the previous events for this watch path
@@ -122,15 +88,15 @@ class Handler(pyinotify.ProcessEvent):
                         self.last_event[watch_path][target].complete = True
                         msg += "(completed)"
 
-                    logging.debug(msg)
+                    self.log.debug(msg)
 
     def process_IN_DELETE(self, event):
-        logging.debug("Remove: %s" %  os.path.join(event.path, event.name))
+        self.log.debug("Remove: %s" %  os.path.join(event.path, event.name))
         pass
 
     def process_IN_UNMOUNT(self, event):
         pathname = os.path.join(event.path, event.name)
-        logging.debug("IN_UNMOUNT: %s" % (pathname,))
+        self.log.debug("IN_UNMOUNT: %s" % (pathname,))
         self.bot.unmount_watch(event.path)
 
 class SpoolWatcher(rpc.XmlRpcBot):
@@ -237,7 +203,7 @@ class SpoolWatcher(rpc.XmlRpcBot):
                 mounts.append(w)
                 self.mounts_to_watches[mount_location] = mounts
 
-            logging.info(u"Watching:"+unicode(w))
+            self.log.info(u"Watching:"+unicode(w))
             self.wdds.append(self.wm.add_watch(w, mask, rec=True, auto_add=True))
 
     def unmount_watch(self, event_path):
@@ -245,7 +211,7 @@ class SpoolWatcher(rpc.XmlRpcBot):
         # the list getting shorter
         for i in range(len(self.wdds),0, -1):
             wdd = self.wdds[i]
-            logging.info(u'unmounting: '+unicode(wdd.items()))
+            self.log.info(u'unmounting: '+unicode(wdd.items()))
             self.wm.rm_watch(wdd.values())
             del self.wdds[i]
         self.mounted = False
@@ -255,7 +221,7 @@ class SpoolWatcher(rpc.XmlRpcBot):
         if root_copy_url[-1] != '/':
             root_copy_url += '/'
         copy_url = root_copy_url + list_event_dir
-        logging.debug('Copy url: %s' % (copy_url,))
+        self.log.debug('Copy url: %s' % (copy_url,))
         return copy_url
                   
     def process_notify(self, *args):
@@ -288,7 +254,7 @@ class SpoolWatcher(rpc.XmlRpcBot):
                 # restart the watch
                 for watch in self.mounts_to_watches[mount_point]:
                     self.add_watch(watch)
-                    logging.info(
+                    self.log.info(
                         "%s was remounted, restarting watch" % \
                             (mount_point)
                     )
@@ -335,7 +301,7 @@ class SpoolWatcher(rpc.XmlRpcBot):
         super(SpoolWatcher, self).stop()
     
     def startCopy(self, copy_url=None):
-        logging.debug("writes seem to have stopped")
+        self.log.debug("writes seem to have stopped")
         if self.notify_runner is not None:
             for r in self.notify_runner:
                 self.rpc_send(r, tuple([copy_url]), 'startCopy')
@@ -345,7 +311,7 @@ class SpoolWatcher(rpc.XmlRpcBot):
         
     def sequencingFinished(self, run_dir):
         # need to strip off self.watchdirs from rundir I suspect.
-        logging.info("run.completed in " + str(run_dir))
+        self.log.info("run.completed in " + str(run_dir))
         for watch in self.watchdirs:
             if not run_dir.startswith(watch):
                 print "%s didn't start with %s" % (run_dir, watch)
@@ -356,7 +322,7 @@ class SpoolWatcher(rpc.XmlRpcBot):
         else:
             stripped_run_dir = run_dir
 
-        logging.debug("stripped to " + stripped_run_dir)
+        self.log.debug("stripped to " + stripped_run_dir)
         if self.notify_users is not None:
             for u in self.notify_users:
                 self.send(u, 'Sequencing run %s finished' % \
index 6c3b9df5491bd95abfe75d814f273951fb847ce7..1457f9effbb44e764f119184a572cd48d232e940 100644 (file)
@@ -2,7 +2,7 @@ import unittest
 
 
 import os
-from htsworkflow.automation.copier import runfolder_validate
+from htsworkflow.automation.solexa import is_runfolder
 
 def extract_runfolder_path(watchdir, event):
   runfolder_path = watchdir
@@ -13,7 +13,7 @@ def extract_runfolder_path(watchdir, event):
   fragments = path[len(watchdir):].split(os.path.sep)
   for f in fragments:
     runfolder_path = os.path.join(runfolder_path, f)
-    if runfolder_validate(f):
+    if is_runfolder(f):
       return runfolder_path
   return None
 
diff --git a/htsworkflow/automation/test/test_solexa_utils.py b/htsworkflow/automation/test/test_solexa_utils.py
new file mode 100644 (file)
index 0000000..a527ad3
--- /dev/null
@@ -0,0 +1,31 @@
+
+import unittest
+
+from htsworkflow.automation import solexa
+
+class testSolexaRunfolderUtils(unittest.TestCase):
+    def test_is_runfolder(self):
+        self.failUnlessEqual(solexa.is_runfolder(""), False)
+        self.failUnlessEqual(solexa.is_runfolder("1345_23"), False)
+        self.failUnlessEqual(solexa.is_runfolder("123456_asdf-$23'"), False)
+        self.failUnlessEqual(solexa.is_runfolder("123456_USI-EAS44"), True)
+        self.failUnlessEqual(solexa.is_runfolder("123456_USI-EAS44 "), False)
+
+
+    def test_get_top_dir(self):
+        test_data = [ # root, path, response
+                      ('/a/b/c', '/a/b/c/d/e/f', 'd'),
+                      ('/a/b/c/', '/a/b/c/d/e/f', 'd'),
+                      ('/a/b/c', '/g/e/f', None),
+                      ('/a/b/c', '/a/b/c', ''),
+                    ]
+        
+        for root, path, response in test_data:
+            self.failUnlessEqual(solexa.get_top_dir(root, path), response)
+            
+def suite():
+    return unittest.makeSuite(testSolexaRunfolderUtils, 'test')
+
+if __name__ == "__main__":
+    unittest.main(defaultTest="suite")
+    
index b57ad2050dcbca68c1b476d48570a3b2204fe715..ba2a739d5be0b461aaf02264a40dda6ed90e6863 100644 (file)
@@ -1,11 +1,14 @@
 # some core functions of analysis manager module
-from django.http import HttpResponse
+
 from datetime import datetime
 from string import *
 import re
-from htsworkflow.frontend import settings
-from htsworkflow.frontend.analysis.models import Task, Project
+
+from django.conf import settings
 from django.core.exceptions import ObjectDoesNotExist
+from django.http import HttpResponse
+
+from htsworkflow.frontend.analysis.models import Task, Project
 
 def updStatus(request):
     ClIP = request.META['REMOTE_ADDR']
index 41ecf424bc2d29902ce076e55bafad0feefbb792..5a580e0538a14e7d43a110f1c8d0a0f9e53416ab 100644 (file)
@@ -1,6 +1,6 @@
 from django.db import models
+from django.conf import settings
 from datetime import datetime
-from htsworkflow.frontend import settings
 from htsworkflow.frontend.samples.models import Library 
 from string import *
 
index 4df771b7025b4b73c2a04011f874c26de8a873ca..31650a6bb56d6189503a8e6a04d6240c8330459f 100644 (file)
@@ -2,8 +2,7 @@
 Define some alternate authentication methods
 """
 from django.core.exceptions import PermissionDenied
-
-from htsworkflow.frontend import settings
+from django.conf import settings
 
 apidata = {'apiid': u'0', 'apikey': settings.DEFAULT_API_KEY}
 
index 179732030045c7ef16e5d4d640f849c821543a11..1c82a26470cfc95d4e5f0a16baf80e34c390eea1 100644 (file)
@@ -1 +1,53 @@
-[{"pk": 1, "model": "bcmagic.keywordmap", "fields": {"regex": "(?P<uuid>[A-Fa-f0-9]+)", "url_template": "/samples/freezer/{{ uuid }}/", "keyword": "frzr"}}, {"pk": 2, "model": "bcmagic.keywordmap", "fields": {"regex": "(?P<uuid>[A-Fa-f0-9]+)", "url_template": "/samples/container/{{ uuid }}/", "keyword": "cntr"}}, {"pk": 3, "model": "bcmagic.keywordmap", "fields": {"regex": "(?P<sampleid>\\d+)\\|(?P<owner>[A-Za-z0-9_\\- ]+)", "url_template": "/samples/sample/{{ sampleid }}/", "keyword": "s"}}, {"pk": 4, "model": "bcmagic.keywordmap", "fields": {"regex": "(?P<search>[\\S\\s]+)", "url_template": "http://www.google.com/search?q={{ search }}", "keyword": "gg"}}, {"pk": 5, "model": "bcmagic.keywordmap", "fields": {"regex": "(?P<search>[\\S\\s]+)", "url_template": "http://www.flickr.com/search/?q={{ search }}", "keyword": "flickr"}}, {"pk": 6, "model": "bcmagic.keywordmap", "fields": {"regex": "(?P<uuid>[A-Fa-f0-9]+)", "url_template": "/inventory/{{ uuid }}/", "keyword": "invu"}}, {"pk": 7, "model": "bcmagic.keywordmap", "fields": {"regex": "(?P<barcode_id>.+)", "url_template": "/inventory/{{barcode_id}}/", "keyword": "invb"}}, {"pk": 1, "model": "bcmagic.printer", "fields": {"name": "ZM400 1.25x1", "label_height": 1.0, "notes": "Everyday use labels", "label_width": 1.25, "label_shape": "Square", "model": "Zebra ZM400", "ip_address": "131.215.54.194"}}, {"pk": 2, "model": "bcmagic.printer", "fields": {"name": "ZM400 3x3", "label_height": 3.0, "notes": "Larger everyday use labels", "label_width": 3.0, "label_shape": "Square", "model": "Zebra ZM400", "ip_address": "131.215.34.199"}}]
+[{"pk": 1, 
+  "model": "bcmagic.keywordmap", 
+  "fields": {"regex": "(?P<uuid>[A-Fa-f0-9]+)", 
+             "url_template": "/samples/freezer/{{ uuid }}/", 
+             "keyword": "frzr"}},
+ {"pk": 2, 
+  "model": "bcmagic.keywordmap", 
+  "fields": {"regex": "(?P<uuid>[A-Fa-f0-9]+)", 
+             "url_template": "/samples/container/{{ uuid }}/", 
+             "keyword": "cntr"}},
+ {"pk": 3, 
+  "model": "bcmagic.keywordmap", 
+  "fields": {"regex": "(?P<sampleid>\\d+)\\|(?P<owner>[A-Za-z0-9_\\- ]+)",
+             "url_template": "/samples/sample/{{ sampleid }}/",
+             "keyword": "s"}},
+ {"pk": 4,
+  "model": "bcmagic.keywordmap",
+  "fields": {"regex": "(?P<search>[\\S\\s]+)",
+             "url_template": "http://www.google.com/search?q={{ search }}",
+             "keyword": "gg"}}, 
+ {"pk": 5, 
+  "model": "bcmagic.keywordmap", 
+  "fields": {"regex": "(?P<search>[\\S\\s]+)", 
+             "url_template": "http://www.flickr.com/search/?q={{ search }}", 
+             "keyword": "flickr"}}, 
+ {"pk": 6, 
+  "model": "bcmagic.keywordmap", 
+  "fields": {"regex": "(?P<uuid>[A-Fa-f0-9]+)", 
+             "url_template": "/inventory/{{ uuid }}/", 
+             "keyword": "invu"}}, 
+ {"pk": 7, 
+  "model": "bcmagic.keywordmap", 
+  "fields": {"regex": "(?P<barcode_id>.+)", 
+             "url_template": "/inventory/{{barcode_id}}/", 
+             "keyword": "invb"}}, 
+ {"pk": 1, 
+  "model": "bcmagic.printer", 
+  "fields": {"name": "ZM400 1.25x1", 
+             "label_height": 1.0, 
+             "notes": "Everyday use labels", 
+             "label_width": 1.25, 
+             "label_shape": "Square", 
+             "model": "Zebra ZM400", 
+             "ip_address": "131.215.54.194"}}, 
+ {"pk": 2, 
+  "model": "bcmagic.printer", 
+  "fields": {"name": "ZM400 3x3", 
+             "label_height": 3.0, 
+             "notes": "Larger everyday use labels", 
+             "label_width": 3.0, 
+             "label_shape": "Square", 
+             "model": "Zebra ZM400", 
+             "ip_address": "131.215.34.199"}}]
index e7d89d36e132eb1bece6b687304fc163a2a507ad..78919b00f99a8a834b90884b219fc2145f2e9f68 100644 (file)
@@ -1,4 +1,4 @@
-from htsworkflow.frontend import settings
+from django.conf import settings
 
 import ftplib
 import socket
@@ -53,4 +53,4 @@ def autofill(field, value):
     Return a bcm dictionary with a command to automatically fill the
     corresponding "field" with "value"
     """
-    return {'mode': 'autofill', 'field': field, 'value': value}
\ No newline at end of file
+    return {'mode': 'autofill', 'field': field, 'value': value}
index 6169f8a8d0e7b17dd8302a3fe58a92028f6db4c7..d21f86bb9d9995a9bb3dd1e69c08fe3ddc8a44d3 100644 (file)
@@ -1,9 +1,9 @@
+from django.conf import settings
 from django.http import HttpResponse
 from django.shortcuts import render_to_response
 from django.core.exceptions import ObjectDoesNotExist
 
 from htsworkflow.frontend.eland_config import forms
-from htsworkflow.frontend import settings
 from htsworkflow.frontend.experiments import models
 
 import os
index 882ea89c9961db701c3168f19efeb6910856a94c..5dec8cf5871bc640050c68b1118da2a3c073b400 100755 (executable)
@@ -12,9 +12,9 @@ from django.contrib.auth.decorators import login_required
 from django.core.exceptions import ObjectDoesNotExist
 from django.core.mail import send_mail, mail_admins
 from django.http import HttpResponse, Http404
+from django.conf import settings
 
 from htsworkflow.frontend.auth import require_api_key
-from htsworkflow.frontend import settings
 from htsworkflow.frontend.experiments.models import \
     FlowCell, \
     DataRun, \
index 6554405dab8cdfdc13fcd33005660f8499b1ecc8..8a71e252331c600ce12eeaea8b20511f337ad98c 100755 (executable)
@@ -5,7 +5,8 @@ from django.core import urlresolvers
 from django.db import models
 
 from htsworkflow.frontend.samples.models import *
-from htsworkflow.frontend.settings import options
+#from htsworkflow.frontend.settings import options
+from django.conf import settings
 
 class ClusterStation(models.Model):
   name = models.CharField(max_length=50, unique=True)
@@ -21,7 +22,7 @@ class Sequencer(models.Model):
 
 default_pM = 5
 try:
-  default_pM = int(options.get('frontend', 'default_pm'))
+  default_pM = int(settings.DEFAULT_PM)
 except ValueError,e:
   logging.error("invalid value for frontend.default_pm")
 
@@ -88,6 +89,11 @@ class FlowCell(models.Model):
       return u"Paired"
     else:
       return u"Single"
+
+  @models.permalink
+  def get_absolute_url(self):
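+      # reversed through urls.py to e.g. /flowcell/FC12150/ (id illustrative)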
+      return ('htsworkflow.frontend.experiments.views.flowcell_detail',
+              [str(self.flowcell_id)])
     
 ### -----------------------
 class DataRun(models.Model):
@@ -157,3 +163,8 @@ class Lane(models.Model):
   cluster_estimate = models.IntegerField(blank=True, null=True)                                       
   status = models.IntegerField(choices=LANE_STATUS_CODES, null=True, blank=True) 
   comment = models.TextField(null=True, blank=True)
+
+  @models.permalink
+  def get_absolute_url(self):
+      return ('htsworkflow.frontend.experiments.views.flowcell_lane_detail',
+              [str(self.flowcell.flowcell_id), str(self.lane_number)])
index 9088b46144f3f7066e1e36482ada58346be63502..7bfebfec3f9a5190ef40dac3dacbcb51dcd3bdd9 100644 (file)
@@ -4,6 +4,7 @@ try:
     import json
 except ImportError, e:
     import simplejson as json
+import os
 import sys
 
 from django.core import mail
@@ -206,3 +207,4 @@ class TestEmailNotify(TestCase):
         # require that navigation back to the admin page exists
         self.failUnless(re.search('<a href="/admin/experiments/flowcell/153/">[^<]+</a>', response.content))
         
+
index 7b6d4bae40a8b5886d2796a16d267d28462a473f..342438db356b4140a04c59b3195c2ee3252bb158 100755 (executable)
@@ -8,7 +8,7 @@ from django.core.exceptions import ObjectDoesNotExist
 from django.core.mail import EmailMessage, mail_managers
 from django.http import HttpResponse
 from django.shortcuts import render_to_response, get_object_or_404
-from django.template import Context
+from django.template import RequestContext
 from django.template.loader import get_template
 
 from htsworkflow.frontend.experiments.models import *
@@ -87,13 +87,14 @@ def startedEmail(request, pk):
     for user_email in email_lane.keys():
         sending = ""
         # build body
-        context = Context({u'flowcell': fc,
-                   u'lanes': email_lane[user_email],
-                   u'runfolder': 'blank',
-                   u'finish_low': estimate_low,
-                   u'finish_high': estimate_high,
-                   u'now': datetime.now(),        
-                  })
+        context = RequestContext(request,
+                                 {u'flowcell': fc,
+                                  u'lanes': email_lane[user_email],
+                                  u'runfolder': 'blank',
+                                  u'finish_low': estimate_low,
+                                  u'finish_high': estimate_high,
+                                  u'now': datetime.now(),        
+                                  })
 
         # build view
         subject = "Flowcell %s" % ( fc.flowcell_id )
@@ -108,14 +109,15 @@ def startedEmail(request, pk):
 
         emails.append((user_email, subject, body, sending))
 
-    verify_context = Context({
-        'emails': emails,
-        'flowcell': fc,
-        'from': sender,
-        'send': send,
-        'site_managers': settings.MANAGERS,
-        'title': fc.flowcell_id,
-        'warnings': warnings,
+    verify_context = RequestContext(
+        request,
+        { 'emails': emails,
+          'flowcell': fc,
+          'from': sender,
+          'send': send,
+          'site_managers': settings.MANAGERS,
+          'title': fc.flowcell_id,
+          'warnings': warnings,
         })
     return HttpResponse(email_verify.render(verify_context))
     
@@ -123,3 +125,25 @@ def finishedEmail(request, pk):
     """
     """
     return HttpResponse("I've got nothing.")
+
+
+def flowcell_detail(request, flowcell_id):
+    fc = get_object_or_404(FlowCell, flowcell_id=flowcell_id)
+
+    context = RequestContext(request,
+                             {'flowcell': fc})
+    
+    return render_to_response('experiments/flowcell_detail.html',
+                              context)
+
+def flowcell_lane_detail(request, flowcell_id, lane_number):
+    fc = get_object_or_404(FlowCell, flowcell_id=flowcell_id)
+    lane = get_object_or_404(fc.lane_set, lane_number=lane_number)
+
+    context = RequestContext(request,
+                             {'lib': lane.library,
+                              'lane': lane,
+                              'flowcell': fc})
+    
+    return render_to_response('experiments/flowcell_lane_detail.html',
+                              context)
index ae0ae12ca496b3ec72414ab0c3d98f0a2d315919..68944de17107df87f66ac9e0b1ec330aa77d6889 100644 (file)
@@ -1 +1,38 @@
-[{"pk": 1, "model": "inventory.printertemplate", "fields": {"default": false, "item_type": 1, "printer": 2, "template": "^FX=========================\r\n^FX 3\"x3\" Label\r\n^FX=========================\r\n^XA\r\n\r\n\r\n^FX======== Left Side ===========\r\n\r\n^FX------------\r\n^FX ^LH changes the 0,0 point of all subsequent location references\r\n^FX------------\r\n\r\n^LH0,50\r\n\r\n^FX ---Header---\r\n\r\n^FO25,0\r\n^CF0,50\r\n^FB250,2,,C\r\n^FD{{ item.barcode_id }}^FS\r\n\r\n^FX ---Column 1: Flowcells---\r\n\r\n^FX-----------------\r\n^FX FB command for automatic text formatting:\r\n^FX ^FB[dot width of area], [max # of lines], [change line spacing], [justification: L, C, R, J], [hanging indent]\r\n^FX-----------------\r\n\r\n^CF0,30,30\r\n^FO75,125\r\n^FB275,19,,L\r\n^FD{% for flowcell in flowcell_id_list %}{{ flowcell }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n^FX ---Date---\r\n\r\n^FO0,725\r\n^CF0,35\r\n^FB300,2,,C\r\n^FD{{ oldest_rundate|date:\"YMd\" }} - {{ latest_rundate|date:\"YMd\" }}^FS\r\n\r\n^FX ---Barcode---\r\n\r\n^FO135,795\r\n^BXN,3,200^FDinvb|{{ item.barcode_id }}^FS\r\n\r\n^FX======== Right Side ===========\r\n\r\n^LH300,60\r\n\r\n^FX ---Header---\r\n\r\n^FO0,0\r\n^CF0,50\r\n^FB600,2,,C\r\n^FD{{ barcode_id }}^FS\r\n\r\n^FX ---Dividing line---\r\n\r\n^FX---------------\r\n^FX GB command:\r\n^FX ^GB[box width], [box height], [border thickness], [color: B, W], [corner rounding: 0-8]^FS\r\n^FX---------------\r\n\r\n^FO0,100\r\n^GB0,600,10^FS\r\n\r\n^FX ---Column 2: Libraries 1-20---\r\n\r\n^CF0,30,30\r\n^FO75,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_1_to_20 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Column 3: Libraries 21-40---\r\n\r\n^CF0,30,30\r\n^FO200,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_21_to_40 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Column 4: Libraries 41-60---\r\n\r\n^CF0,30,30\r\n^FO325,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_41_to_60 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Column 5: Libraries 61-80---\r\n\r\n^CF0,30,30\r\n^FO450,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_61_to_80 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Date---\r\n\r\n^FO0,715\r\n^CF0,35\r\n^FB600,2,,C\r\n^FDRun Dates: {{ oldest_rundate|date:\"YMd\" }}-{{ latest_rundate|date:\"YMd\" }}^FS\r\n\r\n^FX ---Barcode---\r\n\r\n^FO255,785\r\n^BXN,3,200^FDinvb|{{ item.barcode_id }}^FS\r\n\r\n^LH0,0\r\n^FX ---End---\r\n^XZ\r\n"}}, {"pk": 2, "model": "inventory.printertemplate", "fields": {"default": true, "item_type": 2, "printer": 1, "template": "^FX=========================\r\n^FX Harddrive Location Tracking Label\r\n^FX 300x375 dots\r\n^FX=========================\r\n\r\n^XA\r\n^LH 0,25\r\n\r\n^FO0,0\r\n^CF0,35\r\n^FB375,1,,C\r\n^FD{{ item.item_type.name }}:^FS\r\n\r\n^FX -------Text contains HD serial #-------------\r\n^FO15,75\r\n^CF0,42\r\n^FB325,3,,C\r\n^FD{% if use_uuid %}{{ item.uuid }}{% else %}{{ item.barcode_id }}{% endif %}^FS\r\n\r\n^FX -------Barcode contains HD serial #-----------\r\n^FO150,200\r\n^BXN,3,200\r\n^FD{% if use_uuid %}invu|{{ item.uuid }}{% else %}invb|{{ item.barcode_id }}{% endif %}^FS\r\n\r\n^XZ\r\n"}}]
+[
+    {"pk": 1,
+     "model": "inventory.itemtype",
+     "fields": {"name": "Hard Drive"}
+    },
+    {"pk":   2, 
+     "model": "inventory.itemtype",
+     "fields": {"name": "Illumina SR Cluster Generation Reagents"}
+    },       
+    {"pk":   3, 
+     "model": "inventory.itemtype",
+     "fields": {"name": "Illumina Library Creation Reagents"}
+    },
+    {"pk":   4, 
+     "model": "inventory.itemtype",
+     "fields": {"name": "Illumina Sequencing Reagents"}
+    },
+    {"pk":   5, 
+     "model": "inventory.itemtype",
+     "fields": {"name": "Illumina PE Cluster Generation Reagents"}
+    },
+    {"pk":   6, 
+     "model": "inventory.itemtype",
+     "fields": {"name": "Library"} 
+    },       
+
+    {"pk": 1, 
+     "model": "inventory.printertemplate", 
+     "fields": {"default": false, 
+                "item_type": 1, 
+                "printer": 2, 
+             "template": "^FX=========================\r\n^FX 3\"x3\" Label\r\n^FX=========================\r\n^XA\r\n\r\n\r\n^FX======== Left Side ===========\r\n\r\n^FX------------\r\n^FX ^LH changes the 0,0 point of all subsequent location references\r\n^FX------------\r\n\r\n^LH0,50\r\n\r\n^FX ---Header---\r\n\r\n^FO25,0\r\n^CF0,50\r\n^FB250,2,,C\r\n^FD{{ item.barcode_id }}^FS\r\n\r\n^FX ---Column 1: Flowcells---\r\n\r\n^FX-----------------\r\n^FX FB command for automatic text formatting:\r\n^FX ^FB[dot width of area], [max # of lines], [change line spacing], [justification: L, C, R, J], [hanging indent]\r\n^FX-----------------\r\n\r\n^CF0,30,30\r\n^FO75,125\r\n^FB275,19,,L\r\n^FD{% for flowcell in flowcell_id_list %}{{ flowcell }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n^FX ---Date---\r\n\r\n^FO0,725\r\n^CF0,35\r\n^FB300,2,,C\r\n^FD{{ oldest_rundate|date:\"YMd\" }} - {{ latest_rundate|date:\"YMd\" }}^FS\r\n\r\n^FX ---Barcode---\r\n\r\n^FO135,795\r\n^BXN,3,200^FDinvb|{{ item.barcode_id }}^FS\r\n\r\n^FX======== Right Side ===========\r\n\r\n^LH300,60\r\n\r\n^FX ---Header---\r\n\r\n^FO0,0\r\n^CF0,50\r\n^FB600,2,,C\r\n^FD{{ barcode_id }}^FS\r\n\r\n^FX ---Dividing line---\r\n\r\n^FX---------------\r\n^FX GB command:\r\n^FX ^GB[box width], [box height], [border thickness], [color: B, W], [corner rounding: 0-8]^FS\r\n^FX---------------\r\n\r\n^FO0,100\r\n^GB0,600,10^FS\r\n\r\n^FX ---Column 2: Libraries 1-20---\r\n\r\n^CF0,30,30\r\n^FO75,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_1_to_20 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Column 3: Libraries 21-40---\r\n\r\n^CF0,30,30\r\n^FO200,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_21_to_40 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Column 4: Libraries 41-60---\r\n\r\n^CF0,30,30\r\n^FO325,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_41_to_60 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Column 5: Libraries 61-80---\r\n\r\n^CF0,30,30\r\n^FO450,100\r\n^FB100,20,,L\r\n^FD{% for lib_id in library_id_list_61_to_80 %}{{ lib_id }}{% if not forloop.last %}\\&{% endif %}{% endfor %}^FS\r\n\r\n^FX ---Date---\r\n\r\n^FO0,715\r\n^CF0,35\r\n^FB600,2,,C\r\n^FDRun Dates: {{ oldest_rundate|date:\"YMd\" }}-{{ latest_rundate|date:\"YMd\" }}^FS\r\n\r\n^FX ---Barcode---\r\n\r\n^FO255,785\r\n^BXN,3,200^FDinvb|{{ item.barcode_id }}^FS\r\n\r\n^LH0,0\r\n^FX ---End---\r\n^XZ\r\n"}},
+ {"pk": 2, 
+  "model": "inventory.printertemplate", 
+  "fields": {"default": true, 
+             "item_type": 2, 
+             "printer": 1, 
+             "template": "^FX=========================\r\n^FX Harddrive Location Tracking Label\r\n^FX 300x375 dots\r\n^FX=========================\r\n\r\n^XA\r\n^LH 0,25\r\n\r\n^FO0,0\r\n^CF0,35\r\n^FB375,1,,C\r\n^FD{{ item.item_type.name }}:^FS\r\n\r\n^FX -------Text contains HD serial #-------------\r\n^FO15,75\r\n^CF0,42\r\n^FB325,3,,C\r\n^FD{% if use_uuid %}{{ item.uuid }}{% else %}{{ item.barcode_id }}{% endif %}^FS\r\n\r\n^FX -------Barcode contains HD serial #-----------\r\n^FO150,200\r\n^BXN,3,200\r\n^FD{% if use_uuid %}invu|{{ item.uuid }}{% else %}invb|{{ item.barcode_id }}{% endif %}^FS\r\n\r\n^XZ\r\n"}}]
index dfb92a96ba8c41007b562f9cca750c6a8cd9a1b5..69848cc9ca21d716ca647757e9d2c68107c25806 100644 (file)
@@ -5,15 +5,14 @@ from htsworkflow.frontend.bcmagic.plugin import register_search_plugin
 from htsworkflow.frontend.experiments.models import FlowCell
 from htsworkflow.frontend.bcmagic.forms import BarcodeMagicForm
 from htsworkflow.frontend.bcmagic.utils import print_zpl_socket
-from htsworkflow.frontend import settings
-#from htsworkflow.util.jsonutil import encode_json
 
+from django.conf import settings
+from django.contrib.auth.decorators import login_required
 from django.core.exceptions import ObjectDoesNotExist
 from django.http import HttpResponse, HttpResponseRedirect
 from django.shortcuts import render_to_response
 from django.template import RequestContext, Template
 from django.template.loader import get_template
-from django.contrib.auth.decorators import login_required
 
 register_search_plugin('Inventory Item', item_search)
 
diff --git a/htsworkflow/frontend/manage.py b/htsworkflow/frontend/manage.py
deleted file mode 100644 (file)
index 5e78ea9..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-from django.core.management import execute_manager
-try:
-    import settings # Assumed to be in the same directory.
-except ImportError:
-    import sys
-    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
-    sys.exit(1)
-
-if __name__ == "__main__":
-    execute_manager(settings)
index 954fce693dc847d725fddc8b3e62b627fe8b924c..73072af68c8967f8f77a8dce6eac096ef264b4f4 100644 (file)
@@ -1,4 +1,4 @@
-from htsworkflow.frontend import settings
+from django.conf import settings
 from django.http import HttpResponse
 from datetime import datetime
 from string import *
index 7584cfe94ed7ddd74f45320def558c4aab14152d..e49d23b16e98cde532377ae7ab08d24bfb7bb7f1 100644 (file)
@@ -4,7 +4,6 @@ from django.db import models
 from django.contrib.auth.models import User, UserManager
 from django.db.models.signals import pre_save, post_save
 from django.db import connection
-from htsworkflow.frontend import settings
 from htsworkflow.frontend.reports.libinfopar import *
 
 
@@ -101,6 +100,10 @@ class Species(models.Model):
   class Meta:
     verbose_name_plural = "species"
     ordering = ["scientific_name"]
+
+  @models.permalink
+  def get_absolute_url(self):
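+    # reversed through samples/urls.py to e.g. /species/5/ (pk illustrative)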
+    return ('htsworkflow.frontend.samples.views.species', [str(self.id)])
   
 class Affiliation(models.Model):
   name = models.CharField(max_length=256, db_index=True, verbose_name='Name')
@@ -285,9 +288,6 @@ class Library(models.Model):
   @models.permalink
   def get_absolute_url(self):
     return ('htsworkflow.frontend.samples.views.library_to_flowcells', [str(self.id)])
-    
-  
-    
 
 
 class HTSUser(User):
index 39797b12d8c574f2660b3a57fb4fb3f432e746aa..3c6543a31bce847e7ab6d4ec72d8ff153e44c836 100644 (file)
@@ -1,4 +1,4 @@
-from htsworkflow.frontend import settings
+from django.conf import settings
 
 import glob
 import os
index 4478d51a380a1e9ccc1b4c74be348d3ebb82417f..f4c810c747142ebf44a87d0e5396adb3e4329fe3 100644 (file)
@@ -57,7 +57,6 @@ class SampleWebTestCase(TestCase):
             lib_response = self.client.get(url, apidata)
             self.failUnlessEqual(lib_response.status_code, 200)
             lib_json = json.loads(lib_response.content)
-            print lib_json
 
             for d in [lib_dict, lib_json]:
                 # amplified_from_sample is a link to the library table,
index 470ab933cccc14168ec453239bf5252bf32c8b9b..eb3fa467aa13dd1b96814696e4846ef3f436d8ba 100644 (file)
@@ -3,4 +3,5 @@ from django.conf.urls.defaults import *
 urlpatterns = patterns('',
     (r"^library/(?P<library_id>\w+)/json", 'htsworkflow.frontend.samples.views.library_json'),
     (r"^species/(?P<species_id>\w+)/json", 'htsworkflow.frontend.samples.views.species_json'),
+    (r"^species/(?P<species_id>\w+)", 'htsworkflow.frontend.samples.views.species'),
 )
index a6286d7313072c706ed2e8225e3fd398d28dd290..2bbcc894a9c28ea5733f3ff62cdbebe1222bb480 100644 (file)
@@ -12,13 +12,12 @@ except ImportError, e:
 from htsworkflow.frontend.auth import require_api_key
 from htsworkflow.frontend.experiments.models import FlowCell, Lane, LANE_STATUS_MAP
 from htsworkflow.frontend.samples.changelist import ChangeList
-from htsworkflow.frontend.samples.models import Library, HTSUser
+from htsworkflow.frontend.samples.models import Library, Species, HTSUser
 from htsworkflow.frontend.samples.results import get_flowcell_result_dict, parse_flowcell_id
 from htsworkflow.frontend.bcmagic.forms import BarcodeMagicForm
 from htsworkflow.pipelines.runfolder import load_pipeline_run_xml
 from htsworkflow.pipelines import runfolder
 from htsworkflow.pipelines.eland import ResultLane
-from htsworkflow.frontend import settings
 from htsworkflow.util.conversion import unicode_or_none
 from htsworkflow.util import makebed
 from htsworkflow.util import opener
@@ -26,10 +25,11 @@ from htsworkflow.util import opener
 
 from django.core.exceptions import ObjectDoesNotExist
 from django.http import HttpResponse, HttpResponseRedirect, Http404
-from django.shortcuts import render_to_response
+from django.shortcuts import render_to_response, get_object_or_404
 from django.template import RequestContext
 from django.template.loader import get_template
 from django.contrib.auth.decorators import login_required
+from django.conf import settings
 
 LANE_LIST = [1,2,3,4,5,6,7,8]
 SAMPLES_CONTEXT_DEFAULTS = {
@@ -68,6 +68,7 @@ def create_library_context(cl):
     #for lib in library_items.object_list:
     for lib in cl.result_list:
        summary = {}
+       summary['library'] = lib
        summary['library_id'] = lib.id
        summary['library_name'] = lib.library_name
        summary['species_name' ] = lib.library_species.scientific_name
@@ -391,13 +392,14 @@ def get_eland_result_type(pathname):
     else:
         return 'unknown'
 
-def _make_eland_results(flowcell_id, lane, interesting_flowcells):
+def _make_eland_results(flowcell_id, lane_number, interesting_flowcells):
     fc_id, status = parse_flowcell_id(flowcell_id)
     cur_fc = interesting_flowcells.get(fc_id, None)
     if cur_fc is None:
       return []
 
     flowcell = FlowCell.objects.get(flowcell_id=flowcell_id)
+    lane = flowcell.lane_set.get(lane_number=lane_number)
     # Loop throw storage devices if a result has been archived
     storage_id_list = []
     if cur_fc is not None:
@@ -421,6 +423,7 @@ def _make_eland_results(flowcell_id, lane, interesting_flowcells):
         result_path = cur_fc[cycle]['eland_results'].get(lane, None)
         result_link = make_result_link(fc_id, cycle, lane, result_path)
         results.append({'flowcell_id': fc_id,
+                        'flowcell': flowcell,
                         'run_date': flowcell.run_date,
                         'cycle': cycle, 
                         'lane': lane, 
@@ -565,6 +568,14 @@ def species_json(request, species_id):
     """
     raise Http404
 
+def species(request, species_id):
+    species = get_object_or_404(Species, id=species_id)
+    
+    context = RequestContext(request,
+                             { 'species': species })
+
+    return render_to_response("samples/species_detail.html", context)
+
 @login_required
 def user_profile(request):
     """
diff --git a/htsworkflow/frontend/settings.py b/htsworkflow/frontend/settings.py
deleted file mode 100644 (file)
index ff88816..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-"""
-Generate settings for the Django Application.
-
-To make it easier to customize the application the settings can be 
-defined in a configuration file read by ConfigParser.
-
-The options understood by this module are (with their defaults):
-
-  [frontend]
-  email_host=localhost
-  email_port=25
-  database_engine=sqlite3
-  database_name=/path/to/db
-
-  [admins]
-  #name1=email1
-
-  [allowed_hosts]
-  #name1=ip
-  localhost=127.0.0.1
-  
-  [allowed_analysis_hosts]
-  #name1=ip
-  localhost=127.0.0.1
-
-"""
-import ConfigParser
-import os
-import shlex
-
-# make epydoc happy
-__docformat__ = "restructuredtext en"
-
-def options_to_list(options, dest, section_name, option_name):
-  """
-  Load a options from section_name and store in a dictionary
-  """
-  if options.has_option(section_name, option_name):
-    opt = options.get(section_name, option_name)
-    dest.extend( shlex.split(opt) )
-      
-def options_to_dict(dest, section_name):
-  """
-  Load a options from section_name and store in a dictionary
-  """
-  if options.has_section(section_name):
-    for name in options.options(section_name):
-      dest[name] = options.get(section_name, name)
-
-# define your defaults here
-options = ConfigParser.SafeConfigParser(
-           { 'email_host': 'localhost',
-             'email_port': '25', 
-             'database_engine': 'sqlite3',
-             'database_name': 
-               os.path.abspath('../../fctracker.db'),
-             'time_zone': 'America/Los_Angeles',
-             'default_pm': '5',
-             'link_flowcell_storage_device_url': "http://localhost:8000/inventory/lts/link/",
-             'printer1_host': '127.0.0.1',
-             'printer1_port': '9100',
-             'printer2_host': '127.0.0.1',
-             'printer2_port': '9100',
-           })
-
-options.read([os.path.expanduser("~/.htsworkflow.ini"),
-              '/etc/htsworkflow.ini',])
-
-# OptionParser will use the dictionary passed into the config parser as
-# 'Default' values in any section. However it still needs an empty section
-# to exist in order to retrieve anything.
-if not options.has_section('frontend'):
-    options.add_section('frontend')
-if not options.has_section('bcprinter'):
-    options.add_section('bcprinter')
-
-
-# Django settings for elandifier project.
-
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-
-ADMINS = []
-options_to_list(options, ADMINS, 'frontend', 'admins')
-
-MANAGERS = []
-options_to_list(options, MANAGERS, 'frontend', 'managers')
-
-AUTHENTICATION_BACKENDS = ( 'samples.auth_backend.HTSUserModelBackend', )
-CUSTOM_USER_MODEL = 'samples.HTSUser' 
-
-EMAIL_HOST = options.get('frontend', 'email_host')
-EMAIL_PORT = int(options.get('frontend', 'email_port'))
-
-if options.has_option('frontend', 'notification_sender'):
-    NOTIFICATION_SENDER = options.get('frontend', 'notification_sender')
-else:
-    NOTIFICATION_SENDER = "noreply@example.com"
-NOTIFICATION_BCC = []
-options_to_list(options, NOTIFICATION_BCC, 'frontend', 'notification_bcc')
-
-# 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
-DATABASE_ENGINE = options.get('frontend', 'database_engine')
-
-# Or path to database file if using sqlite3.
-DATABASE_NAME = options.get('frontend', 'database_name' )
-DATABASE_USER = ''             # Not used with sqlite3.
-DATABASE_PASSWORD = ''         # Not used with sqlite3.
-DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
-DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
-
-# Local time zone for this installation. Choices can be found here:
-# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
-# although not all variations may be possible on all operating systems.
-# If running in a Windows environment this must be set to the same as your
-# system time zone.
-TIME_ZONE = options.get('frontend', 'time_zone')
-
-# Language code for this installation. All choices can be found here:
-# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
-# http://blogs.law.harvard.edu/tech/stories/storyReader$15
-LANGUAGE_CODE = 'en-us'
-
-SITE_ID = 1
-
-# If you set this to False, Django will make some optimizations so as not
-# to load the internationalization machinery.
-USE_I18N = True
-
-# Absolute path to the directory that holds media.
-# Example: "/home/media/media.lawrence.com/"
-MEDIA_ROOT = os.path.abspath(os.path.split(__file__)[0]) + '/static/'
-
-# URL that handles the media served from MEDIA_ROOT.
-# Example: "http://media.lawrence.com"
-MEDIA_URL = '/static/'
-
-# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
-# trailing slash.
-# Examples: "http://foo.com/media/", "/media/".
-ADMIN_MEDIA_PREFIX = '/media/'
-
-# Make this unique, and don't share it with anybody.
-SECRET_KEY = '(ekv^=gf(j9f(x25@a7r+8)hqlz%&_1!tw^75l%^041#vi=@4n'
-
-# some of our urls need an api key
-DEFAULT_API_KEY = 'n7HsXGHIi0vp9j5u4TIRJyqAlXYc4wrH'
-
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
-    'django.template.loaders.filesystem.load_template_source',
-    'django.template.loaders.app_directories.load_template_source',
-#     'django.template.loaders.eggs.load_template_source',
-)
-
-MIDDLEWARE_CLASSES = (
-    'django.middleware.common.CommonMiddleware',
-    'django.contrib.sessions.middleware.SessionMiddleware',
-    'django.contrib.auth.middleware.AuthenticationMiddleware',
-    'django.middleware.doc.XViewMiddleware',
-)
-
-ROOT_URLCONF = 'htsworkflow.frontend.urls'
-
-TEMPLATE_DIRS = (
-    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
-    # Always use forward slashes, even on Windows.
-    # Don't forget to use absolute paths, not relative paths.
-    '/usr/share/python-support/python-django/django/contrib/admin/templates',
-    #'/usr/lib/pymodules/python2.6/django/contrib/admin/templates/',
-    os.path.join(os.path.split(__file__)[0], 'templates'),
-)
-
-INSTALLED_APPS = (
-    'django.contrib.admin',
-    'django.contrib.auth',
-    'django.contrib.contenttypes',
-    'django.contrib.humanize',
-    'django.contrib.sessions',
-    'django.contrib.sites',
-    'htsworkflow.frontend.eland_config',
-    'htsworkflow.frontend.samples',
-    # modules from htsworkflow branch
-    'htsworkflow.frontend.experiments',
-    'htsworkflow.frontend.analysis', 
-    'htsworkflow.frontend.reports',
-    'htsworkflow.frontend.inventory',
-    'htsworkflow.frontend.bcmagic',
-    'django.contrib.databrowse',
-)
-
-# Project specific settings
-
-ALLOWED_IPS={'127.0.0.1': '127.0.0.1'}
-options_to_dict(ALLOWED_IPS, 'allowed_hosts')
-
-ALLOWED_ANALYS_IPS = {'127.0.0.1': '127.0.0.1'}
-options_to_dict(ALLOWED_ANALYS_IPS, 'allowed_analysis_hosts')
-#UPLOADTO_HOME = os.path.abspath('../../uploads')
-#UPLOADTO_CONFIG_FILE = os.path.join(UPLOADTO_HOME, 'eland_config')
-#UPLOADTO_ELAND_RESULT_PACKS = os.path.join(UPLOADTO_HOME, 'eland_results')
-#UPLOADTO_BED_PACKS = os.path.join(UPLOADTO_HOME, 'bed_packs')
-# Where "results_dir" means directory with all the flowcells
-if options.has_option('frontend', 'results_dir'):
-    RESULT_HOME_DIR=os.path.expanduser(options.get('frontend', 'results_dir'))
-else:
-    RESULT_HOME_DIR='/tmp'
-
-LINK_FLOWCELL_STORAGE_DEVICE_URL = options.get('frontend', 'link_flowcell_storage_device_url')
-# PORT 9100 is default for Zebra tabletop/desktop printers
-# PORT 6101 is default for Zebra mobile printers
-BCPRINTER_PRINTER1_HOST = options.get('bcprinter', 'printer1_host')
-BCPRINTER_PRINTER1_PORT = int(options.get('bcprinter', 'printer1_port'))
-BCPRINTER_PRINTER2_HOST = options.get('bcprinter', 'printer2_host')
-BCPRINTER_PRINTER2_PORT = int(options.get('bcprinter', 'printer2_port'))
index 32c55e509797b960980c10c017381dbfd145c0bf..fec1733829d7739f9f71fad34f8a858f58657a36 100644 (file)
@@ -31,3 +31,55 @@ div.msg {
     color: white;
     background: #880000;
 }
+
+div.htswdetail {
+    margin: 0;
+    padding: 0;
+}
+div.htswdetail table, div.htswdetail td {
+    border-style: solid;
+}
+div.htswdetail table {
+    border-width: 0 0 1px 1px;
+    border-spacing: 0;
+    border-collapse: collapse;
+}
+div.htswdetail td {
+    margin: 0;
+    padding: 3px;
+    border-width: 1px 1px 0 0;
+}
+div.htswdetail thead {
+    text-align: center;
+}
+div.htswdetail tbody {
+    text-align: left;
+}
+div.htswdetail h1,
+div.htswdetail h2
+{
+    font-size: 150%;
+}
+
+div.htswdetail h3 {
+     font-size: 125%;
+     margin: 0;
+}
+
+div.htswdetail h4,
+div.htswdetail h5,
+div.htswdetail ul,
+div.htswdetail ol,
+div.htswdetail li
+{
+    list-style: none;
+    margin: 0;
+}
+
+div.htswdetail ul,
+div.htswdetail ol
+{
+    margin-bottom: .5em;
+}
index 2a44ee278eb0cb172517109462296ad9efef0c70..068fbfea4df63e453f2223dd1224e16c92987428 100644 (file)
@@ -5,8 +5,7 @@
   <ul class="object-tools">
     <li><a href="../../../../{{ app_label }}/started/{{ object_id }}/">{% trans "Started Email" %}</a></li>
     <li><a href="history/" class="historylink">{% trans "History" %}</a></li>
-  {% if has_absolute_url %}<li><a href="../../../r/{{ content_type_id }}/{{ object_id }}/" class="viewsitelink">{% trans
- "View on site" %}</a></li>{% endif%}
+  {% if has_absolute_url %}<li><a href="../../../r/{{ content_type_id }}/{{ object_id }}/" class="viewsitelink">{% trans "View on site" %}</a></li>{% endif%}
   </ul>
 {% endif %}{% endif %}
-{% endblock %}
\ No newline at end of file
+{% endblock %}
index 22dad4bfea6f13007bfedded02373a82e3ee9800..479f470fd18c897fb9cf03a5af8c02bd9a15efaf 100644 (file)
 </head>
 <body>
 <!-- Container -->
-   {% if not is_popup %}
+    {% if not is_popup %}
     
     <div id="header">
         <div id="branding">
         {% block branding %}{% endblock %}
         </div>
-        {% if user.is_authenticated and user.is_staff %}
-        <div id="user-tools">{% trans 'Welcome,' %} <strong>{% firstof user.first_name user.username %}</strong>. {% block userlinks %}{% url django-admindocs-docroot as docsroot %}{% if docsroot %}<a href="{{ docsroot }}">{% trans 'Documentation' %}</a> / {% endif %}<a href="/admin/password_change/">{% trans 'Change password' %}</a> / <a href="/admin/logout/">{% trans 'Log out' %}</a>{% endblock %} 
-        </div>
+        <div id="user-tools">
+        {% if user.is_authenticated %}
+        {% trans 'Welcome,' %} <strong>{% firstof user.first_name user.username %}</strong>. {% block userlinks %}{% url django-admindocs-docroot as docsroot %}{% if docsroot %}<a href="{{ docsroot }}">{% trans 'Documentation' %}</a> / {% endif %}<a href="/admin/password_change/">{% trans 'Change password' %}</a> / <a href="/admin/logout/">{% trans 'Log out' %}</a>{% endblock %} 
+        {% else %}
+        <a href="/admin/login/">{% trans 'Log in' %}</a>
         {% endif %}
+        </div>
         {% block nav-global %}{% endblock %}
     </div>
     {% endif %}
-       {% if messages %}
+    {% block breadcrumbs %}{% endblock %}
+    {% if messages %}
         <ul class="messagelist">{% for message in messages %}<li>{{ message }}</li>{% endfor %}</ul>
-        {% endif %}
-
+    {% endif %}
+        
     <!-- Content -->
     <div id="content" class="{% block coltype %}colM{% endblock %}">
         {% block pretitle %}{% endblock %}
diff --git a/htsworkflow/frontend/templates/experiments/flowcell_detail.html b/htsworkflow/frontend/templates/experiments/flowcell_detail.html
new file mode 100644 (file)
index 0000000..3c9329c
--- /dev/null
@@ -0,0 +1,58 @@
+{% extends "base_site.html" %}
+{% load adminmedia humanize i18n %}
+{% block extrahead %}
+    <!-- App Stuff -->
+    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
+    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    
+    {% block additional_javascript %}
+    {% endblock %}
+{% endblock %}
+
+{% block content %}
+<div id="flowcell_detail">
+  <h2>About this Flowcell</h2>
+  <b>Flowcell</b>: 
+    <a href="{{flowcell.get_absolute_url}}" property="libns:flowcell_id">{{flowcell.flowcell_id}}</a><br/>
+  <b>Run Date</b>:
+    <span property="libns:date" content="{{flowcell.run_date|date:'Y-m-d\TH:i:s'}}" datatype="xsd:dateTime">{{ flowcell.run_date }}</span><br/>
+  <b>Type</b>: 
+    <span property="libns:flowcell_type">{{flowcell.flowcell_type}}</span><br/>
+  <b>Read Length</b>:
+    <span property="libns:read_length">{{flowcell.read_length}}</span><br/>
+  <b>Control Lane</b>:
+    <span property="libns:control_lane">{{flowcell.control_lane}}</span><br/>
+
+  <b>Notes</b>:
+    <p property="libns:flowcell_notes">{{flowcell.notes}}</p>
+  <div class="htswdetail">
+    <h2>Lanes</h2>
+    <table>
+      <thead>
+       <tr>
+         <td>Lane</td>
+         <td>Library ID</td>
+         <td>Library Name</td>
+         <td>Species</td>
+         <td>Comment</td>
+       </tr>
+      </thead>
+      <tbody>
+      {% for lane in flowcell.lane_set.all %}
+        <tr rel="libns:has_lane" resource="{{lane.get_absolute_url}}" >
+          <td><a href="{{lane.get_absolute_url}}"> 
+              <span property="libns:lane_number">{{lane.lane_number}}</span></a></td>
+          <td><a href="{{lane.library.get_absolute_url}}" 
+                 rel="libns:library"><span property="libns:library_id"
+              >{{lane.library.id}}</span></a></td>
+          <td><a href="{{lane.library.get_absolute_url}}" rel="libns:library"><span property="libns:name">{{lane.library.library_name}}</span></a></td>
+          <td><a href="{{lane.library.library_species.get_absolute_url}}" rel="libns:species">
+              <span property="libns:species_name">{{ lane.library.library_species.scientific_name }}</span></a></td>
+          <td><span property="libns:comment">{{lane.comment}}</span></td>
+        </tr>
+      {% endfor %}
+      </tbody>
+    </table>
+    </div>
+</div>  
+{% endblock %}
diff --git a/htsworkflow/frontend/templates/experiments/flowcell_lane_detail.html b/htsworkflow/frontend/templates/experiments/flowcell_lane_detail.html
new file mode 100644 (file)
index 0000000..aceb186
--- /dev/null
@@ -0,0 +1,86 @@
+{% extends "base_site.html" %}
+{% load adminmedia humanize i18n %}
+{% block extrahead %}
+    <!-- App Stuff -->
+    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
+    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    
+    {% block additional_javascript %}
+    {% endblock %}
+{% endblock %}
+
+{% block content %}
+<div id="lane_detail">
+  <h2>About this lane</h2>
+  <div rel="libns:flowcell" resource="{{flowcell.get_absolute_url}}">
+  <b>Flowcell</b>: 
+    <a href="{{flowcell.get_absolute_url}}">{{flowcell.flowcell_id}}</a><br/>
+  <b>Run Date</b>:
+    <span property="libns:date" content="{{flowcell.run_date|date:'Y-m-d\TH:i:s'}}" datatype="xsd:dateTime">{{ flowcell.run_date }}</span><br/>
+  <b>Type</b>: 
+    <span property="libns:flowcell_type">{{flowcell.flowcell_type}}</span><br/>
+  </div>
+  <b>Lane</b>: 
+    <span property="libns:lane_number" datatype="xsd:decimal">{{lane.lane_number}}</span><br/>
+  <b>pM</b>:
+    <span property="libns:pM" datatype="xsd:decimal">{{ lane.pM }}</span><br/>
+  <b>Cluster Estimate</b>:
+    <span property="libns:cluster_estimate" datatype="xsd:decimal"
+          content="{{lane.cluster_estimate}}">{{ lane.cluster_estimate|intcomma }}</span><br/>
+  <b>Lane Status</b>: 
+    <span property="libns:status">{{ lane.status }}</span><br/>
+  <b>Comment</b>: 
+    <span property="libns:comment">{{ lane.comment }}</span><br/>
+
+
+  <div rel="libns:library" resource="{{lib.get_absolute_url}}">
+  <h2>About the library</h2>
+  <b>Library ID</b>: 
+    <a href="{{lib.get_absolute_url}}" property="libns:library_id">{{ lib.id }}</a><br/>
+  <b>Name</b>: 
+    <span property="libns:name">{{ lib.library_name }}</span>
+  <br/>
+  <b>Species</b>: 
+    <a href="{{lib.library_species.get_absolute_url}}" rel="libns:species"><span property="libns:species_name">{{ lib.library_species.scientific_name }}</span></a>
+  <br/>
+  <b>Concentration</b>: 
+    <span property="libns:concentration">{{ lib.undiluted_concentration }} ng/µl</span>
+  <br/>
+  <b>Gel Cut Size</b>: 
+    <span property="libns:gel_cut">{{ lib.gel_cut_size }}</span>
+  <br/>
+  <b>Insert Size</b>: 
+    <span property="libns:insert_size">{{ lib.insert_size }}</span>
+  <br/>
+  <b>Background or Cell Line</b>:
+     <span property="libns:cell_line">{{ lib.cell_line }}</span>
+  <br/>
+  <b>Replicate</b>: 
+     <span property="libns:replicate">{{ lib.replicate }}</span>
+  <br/>
+  <b>Library Type</b>:
+     <span property="libns:library_type">{{ lib.library_type }}</span>
+  <br/>
+  <b>Experiment Type</b>:
+     <span property="libns:experiment_type">{{ lib.experiment_type }}</span>
+  <br/>
+  <b>Made By</b>: 
+    <span property="libns:made_by">{{ lib.made_by }}</span>
+  <br/>
+  <b>Creation Date</b>:
+    <span property="libns:date" content="{{lib.creation_date|date:'Y-m-d'}}T00:00:00" datatype="xsd:dateTime">{{ lib.creation_date }}</span>
+  <br/> 
+  <b>Protocol Stopping Point</b>:
+    <span property="libns:stopping_point">{{ lib.stopping_point_name }}</span>
+  <br/> 
+  <b>Affiliations</b>:
+  <ul>
+    {% for individual in lib.affiliations.all %}
+      <li property="libns:affliation" content="{{individual.name}}">
+        {{ individual.name }} ( {{ individual.contact }} )
+      </li>
+    {% endfor %}
+  </ul>
+  </div>
+</div>  
+{% endblock %}
index 412f201c0b26d5e1bafcca0e8637743122939c98..2c2d90bf793acfb87e34d4be28997444e86705f6 100644 (file)
   </thead>
   <tbody>
     {% for result in eland_results %}
-    <tr about="/flowcell/{{result.flowcell_id}}/lane/{{result.lane}}">
+    <tr about="{{result.flowcell.get_absolute_url}}">
       <td property="libns:date" content="{{result.run_date|date:'Y-m-d\TH:i:s'}}" datatype="xsd:dateTime">{{ result.run_date|date}}</td>
       <td>{{ result.cycle }}</td>
-      <td property="libns:flowcell_id">{{ result.flowcell_id }}</td>
-      <td property="libns:lane">{{ result.lane }}</td>
+      <td><a href="{{result.flowcell.get_absolute_url}}"><span property="libns:flowcell_id">{{ result.flowcell_id }}</span></a></td>
+      <td><a href="{{result.lane.get_absolute_url}}" rel="libns:has_lane"><span property="libns:lane_number" datatype="xsd:decimal">{{ result.lane.lane_number }}</span></a></td>
       <td><a href="{{ result.summary_url }}">Summary</a></td>
       <td><a href="{{ result.result_url }}">{{ result.result_label }}</a></td>
       <td>
     <tbody>
   
       {% for lane in lane_summary_list %}
-      <tr about="/flowcell/{{lane.flowcell_id}}/lane/{{lane.lane_id}}/end/{% if lane.end %}{{ lane.end }}{% endif %}">
+      <tr about="/flowcell/{{lane.flowcell_id}}/{{lane.lane_id}}/{% if lane.end %}#end{{ lane.end }}{% endif %}">
         <td>{{ lane.cycle_width }}</td>
         <td>{{ lane.flowcell_id }}</td>
         <td>{{ lane.lane_id }}</td>
     </thead>
     <tbody>
       {% for lane in lib.lane_set.all %}
-      <tr>
-        <td>{{ lane.flowcell.flowcell_id }}</td>
-        <td>{{ lane.lane_number }}</td>
+      <tr rel="libns:has_lane" resource="{{lane.get_absolute_url}}">
+        <td><a href="{{lane.flowcell.get_absolute_url}}" rel="libns:flowcell">
+            <span property="libns:flowcell_id">{{ lane.flowcell.flowcell_id }}</span></a></td>
+        <td><a href="{{lane.get_absolute_url}}"> 
+            <span property="libns:lane_number"  datatype="xsd:decimal"
+               >{{ lane.lane_number }}</span></a></td>
         <td>{{ lane.comment }}</td>
       </tr>
          {% endfor %}
     </tbody>
   </table>
-  <br/>
-  <hr/>
-  <h2>Count of multi-reads</h2>
-  {% for lane in lane_summary_list %}
-    {% if lane.summarized_reads %}
-    <h3>
-      {{lane.cycle_width}} {{ lane.flowcell_id }} lane {{ lane.lane_id }} 
-      {% if lane.end %} end {{ lane.end }}{% endif %}
-    </h3>
-    <ul>
-      {% for name, counts in lane.summarized_reads.items %}
-      <li><b>{{ name }}</b>: {{ counts|intcomma }}</li>
-      {% endfor %}
-    </ul>
-    {% endif %}
-  {% endfor %}
   {% endblock %}
   </div>
 </div>
index 16cd9a32121106d7584fa151e78388bc274f3584..024a83dfa1a0c019c533b07883e330dec3b1ed8b 100644 (file)
     </thead>
     <tbody >
       {% for lib in library_list %}
-      <tr about="/library/{{lib.library_id}}">
-        <td ><a href="/library/{{ lib.library_id }}">{{ lib.amplified_from }}</a></td>
-        <td ><a href="/library/{{ lib.library_id }}" property="libns:library_id">{{ lib.library_id }}</a></td>
-        <td ><a href="/library/{{ lib.library_id }}" property="libns:species_name">{{ lib.species_name }}</a></td>
-        <td ><a href="/library/{{ lib.library_id }}" property="libns:library_name">{{ lib.library_name }}</a></td>
+      <tr about="{{lib.library.get_absolute_url}}">
+        <td ><a href="{{lib.library.get_absolute_url}}">{{ lib.amplified_from }}</a></td>
+        <td ><a href="{{lib.library.get_absolute_url}}"><span property="libns:library_id">{{ lib.library_id }}</span></a></td>
+        <td ><a href="{{lib.library.library_species.get_absolute_url}}" rel="libns:species"><span property="libns:species_name">{{ lib.species_name }}</span></a></td>
+        <td ><a href="{{ lib.library.get_absolute_url }}"><span property="libns:library_name">{{ lib.library_name }}</span></a></td>
         <td  bgcolor="#00BFFF">{{ lib.lanes_run.0.0 }}</td>      
         <td  bgcolor="#00BFFF">{{ lib.lanes_run.0.1 }}</td>      
         <td  bgcolor="#00BFFF">{{ lib.lanes_run.0.2 }}</td>      
diff --git a/htsworkflow/frontend/templates/samples/species_detail.html b/htsworkflow/frontend/templates/samples/species_detail.html
new file mode 100644 (file)
index 0000000..5e47679
--- /dev/null
@@ -0,0 +1,20 @@
+{% extends "base_site.html" %}
+{% load adminmedia humanize i18n %}
+{% block extrahead %}
+    <!-- App Stuff -->
+    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
+    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    
+    {% block additional_javascript %}
+    {% endblock %}
+{% endblock %}
+
+{% block content %}
+<div id="genome_detail">
+  <h2>About this Genome</h2>
+  <b>Common Name</b>: 
+     <span property="libns:species">{{ species.common_name}}</span><br/>
+  <b>Scientific Name</b>: 
+     <span property="libns:species">{{ species.scientific_name}}</span><br/>
+</div>  
+{% endblock %}
index 7856f0628c62f2f57bfc928f4390e3abbdd49dc9..8cdb82c83936fb1c0859765d9bf09b3d31c1ef65 100644 (file)
@@ -9,7 +9,7 @@ admin.autodiscover()
 #databrowse.site.register(Library)
 #databrowse.site.register(FlowCell)
 
-from htsworkflow.frontend import settings
+from django.conf import settings
 
 
 urlpatterns = patterns('',
@@ -25,6 +25,11 @@ urlpatterns = patterns('',
     #(r'^admin/(.*)', admin.site.root),
     # Experiments:
     (r'^experiments/', include('htsworkflow.frontend.experiments.urls')),
+    # Flowcell:
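+    # e.g. /flowcell/FC12150/3/ resolves to flowcell_lane_detail and
+    # /flowcell/FC12150/ to flowcell_detail (ids illustrative)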
+    (r'^flowcell/(?P<flowcell_id>\w+)/(?P<lane_number>\w+)/',
+     'htsworkflow.frontend.experiments.views.flowcell_lane_detail'),
+    (r'^flowcell/(?P<flowcell_id>\w+)/',
+     'htsworkflow.frontend.experiments.views.flowcell_detail'),
     # AnalysTrack:
     #(r'^analysis/', include('htsworkflow.frontend.analysis.urls')),
     # Inventory urls
index 627dfe3667a8500bd9132aba635221d150d81bbe..24cf88488525844cef7ee0f4cffe9454924a8f4d 100644 (file)
@@ -116,7 +116,9 @@ def get_flowcell_cycle(path):
     rest, flowcell = os.path.split(rest)
     cycle_match = re.match("C(?P<start>[0-9]+)-(?P<stop>[0-9]+)", cycle)
     if cycle_match is None:
-        raise ValueError("Expected .../flowcell/cycle/ directory structure")
+        raise ValueError(
+            "Expected .../flowcell/cycle/ directory structure in %s" % \
+            (path,))
     start = cycle_match.group('start')
     if start is not None:
         start = int(start)
@@ -156,6 +158,8 @@ def parse_qseq(path, filename):
     return SequenceFile('qseq', fullpath, flowcell, lane, read, cycle=stop)
 
 def parse_fastq(path, filename):
+    """Parse fastq names
+    """
     flowcell_dir, start, stop = get_flowcell_cycle(path)
     basename, ext = os.path.splitext(filename)
     records = basename.split('_')
@@ -163,19 +167,33 @@ def parse_fastq(path, filename):
     flowcell = records[4]
     lane = int(records[5][1])
     read = int(records[6][1])
-    if records[-1].startswith('pass'):
-        pf = True
-    elif records[-1].startswith('nopass'):
-        pf = False
-    else:
-        raise ValueError("Unrecognized fastq name")
-        
+    pf = parse_fastq_pf_flag(records)
+    
     if flowcell_dir != flowcell:
         logging.warn("flowcell %s found in wrong directory %s" % \
                          (flowcell, path))
 
     return SequenceFile('fastq', fullpath, flowcell, lane, read, pf=pf, cycle=stop)
 
+def parse_fastq_pf_flag(records):
+    """Take a fastq filename split on _ and look for the pass-filter flag
+    """
+    if len(records) < 8:
+        pf = None
+    else:
+        fastq_type = records[-1].lower()
+        if fastq_type.startswith('pass'):
+            pf = True
+        elif fastq_type.startswith('nopass'):
+            pf = False
+        elif fastq_type.startswith('all'):
+            pf = None
+        else:
+            raise ValueError("Unrecognized fastq name %s at %s" % \
+                             (records[-1], os.path.join(path,filename)))
+
+    return pf
+    
 def parse_eland(path, filename, eland_match=None):
     if eland_match is None:
         eland_match = eland_re.match(filename)
@@ -198,6 +216,10 @@ def scan_for_sequences(dirs):
     sequences = []
     for d in dirs:
         logging.info("Scanning %s for sequences" % (d,))
+        if not os.path.exists(d):
+            logging.warn("Flowcell directory %s does not exist" % (d,))
+            continue
+        
         for path, dirname, filenames in os.walk(d):
             for f in filenames:
                 seq = None
diff --git a/htsworkflow/settings.py b/htsworkflow/settings.py
new file mode 100644 (file)
index 0000000..68dfcf9
--- /dev/null
@@ -0,0 +1,234 @@
+"""
+Generate settings for the Django Application.
+
+To make it easier to customize the application the settings can be 
+defined in a configuration file read by ConfigParser.
+
+The options understood by this module are (with their defaults):
+
+  [frontend]
+  email_host=localhost
+  email_port=25
+  database=database
+
+  [database]
+  engine=sqlite3
+  name=/path/to/db
+
+  [admins]
+  #name1=email1
+
+  [allowed_hosts]
+  #name1=ip
+  localhost=127.0.0.1
+  
+  [allowed_analysis_hosts]
+  #name1=ip
+  localhost=127.0.0.1
+
+"""
+import ConfigParser
+import os
+import shlex
+import htsworkflow
+
+HTSWORKFLOW_ROOT = os.path.abspath(os.path.split(htsworkflow.__file__)[0])
+
+# make epydoc happy
+__docformat__ = "restructuredtext en"
+
+def options_to_list(options, dest, section_name, option_name):
+  """
+  Load a options from section_name and store in a dictionary
+  """
+  if options.has_option(section_name, option_name):
+    opt = options.get(section_name, option_name)
+    dest.extend( shlex.split(opt) )
+      
+def options_to_dict(dest, section_name):
+  """
+  Load a options from section_name and store in a dictionary
+  """
+  if options.has_section(section_name):
+    for name in options.options(section_name):
+      dest[name] = options.get(section_name, name)
+
+# define your defaults here
+options = ConfigParser.SafeConfigParser(
+           { 'email_host': 'localhost',
+             'email_port': '25', 
+             'database_engine': 'sqlite3',
+             'database_name': 
+                  os.path.join(HTSWORKFLOW_ROOT, '..', 'fctracker.db'),
+             'time_zone': 'America/Los_Angeles',
+             'default_pm': '5',
+             'link_flowcell_storage_device_url': "http://localhost:8000/inventory/lts/link/",
+             'printer1_host': '127.0.0.1',
+             'printer1_port': '9100',
+             'printer2_host': '127.0.0.1',
+             'printer2_port': '9100',
+           })
+
+options.read([os.path.expanduser("~/.htsworkflow.ini"),
+              '/etc/htsworkflow.ini',])
+
+# OptionParser will use the dictionary passed into the config parser as
+# 'Default' values in any section. However it still needs an empty section
+# to exist in order to retrieve anything.
+if not options.has_section('frontend'):
+    options.add_section('frontend')
+if not options.has_section('bcprinter'):
+    options.add_section('bcprinter')
+
+
+# Django settings for elandifier project.
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+ADMINS = []
+options_to_list(options, ADMINS, 'frontend', 'admins')
+
+MANAGERS = []
+options_to_list(options, MANAGERS, 'frontend', 'managers')
+
+DEFAULT_PM = int(options.get('frontend', 'default_pm'))
+
+AUTHENTICATION_BACKENDS = ( 
+  'htsworkflow.frontend.samples.auth_backend.HTSUserModelBackend', )
+CUSTOM_USER_MODEL = 'samples.HTSUser' 
+
+EMAIL_HOST = options.get('frontend', 'email_host')
+EMAIL_PORT = int(options.get('frontend', 'email_port'))
+
+if options.has_option('frontend', 'notification_sender'):
+    NOTIFICATION_SENDER = options.get('frontend', 'notification_sender')
+else:
+    NOTIFICATION_SENDER = "noreply@example.com"
+NOTIFICATION_BCC = []
+options_to_list(options, NOTIFICATION_BCC, 'frontend', 'notification_bcc')
+
+# ConfigParser.get has no default argument (its third positional is the
+# raw flag), so fall back to the [database] section explicitly
+database_section = 'database'
+if options.has_option('frontend', 'database'):
+    database_section = options.get('frontend', 'database')
+
+if not options.has_section(database_section):
+    raise ConfigParser.NoSectionError(
+        "No database=<database_section_name> defined")
+    
+# 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
+DATABASE_ENGINE = options.get(database_section, 'engine')
+DATABASE_NAME = options.get(database_section, 'name')
+if options.has_option(database_section, 'user'):
+    DATABASE_USER = options.get(database_section, 'user')
+if options.has_option(database_section, 'host'):
+    DATABASE_HOST = options.get(database_section, 'host')
+if options.has_option(database_section, 'port'):
+    DATABASE_PORT = options.get(database_section, 'port')
+
+if options.has_option(database_section, 'password_file'):
+    password_file = options.get(database_section, 'password_file')
+    # rstrip drops the trailing newline readline leaves on the password
+    DATABASE_PASSWORD = open(password_file, 'r').readline().rstrip()
+elif options.has_option(database_section, 'password'):
+    DATABASE_PASSWORD = options.get(database_section, 'password')
+
+# Local time zone for this installation. Choices can be found here:
+# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
+# although not all variations may be possible on all operating systems.
+# If running in a Windows environment this must be set to the same as your
+# system time zone.
+TIME_ZONE = options.get('frontend', 'time_zone')
+
+# Language code for this installation. All choices can be found here:
+# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
+# http://blogs.law.harvard.edu/tech/stories/storyReader$15
+LANGUAGE_CODE = 'en-us'
+
+SITE_ID = 1
+
+# If you set this to False, Django will make some optimizations so as not
+# to load the internationalization machinery.
+USE_I18N = True
+
+# Absolute path to the directory that holds media.
+# Example: "/home/media/media.lawrence.com/"
+MEDIA_ROOT = os.path.join(HTSWORKFLOW_ROOT, 'frontend', 'static', '')
+
+# URL that handles the media served from MEDIA_ROOT.
+# Example: "http://media.lawrence.com"
+MEDIA_URL = '/static/'
+
+# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
+# trailing slash.
+# Examples: "http://foo.com/media/", "/media/".
+ADMIN_MEDIA_PREFIX = '/media/'
+
+# Make this unique, and don't share it with anybody.
+SECRET_KEY = '(ekv^=gf(j9f(x25@a7r+8)hqlz%&_1!tw^75l%^041#vi=@4n'
+
+# some of our urls need an api key
+DEFAULT_API_KEY = 'n7HsXGHIi0vp9j5u4TIRJyqAlXYc4wrH'
+
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+    'django.template.loaders.filesystem.load_template_source',
+    'django.template.loaders.app_directories.load_template_source',
+#     'django.template.loaders.eggs.load_template_source',
+)
+
+MIDDLEWARE_CLASSES = (
+    'django.middleware.common.CommonMiddleware',
+    'django.contrib.sessions.middleware.SessionMiddleware',
+    'django.contrib.auth.middleware.AuthenticationMiddleware',
+    'django.middleware.doc.XViewMiddleware',
+)
+
+ROOT_URLCONF = 'htsworkflow.frontend.urls'
+
+TEMPLATE_DIRS = (
+    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
+    # Always use forward slashes, even on Windows.
+    # Don't forget to use absolute paths, not relative paths.
+    '/usr/share/python-support/python-django/django/contrib/admin/templates',
+    #'/usr/lib/pymodules/python2.6/django/contrib/admin/templates/',
+    os.path.join(HTSWORKFLOW_ROOT, 'frontend','templates'),
+)
+
+INSTALLED_APPS = (
+    'django.contrib.admin',
+    'django.contrib.auth',
+    'django.contrib.contenttypes',
+    'django.contrib.humanize',
+    'django.contrib.sessions',
+    'django.contrib.sites',
+    'htsworkflow.frontend.eland_config',
+    'htsworkflow.frontend.samples',
+    # modules from htsworkflow branch
+    'htsworkflow.frontend.experiments',
+    'htsworkflow.frontend.analysis', 
+    'htsworkflow.frontend.reports',
+    'htsworkflow.frontend.inventory',
+    'htsworkflow.frontend.bcmagic',
+    'django.contrib.databrowse',
+)
+
+# Project specific settings
+
+ALLOWED_IPS={'127.0.0.1': '127.0.0.1'}
+options_to_dict(ALLOWED_IPS, 'allowed_hosts')
+
+ALLOWED_ANALYS_IPS = {'127.0.0.1': '127.0.0.1'}
+options_to_dict(ALLOWED_ANALYS_IPS, 'allowed_analysis_hosts')
+#UPLOADTO_HOME = os.path.abspath('../../uploads')
+#UPLOADTO_CONFIG_FILE = os.path.join(UPLOADTO_HOME, 'eland_config')
+#UPLOADTO_ELAND_RESULT_PACKS = os.path.join(UPLOADTO_HOME, 'eland_results')
+#UPLOADTO_BED_PACKS = os.path.join(UPLOADTO_HOME, 'bed_packs')
+# Where "results_dir" means directory with all the flowcells
+if options.has_option('frontend', 'results_dir'):
+    RESULT_HOME_DIR=os.path.expanduser(options.get('frontend', 'results_dir'))
+else:
+    RESULT_HOME_DIR='/tmp'
+
+LINK_FLOWCELL_STORAGE_DEVICE_URL = options.get('frontend', 'link_flowcell_storage_device_url')
+# PORT 9100 is default for Zebra tabletop/desktop printers
+# PORT 6101 is default for Zebra mobile printers
+BCPRINTER_PRINTER1_HOST = options.get('bcprinter', 'printer1_host')
+BCPRINTER_PRINTER1_PORT = int(options.get('bcprinter', 'printer1_port'))
+BCPRINTER_PRINTER2_HOST = options.get('bcprinter', 'printer2_host')
+BCPRINTER_PRINTER2_PORT = int(options.get('bcprinter', 'printer2_port'))
+
index 316aa490fd0ceaa67a4ed3712ad336f9519b3bbe..96854088c87d46bfaebfa3fe125cd5681c0d6378 100644 (file)
@@ -1,7 +1,6 @@
+"""Common functions for accessing the HTS Workflow REST API
 """
-Common functions for accessing the HTS Workflow REST API
-
-"""
+from ConfigParser import SafeConfigParser
 import logging
 
 # try to deal with python <2.6
@@ -10,10 +9,53 @@ try:
 except ImportError:
   import simplejson as json
 
+import os
+from optparse import OptionGroup
 import urllib
 import urllib2
 import urlparse
 
+
+def add_auth_options(parser):
+    """Add options OptParser configure authentication options
+    """
+    # Load defaults from the config files
+    config = SafeConfigParser()
+    config.read([os.path.expanduser('~/.htsworkflow.ini'),
+                 '/etc/htsworkflow.ini'
+                 ])
+    
+    sequence_archive = None
+    apiid = None
+    apikey = None
+    apihost = None
+    SECTION = 'sequence_archive'
+    if config.has_section(SECTION):
+        # ConfigParser.get has no default argument (the third positional
+        # is the raw flag), so guard each lookup with has_option
+        if config.has_option(SECTION, 'sequence_archive'):
+            sequence_archive = config.get(SECTION, 'sequence_archive')
+            sequence_archive = os.path.expanduser(sequence_archive)
+        if config.has_option(SECTION, 'apiid'):
+            apiid = config.get(SECTION, 'apiid')
+        if config.has_option(SECTION, 'apikey'):
+            apikey = config.get(SECTION, 'apikey')
+        if config.has_option(SECTION, 'host'):
+            apihost = config.get(SECTION, 'host')
+
+    # configuration options
+    group = OptionGroup(parser, "htsw api authentication")
+    group.add_option('--apiid', default=apiid, help="Specify API ID")
+    group.add_option('--apikey', default=apikey, help="Specify API KEY")
+    group.add_option('--host',  default=apihost,
+                     help="specify HTSWorkflow host",)
+    group.add_option('--sequence', default=sequence_archive,
+                     help="sequence repository")
+    parser.add_option_group(group)
+
+def make_auth_from_opts(opts, parser):
+    """Create htsw auth info dictionary from optparse info
+    """
+    if opts.host is None or opts.apiid is None or opts.apikey is None:
+        parser.error("Please specify host url, apiid, apikey")
+        
+    return {'apiid': opts.apiid, 'apikey': opts.apikey }
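+
+# A minimal usage sketch for the two helpers above (assumes a plain
+# optparse.OptionParser; defaults come from ~/.htsworkflow.ini):
+#
+#   parser = OptionParser()
+#   add_auth_options(parser)
+#   opts, args = parser.parse_args()
+#   apidata = make_auth_from_opts(opts, parser)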
+
+
 def library_url(root_url, library_id):
     """
     Return the url for retrieving information about a specific library.
@@ -94,3 +136,24 @@ def retrieve_info(url, apidata):
     headers = web.info()
 
     return json.loads(contents)
+
+class HtswApi(object):
+    def __init__(self, root_url, authdata):
+        self.root_url = root_url
+        self.authdata = authdata
+
+    def get_flowcell(self, flowcellId):
+        url = flowcell_url(self.root_url, flowcellId)
+        return retrieve_info(url, self.authdata)
+
+    def get_library(self, libraryId):
+        url = library_url(self.root_url, libraryId)
+        return retrieve_info(url, self.authdata)
+
+    def get_lanes_for_user(self, user):
+        url = lanes_for_user(self.root_url, user)
+        return retrieve_info(url, self.authdata)
+
+    def get_url(self, url):
+        return retrieve_info(url, self.authdata)
+
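
The pieces above are meant to be used together: add_auth_options() seeds an
OptionParser with defaults from ~/.htsworkflow.ini or /etc/htsworkflow.ini,
make_auth_from_opts() turns the parsed options into the apidata dictionary
that retrieve_info() expects, and HtswApi wraps the module's url helpers
behind a single object. A minimal sketch of the intended call pattern (the
flowcell id is illustrative, and opts.host is assumed to be a usable root
URL):

    from optparse import OptionParser

    from htsworkflow.util import api

    parser = OptionParser()
    api.add_auth_options(parser)
    opts, args = parser.parse_args()

    # make_auth_from_opts() exits via parser.error() if host, apiid,
    # or apikey are still unset after the config files and options
    authdata = api.make_auth_from_opts(opts, parser)

    htsw = api.HtswApi(opts.host, authdata)
    flowcell = htsw.get_flowcell('42JTNAAXX')
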
index e1906eb8acc0be9a19b4ad314a42b6fe94dedf74..513a3a396221149e8fbadee0457638fdd7865be4 100644 (file)
@@ -5,33 +5,43 @@ import unittest
 from htsworkflow.util import validate
 
 class TestValidate(unittest.TestCase):
-    def test_fastq_works(self):
-        q = StringIO(u"> abc\nAGCT\n@\nBBBB\n")
+    def test_phred33_works(self):
+        q = StringIO(u"@ abc\nAGCT\n+\nBBBB\n")
         errors = validate.validate_fastq(q)
         self.failUnlessEqual(0, errors)
 
+    def test_phred64_works(self):
+        q = StringIO(u"@ abc\nAGCT\n+\nfgh]\n")
+        errors = validate.validate_fastq(q, 'phred64')
+        self.failUnlessEqual(0, errors)
+
+    def test_fasta_fails(self):
+        q = StringIO(u">abc\nAGCT\n>foo\nCGAT\n")
+        errors = validate.validate_fastq(q)
+        self.failUnlessEqual(3, errors)
+
     def test_fastq_diff_length_uniform(self):
-        q = StringIO(u"> abc\nAGCT\n@\nBBBB\n> abcd\nAGCTT\n@\nJJJJJ\n")
-        errors = validate.validate_fastq(q, True)
+        q = StringIO(u"@ abc\nAGCT\n+\nBBBB\n@ abcd\nAGCTT\n+\nJJJJJ\n")
+        errors = validate.validate_fastq(q, 'phred33', True)
         self.failUnlessEqual(2, errors)
 
     def test_fastq_diff_length_variable(self):
-        q = StringIO(u"> abc\nAGCT\n@\n@@@@\n> abcd\nAGCTT\n@\nJJJJJ\n")
-        errors = validate.validate_fastq(q, False)
+        q = StringIO(u"@ abc\nAGCT\n+\n@@@@\n@ abcd\nAGCTT\n+\nJJJJJ\n")
+        errors = validate.validate_fastq(q, 'phred33', False)
         self.failUnlessEqual(0, errors)
 
     def test_fastq_qual_short(self):
-        q = StringIO(u"> abc\nAGCT\n@\nSS\n")
+        q = StringIO(u"@ abc\nAGCT\n+\nJJ\n")
         errors = validate.validate_fastq(q)
         self.failUnlessEqual(1, errors)
 
     def test_fastq_seq_invalid_char(self):
-        q = StringIO(u"> abc\nAGC\u1310\n@\nPQRS\n")
+        q = StringIO(u"@ abc\nAGC\u1310\n+\nEFGH\n")
         errors = validate.validate_fastq(q)
         self.failUnlessEqual(1, errors)
 
     def test_fastq_qual_invalid_char(self):
-        q = StringIO(u"> abc\nAGC.\n@\n!@#J\n")
+        q = StringIO(u"+ abc\nAGC.\n+\n!@#J\n")
         errors = validate.validate_fastq(q)
         self.failUnlessEqual(1, errors)
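
The rewritten fixtures follow the real fastq layout ('@' record header, '+'
separator) instead of the old fasta-flavored one, and a test can select an
encoding purely through its quality characters because phred33 and phred64
differ only in their ASCII offset. A quick sketch of that relationship
(decode_quality() is illustrative; the offsets match the character-class
regexes in validate.py below):

    # phred33 stores quality q as chr(q + 33), legal range '!' .. 'J'
    # phred64 stores quality q as chr(q + 64), legal range '@' .. 'h'
    def decode_quality(qual, fmt='phred33'):
        offset = 33 if fmt == 'phred33' else 64
        return [ord(c) - offset for c in qual]

    decode_quality('BBBB')             # [33, 33, 33, 33]
    decode_quality('fgh]', 'phred64')  # [38, 39, 40, 29]
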
 
index f7b821269881c637efcddf24ee428057e2cc5d22..959acc281db0b951d0d0dc2dec17023eddc9be47 100644 (file)
@@ -9,11 +9,24 @@ def main(cmdline=None):
     parser = make_parser()
     opts, args = parser.parse_args(cmdline)
 
+    error_happened = False
     for filename in args[1:]:
         stream = open(filename, 'r')
+
         if opts.fastq:
-            validate_fastq(f, opts.uniform_lengths)
+            errors = validate_fastq(stream,
+                                    opts.format,
+                                    opts.uniform_lengths,
+                                    opts.max_errors)
+            if errors > 0:
+                print "%s failed validation" % (filename,)
+                error_happened = True
+
         stream.close()
+
+    if error_happened:
+        return 1
+
     return 0
 
 def make_parser():
@@ -22,11 +35,17 @@ def make_parser():
                       help="verify arguments are valid fastq file")
     parser.add_option("--uniform-lengths", action="store_true", default=False,
                       help="require all reads to be of the same length")
+    parser.add_option("--max-errors", type="int", default=None)
+    encodings=['phred33', 'phred64']
+    parser.add_option("--format", type="choice",
+                      choices=encodings,
+                      default='phred64',
+                      help="choose quality encoding one of: %s" % (", ".join(encodings)))
                       
     return parser
 
 
-def validate_fastq(stream, uniform_length=False):
+def validate_fastq(stream, format='phred33', uniform_length=False, max_errors=None):
     """Validate that a fastq file isn't corrupted
 
     uniform_length - requires that all sequence & qualities must be
@@ -39,61 +58,72 @@ def validate_fastq(stream, uniform_length=False):
     FQ_SEQ = 2
     FQ_H2 = 3
     FQ_QUAL = 4
-    h1_re = re.compile("^>[ \t\w]*$")
+    h1_re = re.compile(r"^@[\s\w:-]*$")
     seq_re = re.compile("^[AGCT.N]+$", re.IGNORECASE)
-    h2_re = re.compile("^@[ \t\w]*$")
+    h2_re = re.compile(r"^\+[\s\w:-]*$")
     phred33 = re.compile("^[!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJ]+$")
     phred64 = re.compile("^[@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh]+$")
 
+    if format == 'phred33':
+        quality_re = phred33
+    elif format == 'phred64':
+        quality_re = phred64
+    else:
+        raise ValueError("Unrecognized quality format name")
+
     state = FQ_H1
     length = None
     line_number = 1
     errors = 0
     for line in stream:
         line = line.rstrip()
+        len_errors = 0
         if state == FQ_H1:
             # reset length at start of new record for non-uniform check
             if not uniform_length:
                 length = None
             # start of record checks
-            errors = validate_re(h1_re, line, line_number, errors,
-                                 "FAIL H1")
+            errors += validate_re(h1_re, line, line_number, "FAIL H1")
             state = FQ_SEQ
         elif state == FQ_SEQ:
-            errors = validate_re(seq_re, line, line_number, errors,
-                                 "FAIL SEQ")
-            length, errors = validate_length(line, length, line_number,
-                                             errors,
-                                             "FAIL SEQ LEN")
+            errors += validate_re(seq_re, line, line_number, "FAIL SEQ")
+            length, len_errors = validate_length(line, length, line_number,
+                                                 "FAIL SEQ LEN")
+            errors += len_errors
             state = FQ_H2
         elif state == FQ_H2:
-            errors = validate_re(h2_re, line, line_number, errors, "FAIL H2")
+            errors += validate_re(h2_re, line, line_number, "FAIL H2")
             state = FQ_QUAL
         elif state == FQ_QUAL:
-            errors = validate_re(phred64, line, line_number, errors,
-                                 "FAIL QUAL")
-            length, errors = validate_length(line, length, line_number, errors,
-                                            "FAIL QUAL LEN")
+            errors += validate_re(quality_re, line, line_number, "FAIL QUAL")
+            length, len_errors = validate_length(line, length, line_number,
+                                                 "FAIL QUAL LEN")
+            errors += len_errors
             state = FQ_H1
         else:
             raise RuntimeError("Invalid state: %d" % (state,))
         line_number += 1
+        if max_errors is not None and errors > max_errors:
+            break
+
     return errors
 
-def validate_re(pattern, line, line_number, error_count, errmsg):
+def validate_re(pattern, line, line_number, errmsg):
     if pattern.match(line) is None:
         print errmsg, "[%d]: %s" % (line_number, line)
-        error_count += 1
-    return error_count
+        return 1
+    else:
+        return 0
 
-def validate_length(line, line_length, line_number, error_count, errmsg):
+def validate_length(line, line_length, line_number, errmsg):
     """
     if line_length is None, sets it
     """
+    error_count = 0
     if line_length is None:
         line_length = len(line)
     elif len(line) != line_length:
         print errmsg, "%d: %s" %(line_number, line)
-        error_count += 1
+        error_count = 1
     return line_length, error_count
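
One wrinkle worth noting: make_parser() defaults --format to phred64 while
validate_fastq() itself defaults to phred33, so script users and library
callers can get different verdicts on the same file. A minimal sketch of
direct library use, mirroring the tests above:

    from StringIO import StringIO

    from htsworkflow.util.validate import validate_fastq

    good = StringIO(u"@read1\nAGCT\n+\nBBBB\n")
    assert validate_fastq(good) == 0    # phred33 is the function default

    short = StringIO(u"@read1\nAGCT\n+\nBB\n")   # quality shorter than sequence
    assert validate_fastq(short, 'phred64') == 1
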
     
diff --git a/manage.py b/manage.py
new file mode 100644 (file)
index 0000000..5e78ea9
--- /dev/null
+++ b/manage.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+from django.core.management import execute_manager
+try:
+    import settings # Assumed to be in the same directory.
+except ImportError:
+    import sys
+    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
+    sys.exit(1)
+
+if __name__ == "__main__":
+    execute_manager(settings)
index 288ec1ac00c1f05d327c48a0cb4cf735e16d6f3c..42bb61472d0c941787332e071f72f2422293b77e 100755 (executable)
@@ -1,8 +1,5 @@
 #!/usr/bin/env python
 
-from htsworkflow.util.hdquery import get_hd_serial_num
-from htsworkflow.frontend import settings
-
 from optparse import OptionParser
 import os
 import re
@@ -10,6 +7,10 @@ import sys
 import urllib2
 import urlparse
 
+from django.conf import settings
+
+from htsworkflow.util.hdquery import get_hd_serial_num
+
 runfolder_pattern = re.compile(r'[0-9]{6}_[-A-Za-z\d]+_\d+_(?P<flowcell>[A-Z\d]+)\.tgz')
 
 def extract_flowcell(runfolder_name):
diff --git a/settings.py b/settings.py
new file mode 100644 (file)
index 0000000..1731c13
--- /dev/null
@@ -0,0 +1,4 @@
+#
+# provide a pointer to the right settings.py file for
+# programs that assume settings.py lives in the current directory
+from htsworkflow.settings import *
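
These two new top-level files are the glue that keeps Django's conventions
working after the settings move: manage.py's "import settings" finds this
stub, and the stub re-exports htsworkflow/settings.py. Standalone scripts
that do "from django.conf import settings", like the one patched above,
additionally need DJANGO_SETTINGS_MODULE set before the first settings
access; a minimal sketch (assuming the script runs from the source tree so
the stub is importable):

    import os

    # must be set before django.conf.settings is first accessed
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')

    from django.conf import settings
    print settings.RESULT_HOME_DIR
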
index 3a0890f9d14fa2ef1e0b30fc94917b393caf8226..36078dc438b083b7fec43dcbe932bc2bdab21225 100644 (file)
@@ -2,15 +2,9 @@ import unittest
 
 from StringIO import StringIO
 from htsworkflow.automation import copier
+from htsworkflow.automation.solexa import is_runfolder
 
-class testCopier(unittest.TestCase):
-    def test_runfolder_validate(self):
-        self.failUnlessEqual(copier.runfolder_validate(""), False)
-        self.failUnlessEqual(copier.runfolder_validate("1345_23"), False)
-        self.failUnlessEqual(copier.runfolder_validate("123456_asdf-$23'"), False)
-        self.failUnlessEqual(copier.runfolder_validate("123456_USI-EAS44"), True)
-        self.failUnlessEqual(copier.runfolder_validate("123456_USI-EAS44 "), False)
-        
+class testCopier(unittest.TestCase):
     def test_empty_config(self):
         cfg = StringIO("""[fake]
 something: unrelated