apidata = api.make_auth_from_opts(opts, parser)
model = get_model(opts.load_model)
- mapper = DAFMapper(opts.name, opts.daf, model)
- submission_uri = get_submission_uri(opts.name)
+ if opts.name:
+ mapper = DAFMapper(opts.name, opts.daf, model)
+ submission_uri = get_submission_uri(opts.name)
if opts.library_url is not None:
mapper.library_url = opts.library_url
from django.db.models.signals import post_init
from htsworkflow.frontend.samples.models import Library
-from htsworkflow.frontend.samples.results import parse_flowcell_id
+from htsworkflow.util.conversion import parse_flowcell_id
from htsworkflow.pipelines import runfolder
logger = logging.getLogger(__name__)
pass
-def parse_flowcell_id(flowcell_id):
- """
- Return flowcell id and any status encoded in the id
-
- We stored the status information in the flowcell id name.
- this was dumb, but database schemas are hard to update.
- """
- fields = flowcell_id.split()
- fcid = None
- status = None
- if len(fields) > 0:
- fcid = fields[0]
- if len(fields) > 1:
- status = fields[1]
- return fcid, status
-
HAVE_RDF = False
-@unittest.skipIf(not HAVE_RDF,
- 'Install python librdf to test RDFa encoded in pages')
class TestRDFaLibrary(TestCase):
fixtures = ['test_samples.json']
from htsworkflow.frontend.experiments.models import FlowCell, Lane, LANE_STATUS_MAP
from htsworkflow.frontend.samples.changelist import ChangeList
from htsworkflow.frontend.samples.models import Library, Species, HTSUser
-from htsworkflow.frontend.samples.results import get_flowcell_result_dict, parse_flowcell_id
+from htsworkflow.frontend.samples.results import get_flowcell_result_dict
from htsworkflow.frontend.bcmagic.forms import BarcodeMagicForm
from htsworkflow.pipelines.runfolder import load_pipeline_run_xml
from htsworkflow.pipelines import runfolder
from htsworkflow.pipelines.eland import ResultLane
-from htsworkflow.util.conversion import unicode_or_none
+from htsworkflow.util.conversion import unicode_or_none, parse_flowcell_id
from htsworkflow.util import makebed
from htsworkflow.util import opener
# open the srf, fastq, or compressed fastq
if is_srf(args[0]):
- source = srf_open(args[0])
+ source = srf_open(args[0], opts.cnf1)
else:
source = autoopen(args[0])
help="show information about what we're doing.")
parser.add_option('--version', default=False, action="store_true",
help="Report software version")
+ parser.add_option('--cnf1', default=False, action="store_true",
+ help="Force cnf1 mode in srf2fastq")
return parser
Make a stream from srf file using srf2fastq
"""
cmd = ['srf2fastq']
- if is_cnf1(filename):
+ if cnf1 or is_cnf1(filename):
cmd.append('-c')
cmd.append(filename)
import sys
import types
-from htsworkflow.frontend.samples.results import parse_flowcell_id
from htsworkflow.pipelines.sequences import scan_for_sequences
from htsworkflow.pipelines import qseq2fastq
from htsworkflow.pipelines import srf2fastq
from htsworkflow.util.api import HtswApi
+from htsworkflow.util.conversion import parse_flowcell_id
logger = logging.getLogger(__name__)
def get_qseq_condor_header(self):
return """Universe=vanilla
executable=%(exe)s
-error=%(log)s/qseq2fastq.err.$(process).log
-output=%(log)s/qseq2fastq.out.$(process).log
+error=%(log)s/qseq2fastq.$(process).out
+output=%(log)s/qseq2fastq.$(process).out
log=%(log)s/qseq2fastq.log
""" % {'exe': sys.executable,
def get_srf_condor_header(self):
return """Universe=vanilla
executable=%(exe)s
-output=%(log)s/srf_pair_fastq.out.$(process).log
-error=%(log)s/srf_pair_fastq.err.$(process).log
+output=%(log)s/srf_pair_fastq.$(process).out
+error=%(log)s/srf_pair_fastq.$(process).out
log=%(log)s/srf_pair_fastq.log
-environment="%(env)s"
+environment="PYTHONPATH=%(env)s"
""" % {'exe': sys.executable,
'log': self.log_path,
flowcell=None,
mid=None):
py = srf2fastq.__file__
- args = [ py, srf_file, ]
+ args = [ py, srf_file, '--verbose']
if paired:
args.extend(['--left', target_pathname])
# this is ugly. I did it because I was pregenerating the target
submission_name = self.make_submission_name(submission_dir)
submissionNode = self.get_submission_node(submission_dir)
- submission_uri = submissionNode.uri
- print "submission:", str(submission_name), str(submissionNode), str(submission_uri)
-
+ submission_uri = str(submissionNode.uri)
view_name = fromTypedNode(self.model.get_target(view, dafTermOntology['name']))
- submissionView = RDF.Node(RDF.Uri(str(submission_uri) + '/' + view_name))
+ submissionView = RDF.Node(RDF.Uri(submission_uri + '/' + view_name))
self.model.add_statement(
RDF.Statement(self.submissionSet, dafTermOntology['has_submission'], submissionNode))
writer = get_serializer()
turtle = writer.serialize_model_to_string(model)
- self.failUnless(str(signal_view_node) in turtle)
+
+ self.failUnless(str(signal_view_node.uri) in turtle)
statements = list(model.find_statements(
RDF.Statement(
daf_mapper.construct_file_attributes('/tmp/analysis1', libNode, 'filename.bam')
source = daf_mapper.model.get_source(rdfNS['type'], submissionOntology['submission'])
- self.failUnlessEqual(str(source), "<http://jumpgate.caltech.edu/wiki/SubmissionsLog/testfind/analysis1>")
+ self.failUnlessEqual(str(source.uri), "http://jumpgate.caltech.edu/wiki/SubmissionsLog/testfind/analysis1")
view = daf_mapper.model.get_target(source, submissionOntology['has_view'])
- self.failUnlessEqual(str(view), "<http://jumpgate.caltech.edu/wiki/SubmissionsLog/testfind/analysis1/Signal>")
-
+ self.failUnlessEqual(str(view.uri), "http://jumpgate.caltech.edu/wiki/SubmissionsLog/testfind/analysis1/Signal")
def test_library_url(self):
daf_mapper = load_daf_mapper('urltest')
daf_mapper.library_url = 'http://google.com'
self.failUnlessEqual(daf_mapper.library_url, 'http://google.com' )
-
def suite():
suite = unittest.makeSuite(TestDAF, 'test')
suite.addTest(unittest.makeSuite(TestDAFMapper, 'test'))
return None
else:
return unicode(value)
+
+def parse_flowcell_id(flowcell_id):
+    """
+    Return flowcell id and any status encoded in the id
+
+    We stored the status information in the flowcell id name.
+    this was dumb, but database schemas are hard to update.
+
+    Returns a (fcid, status) tuple: fcid is the first
+    whitespace-separated token of flowcell_id, status is the second
+    token or None when no status suffix is present. Both are None
+    when flowcell_id is empty or all whitespace.
+    """
+    # Split on any whitespace; the id is stored as "<fcid>[ <status>...]".
+    fields = flowcell_id.split()
+    fcid = None
+    status = None
+    if len(fields) > 0:
+        fcid = fields[0]
+    if len(fields) > 1:
+        # Only the second token is kept; any further tokens are ignored.
+        status = fields[1]
+    return fcid, status
+