Merge branch 'django1.4'
author Diane Trout <diane@ghic.org>
Tue, 17 Dec 2013 19:20:03 +0000 (11:20 -0800)
committer Diane Trout <diane@ghic.org>
Tue, 17 Dec 2013 19:20:03 +0000 (11:20 -0800)
There was a conflict caused by my qualifying the load_pipeline_run_xml function call.

Conflicts:
htsworkflow/frontend/samples/views.py

116 files changed:
.gitignore
docs/Makefile [new file with mode: 0644]
docs/source/api.rst [new file with mode: 0644]
docs/source/conf.py [new file with mode: 0644]
docs/source/index.rst [new file with mode: 0644]
encode_submission/encode3.py [new file with mode: 0644]
htsworkflow/frontend/bcmagic/urls.py
htsworkflow/frontend/bcmagic/utils.py
htsworkflow/frontend/eland_config/urls.py
htsworkflow/frontend/experiments/admin.py
htsworkflow/frontend/experiments/experiments.py
htsworkflow/frontend/experiments/fixtures/initial_data.json
htsworkflow/frontend/experiments/fixtures/test_flowcells.json
htsworkflow/frontend/experiments/models.py
htsworkflow/frontend/experiments/test_experiments.py
htsworkflow/frontend/experiments/urls.py
htsworkflow/frontend/inventory/models.py
htsworkflow/frontend/inventory/test_inventory.py
htsworkflow/frontend/inventory/urls.py
htsworkflow/frontend/inventory/views.py
htsworkflow/frontend/reports/urls.py
htsworkflow/frontend/samples/admin.py
htsworkflow/frontend/samples/changelist.py
htsworkflow/frontend/samples/fixtures/initial_data.json
htsworkflow/frontend/samples/models.py
htsworkflow/frontend/samples/test_samples.py
htsworkflow/frontend/samples/urls.py
htsworkflow/frontend/samples/views.py
htsworkflow/frontend/static/css/dashboard.css [new file with mode: 0644]
htsworkflow/frontend/static/img/dna80.png [new file with mode: 0644]
htsworkflow/frontend/templates/admin/index.html
htsworkflow/frontend/templates/base.html
htsworkflow/frontend/templates/base_site.html
htsworkflow/frontend/templates/experiments/flowcell_detail.html
htsworkflow/frontend/templates/experiments/flowcell_header.html
htsworkflow/frontend/templates/experiments/flowcell_lane_detail.html
htsworkflow/frontend/templates/experiments/sequencer.html
htsworkflow/frontend/templates/inventory/inventory_all_index.html
htsworkflow/frontend/templates/inventory/inventory_index.html
htsworkflow/frontend/templates/inventory/inventory_itemtype_index.html
htsworkflow/frontend/templates/inventory/inventory_summary.html
htsworkflow/frontend/templates/registration/login.html
htsworkflow/frontend/templates/sample_header.html
htsworkflow/frontend/templates/samples/antibody_index.html
htsworkflow/frontend/templates/samples/lanes_for.html
htsworkflow/frontend/templates/samples/library_detail.html
htsworkflow/frontend/templates/samples/library_index.html
htsworkflow/frontend/templates/samples/species_detail.html
htsworkflow/frontend/templates/search_form.html
htsworkflow/frontend/urls.py
htsworkflow/pipelines/__init__.py
htsworkflow/pipelines/bustard.py
htsworkflow/pipelines/desplit_fastq.py
htsworkflow/pipelines/eland.py
htsworkflow/pipelines/firecrest.py
htsworkflow/pipelines/genomemap.py
htsworkflow/pipelines/gerald.py
htsworkflow/pipelines/ipar.py
htsworkflow/pipelines/qseq2fastq.py
htsworkflow/pipelines/retrieve_config.py
htsworkflow/pipelines/runfolder.py
htsworkflow/pipelines/samplekey.py
htsworkflow/pipelines/srf.py
htsworkflow/pipelines/srf2fastq.py
htsworkflow/pipelines/summary.py
htsworkflow/pipelines/test/simulate_runfolder.py
htsworkflow/pipelines/test/test_extract_results.py
htsworkflow/pipelines/test/test_genomemap.py
htsworkflow/pipelines/test/test_runfolder026.py
htsworkflow/pipelines/test/test_runfolder030.py
htsworkflow/pipelines/test/test_runfolder110.py
htsworkflow/pipelines/test/test_runfolder_casava_1_7.py
htsworkflow/pipelines/test/test_runfolder_ipar100.py
htsworkflow/pipelines/test/test_runfolder_ipar130.py
htsworkflow/pipelines/test/test_runfolder_pair.py
htsworkflow/pipelines/test/test_runfolder_rta.py
htsworkflow/pipelines/test/test_runfolder_rta160.py
htsworkflow/pipelines/test/test_runfolder_rta180.py
htsworkflow/pipelines/test/test_runfolder_rta1_12.py
htsworkflow/pipelines/test/test_runfolder_utils.py [new file with mode: 0644]
htsworkflow/pipelines/test/testdata/1_12/basecall_stats/All.htm [new file with mode: 0644]
htsworkflow/pipelines/test/testdata/1_12/basecall_stats/Demultiplex_Stats.htm [new file with mode: 0644]
htsworkflow/pipelines/test/testdata/1_12/basecall_stats/IVC.htm [new file with mode: 0644]
htsworkflow/settings.py
htsworkflow/submission/results.py
htsworkflow/submission/submission.py
htsworkflow/submission/test/submission_test_common.py [new file with mode: 0644]
htsworkflow/submission/test/test_condorfastq.py
htsworkflow/submission/test/test_daf.py
htsworkflow/submission/test/test_results.py
htsworkflow/submission/test/test_submission.py [new file with mode: 0644]
htsworkflow/submission/trackhub_submission.py [new file with mode: 0644]
htsworkflow/templates/manifest.txt [new file with mode: 0644]
htsworkflow/templates/submission_view_rdfs_label_metadata.sparql [new file with mode: 0644]
htsworkflow/templates/trackDb.txt [new file with mode: 0644]
htsworkflow/templates/trackhub_manifest.sparql [new file with mode: 0644]
htsworkflow/templates/trackhub_samples.sparql [new file with mode: 0644]
htsworkflow/templates/trackhub_term_values.sparql [new file with mode: 0644]
htsworkflow/util/ethelp.py
htsworkflow/util/hashfile.py
htsworkflow/util/rdfhelp.py
htsworkflow/util/rdfns.py
htsworkflow/util/schemas/htsworkflow.turtle
htsworkflow/util/test/__init__.py
htsworkflow/util/test/test_rdfhelp.py
htsworkflow/util/test/test_ucsc.py [new file with mode: 0644]
htsworkflow/util/test/test_url.py [new file with mode: 0644]
htsworkflow/util/test/test_version.py [new file with mode: 0644]
htsworkflow/util/test/testdata/foo.bigWig [new file with mode: 0644]
htsworkflow/util/ucsc.py [new file with mode: 0644]
htsworkflow/util/url.py
htsworkflow/util/version.py [new file with mode: 0644]
htsworkflow/version.py [deleted file]
htsworkflow/wsgi.py [new file with mode: 0644]
manage.py [changed mode: 0644->0755]
setup.py

diff --git a/.gitignore b/.gitignore
index 6d73f11d37d699e92a21d1ecb4c7d3b1115d0f83..3eb65bb2d0094085311b677055db9b2a331daa01 100644 (file)
@@ -8,3 +8,4 @@
 .tox
 dist
 RELEASE-VERSION
+docs/build/
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644 (file)
index 0000000..6fe1342
--- /dev/null
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man texinfo info changes linkcheck doctest gettext
+
+help:
+       @echo "Please use \`make <target>' where <target> is one of"
+       @echo "  html       to make standalone HTML files"
+       @echo "  dirhtml    to make HTML files named index.html in directories"
+       @echo "  singlehtml to make a single large HTML file"
+       @echo "  pickle     to make pickle files"
+       @echo "  json       to make JSON files"
+       @echo "  htmlhelp   to make HTML files and a HTML help project"
+       @echo "  qthelp     to make HTML files and a qthelp project"
+       @echo "  devhelp    to make HTML files and a Devhelp project"
+       @echo "  epub       to make an epub"
+       @echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+       @echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+       @echo "  text       to make text files"
+       @echo "  man        to make manual pages"
+       @echo "  texinfo    to make Texinfo files"
+       @echo "  info       to make Texinfo files and run them through makeinfo"
+       @echo "  gettext    to make PO message catalogs"
+       @echo "  changes    to make an overview of all changed/added/deprecated items"
+       @echo "  linkcheck  to check all external links for integrity"
+       @echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+       -rm -rf $(BUILDDIR)/*
+
+html:
+       $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+       @echo
+       @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+       $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+       @echo
+       @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+       $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+       @echo
+       @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+       $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+       @echo
+       @echo "Build finished; now you can process the pickle files."
+
+json:
+       $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+       @echo
+       @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+       $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+       @echo
+       @echo "Build finished; now you can run HTML Help Workshop with the" \
+             ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+       $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+       @echo
+       @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+             ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+       @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/HTS-Workflow.qhcp"
+       @echo "To view the help file:"
+       @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/HTS-Workflow.qhc"
+
+devhelp:
+       $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+       @echo
+       @echo "Build finished."
+       @echo "To view the help file:"
+       @echo "# mkdir -p $$HOME/.local/share/devhelp/HTS-Workflow"
+       @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/HTS-Workflow"
+       @echo "# devhelp"
+
+epub:
+       $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+       @echo
+       @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+       $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+       @echo
+       @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+       @echo "Run \`make' in that directory to run these through (pdf)latex" \
+             "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+       $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+       @echo "Running LaTeX files through pdflatex..."
+       $(MAKE) -C $(BUILDDIR)/latex all-pdf
+       @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+       $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+       @echo
+       @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+       $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+       @echo
+       @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+       $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+       @echo
+       @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+       @echo "Run \`make' in that directory to run these through makeinfo" \
+             "(use \`make info' here to do that automatically)."
+
+info:
+       $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+       @echo "Running Texinfo files through makeinfo..."
+       make -C $(BUILDDIR)/texinfo info
+       @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+       $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+       @echo
+       @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+       $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+       @echo
+       @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+       $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+       @echo
+       @echo "Link check complete; look for any errors in the above output " \
+             "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+       $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+       @echo "Testing of doctests in the sources finished, look at the " \
+             "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/docs/source/api.rst b/docs/source/api.rst
new file mode 100644 (file)
index 0000000..e6c6c11
--- /dev/null
@@ -0,0 +1,134 @@
+Runfolder Processing
+====================
+
+Runfolder
+---------
+
+The PipelineRun class is designed to combine information
+from the following importers.
+
+* Image Analysis (one of the following)
+
+  * :class:`Firecrest`
+  * :class:`IPAR`
+
+* BaseCaller
+
+  * :class:`Bustard`
+
+* Sequence Alignment
+
+  * :class:`Gerald`
+
+.. automodule:: htsworkflow.pipelines.runfolder
+   :members:
+
+.. _Firecrest:
+
+Firecrest
+---------
+
+.. automodule:: htsworkflow.pipelines.firecrest
+   :members:
+
+.. _IPAR:
+
+IPAR
+----
+
+.. automodule:: htsworkflow.pipelines.ipar
+   :members:
+
+.. _Bustard:
+
+Bustard
+-------
+.. automodule:: htsworkflow.pipelines.bustard
+   :members:
+
+.. _Gerald:
+
+Gerald
+------
+
+.. automodule:: htsworkflow.pipelines.gerald
+   :members:
+
+.. _Eland:
+
+Eland
+-----
+
+.. automodule:: htsworkflow.pipelines.eland
+   :members:
+
+.. _Summary:
+
+Summary
+-------
+
+.. automodule:: htsworkflow.pipelines.summary
+   :members:
+   
+Sequence Archival
+=================
+
+srf
+---
+
+.. automodule:: htsworkflow.pipelines.srf
+   :members:
+   
+Fastq conversion
+================
+
+srf2fastq
+---------
+
+.. automodule:: htsworkflow.pipelines.srf2fastq
+   :members:
+
+qseq2fastq
+----------
+
+.. automodule:: htsworkflow.pipelines.qseq2fastq
+   :members:
+   
+desplit_fastq
+-------------
+
+.. automodule:: htsworkflow.pipelines.desplit_fastq
+   :members:
+   
+sequences
+---------
+
+.. automodule:: htsworkflow.pipelines.sequences
+   :members:
+
+Utilities
+=========
+
+.. automodule:: htsworkflow.pipelines.genome_mapper
+   :members:
+   
+.. automodule:: htsworkflow.pipelines.genomemap
+   :members:
+   
+.. automodule:: htsworkflow.pipelines.samplekey
+   :members:
+   
+.. automodule:: htsworkflow.pipelines.recipe_parser
+   :members:
+   
+Run Automation
+==============
+
+.. automodule:: htsworkflow.pipelines.configure_run
+   :members:
+   
+.. automodule:: htsworkflow.pipelines.retrieve_config
+   :members:
+
+.. automodule:: htsworkflow.pipelines.run_status
+   :members:
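The api.rst added above documents how a PipelineRun combines the importers. As a rough orientation only, a minimal sketch of driving that interface (get_runs and the name attribute are assumptions about the runfolder module, not confirmed by this diff):

    # hypothetical usage of htsworkflow.pipelines.runfolder (Python 2)
    from htsworkflow.pipelines import runfolder

    # scan a runfolder; each PipelineRun is assumed to pair an image analysis
    # (Firecrest or IPAR) with a base caller (Bustard) and aligner (Gerald)
    runs = runfolder.get_runs('/data/090608_HWI-EAS229_0042_FC12345')
    for run in runs:
        print run.name  # assumed attribute; see runfolder.py for specifics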
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644 (file)
index 0000000..08f47eb
--- /dev/null
@@ -0,0 +1,290 @@
+# -*- coding: utf-8 -*-
+#
+# HTS-Workflow documentation build configuration file, created by
+# sphinx-quickstart on Mon Jan 14 10:18:40 2013.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+sys.path.insert(0, os.path.abspath('../../htsworkflow'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'HTS-Workflow'
+copyright = u'2013, Diane Trout'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.5'
+# The full version, including alpha/beta/rc tags.
+release = '0.5.4'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['.static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'HTS-Workflowdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'HTS-Workflow.tex', u'HTS-Workflow Documentation',
+   u'Diane Trout', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'hts-workflow', u'HTS-Workflow Documentation',
+     [u'Diane Trout'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'HTS-Workflow', u'HTS-Workflow Documentation',
+   u'Diane Trout', 'HTS-Workflow', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = u'HTS-Workflow'
+epub_author = u'Diane Trout'
+epub_publisher = u'Diane Trout'
+epub_copyright = u'2013, Diane Trout'
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# A tuple containing the cover image and cover page html template filenames.
+#epub_cover = ()
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644 (file)
index 0000000..9a15dbd
--- /dev/null
@@ -0,0 +1,22 @@
+.. HTS-Workflow documentation master file, created by
+   sphinx-quickstart on Mon Jan 14 10:18:40 2013.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to HTS-Workflow's documentation!
+========================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   api
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/encode_submission/encode3.py b/encode_submission/encode3.py
new file mode 100644 (file)
index 0000000..a77151e
--- /dev/null
@@ -0,0 +1,205 @@
+"""Create a track hub 
+"""
+
+#!/usr/bin/env python
+from ConfigParser import SafeConfigParser
+import fnmatch
+from glob import glob
+import json
+import logging
+import netrc
+from optparse import OptionParser, OptionGroup
+import os
+from pprint import pprint, pformat
+import shlex
+from StringIO import StringIO
+import stat
+import sys
+import time
+import types
+import urllib
+import urllib2
+import urlparse
+from zipfile import ZipFile
+
+import RDF
+
+if not 'DJANGO_SETTINGS_MODULE' in os.environ:
+    os.environ['DJANGO_SETTINGS_MODULE'] = 'htsworkflow.settings'
+
+from htsworkflow.util import api
+from htsworkflow.util.rdfhelp import \
+     dafTermOntology, \
+     fromTypedNode, \
+     get_model, \
+     get_serializer, \
+     load_into_model, \
+     sparql_query, \
+     submissionOntology
+from htsworkflow.submission.daf import get_submission_uri
+from htsworkflow.submission.submission import list_submissions
+from htsworkflow.submission.results import ResultMap
+from htsworkflow.submission.trackhub_submission import TrackHubSubmission
+from htsworkflow.submission.condorfastq import CondorFastqExtract
+
+logger = logging.getLogger(__name__)
+
+INDENTED = "  " + os.linesep
+
+def main(cmdline=None):
+    parser = make_parser()
+    opts, args = parser.parse_args(cmdline)
+    submission_uri = None
+
+    if opts.debug:
+        logging.basicConfig(level = logging.DEBUG )
+    elif opts.verbose:
+        logging.basicConfig(level = logging.INFO )
+    else:
+        logging.basicConfig(level = logging.WARNING )
+
+    apidata = api.make_auth_from_opts(opts, parser)
+
+    model = get_model(opts.model, opts.db_path)
+
+    submission_names = list(list_submissions(model))
+    name = opts.name
+    if len(submission_names) == 0 and opts.name is None:
+        parser.error("Please name this submission")
+    elif opts.name and submission_names and opts.name not in submission_names:
+        parser.error("{} is not in this model. Choose from: {}{}".format(
+            opts.name,
+            os.linesep,
+            INDENTED.join(submission_names)))
+    elif opts.name is None and len(submission_names) > 1:
+        parser.error("Please choose submission name from: {}{}".format(
+            os.linesep,
+            INDENTED.join(submission_names)))
+    elif len(submission_names) == 1:
+        name = submission_names[0]
+        
+    if name:
+        submission_uri = get_submission_uri(name)
+        logger.info('Submission URI: %s', submission_uri)
+    else:
+        logger.debug('No name, unable to create submission uri')
+
+    mapper = None
+    if opts.make_track_hub:
+        mapper = TrackHubSubmission(name,
+                                    model,
+                                    baseurl=opts.make_track_hub,
+                                    baseupload=opts.track_hub_upload,
+                                    host=opts.host)
+
+    if opts.load_rdf is not None:
+        if submission_uri is None:
+            parser.error("Please specify the submission name")
+        load_into_model(model, 'turtle', opts.load_rdf, submission_uri)
+
+    results = ResultMap()
+    for a in args:
+        if os.path.exists(a):
+            results.add_results_from_file(a)
+        else:
+            logger.warn("File %s doesn't exist.", a)
+
+    if opts.make_link_tree_from is not None:
+        results.make_tree_from(opts.make_link_tree_from, link=True)
+
+    if opts.copy_tree_from is not None:
+        results.make_tree_from(opts.copy_tree_from, link=False)
+
+    if opts.fastq:
+        logger.info("Building fastq extraction scripts")
+        flowcells = os.path.join(opts.sequence, 'flowcells')
+        extractor = CondorFastqExtract(opts.host, flowcells,
+                                       model=opts.model,
+                                       force=opts.force)
+        extractor.create_scripts(results)
+
+    if opts.scan_submission:
+        if name is None:
+            parser.error("Please define a submission name")
+        if mapper is None:
+            parser.error("Scan submission needs --make-track-hub=public-url")
+        mapper.scan_submission_dirs(results)
+
+    if opts.make_track_hub:
+        trackdb = mapper.make_hub(results)
+
+    if opts.make_manifest:
+        make_manifest(mapper, results, opts.make_manifest)
+        
+    if opts.sparql:
+        sparql_query(model, opts.sparql)
+
+    if opts.print_rdf:
+        writer = get_serializer()
+        print writer.serialize_model_to_string(model)
+
+
+def make_manifest(mapper, results, filename=None):
+    manifest = mapper.make_manifest(results)
+
+    if filename is None or filename == '-':
+        sys.stdout.write(manifest)
+    else:
+        with open(filename, 'w') as manifeststream:
+            manifeststream.write(manifest)
+        
+def make_parser():
+    parser = OptionParser()
+
+    model = OptionGroup(parser, 'model')
+    model.add_option('--name', help="Set submission name")
+    model.add_option('--db-path', default=None,
+                     help="set rdf database path")
+    model.add_option('--model', default=None,
+      help="Load model database")
+    model.add_option('--load-rdf', default=None,
+      help="load rdf statements into model")
+    model.add_option('--sparql', default=None, help="execute sparql query")
+    model.add_option('--print-rdf', action="store_true", default=False,
+      help="print ending model state")
+    parser.add_option_group(model)
+    # commands
+    commands = OptionGroup(parser, 'commands')
+    commands.add_option('--make-link-tree-from',
+                      help="create directories & link data files",
+                      default=None)
+    commands.add_option('--copy-tree-from',
+                      help="create directories & copy data files",
+                      default=None)
+    commands.add_option('--fastq', default=False, action="store_true",
+                        help="generate scripts for making fastq files")
+    commands.add_option('--scan-submission', default=False, action="store_true",
+                      help="Import metadata for submission into our model")
+    commands.add_option('--make-track-hub', default=None,
+                        help='web root that will host the trackhub.')
+    commands.add_option('--track-hub-upload', default=None,
+                        help='where to upload track hub <host>:<path>')
+    commands.add_option('--make-manifest',
+                        help='write the manifest to the named file, or - for stdout',
+                        default=None)
+
+
+    parser.add_option_group(commands)
+
+    parser.add_option('--force', default=False, action="store_true",
+                      help="Force regenerating fastqs")
+    parser.add_option('--daf', default=None, help='specify daf name')
+    parser.add_option('--library-url', default=None,
+                      help="specify an alternate source for library information")
+    # debugging
+    parser.add_option('--verbose', default=False, action="store_true",
+                      help='verbose logging')
+    parser.add_option('--debug', default=False, action="store_true",
+                      help='debug logging')
+
+    api.add_auth_options(parser)
+
+    return parser
+
+if __name__ == "__main__":
+    main()
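Because main() takes an argv-style list, the new encode3.py driver can also be exercised programmatically. A hedged sketch using only options defined in make_parser() above (the file names and hub URL are illustrative, and the auth options added by api.add_auth_options must also be satisfiable):

    # illustrative only; assumes encode_submission is importable as a package
    from encode_submission.encode3 import main

    main(['--name', 'my-submission',          # submission name in the model
          '--load-rdf', 'metadata.turtle',    # statements to load into the model
          '--make-track-hub', 'http://example.org/hubs/',
          '--scan-submission',                # import submission metadata
          'results.txt'])                     # positional args feed the ResultMap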
diff --git a/htsworkflow/frontend/bcmagic/urls.py b/htsworkflow/frontend/bcmagic/urls.py
index 62497d2efdd4a1bdf2c53fdf0919410797aecf15..d5c9627ce701ed4639b9648e548353d6b57b2067 100644 (file)
@@ -1,7 +1,7 @@
-from django.conf.urls.defaults import *
+from django.conf.urls import patterns
 
 urlpatterns = patterns('',
     (r'^json_test/$', 'htsworkflow.frontend.bcmagic.views.json_test'),
     (r'^magic/$', 'htsworkflow.frontend.bcmagic.views.magic'),
     (r'^$', 'htsworkflow.frontend.bcmagic.views.index'),
-)
\ No newline at end of file
+)
diff --git a/htsworkflow/frontend/bcmagic/utils.py b/htsworkflow/frontend/bcmagic/utils.py
index 5acc6ddefd6045473ad498c000ec60ae48f8b49c..ace241f4a748cbebe3aab4f98b56459f9fae8224 100644 (file)
@@ -5,25 +5,30 @@ import socket
 import StringIO
 
 
-def print_zpl(zpl_text, host=settings.BCPRINTER_PRINTER1_HOST):
+def print_zpl(zpl_text, host=None):
     """
     Sends zpl_text to printer
     """
+    if not host:
+        host = settings.BCPRINTER_PRINTER1_HOST
     ftp = ftplib.FTP(host=host, user='blank', passwd='')
     ftp.login()
     ftp.storlines("STOR printme.txt", StringIO.StringIO(zpl_text))
     ftp.quit()
 
 
-def print_zpl_socket(zpl_text,
-                     host=settings.BCPRINTER_PRINTER1_HOST,
-                     port=settings.BCPRINTER_PRINTER1_PORT):
+def print_zpl_socket(zpl_text, host=None, port=None):
     """
     Sends zpl_text to printer via a socket
 
     if zpl_text is a list of zpl_texts, it will print each one
     in that list.
     """
+    
+    if not host:
+        host=settings.BCPRINTER_PRINTER1_HOST
+    if not port:
+        port=settings.BCPRINTER_PRINTER1_PORT
 
     # Process anyway if zpl_text is a list.
     if type(zpl_text) is list:
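The point of the rewritten signatures above is that a default-argument expression is evaluated once, at import time, so the old code made importing bcmagic.utils fail on any machine where the BCPRINTER settings (or a settings module at all) were absent. A minimal sketch of the deferred-default idiom:

    from django.conf import settings

    def lookup_printer_host(host=None):
        # resolve the setting at call time, not import time, so merely
        # importing this module never touches django's settings
        if not host:
            host = settings.BCPRINTER_PRINTER1_HOST
        return host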
diff --git a/htsworkflow/frontend/eland_config/urls.py b/htsworkflow/frontend/eland_config/urls.py
index 129f57c87736a3b2558259f13bdfc5dc68344be1..b4bc42b7ba4f1cbbc3443018676a8a3474f0a83f 100644 (file)
@@ -1,10 +1,10 @@
-from django.conf.urls.defaults import *
+from django.conf.urls import patterns, url
 
 urlpatterns = patterns('',
-    # Example:
-    
-    (r'^(?P<flowcell>\w+)/$', 'htsworkflow.frontend.eland_config.views.config'),
-    (r'^$', 'htsworkflow.frontend.eland_config.views.config'),
-    #(r'^$', 'htsworkflow.frontend.eland_config.views.index')
+    ## Example:
+
+    url(r'^(?P<flowcell>\w+)/$', 'htsworkflow.frontend.eland_config.views.config'),
+    url(r'^$', 'htsworkflow.frontend.eland_config.views.config'),
+    #url(r'^$', 'htsworkflow.frontend.eland_config.views.index')
 
 )
diff --git a/htsworkflow/frontend/experiments/admin.py b/htsworkflow/frontend/experiments/admin.py
index 23c8098400643c402d302f1a4c3668f2988529d2..6cf7aeb4c7a29a53fa4bfa4db0dc7ef4082e98c2 100644 (file)
@@ -107,7 +107,7 @@ class FlowCellOptions(admin.ModelAdmin):
         '=lane__library__id',
         'lane__library__library_name')
     list_display = ('flowcell_id','run_date','Lanes')
-    list_filter = ('sequencer','cluster_station')
+    list_filter = ('sequencer','cluster_station', 'paired_end')
     fieldsets = (
         (None, {
           'fields': ('run_date', ('flowcell_id','cluster_station','sequencer'),
diff --git a/htsworkflow/frontend/experiments/experiments.py b/htsworkflow/frontend/experiments/experiments.py
index 1ccba526adb75272273985c0095db2361e09d40f..f24d13d5b8dc3e830b2b3c1548251886ec7f1299 100644 (file)
@@ -9,7 +9,7 @@ import os
 import re
 
 from django.contrib.auth.decorators import login_required
-from django.contrib.csrf.middleware import csrf_exempt
+from django.views.decorators.csrf import csrf_exempt
 from django.core.exceptions import ObjectDoesNotExist
 from django.core.mail import send_mail, mail_admins
 from django.http import HttpResponse, Http404
diff --git a/htsworkflow/frontend/experiments/fixtures/initial_data.json b/htsworkflow/frontend/experiments/fixtures/initial_data.json
index 3371978a50174ef480d12cb3c5eab759aae8a4b9..e18b5ff62a77bc3036c52518b28ebe1151382703 100644 (file)
       "isdefault": false,
       "comment": "our first sequencer"
     }
+  },
+  { "model": "experiments.ClusterStation",
+    "pk": 3,
+    "fields": { "name": "new", "isdefault": false }
   }
-]
\ No newline at end of file
+]
diff --git a/htsworkflow/frontend/experiments/fixtures/test_flowcells.json b/htsworkflow/frontend/experiments/fixtures/test_flowcells.json
index 149747656f3baf0b900ba3fb890f77fa0f773e00..d84bf17709c02311ade3120c37ec824d01574551 100644 (file)
        "pM": "7"
        }
    },
+    {
+        "pk": "11006",
+        "model": "samples.library",
+        "fields": {
+            "ten_nM_dilution": false,
+            "gel_cut_size": 325,
+            "library_name": "Paired End Pfl #3 MP 7/24/9",
+            "creation_date": "2009-08-05",
+            "cell_line": 1,
+            "library_species": 8,
+            "library_type": 2,
+            "made_by": "Lorian",
+            "affiliations": [
+                41
+            ],
+            "replicate": 1,
+            "condition": 1,
+            "hidden": true,
+            "stopping_point": "1A",
+            "tags": [],
+            "made_for": "",
+            "amplified_from_sample": null,
+            "notes": "7/31/2009 16:08:22\tColor: Blue",
+            "undiluted_concentration": null,
+            "successful_pM": null,
+            "experiment_type": 8,
+            "antibody": null
+        }
+    },
     {
         "pk": "11016",
         "model": "samples.library",
             "stopping_point": "1Aa",
             "tags": [],
             "made_for": "Brian Williams",
-            "amplified_from_sample": null,
+            "amplified_from_sample": 11006,
             "notes": "fragment size = 300 bp, Amicon filtered\r\nnanodrop: 56.3",
             "undiluted_concentration": "28.7",
             "successful_pM": null,
             "stopping_point": "1Aa",
             "tags": [],
             "made_for": "Brian Williams",
-            "amplified_from_sample": null,
+            "amplified_from_sample": 11006,
             "notes": "300 bp gel fragment, Amicon filtered\r\nnanodrop: 24.2",
             "undiluted_concentration": "2.05",
             "successful_pM": null,
             "stopping_point": "2A",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 12044,
+            "amplified_from_sample": null,
             "notes": "8/21/2009 11:57:54\tColor: Yellow",
             "undiluted_concentration": "20.5",
             "successful_pM": null,
             "stopping_point": "2A",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11045,
+            "amplified_from_sample": null,
             "notes": "8/21/2009 11:57:54\tColor: Blue",
             "undiluted_concentration": "23.9",
             "successful_pM": null,
             "stopping_point": "1Aa",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11051,
+            "amplified_from_sample": null,
             "notes": "8/26/2009 14:46:56\tColor: Purple",
             "undiluted_concentration": "1.47",
             "successful_pM": null,
             "stopping_point": "1Aa",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11053,
+            "amplified_from_sample": null,
             "notes": "8/26/2009 14:46:56\tColor: Black",
             "undiluted_concentration": "1.42",
             "successful_pM": null,
             "stopping_point": "1Aa",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11054,
+            "amplified_from_sample": null,
             "notes": "8/26/2009 14:46:56\tColor: Orange.",
             "undiluted_concentration": "1.3",
             "successful_pM": null,
             "stopping_point": "2A",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11043,
+            "amplified_from_sample": null,
             "notes": "8/21/2009 11:57:54\tColor: Orange",
             "undiluted_concentration": "22.4",
             "successful_pM": null,
             "stopping_point": "2A",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11043,
+            "amplified_from_sample": null,
             "notes": "8/21/2009 11:57:54\tColor: Orange",
             "undiluted_concentration": "22.4",
             "successful_pM": null,
             "stopping_point": "2A",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11043,
+            "amplified_from_sample": null,
             "notes": "8/21/2009 11:57:54\tColor: Orange",
             "undiluted_concentration": "22.4",
             "successful_pM": null,
             "stopping_point": "2A",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11043,
+            "amplified_from_sample": null,
             "notes": "8/21/2009 11:57:54\tColor: Orange.",
             "undiluted_concentration": "22.4",
             "successful_pM": null,
             "stopping_point": "2A",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11046,
+            "amplified_from_sample": null,
             "notes": "8/21/2009 11:57:54\tColor: Green",
             "undiluted_concentration": "24.9",
             "successful_pM": null,
             "stopping_point": "1Aa",
             "tags": [],
             "made_for": "",
-            "amplified_from_sample": 11052,
+            "amplified_from_sample": null,
             "notes": "8/26/2009 14:46:56\tColor: White.",
             "undiluted_concentration": "2.17",
             "successful_pM": null,
diff --git a/htsworkflow/frontend/experiments/models.py b/htsworkflow/frontend/experiments/models.py
index 5152c406e2fc55c87bd997ed7d4ee14197339859..e3771cbde31be4f654fd44dc5b043ed847144257 100644 (file)
@@ -20,7 +20,7 @@ LOGGER = logging.getLogger(__name__)
 default_pM = 5
 try:
     default_pM = int(settings.DEFAULT_PM)
-except ValueError, e:
+except AttributeError, e:
     LOGGER.error("invalid value for frontend.default_pm")
 
 # how many days to wait before trying to re-import a runfolder
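Catching AttributeError is the right change for a DEFAULT_PM that is missing entirely, since django settings raise AttributeError for undefined names; a DEFAULT_PM that is present but non-numeric would still raise ValueError. A defensive variant (a sketch, not the committed code) catches both:

    import logging
    from django.conf import settings

    LOGGER = logging.getLogger(__name__)

    default_pM = 5
    try:
        default_pM = int(settings.DEFAULT_PM)
    except (AttributeError, ValueError):
        # AttributeError: DEFAULT_PM not defined in settings
        # ValueError: DEFAULT_PM defined but not an integer
        LOGGER.error("invalid value for frontend.default_pm")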
diff --git a/htsworkflow/frontend/experiments/test_experiments.py b/htsworkflow/frontend/experiments/test_experiments.py
index 8eb998343b7409350554a169c5a9cbc1748b96f6..5878d726d7ee1d268382230a56a235aef7ca29e6 100644 (file)
@@ -15,6 +15,8 @@ from django.core import mail
 from django.core.exceptions import ObjectDoesNotExist
 from django.test import TestCase
 from django.test.utils import setup_test_environment, teardown_test_environment
+from django.db import connection
+from django.conf import settings
 from htsworkflow.frontend.experiments import models
 from htsworkflow.frontend.experiments import experiments
 from htsworkflow.frontend.auth import apidata
@@ -26,6 +28,18 @@ LANE_SET = range(1,9)
 
 NSMAP = {'libns':'http://jumpgate.caltech.edu/wiki/LibraryOntology#'}
 
+from django.db import connection
+OLD_DB_NAME = settings.DATABASES['default']['NAME']
+VERBOSITY = 0
+def setUpModule():
+    setup_test_environment()
+    settings.DEBUG = False
+    connection.creation.create_test_db(VERBOSITY)
+
+def tearDownModule():
+    connection.creation.destroy_test_db(OLD_DB_NAME, VERBOSITY)
+    teardown_test_environment()
+
 class ClusterStationTestCases(TestCase):
     fixtures = ['test_flowcells.json']
 
@@ -480,10 +494,9 @@ class TestFileType(TestCase):
         file_type_objects = models.FileType.objects
         name = 'QSEQ tarfile'
         file_type_object = file_type_objects.get(name=name)
-        self.assertEqual(u"<FileType: QSEQ tarfile>",
+        self.assertEqual(u"QSEQ tarfile",
                              unicode(file_type_object))
 
-class TestFileType(TestCase):
     def test_find_file_type(self):
         file_type_objects = models.FileType.objects
         cases = [('woldlab_090921_HWUSI-EAS627_0009_42FC3AAXX_l7_r1.tar.bz2',
@@ -537,17 +550,6 @@ class TestFileType(TestCase):
 class TestEmailNotify(TestCase):
     fixtures = ['test_flowcells.json']
 
-    @classmethod
-    def setUpClass(self):
-        # isolate django mail when running under unittest2
-        setup_test_environment()
-
-    @classmethod
-    def tearDownClass(self):
-        # isolate django mail when running under unittest2
-        teardown_test_environment()
-
-
     def test_started_email_not_logged_in(self):
         response = self.client.get('/experiments/started/153/')
         self.assertEqual(response.status_code, 302)
@@ -684,3 +686,29 @@ class TestSequencer(TestCase):
 
         errmsgs = list(inference.run_validation())
         self.assertEqual(len(errmsgs), 0)
+
+
+OLD_DB = settings.DATABASES['default']['NAME']
+def setUpModule():
+    setup_test_environment()
+    connection.creation.create_test_db()
+
+def tearDownModule():
+    connection.creation.destroy_test_db(OLD_DB)
+    teardown_test_environment()
+
+def suite():
+    from unittest2 import TestSuite, defaultTestLoader
+    suite = TestSuite()
+    for testcase in [ClusterStationTestCases,
+                     SequencerTestCases,
+                     ExerimentsTestCases,
+                     TestFileType,
+                     TestEmailNotify,
+                     TestSequencer]:
+        suite.addTests(defaultTestLoader.loadTestsFromTestCase(testcase))
+    return suite
+
+if __name__ == "__main__":
+    from unittest2 import main
+    main(defaultTest="suite")
diff --git a/htsworkflow/frontend/experiments/urls.py b/htsworkflow/frontend/experiments/urls.py
index 91202a867bd76bad92dad516d81869ba70c953b3..6a53cdb66c90023127295794fd0fc07abfff130b 100644 (file)
@@ -1,4 +1,4 @@
-from django.conf.urls.defaults import *
+from django.conf.urls import patterns
 
 urlpatterns = patterns('',
     (r'^$', 'htsworkflow.frontend.experiments.views.index'),
diff --git a/htsworkflow/frontend/inventory/models.py b/htsworkflow/frontend/inventory/models.py
index 46b37ec615a93647cd40a0de76d65e0efec72afe..e729fe49b7a7f5b4651fbb7615c045dbe1834132 100644 (file)
@@ -157,7 +157,7 @@ class PrinterTemplate(models.Model):
     item_type = models.ForeignKey(ItemType)
     printer = models.ForeignKey(Printer)
 
-    default = models.BooleanField()
+    default = models.BooleanField(default=False)
 
     template = models.TextField()
 
diff --git a/htsworkflow/frontend/inventory/test_inventory.py b/htsworkflow/frontend/inventory/test_inventory.py
index 118c654aaf8463ee638d748414c5c620df9c0d67..86d37b7cbe7e740468f04df9380ae8fa82c98b35 100644 (file)
@@ -1,6 +1,11 @@
 import RDF
 
 from django.test import TestCase
+from django.test.utils import setup_test_environment, \
+     teardown_test_environment
+from django.db import connection
+from django.conf import settings
+
 from django.contrib.auth.models import User
 from django.core import urlresolvers
 
@@ -108,6 +113,15 @@ class InventoryTestCase(TestCase):
         flowcells = [ str(x.uri) for x in targets]
         return flowcells
 
+OLD_DB = settings.DATABASES['default']['NAME']
+def setUpModule():
+    setup_test_environment()
+    connection.creation.create_test_db()
+
+def tearDownModule():
+    connection.creation.destroy_test_db(OLD_DB)
+    teardown_test_environment()
+
 def suite():
     from unittest2 import TestSuite, defaultTestLoader
     suite = TestSuite()
diff --git a/htsworkflow/frontend/inventory/urls.py b/htsworkflow/frontend/inventory/urls.py
index 7269a802343a58b075434976686a930306bb2aff..fc25768cdf25897bd005c8bf74d647963e45d454 100644 (file)
@@ -1,4 +1,4 @@
-from django.conf.urls.defaults import *
+from django.conf.urls import patterns
 
 urlpatterns = patterns('',
     # DATA
diff --git a/htsworkflow/frontend/inventory/views.py b/htsworkflow/frontend/inventory/views.py
index 1fb73783c029b4434614f6c8d7d761f36caab026..265e1bc74f43d74d53957401a20d018c043ac170 100644 (file)
@@ -1,5 +1,6 @@
-from htsworkflow.frontend.samples.changelist import ChangeList
+from htsworkflow.frontend.samples.changelist import HTSChangeList
 from htsworkflow.frontend.inventory.models import Item, LongTermStorage, ItemType
+from htsworkflow.frontend.inventory.admin import ItemAdmin, ItemTypeAdmin
 from htsworkflow.frontend.inventory.bcmagic import item_search
 from htsworkflow.frontend.bcmagic.plugin import register_search_plugin
 from htsworkflow.frontend.experiments.models import FlowCell
@@ -138,11 +139,11 @@ def all_index(request):
     Inventory Index View
     """
     # build changelist
-    item_changelist = ChangeList(request, Item,
+    item_changelist = HTSChangeList(request, Item,
         list_filter=[],
         search_fields=[],
         list_per_page=200,
-        queryset=Item.objects.all()
+        model_admin=ItemAdmin(Item, None)
     )
 
     context_dict = {
@@ -161,11 +162,11 @@ def index(request):
     Inventory Index View
     """
     # build changelist
-    item_changelist = ChangeList(request, Item,
+    item_changelist = HTSChangeList(request, ItemType,
         list_filter=[],
-        search_fields=['name'],
+        search_fields=['name', 'description'],
         list_per_page=50,
-        queryset=ItemType.objects.all()
+        model_admin=ItemTypeAdmin(ItemType, None)
     )
 
     context_dict = {
@@ -173,7 +174,6 @@ def index(request):
         'page_name': 'Inventory Index'
     }
     context_dict.update(INVENTORY_CONTEXT_DEFAULTS)
-
     return render_to_response('inventory/inventory_index.html',
                               context_dict,
                               context_instance=RequestContext(request))
@@ -189,11 +189,11 @@ def itemtype_index(request, name):
     itemtype = ItemType.objects.get(name=name)
 
     # build changelist
-    item_changelist = ChangeList(request, Item,
+    item_changelist = HTSChangeList(request, Item,
         list_filter=[],
         search_fields=[],
         list_per_page=200,
-        queryset=itemtype.item_set.all()
+        model_admin=ItemAdmin(Item, None)
     )
 
     context_dict = {
diff --git a/htsworkflow/frontend/reports/urls.py b/htsworkflow/frontend/reports/urls.py
index 5a004f219f3f4eca044f43012c66ffaa0e3e0e24..fa76f83344682464aa866ac9ac2702e7db6b9a16 100644 (file)
@@ -1,4 +1,4 @@
-from django.conf.urls.defaults import *
+from django.conf.urls import patterns
 
 urlpatterns = patterns('',                                               
     (r'^updLibInfo$', 'htsworkflow.frontend.reports.libinfopar.refreshLibInfoFile'),
diff --git a/htsworkflow/frontend/samples/admin.py b/htsworkflow/frontend/samples/admin.py
index e31f5810f87c3548847db489214173fd85bacb54..b97668d4d804486bcb122dafd5ae56b827115c8d 100644 (file)
@@ -1,6 +1,6 @@
 from django.contrib import admin
 from django.contrib.admin import widgets
-from django.contrib.admin.models import User
+from django.contrib.auth.models import User
 from django.contrib.auth.admin import UserAdmin
 from django.contrib.auth.forms import UserCreationForm, UserChangeForm
 from django.template import Context, Template
diff --git a/htsworkflow/frontend/samples/changelist.py b/htsworkflow/frontend/samples/changelist.py
index cbbfd3864eca6e18afeb5d25ad3a47c7f5e49d31..1b50418dac1df7dc0b9ae111453fd7e7023e0f4b 100644 (file)
-"""
-Slightly modified version of the django admin component that handles filters and searches
-"""
-from django.contrib.admin.filterspecs import FilterSpec
-from django.contrib.admin.options import IncorrectLookupParameters
-from django.core.paginator import Paginator, InvalidPage, EmptyPage
-from django.db import models
-from django.db.models.query import QuerySet
-from django.utils.encoding import force_unicode, smart_str
-from django.utils.translation import ugettext
-from django.utils.http import urlencode
-import operator
+import django
+from django.contrib.admin.views.main import ChangeList
+
+class HTSChangeList(ChangeList):
+    def __init__(self, request, model, list_filter, search_fields,
+                 list_per_page, model_admin, extra_filters=None):
+        """Simplification of the django model filter view
+
+        The new parameter "extra_filters" should be a mapping
+        that will be passed as keyword arguments to
+        queryset.filter()
+        """
+        self.extra_filters = extra_filters
+
+        args = {
+            'request': request, #request
+            'model': model, #model
+            'list_display': [], # list_display
+            'list_display_links': None, # list_display_links
+            'list_filter': list_filter, #list_filter
+            'date_hierarchy': None, # date_hierarchy
+            'search_fields': search_fields, #search_fields
+            'list_select_related': None, # list_select_related,
+            'list_per_page': list_per_page, #list_per_page
+            'list_editable': None, # list_editable
+            'model_admin': model_admin #model_admin
+        }
+        if django.VERSION >= (1, 4):
+            args['list_max_show_all'] = 20000  # list_max_show_all
+        super(HTSChangeList, self).__init__(**args)
+
+        self.is_popup = False
+        # the to_field parameter was removed in the first version of this class
 
-try:
-    set
-except NameError:
-    from sets import Set as set   # Python 2.3 fallback
-
-# The system will display a "Show all" link on the change list only if the
-# total result count is less than or equal to this setting.
-MAX_SHOW_ALL_ALLOWED = 20000
-
-# Changelist settings
-ALL_VAR = 'all'
-ORDER_VAR = 'o'
-ORDER_TYPE_VAR = 'ot'
-PAGE_VAR = 'p'
-SEARCH_VAR = 'q'
-TO_FIELD_VAR = 't'
-IS_POPUP_VAR = 'pop'
-ERROR_FLAG = 'e'
-
-# Text to display within change-list table cells if the value is blank.
-EMPTY_CHANGELIST_VALUE = '(None)'
-
-class ChangeList(object):
-    
-    #def __init__(self, request, model, list_display, list_display_links, list_filter, date_hierarchy, search_fields, list_select_related, list_per_page, list_editable, model_admin):
-    def __init__(self, request, model, list_filter, search_fields, list_per_page, queryset=None):
-        self.model = model
-        self.opts = model._meta
-        self.lookup_opts = self.opts
-        if queryset is None:
-            self.root_query_set = model.objects.all()
-        else:
-            self.root_query_set = queryset
-        self.list_display =  []
-        self.list_display_links = None
-        self.list_filter = list_filter
-        #self.date_hierarchy = date_hierarchy
-        self.search_fields = search_fields
-        self.list_select_related = None
-        self.list_per_page = list_per_page
-        #self.list_editable = list_editable
-        self.model_admin = None
-
-        # Get search parameters from the query string.
-        try:
-            self.page_num = int(request.GET.get(PAGE_VAR, '0'))
-        except ValueError:
-            self.page_num = 0
-        self.show_all = 'all' in request.GET
-        #self.is_popup = IS_POPUP_VAR in request.GET
-        #self.to_field = request.GET.get(TO_FIELD_VAR)
-        self.params = dict(request.GET.items())
-        if PAGE_VAR in self.params:
-            del self.params[PAGE_VAR]
-        #if TO_FIELD_VAR in self.params:
-        #    del self.params[TO_FIELD_VAR]
-        if ERROR_FLAG in self.params:
-            del self.params[ERROR_FLAG]
-            
         self.multi_page = True
         self.can_show_all = False
 
-        self.order_field, self.order_type = self.get_ordering()
-        self.query = request.GET.get(SEARCH_VAR, '')
-        self.query_set = self.get_query_set()
-        self.get_results(request)
-        #self.title = (self.is_popup and ugettext('Select %s') % force_unicode(self.opts.verbose_name) or ugettext('Select %s to change') % force_unicode(self.opts.verbose_name))
-        self.filter_specs, self.has_filters = self.get_filters(request)
-        #self.pk_attname = self.lookup_opts.pk.attname
-
-    def get_filters(self, request):
-        filter_specs = []
-        if self.list_filter:
-            filter_fields = [self.lookup_opts.get_field(field_name) for field_name in self.list_filter]
-            for f in filter_fields:
-                spec = FilterSpec.create(f, request, self.params, self.model, self.model_admin)
-                if spec and spec.has_output():
-                    filter_specs.append(spec)
-        return filter_specs, bool(filter_specs)
-
-    def get_query_string(self, new_params=None, remove=None):
-        if new_params is None: new_params = {}
-        if remove is None: remove = []
-        p = self.params.copy()
-        for r in remove:
-            for k in p.keys():
-                if k.startswith(r):
-                    del p[k]
-        for k, v in new_params.items():
-            if v is None:
-                if k in p:
-                    del p[k]
-            else:
-                p[k] = v
-        return '?%s' % urlencode(p)
-
-    def get_results(self, request):
-        paginator = Paginator(self.query_set, self.list_per_page)
-        # Get the number of objects, with admin filters applied.
-        result_count = paginator.count
-
-        # Get the total number of objects, with no admin filters applied.
-        # Perform a slight optimization: Check to see whether any filters were
-        # given. If not, use paginator.hits to calculate the number of objects,
-        # because we've already done paginator.hits and the value is cached.
-        if not self.query_set.query.where:
-            full_result_count = result_count
-        else:
-            full_result_count = self.root_query_set.count()
-
-        can_show_all = result_count <= MAX_SHOW_ALL_ALLOWED
-        multi_page = result_count > self.list_per_page
-
-        # Get the list of objects to display on this page.
-        if (self.show_all and can_show_all) or not multi_page:
-            result_list = self.query_set._clone()
-        else:
-            try:
-                result_list = paginator.page(self.page_num+1).object_list
-            except InvalidPage:
-                result_list = ()
-
-        self.result_count = result_count
-        self.full_result_count = full_result_count
-        self.result_list = result_list
-        self.can_show_all = can_show_all
-        self.multi_page = multi_page
-        self.paginator = paginator
-
-    def get_ordering(self):
-        lookup_opts, params = self.lookup_opts, self.params
-        # For ordering, first check the "ordering" parameter in the admin
-        # options, then check the object's default ordering. If neither of
-        # those exist, order descending by ID by default. Finally, look for
-        # manually-specified ordering from the query string.
-        ordering = lookup_opts.ordering or ['-' + lookup_opts.pk.name]
-
-        if ordering[0].startswith('-'):
-            order_field, order_type = ordering[0][1:], 'desc'
-        else:
-            order_field, order_type = ordering[0], 'asc'
-        if ORDER_VAR in params:
-            try:
-                field_name = self.list_display[int(params[ORDER_VAR])]
-                try:
-                    f = lookup_opts.get_field(field_name)
-                except models.FieldDoesNotExist:
-                    # See whether field_name is a name of a non-field
-                    # that allows sorting.
-                    try:
-                        if callable(field_name):
-                            attr = field_name
-                        elif hasattr(self.model_admin, field_name):
-                            attr = getattr(self.model_admin, field_name)
-                        else:
-                            attr = getattr(self.model, field_name)
-                        order_field = attr.admin_order_field
-                    except AttributeError:
-                        pass
-                else:
-                    order_field = f.name
-            except (IndexError, ValueError):
-                pass # Invalid ordering specified. Just use the default.
-        if ORDER_TYPE_VAR in params and params[ORDER_TYPE_VAR] in ('asc', 'desc'):
-            order_type = params[ORDER_TYPE_VAR]
-        return order_field, order_type
-
-    def get_query_set(self):
-        qs = self.root_query_set
-        lookup_params = self.params.copy() # a dictionary of the query string
-        for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR):
-            if i in lookup_params:
-                del lookup_params[i]
-        for key, value in lookup_params.items():
-            if not isinstance(key, str):
-                # 'key' will be used as a keyword argument later, so Python
-                # requires it to be a string.
-                del lookup_params[key]
-                lookup_params[smart_str(key)] = value
-
-            # if key ends with __in, split parameter into separate values
-            if key.endswith('__in'):
-                lookup_params[key] = value.split(',')
-
-        # Apply lookup parameters from the query string.
-        try:
-            qs = qs.filter(**lookup_params)
-        # Naked except! Because we don't have any other way of validating "params".
-        # They might be invalid if the keyword arguments are incorrect, or if the
-        # values are not in the correct type, so we might get FieldError, ValueError,
-        # ValicationError, or ? from a custom field that raises yet something else 
-        # when handed impossible data.
-        except:
-            raise IncorrectLookupParameters
-
-        # Use select_related() if one of the list_display options is a field
-        # with a relationship and the provided queryset doesn't already have
-        # select_related defined.
-        if not qs.query.select_related:
-            if self.list_select_related:
-                qs = qs.select_related()
-            else:
-                for field_name in self.list_display:
-                    try:
-                        f = self.lookup_opts.get_field(field_name)
-                    except models.FieldDoesNotExist:
-                        pass
-                    else:
-                        if isinstance(f.rel, models.ManyToOneRel):
-                            qs = qs.select_related()
-                            break
-
-        # Set ordering.
-        if self.order_field:
-            qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
-
-        # Apply keyword searches.
-        def construct_search(field_name):
-            if field_name.startswith('^'):
-                return "%s__istartswith" % field_name[1:]
-            elif field_name.startswith('='):
-                return "%s__iexact" % field_name[1:]
-            elif field_name.startswith('@'):
-                return "%s__search" % field_name[1:]
-            else:
-                return "%s__icontains" % field_name
-
-        if self.search_fields and self.query:
-            for bit in self.query.split():
-                or_queries = [models.Q(**{construct_search(str(field_name)): bit}) for field_name in self.search_fields]
-                qs = qs.filter(reduce(operator.or_, or_queries))
-            for field_name in self.search_fields:
-                if '__' in field_name:
-                    qs = qs.distinct()
-                    break
+    def get_query_set(self, request=None):
+        args = {}
+        if django.VERSION >= (1, 4):
+            # Django 1.4's ChangeList.get_query_set() grew a request argument
+            args['request'] = request
 
+        qs = super(HTSChangeList, self).get_query_set(**args)
+        if self.extra_filters:
+            new_qs = qs.filter(**self.extra_filters)
+            if new_qs is not None:
+                qs = new_qs
         return qs
-
-    #def url_for_result(self, result):
-    #    return "%s/" % quote(getattr(result, self.pk_attname))
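
With the hand-rolled ChangeList copy above deleted, HTSChangeList now only layers two things on Django's own ChangeList: the version-gated request argument and the optional extra_filters dict. A minimal sketch of how a view might drive it, using the same keyword arguments the views.py hunks below adopt (the helper name todo_libraries is hypothetical):

    from htsworkflow.frontend.samples.admin import LibraryOptions
    from htsworkflow.frontend.samples.changelist import HTSChangeList
    from htsworkflow.frontend.samples.models import Library

    def todo_libraries(request):  # hypothetical helper, for illustration
        cl = HTSChangeList(request, Library,
            list_filter=['affiliations', 'library_species'],
            search_fields=['id', 'library_name'],
            list_per_page=200,
            model_admin=LibraryOptions(Library, None),
            extra_filters={'hidden__exact': 0, 'lane': None})
        # result_list is filled in by the stock ChangeList machinery,
        # after extra_filters has been applied in get_query_set()
        return cl.result_list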
index de3ad89a6211bc51319833ad45c5c4b0ab5dd9b8..ae22c1df6084f6657a610ba97d623a0b993988bb 100644 (file)
         "notes": "Unknown"
      }
   },
+  { "pk": 1, "model": "samples.Condition",
+    "fields": {
+        "condition_name": "Unknown",
+        "nickname": "",
+        "notes": "Unknown"
+    }
+  },
   {
      "model": "samples.LibraryType",
      "pk": 1,
         "is_paired_end": true
      }
   },
+  {
+     "model": "samples.LibraryType",
+     "pk": 7,
+     "fields": {
+        "name": "Barcoded Small RNA",
+        "can_multiplex": true,
+        "is_paired_end": true
+     }
+  },
+  {
+     "model": "samples.LibraryType",
+     "pk": 8,
+     "fields": {
+        "name": "Nextera",
+        "can_multiplex": true,
+        "is_paired_end": true
+     }
+  },
   {
      "model": "samples.LibraryType",
      "pk": 9,
index 5126bb504eb21f723e5845489c8085fdfbbb35a0..d7c70c239c037db3dd4fc77a8c67cc9cb5cb1678 100644 (file)
@@ -156,7 +156,7 @@ class Library(models.Model):
   id = models.CharField(max_length=10, primary_key=True)
   library_name = models.CharField(max_length=100, unique=True)
   library_species = models.ForeignKey(Species)
-  hidden = models.BooleanField()
+  hidden = models.BooleanField(default=False)
   account_number = models.CharField(max_length=100, null=True, blank=True)
   cell_line = models.ForeignKey(Cellline, blank=True, null=True,
                                 verbose_name="Background")
@@ -207,7 +207,7 @@ class Library(models.Model):
       # note \u00b5 is the micro symbol in unicode
   successful_pM = models.DecimalField(max_digits=9,
                                       decimal_places=1, blank=True, null=True)
-  ten_nM_dilution = models.BooleanField()
+  ten_nM_dilution = models.BooleanField(default=False)
   gel_cut_size = models.IntegerField(default=225, blank=True, null=True)
   insert_size = models.IntegerField(blank=True, null=True)
   notes = models.TextField(blank=True)
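
Making the defaults explicit matters because a BooleanField with no default leaves unsaved instances without a usable value. A minimal sketch of the effect (the model name is hypothetical, for illustration only):

    from django.db import models

    class Example(models.Model):  # illustration only, not part of this commit
        # with default=False an unsaved instance has a concrete value:
        # Example().hidden == False, so it can be saved without extra setup
        hidden = models.BooleanField(default=False)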
index 2fb394511989b32e6cb97d7a507cf28fce71fc2d..f0844e55cce7fbf3b33fbdfbf03b9d9264b3e13a 100644 (file)
@@ -6,6 +6,10 @@ except ImportError, e:
     import simplejson as json
 
 from django.test import TestCase
+from django.test.utils import setup_test_environment, \
+     teardown_test_environment
+from django.db import connection
+from django.conf import settings
 
 from htsworkflow.frontend.samples.models import \
         Affiliation, \
@@ -327,3 +331,24 @@ def get_rdf_memory_model():
     storage = RDF.MemoryStorage()
     model = RDF.Model(storage)
     return model
+
+OLD_DB = settings.DATABASES['default']['NAME']
+def setUpModule():
+    setup_test_environment()
+    connection.creation.create_test_db()
+
+def tearDownModule():
+    connection.creation.destroy_test_db(OLD_DB)
+    teardown_test_environment()
+
+def suite():
+    from unittest2 import TestSuite, defaultTestLoader
+    suite = TestSuite()
+    suite.addTests(defaultTestLoader.loadTestsFromTestCase(LibraryTestCase))
+    suite.addTests(defaultTestLoader.loadTestsFromTestCase(SampleWebTestCase))
+    suite.addTests(defaultTestLoader.loadTestsFromTestCase(TestRDFaLibrary))
+    return suite
+
+if __name__ == "__main__":
+    from unittest2 import main
+    main(defaultTest="suite")
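
setUpModule and tearDownModule are module-level fixtures: unittest2 runs them once before the first test in the module and once after the last, so the test database is created and torn down a single time instead of per TestCase. A sketch of driving the suite() hook programmatically, assuming unittest2 is installed:

    import unittest2

    # loadTestsFromName resolves a dotted name; a callable returning a
    # TestSuite (like suite() above) is invoked to produce the tests
    tests = unittest2.defaultTestLoader.loadTestsFromName(
        'htsworkflow.frontend.samples.test_samples.suite')
    unittest2.TextTestRunner(verbosity=2).run(tests)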
index fea937c69e52fa1117d4d78c2342eb17b9c7f467..570b1db72f2a83f97607c8bcaf1eb500cacac147 100644 (file)
@@ -1,8 +1,8 @@
-from django.conf.urls.defaults import *
+from django.conf.urls import patterns, url
 
 urlpatterns = patterns('',
-    (r"^library/(?P<library_id>\w+)/json", 'htsworkflow.frontend.samples.views.library_json'),
-    (r"^species/(?P<species_id>\w+)/json", 'htsworkflow.frontend.samples.views.species_json'),
-    (r"^species/(?P<species_id>\w+)", 'htsworkflow.frontend.samples.views.species'),
-    (r"^antibody/$", 'htsworkflow.frontend.samples.views.antibodies'),                   
+    url(r"^library/(?P<library_id>\w+)/json", 'htsworkflow.frontend.samples.views.library_json'),
+    url(r"^species/(?P<species_id>\w+)/json", 'htsworkflow.frontend.samples.views.species_json'),
+    url(r"^species/(?P<species_id>\w+)", 'htsworkflow.frontend.samples.views.species'),
+    url(r"^antibody/$", 'htsworkflow.frontend.samples.views.antibodies'),
 )
index e50b5b357c2caa1b88d73cb2f2884cf0124f6158..df7b7ba8e3ff349629db9ea0383492f12983607a 100644 (file)
@@ -9,14 +9,23 @@ try:
 except ImportError, e:
     import simplejson as json
 
-from django.contrib.csrf.middleware import csrf_exempt
+from django.views.decorators.csrf import csrf_exempt
+from django.core.exceptions import ObjectDoesNotExist
+from django.http import HttpResponse, HttpResponseRedirect, Http404
+from django.shortcuts import render_to_response, get_object_or_404
+from django.template import RequestContext
+from django.template.loader import get_template
+from django.contrib.auth.decorators import login_required
+from django.conf import settings
+
 from htsworkflow.frontend.auth import require_api_key
 from htsworkflow.frontend.experiments.models import FlowCell, Lane, LANE_STATUS_MAP
-from htsworkflow.frontend.samples.changelist import ChangeList
+from htsworkflow.frontend.experiments.admin import LaneOptions
+from htsworkflow.frontend.samples.changelist import HTSChangeList
 from htsworkflow.frontend.samples.models import Antibody, Library, Species, HTSUser
+from htsworkflow.frontend.samples.admin import LibraryOptions
 from htsworkflow.frontend.samples.results import get_flowcell_result_dict
 from htsworkflow.frontend.bcmagic.forms import BarcodeMagicForm
-from htsworkflow.pipelines.runfolder import load_pipeline_run_xml
 from htsworkflow.pipelines import runfolder
 from htsworkflow.pipelines.eland import ResultLane
 from htsworkflow.pipelines.samplekey import SampleKey
@@ -25,13 +34,6 @@ from htsworkflow.util import makebed
 from htsworkflow.util import opener
 
 
-from django.core.exceptions import ObjectDoesNotExist
-from django.http import HttpResponse, HttpResponseRedirect, Http404
-from django.shortcuts import render_to_response, get_object_or_404
-from django.template import RequestContext
-from django.template.loader import get_template
-from django.contrib.auth.decorators import login_required
-from django.conf import settings
 
 LANE_LIST = [1,2,3,4,5,6,7,8]
 SAMPLES_CONTEXT_DEFAULTS = {
@@ -95,14 +97,16 @@ def create_library_context(cl):
 
 def library(request, todo_only=False):
     queryset = Library.objects.filter(hidden__exact=0)
+    filters = {'hidden__exact': 0}
     if todo_only:
-        queryset = queryset.filter(lane=None)
+        filters['lane'] = None
     # build changelist
-    fcl = ChangeList(request, Library,
+    fcl = HTSChangeList(request, Library,
         list_filter=['affiliations', 'library_species'],
         search_fields=['id', 'library_name', 'amplified_from_sample__id'],
         list_per_page=200,
-        queryset=queryset
+        model_admin=LibraryOptions(Library, None),
+        extra_filters=filters
     )
 
     context = { 'cl': fcl, 'title': 'Library Index', 'todo_only': todo_only}
@@ -164,10 +168,11 @@ def lanes_for(request, username=None):
     if username is not None:
         user = HTSUser.objects.get(username=username)
         query.update({'library__affiliations__users__id':user.id})
-    fcl = ChangeList(request, Lane,
+    fcl = HTSChangeList(request, Lane,
         list_filter=[],
         search_fields=['flowcell__flowcell_id', 'library__id', 'library__library_name'],
         list_per_page=200,
+        model_admin=LaneOptions(Lane, None),
         queryset=Lane.objects.filter(**query)
     )
 
@@ -295,7 +300,7 @@ def _summary_stats(flowcell_id, lane_id, library_id):
             err_list.append('Run xml for Flowcell %s(%s) not found.' % (fc_id, cycle_width))
             continue
 
-        run = load_pipeline_run_xml(xmlpath)
+        run = runfolder.load_pipeline_run_xml(xmlpath)
         # skip if we don't have available metadata.
         if run.gerald is None or run.gerald.summary is None:
             continue
@@ -557,5 +562,3 @@ def user_profile(request):
     context.update(SAMPLES_CONTEXT_DEFAULTS)
     return render_to_response('registration/profile.html', context,
                               context_instance=RequestContext(request))
-
-
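
Since load_pipeline_run_xml is no longer imported directly, the call site reaches it through the already-imported runfolder module. A minimal sketch of the qualified call (the path is illustrative):

    from htsworkflow.pipelines import runfolder

    # qualifying the call through the module avoids importing the same
    # name twice and keeps monkey-patching in tests straightforward
    run = runfolder.load_pipeline_run_xml('/tmp/run_FC12150_2013-12-17.xml')
    print run.flowcell_id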
diff --git a/htsworkflow/frontend/static/css/dashboard.css b/htsworkflow/frontend/static/css/dashboard.css
new file mode 100644 (file)
index 0000000..05808bc
--- /dev/null
@@ -0,0 +1,30 @@
+/* DASHBOARD */
+
+.dashboard .module table th {
+    width: 100%;
+}
+
+.dashboard .module table td {
+    white-space: nowrap;
+}
+
+.dashboard .module table td a {
+    display: block;
+    padding-right: .6em;
+}
+
+/* RECENT ACTIONS MODULE */
+
+.module ul.actionlist {
+    margin-left: 0;
+}
+
+ul.actionlist li {
+    list-style-type: none;
+}
+
+ul.actionlist li {
+    overflow: hidden;
+    text-overflow: ellipsis;
+    -o-text-overflow: ellipsis;
+}
diff --git a/htsworkflow/frontend/static/img/dna80.png b/htsworkflow/frontend/static/img/dna80.png
new file mode 100644 (file)
index 0000000..845c255
Binary files /dev/null and b/htsworkflow/frontend/static/img/dna80.png differ
index 4a5677d2da199df85f36ca47f552bc3b9889d2fa..66b6942d42cc7c17b533f28de154fab0b380fcb5 100644 (file)
@@ -1,7 +1,7 @@
 {% extends "admin/base_site.html" %}
 {% load i18n %}
 
-{% block extrastyle %}{{ block.super }}<link rel="stylesheet" type="text/css" href="{% load adminmedia %}{% admin_media_prefix %}css/dashboard.css" />{% endblock %}
+{% block extrastyle %}{{ block.super }}<link rel="stylesheet" type="text/css" href="{% load staticfiles %}{% static "css/dashboard.css" %}"/>{% endblock %}
 
 {% block coltype %}colMS{% endblock %}
 
index f8899df9e453177629b945e9ff598ecd8edfa5ab..0325a84e2162af3edb3e4b4d4cc81a8d694b21be 100644 (file)
     <title>{% block title %}{{ app_name }} - {{ page_name }}{% endblock %}</title>
 
 {% block additional_css %}
-    {% load adminmedia %}
-    {% if LANGUAGE_BIDI %}<link rel="stylesheet" type="text/css" href="{% block stylesheet_rtl %}{% admin_media_prefix %}css/rtl.css{% endblock %}" />{% endif %}
+    {% load staticfiles %}
+    {% if LANGUAGE_BIDI %}<link rel="stylesheet" type="text/css" href="{% block stylesheet_rtl %}{% static "css/rtl.css" %}{% endblock %}" />{% endif %}
     {% block extrastyle %}{% endblock %}
     {% block extrahead %}{% endblock %}
     {% block blockbots %}<meta name="robots" content="NONE,NOARCHIVE" />{% endblock %}
-    <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}css/data-browse-index.css" />
+    <link rel="stylesheet" type="text/css" href="{% static "css/data-browse-index.css" %}" />
 {% endblock %}
 
 </head>
@@ -39,7 +39,7 @@
         {% trans 'Welcome,' %}
         <strong>{% firstof user.first_name user.username %}</strong>.
         {% block userlinks %}
-            {% url django-admindocs-docroot as docsroot %}
+            {% url "django.admindocs.docroot" as docsroot %}
             {% if docsroot %}
                 <a href="{{ docsroot }}">{% trans 'Documentation' %}</a> /
             {% endif %}
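
The adminmedia tag library and {% admin_media_prefix %} were deprecated in Django 1.4 (and removed in 1.5); their replacement, {% static %} from django.contrib.staticfiles, resolves paths against STATIC_URL. For these templates to render, the settings need roughly the following; a sketch of stock Django 1.4 staticfiles configuration with illustrative values:

    # settings.py fragment, values are placeholders
    INSTALLED_APPS = (
        'django.contrib.admin',
        'django.contrib.staticfiles',
        # ... project apps ...
    )

    STATIC_URL = '/static/'  # prefix {% static %} prepends to asset paths
    STATICFILES_DIRS = (
        '/path/to/htsworkflow/frontend/static',  # holds css/dashboard.css
    )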
index 7381f62c645b5585a0fb02c07c58a6cba377ff23..1c02752c40358038e670725b03a55a8c095c30bf 100644 (file)
@@ -1,7 +1,8 @@
 {% extends "base.html" %}
+{% load staticfiles %}
 {% load i18n %}
 
-<link type="text/css" rel="stylesheet" href="/static/css/app.css" />
+<link type="text/css" rel="stylesheet" href="{% static "css/app.css" %}" />
 
 {% block title %}{{ sitename }}{% endblock %}
 
index 4183c995ebe81cca7117769658bcbf95175286c6..64da7e2886881d3609e4344353a0da75aceb00d5 100644 (file)
@@ -1,9 +1,10 @@
 {% extends "base_site.html" %}
-{% load adminmedia humanize i18n %}
+{% load staticfiles %}
+{% load humanize i18n %}
 {% block extrahead %}
     <!-- App Stuff -->
-    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
-    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    <link type="text/css" rel="stylesheet" href="{% static "css/app.css" %}" />
+    <script type="text/javascript" src="{% static "js/jquery.min.js" %}"></script>
 
     {% block additional_javascript %}
     {% endblock %}
@@ -41,7 +42,7 @@
               {% if user.is_staff %}
               <a href="{{lane.library.get_admin_url}}">
                   <img class="icon_button"
-                       src="/media/img/admin/icon_changelink.gif"/>
+                       src="{% static "admin/img/icon_changelink.gif" %}"/>
               </a>{% endif %}
           </td>
           <td>
index f71ae0fed0e77a05520b7e35c6f631004d437666..f0cb470102ce3932bbfbe17fbec315a51f152202 100644 (file)
@@ -1,7 +1,8 @@
+{% load staticfiles %}
 <div class="flowcell_identity" typeof="libns:IlluminaFlowcell" resource="{{flowcell.get_absolute_url}}">
   <h2>About this Flowcell</h2>
   <b>Flowcell</b>:
-    <a href="{{flowcell.get_absolute_url}}"><span property="libns:flowcell_id">{{flowcell.flowcell_id}}</span></a>{% if user.is_staff %}<a href="{{flowcell.get_admin_url}}"><img class="icon_button" src="/media/img/admin/icon_changelink.gif" alt="Edit"/></a>{% endif%}
+    <a href="{{flowcell.get_absolute_url}}"><span property="libns:flowcell_id">{{flowcell.flowcell_id}}</span></a>{% if user.is_staff %}<a href="{{flowcell.get_admin_url}}"><img class="icon_button" src="{% static "admin/img/icon_changelink.gif" %}" alt="Edit"/></a>{% endif%}
   <br/>
   <div rel="libns:sequenced_by">
   <div typeof="libns:Sequencer"
index 7e834eb21ec00f76954776885fab803c3261da9d..ebe2c59d261be2740b40ab5e6dc9e1c20e30ba7a 100644 (file)
@@ -1,9 +1,10 @@
 {% extends "base_site.html" %}
-{% load adminmedia humanize i18n %}
+{% load humanize i18n %}
+{% load staticfiles %}
 {% block extrahead %}
     <!-- App Stuff -->
-    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
-    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    <link type="text/css" rel="stylesheet" href="{% static "css/app.css" %}"/>
+    <script type="text/javascript" src="{% static "js/jquery.min.js" %}"></script>
 
     {% block additional_javascript %}
     {% endblock %}
index e4a6e0b6b03d7b7f907b7b4e641a941ca23704b6..411288c5b33ec244cde4ebe769dcc7bb3edeb261 100644 (file)
@@ -1,9 +1,10 @@
 {% extends "base_site.html" %}
-{% load adminmedia humanize i18n %}
+{% load humanize i18n %}
+{% load staticfiles %}
 {% block extrahead %}
     <!-- App Stuff -->
-    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
-    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    <link type="text/css" rel="stylesheet" href="{% static "css/app.css" %}" />
+    <script type="text/javascript" src="{% static "js/jquery.min.js" %}"></script>
 
     {% block additional_javascript %}
     {% endblock %}
index 399dae1c6940ac4abf21bfad5fa7a2e749188f9e..c95a78b6b0a903ff49b652dabc61d987952b4f8d 100644 (file)
@@ -1,5 +1,5 @@
 {% extends "base_site.html" %}
-{% load adminmedia admin_list i18n %}
+{% load admin_list i18n %}
 {% block extrahead %}
     <script type="text/javascript">
       $(document).ready(function() {
index 2eb84abc4ba73d0497610711ebce06a8313e6494..7c7eb508065dc850d2249f19bab2c5c0a33881c8 100644 (file)
@@ -1,5 +1,5 @@
 {% extends "base_site.html" %}
-{% load adminmedia admin_list i18n %}
+{% load admin_list i18n %}
 {% block extrahead %}
     <script type="text/javascript">
       $(document).ready(function() {
@@ -18,7 +18,7 @@
 {% block coltype %}flex{% endblock %}
 {% block content %}
 <div id="inventory-index-div" >
-  <div class="module{% if cl.has_filters %} filtered{% endif %}" id="changelist">
+  <div class="module{% if item_changelist.has_filters %} filtered{% endif %}" id="changelist">
     {% block search %}{% search_form item_changelist %}{% endblock %}
  
     {% block pagination %}{% pagination item_changelist %}{% endblock %}
     <div id="changelist-filter">
       <h2 >{% trans 'Filter' %}</h2>
       {% for spec in item_changelist.filter_specs %}
-         {% admin_list_filter cl spec %}
+         {% admin_list_filter item_changelist spec %}
          {% endfor %}
        </div>
     {% endif %}
     {% endblock %}
   {% block summary_stats %}
-  <table class="{% if cl.has_filters %} filtered{% endif %}">
+  <table class="{% if item_changelist.has_filters %} filtered{% endif %}">
     <thead >
       <tr >
         <td >Name</td>
@@ -42,7 +42,7 @@
       </tr>
     </thead>
     <tbody >
-      {% for itemtype in item_changelist.get_query_set %}
+      {% for itemtype in item_changelist.result_list %}
       <tr >
         <td ><a href="/inventory/it/{{ itemtype.name }}/">{{ itemtype.name }}</a></td>
         <td >{{ itemtype.description }}</td>
index 62ebdcef6ed7014eaaa3b8193b17242150797fdd..eb559b1ae74ac8cd02045fd6c635e6db780451ef 100644 (file)
@@ -1,5 +1,5 @@
 {% extends "base_site.html" %}
-{% load adminmedia admin_list i18n %}
+{% load admin_list i18n %}
 {% block extrahead %}
     <script type="text/javascript">
       $(document).ready(function() {
@@ -18,7 +18,7 @@
 {% block coltype %}flex{% endblock %}
 {% block content %}
 <div id="inventory-index-div" >
-  <div class="module{% if cl.has_filters %} filtered{% endif %}" id="changelist">
+  <div class="module{% if item_changelist.has_filters %} filtered{% endif %}" id="changelist">
     {% block search %}{% search_form item_changelist %}{% endblock %}
 
     {% block pagination %}{% pagination item_changelist %}{% endblock %}
     <div id="changelist-filter">
       <h2 >{% trans 'Filter' %}</h2>
       {% for spec in item_changelist.filter_specs %}
-         {% admin_list_filter cl spec %}
+         {% admin_list_filter item_changelist spec %}
          {% endfor %}
     </div>
     {% endif %}
     {% endblock %}
   {% block summary_stats %}
-  <table class="{% if cl.has_filters %} filtered{% endif %}">
+  <table class="{% if item_changelist.has_filters %} filtered{% endif %}">
     <thead >
       <tr >
         <td >UUID</td>
@@ -47,7 +47,7 @@
       </tr>
     </thead>
     <tbody >
-      {% for item in item_changelist.get_query_set %}
+      {% for item in item_changelist.result_list %}
       <tr about="{{ item.get_absolute_url }}">
         <td ><a href="{{ item.get_absolute_url}}" rel="invns:uuid">{{ item.uuid }}</a></td>
         <td ><a href="/inventory/{{ item.barcode_id }}/" rel="invns:barcode">{{ item.barcode_id }}</a></td>
index 4c56f0916e4fc9494d983f1f30d872e4fe9003ad..1a4797c7484a382b33865a907608f8dd583f11c2 100644 (file)
@@ -3,7 +3,7 @@
 {% block content %}
 {% if item %}
         <h2>Item Summary:</h2>
-        <a href="{% url htsworkflow.frontend.inventory.views.index %}{{item.uuid}}/print/">Print</a><br />
+        <a href="{% url "htsworkflow.frontend.inventory.views.index" %}{{item.uuid}}/print/">Print</a><br />
         <br />
         <b>UUID:</b> <span property="invns:uuid">{{item.uuid}}</span><br />
         <b>Barcode ID:</b> <span property="invns:barcode">{{ item.barcode_id }}</span><br />
index 1f053df6e6538c96311fb1afb11b8be56b6e109b..d3d108b97ac9200f0535312a8864f90c4acb871d 100644 (file)
@@ -1,9 +1,9 @@
 {% extends "base_site.html" %}
 {% load i18n %}
 
-{% block additional_css %}{% load adminmedia %}{{ block.super }}
-<link rel="stylesheet" type="text/css" href="{% admin_media_prefix %}css/base.css" />
-<link rel="stylesheet" type="text/css" href="{% admin_media_prefix %}css/login.css" />
+{% block additional_css %}{% load staticfiles %}{{ block.super }}
+<link rel="stylesheet" type="text/css" href="{% static "css/base.css" %}"/>
+<link rel="stylesheet" type="text/css" href="{% static "css/login.css" %}"/>
 {% endblock %}
 
 {% block title %}Login{% endblock %}
@@ -16,6 +16,7 @@
 <div id="container">
     <h1>Login</h1>
 <form action="{{ app_path }}" method="post" id="login-form">
+  {% csrf_token %}
   <div class="form-row">
     <label for="id_username">{% trans 'Username:' %}</label> <input type="text" name="username" id="id_username" />
   </div>
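
With CsrfViewMiddleware active (it is in Django's default middleware stack), a POST form without {% csrf_token %} draws a 403. The tag only renders its hidden input when the view supplies a RequestContext, matching the render_to_response idiom used elsewhere in this commit; a sketch with a hypothetical view name:

    from django.shortcuts import render_to_response
    from django.template import RequestContext

    def login_page(request):  # hypothetical view, for illustration
        # RequestContext runs the context processors that inject the
        # token consumed by {% csrf_token %}
        return render_to_response('registration/login.html',
                                  {'app_path': request.get_full_path()},
                                  context_instance=RequestContext(request))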
index ed24bbde1e21b3e8d2c4267fdcd2c20920dd4b98..a198cef383aa91e5b0a19c5ebd68413c3da928e4 100644 (file)
@@ -1,3 +1,4 @@
+{% load staticfiles %}
 <div id="librarydetail"
      about="{{lib.get_absolute_url}}"
      typeof="libns:Library">
@@ -5,7 +6,7 @@
     <h2>Library Name</h2>
     <b>Library ID</b>:
        <a href="{{lib.get_absolute_url}}"><span property="libns:library_id">{{ lib.id }}</span></a>
-       {% if user.is_staff %}<a href="{{lib.get_admin_url}}"><img class="icon_button" src="/media/img/admin/icon_changelink.gif" alt="Edit"/></a>{% endif %}
+       {% if user.is_staff %}<a href="{{lib.get_admin_url}}"><img class="icon_button" src="{% static "admin/img/icon_changelink.gif" %}" alt="Edit"/></a>{% endif %}
        <br />
     <b>Name</b>:
       <span property="libns:name">{{ lib.library_name }}</span>
index 9d2982c28ac53d6a90317fa0d6e232b0810d89e9..7ef6c54a947f20c4557713c2f66a0fda1962158e 100644 (file)
@@ -1,5 +1,5 @@
 {% extends "base_site.html" %}
-{% load adminmedia admin_list i18n %}
+{% load admin_list i18n %}
 
 {% block bodyclass %}change-list{% endblock %}
 {% block coltype %}flex{% endblock %}
index 152f322300a40e0724892de777ac0faafb82ad83..b71d0be89aa58c75496097c431d9e3d22bf5c86b 100644 (file)
@@ -1,5 +1,5 @@
 {% extends "base_site.html" %}
-{% load adminmedia admin_list i18n %}
+{% load admin_list i18n %}
 
 {% block bodyclass %}change-list{% endblock %}
 {% block coltype %}flex{% endblock %}
index 1db49dffbb92dd225c345d855d1af074118dfb66..65747dcac5194ca6d73191a3ad419d6c9f500fe1 100644 (file)
@@ -1,9 +1,10 @@
 {% extends "base_site.html" %}
-{% load adminmedia humanize i18n %}
+{% load staticfiles %}
+{% load humanize i18n %}
 {% block extrahead %}
     <!-- App Stuff -->
-    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
-    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    <link type="text/css" rel="stylesheet" href="{% static "css/app.css" %}"/>
+    <script type="text/javascript" src="{% static "js/jquery.min.js" %}"></script>
 
     {% block additional_javascript %}
     {% endblock %}
@@ -33,7 +34,7 @@
     <tr about="{{result.flowcell.get_absolute_url}}">
       <td property="libns:date" content="{{result.run_date|date:'Y-m-d\TH:i:s'}}" datatype="xsd:dateTime">{{ result.run_date|date}}</td>
       <td>{{ result.cycle }}</td>
-      <td><a href="{{result.flowcell.get_absolute_url}}"><span property="libns:flowcell_id">{{ result.flowcell_id }}</span></a>{% if user.is_staff %}<a href="{{result.flowcell.get_admin_url}}"><img class="icon_button" src="/media/img/admin/icon_changelink.gif" alt="Edit"/></a>{% endif%}</td>
+      <td><a href="{{result.flowcell.get_absolute_url}}"><span property="libns:flowcell_id">{{ result.flowcell_id }}</span></a>{% if user.is_staff %}<a href="{{result.flowcell.get_admin_url}}"><img class="icon_button" src="{% static "admin/img/icon_changelink.gif" %}" alt="Edit"/></a>{% endif%}</td>
       <td>{{ result.lane.lane_number }}</td>
       <td><a href="{{ result.summary_url }}">Summary</a></td>
       <td><a href="{{ result.result_url }}">{{ result.result_label }}</a></td>
           {% if user.is_staff %}
             <a href="{{lane.flowcell.get_admin_url}}">
                <img class="icon_button"
-                    src="/media/img/admin/icon_changelink.gif" alt="Edit"/>
+                    src="{% static "admin/img/icon_changelink.gif" %}" alt="Edit"/>
             </a>
           {% endif%}
         </td>
index ccba5614f9c7003b4f70822df06c30daeea4beb7..6ee9750cd8e0bc282fe57202596fd803cdd4f2f6 100644 (file)
@@ -1,12 +1,13 @@
 {% extends "base_site.html" %}
-{% load adminmedia admin_list i18n %}
+{% load admin_list i18n %}
+{% load staticfiles %}
 {% block extrahead %}
     <!-- App Stuff -->
-    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
+    <link type="text/css" rel="stylesheet" href="{% static "css/app.css" %}"/>
 
     {% block additional_javascript %}
-    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
-    <script type="text/javascript" src="/static/js/htsw.js"></script>
+    <script type="text/javascript" src="{% static "js/jquery.min.js" %}"></script>
+    <script type="text/javascript" src="{% static "js/htsw.js" %}"></script>
     <script type="text/javascript">
       $(document).ready(function() {
         $(window).resize(function() {
@@ -91,7 +92,7 @@
         <td  bgcolor="#66CDAA">{{ lib.lanes_run.1.1 }}</td>
         <td  bgcolor="#66CDAA">{{ lib.lanes_run.1.2 }}</td>
         {% if lib.is_archived %}
-          <td ><img src="/static/img/hdd_unmount.png" alt="Archived" /></td>
+          <td ><img src="{% static "img/hdd_unmount.png" %}" alt="Archived" /></td>
         {% else %}
           <td ></td>
         {% endif %}
index 84f17ef9797f7bbf061ea2d0c89caf0f11820c88..d22c9cd58b6ffad4aa5acea4df3ae72d4b868e4e 100644 (file)
@@ -1,9 +1,10 @@
 {% extends "base_site.html" %}
-{% load adminmedia humanize i18n %}
+{% load staticfiles %}
+{% load humanize i18n %}
 {% block extrahead %}
     <!-- App Stuff -->
-    <link type="text/css" rel="stylesheet" href="/static/css/app.css" />
-    <script type="text/javascript" src="/static/js/jquery.min.js"></script>
+    <link type="text/css" rel="stylesheet" href="{% static "css/app.css" %}" />
+    <script type="text/javascript" src="{% static 'js/jquery.min.js" %}></script>
     
     {% block additional_javascript %}
     {% endblock %}
index 97ef8f18b7a67d388971bc854bc5e9915c88ee9c..3c66304ac332df3f3717a9f07ab7619381794590 100644 (file)
@@ -1,9 +1,9 @@
-{% load adminmedia %}
+{% load staticfiles %}
 {% load i18n %}
 {% if cl.search_fields %}
 <div id="toolbar"><form id="changelist-search" action="" method="get">
 <div><!-- DIV needed for valid HTML -->
-<label for="searchbar"><img src="{% admin_media_prefix %}img/admin/icon_searchbox.png" alt="Search" /></label>
+<label for="searchbar"><img src="{% static "img/admin/icon_searchbox.png" %}" alt="Search" /></label>
 <input type="text" size="40" name="{{ search_var }}" value="{{ cl.query }}" id="searchbar" />
 <input type="submit" value="{% trans 'Go' %}" />
 {% if show_result_count %}
index a03330436e72343d668911aeda5bcdac5659263a..c21d9a0d12f6515c6c881d3d4fdc1c707c001130 100644 (file)
@@ -1,79 +1,64 @@
-from django.conf.urls.defaults import *
+from django.conf.urls import include, patterns, url
 from django.contrib import admin
 import django
 admin.autodiscover()
 
-# Databrowser:
-#from django.contrib import databrowse
-#from htsworkflow.frontend.samples.models import Library
-#databrowse.site.register(Library)
-#databrowse.site.register(FlowCell)
-
 from django.conf import settings
 
-
 urlpatterns = patterns('',
-    ('^accounts/login/$', 'django.contrib.auth.views.login'),
-    ('^accounts/logout/$', 'django.contrib.auth.views.logout'),
-    ('^accounts/logout_then_login/$', 'django.contrib.auth.views.logout_then_login'),
-    ('^accounts/password_change/$', 'django.contrib.auth.views.password_change'),
-    ('^accounts/password_change_done/$', 'django.contrib.auth.views.password_change_done'),
-    ('^accounts/profile/$', 'htsworkflow.frontend.samples.views.user_profile'),
+    url('^accounts/login/$', 'django.contrib.auth.views.login'),
+    url('^accounts/logout/$', 'django.contrib.auth.views.logout'),
+    url('^accounts/logout_then_login/$', 'django.contrib.auth.views.logout_then_login'),
+    url('^accounts/password_change/$', 'django.contrib.auth.views.password_change'),
+    url('^accounts/password_change_done/$', 'django.contrib.auth.views.password_change_done'),
+    #url('^accounts/profile/$', 'htsworkflow.frontend.samples.views.user_profile'),
     # Base:
-    (r'^eland_config/', include('htsworkflow.frontend.eland_config.urls')),
+    url(r'^eland_config/', include('htsworkflow.frontend.eland_config.urls')),
     ### MOVED Admin from here ###
-    #(r'^admin/(.*)', admin.site.root),
     # Experiments:
-    (r'^experiments/', include('htsworkflow.frontend.experiments.urls')),
-    # Flowcell:
-    (r'^lane/(?P<lane_pk>\w+)',
-     'htsworkflow.frontend.experiments.views.flowcell_lane_detail'),
-    (r'^flowcell/(?P<flowcell_id>\w+)/((?P<lane_number>\w+)/)?$',
-     'htsworkflow.frontend.experiments.views.flowcell_detail'),
-    # AnalysTrack:
-    #(r'^analysis/', include('htsworkflow.frontend.analysis.urls')),
-    # Inventory urls
-    (r'^inventory/', include('htsworkflow.frontend.inventory.urls')),
-    # Report Views:
-    (r'^reports/', include('htsworkflow.frontend.reports.urls')),
-    # Library browser
-    (r'^library/$', 'htsworkflow.frontend.samples.views.library'),
-    (r'^library/not_run/$',
-      'htsworkflow.frontend.samples.views.library_not_run'),
-    (r'^library/(?P<lib_id>\w+)/$',
-      'htsworkflow.frontend.samples.views.library_to_flowcells'),
-    (r'^lanes_for/$', 'htsworkflow.frontend.samples.views.lanes_for'),
-    (r'^lanes_for/(?P<username>\w+)', 'htsworkflow.frontend.samples.views.lanes_for'),
-    # library id to admin url
-    (r'^library_id_to_admin_url/(?P<lib_id>\w+)/$',
-     'htsworkflow.frontend.samples.views.library_id_to_admin_url'),
-    # sample / library information
-    (r'^samples/', include('htsworkflow.frontend.samples.urls')),
-    (r'^sequencer/(?P<sequencer_id>\w+)',
-       'htsworkflow.frontend.experiments.views.sequencer'),
-    # Raw result files
-    (r'^results/(?P<flowcell_id>\w+)/(?P<cnm>C[0-9]+-[0-9]+)/summary/',
-      'htsworkflow.frontend.samples.views.summaryhtm_fc_cnm'),
-    (r'^results/(?P<flowcell_id>\w+)/(?P<cnm>C[0-9]+-[0-9]+)/eland_result/(?P<lane>[1-8])',
-      'htsworkflow.frontend.samples.views.result_fc_cnm_eland_lane'),
-    (r'^results/(?P<fc_id>\w+)/(?P<cnm>C[1-9]-[0-9]+)/bedfile/(?P<lane>[1-8])/ucsc',
-      'htsworkflow.frontend.samples.views.bedfile_fc_cnm_eland_lane_ucsc'),
-    (r'^results/(?P<fc_id>\w+)/(?P<cnm>C[1-9]-[0-9]+)/bedfile/(?P<lane>[1-8])',
-      'htsworkflow.frontend.samples.views.bedfile_fc_cnm_eland_lane'),
-    (r'^bcmagic/', include('htsworkflow.frontend.bcmagic.urls')),
+    url(r'^experiments/', include('htsworkflow.frontend.experiments.urls')),
+    ### Flowcell:
+    url(r'^lane/(?P<lane_pk>\w+)',
+        'htsworkflow.frontend.experiments.views.flowcell_lane_detail'),
+    url(r'^flowcell/(?P<flowcell_id>\w+)/((?P<lane_number>\w+)/)?$',
+        'htsworkflow.frontend.experiments.views.flowcell_detail'),
+    ## AnalysTrack:
+    ##(r'^analysis/', include('htsworkflow.frontend.analysis.urls')),
+    ## Inventory urls
+    #url(r'^inventory/', include('htsworkflow.frontend.inventory.urls')),
+    ## Report Views:
+    ##url(r'^reports/', include('htsworkflow.frontend.reports.urls')),
+    ## Library browser
+    url(r'^library/$', 'htsworkflow.frontend.samples.views.library'),
+    url(r'^library/not_run/$',
+        'htsworkflow.frontend.samples.views.library_not_run'),
+    url(r'^library/(?P<lib_id>\w+)/$',
+        'htsworkflow.frontend.samples.views.library_to_flowcells'),
+    url(r'^lanes_for/$', 'htsworkflow.frontend.samples.views.lanes_for'),
+    url(r'^lanes_for/(?P<username>\w+)', 'htsworkflow.frontend.samples.views.lanes_for'),
+    ### library id to admin url
+    url(r'^library_id_to_admin_url/(?P<lib_id>\w+)/$',
+        'htsworkflow.frontend.samples.views.library_id_to_admin_url'),
+    ### sample / library information
+    url(r'^samples/', include('htsworkflow.frontend.samples.urls')),
+    url(r'^sequencer/(?P<sequencer_id>\w+)',
+        'htsworkflow.frontend.experiments.views.sequencer'),
+    ## Raw result files
+    #url(r'^results/(?P<flowcell_id>\w+)/(?P<cnm>C[0-9]+-[0-9]+)/summary/',
+      #'htsworkflow.frontend.samples.views.summaryhtm_fc_cnm'),
+    #url(r'^results/(?P<flowcell_id>\w+)/(?P<cnm>C[0-9]+-[0-9]+)/eland_result/(?P<lane>[1-8])',
+      #'htsworkflow.frontend.samples.views.result_fc_cnm_eland_lane'),
+    #url(r'^results/(?P<fc_id>\w+)/(?P<cnm>C[1-9]-[0-9]+)/bedfile/(?P<lane>[1-8])/ucsc',
+      #'htsworkflow.frontend.samples.views.bedfile_fc_cnm_eland_lane_ucsc'),
+    #url(r'^results/(?P<fc_id>\w+)/(?P<cnm>C[1-9]-[0-9]+)/bedfile/(?P<lane>[1-8])',
+      #'htsworkflow.frontend.samples.views.bedfile_fc_cnm_eland_lane'),
+    url(r'^bcmagic/', include('htsworkflow.frontend.bcmagic.urls')),
 
-    # databrowser
-    #(r'^databrowse/(.*)', databrowse.site.root)
+    url(r'^admin/', include(admin.site.urls)),
 )
 
-# Allow admin
-if hasattr(admin.site, 'urls'):
-  urlpatterns += patterns('', (r'^admin/', include(admin.site.urls)))
-else:
-  urlpatterns += patterns('', (r'^admin/(.*)', admin.site.root))
-
 if settings.DEBUG:
   urlpatterns += patterns('',
-      (r'^static/(?P<path>.*)$', 'django.views.static.serve',
+      url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
         {'document_root': settings.MEDIA_ROOT}),
   )
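
django.conf.urls.defaults is gone in Django 1.4; patterns, url and include now live in django.conf.urls. Wrapping each pattern in url() also allows giving routes names, which is what the quoted {% url "..." %} form used in the templates above resolves. A minimal sketch (the route name is hypothetical, not part of this commit):

    from django.conf.urls import patterns, url

    urlpatterns = patterns('',
        url(r'^library/$', 'htsworkflow.frontend.samples.views.library',
            name='library_index'),
    )

    # both reverse('library_index') and {% url "library_index" %}
    # would then resolve to '/library/'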
index beabfd11e081eaaba5a247ec73b398719ff72d94..8b4b191e65efc072db8b74e2c2786020b27cff7d 100644 (file)
@@ -4,3 +4,12 @@ Provide code to interact with the vendor tools to produce useable "raw" data.
 the illumina sub-package contains components to interact with the Illumina provided
 GAPipeline
 """
+import lxml.etree as ElementTree
+
+EUROPEAN_STRPTIME = "%d-%m-%Y"
+EUROPEAN_DATE_RE = "([0-9]{1,2}-[0-9]{1,2}-[0-9]{4,4})"
+VERSION_RE = "([0-9\.]+)"
+USER_RE = "([a-zA-Z0-9]+)"
+LANES_PER_FLOWCELL = 8
+LANE_LIST = range(1, LANES_PER_FLOWCELL + 1)
+
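
Hoisting these constants and the ElementTree alias into the package __init__ lets the submodules import them without going through runfolder, which is the import cycle the later hunks remove. A sketch of the intended usage, assuming lxml is installed:

    from datetime import datetime
    from htsworkflow.pipelines import EUROPEAN_STRPTIME, LANE_LIST

    # EUROPEAN_STRPTIME parses the day-month-year dates in run folders
    run_date = datetime.strptime('17-12-2013', EUROPEAN_STRPTIME)
    assert list(LANE_LIST) == [1, 2, 3, 4, 5, 6, 7, 8]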
index 2993d06e6b128b00e1beba1154a74c00b3463818..99f231dd73e748a0c88b528f4489968ab20b8428 100644 (file)
@@ -13,7 +13,7 @@ import re
 import sys
 import time
 
-from htsworkflow.pipelines.runfolder import \
+from htsworkflow.pipelines import \
    ElementTree, \
    VERSION_RE, \
    EUROPEAN_STRPTIME
@@ -365,6 +365,9 @@ def bustard(pathname):
     else:
         b = bustard_from_ga1(pathname)
 
+    if not b:
+        raise RuntimeError("Unable to parse base-call directory at %s" % (pathname,))
+
     return b
 
 def bustard_from_ga1(pathname):
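
bustard() previously returned None when neither parser recognized the directory, deferring the failure to whatever attribute access came next; raising RuntimeError keeps the failure at the factory. Callers probing speculative paths can catch it; a sketch with an illustrative path:

    from htsworkflow.pipelines import bustard

    try:
        b = bustard.bustard('/tmp/run/Data/Intensities/BaseCalls')
    except RuntimeError:
        # not a recognizable base-call directory; skip it
        b = None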
index 0624aaa290a170a57593d85f07b0b642a1743e46..2ad853c1fe2053f91ffaf19268264b159798e520 100644 (file)
@@ -7,7 +7,7 @@ import os
 from optparse import OptionParser
 import sys
 
-from htsworkflow.version import version
+from htsworkflow.util.version import version
 from htsworkflow.util.opener import autoopen
 from htsworkflow.util.conversion import parse_slice
 
index 84eb397930c41ad575f865795921fcf4c0bde0ae..a508a494c8c24900e8574c75f43c3616bf309814 100644 (file)
@@ -10,7 +10,7 @@ import stat
 import sys
 import types
 
-from htsworkflow.pipelines.runfolder import ElementTree, LANE_LIST
+from htsworkflow.pipelines import ElementTree, LANE_LIST
 from htsworkflow.pipelines.samplekey import SampleKey
 from htsworkflow.pipelines.genomemap import GenomeMap
 from htsworkflow.util.ethelp import indent, flatten
index de6b042a5ddd7cc06d0b7c28e99841edc5579ce9..3519eb0123600ef99602cccb25210d119a62a073 100644 (file)
@@ -17,16 +17,16 @@ import os
 import re
 import time
 
-from htsworkflow.pipelines.runfolder import \
+from htsworkflow.pipelines import \
    ElementTree, \
    VERSION_RE, \
    EUROPEAN_STRPTIME
 
 LOGGER = logging.getLogger(__name__)
 
-__docformat__ = "restructuredtext en"
-
 class Firecrest(object):
+    """Gather information about older firecrest runs
+    """
     XML_VERSION=1
 
     # xml tag names
@@ -39,6 +39,12 @@ class Firecrest(object):
     MATRIX = 'matrix'
 
     def __init__(self, xml=None):
+        """Initialize a Firecrest object
+        
+        consider using factory :function:firecrest
+        
+        :param xml: xml serialzation element to initialze from [optional]
+        """
         self.start = None
         self.stop = None
         self.version = None
@@ -58,6 +64,8 @@ class Firecrest(object):
     time = property(_get_time, doc='return run time as seconds since epoch')
 
     def dump(self):
+        """Report debugginf information
+        """
         print "Starting cycle:", self.start
         print "Ending cycle:", self.stop
         print "Firecrest version:", self.version
@@ -65,6 +73,8 @@ class Firecrest(object):
         print "user:", self.user
 
     def get_elements(self):
+        """Return XML serialization structure.
+        """
         attribs = {'version': str(Firecrest.XML_VERSION) }
         root = ElementTree.Element(Firecrest.FIRECREST, attrib=attribs)
         version = ElementTree.SubElement(root, Firecrest.SOFTWARE_VERSION)
index 4dbbc2dce6dd12f32a2ae38aa55ae69237d4abfb..6dfcf68fc235775a8280dd1bc0719e4fd513c7e4 100644 (file)
@@ -3,7 +3,7 @@
 from glob import glob
 import os
 import collections
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 vldInfo = collections.namedtuple('vldInfo', 'name is_link')
 
index e0d84cf49fb21d3f31841ae4857e7780365db5c3..2eaff677e6d90c75848a24a43f4b28fad8d2beff 100644 (file)
@@ -12,7 +12,7 @@ from htsworkflow.pipelines.summary import Summary, SummaryGA, SummaryHiSeq
 from htsworkflow.pipelines.eland import eland, ELAND
 from htsworkflow.pipelines.samplekey import SampleKey
 
-from htsworkflow.pipelines.runfolder import \
+from htsworkflow.pipelines import \
    ElementTree, \
    EUROPEAN_STRPTIME, \
    LANES_PER_FLOWCELL, \
@@ -104,7 +104,7 @@ class Gerald(Alignment):
 
         timestamp = self.tree.findtext('ChipWideRunParameters/TIME_STAMP')
         if timestamp is not None:
-            epochstamp = time.mktime(time.strptime(timestamp, '%c'))
+            epochstamp = time.mktime(time.strptime(timestamp))
             return datetime.fromtimestamp(epochstamp)
         return super(Gerald, self)._get_date()
     date = property(_get_date)
@@ -177,7 +177,7 @@ class CASAVA(Alignment):
         if self.tree is None:
             return
         if len(self.tree.xpath('TIME_STAMP')) == 0:
-            time_stamp = self.date.strftime('%c')
+            time_stamp = self.date.strftime('%a %b %d %H:%M:%S %Y')
             time_element = ElementTree.Element('TIME_STAMP')
             time_element.text = time_stamp
             self.tree.append(time_element)
@@ -187,7 +187,10 @@ class CASAVA(Alignment):
             return None
         time_element = self.tree.xpath('TIME_STAMP')
         if len(time_element) == 1:
-            return datetime.strptime(time_element[0].text, '%c')
+            timetuple = time.strptime(
+                time_element[0].text.strip(),
+                "%a %b %d %H:%M:%S %Y")
+            return datetime(*timetuple[:6])
         return super(CASAVA, self)._get_date()
     date = property(_get_date)
 
index c2cc6a3ffee289ccbdda93160c0d9fac5bf8afa7..6c3acbe19b50c8959b5deb011e36eb9cf7396212 100644 (file)
@@ -19,7 +19,7 @@ import re
 import stat
 import time
 
-from htsworkflow.pipelines.runfolder import \
+from htsworkflow.pipelines import \
    ElementTree, \
    VERSION_RE, \
    EUROPEAN_STRPTIME
index 2f017eb2dc0f4817448b4bae14eebd5b2657772f..c33d0143228568b36d216986bf0aad47f60d57b9 100644 (file)
@@ -8,7 +8,7 @@ import numpy
 import sys
 import tarfile
 
-from htsworkflow.version import version
+from htsworkflow.util.version import version
 from htsworkflow.util.conversion import parse_slice
 
 
index 29f49bb32f8c3914976326d3b3b329db0021357e..fe2b9428cd01c38775c6a743dc2588b140425833 100644 (file)
@@ -22,7 +22,7 @@ from htsworkflow.util.url import normalize_url
 from htsworkflow.pipelines.genome_mapper import \
      getAvailableGenomes, \
      constructMapperDict
-from htsworkflow.pipelines.runfolder import LANE_LIST
+from htsworkflow.pipelines import LANE_LIST
 # JSON dictionaries use strings
 LANE_LIST_JSON = [ str(l) for l in LANE_LIST ]
 
index 59fd2131c1b9b810e53b16ef71237831a117e771..669c5f03363dfc8c647730133af71678319d0336 100644 (file)
@@ -1,5 +1,4 @@
-"""
-Core information needed to inspect a runfolder.
+"""Core information needed to inspect a runfolder.
 """
 from glob import glob
 import logging
@@ -12,17 +11,17 @@ import sys
 import tarfile
 import time
 
-import lxml.etree as ElementTree
-
 LOGGER = logging.getLogger(__name__)
 
-EUROPEAN_STRPTIME = "%d-%m-%Y"
-EUROPEAN_DATE_RE = "([0-9]{1,2}-[0-9]{1,2}-[0-9]{4,4})"
-VERSION_RE = "([0-9\.]+)"
-USER_RE = "([a-zA-Z0-9]+)"
-LANES_PER_FLOWCELL = 8
-LANE_LIST = range(1, LANES_PER_FLOWCELL + 1)
-
+from htsworkflow.pipelines import firecrest
+from htsworkflow.pipelines import ipar
+from htsworkflow.pipelines import bustard
+from htsworkflow.pipelines import gerald
+from htsworkflow.pipelines import ElementTree, \
+                                  EUROPEAN_STRPTIME, EUROPEAN_DATE_RE, \
+                                  VERSION_RE, USER_RE, \
+                                  LANES_PER_FLOWCELL, LANE_LIST
+from htsworkflow.pipelines.samplekey import LANE_SAMPLE_KEYS
 from htsworkflow.util.alphanum import alphanum
 from htsworkflow.util.ethelp import indent, flatten
 from htsworkflow.util.queuecommands import QueueCommands
@@ -30,14 +29,34 @@ from htsworkflow.util.queuecommands import QueueCommands
 from htsworkflow.pipelines import srf
 
 class PipelineRun(object):
-    """
-    Capture "interesting" information about a pipeline run
+    """Capture "interesting" information about a pipeline run
+    
+    :Variables:
+      - `pathname` location of the root of this runfolder
+      - `serialization_filename` read only property containing name of run xml file
+      - `flowcell_id` read-only property containing flowcell id (bar code)
+      - `datadir` location of the runfolder data dir.
+      - `image_analysis` generic name for Firecrest or IPAR image analysis
+      - `bustard` summary base caller
+      - `gerald` summary of sequence alignment and quality control metrics
     """
     XML_VERSION = 1
     PIPELINE_RUN = 'PipelineRun'
     FLOWCELL_ID = 'FlowcellID'
 
     def __init__(self, pathname=None, flowcell_id=None, xml=None):
+        """Initialize a PipelineRun object
+        
+        :Parameters:
+          - `pathname` the root directory of this run folder.
+          - `flowcell_id` the flowcell ID in case it can't be determined
+          - `xml` Allows initializing an object from a serialized xml file.
+          
+        :Types:
+          - `pathname` str
+          - `flowcell_id` str
+          - `ElementTree` str
+        """
         if pathname is not None:
           self.pathname = os.path.normpath(pathname)
         else:
@@ -45,6 +64,7 @@ class PipelineRun(object):
         self._name = None
         self._flowcell_id = flowcell_id
         self.datadir = None
+        self.suffix = None
         self.image_analysis = None
         self.bustard = None
         self.gerald = None
@@ -53,6 +73,10 @@ class PipelineRun(object):
           self.set_elements(xml)
 
     def _get_flowcell_id(self):
+        """Return the flowcell ID
+        
+        Attempts to find the flowcell ID through several mechanisms.
+        """
         # extract flowcell ID
         if self._flowcell_id is None:
             self._flowcell_id = self._get_flowcell_id_from_runinfo()
@@ -72,6 +96,8 @@ class PipelineRun(object):
 
     def _get_flowcell_id_from_flowcellid(self):
         """Extract flowcell id from a Config/FlowcellId.xml file
+        
+        :return: flowcell_id or None if not found
         """
         config_dir = os.path.join(self.pathname, 'Config')
         flowcell_id_path = os.path.join(config_dir, 'FlowcellId.xml')
@@ -81,6 +107,8 @@ class PipelineRun(object):
 
     def _get_flowcell_id_from_runinfo(self):
         """Read RunInfo file for flowcell id
+
+        :return: flowcell_id or None if not found
         """
         runinfo = os.path.join(self.pathname, 'RunInfo.xml')
         if os.path.exists(runinfo):
@@ -90,9 +118,10 @@ class PipelineRun(object):
             if len(fc_nodes) == 1:
                 return fc_nodes[0].text
 
-
     def _get_flowcell_id_from_path(self):
         """Guess a flowcell name from the path
+
+        :return: flowcell_id or None if not found
         """
         path_fields = self.pathname.split('_')
         if len(path_fields) > 0:
@@ -108,26 +137,46 @@ class PipelineRun(object):
             return None
     runfolder_name = property(_get_runfolder_name)
 
-    def get_elements(self):
+    def _get_run_dirname(self):
+        """Return the name of the directory holding result files from one analysis
+
+        For pre-multiplexing runs this is just the cycle range, e.g. C1-123.
+        For post-multiplexing runs the "suffix" used to differentiate
+        runs is appended to the cycle range.
+        E.g. Unaligned_6mm may produce C1-200_6mm
         """
-        make one master xml file from all of our sub-components.
+        if self.image_analysis is None:
+            raise ValueError("Not initialized yet")
+        start = self.image_analysis.start
+        stop = self.image_analysis.stop
+        cycle_fragment = "C%d-%d" % (start, stop)
+        if self.suffix:
+            cycle_fragment += self.suffix
+
+        return cycle_fragment
+    run_dirname = property(_get_run_dirname)
+
+    def get_elements(self):
+        """make one master xml file from all of our sub-components.
+        
+        :return: an ElementTree containing all available pipeline
+                 run xml compoents.
         """
         root = ElementTree.Element(PipelineRun.PIPELINE_RUN)
         flowcell = ElementTree.SubElement(root, PipelineRun.FLOWCELL_ID)
         flowcell.text = self.flowcell_id
         root.append(self.image_analysis.get_elements())
         root.append(self.bustard.get_elements())
-        root.append(self.gerald.get_elements())
+        if self.gerald:
+            root.append(self.gerald.get_elements())
         return root
 
     def set_elements(self, tree):
-        # this file gets imported by all the others,
-        # so we need to hide the imports to avoid a cyclic imports
-        from htsworkflow.pipelines import firecrest
-        from htsworkflow.pipelines import ipar
-        from htsworkflow.pipelines import bustard
-        from htsworkflow.pipelines import gerald
+        """Initialize a PipelineRun object from an run.xml ElementTree.
 
+        :param tree: parsed ElementTree
+        :type tree: ElementTree
+        """
         tag = tree.tag.lower()
         if tag != PipelineRun.PIPELINE_RUN.lower():
           raise ValueError('Pipeline Run Expecting %s got %s' % (
@@ -151,27 +200,47 @@ class PipelineRun(object):
           else:
             LOGGER.warn('PipelineRun unrecognized tag %s' % (tag,))
 
-    def _get_run_name(self):
-        """
-        Given a run tuple, find the latest date and use that as our name
+    def _get_serialization_filename(self):
+        """Compute the filename for the run xml file
+        
+        Attempts to find the latest date from all of the run 
+        components.
+        
+        :return: filename run_{flowcell id}_{timestamp}.xml
+        :rtype: str
         """
         if self._name is None:
-          tmax = max(self.image_analysis.time, self.bustard.time, self.gerald.time)
+          components = [self.image_analysis, self.bustard, self.gerald]
+          tmax = max([ c.time for c in components if c ])
           timestamp = time.strftime('%Y-%m-%d', time.localtime(tmax))
           self._name = 'run_' + self.flowcell_id + "_" + timestamp + '.xml'
         return self._name
-    name = property(_get_run_name)
+    serialization_filename = property(_get_serialization_filename)
 
     def save(self, destdir=None):
+        """Save a run xml file.
+        
+        :param destdir: Directory name to save to, uses the current directory
+                        if not specified.
+        :type destdir: str
+        """
         if destdir is None:
             destdir = ''
-        LOGGER.info("Saving run report " + self.name)
+        LOGGER.info("Saving run report " + self.serialization_filename)
         xml = self.get_elements()
         indent(xml)
-        dest_pathname = os.path.join(destdir, self.name)
+        dest_pathname = os.path.join(destdir, self.serialization_filename)
         ElementTree.ElementTree(xml).write(dest_pathname)
 
     def load(self, filename):
+        """Load a run xml into this object.
+        
+        :Parameters:
+          - `filename` location of a run xml file
+          
+        :Types:
+          - `filename` str
+        """
         LOGGER.info("Loading run report from " + filename)
         tree = ElementTree.parse(filename).getroot()
         self.set_elements(tree)
@@ -181,7 +250,7 @@ def load_pipeline_run_xml(pathname):
     Load and instantiate a Pipeline run from a run xml file
 
     :Parameters:
-      - `pathname` location of an run xml file
+      - `pathname` location of a run xml file
 
     :Returns: initialized PipelineRun object
     """
@@ -190,73 +259,16 @@ def load_pipeline_run_xml(pathname):
     return run
 
 def get_runs(runfolder, flowcell_id=None):
-    """
-    Search through a run folder for all the various sub component runs
-    and then return a PipelineRun for each different combination.
+    """Find all runs associated with a runfolder.
+    
+    We end up with multiple analysis runs as we sometimes
+    need to try with different parameters. This attempts
+    to return a list of all the various runs.
 
     For example if there are two different GERALD runs, this will
     generate two different PipelineRun objects, that differ
     in there gerald component.
     """
-    from htsworkflow.pipelines import firecrest
-    from htsworkflow.pipelines import ipar
-    from htsworkflow.pipelines import bustard
-    from htsworkflow.pipelines import gerald
-
-    def scan_post_image_analysis(runs, runfolder, datadir, image_analysis,
-                                 pathname):
-        added = build_aligned_runs(image_analysis, runs, datadir, runfolder)
-        # If we're a multiplexed run, don't look for older run type.
-        if added > 0:
-            return
-
-        LOGGER.info("Looking for bustard directories in %s" % (pathname,))
-        bustard_dirs = glob(os.path.join(pathname, "Bustard*"))
-        # RTA BaseCalls looks enough like Bustard.
-        bustard_dirs.extend(glob(os.path.join(pathname, "BaseCalls")))
-        for bustard_pathname in bustard_dirs:
-            LOGGER.info("Found bustard directory %s" % (bustard_pathname,))
-            b = bustard.bustard(bustard_pathname)
-            build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname, runfolder)
-
-
-    def build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname, runfolder):
-        start = len(runs)
-        gerald_glob = os.path.join(bustard_pathname, 'GERALD*')
-        LOGGER.info("Looking for gerald directories in %s" % (pathname,))
-        for gerald_pathname in glob(gerald_glob):
-            LOGGER.info("Found gerald directory %s" % (gerald_pathname,))
-            try:
-                g = gerald.gerald(gerald_pathname)
-                p = PipelineRun(runfolder, flowcell_id)
-                p.datadir = datadir
-                p.image_analysis = image_analysis
-                p.bustard = b
-                p.gerald = g
-                runs.append(p)
-            except IOError, e:
-                LOGGER.error("Ignoring " + str(e))
-        return len(runs) - start
-
-
-    def build_aligned_runs(image_analysis, runs, datadir, runfolder):
-        start = len(runs)
-        aligned_glob = os.path.join(runfolder, 'Aligned*')
-        for aligned in glob(aligned_glob):
-            LOGGER.info("Found aligned directory %s" % (aligned,))
-            try:
-                g = gerald.gerald(aligned)
-                p = PipelineRun(runfolder, flowcell_id)
-                bustard_pathname = os.path.join(runfolder, g.runfolder_name)
-
-                p.datadir = datadir
-                p.image_analysis = image_analysis
-                p.bustard = bustard.bustard(bustard_pathname)
-                p.gerald = g
-                runs.append(p)
-            except IOError, e:
-                LOGGER.error("Ignoring " + str(e))
-        return len(runs) - start
     datadir = os.path.join(runfolder, 'Data')
 
     LOGGER.info('Searching for runs in ' + datadir)
@@ -271,7 +283,7 @@ def get_runs(runfolder, flowcell_id=None):
             )
         else:
             scan_post_image_analysis(
-                runs, runfolder, datadir, image_analysis, firecrest_pathname
+                runs, runfolder, datadir, image_analysis, firecrest_pathname, flowcell_id
             )
     # scan for IPAR directories
     ipar_dirs = glob(os.path.join(datadir, "IPAR_*"))
@@ -286,11 +298,111 @@ def get_runs(runfolder, flowcell_id=None):
             )
         else:
             scan_post_image_analysis(
-                runs, runfolder, datadir, image_analysis, ipar_pathname
+                runs, runfolder, datadir, image_analysis, ipar_pathname, flowcell_id
             )
 
     return runs
 
+def scan_post_image_analysis(runs, runfolder, datadir, image_analysis,
+                             pathname, flowcell_id):
+    added = build_hiseq_runs(image_analysis, runs, datadir, runfolder, flowcell_id)
+    # If we're a multiplexed run, don't look for older run type.
+    if added > 0:
+        return
+
+    LOGGER.info("Looking for bustard directories in %s" % (pathname,))
+    bustard_dirs = glob(os.path.join(pathname, "Bustard*"))
+    # RTA BaseCalls looks enough like Bustard.
+    bustard_dirs.extend(glob(os.path.join(pathname, "BaseCalls")))
+    for bustard_pathname in bustard_dirs:
+        LOGGER.info("Found bustard directory %s" % (bustard_pathname,))
+        b = bustard.bustard(bustard_pathname)
+        build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname,
+                          runfolder, flowcell_id)
+
+
+def build_gerald_runs(runs, b, image_analysis, bustard_pathname, datadir, pathname, runfolder,
+                      flowcell_id):
+    start = len(runs)
+    gerald_glob = os.path.join(bustard_pathname, 'GERALD*')
+    LOGGER.info("Looking for gerald directories in %s" % (pathname,))
+    for gerald_pathname in glob(gerald_glob):
+        LOGGER.info("Found gerald directory %s" % (gerald_pathname,))
+        try:
+            g = gerald.gerald(gerald_pathname)
+            p = PipelineRun(runfolder, flowcell_id)
+            p.datadir = datadir
+            p.image_analysis = image_analysis
+            p.bustard = b
+            p.gerald = g
+            runs.append(p)
+        except IOError as e:
+            LOGGER.error("Ignoring " + str(e))
+    return len(runs) - start
+
+
+def build_hiseq_runs(image_analysis, runs, datadir, runfolder, flowcell_id):
+    start = len(runs)
+    aligned_glob = os.path.join(runfolder, 'Aligned*')
+    unaligned_glob = os.path.join(runfolder, 'Unaligned*')
+
+    aligned_paths = glob(aligned_glob)
+    unaligned_paths = glob(unaligned_glob)
+
+    matched_paths = hiseq_match_aligned_unaligned(aligned_paths, unaligned_paths)
+    LOGGER.debug("Matched HiSeq analysis: %s", str(matched_paths))
+
+    for aligned, unaligned, suffix in matched_paths:
+        if unaligned is None:
+            LOGGER.warn("Aligned directory %s without matching unalinged, skipping", aligned)
+            continue
+
+        try:
+            p = PipelineRun(runfolder, flowcell_id)
+            p.datadir = datadir
+            p.suffix = suffix
+            p.image_analysis = image_analysis
+            p.bustard = bustard.bustard(unaligned)
+            if aligned:
+                p.gerald = gerald.gerald(aligned)
+            runs.append(p)
+        except (IOError, RuntimeError) as e:
+           LOGGER.error("Exception %s", str(e))
+            LOGGER.error("Skipping run in %s", flowcell_id)
+    return len(runs) - start
+
+def hiseq_match_aligned_unaligned(aligned, unaligned):
+    """Match aligned and unaligned folders from seperate lists
+    """
+
+    aligned_by_suffix = build_dir_dict_by_suffix('Aligned', aligned)
+    unaligned_by_suffix = build_dir_dict_by_suffix('Unaligned', unaligned)
+
+    keys = set(aligned_by_suffix.keys()).union(set(unaligned_by_suffix.keys()))
+
+    matches = []
+    for key in keys:
+        a = aligned_by_suffix.get(key)
+        u = unaligned_by_suffix.get(key)
+        matches.append((a, u, key))
+    return matches
+
+def build_dir_dict_by_suffix(prefix, dirnames):
+    """Build a dictionary indexed by suffix of last directory name.
+
+    It assumes a constant prefix
+    """
+    regex = re.compile('%s(?P<suffix>[\w]*)' % (prefix,))
+
+    by_suffix = {}
+    for absname in dirnames:
+        basename = os.path.basename(absname)
+        match = regex.match(basename)
+        if match:
+            by_suffix[match.group('suffix')] = absname
+    return by_suffix
+
 def get_specific_run(gerald_dir):
     """
     Given a gerald directory, construct a PipelineRun out of its parents
@@ -427,11 +539,16 @@ def summary_report(runs):
     Summarize cluster numbers and mapped read counts for a runfolder
     """
     report = []
+    eland_keys = []
     for run in runs:
         # print a run name?
-        report.append('Summary for %s' % (run.name,))
+        report.append('Summary for %s' % (run.serialization_filename,))
         # sort the report
-        eland_keys = sorted(run.gerald.eland_results.keys())
+        if run.gerald:
+            eland_keys = sorted(run.gerald.eland_results.keys())
+        else:
+            report.append("Alignment not done, no report possible")
+
     for lane_id in eland_keys:
         report.extend(summarize_lane(run.gerald, lane_id))
         report.append('---')
@@ -446,14 +563,14 @@ def is_compressed(filename):
     else:
         return False
 
-def save_flowcell_reports(data_dir, cycle_dir):
+def save_flowcell_reports(data_dir, run_dirname):
     """
     Save the flowcell quality reports
     """
     data_dir = os.path.abspath(data_dir)
     status_file = os.path.join(data_dir, 'Status.xml')
     reports_dir = os.path.join(data_dir, 'reports')
-    reports_dest = os.path.join(cycle_dir, 'flowcell-reports.tar.bz2')
+    reports_dest = os.path.join(run_dirname, 'flowcell-reports.tar.bz2')
     if os.path.exists(reports_dir):
         cmd_list = [ 'tar', 'cjvf', reports_dest, 'reports/' ]
         if os.path.exists(status_file):
@@ -466,21 +583,21 @@ def save_flowcell_reports(data_dir, cycle_dir):
         os.chdir(cwd)
 
 
-def save_summary_file(pipeline, cycle_dir):
+def save_summary_file(pipeline, run_dirname):
     # Copy Summary.htm
     gerald_object = pipeline.gerald
     gerald_summary = os.path.join(gerald_object.pathname, 'Summary.htm')
     status_files_summary = os.path.join(pipeline.datadir, 'Status_Files', 'Summary.htm')
     if os.path.exists(gerald_summary):
-        LOGGER.info('Copying %s to %s' % (gerald_summary, cycle_dir))
-        shutil.copy(gerald_summary, cycle_dir)
+        LOGGER.info('Copying %s to %s' % (gerald_summary, run_dirname))
+        shutil.copy(gerald_summary, run_dirname)
     elif os.path.exists(status_files_summary):
-        LOGGER.info('Copying %s to %s' % (status_files_summary, cycle_dir))
-        shutil.copy(status_files_summary, cycle_dir)
+        LOGGER.info('Copying %s to %s' % (status_files_summary, run_dirname))
+        shutil.copy(status_files_summary, run_dirname)
     else:
         LOGGER.info('Summary file %s was not found' % (summary_path,))
 
-def save_ivc_plot(bustard_object, cycle_dir):
+def save_ivc_plot(bustard_object, run_dirname):
     """
     Save the IVC page and its supporting images
     """
@@ -488,12 +605,12 @@ def save_ivc_plot(bustard_object, cycle_dir):
     plot_image_path = os.path.join(bustard_object.pathname, 'Plots')
     plot_images = os.path.join(plot_image_path, 's_?_[a-z]*.png')
 
-    plot_target_path = os.path.join(cycle_dir, 'Plots')
+    plot_target_path = os.path.join(run_dirname, 'Plots')
 
     if os.path.exists(plot_html):
         LOGGER.debug("Saving %s" % (plot_html,))
         LOGGER.debug("Saving %s" % (plot_images,))
-        shutil.copy(plot_html, cycle_dir)
+        shutil.copy(plot_html, run_dirname)
         if not os.path.exists(plot_target_path):
             os.mkdir(plot_target_path)
         for plot_file in glob(plot_images):
@@ -502,7 +619,7 @@ def save_ivc_plot(bustard_object, cycle_dir):
         LOGGER.warning('Missing IVC.html file, not archiving')
 
 
-def compress_score_files(bustard_object, cycle_dir):
+def compress_score_files(bustard_object, run_dirname):
     """
     Compress score files into our result directory
     """
@@ -520,7 +637,7 @@ def compress_score_files(bustard_object, cycle_dir):
 
     tar_cmd = ['tar', 'c'] + score_files
     bzip_cmd = [ 'bzip2', '-9', '-c' ]
-    tar_dest_name = os.path.join(cycle_dir, 'scores.tar.bz2')
+    tar_dest_name = os.path.join(run_dirname, 'scores.tar.bz2')
     tar_dest = open(tar_dest_name, 'w')
     LOGGER.info("Compressing score files from %s" % (scores_path,))
     LOGGER.info("Running tar: " + " ".join(tar_cmd[:10]))
@@ -534,7 +651,7 @@ def compress_score_files(bustard_object, cycle_dir):
     tar.wait()
 
 
-def compress_eland_results(gerald_object, cycle_dir, num_jobs=1):
+def compress_eland_results(gerald_object, run_dirname, num_jobs=1):
     """
     Compress eland result files into the archive directory
     """
@@ -549,7 +666,7 @@ def compress_eland_results(gerald_object, cycle_dir, num_jobs=1):
                 "Lane ID %s does not have a filename." % (eland_lane.lane_id,))
             else:
               path, name = os.path.split(source_name)
-              dest_name = os.path.join(cycle_dir, name)
+              dest_name = os.path.join(run_dirname, name)
               LOGGER.info("Saving eland file %s to %s" % \
                          (source_name, dest_name))
 
@@ -590,52 +707,56 @@ def extract_results(runs, output_base_dir=None, site="individual", num_jobs=1, r
         if not os.path.exists(result_dir):
             os.mkdir(result_dir)
 
-        # create cycle_dir
-        cycle = "C%d-%d" % (r.image_analysis.start, r.image_analysis.stop)
-        LOGGER.info("Filling in %s" % (cycle,))
-        cycle_dir = os.path.join(result_dir, cycle)
-        cycle_dir = os.path.abspath(cycle_dir)
-        if os.path.exists(cycle_dir):
-            LOGGER.error("%s already exists, not overwriting" % (cycle_dir,))
+        # create directory to add this runs results to
+        LOGGER.info("Filling in %s" % (r.run_dirname,))
+        run_dirname = os.path.join(result_dir, r.run_dirname)
+        run_dirname = os.path.abspath(run_dirname)
+        if os.path.exists(run_dirname):
+            LOGGER.error("%s already exists, not overwriting" % (run_dirname,))
             continue
         else:
-            os.mkdir(cycle_dir)
+            os.mkdir(run_dirname)
 
         # save run file
-        r.save(cycle_dir)
+        r.save(run_dirname)
 
         # save illumina flowcell status report
         save_flowcell_reports(os.path.join(r.image_analysis.pathname, '..'),
-                              cycle_dir)
+                              run_dirname)
 
         # save stuff from bustard
         # grab IVC plot
-        save_ivc_plot(r.bustard, cycle_dir)
+        save_ivc_plot(r.bustard, run_dirname)
 
         # build base call saving commands
         if site is not None:
-            save_raw_data(num_jobs, r, site, raw_format, cycle_dir)
+            save_raw_data(num_jobs, r, site, raw_format, run_dirname)
 
         # save stuff from GERALD
         # copy stuff out of the main run
-        g = r.gerald
+        if r.gerald:
+            g = r.gerald
 
-        # save summary file
-        save_summary_file(r, cycle_dir)
+            # save summary file
+            save_summary_file(r, run_dirname)
 
-        # compress eland result files
-        compress_eland_results(g, cycle_dir, num_jobs)
+            # compress eland result files
+            compress_eland_results(g, run_dirname, num_jobs)
 
         # md5 all the compressed files once we're done
-        md5_commands = srf.make_md5_commands(cycle_dir)
-        srf.run_commands(cycle_dir, md5_commands, num_jobs)
+        md5_commands = srf.make_md5_commands(run_dirname)
+        srf.run_commands(run_dirname, md5_commands, num_jobs)
 
-def save_raw_data(num_jobs, r, site, raw_format, cycle_dir):
+def save_raw_data(num_jobs, r, site, raw_format, run_dirname):
     lanes = []
-    for lane in r.gerald.lanes:
-        lane_parameters = r.gerald.lanes.get(lane, None)
-        if lane_parameters is not None:
-            lanes.append(lane)
+    if r.gerald:
+        for lane in r.gerald.lanes:
+            lane_parameters = r.gerald.lanes.get(lane, None)
+            if lane_parameters is not None:
+                lanes.append(lane)
+    else:
+        # assume default list of lanes
+        lanes = LANE_SAMPLE_KEYS
 
     run_name = srf.pathname_to_run_name(r.pathname)
     seq_cmds = []
@@ -644,13 +765,14 @@ def save_raw_data(num_jobs, r, site, raw_format, cycle_dir):
 
     LOGGER.info("Raw Format is: %s" % (raw_format, ))
     if raw_format == 'fastq':
-        rawpath = os.path.join(r.pathname, r.gerald.runfolder_name)
+        LOGGER.info("Reading fastq files from %s", r.bustard.pathname)
+        rawpath = os.path.join(r.pathname, r.bustard.pathname)
         LOGGER.info("raw data = %s" % (rawpath,))
-        srf.copy_hiseq_project_fastqs(run_name, rawpath, site, cycle_dir)
+        srf.copy_hiseq_project_fastqs(run_name, rawpath, site, run_dirname)
     elif raw_format == 'qseq':
-        seq_cmds = srf.make_qseq_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir)
+        seq_cmds = srf.make_qseq_commands(run_name, r.bustard.pathname, lanes, site, run_dirname)
     elif raw_format == 'srf':
-        seq_cmds = srf.make_srf_commands(run_name, r.bustard.pathname, lanes, site, cycle_dir, 0)
+        seq_cmds = srf.make_srf_commands(run_name, r.bustard.pathname, lanes, site, run_dirname, 0)
     else:
         raise ValueError('Unknown --raw-format=%s' % (raw_format))
     srf.run_commands(r.bustard.pathname, seq_cmds, num_jobs)
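
For reference, a minimal sketch (hypothetical paths, nothing read from disk) of
how the new hiseq_match_aligned_unaligned() pairs Aligned*/Unaligned*
directories by their shared suffix; only the module path and function name come
from this change:

    from htsworkflow.pipelines import runfolder

    # Hypothetical HiSeq analysis directories.
    aligned = ['/runs/X/Aligned', '/runs/X/Aligned_3mm']
    unaligned = ['/runs/X/Unaligned_3mm', '/runs/X/Unaligned_6index']

    # Tuples are (aligned, unaligned, suffix); order may vary, keys come from a set.
    for a, u, suffix in runfolder.hiseq_match_aligned_unaligned(aligned, unaligned):
        print(a, u, suffix)
    # -> ('/runs/X/Aligned', None, '')
    #    ('/runs/X/Aligned_3mm', '/runs/X/Unaligned_3mm', '_3mm')
    #    (None, '/runs/X/Unaligned_6index', '_6index')

build_hiseq_runs() then skips any tuple whose unaligned half is None, logging
the warning shown above.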
index 0ff6c31fbbb918f4f516150130abde0986c41fdd..60c2dbb1662c2ff660d60083c807cb173ae21a38 100644 (file)
@@ -1,3 +1,5 @@
+from htsworkflow.pipelines import LANE_LIST
+
 class SampleKey(object):
     """Identifier for a sample in a particular 'location' on a flowcell.
     """
@@ -72,3 +74,4 @@ class SampleKey(object):
 
         return '<SampleKey(' + ",".join(name) + ')>'
 
+LANE_SAMPLE_KEYS = [ SampleKey(lane=l) for l in LANE_LIST ]
\ No newline at end of file
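
A hedged sketch of how the new default is meant to be consumed: when a run has
no gerald object, save_raw_data() above falls back to LANE_SAMPLE_KEYS instead
of enumerating gerald lanes. Assuming LANE_LIST covers the usual lanes 1-8:

    from htsworkflow.pipelines.samplekey import LANE_SAMPLE_KEYS

    # One SampleKey per lane in LANE_LIST; the repr shown is approximate.
    for key in LANE_SAMPLE_KEYS:
        print(key)   # <SampleKey(lane=1)> ... <SampleKey(lane=8)>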
index 31aa6d6640a7f7e12dab7d7ae899776820f3a656..03b96b8f35b481fddfb7f41100ed0366c2ef473f 100644 (file)
@@ -43,7 +43,7 @@ def make_srf_commands(run_name, bustard_dir, lanes, site_name, destdir, cmdlevel
   make a subprocess-friendly list of command line arguments to run solexa2srf
   generates files like:
   woldlab:080514_HWI-EAS229_0029_20768AAXX:8.srf
-   site        run name                    lane
+  site        run name                    lane
 
   run_name - most of the file name (run folder name is a good choice)
   lanes - list of integers corresponding to which lanes to process
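
To make the annotated example concrete, a tiny sketch of how the three pieces
compose (values copied from the docstring's own example line):

    # site : run name : lane, joined as in the example above.
    site = 'woldlab'
    run_name = '080514_HWI-EAS229_0029_20768AAXX'
    lane = 8
    print('%s:%s:%d.srf' % (site, run_name, lane))
    # -> woldlab:080514_HWI-EAS229_0029_20768AAXX:8.srf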
index ab86342ce6d2108a95811cfb5f9a134f17affc6d..0d895d9e4b18c8fbb968da470639df8808424339 100644 (file)
@@ -7,7 +7,7 @@ from subprocess import Popen, PIPE
 import sys
 
 from htsworkflow.util.opener import autoopen
-from htsworkflow.version import version
+from htsworkflow.util.version import version
 
 LOGGER = logging.getLogger(__name__)
 
index 6fdda43cf8b4abcee39100e4204df7182fa4a48b..8f47670d99521f37fc8afe75ab8edf292b4c5396 100644 (file)
@@ -7,7 +7,6 @@ import re
 import types
 from pprint import pprint
 
-#from htsworkflow.pipelines.runfolder import ElementTree
 from lxml import html
 from lxml import etree
 from htsworkflow.util.ethelp import indent, flatten
index afa2edfe003736605761cdb459c5ba875240ae68..759a13ee37f2f463771ac2429e64e9be49873031 100644 (file)
@@ -100,6 +100,17 @@ def make_unaligned_config_1_12(unaligned_dir):
     ]
     for src, dest in demultiplex_pairs:
         shutil.copy(src, dest)
+
+def make_unaligned_status_1_12(unaligned_dir, flowcell_id):
+    basecall_status = ['All.htm', 'Demultiplex_Stats.htm', 'IVC.htm']
+    test_data_root = os.path.join(TESTDATA_DIR, '1_12', 'basecall_stats')
+    basecall_stats = os.path.join(unaligned_dir,
+                                  'Basecall_Stats_{0}'.format(flowcell_id))
+    os.mkdir(basecall_stats)
+    for filename in basecall_status:
+        source = os.path.join(test_data_root, filename)
+        destination = os.path.join(basecall_stats, filename)
+        shutil.copy(source, destination)
 
 def make_rta_intensities_1460(data_dir, version='1.4.6.0'):
     """
index 6f2b4238fe7d648896bd4c3c3bffce3697dc8cf5..7381d8c38908a41753b79a202daecb9d56df847f 100644 (file)
@@ -13,7 +13,7 @@ from htsworkflow.pipelines import ipar
 from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
index aa2f618f9324faca633dae27a0053ad76c14b54b..7195d0f044a0e29529dc9e903e9598b12d4ad040 100644 (file)
@@ -7,7 +7,7 @@ import shutil
 import tempfile
 from unittest2 import TestCase
 
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 from htsworkflow.pipelines import genomemap
 
 MINI_GENOME_XML = '''<sequenceSizes>
index 8eef13096d3694a11c3f80e0704a6d6cdea7ee6b..de68a2505cbb2e1e6f83726e23780008d6e90b83 100644 (file)
@@ -10,7 +10,7 @@ from htsworkflow.pipelines import firecrest
 from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -451,20 +451,20 @@ class RunfolderTests(TestCase):
 
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
-        self.failUnlessEqual(runs[0].name, 'run_207BTAAXX_2008-04-19.xml')
+        self.failUnlessEqual(runs[0].serialization_filename, 'run_207BTAAXX_2008-04-19.xml')
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '207BTAAXY')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
-        self.failUnlessEqual(runs[0].name, 'run_207BTAAXY_2008-04-19.xml')
+        self.failUnlessEqual(runs[0].serialization_filename, 'run_207BTAAXY_2008-04-19.xml')
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index a571944d834a2e897832f777d137ed707369b5df..cd631bf6070a4f7c5bdd105ca56631462b364d70 100644 (file)
@@ -10,7 +10,7 @@ from htsworkflow.pipelines import firecrest
 from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -875,20 +875,20 @@ class RunfolderTests(TestCase):
 
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
-        self.failUnlessEqual(runs[0].name, 'run_207BTAAXX_2008-04-19.xml')
+        self.failUnlessEqual(runs[0].serialization_filename, 'run_207BTAAXX_2008-04-19.xml')
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '207BTAAXY')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
-        self.failUnlessEqual(runs[0].name, 'run_207BTAAXY_2008-04-19.xml')
+        self.failUnlessEqual(runs[0].serialization_filename, 'run_207BTAAXY_2008-04-19.xml')
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index 2397c2a6136e4e109d065685f780c65c4b8721fb..27e66499684b0860b08925ff24558b7959aafeb5 100644 (file)
@@ -10,7 +10,7 @@ from htsworkflow.pipelines import firecrest
 from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -279,21 +279,21 @@ class RunfolderTests(TestCase):
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
         name = 'run_30J55AAXX_2009-02-22.xml'
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '30J55AAXX')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_30J55AAXX_2009-02-22.xml'
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index c0b12b468883ca3e1a61c05359aa237047b0f68f..96a5bcf1102a1a558c00ac93876cfb870cc50677 100644 (file)
@@ -13,7 +13,7 @@ from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import ipar
 from htsworkflow.pipelines import runfolder
 from htsworkflow.pipelines import srf
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -268,21 +268,21 @@ class RunfolderTests(TestCase):
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
         name = 'run_%s_%s.xml' % ( FCID, date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, FCID)
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_%s_%s.xml' % ( FCID, date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index 1e956326fa5cebc5f515934fa623a5661f3db5fe..34a9c8a04e8ce85dfd97f4b46d77b02ee9256fd9 100644 (file)
@@ -10,7 +10,7 @@ from htsworkflow.pipelines import ipar
 from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -278,21 +278,21 @@ class RunfolderTests(TestCase):
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
         name = 'run_207BTAAXX_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '207BTAAXY')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_207BTAAXY_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index 07c87784fadd9062c977843566511accbf0f8a57..c23ed9730808416484500e37c866a29e984e9ce0 100644 (file)
@@ -11,7 +11,7 @@ from htsworkflow.pipelines import ipar
 from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -316,21 +316,21 @@ class RunfolderTests(TestCase):
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
         name = 'run_3021JAAXX_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '207BTAAXY')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_207BTAAXY_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index b159c5fd9d953e5eff625a996b84ca9b7d9fa6f3..dce07884228bdb154058af15e838a5926bca63d7 100644 (file)
@@ -11,7 +11,7 @@ from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines.eland import SampleKey
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -304,21 +304,21 @@ class RunfolderTests(TestCase):
         self.failUnlessEqual(len(runs), 1)
         # firecrest's date depends on filename not the create time.
         name = 'run_207BTAAXX_2009-02-22.xml'
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '207BTAAXY')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_207BTAAXY_2009-02-22.xml'
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index 2c82041f4e18e37956661b08f11e2afaf2d3a808..497fe7b33181cd6896fb0cac05c0ce19d79c0b24 100644 (file)
@@ -12,7 +12,7 @@ from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
 from htsworkflow.pipelines.samplekey import SampleKey
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -278,21 +278,21 @@ class RunfolderTests(TestCase):
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
         name = 'run_4286GAAXX_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '207BTAAXY')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_207BTAAXY_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         r1 = runs[0]
         xml = r1.get_elements()
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index 9d4879a47f498c70c850bf90cf1332933ffd790a..8d8f2f104744672fc5ceba1ec3379f970dc093ac 100644 (file)
@@ -12,7 +12,7 @@ from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
 from htsworkflow.pipelines.samplekey import SampleKey
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -249,14 +249,14 @@ class RunfolderTests(TestCase):
         # do we get the flowcell id from the filename?
         self.failUnlessEqual(len(runs), 1)
         name = 'run_4286GAAXX_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         # do we get the flowcell id from the FlowcellId.xml file
         make_flowcell_id(self.runfolder_dir, '207BTAAXY')
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_207BTAAXY_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         bustard_dir = os.path.join(self.runfolder_dir, 'Data',
                                    'Intensities', 'BaseCalls')
@@ -269,7 +269,7 @@ class RunfolderTests(TestCase):
                              '090220_HWI-EAS229_0093_30VR0AAXX')
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index eacae8c7f0e9db4aca1ee2573c391aba5a501f3e..2817328dc3a0fdcc415cc17e0c1adda725bebc37 100644 (file)
@@ -12,7 +12,7 @@ from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
 from htsworkflow.pipelines.samplekey import SampleKey
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -271,7 +271,8 @@ class RunfolderTests(TestCase):
         runs = runfolder.get_runs(self.runfolder_dir)
         self.failUnlessEqual(len(runs), 1)
         name = 'run_207BTAAXY_%s.xml' % ( date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
+
 
 
         r1 = runs[0]
@@ -279,7 +280,7 @@ class RunfolderTests(TestCase):
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
index 6933157d5691917bb9bf7a1ff7be3993535b2453..8396bd0351265fe940a4b75dff3c421345272f56 100644 (file)
@@ -13,7 +13,7 @@ from htsworkflow.pipelines import ipar
 from htsworkflow.pipelines import bustard
 from htsworkflow.pipelines import gerald
 from htsworkflow.pipelines import runfolder
-from htsworkflow.pipelines.runfolder import ElementTree
+from htsworkflow.pipelines import ElementTree
 
 from htsworkflow.pipelines.test.simulate_runfolder import *
 
@@ -46,6 +46,7 @@ def make_runfolder(obj=None):
     os.mkdir(unaligned_dir)
     make_unaligned_fastqs_1_12(unaligned_dir, flowcell_id)
     make_unaligned_config_1_12(unaligned_dir)
+    make_unaligned_status_1_12(unaligned_dir, flowcell_id)
 
     aligned_dir = os.path.join(runfolder_dir, "Aligned")
     os.mkdir(aligned_dir)
@@ -256,7 +257,7 @@ class RunfolderTests(TestCase):
         self.assertEqual(runs[0].flowcell_id, self.flowcell_id)
         name = 'run_%s_%s.xml' % ( self.flowcell_id,
                                    date.today().strftime('%Y-%m-%d'),)
-        self.failUnlessEqual(runs[0].name, name)
+        self.failUnlessEqual(runs[0].serialization_filename, name)
 
         bustard_dir = os.path.join(self.runfolder_dir, 'Unaligned')
         r1 = runs[0]
@@ -268,7 +269,7 @@ class RunfolderTests(TestCase):
         xml_str = ElementTree.tostring(xml)
 
         r2 = runfolder.PipelineRun(xml=xml)
-        self.failUnlessEqual(r1.name, r2.name)
+        self.failUnlessEqual(r1.serialization_filename, r2.serialization_filename)
         self.failIfEqual(r2.image_analysis, None)
         self.failIfEqual(r2.bustard, None)
         self.failIfEqual(r2.gerald, None)
diff --git a/htsworkflow/pipelines/test/test_runfolder_utils.py b/htsworkflow/pipelines/test/test_runfolder_utils.py
new file mode 100644 (file)
index 0000000..e7c0c38
--- /dev/null
@@ -0,0 +1,36 @@
+from unittest2 import TestCase, TestSuite, defaultTestLoader
+
+from htsworkflow.pipelines import runfolder
+
+
+class TestRunfolderUtilities(TestCase):
+    """Some functions can be tested independently of the runfolder version.
+    """
+    def test_match_aligned_unaligned_abspath(self):
+        aligned = ['/a/b/c/Aligned', '/a/b/c/Aligned1234', '/a/b/c/Aligned_3mm']
+        unaligned = ['/a/b/c/Unaligned', '/a/b/c/Unaligned_3mm', '/a/b/c/Unaligned_6index']
+
+        matches = set(runfolder.hiseq_match_aligned_unaligned(aligned, unaligned))
+        self.assertEqual(len(matches), 4)
+        self.assertTrue(('/a/b/c/Aligned', '/a/b/c/Unaligned', '') in matches )
+        self.assertTrue(('/a/b/c/Aligned1234', None, '1234') in matches )
+        self.assertTrue(('/a/b/c/Aligned_3mm', '/a/b/c/Unaligned_3mm', '_3mm') in matches )
+        self.assertTrue((None, '/a/b/c/Unaligned_6index', '_6index') in matches )
+
+    def test_match_aligned_unaligned_relpath(self):
+        aligned = ['./Aligned', './Aligned1234', './Aligned_3mm']
+        unaligned = ['./Unaligned', './Unaligned_3mm', './Unaligned_6index']
+
+        matches = set(runfolder.hiseq_match_aligned_unaligned(aligned, unaligned))
+        self.assertEqual(len(matches), 4)
+        self.assertTrue(('./Aligned', './Unaligned', '') in matches )
+        self.assertTrue(('./Aligned1234', None, '1234') in matches )
+        self.assertTrue(('./Aligned_3mm', './Unaligned_3mm', '_3mm') in matches )
+        self.assertTrue((None, './Unaligned_6index', '_6index') in matches )
+
+def suite():
+    suite = TestSuite()
+    suite.addTests(defaultTestLoader.loadTestsFromTestCase(TestRunfolderUtilities))
+    return suite
+
+if __name__ == "__main__":
+    from unittest2 import main
+    main(defaultTest="suite")
diff --git a/htsworkflow/pipelines/test/testdata/1_12/basecall_stats/All.htm b/htsworkflow/pipelines/test/testdata/1_12/basecall_stats/All.htm
new file mode 100644 (file)
index 0000000..3e3efa6
--- /dev/null
@@ -0,0 +1,496 @@
+<html>
+<head>
+<!--RUN_TIME 2012-04-11 13:12:39-->
+<!--SOFTWARE_VERSION CASAVA-1.8.2-->
+</head>
+<title>120406_SN787_0114_AD0PMDACXX All</title><h1 align="center">120406_SN787_0114_AD0PMDACXX All</h1>
+<body>
+<a href="All.htm">Full output (Warning: may overload your browser!)</a><br><br>
+<table border="1" cellpadding="5"><tr><td><b>Tile</b></td>
+<td><b>Lane 1</b></td>
+<td><b>Lane 2</b></td>
+<td><b>Lane 3</b></td>
+</tr>
+<tr><td><b>1101</b></td>
+<td><a href="Plots/s_1_1101_all.png">Plots/s_1_1101_all.png</a></td>
+<td><a href="Plots/s_2_1101_all.png">Plots/s_2_1101_all.png</a></td>
+<td><a href="Plots/s_3_1101_all.png">Plots/s_3_1101_all.png</a></td>
+</tr>
+<tr><td><b>1102</b></td>
+<td><a href="Plots/s_1_1102_all.png">Plots/s_1_1102_all.png</a></td>
+<td><a href="Plots/s_2_1102_all.png">Plots/s_2_1102_all.png</a></td>
+<td><a href="Plots/s_3_1102_all.png">Plots/s_3_1102_all.png</a></td>
+</tr>
+<tr><td><b>1103</b></td>
+<td><a href="Plots/s_1_1103_all.png">Plots/s_1_1103_all.png</a></td>
+<td><a href="Plots/s_2_1103_all.png">Plots/s_2_1103_all.png</a></td>
+<td><a href="Plots/s_3_1103_all.png">Plots/s_3_1103_all.png</a></td>
+</tr>
+<tr><td><b>1104</b></td>
+<td><a href="Plots/s_1_1104_all.png">Plots/s_1_1104_all.png</a></td>
+<td><a href="Plots/s_2_1104_all.png">Plots/s_2_1104_all.png</a></td>
+<td><a href="Plots/s_3_1104_all.png">Plots/s_3_1104_all.png</a></td>
+</tr>
+<tr><td><b>1105</b></td>
+<td><a href="Plots/s_1_1105_all.png"> <img height=84 width=84 src="Plots/s_1_1105_all.png"></a></td>
+<td><a href="Plots/s_2_1105_all.png"> <img height=84 width=84 src="Plots/s_2_1105_all.png"></a></td>
+<td><a href="Plots/s_3_1105_all.png"> <img height=84 width=84 src="Plots/s_3_1105_all.png"></a></td>
+</tr>
+<tr><td><b>1106</b></td>
+<td><a href="Plots/s_1_1106_all.png">Plots/s_1_1106_all.png</a></td>
+<td><a href="Plots/s_2_1106_all.png">Plots/s_2_1106_all.png</a></td>
+<td><a href="Plots/s_3_1106_all.png">Plots/s_3_1106_all.png</a></td>
+</tr>
+<tr><td><b>1107</b></td>
+<td><a href="Plots/s_1_1107_all.png">Plots/s_1_1107_all.png</a></td>
+<td><a href="Plots/s_2_1107_all.png">Plots/s_2_1107_all.png</a></td>
+<td><a href="Plots/s_3_1107_all.png">Plots/s_3_1107_all.png</a></td>
+</tr>
+<tr><td><b>1108</b></td>
+<td><a href="Plots/s_1_1108_all.png">Plots/s_1_1108_all.png</a></td>
+<td><a href="Plots/s_2_1108_all.png">Plots/s_2_1108_all.png</a></td>
+<td><a href="Plots/s_3_1108_all.png">Plots/s_3_1108_all.png</a></td>
+</tr>
+<tr><td><b>1109</b></td>
+<td><a href="Plots/s_1_1109_all.png">Plots/s_1_1109_all.png</a></td>
+<td><a href="Plots/s_2_1109_all.png">Plots/s_2_1109_all.png</a></td>
+<td><a href="Plots/s_3_1109_all.png">Plots/s_3_1109_all.png</a></td>
+</tr>
+<tr><td><b>1110</b></td>
+<td><a href="Plots/s_1_1110_all.png"> <img height=84 width=84 src="Plots/s_1_1110_all.png"></a></td>
+<td><a href="Plots/s_2_1110_all.png"> <img height=84 width=84 src="Plots/s_2_1110_all.png"></a></td>
+<td><a href="Plots/s_3_1110_all.png"> <img height=84 width=84 src="Plots/s_3_1110_all.png"></a></td>
+</tr>
+<tr><td><b>1111</b></td>
+<td><a href="Plots/s_1_1111_all.png">Plots/s_1_1111_all.png</a></td>
+<td><a href="Plots/s_2_1111_all.png">Plots/s_2_1111_all.png</a></td>
+<td><a href="Plots/s_3_1111_all.png">Plots/s_3_1111_all.png</a></td>
+</tr>
+<tr><td><b>1112</b></td>
+<td><a href="Plots/s_1_1112_all.png">Plots/s_1_1112_all.png</a></td>
+<td><a href="Plots/s_2_1112_all.png">Plots/s_2_1112_all.png</a></td>
+<td><a href="Plots/s_3_1112_all.png">Plots/s_3_1112_all.png</a></td>
+</tr>
+<tr><td><b>1113</b></td>
+<td><a href="Plots/s_1_1113_all.png">Plots/s_1_1113_all.png</a></td>
+<td><a href="Plots/s_2_1113_all.png">Plots/s_2_1113_all.png</a></td>
+<td><a href="Plots/s_3_1113_all.png">Plots/s_3_1113_all.png</a></td>
+</tr>
+<tr><td><b>1114</b></td>
+<td><a href="Plots/s_1_1114_all.png">Plots/s_1_1114_all.png</a></td>
+<td><a href="Plots/s_2_1114_all.png">Plots/s_2_1114_all.png</a></td>
+<td><a href="Plots/s_3_1114_all.png">Plots/s_3_1114_all.png</a></td>
+</tr>
+<tr><td><b>1115</b></td>
+<td><a href="Plots/s_1_1115_all.png"> <img height=84 width=84 src="Plots/s_1_1115_all.png"></a></td>
+<td><a href="Plots/s_2_1115_all.png"> <img height=84 width=84 src="Plots/s_2_1115_all.png"></a></td>
+<td><a href="Plots/s_3_1115_all.png"> <img height=84 width=84 src="Plots/s_3_1115_all.png"></a></td>
+</tr>
+<tr><td><b>1116</b></td>
+<td><a href="Plots/s_1_1116_all.png">Plots/s_1_1116_all.png</a></td>
+<td><a href="Plots/s_2_1116_all.png">Plots/s_2_1116_all.png</a></td>
+<td><a href="Plots/s_3_1116_all.png">Plots/s_3_1116_all.png</a></td>
+</tr>
+<tr><td><b>1201</b></td>
+<td><a href="Plots/s_1_1201_all.png">Plots/s_1_1201_all.png</a></td>
+<td><a href="Plots/s_2_1201_all.png">Plots/s_2_1201_all.png</a></td>
+<td><a href="Plots/s_3_1201_all.png">Plots/s_3_1201_all.png</a></td>
+</tr>
+<tr><td><b>1202</b></td>
+<td><a href="Plots/s_1_1202_all.png">Plots/s_1_1202_all.png</a></td>
+<td><a href="Plots/s_2_1202_all.png">Plots/s_2_1202_all.png</a></td>
+<td><a href="Plots/s_3_1202_all.png">Plots/s_3_1202_all.png</a></td>
+</tr>
+<tr><td><b>1203</b></td>
+<td><a href="Plots/s_1_1203_all.png">Plots/s_1_1203_all.png</a></td>
+<td><a href="Plots/s_2_1203_all.png">Plots/s_2_1203_all.png</a></td>
+<td><a href="Plots/s_3_1203_all.png">Plots/s_3_1203_all.png</a></td>
+</tr>
+<tr><td><b>1204</b></td>
+<td><a href="Plots/s_1_1204_all.png"> <img height=84 width=84 src="Plots/s_1_1204_all.png"></a></td>
+<td><a href="Plots/s_2_1204_all.png"> <img height=84 width=84 src="Plots/s_2_1204_all.png"></a></td>
+<td><a href="Plots/s_3_1204_all.png"> <img height=84 width=84 src="Plots/s_3_1204_all.png"></a></td>
+</tr>
+<tr><td><b>1205</b></td>
+<td><a href="Plots/s_1_1205_all.png">Plots/s_1_1205_all.png</a></td>
+<td><a href="Plots/s_2_1205_all.png">Plots/s_2_1205_all.png</a></td>
+<td><a href="Plots/s_3_1205_all.png">Plots/s_3_1205_all.png</a></td>
+</tr>
+<tr><td><b>1206</b></td>
+<td><a href="Plots/s_1_1206_all.png">Plots/s_1_1206_all.png</a></td>
+<td><a href="Plots/s_2_1206_all.png">Plots/s_2_1206_all.png</a></td>
+<td><a href="Plots/s_3_1206_all.png">Plots/s_3_1206_all.png</a></td>
+</tr>
+<tr><td><b>1207</b></td>
+<td><a href="Plots/s_1_1207_all.png">Plots/s_1_1207_all.png</a></td>
+<td><a href="Plots/s_2_1207_all.png">Plots/s_2_1207_all.png</a></td>
+<td><a href="Plots/s_3_1207_all.png">Plots/s_3_1207_all.png</a></td>
+</tr>
+<tr><td><b>1208</b></td>
+<td><a href="Plots/s_1_1208_all.png">Plots/s_1_1208_all.png</a></td>
+<td><a href="Plots/s_2_1208_all.png">Plots/s_2_1208_all.png</a></td>
+<td><a href="Plots/s_3_1208_all.png">Plots/s_3_1208_all.png</a></td>
+</tr>
+<tr><td><b>1209</b></td>
+<td><a href="Plots/s_1_1209_all.png"> <img height=84 width=84 src="Plots/s_1_1209_all.png"></a></td>
+<td><a href="Plots/s_2_1209_all.png"> <img height=84 width=84 src="Plots/s_2_1209_all.png"></a></td>
+<td><a href="Plots/s_3_1209_all.png"> <img height=84 width=84 src="Plots/s_3_1209_all.png"></a></td>
+</tr>
+<tr><td><b>1210</b></td>
+<td><a href="Plots/s_1_1210_all.png">Plots/s_1_1210_all.png</a></td>
+<td><a href="Plots/s_2_1210_all.png">Plots/s_2_1210_all.png</a></td>
+<td><a href="Plots/s_3_1210_all.png">Plots/s_3_1210_all.png</a></td>
+</tr>
+<tr><td><b>1211</b></td>
+<td><a href="Plots/s_1_1211_all.png">Plots/s_1_1211_all.png</a></td>
+<td><a href="Plots/s_2_1211_all.png">Plots/s_2_1211_all.png</a></td>
+<td><a href="Plots/s_3_1211_all.png">Plots/s_3_1211_all.png</a></td>
+</tr>
+<tr><td><b>1212</b></td>
+<td><a href="Plots/s_1_1212_all.png">Plots/s_1_1212_all.png</a></td>
+<td><a href="Plots/s_2_1212_all.png">Plots/s_2_1212_all.png</a></td>
+<td><a href="Plots/s_3_1212_all.png">Plots/s_3_1212_all.png</a></td>
+</tr>
+<tr><td><b>1213</b></td>
+<td><a href="Plots/s_1_1213_all.png">Plots/s_1_1213_all.png</a></td>
+<td><a href="Plots/s_2_1213_all.png">Plots/s_2_1213_all.png</a></td>
+<td><a href="Plots/s_3_1213_all.png">Plots/s_3_1213_all.png</a></td>
+</tr>
+<tr><td><b>1214</b></td>
+<td><a href="Plots/s_1_1214_all.png"> <img height=84 width=84 src="Plots/s_1_1214_all.png"></a></td>
+<td><a href="Plots/s_2_1214_all.png"> <img height=84 width=84 src="Plots/s_2_1214_all.png"></a></td>
+<td><a href="Plots/s_3_1214_all.png"> <img height=84 width=84 src="Plots/s_3_1214_all.png"></a></td>
+</tr>
+<tr><td><b>1215</b></td>
+<td><a href="Plots/s_1_1215_all.png">Plots/s_1_1215_all.png</a></td>
+<td><a href="Plots/s_2_1215_all.png">Plots/s_2_1215_all.png</a></td>
+<td><a href="Plots/s_3_1215_all.png">Plots/s_3_1215_all.png</a></td>
+</tr>
+<tr><td><b>1216</b></td>
+<td><a href="Plots/s_1_1216_all.png">Plots/s_1_1216_all.png</a></td>
+<td><a href="Plots/s_2_1216_all.png">Plots/s_2_1216_all.png</a></td>
+<td><a href="Plots/s_3_1216_all.png">Plots/s_3_1216_all.png</a></td>
+</tr>
+<tr><td><b>1301</b></td>
+<td><a href="Plots/s_1_1301_all.png">Plots/s_1_1301_all.png</a></td>
+<td><a href="Plots/s_2_1301_all.png">Plots/s_2_1301_all.png</a></td>
+<td><a href="Plots/s_3_1301_all.png">Plots/s_3_1301_all.png</a></td>
+</tr>
+<tr><td><b>1302</b></td>
+<td><a href="Plots/s_1_1302_all.png">Plots/s_1_1302_all.png</a></td>
+<td><a href="Plots/s_2_1302_all.png">Plots/s_2_1302_all.png</a></td>
+<td><a href="Plots/s_3_1302_all.png">Plots/s_3_1302_all.png</a></td>
+</tr>
+<tr><td><b>1303</b></td>
+<td><a href="Plots/s_1_1303_all.png"> <img height=84 width=84 src="Plots/s_1_1303_all.png"></a></td>
+<td><a href="Plots/s_2_1303_all.png"> <img height=84 width=84 src="Plots/s_2_1303_all.png"></a></td>
+<td><a href="Plots/s_3_1303_all.png"> <img height=84 width=84 src="Plots/s_3_1303_all.png"></a></td>
+</tr>
+<tr><td><b>1304</b></td>
+<td><a href="Plots/s_1_1304_all.png">Plots/s_1_1304_all.png</a></td>
+<td><a href="Plots/s_2_1304_all.png">Plots/s_2_1304_all.png</a></td>
+<td><a href="Plots/s_3_1304_all.png">Plots/s_3_1304_all.png</a></td>
+</tr>
+<tr><td><b>1305</b></td>
+<td><a href="Plots/s_1_1305_all.png">Plots/s_1_1305_all.png</a></td>
+<td><a href="Plots/s_2_1305_all.png">Plots/s_2_1305_all.png</a></td>
+<td><a href="Plots/s_3_1305_all.png">Plots/s_3_1305_all.png</a></td>
+</tr>
+<tr><td><b>1306</b></td>
+<td><a href="Plots/s_1_1306_all.png">Plots/s_1_1306_all.png</a></td>
+<td><a href="Plots/s_2_1306_all.png">Plots/s_2_1306_all.png</a></td>
+<td><a href="Plots/s_3_1306_all.png">Plots/s_3_1306_all.png</a></td>
+</tr>
+<tr><td><b>1307</b></td>
+<td><a href="Plots/s_1_1307_all.png">Plots/s_1_1307_all.png</a></td>
+<td><a href="Plots/s_2_1307_all.png">Plots/s_2_1307_all.png</a></td>
+<td><a href="Plots/s_3_1307_all.png">Plots/s_3_1307_all.png</a></td>
+</tr>
+<tr><td><b>1308</b></td>
+<td><a href="Plots/s_1_1308_all.png"> <img height=84 width=84 src="Plots/s_1_1308_all.png"></a></td>
+<td><a href="Plots/s_2_1308_all.png"> <img height=84 width=84 src="Plots/s_2_1308_all.png"></a></td>
+<td><a href="Plots/s_3_1308_all.png"> <img height=84 width=84 src="Plots/s_3_1308_all.png"></a></td>
+</tr>
+<tr><td><b>1309</b></td>
+<td><a href="Plots/s_1_1309_all.png">Plots/s_1_1309_all.png</a></td>
+<td><a href="Plots/s_2_1309_all.png">Plots/s_2_1309_all.png</a></td>
+<td><a href="Plots/s_3_1309_all.png">Plots/s_3_1309_all.png</a></td>
+</tr>
+<tr><td><b>1310</b></td>
+<td><a href="Plots/s_1_1310_all.png">Plots/s_1_1310_all.png</a></td>
+<td><a href="Plots/s_2_1310_all.png">Plots/s_2_1310_all.png</a></td>
+<td><a href="Plots/s_3_1310_all.png">Plots/s_3_1310_all.png</a></td>
+</tr>
+<tr><td><b>1311</b></td>
+<td><a href="Plots/s_1_1311_all.png">Plots/s_1_1311_all.png</a></td>
+<td><a href="Plots/s_2_1311_all.png">Plots/s_2_1311_all.png</a></td>
+<td><a href="Plots/s_3_1311_all.png">Plots/s_3_1311_all.png</a></td>
+</tr>
+<tr><td><b>1312</b></td>
+<td><a href="Plots/s_1_1312_all.png">Plots/s_1_1312_all.png</a></td>
+<td><a href="Plots/s_2_1312_all.png">Plots/s_2_1312_all.png</a></td>
+<td><a href="Plots/s_3_1312_all.png">Plots/s_3_1312_all.png</a></td>
+</tr>
+<tr><td><b>1313</b></td>
+<td><a href="Plots/s_1_1313_all.png"> <img height=84 width=84 src="Plots/s_1_1313_all.png"></a></td>
+<td><a href="Plots/s_2_1313_all.png"> <img height=84 width=84 src="Plots/s_2_1313_all.png"></a></td>
+<td><a href="Plots/s_3_1313_all.png"> <img height=84 width=84 src="Plots/s_3_1313_all.png"></a></td>
+</tr>
+<tr><td><b>1314</b></td>
+<td><a href="Plots/s_1_1314_all.png">Plots/s_1_1314_all.png</a></td>
+<td><a href="Plots/s_2_1314_all.png">Plots/s_2_1314_all.png</a></td>
+<td><a href="Plots/s_3_1314_all.png">Plots/s_3_1314_all.png</a></td>
+</tr>
+<tr><td><b>1315</b></td>
+<td><a href="Plots/s_1_1315_all.png">Plots/s_1_1315_all.png</a></td>
+<td><a href="Plots/s_2_1315_all.png">Plots/s_2_1315_all.png</a></td>
+<td><a href="Plots/s_3_1315_all.png">Plots/s_3_1315_all.png</a></td>
+</tr>
+<tr><td><b>1316</b></td>
+<td><a href="Plots/s_1_1316_all.png">Plots/s_1_1316_all.png</a></td>
+<td><a href="Plots/s_2_1316_all.png">Plots/s_2_1316_all.png</a></td>
+<td><a href="Plots/s_3_1316_all.png">Plots/s_3_1316_all.png</a></td>
+</tr>
+<tr><td><b>2101</b></td>
+<td><a href="Plots/s_1_2101_all.png">Plots/s_1_2101_all.png</a></td>
+<td><a href="Plots/s_2_2101_all.png">Plots/s_2_2101_all.png</a></td>
+<td><a href="Plots/s_3_2101_all.png">Plots/s_3_2101_all.png</a></td>
+</tr>
+<tr><td><b>2102</b></td>
+<td><a href="Plots/s_1_2102_all.png"> <img height=84 width=84 src="Plots/s_1_2102_all.png"></a></td>
+<td><a href="Plots/s_2_2102_all.png"> <img height=84 width=84 src="Plots/s_2_2102_all.png"></a></td>
+<td><a href="Plots/s_3_2102_all.png"> <img height=84 width=84 src="Plots/s_3_2102_all.png"></a></td>
+</tr>
+<tr><td><b>2103</b></td>
+<td><a href="Plots/s_1_2103_all.png">Plots/s_1_2103_all.png</a></td>
+<td><a href="Plots/s_2_2103_all.png">Plots/s_2_2103_all.png</a></td>
+<td><a href="Plots/s_3_2103_all.png">Plots/s_3_2103_all.png</a></td>
+</tr>
+<tr><td><b>2104</b></td>
+<td><a href="Plots/s_1_2104_all.png">Plots/s_1_2104_all.png</a></td>
+<td><a href="Plots/s_2_2104_all.png">Plots/s_2_2104_all.png</a></td>
+<td><a href="Plots/s_3_2104_all.png">Plots/s_3_2104_all.png</a></td>
+</tr>
+<tr><td><b>2105</b></td>
+<td><a href="Plots/s_1_2105_all.png">Plots/s_1_2105_all.png</a></td>
+<td><a href="Plots/s_2_2105_all.png">Plots/s_2_2105_all.png</a></td>
+<td><a href="Plots/s_3_2105_all.png">Plots/s_3_2105_all.png</a></td>
+</tr>
+<tr><td><b>2106</b></td>
+<td><a href="Plots/s_1_2106_all.png">Plots/s_1_2106_all.png</a></td>
+<td><a href="Plots/s_2_2106_all.png">Plots/s_2_2106_all.png</a></td>
+<td><a href="Plots/s_3_2106_all.png">Plots/s_3_2106_all.png</a></td>
+</tr>
+<tr><td><b>2107</b></td>
+<td><a href="Plots/s_1_2107_all.png"> <img height=84 width=84 src="Plots/s_1_2107_all.png"></a></td>
+<td><a href="Plots/s_2_2107_all.png"> <img height=84 width=84 src="Plots/s_2_2107_all.png"></a></td>
+<td><a href="Plots/s_3_2107_all.png"> <img height=84 width=84 src="Plots/s_3_2107_all.png"></a></td>
+</tr>
+<tr><td><b>2108</b></td>
+<td><a href="Plots/s_1_2108_all.png">Plots/s_1_2108_all.png</a></td>
+<td><a href="Plots/s_2_2108_all.png">Plots/s_2_2108_all.png</a></td>
+<td><a href="Plots/s_3_2108_all.png">Plots/s_3_2108_all.png</a></td>
+</tr>
+<tr><td><b>2109</b></td>
+<td><a href="Plots/s_1_2109_all.png">Plots/s_1_2109_all.png</a></td>
+<td><a href="Plots/s_2_2109_all.png">Plots/s_2_2109_all.png</a></td>
+<td><a href="Plots/s_3_2109_all.png">Plots/s_3_2109_all.png</a></td>
+</tr>
+<tr><td><b>2110</b></td>
+<td><a href="Plots/s_1_2110_all.png">Plots/s_1_2110_all.png</a></td>
+<td><a href="Plots/s_2_2110_all.png">Plots/s_2_2110_all.png</a></td>
+<td><a href="Plots/s_3_2110_all.png">Plots/s_3_2110_all.png</a></td>
+</tr>
+<tr><td><b>2111</b></td>
+<td><a href="Plots/s_1_2111_all.png">Plots/s_1_2111_all.png</a></td>
+<td><a href="Plots/s_2_2111_all.png">Plots/s_2_2111_all.png</a></td>
+<td><a href="Plots/s_3_2111_all.png">Plots/s_3_2111_all.png</a></td>
+</tr>
+<tr><td><b>2112</b></td>
+<td><a href="Plots/s_1_2112_all.png"> <img height=84 width=84 src="Plots/s_1_2112_all.png"></a></td>
+<td><a href="Plots/s_2_2112_all.png"> <img height=84 width=84 src="Plots/s_2_2112_all.png"></a></td>
+<td><a href="Plots/s_3_2112_all.png"> <img height=84 width=84 src="Plots/s_3_2112_all.png"></a></td>
+</tr>
+<tr><td><b>2113</b></td>
+<td><a href="Plots/s_1_2113_all.png">Plots/s_1_2113_all.png</a></td>
+<td><a href="Plots/s_2_2113_all.png">Plots/s_2_2113_all.png</a></td>
+<td><a href="Plots/s_3_2113_all.png">Plots/s_3_2113_all.png</a></td>
+</tr>
+<tr><td><b>2114</b></td>
+<td><a href="Plots/s_1_2114_all.png">Plots/s_1_2114_all.png</a></td>
+<td><a href="Plots/s_2_2114_all.png">Plots/s_2_2114_all.png</a></td>
+<td><a href="Plots/s_3_2114_all.png">Plots/s_3_2114_all.png</a></td>
+</tr>
+<tr><td><b>2115</b></td>
+<td><a href="Plots/s_1_2115_all.png">Plots/s_1_2115_all.png</a></td>
+<td><a href="Plots/s_2_2115_all.png">Plots/s_2_2115_all.png</a></td>
+<td><a href="Plots/s_3_2115_all.png">Plots/s_3_2115_all.png</a></td>
+</tr>
+<tr><td><b>2116</b></td>
+<td><a href="Plots/s_1_2116_all.png">Plots/s_1_2116_all.png</a></td>
+<td><a href="Plots/s_2_2116_all.png">Plots/s_2_2116_all.png</a></td>
+<td><a href="Plots/s_3_2116_all.png">Plots/s_3_2116_all.png</a></td>
+</tr>
+<tr><td><b>2201</b></td>
+<td><a href="Plots/s_1_2201_all.png"> <img height=84 width=84 src="Plots/s_1_2201_all.png"></a></td>
+<td><a href="Plots/s_2_2201_all.png"> <img height=84 width=84 src="Plots/s_2_2201_all.png"></a></td>
+<td><a href="Plots/s_3_2201_all.png"> <img height=84 width=84 src="Plots/s_3_2201_all.png"></a></td>
+</tr>
+<tr><td><b>2202</b></td>
+<td><a href="Plots/s_1_2202_all.png">Plots/s_1_2202_all.png</a></td>
+<td><a href="Plots/s_2_2202_all.png">Plots/s_2_2202_all.png</a></td>
+<td><a href="Plots/s_3_2202_all.png">Plots/s_3_2202_all.png</a></td>
+</tr>
+<tr><td><b>2203</b></td>
+<td><a href="Plots/s_1_2203_all.png">Plots/s_1_2203_all.png</a></td>
+<td><a href="Plots/s_2_2203_all.png">Plots/s_2_2203_all.png</a></td>
+<td><a href="Plots/s_3_2203_all.png">Plots/s_3_2203_all.png</a></td>
+</tr>
+<tr><td><b>2204</b></td>
+<td><a href="Plots/s_1_2204_all.png">Plots/s_1_2204_all.png</a></td>
+<td><a href="Plots/s_2_2204_all.png">Plots/s_2_2204_all.png</a></td>
+<td><a href="Plots/s_3_2204_all.png">Plots/s_3_2204_all.png</a></td>
+</tr>
+<tr><td><b>2205</b></td>
+<td><a href="Plots/s_1_2205_all.png">Plots/s_1_2205_all.png</a></td>
+<td><a href="Plots/s_2_2205_all.png">Plots/s_2_2205_all.png</a></td>
+<td><a href="Plots/s_3_2205_all.png">Plots/s_3_2205_all.png</a></td>
+</tr>
+<tr><td><b>2206</b></td>
+<td><a href="Plots/s_1_2206_all.png"> <img height=84 width=84 src="Plots/s_1_2206_all.png"></a></td>
+<td><a href="Plots/s_2_2206_all.png"> <img height=84 width=84 src="Plots/s_2_2206_all.png"></a></td>
+<td><a href="Plots/s_3_2206_all.png"> <img height=84 width=84 src="Plots/s_3_2206_all.png"></a></td>
+</tr>
+<tr><td><b>2207</b></td>
+<td><a href="Plots/s_1_2207_all.png">Plots/s_1_2207_all.png</a></td>
+<td><a href="Plots/s_2_2207_all.png">Plots/s_2_2207_all.png</a></td>
+<td><a href="Plots/s_3_2207_all.png">Plots/s_3_2207_all.png</a></td>
+</tr>
+<tr><td><b>2208</b></td>
+<td><a href="Plots/s_1_2208_all.png">Plots/s_1_2208_all.png</a></td>
+<td><a href="Plots/s_2_2208_all.png">Plots/s_2_2208_all.png</a></td>
+<td><a href="Plots/s_3_2208_all.png">Plots/s_3_2208_all.png</a></td>
+</tr>
+<tr><td><b>2209</b></td>
+<td><a href="Plots/s_1_2209_all.png">Plots/s_1_2209_all.png</a></td>
+<td><a href="Plots/s_2_2209_all.png">Plots/s_2_2209_all.png</a></td>
+<td><a href="Plots/s_3_2209_all.png">Plots/s_3_2209_all.png</a></td>
+</tr>
+<tr><td><b>2210</b></td>
+<td><a href="Plots/s_1_2210_all.png">Plots/s_1_2210_all.png</a></td>
+<td><a href="Plots/s_2_2210_all.png">Plots/s_2_2210_all.png</a></td>
+<td><a href="Plots/s_3_2210_all.png">Plots/s_3_2210_all.png</a></td>
+</tr>
+<tr><td><b>2211</b></td>
+<td><a href="Plots/s_1_2211_all.png"> <img height=84 width=84 src="Plots/s_1_2211_all.png"></a></td>
+<td><a href="Plots/s_2_2211_all.png"> <img height=84 width=84 src="Plots/s_2_2211_all.png"></a></td>
+<td><a href="Plots/s_3_2211_all.png"> <img height=84 width=84 src="Plots/s_3_2211_all.png"></a></td>
+</tr>
+<tr><td><b>2212</b></td>
+<td><a href="Plots/s_1_2212_all.png">Plots/s_1_2212_all.png</a></td>
+<td><a href="Plots/s_2_2212_all.png">Plots/s_2_2212_all.png</a></td>
+<td><a href="Plots/s_3_2212_all.png">Plots/s_3_2212_all.png</a></td>
+</tr>
+<tr><td><b>2213</b></td>
+<td><a href="Plots/s_1_2213_all.png">Plots/s_1_2213_all.png</a></td>
+<td><a href="Plots/s_2_2213_all.png">Plots/s_2_2213_all.png</a></td>
+<td><a href="Plots/s_3_2213_all.png">Plots/s_3_2213_all.png</a></td>
+</tr>
+<tr><td><b>2214</b></td>
+<td><a href="Plots/s_1_2214_all.png">Plots/s_1_2214_all.png</a></td>
+<td><a href="Plots/s_2_2214_all.png">Plots/s_2_2214_all.png</a></td>
+<td><a href="Plots/s_3_2214_all.png">Plots/s_3_2214_all.png</a></td>
+</tr>
+<tr><td><b>2215</b></td>
+<td><a href="Plots/s_1_2215_all.png">Plots/s_1_2215_all.png</a></td>
+<td><a href="Plots/s_2_2215_all.png">Plots/s_2_2215_all.png</a></td>
+<td><a href="Plots/s_3_2215_all.png">Plots/s_3_2215_all.png</a></td>
+</tr>
+<tr><td><b>2216</b></td>
+<td><a href="Plots/s_1_2216_all.png"> <img height=84 width=84 src="Plots/s_1_2216_all.png"></a></td>
+<td><a href="Plots/s_2_2216_all.png"> <img height=84 width=84 src="Plots/s_2_2216_all.png"></a></td>
+<td><a href="Plots/s_3_2216_all.png"> <img height=84 width=84 src="Plots/s_3_2216_all.png"></a></td>
+</tr>
+<tr><td><b>2301</b></td>
+<td><a href="Plots/s_1_2301_all.png">Plots/s_1_2301_all.png</a></td>
+<td><a href="Plots/s_2_2301_all.png">Plots/s_2_2301_all.png</a></td>
+<td><a href="Plots/s_3_2301_all.png">Plots/s_3_2301_all.png</a></td>
+</tr>
+<tr><td><b>2302</b></td>
+<td><a href="Plots/s_1_2302_all.png">Plots/s_1_2302_all.png</a></td>
+<td><a href="Plots/s_2_2302_all.png">Plots/s_2_2302_all.png</a></td>
+<td><a href="Plots/s_3_2302_all.png">Plots/s_3_2302_all.png</a></td>
+</tr>
+<tr><td><b>2303</b></td>
+<td><a href="Plots/s_1_2303_all.png">Plots/s_1_2303_all.png</a></td>
+<td><a href="Plots/s_2_2303_all.png">Plots/s_2_2303_all.png</a></td>
+<td><a href="Plots/s_3_2303_all.png">Plots/s_3_2303_all.png</a></td>
+</tr>
+<tr><td><b>2304</b></td>
+<td><a href="Plots/s_1_2304_all.png">Plots/s_1_2304_all.png</a></td>
+<td><a href="Plots/s_2_2304_all.png">Plots/s_2_2304_all.png</a></td>
+<td><a href="Plots/s_3_2304_all.png">Plots/s_3_2304_all.png</a></td>
+</tr>
+<tr><td><b>2305</b></td>
+<td><a href="Plots/s_1_2305_all.png"> <img height=84 width=84 src="Plots/s_1_2305_all.png"></a></td>
+<td><a href="Plots/s_2_2305_all.png"> <img height=84 width=84 src="Plots/s_2_2305_all.png"></a></td>
+<td><a href="Plots/s_3_2305_all.png"> <img height=84 width=84 src="Plots/s_3_2305_all.png"></a></td>
+</tr>
+<tr><td><b>2306</b></td>
+<td><a href="Plots/s_1_2306_all.png">Plots/s_1_2306_all.png</a></td>
+<td><a href="Plots/s_2_2306_all.png">Plots/s_2_2306_all.png</a></td>
+<td><a href="Plots/s_3_2306_all.png">Plots/s_3_2306_all.png</a></td>
+</tr>
+<tr><td><b>2307</b></td>
+<td><a href="Plots/s_1_2307_all.png">Plots/s_1_2307_all.png</a></td>
+<td><a href="Plots/s_2_2307_all.png">Plots/s_2_2307_all.png</a></td>
+<td><a href="Plots/s_3_2307_all.png">Plots/s_3_2307_all.png</a></td>
+</tr>
+<tr><td><b>2308</b></td>
+<td><a href="Plots/s_1_2308_all.png">Plots/s_1_2308_all.png</a></td>
+<td><a href="Plots/s_2_2308_all.png">Plots/s_2_2308_all.png</a></td>
+<td><a href="Plots/s_3_2308_all.png">Plots/s_3_2308_all.png</a></td>
+</tr>
+<tr><td><b>2309</b></td>
+<td><a href="Plots/s_1_2309_all.png">Plots/s_1_2309_all.png</a></td>
+<td><a href="Plots/s_2_2309_all.png">Plots/s_2_2309_all.png</a></td>
+<td><a href="Plots/s_3_2309_all.png">Plots/s_3_2309_all.png</a></td>
+</tr>
+<tr><td><b>2310</b></td>
+<td><a href="Plots/s_1_2310_all.png"> <img height=84 width=84 src="Plots/s_1_2310_all.png"></a></td>
+<td><a href="Plots/s_2_2310_all.png"> <img height=84 width=84 src="Plots/s_2_2310_all.png"></a></td>
+<td><a href="Plots/s_3_2310_all.png"> <img height=84 width=84 src="Plots/s_3_2310_all.png"></a></td>
+</tr>
+<tr><td><b>2311</b></td>
+<td><a href="Plots/s_1_2311_all.png">Plots/s_1_2311_all.png</a></td>
+<td><a href="Plots/s_2_2311_all.png">Plots/s_2_2311_all.png</a></td>
+<td><a href="Plots/s_3_2311_all.png">Plots/s_3_2311_all.png</a></td>
+</tr>
+<tr><td><b>2312</b></td>
+<td><a href="Plots/s_1_2312_all.png">Plots/s_1_2312_all.png</a></td>
+<td><a href="Plots/s_2_2312_all.png">Plots/s_2_2312_all.png</a></td>
+<td><a href="Plots/s_3_2312_all.png">Plots/s_3_2312_all.png</a></td>
+</tr>
+<tr><td><b>2313</b></td>
+<td><a href="Plots/s_1_2313_all.png">Plots/s_1_2313_all.png</a></td>
+<td><a href="Plots/s_2_2313_all.png">Plots/s_2_2313_all.png</a></td>
+<td><a href="Plots/s_3_2313_all.png">Plots/s_3_2313_all.png</a></td>
+</tr>
+<tr><td><b>2314</b></td>
+<td><a href="Plots/s_1_2314_all.png">Plots/s_1_2314_all.png</a></td>
+<td><a href="Plots/s_2_2314_all.png">Plots/s_2_2314_all.png</a></td>
+<td><a href="Plots/s_3_2314_all.png">Plots/s_3_2314_all.png</a></td>
+</tr>
+<tr><td><b>2315</b></td>
+<td><a href="Plots/s_1_2315_all.png"> <img height=84 width=84 src="Plots/s_1_2315_all.png"></a></td>
+<td><a href="Plots/s_2_2315_all.png"> <img height=84 width=84 src="Plots/s_2_2315_all.png"></a></td>
+<td><a href="Plots/s_3_2315_all.png"> <img height=84 width=84 src="Plots/s_3_2315_all.png"></a></td>
+</tr>
+<tr><td><b>2316</b></td>
+<td><a href="Plots/s_1_2316_all.png">Plots/s_1_2316_all.png</a></td>
+<td><a href="Plots/s_2_2316_all.png">Plots/s_2_2316_all.png</a></td>
+<td><a href="Plots/s_3_2316_all.png">Plots/s_3_2316_all.png</a></td>
+</tr>
+</table>
+</body>
+</html>
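The file above is CASAVA tile-plot test data: one row per tile, one column per lane, with inline thumbnails on a sampling of tiles. If a test needed to enumerate the referenced images, a sketch like the following would do; iter_plot_links is hypothetical and not part of htsworkflow:

import re

PLOT_RE = re.compile(r'href="(Plots/s_(\d+)_(\d+)_all\.png)"')

def iter_plot_links(html_text):
    # Yield (lane, tile, path) for every tile plot referenced in the page.
    for match in PLOT_RE.finditer(html_text):
        yield int(match.group(2)), int(match.group(3)), match.group(1)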
diff --git a/htsworkflow/pipelines/test/testdata/1_12/basecall_stats/Demultiplex_Stats.htm b/htsworkflow/pipelines/test/testdata/1_12/basecall_stats/Demultiplex_Stats.htm
new file mode 100644 (file)
index 0000000..fc98d6e
--- /dev/null
@@ -0,0 +1,517 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html xmlns:casava="http://www.illumina.com/casava/alignment" xmlns:str="http://exslt.org/strings">
+<link rel="stylesheet" href="css/Reports.css" type="text/css">
+<body>
+<h1>Flowcell: D0PMDACXX</h1>
+<h2>Barcode lane statistics</h2>
+<div ID="ScrollableTableHeaderDiv"><table width="100%">
+<col width="4%">
+<col width="5%">
+<col width="19%">
+<col width="8%">
+<col width="7%">
+<col width="5%">
+<col width="12%">
+<col width="7%">
+<col width="4%">
+<col width="5%">
+<col width="4%">
+<col width="5%">
+<col width="6%">
+<col width="5%">
+<col>
+<tr>
+<th>Lane</th>
+<th>Sample ID</th>
+<th>Sample Ref</th>
+<th>Index</th>
+<th>Description</th>
+<th>Control</th>
+<th>Project</th>
+<th>Yield (Mbases)</th>
+<th>% PF</th>
+<th># Reads</th>
+<th>% of raw clusters per lane</th>
+<th>% Perfect Index Reads</th>
+<th>% One Mismatch Reads (Index)</th>
+<th>% of &gt;= Q30 Bases (PF)</th>
+<th>Mean Quality Score (PF)</th>
+</tr>
+</table></div>
+<div ID="ScrollableTableBodyDiv"><table width="100%">
+<col width="4%">
+<col width="5%">
+<col width="19%">
+<col width="8%">
+<col width="7%">
+<col width="5%">
+<col width="12%">
+<col width="7%">
+<col width="4%">
+<col width="5%">
+<col width="4%">
+<col width="5%">
+<col width="6%">
+<col width="5%">
+<col>
+<tr>
+<td>1</td>
+<td>12812</td>
+<td>Mus_musculus</td>
+<td>TAAGGCGA</td>
+<td>ES 10pg Nxt 701 503</td>
+<td>N</td>
+<td>12812</td>
+<td>1,682</td>
+<td>97.13</td>
+<td>17,320,100</td>
+<td>15.05</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.59</td>
+<td>36.23</td>
+</tr>
+<tr>
+<td>1</td>
+<td>12813</td>
+<td>Mus_musculus</td>
+<td>CGTACTAG</td>
+<td>ES 10pg Nxt 702 504</td>
+<td>N</td>
+<td>12813</td>
+<td>2,340</td>
+<td>97.66</td>
+<td>23,962,119</td>
+<td>20.82</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.53</td>
+<td>36.16</td>
+</tr>
+<tr>
+<td>1</td>
+<td>12814</td>
+<td>Mus_musculus</td>
+<td>AGGCAGAA</td>
+<td>ES 10pg Nxt 703 503</td>
+<td>N</td>
+<td>12814</td>
+<td>1,612</td>
+<td>97.48</td>
+<td>16,542,272</td>
+<td>14.37</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.76</td>
+<td>36.28</td>
+</tr>
+<tr>
+<td>1</td>
+<td>12815</td>
+<td>Mus_musculus</td>
+<td>TCCTGAGC</td>
+<td>ST 1-1 Nxt 704 504</td>
+<td>N</td>
+<td>12815</td>
+<td>1,984</td>
+<td>97.52</td>
+<td>20,342,058</td>
+<td>17.67</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>92.70</td>
+<td>35.86</td>
+</tr>
+<tr>
+<td>1</td>
+<td>12816</td>
+<td>Mus_musculus</td>
+<td>GGACTCCT</td>
+<td>ES 1-1 Nxt 705 503</td>
+<td>N</td>
+<td>12816</td>
+<td>1,358</td>
+<td>96.17</td>
+<td>14,116,368</td>
+<td>12.27</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>92.44</td>
+<td>35.87</td>
+</tr>
+<tr>
+<td>1</td>
+<td>12817</td>
+<td>Mus_musculus</td>
+<td>TAGGCATG</td>
+<td>ES 1-2 Nxt 706 504</td>
+<td>N</td>
+<td>12817</td>
+<td>1,820</td>
+<td>97.45</td>
+<td>18,681,381</td>
+<td>16.23</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.84</td>
+<td>36.31</td>
+</tr>
+<tr>
+<td>1</td>
+<td>lane1</td>
+<td>unknown</td>
+<td>Undetermined</td>
+<td>Clusters with unmatched barcodes for lane 1</td>
+<td>N</td>
+<td>Undetermined_indices</td>
+<td>272</td>
+<td>65.77</td>
+<td>4,129,350</td>
+<td>3.59</td>
+<td>0.00</td>
+<td>0.00</td>
+<td>89.42</td>
+<td>35.01</td>
+</tr>
+<tr>
+<td>2</td>
+<td>12754</td>
+<td>Mus_musculus</td>
+<td>AGGCAGAA</td>
+<td>Illumina index_N703_N502_Paired ends_LCMD250_Mm_CastaneusXC57Bl_Purkinje</td>
+<td>N</td>
+<td>12754</td>
+<td>1,585</td>
+<td>97.00</td>
+<td>16,340,810</td>
+<td>12.35</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.91</td>
+<td>36.35</td>
+</tr>
+<tr>
+<td>2</td>
+<td>12818</td>
+<td>Homo_sapiens</td>
+<td>CGTACTAG</td>
+<td>Illumina index_N702_N501_Paired ends_GM12878_200_singlecell</td>
+<td>N</td>
+<td>12818</td>
+<td>1,593</td>
+<td>92.36</td>
+<td>17,246,981</td>
+<td>13.03</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>87.55</td>
+<td>34.39</td>
+</tr>
+<tr>
+<td>2</td>
+<td>12820</td>
+<td>Homo_sapiens</td>
+<td>TAGGCATG</td>
+<td>Illumina index_N706_N503_Paired ends_GM12878_208_singlecell</td>
+<td>N</td>
+<td>12820</td>
+<td>2,209</td>
+<td>96.54</td>
+<td>22,878,845</td>
+<td>17.29</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.75</td>
+<td>36.26</td>
+</tr>
+<tr>
+<td>2</td>
+<td>12821</td>
+<td>Homo_sapiens</td>
+<td>TAAGGCGA</td>
+<td>Illumina index_N701_N501_Paired ends_GM12878_poolsplit_5_singlecell</td>
+<td>N</td>
+<td>12821</td>
+<td>1,935</td>
+<td>93.00</td>
+<td>20,809,512</td>
+<td>15.73</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>88.88</td>
+<td>34.63</td>
+</tr>
+<tr>
+<td>2</td>
+<td>12822</td>
+<td>Homo_sapiens</td>
+<td>TCCTGAGC</td>
+<td>Illumina index_N704_N503_Paired ends_GM12878_poolsplit_6_singlecell</td>
+<td>N</td>
+<td>12822</td>
+<td>2,985</td>
+<td>96.49</td>
+<td>30,936,670</td>
+<td>23.38</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>92.68</td>
+<td>35.77</td>
+</tr>
+<tr>
+<td>2</td>
+<td>lane2</td>
+<td>unknown</td>
+<td>Undetermined</td>
+<td>Clusters with unmatched barcodes for lane 2</td>
+<td>N</td>
+<td>Undetermined_indices</td>
+<td>479</td>
+<td>19.87</td>
+<td>24,108,302</td>
+<td>18.22</td>
+<td>0.00</td>
+<td>0.00</td>
+<td>81.00</td>
+<td>32.24</td>
+</tr>
+<tr>
+<td>3</td>
+<td>12744</td>
+<td>Mus_musculus</td>
+<td>TAAGGCGA</td>
+<td>Illumina index_N701_N504_Paired ends_LCMD240_Mm_CastaneusXC57Bl_Purkinje</td>
+<td>N</td>
+<td>12744</td>
+<td>1,875</td>
+<td>96.88</td>
+<td>19,349,616</td>
+<td>15.49</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>94.05</td>
+<td>36.49</td>
+</tr>
+<tr>
+<td>3</td>
+<td>12749</td>
+<td>Mus_musculus</td>
+<td>TCCTGAGC</td>
+<td>Illumina index_N704_N502_Paired ends_LCMD245_Mm_CastaneusXC57Bl_Purkinje</td>
+<td>N</td>
+<td>12749</td>
+<td>1,732</td>
+<td>96.89</td>
+<td>17,878,210</td>
+<td>14.31</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.98</td>
+<td>36.43</td>
+</tr>
+<tr>
+<td>3</td>
+<td>12819</td>
+<td>Homo_sapiens</td>
+<td>GGACTCCT</td>
+<td>Illumina index_N705_N502_Paired ends_GM12878_205_singlecell</td>
+<td>N</td>
+<td>12819</td>
+<td>1,977</td>
+<td>96.65</td>
+<td>20,460,064</td>
+<td>16.38</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>94.19</td>
+<td>36.48</td>
+</tr>
+<tr>
+<td>3</td>
+<td>12823</td>
+<td>Homo_sapiens</td>
+<td>CGTACTAG</td>
+<td>Illumina index_N702_N502_Paired ends_GM12878_poolsplit_7_singlecell</td>
+<td>N</td>
+<td>12823</td>
+<td>1,969</td>
+<td>96.67</td>
+<td>20,364,123</td>
+<td>16.30</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.29</td>
+<td>36.16</td>
+</tr>
+<tr>
+<td>3</td>
+<td>12824</td>
+<td>Homo_sapiens</td>
+<td>AGGCAGAA</td>
+<td>Illumina index_N703_N504_Paired ends_GM12878_poolsplit_8_singlecell</td>
+<td>N</td>
+<td>12824</td>
+<td>2,245</td>
+<td>96.94</td>
+<td>23,159,466</td>
+<td>18.54</td>
+<td>100.00</td>
+<td>0.00</td>
+<td>93.36</td>
+<td>36.14</td>
+</tr>
+<tr>
+<td>3</td>
+<td>lane3</td>
+<td>unknown</td>
+<td>Undetermined</td>
+<td>Clusters with unmatched barcodes for lane 3</td>
+<td>N</td>
+<td>Undetermined_indices</td>
+<td>1,912</td>
+<td>80.60</td>
+<td>23,724,897</td>
+<td>18.99</td>
+<td>0.00</td>
+<td>0.00</td>
+<td>82.49</td>
+<td>32.94</td>
+</tr>
+</table></div>
+<p></p>
+<h2>Sample information</h2>
+<div ID="ScrollableTableHeaderDiv"><table width="100%">
+<col width="10%">
+<col width="10%">
+<col width="7%">
+<col>
+<tr>
+<th>Sample<p></p>ID</th>
+<th>Recipe</th>
+<th>Operator</th>
+<th>Directory</th>
+</tr>
+</table></div>
+<div ID="ScrollableTableBodyDiv"><table width="100%">
+<col width="10%">
+<col width="10%">
+<col width="7%">
+<col>
+<tr>
+<td>12812</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12812/Sample_12812</td>
+</tr>
+<tr>
+<td>12813</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12813/Sample_12813</td>
+</tr>
+<tr>
+<td>12814</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12814/Sample_12814</td>
+</tr>
+<tr>
+<td>12815</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12815/Sample_12815</td>
+</tr>
+<tr>
+<td>12816</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12816/Sample_12816</td>
+</tr>
+<tr>
+<td>12817</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12817/Sample_12817</td>
+</tr>
+<tr>
+<td>lane1</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Undetermined_indices/Sample_lane1</td>
+</tr>
+<tr>
+<td>12754</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12754/Sample_12754</td>
+</tr>
+<tr>
+<td>12818</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12818/Sample_12818</td>
+</tr>
+<tr>
+<td>12820</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12820/Sample_12820</td>
+</tr>
+<tr>
+<td>12821</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12821/Sample_12821</td>
+</tr>
+<tr>
+<td>12822</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12822/Sample_12822</td>
+</tr>
+<tr>
+<td>lane2</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Undetermined_indices/Sample_lane2</td>
+</tr>
+<tr>
+<td>12744</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12744/Sample_12744</td>
+</tr>
+<tr>
+<td>12749</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12749/Sample_12749</td>
+</tr>
+<tr>
+<td>12819</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12819/Sample_12819</td>
+</tr>
+<tr>
+<td>12823</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12823/Sample_12823</td>
+</tr>
+<tr>
+<td>12824</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Project_12824/Sample_12824</td>
+</tr>
+<tr>
+<td>lane3</td>
+<td>SR_indexing</td>
+<td>Lorian</td>
+<td>/mmjggl/jenner/rotifer/120406_SN787_0114_AD0PMDACXX/Unaligned_8ntIndex/Undetermined_indices/Sample_lane3</td>
+</tr>
+</table></div>
+<p>CASAVA-1.8.2</p>
+</body>
+</html>
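Demultiplex_Stats.htm stores per-barcode yields and read counts as comma-grouped strings inside plain <td> cells. htsworkflow presumably parses this with its own machinery; purely as an illustration, a minimal scrape of the row data could look like:

import re

def parse_count(text):
    # CASAVA writes comma-grouped numbers, e.g. '17,320,100'.
    return int(text.replace(',', ''))

def iter_barcode_rows(html_text):
    # Yield the list of cell strings for each table row; the header table
    # contains only <th> cells, so rows without <td> cells are skipped.
    for row in re.findall(r'<tr>(.*?)</tr>', html_text, re.DOTALL):
        cells = re.findall(r'<td>(.*?)</td>', row, re.DOTALL)
        if cells:
            yield cells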
diff --git a/htsworkflow/pipelines/test/testdata/1_12/basecall_stats/IVC.htm b/htsworkflow/pipelines/test/testdata/1_12/basecall_stats/IVC.htm
new file mode 100644 (file)
index 0000000..3ffbe47
--- /dev/null
@@ -0,0 +1,9 @@
+<html><head>
+<!--RUN_TIME 2012-04-11 13:12:39-->
+<!--SOFTWARE_VERSION -->
+</head><body>
+<title>120406_SN787_0114_AD0PMDACXX IVC</title><h1 align="center">120406_SN787_0114_AD0PMDACXX Intensity Plots<br><br> </h1><table border="1" cellpadding="5"><tr><td><b>Lane</b></td><td><b>All</b></td><td><b>Called</b></td><td><b>% Base Calls</b></td><td><b>% All</b></td><td><b>% Called</b></td></tr>
+<tr><td><b>1</b></td><td><a href="Plots/s_1_all.png"> <img height=84 width=126 src="Plots/s_1_all.png"></a></td><td><a href="Plots/s_1_call.png"> <img height=84 width=126 src="Plots/s_1_call.png"></a></td><td><a href="Plots/s_1_percent_base.png"> <img height=84 width=126 src="Plots/s_1_percent_base.png"></a></td><td><a href="Plots/s_1_percent_all.png"> <img height=84 width=126 src="Plots/s_1_percent_all.png"></a></td><td><a href="Plots/s_1_percent_call.png"> <img height=84 width=126 src="Plots/s_1_percent_call.png"></a></td></tr>
+<tr><td><b>2</b></td><td><a href="Plots/s_2_all.png"> <img height=84 width=126 src="Plots/s_2_all.png"></a></td><td><a href="Plots/s_2_call.png"> <img height=84 width=126 src="Plots/s_2_call.png"></a></td><td><a href="Plots/s_2_percent_base.png"> <img height=84 width=126 src="Plots/s_2_percent_base.png"></a></td><td><a href="Plots/s_2_percent_all.png"> <img height=84 width=126 src="Plots/s_2_percent_all.png"></a></td><td><a href="Plots/s_2_percent_call.png"> <img height=84 width=126 src="Plots/s_2_percent_call.png"></a></td></tr>
+<tr><td><b>3</b></td><td><a href="Plots/s_3_all.png"> <img height=84 width=126 src="Plots/s_3_all.png"></a></td><td><a href="Plots/s_3_call.png"> <img height=84 width=126 src="Plots/s_3_call.png"></a></td><td><a href="Plots/s_3_percent_base.png"> <img height=84 width=126 src="Plots/s_3_percent_base.png"></a></td><td><a href="Plots/s_3_percent_all.png"> <img height=84 width=126 src="Plots/s_3_percent_all.png"></a></td><td><a href="Plots/s_3_percent_call.png"> <img height=84 width=126 src="Plots/s_3_percent_call.png"></a></td></tr>
+</table></body></html>
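The IVC page embeds its run timestamp in an HTML comment. A small helper for tests that want it as a datetime, sketched here under the assumption of the comment format shown above (ivc_run_time is hypothetical):

import re
from datetime import datetime

def ivc_run_time(html_text):
    # Pull the timestamp out of the '<!--RUN_TIME ...-->' comment.
    match = re.search(r'<!--RUN_TIME (.*?)-->', html_text)
    if match:
        return datetime.strptime(match.group(1).strip(), '%Y-%m-%d %H:%M:%S')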
index 13eca3d95ba1147aa4d458ead5138240eb5f444f..c366ce32a238deac74491ad3300a7d2d343bf11f 100644 (file)
 """
-Generate settings for the Django Application.
+Django settings for the htsworkflow frontend.
 
-To make it easier to customize the application the settings can be
-defined in a configuration file read by ConfigParser.
-
-The options understood by this module are (with their defaults):
-
-  [frontend]
-  email_host=localhost
-  email_port=25
-  database=<section_name>
-
-  [database_name]
-  engine=sqlite3
-  name=/path/to/database
-
-  [admins]
-  #name1=email1
-
-  [allowed_hosts]
-  #name1=ip
-  localhost=127.0.0.1
-
-  [allowed_analysis_hosts]
-  #name1=ip
-  localhost=127.0.0.1
+For more information on this file, see
+https://docs.djangoproject.com/en/1.6/topics/settings/
 
+For the full list of settings and their values, see
+https://docs.djangoproject.com/en/1.6/ref/settings/
 """
-import ConfigParser
-import logging
-import os
-import shlex
-import htsworkflow
-import django
-from django.conf import global_settings
-
-from htsworkflow.util.api import make_django_secret_key
-
-HTSWORKFLOW_ROOT = os.path.abspath(os.path.split(htsworkflow.__file__)[0])
-LOGGER = logging.getLogger(__name__)
 
-# make epydoc happy
-__docformat__ = "restructuredtext en"
-
-def options_to_list(options, dest, section_name, option_name):
-  """
-  Load a options from section_name and store in a dictionary
-  """
-  if options.has_option(section_name, option_name):
-    opt = options.get(section_name, option_name)
-    dest.extend( shlex.split(opt) )
-
-def options_to_dict(dest, section_name):
-  """
-  Load a options from section_name and store in a dictionary
-  """
-  if options.has_section(section_name):
-    for name in options.options(section_name):
-      dest[name] = options.get(section_name, name)
-
-# define your defaults here
-options = ConfigParser.SafeConfigParser()
-
-def save_options(filename, options):
-    try:
-        ini_stream = open(filename, 'w')
-        options.write(ini_stream)
-        ini_stream.close()
-    except IOError, e:
-        LOGGER.debug("Error saving setting: %s" % (str(e)))
+# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
+import os
+BASE_DIR = os.path.dirname(os.path.dirname(__file__))
 
-INI_FILE = options.read([os.path.expanduser("~/.htsworkflow.ini"),
-                         '/etc/htsworkflow.ini',])
 
-# OptionParser will use the dictionary passed into the config parser as
-# 'Default' values in any section. However it still needs an empty section
-# to exist in order to retrieve anything.
-if not options.has_section('frontend'):
-    options.add_section('frontend')
-if not options.has_section('bcprinter'):
-    options.add_section('bcprinter')
+# Quick-start development settings - unsuitable for production
+# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
 
+# SECURITY WARNING: keep the secret key used in production secret!
+SECRET_KEY = 'c=5&609$7)bm_u+3$2bi=ida$*a)c1(cp_0siua7uyww!1qfg_'
 
-# Django settings for elandifier project.
+DEFAULT_API_KEY = 'n7HsXGHIi0vp9j5u4TIRJyqAlXYc4wrH'
 
+# SECURITY WARNING: don't run with debug turned on in production!
 DEBUG = True
-TEMPLATE_DEBUG = DEBUG
 
-ADMINS = []
-options_to_list(options, ADMINS, 'frontend', 'admins')
+TEMPLATE_DEBUG = True
 
-MANAGERS = []
-options_to_list(options, MANAGERS, 'frontend', 'managers')
+ALLOWED_HOSTS = ['jumpgate.caltech.edu']
 
-if options.has_option('front', 'default_pm'):
-    DEFAULT_PM=int(options.get('frontend', 'default_pm'))
-else:
-    DEFAULT_PM=5
 
+# Application definition
 AUTHENTICATION_BACKENDS = (
   'htsworkflow.frontend.samples.auth_backend.HTSUserModelBackend', )
 CUSTOM_USER_MODEL = 'samples.HTSUser'
 
-EMAIL_HOST='localhost'
-if options.has_option('frontend', 'email_host'):
-  EMAIL_HOST = options.get('frontend', 'email_host')
+INSTALLED_APPS = (
+    'django.contrib.admin',
+    'django.contrib.auth',
+    'django.contrib.contenttypes',
+    'django.contrib.sessions',
+    'django.contrib.messages',
+    'django.contrib.staticfiles',
+    'django.contrib.humanize',
 
-EMAIL_PORT = 25
-if options.has_option('frontend', 'email_port'):
-  EMAIL_PORT = int(options.get('frontend', 'email_port'))
+    'htsworkflow.frontend.eland_config',
+    'htsworkflow.frontend.samples',
+    'htsworkflow.frontend.experiments',
+    'htsworkflow.frontend.inventory',
+    'htsworkflow.frontend.bcmagic',
+    'htsworkflow.frontend.labels',
+)
 
-if options.has_option('frontend', 'notification_sender'):
-    NOTIFICATION_SENDER = options.get('frontend', 'notification_sender')
-else:
-    NOTIFICATION_SENDER = "noreply@example.com"
-NOTIFICATION_BCC = []
-options_to_list(options, NOTIFICATION_BCC, 'frontend', 'notification_bcc')
+MIDDLEWARE_CLASSES = (
+    'django.contrib.sessions.middleware.SessionMiddleware',
+    'django.middleware.common.CommonMiddleware',
+    'django.middleware.csrf.CsrfViewMiddleware',
+    'django.contrib.auth.middleware.AuthenticationMiddleware',
+    'django.contrib.messages.middleware.MessageMiddleware',
+    'django.middleware.clickjacking.XFrameOptionsMiddleware',
+)
 
-if not options.has_option('frontend', 'database'):
-  raise ConfigParser.NoSectionError(
-    "Please define [frontend] database=<Section>")
+TEMPLATE_DIRS = (
+    os.path.join(BASE_DIR, 'htsworkflow', 'frontend', 'templates'),
+    os.path.join(BASE_DIR, 'htsworkflow', 'templates'),
+)
 
-database_section = options.get('frontend', 'database')
+ROOT_URLCONF = 'htsworkflow.frontend.urls'
+#ROOT_URLCONF='wsgiexample.urls'
 
-if not options.has_section(database_section):
-    raise ConfigParser.NoSectionError(
-        "No database=<database_section_name> defined")
+WSGI_APPLICATION = 'htsworkflow.frontend.wsgi.application'
 
-# 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
-DATABASE_ENGINE = options.get(database_section, 'engine')
-DATABASE_NAME = options.get(database_section, 'name')
-if options.has_option(database_section, 'user'):
-    DATABASE_USER = options.get(database_section, 'user')
-if options.has_option(database_section, 'host'):
-    DATABASE_HOST = options.get(database_section, 'host')
-if options.has_option(database_section, 'port'):
-    DATABASE_PORT = options.get(database_section, 'port')
+# Database
+# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
 
-if options.has_option(database_section, 'password_file'):
-    password_file = options.get(database_section, 'password_file')
-    DATABASE_PASSWORD = open(password_file,'r').readline()
-elif options.has_option(database_section, 'password'):
-    DATABASE_PASSWORD = options.get(database_section, 'password')
+DATABASES = {
+    'default': {
+        'ENGINE': 'django.db.backends.sqlite3',
+        'NAME': os.path.join(BASE_DIR, 'fctracker.db'),
+    }
+}
 
-# Local time zone for this installation. Choices can be found here:
-# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
-# although not all variations may be possible on all operating systems.
-# If running in a Windows environment this must be set to the same as your
-# system time zone.
-if options.has_option('frontend', 'time_zone'):
-  TIME_ZONE = options.get('frontend', 'time_zone')
-else:
-  TIME_ZONE = 'America/Los_Angeles'
+# Internationalization
+# https://docs.djangoproject.com/en/1.6/topics/i18n/
 
-# Language code for this installation. All choices can be found here:
-# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
-# http://blogs.law.harvard.edu/tech/stories/storyReader$15
 LANGUAGE_CODE = 'en-us'
 
-SITE_ID = 1
+TIME_ZONE = 'UTC'
 
-# If you set this to False, Django will make some optimizations so as not
-# to load the internationalization machinery.
 USE_I18N = True
 
-# Absolute path to the directory that holds media.
-# Example: "/home/media/media.lawrence.com/"
-MEDIA_ROOT = os.path.join(HTSWORKFLOW_ROOT, 'frontend', 'static', '')
-
-# URL that handles the media served from MEDIA_ROOT.
-# Example: "http://media.lawrence.com"
-MEDIA_URL = '/static/'
-
-# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
-# trailing slash.
-# Examples: "http://foo.com/media/", "/media/".
-ADMIN_MEDIA_PREFIX = '/media/'
-
-# Make this unique, and don't share it with anybody.
-if not options.has_option('frontend', 'secret'):
-    options.set('frontend', 'secret_key', make_django_secret_key(458))
-    save_options(INI_FILE[0], options)
-SECRET_KEY = options.get('frontend', 'secret_key')
-
-# some of our urls need an api key
-DEFAULT_API_KEY = 'n7HsXGHIi0vp9j5u4TIRJyqAlXYc4wrH'
-
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
-    'django.template.loaders.filesystem.load_template_source',
-    'django.template.loaders.app_directories.load_template_source',
-#     'django.template.loaders.eggs.load_template_source',
-)
-
-MIDDLEWARE_CLASSES = (
-    'django.contrib.csrf.middleware.CsrfMiddleware',
-    'django.middleware.common.CommonMiddleware',
-    'django.contrib.sessions.middleware.SessionMiddleware',
-    'django.contrib.auth.middleware.AuthenticationMiddleware',
-    'django.middleware.doc.XViewMiddleware',
-)
-
-TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
-    'htsworkflow.frontend.thispage.thispage',
-)
-ROOT_URLCONF = 'htsworkflow.frontend.urls'
-
-TEMPLATE_DIRS = (
-    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
-    # Always use forward slashes, even on Windows.
-    # Don't forget to use absolute paths, not relative paths.
-    '/usr/share/python-support/python-django/django/contrib/admin/templates',
-    #'/usr/lib/pymodules/python2.6/django/contrib/admin/templates/',
-    os.path.join(HTSWORKFLOW_ROOT, 'frontend', 'templates'),
-    os.path.join(HTSWORKFLOW_ROOT, 'templates'),
-)
-
-INSTALLED_APPS = (
-    'django.contrib.admin',
-    'django.contrib.auth',
-    'django.contrib.contenttypes',
-    'django.contrib.humanize',
-    'django.contrib.sessions',
-    'django.contrib.sites',
-    'htsworkflow.frontend.eland_config',
-    'htsworkflow.frontend.samples',
-    # modules from htsworkflow branch
-    'htsworkflow.frontend.experiments',
-    'htsworkflow.frontend.analysis',
-    'htsworkflow.frontend.reports',
-    'htsworkflow.frontend.inventory',
-    'htsworkflow.frontend.bcmagic',
-    'htsworkflow.frontend.labels',
-    'django.contrib.databrowse',
-)
-
-# Project specific settings
+USE_L10N = True
 
-ALLOWED_IPS={'127.0.0.1': '127.0.0.1'}
-options_to_dict(ALLOWED_IPS, 'allowed_hosts')
+USE_TZ = True
 
-ALLOWED_ANALYS_IPS = {'127.0.0.1': '127.0.0.1'}
-options_to_dict(ALLOWED_ANALYS_IPS, 'allowed_analysis_hosts')
-#UPLOADTO_HOME = os.path.abspath('../../uploads')
-#UPLOADTO_CONFIG_FILE = os.path.join(UPLOADTO_HOME, 'eland_config')
-#UPLOADTO_ELAND_RESULT_PACKS = os.path.join(UPLOADTO_HOME, 'eland_results')
-#UPLOADTO_BED_PACKS = os.path.join(UPLOADTO_HOME, 'bed_packs')
-# Where "results_dir" means directory with all the flowcells
-if options.has_option('frontend', 'results_dir'):
-    RESULT_HOME_DIR=os.path.expanduser(options.get('frontend', 'results_dir'))
-else:
-    RESULT_HOME_DIR='/tmp'
 
-if options.has_option('frontend', 'link_flowcell_storage_device_url'):
-    LINK_FLOWCELL_STORAGE_DEVICE_URL = options.get('frontend',
-                                                   'link_flowcell_storage_device_url')
-else:
-    LINK_FLOWCELL_STORAGE_DEVICE_URL = None
-# PORT 9100 is default for Zebra tabletop/desktop printers
-# PORT 6101 is default for Zebra mobile printers
-BCPRINTER_PRINTER1_HOST = None
-if options.has_option('bcprinter', 'printer1_host'):
-    BCPRINTER_PRINTER1_HOST = options.get('bcprinter', 'printer1_host')
-BCPRINTER_PRINTER1_PORT=9100
-if options.has_option('bcprinter', 'printer1_port'):
-    BCPRINTER_PRINTER1_PORT = int(options.get('bcprinter', 'printer1_port'))
-BCPRINTER_PRINTER2_HOST = None
-if options.has_option('bcprinter', 'printer2_host'):
-    BCPRINTER_PRINTER1_HOST = options.get('bcprinter', 'printer2_host')
-BCPRINTER_PRINTER2_PORT=9100
-if options.has_option('bcprinter', 'printer2_port'):
-    BCPRINTER_PRINTER2_PORT = int(options.get('bcprinter', 'printer2_port'))
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/1.6/howto/static-files/
+STATIC_URL = '/static/'
 
+try:
+    # allow local customizations
+    from settings_local import *
+except ImportError as e:
+    pass
\ No newline at end of file
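The new settings module ends with from settings_local import *, so site-specific values can live outside version control. A sketch of what such a settings_local.py might contain; the file only has to be importable from sys.path, and every value below is an example rather than a project default:

# settings_local.py -- example site-specific overrides (illustrative values)
import os

DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['localhost']

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'htsworkflow',
        'USER': os.environ.get('HTSW_DB_USER', 'htsw'),
    }
}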
index c9000e41e79f3c3e2b2962a4465636cdb718c933..5633745687c70cc9954685fa582b66c6d62a730f 100644 (file)
@@ -2,6 +2,7 @@
 """
 from collections import MutableMapping
 import os
+import shutil
 import logging
 
 from collections import namedtuple
@@ -45,14 +46,21 @@ class ResultMap(MutableMapping):
                 lib_path = os.path.join(basepath, lib_path)
             self[lib_id] = lib_path
 
-    def make_tree_from(self, source_path, destpath = None):
+    def make_tree_from(self, source_path, destpath = None, link=True):
         """Create a tree using data files from source path.
         """
         if destpath is None:
             destpath = os.getcwd()
 
+        LOGGER.debug("Source_path: %s", source_path)
+        LOGGER.debug("Dest_path: %s", destpath)
         for lib_id in self.results_order:
             lib_path = self.results[lib_id]
+            LOGGER.debug("lib_path: %s", lib_path)
+            if os.path.isabs(lib_path):
+                lib_path = os.path.relpath(lib_path, destpath)
+
+            LOGGER.debug('lib_path: %s', lib_path)
             lib_destination = os.path.join(destpath, lib_path)
             if not os.path.exists(lib_destination):
                 LOGGER.info("Making dir {0}".format(lib_destination))
@@ -60,6 +68,7 @@ class ResultMap(MutableMapping):
 
             source_rel_dir = os.path.join(source_path, lib_path)
             source_lib_dir = os.path.abspath(source_rel_dir)
+            LOGGER.debug("source_lib_dir: %s", source_lib_dir)
 
             for filename in os.listdir(source_lib_dir):
                 source_pathname = os.path.join(source_lib_dir, filename)
@@ -67,8 +76,11 @@ class ResultMap(MutableMapping):
                 if not os.path.exists(source_pathname):
                     raise IOError(
                         "{0} does not exist".format(source_pathname))
-                if not os.path.exists(target_pathname):
-                    os.symlink(source_pathname, target_pathname)
+                if not (os.path.exists(target_pathname) or os.path.isdir(source_pathname)):
+                    if link:
+                        os.symlink(source_pathname, target_pathname)
+                    else:
+                        shutil.copy(source_pathname, target_pathname)
                     LOGGER.info(
                         'LINK {0} to {1}'.format(source_pathname,
                                                  target_pathname))
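The hunk above adds a link flag to ResultMap.make_tree_from so callers can copy result files instead of symlinking them, and normalizes absolute library paths to be relative to the destination. A minimal usage sketch, assuming a populated source tree (the paths are placeholders):

from htsworkflow.submission.results import ResultMap

result_map = ResultMap()
result_map['12345'] = '12345-sample'   # path relative to the source tree
# copy the files instead of symlinking them:
result_map.make_tree_from('/tmp/source', destpath='/tmp/results', link=False)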
index b3e2778ca490b8bbf4a111c1e2c0543f23910b00..c5a3f34b76c20aaae69cdb254b92e47190697752 100644 (file)
@@ -8,22 +8,22 @@ import RDF
 
 from htsworkflow.util.rdfhelp import \
      blankOrUri, \
-     dafTermOntology, \
      dump_model, \
+     fromTypedNode, \
      get_model, \
-     libraryOntology, \
-     owlNS, \
-     rdfNS, \
-     submissionLog, \
-     submissionOntology, \
-     toTypedNode, \
-     fromTypedNode
+     stripNamespace, \
+     toTypedNode
+from htsworkflow.util.rdfns import *
 from htsworkflow.util.hashfile import make_md5sum
 from htsworkflow.submission.fastqname import FastqName
 from htsworkflow.submission.daf import \
      MetadataLookupException, \
+     ModelException, \
      get_submission_uri
 
+from django.conf import settings
+from django.template import Context, Template, loader
+
 LOGGER = logging.getLogger(__name__)
 
 class Submission(object):
@@ -60,6 +60,13 @@ class Submission(object):
             pathname = os.path.abspath(os.path.join(analysis_dir, filename))
             self.construct_file_attributes(analysis_dir, libNode, pathname)
 
+    def analysis_nodes(self, result_map):
+        """Return an iterable of analysis nodes
+        """
+        for result_dir in result_map.values():
+            an_analysis = self.get_submission_node(result_dir)
+            yield an_analysis
+
     def construct_file_attributes(self, analysis_dir, libNode, pathname):
         """Looking for the best extension
         The 'best' is the longest match
@@ -117,10 +124,16 @@ class Submission(object):
         fileNode = self.make_file_node(pathname, an_analysis)
         self.add_md5s(filename, fileNode, analysis_dir)
         self.add_fastq_metadata(filename, fileNode)
+        self.add_label(file_type, fileNode, libNode)
         self.model.add_statement(
             RDF.Statement(fileNode,
                           rdfNS['type'],
                           file_type))
+        self.model.add_statement(
+            RDF.Statement(fileNode,
+                          libraryOntology['library'],
+                          libNode))
+
         LOGGER.debug("Done.")
 
     def make_file_node(self, pathname, submissionNode):
@@ -128,7 +141,8 @@ class Submission(object):
         """
         # add file specific information
         path, filename = os.path.split(pathname)
-        fileNode = RDF.Node(RDF.Uri('file://'+ os.path.abspath(pathname)))
+        pathname = os.path.abspath(pathname)
+        fileNode = RDF.Node(RDF.Uri('file://'+ pathname))
         self.model.add_statement(
             RDF.Statement(submissionNode,
                           dafTermOntology['has_file'],
@@ -137,6 +151,10 @@ class Submission(object):
             RDF.Statement(fileNode,
                           dafTermOntology['filename'],
                           filename))
+        self.model.add_statement(
+            RDF.Statement(fileNode,
+                          dafTermOntology['relative_path'],
+                          os.path.relpath(pathname)))
         return fileNode
 
     def add_md5s(self, filename, fileNode, analysis_dir):
@@ -168,6 +186,23 @@ class Submission(object):
             if value is not None:
                 s = RDF.Statement(fileNode, model_term, toTypedNode(value))
                 self.model.append(s)
+                
+    def add_label(self, file_type, file_node, lib_node):
+        """Add rdfs:label to a file node
+        """
+        template_term = libraryOntology['label_template']
+        label_template = self.model.get_target(file_type, template_term)
+        if label_template:
+            template = loader.get_template('submission_view_rdfs_label_metadata.sparql')
+            context = Context({
+                'library': str(lib_node.uri),
+                })
+            for r in self.execute_query(template, context):
+                context = Context(r)
+                label = Template(label_template).render(context)
+                s = RDF.Statement(file_node, rdfsNS['label'], unicode(label))
+                self.model.append(s)
 
     def _add_library_details_to_model(self, libNode):
         # attributes that can have multiple values
@@ -175,7 +210,12 @@ class Submission(object):
                               libraryOntology['has_mappings'],
                               dafTermOntology['has_file']))
         parser = RDF.Parser(name='rdfa')
-        new_statements = parser.parse_as_stream(libNode.uri)
+        try:
+            new_statements = parser.parse_as_stream(libNode.uri)
+        except RDF.RedlandError as e:
+            LOGGER.error(e)
+            return
+        LOGGER.debug("Scanning %s", str(libNode.uri))
         toadd = []
         for s in new_statements:
             # always add "collections"
@@ -308,8 +348,11 @@ class Submission(object):
                   'Small RNA (non-multiplexed)',]
         paired = ['Barcoded Illumina',
                   'Multiplexing',
+                  'NEBNext Multiplexed',
+                  'NEBNext Small RNA',
                   'Nextera',
-                  'Paired End (non-multiplexed)',]
+                  'Paired End (non-multiplexed)',
+                  'Dual Index Illumina',]
         if library_type in single:
             return False
         elif library_type in paired:
@@ -333,3 +376,21 @@ class Submission(object):
                 d[key] = fromTypedNode(value)
             results.append(d)
         return results
+
+
+def list_submissions(model):
+    """Return generator of submissions in this model.
+    """
+    query_body = """
+      PREFIX subns: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#>
+
+      select distinct ?submission
+      where { ?submission subns:has_submission ?library_dir }
+    """
+    query = RDF.SPARQLQuery(query_body)
+    rdfstream = query.execute(model)
+    for row in rdfstream:
+        s = stripNamespace(submissionLog, row['submission'])
+        if s[-1] in ['#', '/', '?']:
+            s = s[:-1]
+        yield s
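list_submissions pulls the distinct submission names out of an RDF model, stripping the namespace and any trailing '#', '/', or '?'. A short usage sketch mirroring the unit tests added below (the 'demo' data is made up):

from htsworkflow.util.rdfhelp import get_model, load_string_into_model
from htsworkflow.submission.submission import list_submissions

model = get_model()
load_string_into_model(model, 'turtle', """
@prefix subns: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#> .
<http://jumpgate.caltech.edu/wiki/SubmissionsLog/demo#>
    subns:has_submission <http://jumpgate.caltech.edu/wiki/SubmissionsLog/demo#lib1> .
""")
for name in list_submissions(model):
    print name      # prints: demo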
diff --git a/htsworkflow/submission/test/submission_test_common.py b/htsworkflow/submission/test/submission_test_common.py
new file mode 100644 (file)
index 0000000..3f43576
--- /dev/null
@@ -0,0 +1,103 @@
+"""Code shared between test cases.
+"""
+import RDF
+import logging
+import os
+import tempfile
+import htsworkflow.util.rdfhelp
+
+S1_NAME = '1000-sample'
+S2_NAME = '2000-sample'
+SCOMBINED_NAME = 'directory'
+
+S1_FILES = [
+    os.path.join(S1_NAME, 'file1_l8_r1.fastq'),
+    os.path.join(S1_NAME, 'file1_l8_r2.fastq'),
+]
+
+S2_FILES = [
+    os.path.join(S2_NAME, 'file1.bam'),
+    os.path.join(S2_NAME, 'file1_l5.fastq'),
+]
+
+SCOMBINED_FILES = [
+    os.path.join(SCOMBINED_NAME, 's1_file1.bam'),
+    os.path.join(SCOMBINED_NAME, 's1_l5.fastq'),
+    os.path.join(SCOMBINED_NAME, 's2_file1.bam'),
+    os.path.join(SCOMBINED_NAME, 's2_l4.read1.fastq'),
+    os.path.join(SCOMBINED_NAME, 's2_l4.read2.fastq'),
+]
+
+TURTLE_PREFIX = htsworkflow.util.rdfhelp.get_turtle_header()
+
+S1_TURTLE = TURTLE_PREFIX + """
+<http://localhost/library/1000/>
+  htswlib:cell_line "Cell1000" ;
+  htswlib:library_id "1000" ;
+  htswlib:library_type "Single" ;
+  htswlib:replicate "1" ;
+  htswlib:has_lane <http://localhost/lane/1> ;
+  a htswlib:IlluminaLibrary .
+
+<http://localhost/lane/1>
+  htswlib:flowcell <http://localhost/flowcel/1234ABXXX> ;
+  htswlib:lane_number "1"@en;
+  a htswlib:IlluminaLane .
+"""
+
+S2_TURTLE = TURTLE_PREFIX + """
+<http://localhost/library/2000/>
+  htswlib:cell_line "Cell2000" ;
+  htswlib:library_id "2000" ;
+  htswlib:library_type "Paired End (non-multiplexed)" ;
+  htswlib:replicate "2" ;
+  htswlib:has_lane <http://localhost/lane/2> ;
+  a htswlib:Library .
+
+<http://localhost/lane/2>
+  htswlib:flowcell <http://localhost/flowcel/1234ABXXX> ;
+  htswlib:lane_number "2"@en ;
+  a htswlib:IlluminaLane .
+"""
+
+class MockAddDetails(object):
+    def __init__(self, model, turtle=None):
+        self.model = model
+        if turtle:
+            self.add_turtle(turtle)
+
+    def add_turtle(self, turtle):
+        parser = RDF.Parser('turtle')
+        parser.parse_string_into_model(self.model, turtle, "http://localhost")
+
+    def __call__(self, libNode):
+        q = RDF.Statement(libNode, None, None)
+        found = False
+        for s in self.model.find_statements(q):
+            found = True
+            break
+        assert found
+
+def generate_sample_results_tree(obj, prefix):
+    obj.tempdir = tempfile.mkdtemp(prefix=prefix)
+    obj.sourcedir = os.path.join(obj.tempdir, 'source')
+    os.mkdir(obj.sourcedir)
+    obj.resultdir = os.path.join(obj.tempdir, 'results')
+    os.mkdir(obj.resultdir)
+
+    for d in [os.path.join(obj.sourcedir, S1_NAME),
+              os.path.join(obj.sourcedir, S2_NAME),
+              ]:
+        logging.debug("Creating: %s", d)
+        os.mkdir(d)
+
+    tomake = []
+    tomake.extend(S1_FILES)
+    tomake.extend(S2_FILES)
+    for f in tomake:
+        target = os.path.join(obj.sourcedir, f)
+        logging.debug("Creating: %s", target)
+        stream = open(target, 'w')
+        stream.write(f)
+        stream.close()
+
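The new shared module expects test classes to hand themselves to generate_sample_results_tree, which hangs tempdir, sourcedir, and resultdir off the instance. A sketch of the intended wiring (ExampleTest is illustrative only):

import shutil
from unittest2 import TestCase
from submission_test_common import generate_sample_results_tree

class ExampleTest(TestCase):
    def setUp(self):
        # creates self.tempdir with 'source' and 'results' subdirectories
        # plus stub fastq/bam files for the two sample libraries
        generate_sample_results_tree(self, 'example_test')

    def tearDown(self):
        shutil.rmtree(self.tempdir)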
index ffb9f88620356ba0e61800db60fa4715463515e2..09d68083c1ec50052c450a71e9d9e24356573cf7 100644 (file)
@@ -7,6 +7,10 @@ import shutil
 import tempfile
 
 from django.test import TestCase
+from django.test.utils import setup_test_environment, \
+     teardown_test_environment
+from django.db import connection
+from django.conf import settings
 
 from htsworkflow.submission.condorfastq import CondorFastqExtract
 from htsworkflow.submission.results import ResultMap
@@ -676,6 +680,16 @@ class TestCondorFastq(TestCase):
             self.assertTrue('12345_C02F9ACXX_c202_l3_r2.fastq' in arguments[3])
 
 
+OLD_DB = settings.DATABASES['default']['NAME']
+def setUpModule():
+    setup_test_environment()
+    connection.creation.create_test_db()
+
+def tearDownModule():
+    connection.creation.destroy_test_db(OLD_DB)
+    teardown_test_environment()
+
+
 def suite():
     from unittest2 import TestSuite, defaultTestLoader
     suite = TestSuite()
index c7227216e42d2582129c8dae9dacc1d88638b499..082c4ab5e5cb793e0d43c701d2cf1e9de08867eb 100644 (file)
@@ -1,4 +1,5 @@
 from contextlib import contextmanager
+import logging
 import os
 from StringIO import StringIO
 import shutil
@@ -167,7 +168,7 @@ def dump_model(model):
 
 class TestUCSCSubmission(TestCase):
     def setUp(self):
-        test_results.generate_sample_results_tree(self)
+        test_results.generate_sample_results_tree(self, 'daf_results')
 
     def tearDown(self):
         # see things created by temp_results.generate_sample_results_tree
@@ -247,7 +248,7 @@ thisView:FastqRd1 dafTerm:filename_re ".*\\\\.fastq" ;
         # server is 500 for this library
         self.failUnlessEqual(gel_cut, 100)
 
-        species = daf_mapper._get_library_attribute(libNode, 'species')
+        species = daf_mapper._get_library_attribute(libNode, 'species_name')
         self.failUnlessEqual(species, "Homo sapiens")
 
         with mktempdir('analysis') as analysis_dir:
@@ -331,9 +332,11 @@ def mktempfile(suffix='', prefix='tmp', dir=None):
 
 def suite():
     suite = TestSuite()
-    suite.addTests(defaultTestLoader.loadTestsFromTestCase(TestUCSCInfo))
+    suite.addTests(defaultTestLoader.loadTestsFromTestCase(TestDAF))
+    suite.addTests(defaultTestLoader.loadTestsFromTestCase(TestUCSCSubmission))
     return suite
 
 if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
     from unittest2 import main
     main(defaultTest='suite')
index 3487b69d7e410f1734887f6afd0969013f0466ff..ee207cf57d972cc48dbda9279fba6683f29ab0bb 100644 (file)
@@ -1,50 +1,16 @@
 #!/usr/bin/env python
 
-import copy
-import os
 from pprint import pprint
 import shutil
-import tempfile
 
 from unittest2 import TestCase, defaultTestLoader
 
 from htsworkflow.submission.results import ResultMap
-
-S1_NAME = '1000-sample'
-S2_NAME = '2000-sample'
-
-S1_FILES = [
-    os.path.join(S1_NAME, 'file1_l8_r1.fastq'),
-    os.path.join(S1_NAME, 'file1_l8_r2.fastq'),
-]
-
-S2_FILES = [
-    os.path.join(S2_NAME, 'file1.bam'),
-    os.path.join(S2_NAME, 'file1_l5.fastq'),
-]
-
-def generate_sample_results_tree(obj):
-    obj.tempdir = tempfile.mkdtemp(prefix="results_test")
-    obj.sourcedir = os.path.join(obj.tempdir, 'source')
-    obj.resultdir = os.path.join(obj.tempdir, 'results')
-
-    for d in [obj.sourcedir,
-              os.path.join(obj.sourcedir, S1_NAME),
-              os.path.join(obj.sourcedir, S2_NAME),
-              obj.resultdir]:
-        os.mkdir(os.path.join(obj.tempdir, d))
-
-    tomake = []
-    tomake.extend(S1_FILES)
-    tomake.extend(S2_FILES)
-    for f in tomake:
-        stream = open(os.path.join(obj.sourcedir, f), 'w')
-        stream.write(f)
-        stream.close()
+from submission_test_common import *
 
 class TestResultMap(TestCase):
     def setUp(self):
-        generate_sample_results_tree(self)
+        generate_sample_results_tree(self, 'results_test')
 
     def tearDown(self):
         shutil.rmtree(self.tempdir)
@@ -74,13 +40,32 @@ class TestResultMap(TestCase):
         self.assertFalse(u'77777' in results)
         self.assertFalse('77777' in results)
 
-    def test_make_from(self):
+    def test_make_from_absolute(self):
+        """Test that make from works if ResultMap has absolute paths
+        """
+        results = ResultMap()
+        sample1_dir = os.path.join(self.resultdir, S1_NAME)
+        sample2_dir = os.path.join(self.resultdir, S2_NAME)
+        results['1000'] =  sample1_dir
+        results['2000'] =  sample2_dir
+
+        results.make_tree_from(self.sourcedir, self.resultdir)
+        self.failUnless(os.path.isdir(sample1_dir))
+        self.failUnless(os.path.isdir(sample2_dir))
+
+        for f in S1_FILES + S2_FILES:
+            self.failUnless(
+                os.path.islink(
+                    os.path.join(self.resultdir, f)))
+
+    def test_make_from_filename(self):
+        """Test that make from works if ResultMap has no path
+        """
         results = ResultMap()
         results['1000'] =  S1_NAME
         results['2000'] =  S2_NAME
 
         results.make_tree_from(self.sourcedir, self.resultdir)
-
         sample1_dir = os.path.join(self.resultdir, S1_NAME)
         sample2_dir = os.path.join(self.resultdir, S2_NAME)
         self.failUnless(os.path.isdir(sample1_dir))
@@ -91,11 +76,20 @@ class TestResultMap(TestCase):
                 os.path.islink(
                     os.path.join(self.resultdir, f)))
 
+    def test_make_from_shared_directory(self):
+        """Split multiple datasets stored in a single directory
+        """
+        self.skipTest("not implemented yet")
+        results = ResultMap()
+        results['S1'] = os.path.join(SCOMBINED_NAME, 's1*')
+        results['S2'] = os.path.join(SCOMBINED_NAME, 's2*')
 
 def suite():
     suite = defaultTestLoader.loadTestsFromTestCase(TestResultMap)
     return suite
 
 if __name__ == "__main__":
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
     from unittest2 import main
     main(defaultTest='suite')
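The skipped test_make_from_shared_directory above documents a planned feature: splitting several datasets that share one directory using glob patterns such as 's1*'. One way that expansion might eventually work, sketched with a hypothetical helper:

import glob
import os

def expand_result_pattern(source_root, pattern):
    # Hypothetical helper for the skipped test: expand a pattern such as
    # os.path.join(SCOMBINED_NAME, 's1*') into the files belonging to one
    # dataset inside a shared directory, relative to the source tree.
    matches = glob.glob(os.path.join(source_root, pattern))
    return [os.path.relpath(path, source_root) for path in matches]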
diff --git a/htsworkflow/submission/test/test_submission.py b/htsworkflow/submission/test/test_submission.py
new file mode 100644 (file)
index 0000000..53b4e91
--- /dev/null
@@ -0,0 +1,176 @@
+
+import os
+from StringIO import StringIO
+import shutil
+import tempfile
+from unittest2 import TestCase, TestSuite, defaultTestLoader
+
+from htsworkflow.submission import daf, results
+from htsworkflow.util.rdfhelp import \
+     dafTermOntology, \
+     dump_model, \
+     fromTypedNode, \
+     get_turtle_header, \
+     load_string_into_model, \
+     rdfNS, \
+     submissionLog, \
+     submissionOntology, \
+     get_model, \
+     get_serializer
+from htsworkflow.submission.submission import list_submissions, Submission
+from htsworkflow.submission.results import ResultMap
+from submission_test_common import *
+
+import RDF
+#import logging
+#logging.basicConfig(level=logging.DEBUG)
+
+class TestSubmissionModule(TestCase):
+    def test_empty_list_submission(self):
+        model = get_model()
+        self.assertEqual(len(list(list_submissions(model))), 0)
+
+    def test_one_submission(self):
+        model = get_model()
+        load_string_into_model(model, "turtle",
+            """
+            @prefix subns: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#> .
+            @prefix test: <http://jumpgate.caltech.edu/wiki/SubmissionsLog/test#> .
+
+            <http://jumpgate.caltech.edu/wiki/SubmissionsLog/test#>
+               subns:has_submission test:lib1 ;
+               subns:has_submission test:lib2.
+            """)
+        submissions = list(list_submissions(model))
+        self.assertEqual(len(submissions), 1)
+        self.assertEqual(submissions[0], "test")
+
+    def test_two_submission(self):
+        model = get_model()
+        load_string_into_model(model, "turtle",
+            """
+            @prefix subns: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#> .
+            @prefix test: <http://jumpgate.caltech.edu/wiki/SubmissionsLog/test#> .
+
+            <http://jumpgate.caltech.edu/wiki/SubmissionsLog/test1#>
+               subns:has_submission test:lib1 .
+            <http://jumpgate.caltech.edu/wiki/SubmissionsLog/test2#>
+               subns:has_submission test:lib2 .
+            """)
+        submissions = list(list_submissions(model))
+        self.assertEqual(len(submissions), 2)
+        truth = set(["test1", "test2"])
+        testset = set()
+        for name in submissions:
+            testset.add(name)
+        self.assertEqual(testset, truth)
+
+class TestSubmission(TestCase):
+    def setUp(self):
+        generate_sample_results_tree(self, 'submission_test')
+        self.model = get_model()
+
+    def tearDown(self):
+        shutil.rmtree(self.tempdir)
+
+    def test_create_submission(self):
+        model = get_model()
+        s = Submission('foo', self.model, 'http://localhost')
+        self.assertEqual(str(s.submissionSet),
+                         "http://jumpgate.caltech.edu/wiki/SubmissionsLog/foo")
+        self.assertEqual(str(s.submissionSetNS['']),
+                         str(RDF.NS(str(s.submissionSet) + '#')['']))
+        self.assertEqual(str(s.libraryNS['']),
+                         str(RDF.NS('http://localhost/library/')['']))
+
+    def test_scan_submission_dirs(self):
+        turtle = get_turtle_header() + """
+@prefix thisView: <http://jumpgate.caltech.edu/wiki/SubmissionsLog/test/view/> .
+thisView:Fastq ucscDaf:filename_re ".*[^12]\\.fastq\\.bz2$" ;
+               a geoSoft:raw ;
+               geoSoft:fileTypeLabel "fastq" ;
+               ucscDaf:output_type "read" .
+thisView:FastqRead1 ucscDaf:filename_re ".*r1\\.fastq\\.bz2$" ;
+               a geoSoft:raw ;
+               geoSoft:fileTypeLabel "fastq" ;
+               ucscDaf:output_type "read1" .
+thisView:FastqRead2 ucscDaf:filename_re ".*r2\\.fastq\\.bz2$" ;
+               a geoSoft:raw ;
+               geoSoft:fileTypeLabel "fastq" ;
+               ucscDaf:output_type "read2" .
+thisView:alignments ucscDaf:filename_re ".*\\.bam$" ;
+               a geoSoft:supplemental ;
+               geoSoft:fileTypeLabel "bam" ;
+               ucscDaf:output_type "alignments" .
+
+        """
+        map = ResultMap()
+        print self.tempdir
+        print os.listdir(self.tempdir)
+        map['1000'] = os.path.join(self.sourcedir, S1_NAME)
+        map['2000'] = os.path.join(self.sourcedir, S2_NAME)
+
+        s = Submission('foo', self.model, 'http://localhost')
+        mock = MockAddDetails(self.model, turtle)
+        mock.add_turtle(S1_TURTLE)
+        mock.add_turtle(S2_TURTLE)
+        s._add_library_details_to_model =  mock
+        s.scan_submission_dirs(map)
+
+        nodes = list(s.analysis_nodes(map))
+        self.assertEqual(len(nodes), 2)
+        expected = set((
+            'http://jumpgate.caltech.edu/wiki/SubmissionsLog/foo#1000-sample',
+            'http://jumpgate.caltech.edu/wiki/SubmissionsLog/foo#2000-sample',
+        ))
+        got = set((str(nodes[0]), str(nodes[1])))
+        self.assertEqual(expected, got)
+
+    def test_find_best_match(self):
+        turtle = get_turtle_header() + """
+@prefix thisView: <http://jumpgate.caltech.edu/wiki/SubmissionsLog/test/view/> .
+thisView:Fastq ucscDaf:filename_re ".*[^12]\\.fastq\\.bz2$" ;
+               a geoSoft:raw ;
+               geoSoft:fileTypeLabel "fastq" ;
+               ucscDaf:output_type "read" .
+thisView:FastqRead1 ucscDaf:filename_re ".*r1\\.fastq\\.bz2$" ;
+               a geoSoft:raw ;
+               geoSoft:fileTypeLabel "fastq" ;
+               ucscDaf:output_type "read1" .
+thisView:FastqRead2 ucscDaf:filename_re ".*r2\\.fastq\\.bz2$" ;
+               a geoSoft:raw ;
+               geoSoft:fileTypeLabel "fastq" ;
+               ucscDaf:output_type "read2" .
+thisView:alignments ucscDaf:filename_re ".*\\.bam$" ;
+               a geoSoft:supplemental ;
+               geoSoft:fileTypeLabel "bam" ;
+               ucscDaf:output_type "alignments" .
+
+        """
+        load_string_into_model(self.model, 'turtle', turtle)
+        s = Submission('foo', self.model, 'http://localhost')
+        q = RDF.Statement(None, dafTermOntology['filename_re'], None)
+        view_map = s._get_filename_view_map()
+        self.assertEqual(len(view_map), 4)
+
+        fastq = s.find_best_match("asdf.fastq.bz2")
+        self.assertEqual(
+            str(fastq),
+            "http://jumpgate.caltech.edu/wiki/SubmissionsLog/test/view/Fastq")
+
+        fastq = s.find_best_match("asdf.r2.fastq.bz2")
+        self.assertEqual(
+            str(fastq),
+            "http://jumpgate.caltech.edu/wiki/SubmissionsLog/test/view/FastqRead2")
+
+def suite():
+    suite = TestSuite()
+    suite.addTests(
+        defaultTestLoader.loadTestsFromTestCase(TestSubmissionModule))
+    suite.addTests(
+        defaultTestLoader.loadTestsFromTestCase(TestSubmission))
+    return suite
+
+if __name__ == "__main__":
+    from unittest2 import main
+    main(defaultTest='suite')
diff --git a/htsworkflow/submission/trackhub_submission.py b/htsworkflow/submission/trackhub_submission.py
new file mode 100644 (file)
index 0000000..e383175
--- /dev/null
@@ -0,0 +1,301 @@
+import logging
+import os
+from pprint import pformat
+import string
+import re
+
+import RDF
+
+from htsworkflow.submission.submission import Submission
+
+from htsworkflow.util.rdfhelp import \
+     fromTypedNode, \
+     geoSoftNS, \
+     stripNamespace, \
+     submissionOntology
+from htsworkflow.util.url import parse_ssh_url
+from htsworkflow.util.ucsc import bigWigInfo
+
+from django.conf import settings
+from django.template import Context, loader
+from trackhub import default_hub, CompositeTrack, Track, SuperTrack, ViewTrack
+from trackhub.track import TRACKTYPES, SubGroupDefinition
+from trackhub.helpers import show_rendered_files
+from trackhub.upload import upload_track, upload_hub
+
+LOGGER = logging.getLogger(__name__)
+
+class TrackHubSubmission(Submission):
+    def __init__(self, name, model, baseurl, baseupload, host):
+        """Create a trackhub based submission
+
+        :Parameters:
+          - `name`: Name of submission
+          - `model`: librdf model reference
+          - `baseurl`: web root where trackhub will be hosted
+          - `baseupload`: filesystem root where trackhub will be hosted
+          - `host`: hostname for library pages.
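+
+        An illustrative call (all argument values hypothetical)::
+
+          TrackHubSubmission('mysub', model,
+                             'http://example.org/hubs',
+                             'user@example.org:/var/www/hubs',
+                             'http://localhost')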
+        """
+        super(TrackHubSubmission, self).__init__(name, model, host)
+        if baseurl is None:
+            raise ValueError("Need a web root to make a track hub")
+        self.baseurl = os.path.join(baseurl, self.name)
+        if baseupload:
+            sshurl = parse_ssh_url(baseupload)
+            self.user = sshurl.user
+            self.host = sshurl.host
+            self.uploadpath = sshurl.path
+        else:
+            self.uploadpath = None
+
+    def make_hub_template(self, result_map):
+        samples = []
+        for an_analysis in self.analysis_nodes(result_map):
+            metadata = self.get_sample_metadata(an_analysis)
+            if len(metadata) == 0:
+                errmsg = 'No metadata found for {0}'
+                LOGGER.error(errmsg.format(str(an_analysis),))
+                continue
+            elif len(metadata) > 1:
+                errmsg = 'Confused: found more than one sample for %s'
+                LOGGER.debug(errmsg % (str(an_analysis),))
+            metadata = metadata[0]
+            samples.append(metadata)
+
+        template = loader.get_template('trackDb.txt')
+        context = Context({
+            'samples': samples,
+        })
+        return str(template.render(context))
+
+    def make_hub(self, result_map):
+        genome_db = 'hg19'
+        hub_url = self.baseurl + '/'
+        hub, genomes_file, genome, trackdb = default_hub(
+            hub_name=self.name,
+            short_label=self.name,
+            long_label=self.name,
+            email='email',
+            genome=genome_db)
+
+        hub.remote_dir = self.uploadpath
+
+        # build higher order track types
+        composite = CompositeTrack(
+            name=self.sanitize_name(self.name),
+            short_label=self.sanitize_name(self.name),
+            long_label=str(self.name),
+            tracktype="bed 3",
+            dragAndDrop='subtracks',
+            visibility='full',
+        )
+        trackdb.add_tracks(composite)
+
+        subgroups = self.add_subgroups(composite)
+
+        view = None
+
+        for track in self.get_tracks():
+            if track['file_type'] not in TRACKTYPES:
+                LOGGER.info('Unrecognized file type %s', track['file_type'])
+                continue
+
+            view = self.add_new_view_if_needed(composite, view, track)
+            track_name = self.make_track_name(track)
+
+            track_subgroup = self.make_track_subgroups(subgroups, track)
+            track_type = self.make_track_type(track)
+
+            if 'file_label' in track:
+                track_label = self.sanitize_name(track['file_label'])
+            else:
+                track_label = track_name
+
+            attributes = {
+                'name': track_name,
+                'tracktype': track_type,
+                'url': hub_url + str(track['relative_path']),
+                'short_label': str(track['library_id']),
+                'long_label': str(track_label),
+                'subgroups': track_subgroup,
+            }
+
+            LOGGER.debug('track attributes: %s', pformat(attributes))
+            newtrack = Track(**attributes)
+            view.add_tracks([newtrack])
+
+        results = hub.render()
+        if hub.remote_dir:
+            LOGGER.info("Uploading to %s @ %s : %s",
+                        self.user, self.host, hub.remote_dir)
+            upload_hub(hub=hub, host=self.host, user=self.user)
+
+    def add_new_view_if_needed(self, composite, view, track):
+        """Add new trakkhub view if we've hit a new type of track.
+
+        :Parameters:
+          - `composite`: composite track to attach to
+          - `view`: the current view track, or None
+          - `track`: current track record
+        """
+        current_view_type = str(track['output_type'])
+        if not view or current_view_type != view.name:
+            attributes = {
+                'name': current_view_type,
+                'view': current_view_type,
+                'visibility': str(track.get('visibility', 'squish')),
+                'short_label': current_view_type,
+                'tracktype': str(track['file_type'])
+            }
+            maxHeightPixels = track.get('maxHeightPixels')
+            if maxHeightPixels:
+                attributes['maxHeightPixels'] = str(maxHeightPixels)
+            autoScale = track.get('autoScale')
+            if autoScale:
+                attributes['autoScale'] = str(autoScale)
+            view = ViewTrack(**attributes)
+            composite.add_view(view)
+        return view
+
+    def make_manifest(self, result_map):
+        files = []
+        for an_analysis in self.analysis_nodes(result_map):
+            metadata = self.get_manifest_metadata(an_analysis)
+            files.extend(metadata)
+
+        template = loader.get_template('manifest.txt')
+        context = Context({
+            'files': files
+        })
+        return str(template.render(context))
+
+    def make_track_name(self, track):
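+        # e.g. library_id '12345', replicate '1', output_type 'read'
+        # yields '12345_1_read' (illustrative values)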
+        return '{}_{}_{}'.format(
+            track['library_id'],
+            track['replicate'],
+            track['output_type'],
+        )
+
+    def make_track_subgroups(self, subgroups, track):
+        track_subgroups = {}
+        for k in subgroups:
+            if k in track and track[k]:
+                value = self.sanitize_name(track[k])
+                track_subgroups[k] = value
+        return track_subgroups
+
+    def make_track_type(self, track):
+        """Further annotate the track type.
+
+        bigWig files can carry additional range information; add it when we can.
+        """
+        track_type = track['file_type']
+        if track_type.lower() == 'bigwig':
+            # something we can enhance
+            info = bigWigInfo(track['relative_path'])
+            if info.min is not None and info.max is not None:
+                track_type = '{} {} {}'.format(track_type, int(info.min), int(info.max))
+
+        LOGGER.debug("track_type: %s", track_type)
+        return str(track_type)
+
+    def add_subgroups(self, composite):
+        """Add subgroups to composite track"""
+        search = [ ('htswlib:cell_line', 'cell'),
+                   ('encode3:rna_type', 'rna_type'),
+                   ('encode3:protocol', 'protocol'),
+                   ('htswlib:replicate', 'replicate'),
+                   ('encode3:library_id', 'library_id'),
+                   ('encode3:assay', 'assay'),
+                 ]
+        subgroups = []
+        names = []
+        sortorder = []
+        dimnames = ('dim{}'.format(x) for x in string.ascii_uppercase)
+        dimensions = []
+        filtercomposite = []
+        for term, name in search:
+            definitions = self.make_subgroupdefinition(term, name)
+            if definitions:
+                subgroups.append(definitions)
+                names.append(name)
+                sortorder.append("{}=+".format(name))
+                d = dimnames.next()
+                dimensions.append("{}={}".format(d, name))
+                filtercomposite.append("{}=multi".format(d))
+
+        composite.add_subgroups(subgroups)
+        composite.add_params(sortOrder=' '.join(sortorder))
+        composite.add_params(dimensions=' '.join(dimensions))
+        composite.add_params(filterComposite=' '.join(filtercomposite))
+        return names
+
+    def make_subgroupdefinition(self, term, name):
+        """Subgroup attributes need to be an attribute of the library.
+        """
+        template = loader.get_template('trackhub_term_values.sparql')
+        context = Context({'term': term})
+        results = self.execute_query(template, context)
+        values = {}
+        for row in results:
+            value = str(row['name'])
+            values[self.sanitize_name(value)] = value
+
+        if values:
+            return SubGroupDefinition(
+                    name=name,
+                    label=name,
+                    mapping=values,
+            )
+        else:
+            return None
+
+    def get_tracks(self):
+        """Collect information needed to describe trackhub tracks.
+        """
+        query_template = loader.get_template('trackhub_samples.sparql')
+
+        context = Context({})
+
+        results = self.execute_query(query_template, context)
+        return results
+
+    def sanitize_name(self, name):
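+        """Make a name safe(r) for use as a track name.
+
+        e.g. 'poly-A+ RNA-Seq' becomes 'PolyAplus_RNASeq' (illustrative).
+        """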
+        replacements = [('poly-?a\+', 'PolyAplus'),
+                        ('poly-?a-', 'PolyAminus'),
+                        ('RNA-Seq', 'RNASeq'),
+                        ('rna-seq', 'rnaseq'),
+                        ('-', '_'),
+                        (' ', '_'),
+                        ('^0', 'Zero'),
+                        ('^1', 'One'),
+                        ('^2', 'Two'),
+                        ('^3', 'Three'),
+                        ('^4', 'Four'),
+                        ('^5', 'Five'),
+                        ('^6', 'Six'),
+                        ('^7', 'Seven'),
+                        ('^8', 'Eight'),
+                        ('^9', 'Nine'),
+                        ]
+
+        for regex, substitution in replacements:
+            name = re.sub(regex, substitution, name, flags=re.IGNORECASE)
+
+        return name
+
+    def get_manifest_metadata(self, analysis_node):
+        query_template = loader.get_template('trackhub_manifest.sparql')
+
+        context = Context({
+            'submission': str(analysis_node.uri),
+            'submissionSet': str(self.submissionSetNS[''].uri),
+            })
+        results = self.execute_query(query_template, context)
+        LOGGER.info("scanned %s for results found %s",
+                    str(analysis_node), len(results))
+        return results
diff --git a/htsworkflow/templates/manifest.txt b/htsworkflow/templates/manifest.txt
new file mode 100644 (file)
index 0000000..21c8a47
--- /dev/null
@@ -0,0 +1,3 @@
+#version 1.7
+#file_name     format  output_type     experiment      replicate       enriched_in     ucsc_db paired_end      technical_replicate{% for r in files %}
+{{ r.relative_path }}  {{ r.file_format }}     {{ r.output_type }}     {{ r.dataset_id }}      {{ r.replicate }}       {{ r.enriched_in }}     {{ r.ucsc_db }} {{ r.paired_end|default_if_none:"n/a" }}        {{ r.technical_replicate|default_if_none:"n/a"}}{% endfor %}
diff --git a/htsworkflow/templates/submission_view_rdfs_label_metadata.sparql b/htsworkflow/templates/submission_view_rdfs_label_metadata.sparql
new file mode 100644 (file)
index 0000000..0666e62
--- /dev/null
@@ -0,0 +1,10 @@
+PREFIX htsw: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+PREFIX encode3: <http://jumpgate.caltech.edu/wiki/Encode3#>
+
+select ?cell_line ?assay ?protocol ?lab
+where {
+    optional { <{{ library }}> htsw:cell_line ?cell_line . }
+    optional { <{{ library }}> encode3:assay ?assay . }
+    optional { <{{ library }}> encode3:protocol ?protocol. }
+    optional { <{{ library }}> encode3:lab ?lab. }
+}
diff --git a/htsworkflow/templates/trackDb.txt b/htsworkflow/templates/trackDb.txt
new file mode 100644 (file)
index 0000000..8b839a6
--- /dev/null
@@ -0,0 +1,30 @@
+track singleCell
+compositeTrack on
+visibility dense
+shortLabel Single RNA-Seq
+longLabel ENCODE Single cell and small pool RNA-Seq
+subGroup1 tier Tier t1=1 t2=2 t3=3
+subGroup2 poolSize \
+          Single=single \
+          Ten=10_cells \
+          Eleven=11_cells \
+          Hundred=100_Cells \
+          Pool=Pool
+subGroup3 cellType Cell_Line GM12878=GM12878 H1hESC=H1-hESC K562=K562 HeLaS3=HeLa-S3 HepG2=HepG2 HUVEC=HUVEC T8988T=8988T A549=A549 AG04449=AG04449 AG04450=AG04450 AG09309=AG09309 AG09319=AG09319 AG10803=AG10803 AoAF=AoAF AoSMC=AoSMC BE2C=BE2_C BJ=BJ Caco2=Caco-2 CD20=CD20+ CD34Mobilized=CD34+_Mobilized Chorion=Chorion CLL=CLL CMK=CMK Fibrobl=Fibrobl FibroP=FibroP Gliobla=Gliobla GM06990=GM06990 GM12864=GM12864 GM12865=GM12865 GM12891=GM12891 GM12892=GM12892 GM18507=GM18507 GM19238=GM19238 GM19239=GM19239 GM19240=GM19240 H7hESC=H7-hESC H9ES=H9ES HAh=HA-h HAsp=HA-sp HAc=HAc HAEpiC=HAEpiC HBMEC=HBMEC HCF=HCF HCFaa=HCFaa HCM=HCM HConF=HConF HCPEpiC=HCPEpiC HCT116=HCT-116 HEEpiC=HEEpiC Hepatocytes=Hepatocytes HFF=HFF HFFMyc=HFF-Myc HGF=HGF HIPEpiC=HIPEpiC HL60=HL-60 HMEC=HMEC HMF=HMF HMVECdAd=HMVEC-dAd HMVECdBlAd=HMVEC-dBl-Ad HMVECdBlNeo=HMVEC-dBl-Neo HMVECdLyAd=HMVEC-dLy-Ad HMVECdLyNeo=HMVEC-dLy-Neo HMVECdNeo=HMVEC-dNeo HMVECLBl=HMVEC-LBl HMVECLLy=HMVEC-LLy HNPCEpiC=HNPCEpiC HPAEC=HPAEC HPAF=HPAF HPDE6E6E7=HPDE6-E6E7 HPdLF=HPdLF HPF=HPF HRCEpiC=HRCEpiC HRE=HRE HRGEC=HRGEC HRPEpiC=HRPEpiC HSMM=HSMM HSMMemb=HSMM_emb HSMMtube=HSMMtube HTR8svn=HTR8svn Huh7=Huh-7 Huh75=Huh-7.5 HVMF=HVMF iPS=iPS Ishikawa=Ishikawa Jurkat=Jurkat K562=K562 LNCaP=LNCaP MCF7=MCF-7 Medullo=Medullo Melano=Melano MonocytesCD14RO01746=Monocytes-CD14+_RO01746 Myometr=Myometr NB4=NB4 NHA=NH-A NHDFAd=NHDF-Ad NHDFneo=NHDF-neo NHEK=NHEK NHLF=NHLF NT2D1=NT2-D1 Osteobl=Osteobl PANC1=PANC-1 PanIsletD=PanIsletD PanIslets=PanIslets pHTE=pHTE PrEC=PrEC ProgFib=ProgFib RPTEC=RPTEC RWPE1=RWPE1 SAEC=SAEC SKNMC=SK-N-MC SKNSHRA=SK-N-SH_RA SkMC=SkMC Stellate=Stellate T47D=T-47D Th0=Th0 Th1=Th1 Th2=Th2 Urothelia=Urothelia WERIRb1=WERI-Rb-1 WI38=WI-38 
+subGroup4 readType Read_type R1x100=1x100
+dimensions dimX=poolSize dimY=cellType dimA=readType
+dragAndDrop subTracks
+type bam
+
+{% for sample in samples %}
+    track sample_{{ sample.library_id }}
+    parent singleCell on
+    bigDataUrl {{ sample.bam }}
+    shortLabel {{ sample.library_id }}
+    longLabel {{ sample.name }}
+    type bam
+    subGroups tier=t1 \
+              cellLine={{ sample.cell }} \
+              poolSize={{ sample.input_quantity }} \
+              readType=R1x100
+{% endfor %}
\ No newline at end of file
diff --git a/htsworkflow/templates/trackhub_manifest.sparql b/htsworkflow/templates/trackhub_manifest.sparql
new file mode 100644 (file)
index 0000000..36e57fa
--- /dev/null
@@ -0,0 +1,35 @@
+PREFIX htswlib: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+PREFIX submissionOntology: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#>
+PREFIX ucscDaf: <http://jumpgate.caltech.edu/wiki/UcscDaf#>
+PREFIX encode3: <http://jumpgate.caltech.edu/wiki/Encode3#> 
+PREFIX ncbiTaxon: <http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=>
+PREFIX geoSoft: <http://www.ncbi.nlm.nih.gov/geo/info/soft2.html#>
+PREFIX cells: <http://encodewiki.ucsc.edu/EncodeDCC/index.php/Cell_lines#>
+
+select distinct ?name ?filename ?relative_path ?file_format ?output_type ?dataset_id ?replicate ?enriched_in ?ucsc_db ?paired_end ?technical_replicate ?replaces ?replace_reason
+WHERE {
+  <{{submission}}> a submissionOntology:submission ;
+                   submissionOntology:name ?name ;
+                   ucscDaf:has_file ?file .
+
+  ?file ucscDaf:filename ?filename ;
+        ucscDaf:relative_path ?relative_path ;
+        htswlib:library ?library ;
+        a ?fileClass .
+
+  OPTIONAL { ?file encode3:replaces ?replaces_accession ;
+                   encode3:replace_reason ?replace_reason .
+  }
+
+  ?fileClass geoSoft:fileTypeLabel ?file_format ;
+             ucscDaf:output_type ?output_type .
+  OPTIONAL { ?fileClass ucscDaf:paired_end ?paired_end . }
+  OPTIONAL { ?fileClass ucscDaf:technical_replicate ?technical_replicate . }
+
+  ?library htswlib:replicate ?replicate ;
+           ucscDaf:enriched_in ?enriched_in;
+           ucscDaf:genome_build ?ucsc_db .
+
+  ?library encode3:dataset_id ?dataset_id .
+}
diff --git a/htsworkflow/templates/trackhub_samples.sparql b/htsworkflow/templates/trackhub_samples.sparql
new file mode 100644 (file)
index 0000000..6259fce
--- /dev/null
@@ -0,0 +1,33 @@
+PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> 
+PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> 
+PREFIX htswlib: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+PREFIX submissionOntology: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#>
+PREFIX ucscDaf: <http://jumpgate.caltech.edu/wiki/UcscDaf#>
+PREFIX ncbiTaxon: <http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=>
+PREFIX trackdb: <http://genome.ucsc.edu/goldenPath/help/trackDb/trackDbHub.html#>
+PREFIX geoSoft: <http://www.ncbi.nlm.nih.gov/geo/info/soft2.html#>
+PREFIX cells: <http://encodewiki.ucsc.edu/EncodeDCC/index.php/Cell_lines#>
+PREFIX encode3: <http://jumpgate.caltech.edu/wiki/Encode3#>
+
+select distinct ?lab_library_id ?library_id ?filename ?relative_path ?output_type ?file_type ?cell ?replicate ?assay ?rna_type ?protocol ?file_label ?autoScale ?maxHeightPixels ?visibility
+WHERE {
+  ?trackType trackdb:type ?file_type ;
+             ucscDaf:output_type ?output_type .
+  OPTIONAL { ?trackType trackdb:autoScale ?autoScale . }
+  OPTIONAL { ?trackType trackdb:maxHeightPixels ?maxHeightPixels . }
+  OPTIONAL { ?trackType trackdb:visibility ?visibility . }
+  ?file ucscDaf:filename ?filename ;
+        ucscDaf:relative_path ?relative_path ;
+        htswlib:library ?library ;
+        a ?trackType .
+  OPTIONAL { ?file rdfs:label ?file_label . }
+  OPTIONAL { ?library htswlib:library_id ?lab_library_id }
+  OPTIONAL { ?library encode3:library_id ?library_id }
+  OPTIONAL { ?library htswlib:cell_line ?cell . }
+  OPTIONAL { ?library htswlib:replicate ?replicate }
+  OPTIONAL { ?library encode3:assay ?assay . }
+  OPTIONAL { ?library encode3:rna_type ?rna_type. }
+  OPTIONAL { ?library encode3:protocol ?protocol. }
+  #OPTIONAL { ?library ucscDaf:readType ?read_type }
+}
+order by ?trackType
diff --git a/htsworkflow/templates/trackhub_term_values.sparql b/htsworkflow/templates/trackhub_term_values.sparql
new file mode 100644 (file)
index 0000000..6cff5d1
--- /dev/null
@@ -0,0 +1,14 @@
+PREFIX htswlib: <http://jumpgate.caltech.edu/wiki/LibraryOntology#>
+PREFIX submissionOntology: <http://jumpgate.caltech.edu/wiki/UcscSubmissionOntology#>
+PREFIX ucscDaf: <http://jumpgate.caltech.edu/wiki/UcscDaf#>
+PREFIX ncbiTaxon: <http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=>
+PREFIX geoSoft: <http://www.ncbi.nlm.nih.gov/geo/info/soft2.html#>
+PREFIX cells: <http://encodewiki.ucsc.edu/EncodeDCC/index.php/Cell_lines#>
+PREFIX encode3: <http://jumpgate.caltech.edu/wiki/Encode3#>
+
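+# {{term}} is substituted by make_subgroupdefinition,
+# e.g. htswlib:cell_line or encode3:assay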
+select distinct ?name
+where
+{
+  ?library a htswlib:Library ;
+           {{term}} ?name.
+}
index 7398963999bfa470e7ffb9ee17822a4a8bc7ddd4..f2f3e019b20782146c0855f32a4e41e02468336e 100644 (file)
@@ -5,11 +5,6 @@ import os
 LOGGER = logging.getLogger(__name__)
 
 import lxml.etree
-try:
-    XHTML_RDF_DTD = lxml.etree.DTD(external_id='-//W3C//DTD XHTML+RDFa 1.0//EN')
-except lxml.etree.DTDParseError as e:
-    XHTML_RDF_DTD = None
-    LOGGER.warn("Unable to load XHTML DTD %s" % (str(e),))
 
 def indent(elem, level=0):
     """
@@ -49,8 +44,11 @@ def validate_xhtml(html, base_url='http://localhost'):
     Returns True if it passed validation
     and False if it fails.
     """
-    if XHTML_RDF_DTD is None:
-        return None
+    try:
+        XHTML_RDF_DTD = lxml.etree.DTD(external_id='-//W3C//DTD XHTML+RDFa 1.0//EN')
+    except lxml.etree.DTDParseError as e:
+        LOGGER.warn("Unable to load XHTML DTD %s" % (str(e),))
+        return
 
     try:
         root = lxml.etree.fromstring(html, base_url=base_url)
index 2900e76ae3727f0b2182376f666cda4deaf85bd3..af3db764971623b9c3580684ddef2ea90b63ee0c 100644 (file)
@@ -40,6 +40,8 @@ def make_md5sum_unix(filename, md5_cache):
 
 def parse_md5sum_line(lines, filename):
     md5sum, md5sum_filename = lines[0].split()
+    md5sum_filename = os.path.normpath(md5sum_filename)
+    filename = os.path.normpath(filename)
     if md5sum_filename != filename:
         errmsg = "MD5sum and I disagre about filename. {0} != {1}"
         logger.error(errmsg.format(filename, md5sum_filename))
index 90b0e6adb194d8e64d4f5ecbd677033ead87ac4a..ac5f6ccd1056a561418cd9b17e155ee936097630 100644 (file)
@@ -271,15 +271,20 @@ def load_into_model(model, parser_name, path, ns=None):
 
     statements = []
     retries = 3
+    succeeded = False
     while retries > 0:
         try:
             retries -= 1
             statements = rdf_parser.parse_as_stream(url, ns)
             retries = 0
+            succeeded = True
         except RDF.RedlandError, e:
             errmsg = "RDF.RedlandError: {0} {1} tries remaining"
             logger.error(errmsg.format(str(e), retries))
-
+
+    if not succeeded:
+        logger.warn("Unable to download %s", url)
+
     for s in statements:
         conditionally_add_statement(model, s, ns)
 
@@ -329,7 +334,7 @@ def add_default_schemas(model, schema_path=None):
         namespace = 'file://localhost/htsworkflow/schemas/'+s
         add_schema(model, schema, namespace)
 
-    if schema_path:    
+    if schema_path:
         if type(schema_path) in types.StringTypes:
             schema_path = [schema_path]
 
@@ -423,11 +428,19 @@ def get_serializer(name='turtle'):
     writer.set_namespace('wot', wotNS._prefix)
 
     # should these be here, kind of specific to an application
-    writer.set_namespace('libraryOntology', libraryOntology._prefix)
+    writer.set_namespace('htswlib', libraryOntology._prefix)
     writer.set_namespace('ucscSubmission', submissionOntology._prefix)
     writer.set_namespace('ucscDaf', dafTermOntology._prefix)
+    writer.set_namespace('geoSoft', geoSoftNS._prefix)
+    writer.set_namespace('encode3', encode3NS._prefix)
     return writer
 
+def get_turtle_header():
+    """Return a turtle header with our typical namespaces
+    """
+    serializer = get_serializer()
+    empty = get_model()
+    return serializer.serialize_model_to_string(empty)
 
 def dump_model(model, destination=None):
     if destination is None:
index d2164eea0c8a058329ae7a74f91d95fe0fcc4b6a..2ad363277da2c46c40eeb758403aa539266bdb7d 100644 (file)
@@ -23,3 +23,4 @@ inventoryOntology = NS(
     "http://jumpgate.caltech.edu/wiki/InventoryOntology#")
 submissionLog = NS("http://jumpgate.caltech.edu/wiki/SubmissionsLog/")
 geoSoftNS = NS('http://www.ncbi.nlm.nih.gov/geo/info/soft2.html#')
+encode3NS = NS("http://jumpgate.caltech.edu/wiki/Encode3#")
index 8fe0362d32a33b721d72c112fc7093f80ad34067..1c1c7a7dce0e6a0991a81aa019894c16616ecfbc 100644 (file)
@@ -339,7 +339,7 @@ htswlib:lane_number
     rdfs:comment "Which lane were we run in" ;
     rdfs:label "lane id" ;
     rdfs:domain htswlib:IlluminaLane ;
-    rdfs:range xsd:string .
+    rdfs:range rdfs:Literal .
 
 # FIXME: should this be note?
 htswlib:comment
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..fdc2a2a6e263151ba6f068479fed8d073f170d12 100644 (file)
@@ -0,0 +1,4 @@
+import os
+
+TEST_CODE_DIR = os.path.split(__file__)[0]
+TEST_DATA_DIR = os.path.join(TEST_CODE_DIR, 'testdata')
index a477b95c9adacbb4a15c1997d29e712750b0d452..3f328d8d18b43b8ec5bd80fb5f2f4a70501b67a8 100644 (file)
@@ -29,61 +29,67 @@ try:
 
     class TestRDFHelp(TestCase):
         def test_from_none(self):
-          self.failUnlessEqual(fromTypedNode(None), None)
+          self.assertEqual(fromTypedNode(None), None)
 
         def test_typed_node_boolean(self):
             node = toTypedNode(True)
-            self.failUnlessEqual(node.literal_value['string'], u'1')
-            self.failUnlessEqual(str(node.literal_value['datatype']),
+            self.assertIn(node.literal_value['string'], (u'1', u'true'))
+            self.assertEqual(str(node.literal_value['datatype']),
                                  'http://www.w3.org/2001/XMLSchema#boolean')
 
         def test_bad_boolean(self):
             node = RDF.Node(literal='bad', datatype=xsdNS['boolean'].uri)
-            self.failUnlessRaises(ValueError, fromTypedNode, node)
+            # older versions of librdf (~< 1.0.16) left the literal
+            # alone, so fromTypedNode raises ValueError; newer
+            # versions coerce the odd value to False.
+            try:
+                self.assertFalse(fromTypedNode(node))
+            except ValueError:
+                pass
 
         def test_typed_node_string(self):
             node = toTypedNode('hello')
-            self.failUnlessEqual(node.literal_value['string'], u'hello')
-            self.failUnless(node.literal_value['datatype'] is None)
+            self.assertEqual(node.literal_value['string'], u'hello')
+            self.assertTrue(node.literal_value['datatype'] is None)
 
         def test_typed_real_like(self):
             num = 3.14
             node = toTypedNode(num)
-            self.failUnlessEqual(fromTypedNode(node), num)
+            self.assertEqual(fromTypedNode(node), num)
 
         def test_typed_integer(self):
             num = 3
             node = toTypedNode(num)
-            self.failUnlessEqual(fromTypedNode(node), num)
-            self.failUnlessEqual(type(fromTypedNode(node)), type(num))
+            self.assertEqual(fromTypedNode(node), num)
+            self.assertEqual(type(fromTypedNode(node)), type(num))
 
         def test_typed_node_string(self):
             s = "Argh matey"
             node = toTypedNode(s)
-            self.failUnlessEqual(fromTypedNode(node), s)
-            self.failUnlessEqual(type(fromTypedNode(node)), types.UnicodeType)
+            self.assertEqual(fromTypedNode(node), s)
+            self.assertEqual(type(fromTypedNode(node)), types.UnicodeType)
 
         def test_blank_or_uri_blank(self):
             node = blankOrUri()
-            self.failUnlessEqual(node.is_blank(), True)
+            self.assertEqual(node.is_blank(), True)
 
         def test_blank_or_uri_url(self):
             s = 'http://google.com'
             node = blankOrUri(s)
-            self.failUnlessEqual(node.is_resource(), True)
-            self.failUnlessEqual(str(node.uri), s)
+            self.assertEqual(node.is_resource(), True)
+            self.assertEqual(str(node.uri), s)
 
         def test_blank_or_uri_node(self):
             s = RDF.Node(RDF.Uri('http://google.com'))
             node = blankOrUri(s)
-            self.failUnlessEqual(node.is_resource(), True)
-            self.failUnlessEqual(node, s)
+            self.assertEqual(node.is_resource(), True)
+            self.assertEqual(node, s)
 
         def test_unicode_node_roundtrip(self):
             literal = u'\u5927'
             roundtrip = fromTypedNode(toTypedNode(literal))
-            self.failUnlessEqual(roundtrip, literal)
-            self.failUnlessEqual(type(roundtrip), types.UnicodeType)
+            self.assertEqual(roundtrip, literal)
+            self.assertEqual(type(roundtrip), types.UnicodeType)
 
         def test_datetime_no_microsecond(self):
             dateTimeType = xsdNS['dateTime'].uri
@@ -115,17 +121,17 @@ try:
 
             term = 'foo'
             node = nsOrg[term]
-            self.failUnlessEqual(stripNamespace(nsOrg, node), term)
-            self.failUnlessEqual(stripNamespace(nsCom, node), None)
-            self.failUnlessEqual(stripNamespace(nsOrg, node.uri), term)
+            self.assertEqual(stripNamespace(nsOrg, node), term)
+            self.assertEqual(stripNamespace(nsCom, node), None)
+            self.assertEqual(stripNamespace(nsOrg, node.uri), term)
 
         def test_strip_namespace_exceptions(self):
             nsOrg = RDF.NS('example.org/example#')
             nsCom = RDF.NS('example.com/example#')
 
             node = toTypedNode('bad')
-            self.failUnlessRaises(ValueError, stripNamespace, nsOrg, node)
-            self.failUnlessRaises(ValueError, stripNamespace, nsOrg, nsOrg)
+            self.assertRaises(ValueError, stripNamespace, nsOrg, node)
+            self.assertRaises(ValueError, stripNamespace, nsOrg, nsOrg)
 
         def test_simplify_uri(self):
             DATA = [('http://asdf.org/foo/bar', 'bar'),
@@ -164,19 +170,19 @@ _:a owl:imports "{loc}extra.turtle" .
             tc = RDF.Node(RDF.Uri('http://jumpgate.caltech.edu/wiki/TestCase'))
             query = RDF.Statement(tc, rdfsNS['label'], None)
             result = list(model.find_statements(query))
-            self.failUnlessEqual(len(result), 1)
-            self.failUnlessEqual(str(result[0].object), 'TestCase')
+            self.assertEqual(len(result), 1)
+            self.assertEqual(str(result[0].object), 'TestCase')
 
         def test_sanitize_literal_text(self):
-            self.failUnlessRaises(ValueError, sanitize_literal, "hi")
+            self.assertRaises(ValueError, sanitize_literal, "hi")
             hello_text = "hello"
             hello_none = RDF.Node(hello_text)
-            self.failUnlessEqual(str(sanitize_literal(hello_none)),
+            self.assertEqual(str(sanitize_literal(hello_none)),
                                  hello_text)
             hello_str = RDF.Node(literal=hello_text,
                                  datatype=xsdNS['string'].uri)
             hello_clean = sanitize_literal(hello_str)
-            self.failUnlessEqual(hello_clean.literal_value['string'],
+            self.assertEqual(hello_clean.literal_value['string'],
                                  hello_text)
 
         def test_sanitize_literal_empty_string(self):
@@ -190,7 +196,7 @@ _:a owl:imports "{loc}extra.turtle" .
             hello_node = RDF.Node(literal=hello,
                                   datatype=xsdNS['string'].uri)
             hello_sanitized = sanitize_literal(hello_node)
-            self.failUnlessEqual(hello_sanitized.literal_value['string'],
+            self.assertEqual(hello_sanitized.literal_value['string'],
                                  hello_clean)
 
             hostile = "hi <b>there</b><script type='text/javascript>alert('boo');</script><a href='javascript:alert('poke')>evil</a> scammer"
@@ -199,7 +205,7 @@ _:a owl:imports "{loc}extra.turtle" .
             # so it drops the stuff after the javascript link.
             # I suppose it could be worse
             hostile_result = """hi <b>there</b>"""
-            self.failUnlessEqual(str(hostile_sanitized), hostile_result)
+            self.assertEqual(str(hostile_sanitized), hostile_result)
 
         def test_guess_parser_from_file(self):
             DATA = [
diff --git a/htsworkflow/util/test/test_ucsc.py b/htsworkflow/util/test/test_ucsc.py
new file mode 100644 (file)
index 0000000..2b2e976
--- /dev/null
@@ -0,0 +1,29 @@
+"""Test wrappers around ucsc file formats
+"""
+import os
+from unittest2 import TestCase
+from htsworkflow.util.test import TEST_DATA_DIR
+from htsworkflow.util.ucsc import bigWigInfo
+
+from distutils.spawn import find_executable
+
+class TestUCSC(TestCase):
+    def test_bigwig_info(self):
+        if not find_executable('bigWigInfo'):
+            self.skipTest('Need bigWigInfo on path to test')
+
+        filename = os.path.join(TEST_DATA_DIR, 'foo.bigWig')
+        info = bigWigInfo(filename)
+        self.assertEqual(info.version, 4)
+        self.assertEqual(info.isCompressed, True)
+        # what should I do for a byteswapped arch?
+        self.assertEqual(info.isSwapped, False)
+        self.assertEqual(info.primaryDataSize, 48)
+        self.assertEqual(info.primaryIndexSize, 6204)
+        self.assertEqual(info.zoomLevels, 2)
+        self.assertEqual(info.basesCovered, 30)
+        self.assertAlmostEqual(info.mean, 0.0)
+        self.assertAlmostEqual(info.min, -5.5)
+        self.assertAlmostEqual(info.max, 5.5)
+        self.assertAlmostEqual(info.std, 4.567501)
diff --git a/htsworkflow/util/test/test_url.py b/htsworkflow/util/test/test_url.py
new file mode 100644 (file)
index 0000000..979e144
--- /dev/null
@@ -0,0 +1,46 @@
+from unittest2 import TestCase
+
+from htsworkflow.util.url import normalize_url, parse_ssh_url
+
+class TestURLUtilities(TestCase):
+    def test_normalize_url(self):
+
+        self.assertEqual(normalize_url('caltech.edu'),
+                         'http://caltech.edu')
+        self.assertEqual(normalize_url('http://caltech.edu'),
+                         'http://caltech.edu')
+        self.assertEqual(normalize_url("foo.com/a/b/c/d/e/f.html"),
+                         'http://foo.com/a/b/c/d/e/f.html')
+        self.assertEqual(normalize_url("foo.com", "https"),
+                         'https://foo.com')
+        self.assertEqual(normalize_url(None),
+                         None)
+
+    def test_parse_ssh_url(self):
+
+        u = parse_ssh_url('me@caltech.edu:/test/path')
+        self.assertEqual(u.user, 'me')
+        self.assertEqual(u.host, 'caltech.edu')
+        self.assertEqual(u.path, '/test/path')
+
+        u = parse_ssh_url('caltech.edu:path@there')
+        self.assertEqual(u.user, None)
+        self.assertEqual(u.host, 'caltech.edu')
+        self.assertEqual(u.path, 'path@there')
+
+        u = parse_ssh_url('caltech.edu:C:/me/@work')
+        self.assertEqual(u.user, None)
+        self.assertEqual(u.host, 'caltech.edu')
+        self.assertEqual(u.path, 'C:/me/@work')
+
+        self.assertRaises(ValueError, parse_ssh_url, 'hello')
+
+def suite():
+    from unittest2 import TestSuite, defaultTestLoader
+    suite = TestSuite()
+    suite.addTests(defaultTestLoader.loadTestsFromTestCase(TestURLUtilities))
+    return suite
+
+if __name__ == '__main__':
+    from unittest2 import main
+    main(defaultTest="suite")
diff --git a/htsworkflow/util/test/test_version.py b/htsworkflow/util/test/test_version.py
new file mode 100644 (file)
index 0000000..212f124
--- /dev/null
@@ -0,0 +1,21 @@
+from unittest2 import TestCase
+
+from htsworkflow.util import version
+
+class TestVersion(TestCase):
+    def test_version(self):
+        long_version = version.version()
+        self.assertTrue(long_version)
+        self.assertEqual(long_version.project_name, 'htsworkflow')
+        self.assertTrue(long_version.version)
+
+
+def suite():
+    from unittest2 import TestSuite, defaultTestLoader
+    suite = TestSuite()
+    suite.addTest(defaultTestLoader.loadTestsFromTestCase(TestVersion))
+    return suite
+
+if __name__ == "__main__":
+    from unittest2 import main
+    main(defaultTest="suite")
diff --git a/htsworkflow/util/test/testdata/foo.bigWig b/htsworkflow/util/test/testdata/foo.bigWig
new file mode 100644 (file)
index 0000000..98090a8
Binary files /dev/null and b/htsworkflow/util/test/testdata/foo.bigWig differ
diff --git a/htsworkflow/util/ucsc.py b/htsworkflow/util/ucsc.py
new file mode 100644 (file)
index 0000000..b96c46a
--- /dev/null
@@ -0,0 +1,73 @@
+"""Wrap ucsc command line utilities
+"""
+
+import logging
+import os
+import sys
+from subprocess import Popen, PIPE
+
+LOGGER = logging.getLogger(__name__)
+
+def parseNumber(number):
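+    """Parse a number as printed by bigWigInfo, ignoring comma separators.
+
+    e.g. '6,204' -> 6204 and '4.567501' -> 4.567501 (illustrative values).
+    """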
+    buffer = []
+    isFloat = False
+    for n in number:
+        if n == ',':
+            continue
+        if n == '.':
+            isFloat = True
+        buffer.append(n)
+    if isFloat:
+        return float(''.join(buffer))
+    else:
+        return int(''.join(buffer))
+
+def parseBoolean(value):
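+    """Parse a yes/no style flag from bigWigInfo output.
+
+    Unrecognized values fall through and return None.
+    """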
+    if value.lower() in ('yes', '1', 'true'):
+        return True
+    elif value.lower() in ('no', '0', 'false'):
+        return False
+
+class bigWigInfo:
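+    """Hold the statistics reported by the UCSC bigWigInfo tool.
+
+    Each 'term: value' line of bigWigInfo output becomes an attribute.
+    """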
+    def __init__(self, filename=None):
+        self.version = None
+        self.isCompressed = None
+        self.isSwapped = None
+        self.primaryDataSize = None
+        self.primaryIndexSize = None
+        self.zoomLevels = None
+        self.chromCount = None
+        self.basesCovered = None
+        self.mean = None
+        self.min = None
+        self.max = None
+        self.std = None
+        self.filename = None
+        if filename:
+            self.scan_file(filename)
+            self.filename = filename
+
+    def scan_file(self, filename):
+        cmd = ['bigWigInfo', filename]
+        try:
+            p = Popen(cmd, stdout=PIPE)
+            stdout, _ = p.communicate()
+            for line in stdout.split(os.linesep):
+                if len(line) > 0:
+                    term, value = line.split(': ')
+                    if term in ('isCompressed', 'isSwapped'):
+                        value = parseBoolean(value)
+                    else:
+                        value = parseNumber(value)
+                    LOGGER.debug('%s: %s', term, str(value))
+                    setattr(self, term, value)
+        except OSError as e:
+            LOGGER.error("Exception %s trying to run: %s", str(e), ' '.join(cmd))
+            sys.exit(-1)
index 4e49c2dc4b41f63ac3ab199960225ee320a40408..503e9e38056e02e4d749a32eb9e492d0c66b57d2 100644 (file)
@@ -1,20 +1,11 @@
 """
 Utilities to help handle urls
 """
+import collections
 
 def normalize_url(url, scheme='http'):
     """
     Make sure there is a http at the head of what should be a url
-
-    >>> normalize_url("google.com")
-    'http://google.com'
-    >>> normalize_url("http://google.com")
-    'http://google.com'
-    >>> normalize_url("foo.com/a/b/c/d/e/f.html")
-    'http://foo.com/a/b/c/d/e/f.html'
-    >>> normalize_url("foo.com", "https")
-    'https://foo.com'
-    >>> normalize_url(None)
     """
     # not much to do with None except avoid an exception
     if url is None:
@@ -25,3 +16,30 @@ def normalize_url(url, scheme='http'):
         return url
     else:
         return scheme + scheme_sep + url
+
+SSHURL = collections.namedtuple("SSHURL", "user host path")
+
+def parse_ssh_url(url):
+    """Parse scp-style username, host and path.
+    """
+    # simple initialization
+    user = None
+    host = None
+    path = None
+
+    colon = url.find(':')
+    if colon == -1:
+        raise ValueError("Invalid SSH URL: need <host>:<path>")
+
+    path = url[colon+1:]
+
+    user_host = url[:colon]
+    atsign = user_host.find('@')
+    if atsign != -1:
+        user = user_host[:atsign]
+        host = user_host[atsign+1:]
+    else:
+        host = user_host
+
+    return SSHURL(user, host, path)
diff --git a/htsworkflow/util/version.py b/htsworkflow/util/version.py
new file mode 100644 (file)
index 0000000..8097edb
--- /dev/null
@@ -0,0 +1,21 @@
+import logging
+
+LOGGER = logging.getLogger(__name__)
+
+def version():
+    """Return version number
+    """
+    version = None
+    try:
+        import pkg_resources
+    except ImportError, e:
+        LOGGER.error("Can't find version number, please install setuptools")
+        raise e
+
+    try:
+        version = pkg_resources.get_distribution("htsworkflow")
+    except pkg_resources.DistributionNotFound, e:
+        LOGGER.error("Package not installed")
+
+    return version
+
diff --git a/htsworkflow/version.py b/htsworkflow/version.py
deleted file mode 100644 (file)
index 8097edb..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-import logging
-
-LOGGER = logging.getLogger(__name__)
-
-def version():
-    """Return version number
-    """
-    version = None
-    try:
-        import pkg_resources
-    except ImportError, e:
-        LOGGER.error("Can't find version number, please install setuptools")
-        raise e
-
-    try:
-        version = pkg_resources.get_distribution("htsworkflow")
-    except pkg_resources.DistributionNotFound, e:
-        LOGGER.error("Package not installed")
-
-    return version
-
diff --git a/htsworkflow/wsgi.py b/htsworkflow/wsgi.py
new file mode 100644 (file)
index 0000000..2632918
--- /dev/null
@@ -0,0 +1,19 @@
+"""
+WSGI config for htsworkflow project.
+
+It exposes the WSGI callable as a module-level variable named ``application``.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
+"""
+
+import os
+import sys
+WSGIAPP = os.path.dirname(__file__)
+
+sys.path.append(os.path.abspath(os.path.join(WSGIAPP, '..')))
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "htsworkflow.settings")
+
+from django.core.wsgi import get_wsgi_application
+application = get_wsgi_application()
old mode 100644 (file)
new mode 100755 (executable)
index 5e78ea9..89759ca
--- a/manage.py
+++ b/manage.py
@@ -1,11 +1,10 @@
 #!/usr/bin/env python
-from django.core.management import execute_manager
-try:
-    import settings # Assumed to be in the same directory.
-except ImportError:
-    import sys
-    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
-    sys.exit(1)
+import os
+import sys
 
 if __name__ == "__main__":
-    execute_manager(settings)
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "htsworkflow.settings")
+
+    from django.core.management import execute_from_command_line
+
+    execute_from_command_line(sys.argv)
index dc3351cf88d7e0d8a449f02220c943bb43e64d8d..2d63df10ef7a35c623d0a0e219cf2c26571a1b6d 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@ setup(
         },
     include_package_data=True,
     install_requires=['distribute',
-                      'django >=1.1, <1.4',
+                      'django >=1.6, <1.7',
                       'lxml >= 2.2.4',
                       'numpy >= 1.3',
                       'benderjab >= 0.2',