- cluster_estimate = 'None'
- library_id = lane.library_id
- library = lane.library
- element = '<tr><td>%d</td><td><a href="%s">%s</a></td><td>%s</td></tr>'
- html.append(element % (lane.lane_number,
- library.get_admin_url(),
- library,
- cluster_estimate))
- html.append('</table>')
- return "\n".join(html)
- Lanes.allow_tags = True
-
- class Meta:
- ordering = ["-run_date"]
-
- def get_admin_url(self):
- # that's the django way... except it didn't work
- return urlresolvers.reverse('admin:experiments_flowcell_change',
- args=(self.id,))
-
- def flowcell_type(self):
- """
- Convert our boolean 'is paired' flag to a name
- """
- if self.paired_end:
- return u"Paired"
- else:
- return u"Single"
-
- @models.permalink
- def get_absolute_url(self):
- flowcell_id, status = parse_flowcell_id(self.flowcell_id)
- return ('htsworkflow.frontend.experiments.views.flowcell_detail',
- [str(flowcell_id)])
-
- def get_raw_data_directory(self):
- """Return location of where the raw data is stored"""
- flowcell_id, status = parse_flowcell_id(self.flowcell_id)
-
- return os.path.join(settings.RESULT_HOME_DIR, flowcell_id)
-
- def update_data_runs(self):
- result_root = self.get_raw_data_directory()
- logger.debug("Update data runs flowcell root: %s" % (result_root,))
- if result_root is None:
- return
-
- result_home_dir = os.path.join(settings.RESULT_HOME_DIR,'')
- run_xml_re = re.compile(glob.fnmatch.translate('run*.xml'))
-
- dataruns = dict([ (x.result_dir, x) for x in self.datarun_set.all() ])
-
- result_dirs = []
- for dirpath, dirnames, filenames in os.walk(result_root):
- for filename in filenames:
- if run_xml_re.match(filename):
- # we have a run directory
- relative_pathname = get_relative_pathname(dirpath)
- cached_run = dataruns.get(relative_pathname, None)
- now = datetime.datetime.now()
- if (cached_run is None):
- self.import_data_run(relative_pathname, filename)
- elif (now - cached_run.last_update_time).days > RESCAN_DELAY:
- self.import_data_run(relative_pathname,
- filename, cached_run)
-
- def import_data_run(self, relative_pathname, run_xml_name, run=None):
- """Given a result directory import files"""
- run_dir = get_absolute_pathname(relative_pathname)
- run_xml_path = os.path.join(run_dir, run_xml_name)
- run_xml_data = runfolder.load_pipeline_run_xml(run_xml_path)
- logger.debug("Importing run from %s" % (relative_pathname,))
-
- if run is None:
- run = DataRun()
- run.flowcell = self
- run.status = RUN_STATUS_REVERSE_MAP['DONE']
- run.result_dir = relative_pathname
- run.runfolder_name = run_xml_data.runfolder_name
- run.cycle_start = run_xml_data.image_analysis.start
- run.cycle_stop = run_xml_data.image_analysis.stop
- run.run_start_time = run_xml_data.image_analysis.date
- run.image_software = run_xml_data.image_analysis.software
- run.image_version = run_xml_data.image_analysis.version
- run.basecall_software = run_xml_data.bustard.software
- run.basecall_version = run_xml_data.bustard.version
- run.alignment_software = run_xml_data.gerald.software
- run.alignment_version = run_xml_data.gerald.version
-
- run.last_update_time = datetime.datetime.now()
- run.save()
-
- run.update_result_files()
+ return u"Single"
+
+ @models.permalink
+ def get_absolute_url(self):
+ flowcell_id, status = parse_flowcell_id(self.flowcell_id)
+ return ('htsworkflow.frontend.experiments.views.flowcell_detail',
+ [str(flowcell_id)])
+
+ def get_raw_data_directory(self):
+ """Return location of where the raw data is stored"""
+ flowcell_id, status = parse_flowcell_id(self.flowcell_id)
+
+ return os.path.join(settings.RESULT_HOME_DIR, flowcell_id)
+
+ def update_data_runs(self):
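+ """Create or refresh DataRun records for this flowcell.
+
+ Walk the raw data directory looking for run*.xml files; import any
+ run directory we have not seen before, and re-import cached runs
+ whose last update is more than RESCAN_DELAY days old.
+ """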
+ result_root = self.get_raw_data_directory()
+ LOGGER.debug("Update data runs flowcell root: %s" % (result_root,))
+ if result_root is None:
+ return
+
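+ # translate the shell pattern 'run*.xml' into an equivalent regex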
+ run_xml_re = re.compile(glob.fnmatch.translate('run*.xml'))
+
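+ # index the existing DataRun records by result directory so new
+ # run folders can be told apart from ones that just need a rescan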
+ dataruns = dict((x.result_dir, x) for x in self.datarun_set.all())
+
+ for dirpath, dirnames, filenames in os.walk(result_root):
+ for filename in filenames:
+ if run_xml_re.match(filename):
+ # we have a run directory
+ relative_pathname = get_relative_pathname(dirpath)
+ cached_run = dataruns.get(relative_pathname, None)
+ now = datetime.datetime.now()
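+ # import run directories we haven't seen; rescan stale ones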
+ if cached_run is None:
+ self.import_data_run(relative_pathname, filename)
+ elif (now - cached_run.last_update_time).days > \
+ RESCAN_DELAY:
+ self.import_data_run(relative_pathname,
+ filename, cached_run)
+
+ def import_data_run(self, relative_pathname, run_xml_name, run=None):
+ """Given a result directory import files"""
+ run_dir = get_absolute_pathname(relative_pathname)
+ run_xml_path = os.path.join(run_dir, run_xml_name)
+ run_xml_data = runfolder.load_pipeline_run_xml(run_xml_path)
+ LOGGER.debug("Importing run from %s" % (relative_pathname,))
+
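+ # reuse the cached DataRun when rescanning, otherwise start a new record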
+ if run is None:
+ run = DataRun()
+ run.flowcell = self
+ run.status = RUN_STATUS_REVERSE_MAP['DONE']
+ run.result_dir = relative_pathname
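+ # copy the metadata recorded for each pipeline stage out of run*.xml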
+ run.runfolder_name = run_xml_data.runfolder_name
+ run.cycle_start = run_xml_data.image_analysis.start
+ run.cycle_stop = run_xml_data.image_analysis.stop
+ run.run_start_time = run_xml_data.image_analysis.date
+ run.image_software = run_xml_data.image_analysis.software
+ run.image_version = run_xml_data.image_analysis.version
+ run.basecall_software = run_xml_data.bustard.software
+ run.basecall_version = run_xml_data.bustard.version
+ run.alignment_software = run_xml_data.gerald.software
+ run.alignment_version = run_xml_data.gerald.version
+
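+ # stamp the record so update_data_runs knows when a rescan is due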
+ run.last_update_time = datetime.datetime.now()
+ run.save()
+
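+ # finally let the saved run refresh its associated result file records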
+ run.update_result_files()