import time
from htsworkflow.util import mount
+from htsworkflow.automation.solexa import is_runfolder, get_top_dir
# this uses pyinotify
import pyinotify
from pyinotify import EventsCodes
+IN_CREATE = EventsCodes.ALL_FLAGS['IN_CREATE']
+IN_UNMOUNT = EventsCodes.ALL_FLAGS['IN_UNMOUNT']
from benderjab import rpc
-def get_top_dir(root, path):
+LOGGER = logging.getLogger(__name__)
+
+class WatcherEvent(object):
"""
- Return the directory in path that is a subdirectory of root.
- e.g.
-
- >>> print get_top_dir('/a/b/c', '/a/b/c/d/e/f')
- d
- >>> print get_top_dir('/a/b/c/', '/a/b/c/d/e/f')
- d
- >>> print get_top_dir('/a/b/c', '/g/e/f')
- None
- >>> print get_top_dir('/a/b/c', '/a/b/c')
- <BLANKLINE>
+ Track information about a file event
+
+ Currently tracks the event time, and whether the event indicated the run completed.
"""
- if path.startswith(root):
- subpath = path[len(root):]
- if subpath.startswith('/'):
- subpath = subpath[1:]
- return subpath.split(os.path.sep)[0]
- else:
- return None
-
-class WatcherEvents(object):
- # two events need to be tracked
- # one to send startCopy
- # one to send OMG its broken
- # OMG its broken needs to stop when we've seen enough
- # cycles
- # this should be per runfolder.
- # read the xml files
- def __init__(self):
- pass
-
+ def __init__(self, event_root=None):
+ self.time = time.time()
+ self.event_root = event_root
+ self.complete = False
+
+ def __str__(self):
+ if self.complete:
+ complete = "(completed)"
+ else:
+ complete = ""
+ return "<WatcherEvent: %s %s %s>" % (time.ctime(self.time), self.event_root, complete)
class Handler(pyinotify.ProcessEvent):
- def __init__(self, watchmanager, bot, ipar=False):
+ def __init__(self, watchmanager, bot, completion_files=None):
"""
- ipar flag indicates we should wait for ipar to finish, instead of
- just the run finishing
+ Completion file contains current "completion" filename
"""
self.last_event = {}
self.watchmanager = watchmanager
self.bot = bot
- self.ipar_mode = ipar
- if self.ipar_mode:
- self.last_file = 'IPAR_Netcopy_Complete.txt'.lower()
- else:
- self.last_file = "run.completed".lower()
+ self.log = bot.log
+ if completion_files is None:
+ completion_files = []
+ self.completion_files = [ x.lower() for x in completion_files ]
def process_IN_CREATE(self, event):
for wdd in self.bot.wdds:
for watch_path in self.bot.watchdirs:
- if event.path.startswith(watch_path):
+ run_already_complete = False
+ # I only care about things created inside the watch directory, so
+ # the event path needs to be longer than the watch path in addition to
+ # starting with the watch_path
+ if len(event.path) > len(watch_path) and event.path.startswith(watch_path):
+ # compute name of the top level directory that had an event
+ # in the current watch path
target = get_top_dir(watch_path, event.path)
- self.last_event.setdefault(watch_path, {})[target] = time.time()
+ runfolder = os.path.join(watch_path, target)
+
+ if not is_runfolder(target):
+ self.log.debug("Skipping %s, not a runfolder" % (target,))
+ continue
+
+ # grab the previous events for this watch path
+ watch_path_events = self.last_event.setdefault(watch_path, {})
+
+ # if we've already seen an event in this directory (AKA runfolder)
+ # keep track if its already hit the "completed" flag
+ if target in watch_path_events:
+ run_already_complete = watch_path_events[target].complete
- msg = "Create: %s %s %s" % (event.path, event.name, target)
+ watch_path_events[target] = WatcherEvent(target)
+ #self.last_event.setdefault(watch_path, {})[target] = WatcherEvent(target)
- if event.name.lower() == self.last_file:
- try:
- self.bot.sequencingFinished(event.path)
- except IOError, e:
- logging.error("Couldn't send sequencingFinished")
- logging.debug(msg)
+ msg = "Create: %s %s %s %s" % (watch_path, target, event.path, event.name)
+
+ # the ReadPrep step uses some of the same file completion flags as the
+ # main analysis, which means this completion code might get tripped because of it
+ # so we need to make sure we're getting the completion file in the root of the
+ # runfolder
+ event_name = event.name.lower()
+ if (event_name in self.completion_files and event.path == runfolder) \
+ or run_already_complete:
+ self.last_event[watch_path][target].complete = True
+ msg += "(completed)"
+
+ self.log.debug(msg)
def process_IN_DELETE(self, event):
- logging.debug("Remove: %s" % os.path.join(event.path, event.name))
+ self.log.debug("Remove: %s" % os.path.join(event.path, event.name))
pass
def process_IN_UNMOUNT(self, event):
pathname = os.path.join(event.path, event.name)
- logging.debug("IN_UNMOUNT: %s" % (pathname,))
+ self.log.debug("IN_UNMOUNT: %s" % (pathname,))
self.bot.unmount_watch(event.path)
class SpoolWatcher(rpc.XmlRpcBot):
"""
Watch a directory and send a message when another process is done writing.
-
+
This monitors a directory tree using inotify (linux specific) and
after some files having been written will send a message after <timeout>
seconds of no file writing.
-
+
(Basically when the solexa machine finishes dumping a round of data
this'll hopefully send out a message saying hey look theres data available
-
+
"""
# these params need to be in the config file
# I wonder where I should put the documentation
# `write_timeout` - how many seconds to wait for writes to finish to
# the spool
# `notify_timeout` - how often to timeout from notify
-
+ # `completion_files` - which files indicate we've finished sequencing
+ # defaults to: ImageAnalysis_Netcopy_complete_READ2.txt and ImageAnalysis_Netcopy_complete_SINGLEREAD.txt
+
def __init__(self, section=None, configfile=None):
#if configfile is None:
# self.configfile = "~/.htsworkflow"
super(SpoolWatcher, self).__init__(section, configfile)
-
+
self.cfg['watchdirs'] = None
self.cfg['write_timeout'] = 10
self.cfg['notify_users'] = None
self.cfg['notify_runner'] = None
- self.cfg['wait_for_ipar'] = 0
-
+ self.cfg['completion_files'] = 'ImageAnalysis_Netcopy_complete_READ2.txt ImageAnalysis_Netcopy_complete_SINGLEREAD.txt'
+
self.watchdirs = []
self.watchdir_url_map = {}
self.notify_timeout = 0.001
- self.wm = None
+ self.wm = None
self.notify_users = None
self.notify_runner = None
self.wdds = []
# keep track of which mount points tie to which watch directories
# so maybe we can remount them.
self.mounts_to_watches = {}
-
+
self.eventTasks.append(self.process_notify)
def read_config(self, section=None, configfile=None):
- # Don't give in to the temptation to use logging functions here,
+ # Don't give in to the temptation to use logging functions here,
# need to wait until after we detach in start
super(SpoolWatcher, self).read_config(section, configfile)
-
+
self.watchdirs = shlex.split(self._check_required_option('watchdirs'))
# see if there's an alternate url that should be used for the watchdir
for watchdir in self.watchdirs:
self.watchdir_url_map[watchdir] = self.cfg.get(watchdir, watchdir)
self.write_timeout = int(self.cfg['write_timeout'])
- self.wait_for_ipar = int(self.cfg['wait_for_ipar'])
-
+ self.completion_files = shlex.split(self.cfg['completion_files'])
+
self.notify_users = self._parse_user_list(self.cfg['notify_users'])
try:
self.notify_runner = \
# create the watch managers if we need them
if self.wm is None:
self.wm = pyinotify.WatchManager()
- self.handler = Handler(self.wm, self, self.wait_for_ipar)
+ self.handler = Handler(self.wm, self, self.completion_files)
self.notifier = pyinotify.Notifier(self.wm, self.handler)
# the one tree limit is mostly because self.wdd is a single item
if watchdirs is None:
watchdirs = self.watchdirs
- mask = EventsCodes.IN_CREATE | EventsCodes.IN_UNMOUNT
+ mask = IN_CREATE | IN_UNMOUNT
# rec traverses the tree and adds all the directories that are there
# at the start.
# auto_add will add in new directories as they are created
mounts.append(w)
self.mounts_to_watches[mount_location] = mounts
- logging.info(u"Watching:"+unicode(w))
+ self.log.info("Watching:"+str(w))
self.wdds.append(self.wm.add_watch(w, mask, rec=True, auto_add=True))
def unmount_watch(self, event_path):
- # remove backwards so we don't get weirdness from
+ # remove backwards so we don't get weirdness from
# the list getting shorter
for i in range(len(self.wdds),0, -1):
wdd = self.wdds[i]
- logging.info(u'unmounting: '+unicode(wdd.items()))
- self.wm.rm_watch(wdd.values())
+ self.log.info('unmounting: '+str(list(wdd.items())))
+ self.wm.rm_watch(list(wdd.values()))
del self.wdds[i]
self.mounted = False
if root_copy_url[-1] != '/':
root_copy_url += '/'
copy_url = root_copy_url + list_event_dir
- logging.debug('Copy url: %s' % (copy_url,))
+ self.log.debug('Copy url: %s' % (copy_url,))
return copy_url
-
+
def process_notify(self, *args):
if self.notifier is None:
# nothing to do yet
self.notifier.read_events()
# should we do something?
# has something happened?
- for watchdir, last_events in self.handler.last_event.items():
- for last_event_dir, last_event_time in last_events.items():
- time_delta = time.time() - last_event_time
+ for watchdir, last_events in list(self.handler.last_event.items()):
+ for last_event_dir, last_event_detail in list(last_events.items()):
+ time_delta = time.time() - last_event_detail.time
if time_delta > self.write_timeout:
+ LOGGER.info("timeout: %s" % (str(last_event_detail),))
copy_url = self.make_copy_url(watchdir, last_event_dir)
self.startCopy(copy_url)
+ if last_event_detail.complete:
+ self.sequencingFinished(last_event_detail.event_root)
+
self.handler.last_event[watchdir] = {}
# handle unmounted filesystems
- for mount_point, was_mounted in self.mounted_points.items():
+ for mount_point, was_mounted in list(self.mounted_points.items()):
if not was_mounted and mount.is_mounted(mount_point):
# we've been remounted. Huzzah!
# restart the watch
for watch in self.mounts_to_watches[mount_point]:
self.add_watch(watch)
- logging.info(
+ self.log.info(
"%s was remounted, restarting watch" % \
(mount_point)
)
"""
Parse xmpp chat messages
"""
- help = u"I can send [copy] message, or squencer [finished]"
- if re.match(u"help", msg):
+ help = "I can send [copy] message, or sequencer [finished]"
+ if re.match("help", msg):
reply = help
- elif re.match("copy", msg):
+ elif re.match("copy", msg):
self.startCopy(msg)
- reply = u"sent copy message"
- elif re.match(u"finished", msg):
+ reply = "sent copy message"
+ elif re.match("finished", msg):
words = msg.split()
if len(words) == 2:
self.sequencingFinished(words[1])
- reply = u"sending sequencing finished for %s" % (words[1])
+ reply = "sending sequencing finished for %s" % (words[1])
else:
- reply = u"need runfolder name"
+ reply = "need runfolder name"
else:
- reply = u"I didn't understand '%s'" %(msg)
+ reply = "I didn't understand '%s'" %(msg)
return reply
-
+
def run(self):
"""
Start application
# after it's initialized.
self.add_watch()
super(SpoolWatcher, self).run()
-
+
def stop(self):
"""
shutdown application
if self.notifier is not None:
self.notifier.stop()
super(SpoolWatcher, self).stop()
-
+
def startCopy(self, copy_url=None):
- logging.debug("writes seem to have stopped")
+ self.log.debug("writes seem to have stopped")
if self.notify_runner is not None:
for r in self.notify_runner:
self.rpc_send(r, tuple([copy_url]), 'startCopy')
if self.notify_users is not None:
for u in self.notify_users:
- self.send(u, 'startCopy %s.' % (copy_urls,))
-
+ self.send(u, 'startCopy %s.' % (copy_url,))
+
def sequencingFinished(self, run_dir):
# need to strip off self.watchdirs from rundir I suspect.
- logging.info("run.completed in " + str(run_dir))
+ self.log.info("run.completed in " + str(run_dir))
for watch in self.watchdirs:
if not run_dir.startswith(watch):
+ LOGGER.info("%s didn't start with %s" % (run_dir, watch))
continue
if watch[-1] != os.path.sep:
watch += os.path.sep
stripped_run_dir = re.sub(watch, "", run_dir)
- logging.debug("stripped to " + stripped_run_dir)
- if self.notify_users is not None:
- for u in self.notify_users:
- self.send(u, 'Sequencing run %s finished' % \
- (stripped_run_dir))
- if self.notify_runner is not None:
- for r in self.notify_runner:
- self.rpc_send(r, (stripped_run_dir,), 'sequencingFinished')
+ else:
+ stripped_run_dir = run_dir
+
+ self.log.debug("stripped to " + stripped_run_dir)
+ if self.notify_users is not None:
+ for u in self.notify_users:
+ self.send(u, 'Sequencing run %s finished' % \
+ (stripped_run_dir))
+ if self.notify_runner is not None:
+ for r in self.notify_runner:
+ self.rpc_send(r, (stripped_run_dir,), 'sequencingFinished')
def main(args=None):
bot = SpoolWatcher()
return bot.main(args)
-
+
if __name__ == "__main__":
ret = main(sys.argv[1:])
#sys.exit(ret)