Watch for a list of files to indicate that the flowcell is done.
htsworkflow.git: htsworkflow/automation/spoolwatcher.py
#!/usr/bin/env python
import logging
import os
import re
import shlex
import sys
import time

from htsworkflow.util import mount

# this uses pyinotify
import pyinotify
from pyinotify import EventsCodes

from benderjab import rpc
# JIDMissingResource (handled in read_config below) is assumed to come from
# benderjab's bot module
from benderjab import bot

def is_runfolder(name):
    """
    Is it a runfolder?

    >>> print is_runfolder('090630_HWUSI-EAS999_0006_30LNFAAXX')
    True
    >>> print is_runfolder('hello')
    False
    """
    if re.match("[0-9]{6}_.*", name):
        return True
    else:
        return False

def get_top_dir(root, path):
    """
    Return the directory in path that is a subdirectory of root.
    e.g.

    >>> print get_top_dir('/a/b/c', '/a/b/c/d/e/f')
    d
    >>> print get_top_dir('/a/b/c/', '/a/b/c/d/e/f')
    d
    >>> print get_top_dir('/a/b/c', '/g/e/f')
    None
    >>> print get_top_dir('/a/b/c', '/a/b/c')
    <BLANKLINE>
    """
    if path.startswith(root):
        subpath = path[len(root):]
        if subpath.startswith('/'):
            subpath = subpath[1:]
        return subpath.split(os.path.sep)[0]
    else:
        return None
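
# The doctests above can be exercised with doctest.testmod(), for example
# (assuming the htsworkflow package and its dependencies are importable):
#   python -c "import doctest; from htsworkflow.automation import spoolwatcher; doctest.testmod(spoolwatcher, verbose=True)"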

class WatcherEvent(object):
    """
    Track information about a file event

    Currently this is the event time, and whether the event indicated that
    the run completed.
    """
    def __init__(self, event_root=None):
        self.time = time.time()
        self.event_root = event_root
        self.complete = False

    def __unicode__(self):
        if self.complete:
            complete = "(completed)"
        else:
            complete = ""
        return u"<WatcherEvent: %s %s %s>" % (time.ctime(self.time), self.event_root, complete)

class Handler(pyinotify.ProcessEvent):
    def __init__(self, watchmanager, bot, completion_files=None):
        """
        completion_files is the list of filenames that mark a run as complete
        """
        self.last_event = {}
        self.watchmanager = watchmanager
        self.bot = bot
        if completion_files is not None:
            completion_files = [ x.lower() for x in completion_files ]
        else:
            completion_files = []
        self.completion_files = completion_files

    def process_IN_CREATE(self, event):
        for wdd in self.bot.wdds:
            for watch_path in self.bot.watchdirs:
                run_already_complete = False
                # I only care about things created inside the watch directory, so
                # the event path needs to be longer than the watch path in addition to
                # starting with the watch_path
                if len(event.path) > len(watch_path) and event.path.startswith(watch_path):
                    # compute the name of the top level directory that had an event
                    # in the current watch path
                    target = get_top_dir(watch_path, event.path)
                    runfolder = os.path.join(watch_path, target)

                    if not is_runfolder(target):
                        logging.debug("Skipping %s, not a runfolder" % (target,))
                        continue

                    # grab the previous events for this watch path
                    watch_path_events = self.last_event.setdefault(watch_path, {})

                    # if we've already seen an event in this directory (AKA runfolder)
                    # keep track of whether it has already hit the "completed" flag
                    if target in watch_path_events:
                        run_already_complete = watch_path_events[target].complete

                    watch_path_events[target] = WatcherEvent(target)

                    msg = "Create: %s %s %s %s" % (watch_path, target, event.path, event.name)

                    # the ReadPrep step uses some of the same file completion flags as the
                    # main analysis, which means this completion code might get tripped by
                    # it, so we need to make sure we're seeing the completion file in the
                    # root of the runfolder
                    event_name = event.name.lower()
                    if (event_name in self.completion_files and event.path == runfolder) \
                      or run_already_complete:
                        self.last_event[watch_path][target].complete = True
                        msg += " (completed)"

                    logging.debug(msg)

    def process_IN_DELETE(self, event):
        logging.debug("Remove: %s" % os.path.join(event.path, event.name))

    def process_IN_UNMOUNT(self, event):
        pathname = os.path.join(event.path, event.name)
        logging.debug("IN_UNMOUNT: %s" % (pathname,))
        self.bot.unmount_watch(event.path)

class SpoolWatcher(rpc.XmlRpcBot):
    """
    Watch a directory and send a message when another process is done writing.

    This monitors a directory tree using inotify (linux specific) and,
    after some files have been written, sends a message once <write_timeout>
    seconds have passed with no further writes.

    (Basically, when the solexa machine finishes dumping a round of data
    this will hopefully send out a message saying, hey look, there's data
    available.)
    """
    # these params need to be in the config file
    # I wonder where I should put the documentation
    #:Parameters:
    #    `watchdirs` - list of directories to monitor for modifications
    #    `profile` - specify which .htsworkflow profile to use
    #    `write_timeout` - how many seconds to wait for writes to finish to
    #                      the spool
    #    `notify_timeout` - how often to timeout from notify
    #    `completion_files` - which files indicate we've finished sequencing;
    #                         defaults to the ImageAnalysis_Netcopy_complete
    #                         files set in __init__ below
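    #
    # An illustrative ~/.htsworkflow section (sketch only: the section name,
    # paths, and JIDs below are made up, and the exact file layout depends on
    # benderjab's config handling):
    #
    #    [spoolwatcher]
    #    watchdirs = /mnt/sequencer/spool
    #    # optional alternate copy url for a watchdir, keyed by its path
    #    /mnt/sequencer/spool = rsync://sequencer.example.org/spool
    #    write_timeout = 60
    #    notify_users = admin@example.org
    #    notify_runner = runner@example.org/copier
    #    completion_files = ImageAnalysis_Netcopy_complete_SINGLEREAD.txt
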
    def __init__(self, section=None, configfile=None):
        #if configfile is None:
        #    self.configfile = "~/.htsworkflow"
        super(SpoolWatcher, self).__init__(section, configfile)

        self.cfg['watchdirs'] = None
        self.cfg['write_timeout'] = 10
        self.cfg['notify_users'] = None
        self.cfg['notify_runner'] = None
        self.cfg['completion_files'] = 'ImageAnalysis_Netcopy_complete_READ2.txt ImageAnalysis_Netcopy_complete_SINGLEREAD.txt'

        self.watchdirs = []
        self.watchdir_url_map = {}
        self.notify_timeout = 0.001

        self.wm = None
        self.notify_users = None
        self.notify_runner = None
        self.wdds = []

        # keep track if the specified mount point is currently mounted
        self.mounted_points = {}
        # keep track of which mount points tie to which watch directories
        # so maybe we can remount them.
        self.mounts_to_watches = {}

        self.eventTasks.append(self.process_notify)

    def read_config(self, section=None, configfile=None):
        # Don't give in to the temptation to use logging functions here,
        # need to wait until after we detach in start
        super(SpoolWatcher, self).read_config(section, configfile)

        self.watchdirs = shlex.split(self._check_required_option('watchdirs'))
        # see if there's an alternate url that should be used for the watchdir
        for watchdir in self.watchdirs:
            self.watchdir_url_map[watchdir] = self.cfg.get(watchdir, watchdir)

        self.write_timeout = int(self.cfg['write_timeout'])
        self.completion_files = shlex.split(self.cfg['completion_files'])

        self.notify_users = self._parse_user_list(self.cfg['notify_users'])
        try:
            self.notify_runner = \
                self._parse_user_list(self.cfg['notify_runner'],
                                      require_resource=True)
        except bot.JIDMissingResource:
            msg = 'need a full jabber ID + resource for xml-rpc destinations'
            raise bot.JIDMissingResource(msg)

        self.handler = None
        self.notifier = None

    def add_watch(self, watchdirs=None):
        """
        Start watching the directories in watchdirs, or self.watchdirs if
        none are given.
        """
        # create the watch managers if we need them
        if self.wm is None:
            self.wm = pyinotify.WatchManager()
            self.handler = Handler(self.wm, self, self.completion_files)
            self.notifier = pyinotify.Notifier(self.wm, self.handler)

        if watchdirs is None:
            watchdirs = self.watchdirs

        mask = EventsCodes.IN_CREATE | EventsCodes.IN_UNMOUNT
        # rec traverses the tree and adds all the directories that are there
        # at the start.
        # auto_add will add in new directories as they are created
        for w in watchdirs:
            mount_location = mount.find_mount_point_for(w)
            self.mounted_points[mount_location] = True
            mounts = self.mounts_to_watches.get(mount_location, [])
            if w not in mounts:
                mounts.append(w)
                self.mounts_to_watches[mount_location] = mounts

            logging.info(u"Watching: " + unicode(w))
            self.wdds.append(self.wm.add_watch(w, mask, rec=True, auto_add=True))

    def unmount_watch(self, event_path):
        # remove backwards so we don't get weirdness from
        # the list getting shorter
        for i in range(len(self.wdds) - 1, -1, -1):
            wdd = self.wdds[i]
            logging.info(u'unmounting: ' + unicode(wdd.items()))
            self.wm.rm_watch(wdd.values())
            del self.wdds[i]
        # record that the mount point backing this watch is gone, so
        # process_notify can restart the watch if it gets remounted
        mount_location = mount.find_mount_point_for(event_path)
        self.mounted_points[mount_location] = False

    def make_copy_url(self, watchdir, list_event_dir):
        root_copy_url = self.watchdir_url_map[watchdir]
        if root_copy_url[-1] != '/':
            root_copy_url += '/'
        copy_url = root_copy_url + list_event_dir
        logging.debug('Copy url: %s' % (copy_url,))
        return copy_url

    def process_notify(self, *args):
        if self.notifier is None:
            # nothing to do yet
            return
        # process the queue of events as explained above
        self.notifier.process_events()
        # check_events waits up to notify_timeout for new events
        if self.notifier.check_events(self.notify_timeout):
            # read notified events and enqueue them
            self.notifier.read_events()
            # should we do something?
        # has something happened?
        for watchdir, last_events in self.handler.last_event.items():
            for last_event_dir, last_event_detail in last_events.items():
                time_delta = time.time() - last_event_detail.time
                if time_delta > self.write_timeout:
                    logging.debug("timeout: " + unicode(last_event_detail))
                    copy_url = self.make_copy_url(watchdir, last_event_dir)
                    self.startCopy(copy_url)
                    if last_event_detail.complete:
                        self.sequencingFinished(last_event_detail.event_root)

                    self.handler.last_event[watchdir] = {}
        # handle unmounted filesystems
        for mount_point, was_mounted in self.mounted_points.items():
            if not was_mounted and mount.is_mounted(mount_point):
                # we've been remounted. Huzzah!
                # restart the watch
                for watch in self.mounts_to_watches[mount_point]:
                    # add_watch expects a list of directories
                    self.add_watch([watch])
                    logging.info(
                        "%s was remounted, restarting watch" % \
                            (mount_point,)
                    )
                self.mounted_points[mount_point] = True

    def _parser(self, msg, who):
        """
        Parse xmpp chat messages
        """
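        # Example exchanges (illustrative; the runfolder name is the one from
        # the is_runfolder doctest above):
        #   "help"    -> replies with the usage summary
        #   "copy"    -> calls startCopy and replies "sent copy message"
        #   "finished 090630_HWUSI-EAS999_0006_30LNFAAXX"
        #             -> calls sequencingFinished for that runfolder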
        help = u"I can send [copy] message, or sequencer [finished]"
        if re.match(u"help", msg):
            reply = help
        elif re.match("copy", msg):
            self.startCopy(msg)
            reply = u"sent copy message"
        elif re.match(u"finished", msg):
            words = msg.split()
            if len(words) == 2:
                self.sequencingFinished(words[1])
                reply = u"sending sequencing finished for %s" % (words[1])
            else:
                reply = u"need runfolder name"
        else:
            reply = u"I didn't understand '%s'" % (msg,)
        return reply

    def run(self):
        """
        Start application
        """
        # we have to configure pyinotify after BenderJab.start is called
        # as weird things happen to pyinotify if the stdio is closed
        # after it's initialized.
        self.add_watch()
        super(SpoolWatcher, self).run()

    def stop(self):
        """
        shutdown application
        """
        # destroy the inotify's instance on this interrupt (stop monitoring)
        if self.notifier is not None:
            self.notifier.stop()
        super(SpoolWatcher, self).stop()

    def startCopy(self, copy_url=None):
        logging.debug("writes seem to have stopped")
        if self.notify_runner is not None:
            for r in self.notify_runner:
                self.rpc_send(r, tuple([copy_url]), 'startCopy')
        if self.notify_users is not None:
            for u in self.notify_users:
                self.send(u, 'startCopy %s.' % (copy_url,))

    def sequencingFinished(self, run_dir):
        # need to strip off self.watchdirs from run_dir I suspect.
        logging.info("run.completed in " + str(run_dir))
        for watch in self.watchdirs:
            if not run_dir.startswith(watch):
                logging.debug("%s didn't start with %s" % (run_dir, watch))
                continue
            if watch[-1] != os.path.sep:
                watch += os.path.sep
            stripped_run_dir = re.sub(watch, "", run_dir)
            break
        else:
            # no watch directory matched, pass run_dir through unchanged
            stripped_run_dir = run_dir

        logging.debug("stripped to " + stripped_run_dir)
        if self.notify_users is not None:
            for u in self.notify_users:
                self.send(u, 'Sequencing run %s finished' % \
                          (stripped_run_dir))
        if self.notify_runner is not None:
            for r in self.notify_runner:
                self.rpc_send(r, (stripped_run_dir,), 'sequencingFinished')

def main(args=None):
    bot = SpoolWatcher()
    return bot.main(args)

if __name__ == "__main__":
    ret = main(sys.argv[1:])
    #sys.exit(ret)

# TODO:
# send messages to copier specifying which mount to copy