9 from htsworkflow.util import mount
13 from pyinotify import EventsCodes
15 from benderjab import rpc
class WatcherEvents(object):
    """Placeholder for per-runfolder event bookkeeping (not implemented yet)."""
    # two events need to be tracked
    # one to send startCopy
    # one to send OMG its broken
    # OMG its broken needs to stop when we've seen enough
    # this should be per runfolder.
class Handler(pyinotify.ProcessEvent):
    """inotify event handler.

    Timestamps filesystem activity so SpoolWatcher can detect when writing
    has stopped, and relays run-completion / unmount events to the bot.
    """

    def __init__(self, watchmanager, bot):
        # time.time() of the most recent create event; None until one arrives
        self.last_event_time = None
        self.watchmanager = watchmanager
        # NOTE(review): the methods below use self.bot but the pasted source
        # lost the assignment — restored here.
        self.bot = bot

    def process_IN_CREATE(self, event):
        self.last_event_time = time.time()
        msg = "Create: %s" % os.path.join(event.path, event.name)
        if event.name.lower() == "run.completed":
            # NOTE(review): the try/except lines around this call fell out of
            # the pasted source; reconstructed so a delivery failure is logged
            # instead of killing the notifier loop.
            try:
                self.bot.sequencingFinished(event.path)
            except Exception:
                logging.error("Couldn't send sequencingFinished")
        logging.debug(msg)

    def process_IN_DELETE(self, event):
        logging.debug("Remove: %s" % os.path.join(event.path, event.name))

    def process_IN_UNMOUNT(self, event):
        pathname = os.path.join(event.path, event.name)
        logging.debug("IN_UNMOUNT: %s" % (pathname,))
        # the filesystem holding the watched tree went away; let the bot tear
        # down its watch state
        self.bot.unmount_watch()
class SpoolWatcher(rpc.XmlRpcBot):
    """
    Watch a directory and send a message when another process is done writing.

    This monitors a directory tree using inotify (linux specific) and
    after some files having been written will send a message after <timeout>
    seconds of no file writing.

    (Basically when the solexa machine finishes dumping a round of data
    this'll hopefully send out a message saying hey look theres data available)
    """
    # these params need to be in the config file
    # I wonder where I should put the documentation
    #
    # `watchdir` - which directory tree to monitor for modifications
    # `profile` - specify which .htsworkflow profile to use
    # `write_timeout` - how many seconds to wait for writes to finish to
    # `notify_timeout` - how often to timeout from notify
    def __init__(self, section=None, configfile=None):
        #if configfile is None:
        #    self.configfile = "~/.htsworkflow"
        super(SpoolWatcher, self).__init__(section, configfile)

        # config defaults; read_config() overrides these from the profile
        self.cfg['watchdir'] = None
        self.cfg['write_timeout'] = 10
        self.cfg['notify_users'] = None
        self.cfg['notify_runner'] = None

        # poll interval (seconds) handed to pyinotify's check_events
        self.notify_timeout = 0.001
        self.wm = pyinotify.WatchManager()
        self.handler = Handler(self.wm, self)
        self.notifier = pyinotify.Notifier(self.wm, self.handler)

        # NOTE(review): unmount_watch() reads self.wdd, but its initialization
        # is not visible in this chunk — confirm it is set (e.g. to None)
        # before the first event can fire.
        self.mount_point = None
        self.notify_users = None
        self.notify_runner = None

        # run process_notify on every bot event-loop tick
        self.eventTasks.append(self.process_notify)
98 def read_config(self, section=None, configfile=None):
99 super(SpoolWatcher, self).read_config(section, configfile)
101 self.watch_dir = self._check_required_option('watchdir')
102 self.write_timeout = int(self.cfg['write_timeout'])
104 self.notify_users = self._parse_user_list(self.cfg['notify_users'])
106 self.notify_runner = \
107 self._parse_user_list(self.cfg['notify_runner'],
108 require_resource=True)
109 except bot.JIDMissingResource:
110 msg = 'need a full jabber ID + resource for xml-rpc destinations'
112 raise bot.JIDMissingResource(msg)
114 def add_watch(self, watchdir=None):
116 start watching watchdir or self.watch_dir
117 we're currently limited to watching one directory tree.
119 # the one tree limit is mostly because self.wdd is a single item
120 # but managing it as a list might be a bit more annoying
122 watchdir = self.watch_dir
123 logging.info("Watching:"+str(watchdir))
125 self.mount_point = mount.find_mount_point_for(watchdir)
127 mask = EventsCodes.IN_CREATE | EventsCodes.IN_UNMOUNT
128 # rec traverses the tree and adds all the directories that are there
130 # auto_add will add in new directories as they are created
131 self.wdd = self.wm.add_watch(watchdir, mask, rec=True, auto_add=True)
133 def unmount_watch(self):
134 if self.wdd is not None:
135 self.wm.rm_watch(self.wdd.values())
    def process_notify(self, *args):
        """Event-loop tick: pump pyinotify and react when writing stops."""
        # process the queue of events as explained above
        self.notifier.process_events()
        # check events waits timeout (self.notify_timeout seconds)
        if self.notifier.check_events(self.notify_timeout):
            # read notified events and enqeue them
            self.notifier.read_events()
        # should we do something?
        # has something happened?
        last_event_time = self.handler.last_event_time
        if last_event_time is not None:
            time_delta = time.time() - last_event_time
            if time_delta > self.write_timeout:
                # NOTE(review): a statement is missing from this chunk here —
                # presumably the "writes finished" action (startCopy or
                # similar); confirm against the full source.
                self.handler.last_event_time = None
        # handle unmounted filesystems
        if mount.is_mounted(self.mount_point):
            # we've been remounted. Huzzah!
            # NOTE(review): the remount-handling statements (logging call and
            # watch re-add) are missing from this chunk; only this format
            # string fragment survives, so the line below is incomplete.
            "%s was remounted, restarting watch" % \
    def _parser(self, msg, who):
        """
        Parse xmpp chat messages.

        NOTE(review): several lines of this method are missing from this
        chunk (the `reply = help` branch body, the split of msg into `words`,
        the else headers, and the final `return reply`); the indentation
        below is a best guess and the method is not runnable as shown.
        """
        help = u"I can send [copy] message, or squencer [finished]"
        if re.match(u"help", msg):
        elif re.match("copy", msg):
            reply = u"sent copy message"
        elif re.match(u"finished", msg):
            # presumably guarded by a check that msg split into two words
            self.sequencingFinished(words[1])
            reply = u"sending sequencing finished for %s" % (words[1])
            reply = u"need runfolder name"
        reply = u"I didn't understand '%s'" %(msg)
    def start(self, daemonize):
        # NOTE(review): the body of this method is mostly missing from this
        # chunk; only the delegation to the base-class start survives.
        super(SpoolWatcher, self).start(daemonize)
        # NOTE(review): the `def stop(...)` header for this method is not
        # visible in this chunk; these lines appear to be the tail of
        # SpoolWatcher.stop.
        # destroy the inotify's instance on this interrupt (stop monitoring)
        super(SpoolWatcher, self).stop()
        # NOTE(review): the `def` header for this method is not visible in
        # this chunk; judging by the rpc message name it is the method that
        # tells runner daemons to start copying ("startCopy").
        logging.debug("writes seem to have stopped")
        if self.notify_runner is not None:
            for r in self.notify_runner:
                self.rpc_send(r, tuple(), 'startCopy')
208 def sequencingFinished(self, run_dir):
209 # need to strip off self.watch_dir from rundir I suspect.
210 logging.info("run.completed in " + str(run_dir))
211 pattern = self.watch_dir
212 if pattern[-1] != os.path.sep:
213 pattern += os.path.sep
214 stripped_run_dir = re.sub(pattern, "", run_dir)
215 logging.debug("stripped to " + stripped_run_dir)
216 if self.notify_users is not None:
217 for u in self.notify_users:
218 self.send(u, 'Sequencing run %s finished' % (stripped_run_dir))
219 if self.notify_runner is not None:
220 for r in self.notify_runner:
221 self.rpc_send(r, (stripped_run_dir,), 'sequencingFinished')
    # NOTE(review): tail of a module-level main(args); its `def` header is
    # not visible in this chunk. Delegates to benderjab's bot.main entry
    # point.
    return bot.main(args)
# standard script entry point; main() is defined above (its header is not
# visible in this chunk) and its return value becomes the exit status
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))