b.select_form(nr=0)
b.submit()
+# Convert one RSS feed into a list of (timestamp, zephyr) pairs.
+#
+#   thing -- noun used in error messages ('issue' or 'comment')
+#   rss   -- raw feed text, handed straight to feedparser
+#   parse -- callback turning a single feed entry into a zephyr
+#
+# Entries at or before the global old_time watermark are skipped; the
+# global new_time watermark is advanced past every newer entry seen.
+# A failure to parse one entry, or the whole feed, is reported as an
+# error zephyr in the result rather than aborting the run.
+def feed_to_zephyrs(thing, rss, parse):
+    global old_time, new_time
+    zephyrs = []
+    try:
+        feed = feedparser.parse(rss)
+        for e in feed.entries:
+            t = int(calendar.timegm(e.date_parsed))
+            if t <= old_time:
+                continue
+            if t > new_time:
+                new_time = t
+            try:
+                z = parse(e)
+            except:
+                z = zerror("Error parsing " + thing + ":\n" + e.id + "\n" + traceback.format_exc())
+            zephyrs.append((t, z))
+    except:
+        zephyrs.append((0, zerror("Error parsing " + thing + "s feed:\n" + traceback.format_exc())))
+    return zephyrs
+
def parse_issue(e):
issue = urlparse.urlparse(e.id)[2].rsplit('/', 1)[1]
url = e.id
)
b = jira_init()
+zephyr.init()
-jira_login(b)
-b.open("https://jira.mit.edu/jira/sr/jira.issueviews:searchrequest-rss/temp/SearchRequest.xml?&pid=10185&updated%3Aprevious=-1w&sorter/field=updated&sorter/order=DESC&tempMax=1000")
-issues_rss = b.response().read()
-b.open("https://jira.mit.edu/jira/sr/jira.issueviews:searchrequest-comments-rss/temp/SearchRequest.xml?&pid=10185&updated%3Aprevious=-1w&sorter/field=updated&sorter/order=DESC&tempMax=1000")
-comments_rss = b.response().read()
-
-time_file_new = time_file + '.' + ''.join(random.sample(string.letters, 8))
+# Poll JIRA once a minute, forever (was: a single run per invocation).
+while True:
+    jira_login(b)
+    b.open("https://jira.mit.edu/jira/sr/jira.issueviews:searchrequest-rss/temp/SearchRequest.xml?&pid=10185&updated%3Aprevious=-1w&sorter/field=updated&sorter/order=DESC&tempMax=1000")
+    issues_rss = b.response().read()
+    b.open("https://jira.mit.edu/jira/sr/jira.issueviews:searchrequest-comments-rss/temp/SearchRequest.xml?&pid=10185&updated%3Aprevious=-1w&sorter/field=updated&sorter/order=DESC&tempMax=1000")
+    comments_rss = b.response().read()
-try:
-    os.rename(time_file, time_file_new)
-except OSError:
-    exit()
+    # Renaming the timestamp file to a random private name doubles as a
+    # lock: if another instance holds it, back off briefly and retry
+    # instead of exiting.
+    time_file_new = time_file + '.' + ''.join(random.sample(string.letters, 8))
-old_time = int(open(time_file_new).read())
-new_time = old_time
+    try:
+        os.rename(time_file, time_file_new)
+    except OSError:
+        print "warning: could not acquire timestamp lock"
+        time.sleep(17)
+        continue
-zephyr.init()
-zephyrs = []
+    # Watermarks read by feed_to_zephyrs (via global).
+    old_time = int(open(time_file_new).read())
+    new_time = old_time
-for (thing, rss, parse) in [('issue', issues_rss, parse_issue),
-                            ('comment', comments_rss, parse_comment)]:
-    try:
-        feed = feedparser.parse(rss)
-        for e in feed.entries:
-            t = int(calendar.timegm(e.date_parsed))
-            if t <= old_time:
-                continue
-            if t > new_time:
-                new_time = t
-            try:
-                z = parse(e)
-            except:
-                z = zerror("Error parsing " + thing + ":\n" + e.id + "\n" + traceback.format_exc())
-            zephyrs.append((t, z))
-    except:
-        zephyrs.append((0, zerror("Error parsing " + thing + "s feed:\n" + traceback.format_exc())))
+    zephyrs = (feed_to_zephyrs('issue', issues_rss, parse_issue) +
+               feed_to_zephyrs('comment', comments_rss, parse_comment))
-open(time_file_new, 'w').write(str(new_time))
+    # Persist the advanced watermark, then release the lock by renaming
+    # the file back into place.
+    open(time_file_new, 'w').write(str(new_time))
-os.rename(time_file_new, time_file)
+    os.rename(time_file_new, time_file)
-zephyrs.sort(key=lambda tz: tz[0])
-for (t, z) in zephyrs:
-    z.send()
+    # Deliver oldest-first.
+    zephyrs.sort(key=lambda tz: tz[0])
+    for (t, z) in zephyrs:
+        z.send()
+    time.sleep(60)