#!/usr/bin/python
# jirabot: polls the MIT Jira search RSS feeds and forwards new issue
# updates and comments to a zephyr class.
# NOTE: Python 2 code (cStringIO, htmllib, formatter, urlparse).
import cStringIO
import calendar
import feedparser
import formatter
import htmllib
import mechanize
import os
import random
import string
import time
import traceback
import urlparse
import zephyr

zephyr_sender = 'jira'          # "from" name shown on outgoing zephyrs
zephyr_class = 'andersk-test'   # destination zephyr class
time_file = 'jirabot.time'      # checkpoint: newest entry timestamp seen so far
# Fetch the two Jira search feeds: issues updated in the last week, and
# comments on them.
# NOTE(review): the original called b.submit() / b.select_form(nr=0) /
# b.submit() before any page had been opened; mechanize raises
# BrowserStateError in that state, so a login b.open(...) was presumably
# lost in a bad merge.  The dead calls are removed -- if Jira requires an
# interactive login, the login sequence must be restored here.
b = mechanize.Browser()
b.set_handle_robots(False)

b.open("https://jira.mit.edu/jira/sr/jira.issueviews:searchrequest-rss/temp/SearchRequest.xml?&pid=10185&updated%3Aprevious=-1w&sorter/field=updated&sorter/order=DESC&tempMax=1000")
issues_rss = b.response().read()
b.open("https://jira.mit.edu/jira/sr/jira.issueviews:searchrequest-comments-rss/temp/SearchRequest.xml?&pid=10185&updated%3Aprevious=-1w&sorter/field=updated&sorter/order=DESC&tempMax=1000")
comments_rss = b.response().read()

def parse_issue(e):
    """Build a zephyr notice announcing that a Jira issue was updated.

    e: a feedparser entry from the issues feed.  e.id is the issue's URL,
    whose last path component is the issue key (e.g. "PROJ-123"); the key
    becomes the zephyr instance.
    """
    # Issue key = last component of the URL path.
    issue = urlparse.urlparse(e.id)[2].rsplit('/', 1)[1]
    # (The original also bound an unused local `url = e.id`; removed.)
    msg = e.id + "\nThis issue was updated."

    return zephyr.ZNotice(
        sender=zephyr_sender,
        auth=False,
        cls=zephyr_class,
        instance=issue,
        fields=[e.title.encode('UTF-8'), msg.encode('UTF-8')],
    )

def parse_comment(e):
    """Build a zephyr notice for a new Jira comment.

    e: a feedparser entry from the comments feed.  e.id is the issue URL
    plus a query/fragment identifying the comment; e.summary is HTML whose
    trailing <table> section is Jira boilerplate.
    """
    # Drop the query/fragment to recover the bare issue URL, then take the
    # issue key (last path component) as the zephyr instance.
    url = urlparse.urlunsplit(urlparse.urlparse(e.id)[0:3] + (None, None))
    issue = url.rsplit('/', 1)[1]

    # Render the comment HTML to plain text, discarding the boilerplate
    # table Jira appends after the comment body.
    s = cStringIO.StringIO()
    parser = htmllib.HTMLParser(formatter.AbstractFormatter(formatter.DumbWriter(s)))
    parser.feed(e.summary.rsplit('<table>', 1)[0])
    parser.close()
    s.seek(0)
    comment = s.read()

    msg = e.author + " added a comment:\n" + comment.rstrip()

    # BUG FIX: the original repeated the sender= and auth= keyword
    # arguments in this call, which is a SyntaxError ("duplicate keyword
    # argument"); the duplicates are removed.
    return zephyr.ZNotice(
        sender=zephyr_sender,
        auth=False,
        cls=zephyr_class,
        instance=issue,
        fields=[e.title.encode('UTF-8'), msg.encode('UTF-8')],
    )

def zerror(msg):
    """Return an unauthenticated zephyrgram reporting a bot error.

    The notice goes to the 'jira-error' instance of the configured class,
    with a fixed title and the given message as the body.
    """
    payload = ['Jira bot error', msg.encode('UTF-8')]
    return zephyr.ZNotice(sender=zephyr_sender, auth=False,
                          cls=zephyr_class, instance='jira-error',
                          fields=payload)

# --- main ---------------------------------------------------------------
# Concurrency guard + checkpoint: atomically claim the timestamp file by
# renaming it to a random temporary name.  If the rename fails, another
# instance holds the file (or it doesn't exist yet), so exit quietly.
time_file_new = time_file + '.' + ''.join(random.sample(string.letters, 8))

try:
    os.rename(time_file, time_file_new)
except OSError:
    exit()

# BUG FIX: close the checkpoint file handles explicitly instead of
# leaking them (Python 2 has no guarantee of prompt finalization).
f = open(time_file_new)
try:
    old_time = int(f.read())
finally:
    f.close()
new_time = old_time

zephyr.init()
zephyrs = []

# Collect a (timestamp, notice) pair for every feed entry newer than the
# checkpoint.  Failures are deliberately converted into error notices
# rather than aborting the run, but we now catch Exception instead of a
# bare except: so SystemExit/KeyboardInterrupt still propagate.
for (thing, rss, parse) in [('issue', issues_rss, parse_issue),
                            ('comment', comments_rss, parse_comment)]:
    try:
        feed = feedparser.parse(rss)
        for e in feed.entries:
            t = int(calendar.timegm(e.date_parsed))
            if t <= old_time:
                continue  # already reported on a previous run
            if t > new_time:
                new_time = t
            try:
                z = parse(e)
            except Exception:
                z = zerror("Error parsing " + thing + ":\n" + e.id + "\n"
                           + traceback.format_exc())
            zephyrs.append((t, z))
    except Exception:
        zephyrs.append((0, zerror("Error parsing " + thing + "s feed:\n"
                                  + traceback.format_exc())))

# Persist the new checkpoint and release the lock (rename back) before
# sending, so a crash while zephyring doesn't re-deliver everything.
f = open(time_file_new, 'w')
try:
    f.write(str(new_time))
finally:
    f.close()

os.rename(time_file_new, time_file)

# Send oldest-first so recipients see updates in chronological order;
# error notices carry timestamp 0 and therefore sort first.
zephyrs.sort(key=lambda tz: tz[0])
for (t, z) in zephyrs:
    z.send()