Initial commit
authorScott Gasch <[email protected]>
Wed, 1 Jul 2020 21:01:40 +0000 (14:01 -0700)
committerScott Gasch <[email protected]>
Wed, 1 Jul 2020 21:01:40 +0000 (14:01 -0700)
91 files changed:
.gitignore [new file with mode: 0644]
README.txt [new file with mode: 0644]
bellevue_reporter_rss_renderer.py [new file with mode: 0644]
camera_trigger.py [new file with mode: 0644]
chooser.py [new file with mode: 0644]
cnn_rss_renderer.py [new file with mode: 0644]
constants.py [new file with mode: 0644]
file_writer.py [new file with mode: 0644]
gcal_renderer.py [new file with mode: 0644]
gcal_trigger.py [new file with mode: 0644]
gdata_oauth.py [new file with mode: 0644]
gdocs_renderer.py [new file with mode: 0644]
generic_news_rss_renderer.py [new file with mode: 0644]
gkeep_renderer.py [new file with mode: 0644]
globals.py [new file with mode: 0644]
grab_bag.py [new file with mode: 0644]
health_renderer.py [new file with mode: 0644]
kiosk.py [new file with mode: 0755]
local_photos_mirror_renderer.py [new file with mode: 0644]
logger.py [new file with mode: 0644]
mynorthwest_rss_renderer.py [new file with mode: 0644]
myq_trigger.py [new file with mode: 0644]
page_builder.py [new file with mode: 0644]
pages/cabin_2_none.html [new file with mode: 0644]
pages/clock_10_none.html [new file with mode: 0755]
pages/clockimg/0.png [new file with mode: 0644]
pages/clockimg/1.png [new file with mode: 0644]
pages/clockimg/2.png [new file with mode: 0644]
pages/clockimg/3.png [new file with mode: 0644]
pages/clockimg/4.png [new file with mode: 0644]
pages/clockimg/5.png [new file with mode: 0644]
pages/clockimg/6.png [new file with mode: 0644]
pages/clockimg/7.png [new file with mode: 0644]
pages/clockimg/8.png [new file with mode: 0644]
pages/clockimg/9.png [new file with mode: 0644]
pages/clockimg/apr.png [new file with mode: 0644]
pages/clockimg/aug.png [new file with mode: 0644]
pages/clockimg/clockface.png [new file with mode: 0644]
pages/clockimg/clockface_old.png [new file with mode: 0644]
pages/clockimg/dash.png [new file with mode: 0644]
pages/clockimg/dec.png [new file with mode: 0644]
pages/clockimg/dial.png [new file with mode: 0644]
pages/clockimg/feb.png [new file with mode: 0644]
pages/clockimg/fri.png [new file with mode: 0644]
pages/clockimg/hourhand.png [new file with mode: 0644]
pages/clockimg/jan.png [new file with mode: 0644]
pages/clockimg/jul.png [new file with mode: 0644]
pages/clockimg/jun.png [new file with mode: 0644]
pages/clockimg/mar.png [new file with mode: 0644]
pages/clockimg/may.png [new file with mode: 0644]
pages/clockimg/minhand.png [new file with mode: 0644]
pages/clockimg/mon.png [new file with mode: 0644]
pages/clockimg/nov.png [new file with mode: 0644]
pages/clockimg/oct.png [new file with mode: 0644]
pages/clockimg/sat.png [new file with mode: 0644]
pages/clockimg/sechand.png [new file with mode: 0644]
pages/clockimg/sep.png [new file with mode: 0644]
pages/clockimg/slash.png [new file with mode: 0644]
pages/clockimg/smallface.gif [new file with mode: 0644]
pages/clockimg/smallface.jpg [new file with mode: 0644]
pages/clockimg/sun.png [new file with mode: 0644]
pages/clockimg/thu.png [new file with mode: 0644]
pages/clockimg/tue.png [new file with mode: 0644]
pages/clockimg/wed.png [new file with mode: 0644]
pages/hidden/cabin.html [new file with mode: 0644]
pages/hidden/driveway.html [new file with mode: 0644]
pages/jquery-1.2.6.min.js [new file with mode: 0755]
pages/radar_2_none.html [new file with mode: 0644]
pages/stevens_cams_1_none.html [new file with mode: 0644]
pages/style.css [new file with mode: 0644]
pages/wenatchee-cams_3_none.html [new file with mode: 0644]
pages/wsdot-bridges_3_none.html [new file with mode: 0644]
picasa_renderer.py [new file with mode: 0644]
pollen_renderer.py [new file with mode: 0644]
profanity_filter.py [new file with mode: 0644]
reddit_renderer.py [new file with mode: 0644]
renderer.py [new file with mode: 0644]
renderer_catalog.py [new file with mode: 0644]
reuters_rss_renderer.py [new file with mode: 0644]
seattletimes_rss_renderer.py [new file with mode: 0644]
secrets.py [new file with mode: 0644]
stdin_trigger.py [new file with mode: 0644]
stevens_renderer.py [new file with mode: 0644]
stock_renderer.py [new file with mode: 0644]
stranger_renderer.py [new file with mode: 0644]
trigger.py [new file with mode: 0644]
trigger_catalog.py [new file with mode: 0644]
twitter_renderer.py [new file with mode: 0644]
utils.py [new file with mode: 0644]
weather_renderer.py [new file with mode: 0644]
wsj_rss_renderer.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..0d20b64
--- /dev/null
@@ -0,0 +1 @@
+*.pyc
diff --git a/README.txt b/README.txt
new file mode 100644 (file)
index 0000000..d684b6f
--- /dev/null
@@ -0,0 +1,20 @@
+Root directory of the kitchen kiosk project.  It works like this:
+
+kiosk.py is the main entry point.  It has two jobs: start up a chooser
+thread that picks what the kiosk is currently showing by moving a
+symlink called current.html in the ./pages/ directory.  It has a kinda
+crazy semantic for doing this:
+
+  1. it looks for all files that look like [name]_[weight]_[staleness].html
+  2. it sees if there are any trigger conditions (e.g. movement on a webcam)
+     and, if so, it chooses the appropriate page (./pages/hidden/*)
+  3. otherwise it does a weighted random selection based on [weight], as
+     long as [staleness] is not too stale.
+
+The other main thread in kiosk is a renderer thread which knows how to go
+out and fetch information from the web somewhere and make a page with a
+filename of something like [basename]_[weight]_[staleness].html.  It makes
+a bunch of these off in ./pages and refreshes them at periodic intervals.
+All of this code is in renderer.py and it's kinda ugly.
+
+Feel free to mail me if you have questions: [email protected].
diff --git a/bellevue_reporter_rss_renderer.py b/bellevue_reporter_rss_renderer.py
new file mode 100644 (file)
index 0000000..f630aee
--- /dev/null
@@ -0,0 +1,59 @@
+import generic_news_rss_renderer as gnrss
+import re
+
+class bellevue_reporter_rss_renderer(gnrss.generic_news_rss_renderer):
+    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
+        super(bellevue_reporter_rss_renderer, self).__init__(
+            name_to_timeout_dict,
+            feed_site,
+            feed_uris,
+            page_title)
+        self.debug = 1
+
+    def debug_prefix(self):
+        return "bellevue_reporter(%s)" % (self.page_title)
+
+    def get_headlines_page_prefix(self):
+        return "bellevue-reporter"
+
+    def get_details_page_prefix(self):
+        return "bellevue-reporter-details"
+
+    def should_use_https(self):
+        return True
+
+    def munge_description(self, description):
+        description = re.sub('<[^>]+>', '', description)
+        description = re.sub('Bellevue\s+Reporter\s+Bellevue\s+Reporter', '',
+                             description)
+        description = re.sub('\s*\-\s*Your local homepage\.\s*', '', description)
+        return description
+
+    def item_is_interesting_for_headlines(self, title, description, item):
+        return True
+
+    def item_is_interesting_for_article(self, title, description, item):
+        return True
+
+# Test
+#x = bellevue_reporter_rss_renderer(
+#    {"Fetch News" : 1,
+#     "Shuffle News" : 1},
+#    "www.bellevuereporter.com",
+#    [ "/feed/" ],
+#    "Test" )
+#d = """
+#<DIV style="padding:8px;
+#     font-size:44pt;
+#     -webkit-column-break-inside:avoid;"><P>
+#<B>Task force will tackle issues of racial justice, police reform</B>
+#<BR>Bellevue Reporter
+#Bellevue Reporter - Your local homepage.
+#Inslee names civil rights activists, pastors, and cops to panel that may forge ideas f#or new laws Task force will tackle issues of racial justice, police reform
+#Wire Service
+#</DIV>"""
+#d = x.munge_description(d)
+#print d
+#if x.fetch_news() == 0:
+#    print "Error fetching news, no items fetched."
+#x.shuffle_news()
diff --git a/camera_trigger.py b/camera_trigger.py
new file mode 100644 (file)
index 0000000..6e63dd1
--- /dev/null
@@ -0,0 +1,93 @@
+import glob
+import os
+import time
+import trigger
+import utils
+from datetime import datetime
+
+class any_camera_trigger(trigger.trigger):
+    def __init__(self):
+        self.triggers_in_the_past_seven_min = {
+            "driveway" :       0,
+            "frontdoor" :      0,
+            "cabin_driveway" : 0,
+            "backyard" :       0,
+        }
+        self.last_trigger = {
+            "driveway" :       0,
+            "frontdoor" :      0,
+            "cabin_driveway" : 0,
+            "backyard" :       0,
+        }
+
+    def choose_priority(self, camera, age):
+        base_priority_by_camera = {
+            "driveway" : 1,
+            "frontdoor" : 2,
+            "cabin_driveway" : 1,
+            "backyard" : 0,
+        }
+        priority = base_priority_by_camera[camera]
+        if age < 10:
+            priority += trigger.trigger.PRIORITY_HIGH
+        elif age < 30:
+            priority += trigger.trigger.PRIORITY_NORMAL + age
+        else:
+            priority += trigger.trigger.PRIORITY_LOW
+        return priority
+
+    def get_triggered_page_list(self):
+        triggers = []
+        cameras_with_recent_triggers = 0
+        camera_list = [ "driveway",
+                        "frontdoor",
+                        "cabin_driveway",
+                        "backyard" ]
+
+        now = time.time()
+        try:
+            # First pass, just see whether each camera is triggered and,
+            # if so, count how many times in the past 7m it has triggered.
+            for camera in camera_list:
+                file = "/timestamps/last_camera_motion_%s" % camera
+                ts = os.stat(file).st_ctime
+                age = now - ts
+                # print "Camera: %s, age %s" % (camera, age)
+                if age < 60:
+                    cameras_with_recent_triggers += 1
+                    time_since_last_trigger = now - self.last_trigger[camera]
+                    self.last_trigger[camera] = now
+                    if time_since_last_trigger < (60 * 7):
+                        self.triggers_in_the_past_seven_min[camera] += 1
+                    else:
+                        self.triggers_in_the_past_seven_min[camera] = 1
+
+            # Second pass, see whether we want to trigger due to
+            # camera activity we found.  All cameras timestamps were
+            # just considered and should be up-to-date.  Some logic to
+            # squelch spammy cameras unless more than one is
+            # triggered at the same time.
+            for camera in camera_list:
+                if self.last_trigger[camera] == now:
+                    ts = utils.timestamp()
+                    if (self.triggers_in_the_past_seven_min[camera] <= 4 or
+                        cameras_with_recent_triggers > 1):
+                        p = self.choose_priority(camera, age)
+                        print "%s: ****** %s[%d] CAMERA TRIGGER ******" % (
+                            ts, camera, p)
+                        triggers.append( ( "hidden/%s.html" % camera,
+                                           self.choose_priority(camera, age)) )
+                    else:
+                        print "%s: Camera %s too spammy, squelching it" % (
+                            ts, camera)
+        except Exception as e:
+            print e
+            pass
+
+        if len(triggers) == 0:
+            return None
+        else:
+            return triggers
+
+#x = any_camera_trigger()
+#print x.get_triggered_page_list()
diff --git a/chooser.py b/chooser.py
new file mode 100644 (file)
index 0000000..47a2cb7
--- /dev/null
@@ -0,0 +1,136 @@
+import os
+import random
+import re
+import sys
+import time
+import glob
+import constants
+import trigger
+
+class chooser(object):
+    """Base class of a thing that chooses pages"""
+    def get_page_list(self):
+        now = time.time()
+        valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
+        filenames = []
+        pages = [ f for f in os.listdir(constants.pages_dir)
+                  if os.path.isfile(os.path.join(constants.pages_dir, f))]
+        for page in pages:
+            result = re.match(valid_filename, page)
+            if result != None:
+                print('chooser: candidate page: "%s"' % page)
+                if (result.group(3) != "none"):
+                    freshness_requirement = int(result.group(3))
+                    last_modified = int(os.path.getmtime(
+                        os.path.join(constants.pages_dir, page)))
+                    age = (now - last_modified)
+                    if (age > freshness_requirement):
+                        print ('"%s" is too old.' % page)
+                        continue
+                filenames.append(page)
+        return filenames
+
+    def choose_next_page(self):
+        pass
+
+class weighted_random_chooser(chooser):
+    """Chooser that does it via weighted RNG"""
+    def __init__(self):
+        self.last_choice = ""
+        self.valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
+        self.pages = None
+        self.count = 0
+
+    def choose_next_page(self):
+        if (self.pages == None or
+            self.count % 100 == 0):
+            self.pages = self.get_page_list()
+
+        total_weight = 0
+        weights = []
+        for page in self.pages:
+            result = re.match(self.valid_filename, page)
+            if result != None:
+                weight = int(result.group(2))
+                weights.append(weight)
+                total_weight += weight
+
+        if (total_weight <= 0):
+            raise(error("No valid candidate pages found!"))
+
+        while True:
+            pick = random.randrange(0, total_weight - 1)
+            so_far = 0
+            for x in xrange(0, len(weights)):
+                so_far += weights[x]
+                if (so_far > pick and
+                    self.pages[x] != self.last_choice):
+                    self.last_choice = self.pages[x]
+                    self.count += 1
+                    return self.pages[x]
+
+class weighted_random_chooser_with_triggers(weighted_random_chooser):
+    """Same as WRC but has trigger events"""
+    def __init__(self, trigger_list):
+        weighted_random_chooser.__init__(self)
+        self.trigger_list = trigger_list
+        self.page_queue = set(())
+
+    def check_for_triggers(self):
+        triggered = False
+        for t in self.trigger_list:
+            x = t.get_triggered_page_list()
+            if x != None and len(x) > 0:
+                for y in x:
+                    self.page_queue.add(y)
+                    triggered = True
+        return triggered
+
+    def choose_next_page(self):
+        if (self.pages == None or
+            self.count % 100 == 0):
+            self.pages = self.get_page_list()
+
+        triggered = self.check_for_triggers()
+
+        # First try to satisfy from the page queue
+        if (len(self.page_queue) > 0):
+            print "Pulling page from queue"
+            page = None
+            priority = None
+            for t in self.page_queue:
+                if priority == None or t[1] > priority:
+                    page = t[0]
+                    priority = t[1]
+            self.page_queue.remove((page, priority))
+            return page, triggered
+
+        # Fall back on weighted random choice.
+        else:
+            return weighted_random_chooser.choose_next_page(self), False
+
+class rotating_chooser(chooser):
+    """Chooser that does it in a rotation"""
+    def __init__(self):
+        self.valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
+        self.pages = None
+        self.current = 0
+        self.count = 0
+
+    def choose_next_page(self):
+        if (self.pages == None or
+            self.count % 100 == 0):
+            self.pages = self.get_page_list()
+
+        if len(self.pages) == 0:
+            raise(error("No pages!"))
+
+        if (self.current >= len(self.pages)):
+            self.current = 0
+
+        page = self.pages[self.current]
+        self.current += 1
+        self.count += 1
+        return page
+
+#x = weighted_random_chooser_with_triggers(None)
diff --git a/cnn_rss_renderer.py b/cnn_rss_renderer.py
new file mode 100644 (file)
index 0000000..402d987
--- /dev/null
@@ -0,0 +1,48 @@
+import generic_news_rss_renderer
+import re
+
+class cnn_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
+    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
+        super(cnn_rss_renderer, self).__init__(
+            name_to_timeout_dict,
+            feed_site,
+            feed_uris,
+            page_title)
+        self.debug = 1
+
+    def debug_prefix(self):
+        return "cnn(%s)" % (self.page_title)
+
+    def get_headlines_page_prefix(self):
+        return "cnn-%s" % (self.page_title)
+
+    def get_details_page_prefix(self):
+        return "cnn-details-%s" % (self.page_title)
+
+    def munge_description(self, description):
+        description = re.sub('[Rr]ead full story for latest details.', '', description)
+        description = re.sub('<[^>]+>', '', description)
+        return description
+
+    def should_use_https(self):
+        return False
+
+    def item_is_interesting_for_headlines(self, title, description, item):
+        return "CNN.com" not in title
+
+    def item_is_interesting_for_article(self, title, description, item):
+        return len(description) >= 65
+
+# Test
+#x = cnn_rss_renderer(
+#    {"Fetch News" : 1,
+#     "Shuffle News" : 1},
+#    "rss.cnn.com",
+#    [ "/rss/cnn_topstories.rss",
+#      "/rss/money_latest.rss",
+#     "/rss/cnn_tech.rss",
+#    ],
+#    "Test" )
+#if x.fetch_news() == 0:
+#    print "Error fetching news, no items fetched."
+#x.shuffle_news()
diff --git a/constants.py b/constants.py
new file mode 100644 (file)
index 0000000..880fb85
--- /dev/null
@@ -0,0 +1,12 @@
+#!/usr/local/bin/python
+
+refresh_period_sec = 22
+render_period_sec = 30
+pages_dir = "/usr/local/export/www/kiosk/pages"
+
+seconds_per_minute = 60
+seconds_per_hour = seconds_per_minute * 60
+seconds_per_day = seconds_per_hour * 24
+
+myq_pagename = "myq_4_300.html"
+gcal_imminent_pagename = "hidden/gcal-imminent_0_none.html"
diff --git a/file_writer.py b/file_writer.py
new file mode 100644 (file)
index 0000000..aba7d8f
--- /dev/null
@@ -0,0 +1,42 @@
+import constants
+import os
+
+def remove_tricky_unicode(x):
+    try:
+        x = x.decode('utf-8')
+        x = x.replace(u"\u2018", "'").replace(u"\u2019", "'")
+        x = x.replace(u"\u201c", '"').replace(u"\u201d", '"')
+        x = x.replace(u"\u2e3a", "-").replace(u"\u2014", "-")
+    except:
+        pass
+    return x
+
+class file_writer:
+    def __init__(self, filename):
+        self.full_filename = os.path.join(constants.pages_dir,
+                                          filename)
+        self.f = open(self.full_filename, 'w')
+        self.xforms = [ remove_tricky_unicode ]
+
+    def add_xform(self, xform):
+        self.xforms.append(xform)
+
+    def write(self, data):
+        for xform in self.xforms:
+            data = xform(data)
+        self.f.write(data.encode('utf-8'))
+
+    def done(self):
+        self.f.close()
+
+    def close(self):
+        self.done()
+
+# Test
+#def toupper(x):
+#    return x.upper()
+#
+#fw = file_writer("test")
+#fw.add_xform(toupper)
+#fw.write(u"This is a \u201ctest\u201d. \n")
+#fw.done()
diff --git a/gcal_renderer.py b/gcal_renderer.py
new file mode 100644 (file)
index 0000000..1609e92
--- /dev/null
@@ -0,0 +1,270 @@
+from oauth2client.client import AccessTokenRefreshError
+import constants
+import datetime
+import file_writer
+import gdata
+import globals
+import os
+import renderer
+import sets
+import time
+
+class gcal_renderer(renderer.debuggable_abstaining_renderer):
+    """A renderer to fetch upcoming events from www.google.com/calendar"""
+
+    calendar_whitelist = sets.ImmutableSet([
+        'Alex\'s calendar',
+        'Family',
+        'Holidays in United States',
+        'Lynn Gasch',
+        'Lynn\'s Work',
+        '[email protected]',
+        'Scott Gasch External - Misc',
+        'Birthdays',  # <-- from g+ contacts
+    ])
+
+    class comparable_event(object):
+        """A helper class to sort events."""
+        def __init__(self, start_time, end_time, summary, calendar):
+            if start_time is None:
+                assert(end_time is None)
+            self.start_time = start_time
+            self.end_time = end_time
+            self.summary = summary
+            self.calendar = calendar
+
+        def __lt__(self, that):
+            if self.start_time is None and that.start_time is None:
+                return self.summary < that.summary
+            if self.start_time is None or that.start_time is None:
+                return self.start_time is None
+            return (self.start_time,
+                    self.end_time,
+                    self.summary,
+                    self.calendar) < (that.start_time,
+                                      that.end_time,
+                                      that.summary,
+                                      that.calendar)
+
+        def __str__(self):
+            return '[%s]&nbsp;%s' % (self.timestamp(), self.friendly_name())
+
+        def friendly_name(self):
+            name = self.summary
+            name = name.replace("countdown:", "")
+            return "<B>%s</B>" % name
+
+        def timestamp(self):
+            if self.start_time is None:
+                return "None"
+            elif (self.start_time.hour == 0):
+                return datetime.datetime.strftime(self.start_time,
+                                                   '%a %b %d %Y')
+            else:
+                return datetime.datetime.strftime(self.start_time,
+                                                  '%a %b %d %Y %H:%M%p')
+
+    def __init__(self, name_to_timeout_dict, oauth):
+        super(gcal_renderer, self).__init__(name_to_timeout_dict, True)
+        self.oauth = oauth
+        self.client = self.oauth.calendar_service()
+        self.sortable_events = []
+        self.countdown_events = []
+
+    def debug_prefix(self):
+        return "gcal"
+
+    def periodic_render(self, key):
+        self.debug_print('called for "%s"' % key)
+        if (key == "Render Upcoming Events"):
+            return self.render_upcoming_events()
+        elif (key == "Look For Triggered Events"):
+            return self.look_for_triggered_events()
+        else:
+            raise error('Unexpected operation')
+
+    def render_upcoming_events(self):
+        page_token = None
+        def format_datetime(x):
+            return datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%SZ')
+        time_min = datetime.datetime.now()
+        time_max = time_min + datetime.timedelta(95)
+        time_min, time_max = map(format_datetime, (time_min, time_max))
+        self.debug_print("time_min is %s" % time_min)
+        self.debug_print("time_max is %s" % time_max)
+
+        # Writes 2 files:
+        #  + "upcoming events",
+        #  + a countdown timer for a subset of events,
+        f = file_writer.file_writer('gcal_3_none.html')
+        f.write('<h1>Upcoming Calendar Events:</h1><hr>\n')
+        f.write('<center><table width=96%>\n')
+
+        g = file_writer.file_writer('countdown_3_7200.html')
+        g.write('<h1>Countdowns:</h1><hr><ul>\n')
+
+        try:
+            self.sortable_events = []
+            self.countdown_events = []
+            while True:
+                calendar_list = self.client.calendarList().list(
+                    pageToken=page_token).execute()
+                for calendar in calendar_list['items']:
+                    if (calendar['summary'] in gcal_renderer.calendar_whitelist):
+                        events = self.client.events().list(
+                            calendarId=calendar['id'],
+                            singleEvents=True,
+                            timeMin=time_min,
+                            timeMax=time_max,
+                            maxResults=50).execute()
+
+                        def parse_date(x):
+                            y = x.get('date')
+                            if y:
+                                y = datetime.datetime.strptime(y, '%Y-%m-%d')
+                            else:
+                                y = x.get('dateTime')
+                                if y:
+                                    y = datetime.datetime.strptime(y[:-6],
+                                                         '%Y-%m-%dT%H:%M:%S')
+                                else:
+                                    y = None
+                            return y
+
+                        for event in events['items']:
+                            try:
+                                summary = event['summary']
+                                self.debug_print("event '%s' (%s to %s)" % (
+                                    summary, event['start'], event['end']))
+                                start = parse_date(event['start'])
+                                end = parse_date(event['end'])
+                                self.sortable_events.append(
+                                    gcal_renderer.comparable_event(start,
+                                                                   end,
+                                                                   summary,
+                                                                   calendar['summary']))
+                                if ('countdown' in summary or
+                                    'Holidays' in calendar['summary'] or
+                                    'Countdown' in summary):
+                                    self.debug_print("event is countdown worthy")
+                                    self.countdown_events.append(
+                                        gcal_renderer.comparable_event(start,
+                                                                       end,
+                                                                       summary,
+                                                                       calendar['summary']))
+                            except Exception as e:
+                                print("gcal unknown exception, skipping event.");
+                    else:
+                        self.debug_print("Skipping calendar '%s'" % calendar['summary'])
+                page_token = calendar_list.get('nextPageToken')
+                if not page_token: break
+
+            self.sortable_events.sort()
+            upcoming_sortable_events = self.sortable_events[:12]
+            for event in upcoming_sortable_events:
+                self.debug_print("sorted event: %s" % event.friendly_name())
+                f.write("""
+<tr>
+  <td style="padding-right: 1em;">
+    %s
+  </td>
+  <td style="padding-left: 1em;">
+    %s
+  </td>
+</tr>\n""" % (event.timestamp(), event.friendly_name()))
+            f.write('</table></center>\n')
+            f.close()
+
+            self.countdown_events.sort()
+            upcoming_countdown_events = self.countdown_events[:12]
+            now = datetime.datetime.now()
+            count = 0
+            timestamps = { }
+            for event in upcoming_countdown_events:
+                eventstamp = event.start_time
+                delta = eventstamp - now
+                name = event.friendly_name()
+                x = int(delta.total_seconds())
+                if x > 0:
+                    identifier = "id%d" % count
+                    days = divmod(x, constants.seconds_per_day)
+                    hours = divmod(days[1], constants.seconds_per_hour)
+                    minutes = divmod(hours[1], constants.seconds_per_minute)
+                    g.write('<li><SPAN id="%s">%d days, %02d:%02d</SPAN> until %s</li>\n' % (identifier, days[0], hours[0], minutes[0], name))
+                    timestamps[identifier] = time.mktime(eventstamp.timetuple())
+                    count += 1
+                    self.debug_print("countdown to %s is %dd %dh %dm" % (
+                        name, days[0], hours[0], minutes[0]))
+            g.write('</ul>')
+            g.write('<SCRIPT>\nlet timestampMap = new Map([')
+            for x in timestamps.keys():
+                g.write('    ["%s", %f],\n' % (x, timestamps[x] * 1000.0))
+            g.write(']);\n\n')
+            g.write("""
+// Pad things with a leading zero if necessary.
+function pad(n) {
+    return (n < 10) ? ("0" + n) : n;
+}
+
+// Return an 's' if things are plural.
+function plural(n) {
+    return (n == 1) ? "" : "s";
+}
+
+// Periodic function to run the page timers.
+var fn = setInterval(function() {
+    var now = new Date().getTime();
+    for (let [id, timestamp] of timestampMap) {
+        var delta = timestamp - now;
+
+        if (delta > 0) {
+            var days = Math.floor(delta / (1000 * 60 * 60 * 24));
+            var hours = pad(Math.floor((delta % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60)));
+            var minutes = pad(Math.floor((delta % (1000 * 60 * 60)) / (1000 * 60)));
+            var seconds = pad(Math.floor((delta % (1000 * 60)) / 1000));
+
+            var s = days + " day" + plural(days) + ", ";
+            s = s + hours + ":" + minutes;
+            document.getElementById(id).innerHTML = s;
+        } else {
+            document.getElementById(id).innerHTML = "EXPIRED";
+        }
+    }
+}, 1000);
+</script>""");
+            g.close()
+            return True
+        except (gdata.service.RequestError, AccessTokenRefreshError):
+            print("********* TRYING TO REFRESH GCAL CLIENT *********")
+            self.oauth.refresh_token()
+            self.client = self.oauth.calendar_service()
+            return False
+        except:
+            raise
+
+    def look_for_triggered_events(self):
+        f = file_writer.file_writer(constants.gcal_imminent_pagename)
+        f.write('<h1>Imminent Upcoming Calendar Events:</h1>\n<hr>\n')
+        f.write('<center><table width=99%>\n')
+        now = datetime.datetime.now()
+        count = 0
+        for event in self.sortable_events:
+            eventstamp = event.start_time
+            delta = eventstamp - now
+            x = int(delta.total_seconds())
+            if x > 0 and x <= constants.seconds_per_minute * 3:
+                days = divmod(x, constants.seconds_per_day)
+                hours = divmod(days[1], constants.seconds_per_hour)
+                minutes = divmod(hours[1], constants.seconds_per_minute)
+                eventstamp = event.start_time
+                name = event.friendly_name()
+                calendar = event.calendar
+                f.write("<LI> %s (%s) upcoming in %d minutes.\n" % (name, calendar, minutes[0]))
+                count += 1
+        f.write("</table>")
+        f.close()
+        if count > 0:
+            globals.put("gcal_triggered", True)
+        else:
+            globals.put("gcal_triggered", False)
+        return True
diff --git a/gcal_trigger.py b/gcal_trigger.py
new file mode 100644 (file)
index 0000000..4e2f65e
--- /dev/null
@@ -0,0 +1,15 @@
+import constants
+import globals
+import trigger
+
+class gcal_trigger(trigger.trigger):
+    def get_triggered_page_list(self):
+        if globals.get("gcal_triggered") == True:
+            print "****** gcal has an imminent upcoming event. ******"
+            return (constants.gcal_imminent_pagename, trigger.trigger.PRIORITY_HIGH)
+        else:
+            return None
+
+#globals.put('gcal_triggered', True)
+#x = gcal_trigger()
+#x.get_triggered_page_list()
diff --git a/gdata_oauth.py b/gdata_oauth.py
new file mode 100644 (file)
index 0000000..64934eb
--- /dev/null
@@ -0,0 +1,205 @@
+# https://developers.google.com/accounts/docs/OAuth2ForDevices
+# https://developers.google.com/drive/web/auth/web-server
+# https://developers.google.com/google-apps/calendar/v3/reference/calendars
+# https://developers.google.com/picasa-web/
+
+import sys
+import urllib
+try:
+    import httplib     # python2
+except ImportError:
+    import http.client # python3
+import os.path
+import json
+import time
+from oauth2client.client import OAuth2Credentials
+import gdata.calendar.service
+import gdata.docs.service
+import gdata.photos.service, gdata.photos
+from apiclient.discovery import build
+import httplib2
+from apiclient.discovery import build
+import datetime
+import ssl
+
class OAuth:
    """Manage a Google OAuth2 token via the "device flow".

    See https://developers.google.com/accounts/docs/OAuth2ForDevices.
    Obtains a device/user code, polls Google until the user approves,
    caches the resulting token on disk, refreshes it on demand, and
    vends authorized service objects for the Photos / Drive / Calendar
    APIs.
    """

    def __init__(self, client_id, client_secret):
        print("gdata: initializing oauth token...")
        self.client_id = client_id
        self.client_secret = client_secret
        self.user_code = None
        self.token = None             # parsed token dict, or None if absent
        self.device_code = None
        self.verification_url = None  # fixed typo: was "verfication_url"
        self.token_file = 'client_secrets.json'
        # OAuth scopes to request.
        self.scope = [
            'https://www.googleapis.com/auth/photoslibrary.readonly',
        ]
        self.host = 'accounts.google.com'
        self.ssl_ctx = None           # created by reset_connection() below
        self.reset_connection()
        self.load_token()
        self.last_action = 0          # time of the last refresh attempt

    # This setup is isolated because it eventually generates a BadStatusLine
    # exception, after which we always get httplib.CannotSendRequest errors.
    # When this happens, we try re-creating the connection.
    def reset_connection(self):
        self.ssl_ctx = ssl.create_default_context(cafile='/usr/local/etc/ssl/cert.pem')
        httplib.HTTPConnection.debuglevel = 2
        self.conn = httplib.HTTPSConnection(self.host, context=self.ssl_ctx)

    def load_token(self):
        """Load a previously saved token from disk, if one exists."""
        if os.path.isfile(self.token_file):
            with open(self.token_file) as f:
                self.token = json.loads(f.read())

    def save_token(self):
        """Persist the current token to disk."""
        with open(self.token_file, 'w') as f:
            f.write(json.dumps(self.token))

    def has_token(self):
        """Return True if we currently hold a token (and say so on stdout)."""
        if self.token is not None:
            print("gdata: we have a token!")
        else:
            print("gdata: we have no token.")
        return self.token is not None

    def get_user_code(self):
        """Start the device flow by fetching device/user codes from Google.

        On success, records the device code, verification URL and polling
        interval, and returns the user code.  Exits the process on failure.
        """
        self.conn.request(
            "POST",
            "/o/oauth2/device/code",
            urllib.urlencode({
                'client_id': self.client_id,
                'scope'    : ' '.join(self.scope)
            }),
            {"Content-type": "application/x-www-form-urlencoded"})
        response = self.conn.getresponse()
        if response.status == 200:
            data = json.loads(response.read())
            self.device_code = data['device_code']
            self.user_code = data['user_code']
            self.verification_url = data['verification_url']
            self.retry_interval = data['interval']
        else:
            print("gdata: %d" % response.status)
            print(response.read())
            sys.exit()
        return self.user_code

    def get_new_token(self):
        """Poll Google until the user approves and we receive a token."""
        # Start the device flow first if it is not already in progress.
        if self.user_code is None:
            print("gdata: getting user code")
            self.get_user_code()

        while self.token is None:
            self.conn.request(
                "POST",
                "/o/oauth2/token",
                urllib.urlencode({
                    'client_id'     : self.client_id,
                    'client_secret' : self.client_secret,
                    'code'          : self.device_code,
                    'grant_type'    : 'http://oauth.net/grant_type/device/1.0'
                    }),
                {"Content-type": "application/x-www-form-urlencoded"})
            response = self.conn.getresponse()
            if response.status == 200:
                data = json.loads(response.read())
                if 'access_token' in data:
                    self.token = data
                    self.save_token()
                else:
                    # User has not approved yet; honor the server's
                    # requested polling interval (plus a little slack).
                    time.sleep(self.retry_interval + 2)
            else:
                print("gdata: failed to get token")
                print(response.status)
                print(response.read())

    def refresh_token(self):
        """Exchange our refresh token for a fresh access token.

        Rate-limited to one attempt per 30 seconds.  Returns True on
        success, False otherwise.
        """
        if self.checking_too_often():
            print("gdata: not refreshing yet, too soon...")
            return False
        print('gdata: trying to refresh oauth token...')
        self.reset_connection()
        refresh_token = self.token['refresh_token']
        self.conn.request(
            "POST",
            "/o/oauth2/token",
            urllib.urlencode({
                'client_id'     : self.client_id,
                'client_secret' : self.client_secret,
                'refresh_token' : refresh_token,
                'grant_type'    : 'refresh_token'
                }),
            {"Content-type": "application/x-www-form-urlencoded"})

        response = self.conn.getresponse()
        self.last_action = time.time()
        if response.status == 200:
            data = json.loads(response.read())
            if 'access_token' in data:
                self.token = data
                # The refresh response never includes a new refresh token;
                # carry the old one forward.  Always save the new token
                # (the original only saved when the refresh token was
                # absent, which happened to be every time in practice).
                if 'refresh_token' not in self.token:
                    self.token['refresh_token'] = refresh_token
                self.save_token()
                return True
        print("gdata: unexpected response %d to renewal request" % response.status)
        print(response.read())
        return False

    def checking_too_often(self):
        """True if the last refresh attempt was less than 30 seconds ago."""
        now = time.time()
        return (now - self.last_action) <= 30

    # https://developers.google.com/picasa-web/
    def photos_service(self):
        """Return a gdata PhotosService carrying our bearer token."""
        headers = {
            "Authorization": "%s %s"  % (self.token['token_type'], self.token['access_token'])
        }
        client = gdata.photos.service.PhotosService(additional_headers=headers)
        return client

    def _build_service(self, api, version):
        """Build an authorized apiclient service object for the given API."""
        cred = OAuth2Credentials(self.token['access_token'],
                                 self.client_id,
                                 self.client_secret,
                                 self.token['refresh_token'],
                                 datetime.datetime.now(),
                                 'http://accounts.google.com/o/oauth2/token',
                                 'KitchenKiosk/0.9')
        http = httplib2.Http(disable_ssl_certificate_validation=True)
        http = cred.authorize(http)
        return build(api, version, http)

    # https://developers.google.com/drive/
    def docs_service(self):
        return self._build_service('drive', 'v2')

    # https://developers.google.com/google-apps/calendar/
    def calendar_service(self):
        return self._build_service('calendar', 'v3')
diff --git a/gdocs_renderer.py b/gdocs_renderer.py
new file mode 100644 (file)
index 0000000..125d5b4
--- /dev/null
@@ -0,0 +1,85 @@
+import file_writer
+import renderer
+import re
+import sets
+import gdata_oauth
+import secrets
+
class gdocs_renderer(renderer.debuggable_abstaining_renderer):
    """A renderer that fetches and munges docs from drive.google.com."""

    # Drive query selecting the documents we want to mirror onto the kiosk.
    query = 'title="Grocery (go/grocery)" OR title="Costco List (go/costco)"'

    def __init__(self, name_to_timeout_dict, oauth):
        super(gdocs_renderer, self).__init__(name_to_timeout_dict, False)
        self.oauth = oauth
        self.client = self.oauth.docs_service()

    def debug_prefix(self):
        return "gdocs"

    def periodic_render(self, key):
        """Fetch matching docs as HTML and write a kiosk page for each.

        Returns True on success, False when the fetch failed (after
        refreshing the OAuth token for the next attempt).
        """
        # Not imported at module scope in the original file; needed for the
        # "last updated" stamp below.
        import datetime

        result = []
        page_token = None
        while True:
            try:
                param = {'q': self.query}
                if page_token:
                    param['pageToken'] = page_token
                print("QUERY: %s" % param['q'])
                files = self.client.files().list(**param).execute()
                result.extend(files['items'])
                page_token = files.get('nextPageToken')
                if not page_token:
                    break
            except Exception:
                # Token has probably expired; refresh it and retry on the
                # next render cycle.
                print("********* TRYING TO REFRESH GDOCS CLIENT *********")
                self.oauth.refresh_token()
                self.client = self.oauth.docs_service()
                return False

        def boost_font_size(matchobj):
            # Scale the document's font sizes up for kiosk readability.
            return "font-size:%dpt" % int(int(matchobj.group(1)) * 2.33)

        for doc in result:
            title = doc['title']
            url = doc['exportLinks']['text/html']
            self.debug_print("%s (%s)\n" % (title, doc['id']))
            print("Fetching %s..." % url)
            resp, contents = self.client._http.request(url)
            print(resp.status)
            print(contents)
            if resp.status != 200:
                self.debug_print("error: %s" % resp)
                return False
            print("Got contents.")
            contents = re.sub('<body class="..">', '', contents)
            contents = contents.replace('</body>', '')
            contents = re.sub('font-size:([0-9]+)pt', boost_font_size, contents)
            # Page name encodes kiosk hints: priority 2, refresh every 3600s.
            f = file_writer.file_writer('%s_2_3600.html' % title)
            now = datetime.datetime.now()
            f.write("""
<H1>%s</H1>
<!-- Last updated at %s -->
<HR>
<DIV STYLE="-webkit-column-count: 2; -moz-column-count: 2; column-count: 2;">
%s
</DIV>""" % (title, now, contents))
            f.close()
        return True
+
+
+#oauth = gdata_oauth.OAuth(secrets.google_client_id,
+#                          secrets.google_client_secret)
+#x = gdocs_renderer({"Testing", 12345},
+#                   oauth)
+#x.periodic_render("Test")
diff --git a/generic_news_rss_renderer.py b/generic_news_rss_renderer.py
new file mode 100644 (file)
index 0000000..b87ab05
--- /dev/null
@@ -0,0 +1,188 @@
+import file_writer
+import grab_bag
+import renderer
+import httplib
+import page_builder
+import profanity_filter
+import random
+import re
+import xml.etree.ElementTree as ET
+
class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
    """Abstract renderer that fetches RSS feeds and renders headline and
    article-detail pages.

    Subclasses customize behavior by overriding the extension points:
    debug_prefix, the page-prefix getters, should_use_https,
    should_profanity_filter, the find_*/munge_* hooks, and the
    item_is_interesting_* predicates.
    """

    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
        super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict,
                                                        False)
        self.debug = 1
        self.feed_site = feed_site          # hostname of the RSS server
        self.feed_uris = feed_uris          # list of feed paths on that host
        self.page_title = page_title
        self.news = grab_bag.grab_bag()     # short headline blurbs
        self.details = grab_bag.grab_bag()  # longer article blurbs
        self.filter = profanity_filter.profanity_filter()

    # --- Extension points; subclasses are expected to override these. ---

    def debug_prefix(self):
        pass

    def get_headlines_page_prefix(self):
        pass

    def get_details_page_prefix(self):
        pass

    def should_use_https(self):
        # Falsy (None) by default, i.e. plain http.
        pass

    def should_profanity_filter(self):
        return False

    def find_title(self, item):
        return item.findtext('title')

    def munge_title(self, title):
        return title

    def find_description(self, item):
        return item.findtext('description')

    def munge_description(self, description):
        # Strip embedded HTML tags out of the description.
        description = re.sub('<[^>]+>', '', description)
        return description

    def find_link(self, item):
        return item.findtext('link')

    def find_image(self, item):
        return item.findtext('image')

    def item_is_interesting_for_headlines(self, title, description, item):
        pass

    def item_is_interesting_for_article(self, title, description, item):
        pass

    def periodic_render(self, key):
        if key == "Fetch News":
            return self.fetch_news()
        elif key == "Shuffle News":
            return self.shuffle_news()
        else:
            # Was "raise error(...)" -- 'error' is undefined and would have
            # raised NameError; raise a real exception type instead.
            raise ValueError('Unexpected operation: %s' % key)

    def shuffle_news(self):
        """Pick random headlines/details and write their pages.

        Returns False when there is not yet enough fetched material.
        """
        headlines = page_builder.page_builder()
        headlines.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
        headlines.set_title("%s" % self.page_title)
        subset = self.news.subset(4)
        if subset is None:
            self.debug_print("Not enough messages to choose from.")
            return False
        for msg in subset:
            headlines.add_item(msg)
        f = file_writer.file_writer('%s_4_none.html' % (
            self.get_headlines_page_prefix()))
        headlines.render_html(f)
        f.close()

        details = page_builder.page_builder()
        details.set_layout(page_builder.page_builder.LAYOUT_ONE_ITEM)
        details.set_title("%s" % self.page_title)
        subset = self.details.subset(1)
        if subset is None:
            self.debug_print("Not enough details to choose from.")
            return False
        for msg in subset:
            blurb = msg
            blurb += "</TD>\n"
            details.add_item(blurb)
        g = file_writer.file_writer('%s_6_none.html' % (
            self.get_details_page_prefix()))
        details.render_html(g)
        g.close()
        return True

    def fetch_news(self):
        """Fetch and parse all feeds; return True if any item was kept."""
        count = 0
        self.news.clear()
        self.details.clear()

        for uri in self.feed_uris:
            if self.should_use_https():
                self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
                self.conn = httplib.HTTPSConnection(self.feed_site)
            else:
                self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
                self.conn = httplib.HTTPConnection(self.feed_site)
            self.conn.request(
                "GET",
                uri,
                None,
                {"Accept-Charset": "utf-8"})
            response = self.conn.getresponse()
            if response.status != 200:
                print("%s: RSS fetch_news error, response: %d" % (self.page_title,
                                                                  response.status))
                self.debug_print(response.read())
                return False

            rss = ET.fromstring(response.read())
            channel = rss[0]
            # Iterate the channel element directly; Element.getchildren()
            # was deprecated and removed in Python 3.9.
            for item in channel:
                title = self.find_title(item)
                if title is not None:
                    title = self.munge_title(title)
                # Use the overridable hooks consistently (the original code
                # bypassed find_description/find_link/find_image here,
                # silently ignoring subclass overrides).
                description = self.find_description(item)
                if description is not None:
                    description = self.munge_description(description)
                link = self.find_link(item)
                image = self.find_image(item)

                if (title is None or
                    not self.item_is_interesting_for_headlines(title,
                                                               description,
                                                               item)):
                    self.debug_print('Item "%s" is not interesting' % title)
                    continue

                if (self.should_profanity_filter() and
                    (self.filter.contains_bad_words(title) or
                     self.filter.contains_bad_words(description))):
                    self.debug_print('Found bad words in item "%s"' % title)
                    continue

                blurb = u"""<DIV style="padding:8px;
                                 font-size:34pt;
                                 -webkit-column-break-inside:avoid;">"""
                if image is not None:
                    blurb += '<IMG SRC="%s" ALIGN=LEFT HEIGHT=115 style="padding:8px;">\n' % image
                blurb += '<P><B>%s</B>' % title

                if (description is not None and
                    self.item_is_interesting_for_article(title, description, item)):
                    # The details page also gets the description, rendered
                    # at a larger font size.
                    longblurb = blurb
                    longblurb += "<BR>"
                    longblurb += description
                    longblurb += "</DIV>"
                    longblurb = longblurb.replace("font-size:34pt",
                                                  "font-size:44pt")
                    self.details.add(longblurb.encode('utf-8', errors='ignore'))

                blurb += "</DIV>"
                self.news.add(blurb.encode('utf-8', errors='ignore'))
                count += 1
        return count > 0
+
+# Test
+#x = generic_news_rss_renderer(
+#    {"Fetch News" : 1,
+#     "Shuffle News" : 1},
+#    "rss.cnn.com",
+#    [ "/rss/generic_news_topstories.rss",
+#      "/rss/money_latest.rss",
+#      "/rss/generic_news_tech.rss",
+#    ],
+#    "Test" )
+#if x.fetch_news() == 0:
+#    print "Error fetching news, no items fetched."
+#x.shuffle_news()
diff --git a/gkeep_renderer.py b/gkeep_renderer.py
new file mode 100644 (file)
index 0000000..c882526
--- /dev/null
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+
+import constants
+import file_writer
+import gkeepapi
+import os
+import re
+import renderer
+import secrets
+
class gkeep_renderer(renderer.debuggable_abstaining_renderer):
    """Renders Google Keep notes labeled 'kiosk' as kiosk pages."""

    def __init__(self, name_to_timeout_dict):
        super(gkeep_renderer, self).__init__(name_to_timeout_dict, True)
        self.keep = gkeepapi.Keep()
        success = self.keep.login(secrets.google_keep_username,
                                  secrets.google_keep_password)
        if success:
            self.debug_print("Connected with gkeep.")
        else:
            self.debug_print("Error connecting with gkeep.")
        # Map Keep's color names to darker, kiosk-friendly backgrounds.
        self.colors_by_name = {
            'white' : '#002222',
            'green' : '#345920',
            'darkblue' : '#1F3A5F',
            'blue' : '#2D545E',
            'orange' : '#604A19',
            'red' : '#5C2B29',
            'purple' : '#42275E',
            'pink' : '#5B2245',
            'yellow' : '#635D19',
            'brown' : '#442F19',
            'gray' : '#3c3f4c',
            'teal' : '#16504B'
        }

    def debug_prefix(self):
        return "gkeep"

    def periodic_render(self, key):
        """Sync Keep and write one HTML page per non-empty 'kiosk' note.

        Empty notes have any stale page file removed instead.
        """
        # U+2611 (ballot box with check) marks completed checklist items.
        strikethrough = re.compile(u'\u2611([^\n]*)\n', re.UNICODE)
        linkify = re.compile(r'.*(https?:\/\/\S+).*')

        self.keep.sync()
        result_list = self.keep.find(labels=[self.keep.findLabel('kiosk')])
        for note in result_list:
            title = note.title
            title = title.replace(" ", "-")
            title = title.replace("/", "")

            # Page name encodes kiosk hints: priority 2, refresh 3600s.
            filename = "%s_2_3600.html" % title
            contents = note.text + "\n"
            self.debug_print("Note title '%s'" % title)
            if contents != '' and not contents.isspace():
                # Render checked items as gray strikethrough; the backtick
                # is a placeholder swapped back to the checkbox glyph.
                contents = strikethrough.sub(r'<font color="#999999">` <del>\1</del></font>\n', contents)
                contents = contents.replace('`', u'\u2611')
                contents = linkify.sub(r'<a href="\1">\1</a>', contents)
                individual_lines = contents.split("\n")
                num_lines = len(individual_lines)
                max_length = max(len(x) for x in individual_lines)
                contents = contents.replace("\n", "<BR>\n")
                color = note.color.name.lower()
                if color in self.colors_by_name:
                    color = self.colors_by_name[color]
                else:
                    self.debug_print("Unknown color '%s'" % color)
                f = file_writer.file_writer(filename)
                f.write("""
<STYLE type="text/css">
a:link { color:#88bfbf; }
</STYLE>
<DIV STYLE="border-radius: 25px; border-style: solid; padding: 20px; background-color: %s; color: #eeeeee; font-size: x-large;">
<p style="color: #ffffff; font-size:larger"><B>%s</B></p>
<HR style="border-top: 3px solid white;">""" % (color, note.title))
                if num_lines >= 12 and max_length < 120:
                    # Long, narrow notes read better split into two columns.
                    self.debug_print("%d lines (max=%d chars): two columns" %
                                     (num_lines, max_length))
                    # These writes are not %-formatted, so a literal "%"
                    # is correct (the original "100%%" emitted a stray
                    # percent sign into the HTML).
                    f.write("<TABLE BORDER=0 WIDTH=100%><TR valign=\"top\">")
                    f.write("<TD WIDTH=50% style=\"color:#eeeeee; font-size:large\">\n")
                    f.write("<FONT>")
                    count = 0
                    for x in individual_lines:
                        f.write(x + "<BR>\n")
                        count += 1
                        # Floor division: "/" yields a float under Python 3
                        # and the comparison never matched for odd counts.
                        if count == num_lines // 2:
                            f.write("</FONT></TD>\n")
                            f.write("<TD WIDTH=50% style=\"color:#eeeeee; font-size:large\">\n")
                            f.write("<FONT>")
                    f.write("</FONT></TD></TR></TABLE></DIV>\n")
                else:
                    self.debug_print("%d lines (max=%d chars): one column" %
                                     (num_lines, max_length))
                    f.write("<FONT>%s</FONT>" % contents)
                f.write("</DIV>")
                f.close()
            else:
                # Empty note: remove any stale page previously written.
                self.debug_print("Note is empty, deleting %s." % filename)
                stale = os.path.join(constants.pages_dir, filename)
                try:
                    os.remove(stale)
                except OSError:
                    pass
        return True
+
+# Test
+#x = gkeep_renderer({"Test", 1234})
+#x.periodic_render("Test")
diff --git a/globals.py b/globals.py
new file mode 100644 (file)
index 0000000..8420ebd
--- /dev/null
@@ -0,0 +1,11 @@
# Simple process-wide key/value store shared between renderers and triggers.
data = {}

def put(key, value):
    """Store value under key, replacing any previous value."""
    data[key] = value

def get(key):
    """Return the value stored under key, or None if it was never put()."""
    return data.get(key)
+
diff --git a/grab_bag.py b/grab_bag.py
new file mode 100644 (file)
index 0000000..49582fb
--- /dev/null
@@ -0,0 +1,29 @@
+import random
+
class grab_bag(object):
    """An unordered collection of unique items supporting random sampling."""

    def __init__(self):
        self.contents = set()

    def clear(self):
        """Remove all items."""
        self.contents.clear()

    def add(self, item):
        """Add item; duplicates are ignored (set semantics)."""
        self.contents.add(item)

    def add_all(self, collection):
        """Add every item in collection."""
        for x in collection:
            self.add(x)

    def subset(self, count):
        """Return a random sample of count items, or None if too few held.

        random.sample requires a sequence: sampling directly from a set
        was deprecated in Python 3.9 and removed in 3.11, so convert first.
        """
        if len(self.contents) < count:
            return None
        return random.sample(list(self.contents), count)

    def size(self):
        """Return the number of items held."""
        return len(self.contents)
+
+#x = grab_bag()
+#x.add_all([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+#print x.subset(3)
diff --git a/health_renderer.py b/health_renderer.py
new file mode 100644 (file)
index 0000000..55e4cf9
--- /dev/null
@@ -0,0 +1,137 @@
+import constants
+import file_writer
+import os
+import renderer
+import time
+
class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
    """Renders a status grid showing the age of periodic cronjob timestamps.

    Each monitored job touches a file under /timestamps/ when it runs.  A
    cell is green when its file is younger than the allowed limit and red
    (blinking, via the CSS "invalid" animation below) when it is overdue.
    """

    def __init__(self, name_to_timeout_dict):
        super(periodic_health_renderer, self).__init__(name_to_timeout_dict, False)

    def debug_prefix(self):
        return "health"

    def periodic_render(self, key):
        # Page name encodes kiosk hints: priority 6, refresh every 300s.
        f = file_writer.file_writer('periodic-health_6_300.html')
        timestamps = '/timestamps/'
        days = constants.seconds_per_day
        hours = constants.seconds_per_hour
        mins = constants.seconds_per_minute
        # Maximum allowed age, in seconds, for each timestamp file.
        limits = {
            timestamps + 'last_rsnapshot_hourly'          : hours * 24,
            timestamps + 'last_rsnapshot_daily'           : days * 3,
            timestamps + 'last_rsnapshot_weekly'          : days * 14,
            timestamps + 'last_rsnapshot_monthly'         : days * 70,

            timestamps + 'last_zfssnapshot_hourly'        : hours * 5,
            timestamps + 'last_zfssnapshot_daily'         : hours * 36,
            timestamps + 'last_zfssnapshot_weekly'        : days * 9,
            timestamps + 'last_zfssnapshot_monthly'       : days * 70,
            timestamps + 'last_zfssnapshot_cleanup'       : hours * 24,

            timestamps + 'last_disk_selftest_short'       : days * 14,
            timestamps + 'last_disk_selftest_long'        : days * 31,
            timestamps + 'last_zfs_scrub'                 : days * 9,
            timestamps + 'last_zfs_scrub_backup'          : days * 9,

            timestamps + 'last_zfsxfer_backup.house'      : hours * 36,
            timestamps + 'last_zfsxfer_ski.dyn.guru.org'  : days * 7,
            timestamps + 'last_photos_sync'               : hours * 8,
            timestamps + 'last_disk_selftest_backup_short': days * 14,
            timestamps + 'last_disk_selftest_backup_long' : days * 31,

            timestamps + 'last_healthy_wifi'              : mins * 10,
            timestamps + 'last_healthy_network'           : mins * 10,
            timestamps + 'last_scott_sync'                : days * 2,
        }
        self.write_header(f)

        now = time.time()
        n = 0
        for x in sorted(limits):
            # NOTE(review): os.stat raises OSError if a timestamp file is
            # missing, aborting the whole page -- presumably the files are
            # guaranteed to exist; confirm.
            ts = os.stat(x).st_mtime
            age = now - ts
            self.debug_print("%s -- age is %ds, limit is %ds" % (x, age, limits[x]))
            if age < limits[x]:
                # Healthy: green cell.
                f.write('<TD BGCOLOR="#007010" HEIGHT=100 WIDTH=33% STYLE="text-size:60%; vertical-align: middle;">\n')
            else:
                # Overdue: red cell with the blinking "invalid" animation.
                f.write('<TD BGCOLOR="#990000" HEIGHT=100 WIDTH=33% CLASS="invalid" STYLE="text-size:60%; vertical-align:middle;">\n')
            f.write("  <CENTER><FONT SIZE=-2>\n")

            # Derive a human-readable label and age (days, HH:MM) for the cell.
            name = x.replace(timestamps, "")
            name = name.replace("last_", "")
            name = name.replace("_", "&nbsp;")
            days = divmod(age, constants.seconds_per_day)
            hours = divmod(days[1], constants.seconds_per_hour)
            minutes = divmod(hours[1], constants.seconds_per_minute)

            self.debug_print("%s is %d days %02d:%02d old." % (
                name, days[0], hours[0], minutes[0]))
            f.write("%s<BR>\n<B>%d</b> days <B>%02d</B>:<B>%02d</B> old.\n" % (
                name, days[0], hours[0], minutes[0]))
            f.write("</FONT></CENTER>\n</TD>\n\n")
            n += 1
            # Three cells per table row.
            if n % 3 == 0:
                f.write("</TR>\n<TR>\n<!-- ------------------- -->\n")
        self.write_footer(f)
        f.close()
        return True

    def write_header(self, f):
        """Write the page head: blink animation CSS, anti-cache headers, table open."""
        f.write("""
<HTML>
<HEAD>
<STYLE>
@-webkit-keyframes invalid {
  from { background-color: #ff6400; }
  to { background-color: #ff0000; }
  padding-right: 25px;
  padding-left: 25px;
}
@-moz-keyframes invalid {
  from { background-color: #ff6400; }
  to { background-color: #ff0000; }
  padding-right: 25px;
  padding-left: 25px;
}
@-o-keyframes invalid {
  from { background-color: #ff6400; }
  to { background-color: #ff0000; }
  padding-right: 25px;
  padding-left: 25px;
}
@keyframes invalid {
  from { background-color: #ff6400; }
  to { background-color: #ff0000; }
  padding-right: 25px;
  padding-left: 25px;
}
.invalid {
  -webkit-animation: invalid 1s infinite; /* Safari 4+ */
  -moz-animation:    invalid 1s infinite; /* Fx 5+ */
  -o-animation:      invalid 1s infinite; /* Opera 12+ */
  animation:         invalid 1s infinite; /* IE 10+ */
}
</STYLE>
<meta http-equiv="cache-control" content="max-age=0" />
<meta http-equiv="cache-control" content="no-cache" />
<meta http-equiv="expires" content="0" />
<meta http-equiv="expires" content="Tue, 01 Jan 1980 1:00:00 GMT" />
<meta http-equiv="pragma" content="no-cache" />
</HEAD>
<BODY>
<H1>Periodic Cronjob Health Report</H1>
<HR>
<CENTER>
<TABLE BORDER=0 WIDTH=99% style="font-size:16pt">
<TR>
""")

    def write_footer(self, f):
        """Close the table and the page."""
        f.write("""
</TR>
</TABLE>
</BODY>
</HTML>""")
+
+#test = periodic_health_renderer({"Test", 123})
diff --git a/kiosk.py b/kiosk.py
new file mode 100755 (executable)
index 0000000..379e196
--- /dev/null
+++ b/kiosk.py
@@ -0,0 +1,274 @@
+#!/usr/local/bin/python
+
+import sys
+import traceback
+import os
+from threading import Thread
+import time
+from datetime import datetime
+import constants
+import renderer
+import renderer
+import renderer_catalog
+import chooser
+import logging
+import trigger_catalog
+import utils
+
def thread_change_current():
    """Thread body: periodically swap which page the kiosk displays.

    Loops forever.  Each second it asks the weighted-random chooser for
    the next page.  If a trigger fired (e.g. a camera or garage-door
    alert) the page is swapped immediately and a short-lived
    reload_immediately.html sentinel file is created so polling XMLHTTP
    clients refresh right away.  Otherwise the page is only swapped once
    constants.refresh_period_sec has elapsed since the last swap.
    """
    page_chooser = chooser.weighted_random_chooser_with_triggers(
        trigger_catalog.get_triggers())
    swap_page_target = 0
    last_page = ""

    def write_current_page(page):
        # Point current.shtml (the SSI wrapper the browser renders) at
        # the newly chosen page.  `with` guarantees the file is closed
        # even if emit_wrapped raises.
        with open(os.path.join(constants.pages_dir, "current.shtml"),
                  "w") as f:
            emit_wrapped(f, page)

    while True:
        now = time.time()
        (page, triggered) = page_chooser.choose_next_page()

        if triggered:
            print('chooser[%s] - WE ARE TRIGGERED.' % utils.timestamp())
            if page != last_page:
                print('chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED' % (
                    utils.timestamp(), page))
                write_current_page(page)

                # Notify XMLHTTP clients that they need to refresh now.
                path = os.path.join(constants.pages_dir,
                                    "reload_immediately.html")
                with open(path, 'w') as f:
                    f.write("Reload, suckers!")
                # Give clients a beat to notice the sentinel, then
                # remove it so they don't reload in a tight loop.
                time.sleep(0.750)
                os.remove(path)
                last_page = page
                swap_page_target = now + constants.refresh_period_sec

        elif now >= swap_page_target:
            if page == last_page:
                print('chooser[%s] - nominal choice got the same page...' % (
                    utils.timestamp()))
                continue
            print('chooser[%s] - nominal choice of %s' % (utils.timestamp(), page))
            try:
                write_current_page(page)
            except (IOError, OSError):
                # Narrowed from a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; only filesystem trouble
                # is expected here.
                print('chooser[%s] - page does not exist?!' % (utils.timestamp()))
                continue
            last_page = page
            swap_page_target = now + constants.refresh_period_sec
        time.sleep(1.0)
+
def pick_background_color():
    """Choose the kiosk background color for the current time of day.

    Returns a hex RGB string (no leading '#'): a warm red overnight
    (21:00-06:59), a softer pink during the 07:00 and 20:00 transition
    hours, and plain white otherwise.
    """
    hour = datetime.now().hour
    if hour <= 6 or hour >= 21:
        return "E6B8B8"
    if hour in (7, 20):
        return "EECDCD"
    return "FFFFFF"
+
def emit_wrapped(f, filename):
    """Write the SSI wrapper page for `filename` to file object f.

    The wrapper supplies the chrome around every kiosk page: a
    date/clock header, javascript that zooms the content to fit the
    screen, lazy-loads DATA-SRC images, and polls for a
    reload_immediately.html sentinel (written by thread_change_current)
    to force an instant refresh.  The actual page body is pulled in via
    an Apache server-side include of `filename`.
    """
    # Brief human-readable age of the page file, shown in the footer.
    age = utils.describe_age_of_file_briefly("pages/%s" % filename)
    bgcolor = pick_background_color()
    # The template below is one big %-format string: literal percent
    # signs (CSS/JS "%" and modulo) are escaped as %%, and the six
    # placeholders are filled, in order, by the argument tuple at the
    # bottom (bgcolor, refresh period in ms, bgcolor again, filename
    # twice, age).
    # NOTE(review): the "Reload unconditionally after 22 sec" comment in
    # the JS is stale -- the actual cutoff is
    # constants.refresh_period_sec * 1000; confirm and fix the comment
    # text if desired.
    f.write("""
<HEAD>
  <TITLE>Kitchen Kiosk</TITLE>
  <LINK rel="stylesheet" type="text/css" href="style.css">
  <SCRIPT TYPE="text/javascript">

  // Zoom the 'contents' div to fit without scrollbars and then make
  // it visible.
  function zoomScreen() {
    z = 285;
    do {
      document.getElementById("content").style.zoom = z+"%%";
      var body = document.body;
      var html = document.documentElement;
      var height = Math.max(body.scrollHeight,
                            body.offsetHeight,
                            html.clientHeight,
                            html.scrollHeight,
                            html.offsetHeight);
      var windowHeight = window.innerHeight;
      var width = Math.max(body.scrollWidth,
                           body.offsetWidth,
                           html.clientWidth,
                           html.scrollWidth,
                           html.offsetWidth);
      var windowWidth = window.innerWidth;
      var heightRatio = height / windowHeight;
      var widthRatio = width / windowWidth;

      if (heightRatio <= 1.0 && widthRatio <= 1.0) {
        break;
      }
      z -= 4;
    } while(z >= 70);
    document.getElementById("content").style.visibility = "visible";
  }

  // Load IMG tags with DATA-SRC attributes late.
  function lateLoadImages() {
    var image = document.getElementsByTagName('img');
    for (var i = 0; i < image.length; i++) {
      if (image[i].getAttribute('DATA-SRC')) {
        image[i].setAttribute('SRC', image[i].getAttribute('DATA-SRC'));
      }
    }
  }

  // Operate the clock at the top of the page.
  function runClock() {
    var today = new Date();
    var h = today.getHours();
    var ampm = h >= 12 ? 'pm' : 'am';
    h = h %% 12;
    h = h ? h : 12; // the hour '0' should be '12'
    var m = maybeAddZero(today.getMinutes());
    var colon = ":";
    if (today.getSeconds() %% 2 == 0) {
      colon = "<FONT STYLE='color: #%s; font-size: 4vmin; font-weight: bold'>:</FONT>";
    }
    document.getElementById("time").innerHTML = h + colon + m + ampm;
    document.getElementById("date").innerHTML = today.toDateString();
    var t = setTimeout(function(){runClock()}, 1000);
  }

  // Helper method for running the clock.
  function maybeAddZero(x) {
    return (x < 10) ? "0" + x : x;
  }

  // Do something on page load.
  function addLoadEvent(func) {
    var oldonload = window.onload;
    if (typeof window.onload != 'function') {
      window.onload = func;
    } else {
      window.onload = function() {
        if (oldonload) {
          oldonload();
        }
        func();
      }
    }
  }

  // Sleep thread helper.
  const sleep = (milliseconds) => {
    return new Promise(resolve => setTimeout(resolve, milliseconds))
  }

  var loaded = false;
  var loadedDate = new Date();

  addLoadEvent(zoomScreen);
  addLoadEvent(runClock);
  addLoadEvent(lateLoadImages);
  addLoadEvent(function() {
    loaded = true;
  });

  // Reload the page after a certain amount of time has passed or
  // immediately if told to do so.
  (function poll() {
    setTimeout(
      function() {
        var now = new Date();
        var deltaMs = now.getTime() - loadedDate.getTime();

        // Reload unconditionally after 22 sec.
        if (deltaMs > %d) {
          window.location.reload();
        }

        // Reload immediately if told.
        var xhr = new XMLHttpRequest();
        xhr.open('GET',
                 'http://wannabe.house/kiosk/pages/reload_immediately.html');
        xhr.onload =
          function() {
            if (xhr.status === 200) {
              window.location.reload();
            } else {
              sleep(500).then(() => {
                poll();
              });
            }
          };
        xhr.send();
      }, 500);
  })();
</SCRIPT>
</HEAD>
<BODY BGCOLOR="#%s">
    <TABLE style="height:100%%; width:100%%" BORDER=0>
    <TR HEIGHT=30>
        <TD ALIGN="left">
            <DIV id="date">&nbsp;</DIV>
        </TD>
        <TD ALIGN="center"><FONT COLOR=#bbbbbb>
            <DIV id="info"></DIV></FONT>
        </TD>
        <TD ALIGN="right">
            <DIV id="time">&nbsp;</DIV>
        </TD>
    </TR>
    <TR STYLE="vertical-align:top">
        <TD COLSPAN=3>
            <DIV ID="content" STYLE="zoom: 1; visibility: hidden;">
                <!-- BEGIN main page contents. -->
<!--#include virtual=\"%s\"-->
                <!-- END main page contents. -->
            </DIV>
            <BR>
            <P ALIGN="right">
                <FONT SIZE=2 COLOR=#bbbbbb>%s @ %s ago.</FONT>
            </P>
        </TD>
    </TR>
    </TABLE>
</BODY>""" % (bgcolor,
              constants.refresh_period_sec * 1000,
              bgcolor,
              filename,
              filename,
              age))
+
def thread_invoke_renderers():
    """Thread body: run every registered renderer, forever.

    Each pass invokes render() on every renderer in the catalog,
    swallowing (but logging) any exception so one broken renderer
    cannot kill the whole thread, then sleeps
    constants.render_period_sec before the next pass.
    """
    while True:
        for r in renderer_catalog.get_renderers():
            try:
                r.render()
            except Exception:
                # The original code had a second `except Error` clause
                # here; it was unreachable (Exception catches first) and
                # `Error` was an undefined name anyway, so it is gone.
                traceback.print_exc()
                print("renderer[%s] unknown exception, swallowing it." % (
                    utils.timestamp()))
        time.sleep(constants.render_period_sec)
+
if __name__ == "__main__":
    # Watchdog loop: start the page-chooser and renderer worker threads
    # and respawn either one if it dies.  (The original compared threads
    # to None with `==`; `is None` is the correct identity test, and the
    # unreachable trailing print after `while True` has been dropped.)
    logging.basicConfig()
    changer_thread = None
    renderer_thread = None
    while True:
        if changer_thread is None or not changer_thread.is_alive():
            print("chooser[%s] - (Re?)initializing chooser thread..." % utils.timestamp())
            changer_thread = Thread(target=thread_change_current, args=())
            changer_thread.start()
        if renderer_thread is None or not renderer_thread.is_alive():
            print("renderer[%s] - (Re?)initializing render thread..." % utils.timestamp())
            renderer_thread = Thread(target=thread_invoke_renderers, args=())
            renderer_thread.start()
        time.sleep(10000)
diff --git a/local_photos_mirror_renderer.py b/local_photos_mirror_renderer.py
new file mode 100644 (file)
index 0000000..020683d
--- /dev/null
@@ -0,0 +1,100 @@
+import os
+import file_writer
+import renderer
+import sets
+import random
+
class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
    """A renderer that uses a local mirror of Google photos.

    "Index Photos" walks a local album mirror and caches the URLs of
    photos in whitelisted albums; "Choose Photo" picks one at random and
    writes a simple page showing it.
    """

    album_root_directory = "/usr/local/export/www/gphotos/albums"

    # Albums whose contents may appear on the kiosk.  frozenset replaces
    # the Python-2-only sets.ImmutableSet.
    album_whitelist = frozenset([
        '1208 Newer Alex Photos',
        '1013 Scott and Lynn',
        '0106 Key West 2019',
        '1017 Olympic Sculpture Park',
        '0212 Chihuly Glass',
        '0730 Trip to East Coast \'16',
        '0715 Barn',
        '1009 East Coast 2018',
        '0819 Skiing with Alex',
        '0819 Friends',
        '0227 Trip to California, \'16',
        '0407 London, 2018',
        '0528 Ohme Gardens',
        '0809 Bangkok and Phuket, 2003',
        '0803 Blue Angels... Seafair',
        '0719 Dunn Gardens',
        '0514 Krakow 2009',
        '0515 Tuscany 2008',
        '0508 Yosemite 2010',
        '0611 Sonoma',
        '1025 NJ 2015',
        '0407 Las Vegas, 2017',
    ])

    # Displayable file extensions (no dot, case-sensitive as mirrored).
    extension_whitelist = frozenset([
        'jpg',
        'gif',
        'JPG',
        'jpeg',
        'GIF',
    ])

    def __init__(self, name_to_timeout_dict):
        super(local_photos_mirror_renderer, self).__init__(name_to_timeout_dict, False)
        # Set of photo URLs gathered by index_photos().
        self.candidate_photos = set()

    def debug_prefix(self):
        return "local_photos_mirror"

    def periodic_render(self, key):
        if key == 'Index Photos':
            return self.index_photos()
        elif key == 'Choose Photo':
            return self.choose_photo()
        else:
            # Was `raise error(...)` -- `error` was an undefined name and
            # would itself have raised NameError.
            raise ValueError('Unexpected operation: %s' % key)

    def index_photos(self):
        """Walk the filesystem looking for photos in whitelisted albums
        and keep their URLs in memory.  Always returns True."""
        for root, subdirs, files in os.walk(self.album_root_directory):
            # basename() replaces root.rsplit('/', 1)[1], which raised
            # IndexError on a path with no slash.
            if os.path.basename(root) not in self.album_whitelist:
                continue
            for x in files:
                # splitext() is safe for names with no dot, where the
                # old x.rsplit('.', 1)[1] raised IndexError.
                extension = os.path.splitext(x)[1].lstrip('.')
                if extension in self.extension_whitelist:
                    photo_path = os.path.join(root, x)
                    photo_url = photo_path.replace(
                        "/usr/local/export/www/",
                        "http://10.0.0.18/",
                        1)
                    self.candidate_photos.add(photo_url)
        return True

    def choose_photo(self):
        """Pick one of the cached URLs and build a page around it.

        Returns False (and renders nothing) if index_photos() has not
        yet found any candidates."""
        if not self.candidate_photos:
            print("No photos!")
            return False
        # random.choice on a list: random.sample() on a set is rejected
        # by Python 3.11+.
        path = random.choice(list(self.candidate_photos))
        f = file_writer.file_writer('photo_23_none.html')
        f.write("""
<style>
body{background-color:#303030;}
div#time{color:#dddddd;}
div#date{color:#dddddd;}
</style>
<center>""")
        f.write('<img src="%s" style="display:block;max-width=800;max-height:600;width:auto;height:auto">' % path)
        f.write("</center>")
        f.close()
        return True
+
+# Test code
+#x = local_photos_mirror_renderer({"Index Photos": (60 * 60 * 12),
+#                                  "Choose Photo": (1)})
+#x.index_photos()
+#x.choose_photo()
diff --git a/logger.py b/logger.py
new file mode 100644 (file)
index 0000000..3d65386
--- /dev/null
+++ b/logger.py
@@ -0,0 +1,32 @@
+import logging
+from logging.handlers import WatchedFileHandler
+
class logger(object):
    """Configures and hands out a named logger for the kiosk.

    INFO-and-above records go to /var/log/kiosk.log via a
    WatchedFileHandler (so external log rotation is handled).
    """

    def __init__(self, module):
        """module: the logger name, typically the caller's module name."""
        log = logging.getLogger(module)
        log.setLevel(logging.DEBUG)

        # Only attach the file handler once per named logger.  The
        # original attached a new handler on every instantiation, so
        # creating this wrapper twice duplicated every log record.
        if not log.handlers:
            f = WatchedFileHandler(filename='/var/log/kiosk.log')
            f.setLevel(logging.INFO)  # change to logging.DEBUG for more verbosity
            f.setFormatter(logging.Formatter(
                fmt='%(asctime)s - %(levelname)s - %(message)s',
                datefmt='%m/%d/%Y %I:%M:%S %p'))
            log.addHandler(f)
        self.logger = log

    def get(self):
        """Return the configured logging.Logger instance."""
        return self.logger
diff --git a/mynorthwest_rss_renderer.py b/mynorthwest_rss_renderer.py
new file mode 100644 (file)
index 0000000..38bcd28
--- /dev/null
@@ -0,0 +1,40 @@
+import generic_news_rss_renderer
+
class mynorthwest_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
    """Renders headlines/details pages from the MyNorthwest RSS feed.

    A thin specialization of generic_news_rss_renderer that fixes the
    page-name prefixes, forces HTTPS, enables debug output, and accepts
    every feed item as interesting.
    """

    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
        super(mynorthwest_rss_renderer, self).__init__(
            name_to_timeout_dict, feed_site, feed_uris, page_title)
        # Turn on verbose debug output for this renderer.
        self.debug = 1

    def debug_prefix(self):
        return "mynorthwest(%s)" % self.page_title

    def get_headlines_page_prefix(self):
        return "mynorthwest-%s" % self.page_title

    def get_details_page_prefix(self):
        return "mynorthwest-details-%s" % self.page_title

    def should_use_https(self):
        # Always fetch the feed over HTTPS.
        return True

    def item_is_interesting_for_headlines(self, title, description, item):
        # Every item qualifies for the headlines page.
        return True

    def item_is_interesting_for_article(self, title, description, item):
        # Every item qualifies for the details page.
        return True
+
+# Test
+#x = mynorthwest_rss_renderer(
+#    {"Fetch News" : 1,
+#     "Shuffle News" : 1},
+#    "mynorthwest.com",
+#    [ "/feed/" ],
+#    "Test" )
+#if x.fetch_news() == 0:
+#    print "Error fetching news, no items fetched."
+#x.shuffle_news()
+
diff --git a/myq_trigger.py b/myq_trigger.py
new file mode 100644 (file)
index 0000000..da5b2f1
--- /dev/null
@@ -0,0 +1,11 @@
+import constants
+import globals
+import trigger
+
class myq_trigger(trigger.trigger):
    """Trigger that forces the garage-door page up while MyQ reports
    the door open."""

    def get_triggered_page_list(self):
        """Return (pagename, priority) while the door is open, else None."""
        # `globals` here is the project's shared state module, not the
        # builtin.  NOTE(review): `== True` also matches 1; confirm the
        # flag is always a real bool before tightening to `is True`.
        if globals.get("myq_triggered") == True:
            # Parenthesized print: the Python 2 print *statement* used
            # here was inconsistent with the rest of the codebase (e.g.
            # kiosk.py) and a syntax error under Python 3.
            print("****** MyQ garage door is open page trigger ******")
            return (constants.myq_pagename, trigger.trigger.PRIORITY_HIGH)
        else:
            return None
diff --git a/page_builder.py b/page_builder.py
new file mode 100644 (file)
index 0000000..369d442
--- /dev/null
@@ -0,0 +1,93 @@
+import sys
+
class page_builder(object):
    """Builds a simple HTML <TABLE> page out of item fragments.

    Usage is fluent: set_title(...).add_item(...).render_html(f).
    The layout (items per table row) is chosen automatically from the
    item count unless set explicitly with set_layout().
    """

    LAYOUT_AUTO = 0
    LAYOUT_ONE_ITEM = 1
    LAYOUT_FOUR_ITEMS = 2
    LAYOUT_MANY_ITEMS = 3
    LAYOUT_TEXT_ONE_COLUMN = 4
    LAYOUT_TEXT_TWO_COLUMNS = 5

    # How many <TD> cells go in each table row, per layout.
    _ITEMS_PER_ROW = {
        LAYOUT_ONE_ITEM: 1,
        LAYOUT_FOUR_ITEMS: 2,
        LAYOUT_MANY_ITEMS: 3,
        LAYOUT_TEXT_TWO_COLUMNS: 1,
        LAYOUT_TEXT_ONE_COLUMN: 1,
    }

    def __init__(self):
        self.title = None      # optional <H1> page heading
        self.style = ""        # raw HTML/CSS injected after the table opens
        self.layout = page_builder.LAYOUT_AUTO
        self.items = []        # HTML fragments, one per table cell
        self.debug_info = None

    def set_layout(self, layout):
        self.layout = layout
        return self

    def set_title(self, title):
        self.title = title
        return self

    def set_style(self, style):
        self.style = style
        return self

    def add_item(self, item):
        self.items.append(item)
        return self

    def set_debug_info(self, debug_info):
        self.debug_info = debug_info
        return self

    def __pick_layout(self):
        # Auto-select a layout from the item count.
        if len(self.items) == 1:
            self.layout = page_builder.LAYOUT_ONE_ITEM
        elif len(self.items) <= 4:
            self.layout = page_builder.LAYOUT_FOUR_ITEMS
        else:
            self.layout = page_builder.LAYOUT_MANY_ITEMS

    def __render_header(self, f):
        if self.title is not None:
            f.write("<H1>%s</H1>\n" % self.title)
        f.write("<HR>\n<TABLE WIDTH=99% BORDER=0>\n<TR>\n")
        if self.style is not None:
            f.write(self.style)

    def __render_footer(self, f):
        f.write("</TR>\n</TABLE>\n")

    def render_html(self, f):
        """Render the page to file-like object f.

        Raises ValueError if set_layout() was given an unknown layout
        (previously this printed an error and then crashed with a
        TypeError on `count % None`).
        """
        if self.layout == page_builder.LAYOUT_AUTO or self.layout is None:
            self.__pick_layout()

        items_per_row = page_builder._ITEMS_PER_ROW.get(self.layout)
        if items_per_row is None:
            raise ValueError("unknown layout type: %d" % self.layout)

        self.__render_header(f)
        count = 0
        # Longest items first so big fragments land early in the table.
        self.items.sort(key=len, reverse=True)
        for item in self.items:
            f.write('<TD WIDTH=50%% STYLE="padding: 10px;">\n%s\n</TD>\n' % item)
            count += 1
            if count % items_per_row == 0:
                f.write("</TR>\n<TR>\n")
        self.__render_footer(f)
+
+#x = page_builder()
+#x.set_title("title").add_item("item1").add_item("item2").add_item("item3").render_html(sys.stdout)
diff --git a/pages/cabin_2_none.html b/pages/cabin_2_none.html
new file mode 100644 (file)
index 0000000..fb1de4d
--- /dev/null
@@ -0,0 +1,50 @@
+<html>
+<head>
+<meta http-equiv="cache-control" content="max-age=0" />
+<meta http-equiv="cache-control" content="no-cache" />
+<meta http-equiv="expires" content="0" />
+<meta http-equiv="expires" content="Tue, 01 Jan 1980 1:00:00 GMT" />
+<meta http-equiv="pragma" content="no-cache" />
+<link rel="icon" 
+      type="image/jpeg"
+      href="http://ski.dyn.guru.org/favicon.jpg">
+</head>
+<body><h1>Ski Critter Cabin</h1><hr><P>
+<center>
+<table border=0>
+<tr>
+<td>
+<center>
+<img src="http://ski.dyn.guru.org/webcam/videostream.cgi"
+     height="384">
+<br>
+</center>
+</td>
+<td>
+<center>
+<img src="http://ski.dyn.guru.org/rpi/~pi/one_day.png"
+     height="384">
+<br>
+</center>
+</td>
+</tr>
+<tr>
+<td>
+<center>
+<img src="http://ski.dyn.guru.org/rpi/~pi/one_week.png"
+     height="384">
+<br>
+</center>
+</td>
+<td>
+<center>
+<img src="http://ski.dyn.guru.org/rpi/~pi/one_month.png"
+     height="384">
+<br>
+</center>
+</td>
+</tr>
+</table>
+</center>
+</body>
+</html>
diff --git a/pages/clock_10_none.html b/pages/clock_10_none.html
new file mode 100755 (executable)
index 0000000..737ba6b
--- /dev/null
@@ -0,0 +1,261 @@
+<head>
+<!-- Thank you to Toby Pitman for posting this really cool article (and
+     javascript / css / image) explaining how to make a running analog
+     clock webpage.  See: http://css-tricks.com/css3-clock for more info. -->
+  <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+  <title>Analog Clock</title>
+  <script type="text/javascript" src="jquery-1.2.6.min.js"></script>
+  <style type="text/css">
+    * {
+      margin: 0px;
+      padding: 0px;
+    }
+    body {
+      background-color: #222222;
+    }
+    div#time {
+      color:#dddddd;
+    }
+    div#date{
+      color:#dddddd;
+    }
+    table {
+      margin:0px;
+      border-spacing:0px;
+      padding:0px;
+      border-collapse:collapse;
+      font-size:100%;
+      line-height:0.5;
+    }
+    tr {
+      font-size:100%;
+      line-height:0.5;
+    }
+    td {
+      font-size:100%;
+      line-height:0.5;
+    }
+    #clock {
+      position: relative;
+      width: 600px;
+      height: 600px;
+      margin: 20px auto 0 auto;
+      list-style: none;
+    }
+    #analog {
+      position: relative;
+      width: 100%;
+    }
+    #txt {
+      position: absolute;
+      top: 280px;
+      left: 375px;
+    }
+    #sec, #min, #hour {
+      position: absolute;
+      width: 30px;
+      height: 600px;
+      top: 0px;
+      left: 285px;
+    }
+    #sec {
+      background: url("clockimg/sechand.png");
+      z-index: 3;
+    }
+    #min {
+      background: url("clockimg/minhand.png");
+      z-index: 2;
+      width: 30px;
+      height: 600px;
+      top: 0px;
+      left: 285px;
+    }
+    #hour {
+      background: url("clockimg/hourhand.png");
+      z-index: 1;
+    }
+    .digit {
+      display: block;
+    }
+  </style>
+  <script type="text/javascript">
+    $(document).ready(
+      function() {
+        $.fn.preload = function() {
+          this.each(function() {
+            $('<img/>')[0].src = this;
+          });
+        }
+
+        var face = new Image();
+        face.onload = function() {
+          $("#clock").css("background", "url(clockimg/smallface.gif)");
+        }
+        face.src = "clockimg/smallface.gif";
+
+        var digit_images = [
+          "clockimg/0.png",
+          "clockimg/1.png",
+          "clockimg/2.png",
+          "clockimg/3.png",
+          "clockimg/4.png",
+          "clockimg/5.png",
+          "clockimg/6.png",
+          "clockimg/7.png",
+          "clockimg/8.png",
+          "clockimg/9.png"
+        ];
+
+        var month_images = [
+          "clockimg/jan.png",
+          "clockimg/feb.png",
+          "clockimg/mar.png",
+          "clockimg/apr.png",
+          "clockimg/may.png",
+          "clockimg/jun.png",
+          "clockimg/jul.png",
+          "clockimg/aug.png",
+          "clockimg/sep.png",
+          "clockimg/oct.png",
+          "clockimg/nov.png",
+          "clockimg/dec.png"
+        ];
+
+        var day_images = [
+          "clockimg/sun.png",
+          "clockimg/mon.png",
+          "clockimg/tue.png",
+          "clockimg/wed.png",
+          "clockimg/thu.png",
+          "clockimg/fri.png",
+          "clockimg/sat.png",
+        ]
+
+        $(digit_images).preload();
+
+        // Rotate second hand and set seconds.
+        setInterval(
+          function() {
+            var date = new Date();
+            var ms = date.getMilliseconds();
+            var seconds = date.getSeconds();
+            var sdegree = seconds * 6 + (ms / 166);
+            var srotate = "rotate(" + sdegree + "deg)";
+            $("#sec").css({"-moz-transform" : srotate,
+                           "-webkit-transform" : srotate});
+            var s1 = Math.floor(seconds / 10);
+            var s2 = seconds % 10;
+            $("#s1").css({"content" : "url(" + digit_images[s1] + ")"});
+            $("#s2").css({"content" : "url(" + digit_images[s2] + ")"});
+          }, 75);
+
+        // Rotate minute hand and set minutes.
+        function updateMinutes() {
+          var date = new Date();
+          var mins = date.getMinutes();
+          var seconds = date.getSeconds();
+          var mdegree = mins * 6 + (seconds / 10);
+          var mrotate = "rotate(" + mdegree + "deg)";
+          $("#min").css({"-moz-transform" : mrotate,
+                         "-webkit-transform" : mrotate});
+          var m1 = Math.floor(mins / 10);
+          var m2 = mins % 10;
+          $("#m1").css({"content" : "url(" + digit_images[m1] + ")"});
+          $("#m2").css({"content" : "url(" + digit_images[m2] + ")"});
+        }
+        updateMinutes();
+        setInterval(function(){updateMinutes();}, 1000);
+
+        // Rotate hour hand and set hours.
+        function updateHours() {
+          var date = new Date();
+          var hours = date.getHours();
+          var mins = date.getMinutes();
+          var hdegree = hours * 30 + (mins / 2);
+          var hrotate = "rotate(" + hdegree + "deg)";
+          $("#hour").css({"-moz-transform" : hrotate,
+                          "-webkit-transform" : hrotate});
+          if (hours > 12) {
+            hours = hours - 12;
+          }
+          if (hours <= 0) {
+            hours = 12;
+          }
+          var h1 = Math.floor(hours / 10);
+          var h2 = hours % 10;
+          $("#h1").css({"content" : "url(" + digit_images[h1] + ")"});
+          $("#h2").css({"content" : "url(" + digit_images[h2] + ")"});
+        }
+        updateHours();
+        setInterval(function(){updateHours();}, 1000);
+
+        function updateDate() {
+          var date = new Date();
+          var day = date.getDate();
+          var d1 = Math.floor(day / 10);
+          var d2 = day % 10;
+          $("#d1").css({"content" : "url(" + digit_images[d1] + ")"});
+          $("#d2").css({"content" : "url(" + digit_images[d2] + ")"});
+          $("#dd1").css({"content" : "url(" + digit_images[d1] + ")"});
+          $("#dd2").css({"content" : "url(" + digit_images[d2] + ")"});
+          var dow = date.getDay();
+          $("#dow").css({"content" : "url(" + day_images[dow] + ")"});
+          var month = date.getMonth();
+          $("#mm").css({"content" : "url(" + month_images[month] + ")"});
+        }
+        updateDate();
+        setInterval(function(){updateDate();}, 1000);
+      });
+  </script>
+</head>
+<body>
+  <table border=0 width=100%>
+    <tr>
+      <td width=50%>
+        <ul id="clock">
+          <div id="txt">
+            <table style="background-color:#999999" border>
              <td><img src="clockimg/0.png" height=26 id="d1" class="digit"></td>
              <td><img src="clockimg/1.png" height=26 id="d2" class="digit"></td>
+            </table>
+          </div>
+          <li id="sec"></li>
+          <li id="hour"></li>
+          <li id="min"></li>
+        </ul>
+      </td>
+      <td STYLE="vertical-align:middle">
+          <li><div id="analog">
+            <center>
+              <table style="background-color:#999999;height:80px;" border>
+                <tr>
+                <td><img src="clockimg/2.png" id="h1" class="digit"></td>
+                <td><img src="clockimg/3.png" id="h2" class="digit"></td>
+                <td style="border: none">&nbsp;&nbsp;</td>
+                <td><img src="clockimg/4.png" id="m1" class="digit"></td>
+                <td><img src="clockimg/5.png" id="m2" class="digit"></td>
+                <td style="border: none">&nbsp;&nbsp;</td>
+                <td><img src="clockimg/6.png" id="s1" class="digit"></td>
+                <td><img src="clockimg/7.png" id="s2" class="digit"></td>
+                </tr>
+              </table>
+              <br>
+              <table style="background-color:#999999;height:80px;" border>
+                <tr>
+                <td><img src="clockimg/sun.png" id="dow" class="digit"></td>
+                <td style="border: none">&nbsp;&nbsp;</td>
+                <td><img src="clockimg/8.png" id="dd1" class="digit"></td>
+                <td><img src="clockimg/9.png" id="dd2" class="digit"></td>
+                <td style="border: none">&nbsp;&nbsp;</td>
+                <td><img src="clockimg/jan.png" id="mm" class="digit"></td>
+                </tr>
+              </table>
+            </center>
+            </div>
          </li>
+      </td>
+    </tr>
+  </table>
+</body>
+</html>
diff --git a/pages/clockimg/0.png b/pages/clockimg/0.png
new file mode 100644 (file)
index 0000000..7c1cd69
Binary files /dev/null and b/pages/clockimg/0.png differ
diff --git a/pages/clockimg/1.png b/pages/clockimg/1.png
new file mode 100644 (file)
index 0000000..adf043e
Binary files /dev/null and b/pages/clockimg/1.png differ
diff --git a/pages/clockimg/2.png b/pages/clockimg/2.png
new file mode 100644 (file)
index 0000000..132a8c1
Binary files /dev/null and b/pages/clockimg/2.png differ
diff --git a/pages/clockimg/3.png b/pages/clockimg/3.png
new file mode 100644 (file)
index 0000000..420da43
Binary files /dev/null and b/pages/clockimg/3.png differ
diff --git a/pages/clockimg/4.png b/pages/clockimg/4.png
new file mode 100644 (file)
index 0000000..d47774c
Binary files /dev/null and b/pages/clockimg/4.png differ
diff --git a/pages/clockimg/5.png b/pages/clockimg/5.png
new file mode 100644 (file)
index 0000000..f187f49
Binary files /dev/null and b/pages/clockimg/5.png differ
diff --git a/pages/clockimg/6.png b/pages/clockimg/6.png
new file mode 100644 (file)
index 0000000..ec531ca
Binary files /dev/null and b/pages/clockimg/6.png differ
diff --git a/pages/clockimg/7.png b/pages/clockimg/7.png
new file mode 100644 (file)
index 0000000..6b315e1
Binary files /dev/null and b/pages/clockimg/7.png differ
diff --git a/pages/clockimg/8.png b/pages/clockimg/8.png
new file mode 100644 (file)
index 0000000..ed787cb
Binary files /dev/null and b/pages/clockimg/8.png differ
diff --git a/pages/clockimg/9.png b/pages/clockimg/9.png
new file mode 100644 (file)
index 0000000..eb01cea
Binary files /dev/null and b/pages/clockimg/9.png differ
diff --git a/pages/clockimg/apr.png b/pages/clockimg/apr.png
new file mode 100644 (file)
index 0000000..e849a13
Binary files /dev/null and b/pages/clockimg/apr.png differ
diff --git a/pages/clockimg/aug.png b/pages/clockimg/aug.png
new file mode 100644 (file)
index 0000000..a296835
Binary files /dev/null and b/pages/clockimg/aug.png differ
diff --git a/pages/clockimg/clockface.png b/pages/clockimg/clockface.png
new file mode 100644 (file)
index 0000000..325e718
Binary files /dev/null and b/pages/clockimg/clockface.png differ
diff --git a/pages/clockimg/clockface_old.png b/pages/clockimg/clockface_old.png
new file mode 100644 (file)
index 0000000..356fae0
Binary files /dev/null and b/pages/clockimg/clockface_old.png differ
diff --git a/pages/clockimg/dash.png b/pages/clockimg/dash.png
new file mode 100644 (file)
index 0000000..986df0e
Binary files /dev/null and b/pages/clockimg/dash.png differ
diff --git a/pages/clockimg/dec.png b/pages/clockimg/dec.png
new file mode 100644 (file)
index 0000000..323fbed
Binary files /dev/null and b/pages/clockimg/dec.png differ
diff --git a/pages/clockimg/dial.png b/pages/clockimg/dial.png
new file mode 100644 (file)
index 0000000..0452f49
Binary files /dev/null and b/pages/clockimg/dial.png differ
diff --git a/pages/clockimg/feb.png b/pages/clockimg/feb.png
new file mode 100644 (file)
index 0000000..341f8d2
Binary files /dev/null and b/pages/clockimg/feb.png differ
diff --git a/pages/clockimg/fri.png b/pages/clockimg/fri.png
new file mode 100644 (file)
index 0000000..7cb7858
Binary files /dev/null and b/pages/clockimg/fri.png differ
diff --git a/pages/clockimg/hourhand.png b/pages/clockimg/hourhand.png
new file mode 100644 (file)
index 0000000..9e53322
Binary files /dev/null and b/pages/clockimg/hourhand.png differ
diff --git a/pages/clockimg/jan.png b/pages/clockimg/jan.png
new file mode 100644 (file)
index 0000000..43757d1
Binary files /dev/null and b/pages/clockimg/jan.png differ
diff --git a/pages/clockimg/jul.png b/pages/clockimg/jul.png
new file mode 100644 (file)
index 0000000..3fc5cf7
Binary files /dev/null and b/pages/clockimg/jul.png differ
diff --git a/pages/clockimg/jun.png b/pages/clockimg/jun.png
new file mode 100644 (file)
index 0000000..c5298d9
Binary files /dev/null and b/pages/clockimg/jun.png differ
diff --git a/pages/clockimg/mar.png b/pages/clockimg/mar.png
new file mode 100644 (file)
index 0000000..85961a9
Binary files /dev/null and b/pages/clockimg/mar.png differ
diff --git a/pages/clockimg/may.png b/pages/clockimg/may.png
new file mode 100644 (file)
index 0000000..4bfbbad
Binary files /dev/null and b/pages/clockimg/may.png differ
diff --git a/pages/clockimg/minhand.png b/pages/clockimg/minhand.png
new file mode 100644 (file)
index 0000000..0aa0113
Binary files /dev/null and b/pages/clockimg/minhand.png differ
diff --git a/pages/clockimg/mon.png b/pages/clockimg/mon.png
new file mode 100644 (file)
index 0000000..e9674c2
Binary files /dev/null and b/pages/clockimg/mon.png differ
diff --git a/pages/clockimg/nov.png b/pages/clockimg/nov.png
new file mode 100644 (file)
index 0000000..80980ed
Binary files /dev/null and b/pages/clockimg/nov.png differ
diff --git a/pages/clockimg/oct.png b/pages/clockimg/oct.png
new file mode 100644 (file)
index 0000000..8ce3264
Binary files /dev/null and b/pages/clockimg/oct.png differ
diff --git a/pages/clockimg/sat.png b/pages/clockimg/sat.png
new file mode 100644 (file)
index 0000000..f193de3
Binary files /dev/null and b/pages/clockimg/sat.png differ
diff --git a/pages/clockimg/sechand.png b/pages/clockimg/sechand.png
new file mode 100644 (file)
index 0000000..7ea7258
Binary files /dev/null and b/pages/clockimg/sechand.png differ
diff --git a/pages/clockimg/sep.png b/pages/clockimg/sep.png
new file mode 100644 (file)
index 0000000..cfc1140
Binary files /dev/null and b/pages/clockimg/sep.png differ
diff --git a/pages/clockimg/slash.png b/pages/clockimg/slash.png
new file mode 100644 (file)
index 0000000..e0b9289
Binary files /dev/null and b/pages/clockimg/slash.png differ
diff --git a/pages/clockimg/smallface.gif b/pages/clockimg/smallface.gif
new file mode 100644 (file)
index 0000000..db38bae
Binary files /dev/null and b/pages/clockimg/smallface.gif differ
diff --git a/pages/clockimg/smallface.jpg b/pages/clockimg/smallface.jpg
new file mode 100644 (file)
index 0000000..1460a57
Binary files /dev/null and b/pages/clockimg/smallface.jpg differ
diff --git a/pages/clockimg/sun.png b/pages/clockimg/sun.png
new file mode 100644 (file)
index 0000000..7da6094
Binary files /dev/null and b/pages/clockimg/sun.png differ
diff --git a/pages/clockimg/thu.png b/pages/clockimg/thu.png
new file mode 100644 (file)
index 0000000..c36bd11
Binary files /dev/null and b/pages/clockimg/thu.png differ
diff --git a/pages/clockimg/tue.png b/pages/clockimg/tue.png
new file mode 100644 (file)
index 0000000..8e9bca8
Binary files /dev/null and b/pages/clockimg/tue.png differ
diff --git a/pages/clockimg/wed.png b/pages/clockimg/wed.png
new file mode 100644 (file)
index 0000000..f5ce0a5
Binary files /dev/null and b/pages/clockimg/wed.png differ
diff --git a/pages/hidden/cabin.html b/pages/hidden/cabin.html
new file mode 100644 (file)
index 0000000..83f62bf
--- /dev/null
@@ -0,0 +1,9 @@
+<H1>Cabin Driveway Alert</H1>
+<HR>
+<CENTER>
+  <IMG SRC="http://ski.dyn.guru.org/webcam/videostream.cgi"
+       HEIGHT=720
+       STYLE="border-style: solid;
+              border-width: 15px;
+              border-color: red;">
+</CENTER>
diff --git a/pages/hidden/driveway.html b/pages/hidden/driveway.html
new file mode 100644 (file)
index 0000000..8b5bd97
--- /dev/null
@@ -0,0 +1,9 @@
+<H1>Front Driveway Alert</H1>
+<HR>
+<CENTER>
+  <IMG SRC="http://webcam.house:88/cgi-bin/CGIStream.cgi?cmd=GetMJStream"
+       HEIGHT=720
+       STYLE="border-style: solid;
+              border-width: 15px;
+              border-color: red;">
+</CENTER>
diff --git a/pages/jquery-1.2.6.min.js b/pages/jquery-1.2.6.min.js
new file mode 100755 (executable)
index 0000000..82b98e1
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * jQuery 1.2.6 - New Wave Javascript
+ *
+ * Copyright (c) 2008 John Resig (jquery.com)
+ * Dual licensed under the MIT (MIT-LICENSE.txt)
+ * and GPL (GPL-LICENSE.txt) licenses.
+ *
+ * $Date: 2008-05-24 14:22:17 -0400 (Sat, 24 May 2008) $
+ * $Rev: 5685 $
+ */
+(function(){var _jQuery=window.jQuery,_$=window.$;var jQuery=window.jQuery=window.$=function(selector,context){return new jQuery.fn.init(selector,context);};var quickExpr=/^[^<]*(<(.|\s)+>)[^>]*$|^#(\w+)$/,isSimple=/^.[^:#\[\.]*$/,undefined;jQuery.fn=jQuery.prototype={init:function(selector,context){selector=selector||document;if(selector.nodeType){this[0]=selector;this.length=1;return this;}if(typeof selector=="string"){var match=quickExpr.exec(selector);if(match&&(match[1]||!context)){if(match[1])selector=jQuery.clean([match[1]],context);else{var elem=document.getElementById(match[3]);if(elem){if(elem.id!=match[3])return jQuery().find(selector);return jQuery(elem);}selector=[];}}else
+return jQuery(context).find(selector);}else if(jQuery.isFunction(selector))return jQuery(document)[jQuery.fn.ready?"ready":"load"](selector);return this.setArray(jQuery.makeArray(selector));},jquery:"1.2.6",size:function(){return this.length;},length:0,get:function(num){return num==undefined?jQuery.makeArray(this):this[num];},pushStack:function(elems){var ret=jQuery(elems);ret.prevObject=this;return ret;},setArray:function(elems){this.length=0;Array.prototype.push.apply(this,elems);return this;},each:function(callback,args){return jQuery.each(this,callback,args);},index:function(elem){var ret=-1;return jQuery.inArray(elem&&elem.jquery?elem[0]:elem,this);},attr:function(name,value,type){var options=name;if(name.constructor==String)if(value===undefined)return this[0]&&jQuery[type||"attr"](this[0],name);else{options={};options[name]=value;}return this.each(function(i){for(name in options)jQuery.attr(type?this.style:this,name,jQuery.prop(this,options[name],type,i,name));});},css:function(key,value){if((key=='width'||key=='height')&&parseFloat(value)<0)value=undefined;return this.attr(key,value,"curCSS");},text:function(text){if(typeof text!="object"&&text!=null)return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(text));var ret="";jQuery.each(text||this,function(){jQuery.each(this.childNodes,function(){if(this.nodeType!=8)ret+=this.nodeType!=1?this.nodeValue:jQuery.fn.text([this]);});});return ret;},wrapAll:function(html){if(this[0])jQuery(html,this[0].ownerDocument).clone().insertBefore(this[0]).map(function(){var elem=this;while(elem.firstChild)elem=elem.firstChild;return elem;}).append(this);return this;},wrapInner:function(html){return this.each(function(){jQuery(this).contents().wrapAll(html);});},wrap:function(html){return this.each(function(){jQuery(this).wrapAll(html);});},append:function(){return this.domManip(arguments,true,false,function(elem){if(this.nodeType==1)this.appendChild(elem);});},prepend:function(){return 
this.domManip(arguments,true,true,function(elem){if(this.nodeType==1)this.insertBefore(elem,this.firstChild);});},before:function(){return this.domManip(arguments,false,false,function(elem){this.parentNode.insertBefore(elem,this);});},after:function(){return this.domManip(arguments,false,true,function(elem){this.parentNode.insertBefore(elem,this.nextSibling);});},end:function(){return this.prevObject||jQuery([]);},find:function(selector){var elems=jQuery.map(this,function(elem){return jQuery.find(selector,elem);});return this.pushStack(/[^+>] [^+>]/.test(selector)||selector.indexOf("..")>-1?jQuery.unique(elems):elems);},clone:function(events){var ret=this.map(function(){if(jQuery.browser.msie&&!jQuery.isXMLDoc(this)){var clone=this.cloneNode(true),container=document.createElement("div");container.appendChild(clone);return jQuery.clean([container.innerHTML])[0];}else
+return this.cloneNode(true);});var clone=ret.find("*").andSelf().each(function(){if(this[expando]!=undefined)this[expando]=null;});if(events===true)this.find("*").andSelf().each(function(i){if(this.nodeType==3)return;var events=jQuery.data(this,"events");for(var type in events)for(var handler in events[type])jQuery.event.add(clone[i],type,events[type][handler],events[type][handler].data);});return ret;},filter:function(selector){return this.pushStack(jQuery.isFunction(selector)&&jQuery.grep(this,function(elem,i){return selector.call(elem,i);})||jQuery.multiFilter(selector,this));},not:function(selector){if(selector.constructor==String)if(isSimple.test(selector))return this.pushStack(jQuery.multiFilter(selector,this,true));else
+selector=jQuery.multiFilter(selector,this);var isArrayLike=selector.length&&selector[selector.length-1]!==undefined&&!selector.nodeType;return this.filter(function(){return isArrayLike?jQuery.inArray(this,selector)<0:this!=selector;});},add:function(selector){return this.pushStack(jQuery.unique(jQuery.merge(this.get(),typeof selector=='string'?jQuery(selector):jQuery.makeArray(selector))));},is:function(selector){return!!selector&&jQuery.multiFilter(selector,this).length>0;},hasClass:function(selector){return this.is("."+selector);},val:function(value){if(value==undefined){if(this.length){var elem=this[0];if(jQuery.nodeName(elem,"select")){var index=elem.selectedIndex,values=[],options=elem.options,one=elem.type=="select-one";if(index<0)return null;for(var i=one?index:0,max=one?index+1:options.length;i<max;i++){var option=options[i];if(option.selected){value=jQuery.browser.msie&&!option.attributes.value.specified?option.text:option.value;if(one)return value;values.push(value);}}return values;}else
+return(this[0].value||"").replace(/\r/g,"");}return undefined;}if(value.constructor==Number)value+='';return this.each(function(){if(this.nodeType!=1)return;if(value.constructor==Array&&/radio|checkbox/.test(this.type))this.checked=(jQuery.inArray(this.value,value)>=0||jQuery.inArray(this.name,value)>=0);else if(jQuery.nodeName(this,"select")){var values=jQuery.makeArray(value);jQuery("option",this).each(function(){this.selected=(jQuery.inArray(this.value,values)>=0||jQuery.inArray(this.text,values)>=0);});if(!values.length)this.selectedIndex=-1;}else
+this.value=value;});},html:function(value){return value==undefined?(this[0]?this[0].innerHTML:null):this.empty().append(value);},replaceWith:function(value){return this.after(value).remove();},eq:function(i){return this.slice(i,i+1);},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments));},map:function(callback){return this.pushStack(jQuery.map(this,function(elem,i){return callback.call(elem,i,elem);}));},andSelf:function(){return this.add(this.prevObject);},data:function(key,value){var parts=key.split(".");parts[1]=parts[1]?"."+parts[1]:"";if(value===undefined){var data=this.triggerHandler("getData"+parts[1]+"!",[parts[0]]);if(data===undefined&&this.length)data=jQuery.data(this[0],key);return data===undefined&&parts[1]?this.data(parts[0]):data;}else
+return this.trigger("setData"+parts[1]+"!",[parts[0],value]).each(function(){jQuery.data(this,key,value);});},removeData:function(key){return this.each(function(){jQuery.removeData(this,key);});},domManip:function(args,table,reverse,callback){var clone=this.length>1,elems;return this.each(function(){if(!elems){elems=jQuery.clean(args,this.ownerDocument);if(reverse)elems.reverse();}var obj=this;if(table&&jQuery.nodeName(this,"table")&&jQuery.nodeName(elems[0],"tr"))obj=this.getElementsByTagName("tbody")[0]||this.appendChild(this.ownerDocument.createElement("tbody"));var scripts=jQuery([]);jQuery.each(elems,function(){var elem=clone?jQuery(this).clone(true)[0]:this;if(jQuery.nodeName(elem,"script"))scripts=scripts.add(elem);else{if(elem.nodeType==1)scripts=scripts.add(jQuery("script",elem).remove());callback.call(obj,elem);}});scripts.each(evalScript);});}};jQuery.fn.init.prototype=jQuery.fn;function evalScript(i,elem){if(elem.src)jQuery.ajax({url:elem.src,async:false,dataType:"script"});else
+jQuery.globalEval(elem.text||elem.textContent||elem.innerHTML||"");if(elem.parentNode)elem.parentNode.removeChild(elem);}function now(){return+new Date;}jQuery.extend=jQuery.fn.extend=function(){var target=arguments[0]||{},i=1,length=arguments.length,deep=false,options;if(target.constructor==Boolean){deep=target;target=arguments[1]||{};i=2;}if(typeof target!="object"&&typeof target!="function")target={};if(length==i){target=this;--i;}for(;i<length;i++)if((options=arguments[i])!=null)for(var name in options){var src=target[name],copy=options[name];if(target===copy)continue;if(deep&&copy&&typeof copy=="object"&&!copy.nodeType)target[name]=jQuery.extend(deep,src||(copy.length!=null?[]:{}),copy);else if(copy!==undefined)target[name]=copy;}return target;};var expando="jQuery"+now(),uuid=0,windowData={},exclude=/z-?index|font-?weight|opacity|zoom|line-?height/i,defaultView=document.defaultView||{};jQuery.extend({noConflict:function(deep){window.$=_$;if(deep)window.jQuery=_jQuery;return jQuery;},isFunction:function(fn){return!!fn&&typeof fn!="string"&&!fn.nodeName&&fn.constructor!=Array&&/^[\s[]?function/.test(fn+"");},isXMLDoc:function(elem){return elem.documentElement&&!elem.body||elem.tagName&&elem.ownerDocument&&!elem.ownerDocument.body;},globalEval:function(data){data=jQuery.trim(data);if(data){var head=document.getElementsByTagName("head")[0]||document.documentElement,script=document.createElement("script");script.type="text/javascript";if(jQuery.browser.msie)script.text=data;else
+script.appendChild(document.createTextNode(data));head.insertBefore(script,head.firstChild);head.removeChild(script);}},nodeName:function(elem,name){return elem.nodeName&&elem.nodeName.toUpperCase()==name.toUpperCase();},cache:{},data:function(elem,name,data){elem=elem==window?windowData:elem;var id=elem[expando];if(!id)id=elem[expando]=++uuid;if(name&&!jQuery.cache[id])jQuery.cache[id]={};if(data!==undefined)jQuery.cache[id][name]=data;return name?jQuery.cache[id][name]:id;},removeData:function(elem,name){elem=elem==window?windowData:elem;var id=elem[expando];if(name){if(jQuery.cache[id]){delete jQuery.cache[id][name];name="";for(name in jQuery.cache[id])break;if(!name)jQuery.removeData(elem);}}else{try{delete elem[expando];}catch(e){if(elem.removeAttribute)elem.removeAttribute(expando);}delete jQuery.cache[id];}},each:function(object,callback,args){var name,i=0,length=object.length;if(args){if(length==undefined){for(name in object)if(callback.apply(object[name],args)===false)break;}else
+for(;i<length;)if(callback.apply(object[i++],args)===false)break;}else{if(length==undefined){for(name in object)if(callback.call(object[name],name,object[name])===false)break;}else
+for(var value=object[0];i<length&&callback.call(value,i,value)!==false;value=object[++i]){}}return object;},prop:function(elem,value,type,i,name){if(jQuery.isFunction(value))value=value.call(elem,i);return value&&value.constructor==Number&&type=="curCSS"&&!exclude.test(name)?value+"px":value;},className:{add:function(elem,classNames){jQuery.each((classNames||"").split(/\s+/),function(i,className){if(elem.nodeType==1&&!jQuery.className.has(elem.className,className))elem.className+=(elem.className?" ":"")+className;});},remove:function(elem,classNames){if(elem.nodeType==1)elem.className=classNames!=undefined?jQuery.grep(elem.className.split(/\s+/),function(className){return!jQuery.className.has(classNames,className);}).join(" "):"";},has:function(elem,className){return jQuery.inArray(className,(elem.className||elem).toString().split(/\s+/))>-1;}},swap:function(elem,options,callback){var old={};for(var name in options){old[name]=elem.style[name];elem.style[name]=options[name];}callback.call(elem);for(var name in options)elem.style[name]=old[name];},css:function(elem,name,force){if(name=="width"||name=="height"){var val,props={position:"absolute",visibility:"hidden",display:"block"},which=name=="width"?["Left","Right"]:["Top","Bottom"];function getWH(){val=name=="width"?elem.offsetWidth:elem.offsetHeight;var padding=0,border=0;jQuery.each(which,function(){padding+=parseFloat(jQuery.curCSS(elem,"padding"+this,true))||0;border+=parseFloat(jQuery.curCSS(elem,"border"+this+"Width",true))||0;});val-=Math.round(padding+border);}if(jQuery(elem).is(":visible"))getWH();else
+jQuery.swap(elem,props,getWH);return Math.max(0,val);}return jQuery.curCSS(elem,name,force);},curCSS:function(elem,name,force){var ret,style=elem.style;function color(elem){if(!jQuery.browser.safari)return false;var ret=defaultView.getComputedStyle(elem,null);return!ret||ret.getPropertyValue("color")=="";}if(name=="opacity"&&jQuery.browser.msie){ret=jQuery.attr(style,"opacity");return ret==""?"1":ret;}if(jQuery.browser.opera&&name=="display"){var save=style.outline;style.outline="0 solid black";style.outline=save;}if(name.match(/float/i))name=styleFloat;if(!force&&style&&style[name])ret=style[name];else if(defaultView.getComputedStyle){if(name.match(/float/i))name="float";name=name.replace(/([A-Z])/g,"-$1").toLowerCase();var computedStyle=defaultView.getComputedStyle(elem,null);if(computedStyle&&!color(elem))ret=computedStyle.getPropertyValue(name);else{var swap=[],stack=[],a=elem,i=0;for(;a&&color(a);a=a.parentNode)stack.unshift(a);for(;i<stack.length;i++)if(color(stack[i])){swap[i]=stack[i].style.display;stack[i].style.display="block";}ret=name=="display"&&swap[stack.length-1]!=null?"none":(computedStyle&&computedStyle.getPropertyValue(name))||"";for(i=0;i<swap.length;i++)if(swap[i]!=null)stack[i].style.display=swap[i];}if(name=="opacity"&&ret=="")ret="1";}else if(elem.currentStyle){var camelCase=name.replace(/\-(\w)/g,function(all,letter){return letter.toUpperCase();});ret=elem.currentStyle[name]||elem.currentStyle[camelCase];if(!/^\d+(px)?$/i.test(ret)&&/^\d/.test(ret)){var left=style.left,rsLeft=elem.runtimeStyle.left;elem.runtimeStyle.left=elem.currentStyle.left;style.left=ret||0;ret=style.pixelLeft+"px";style.left=left;elem.runtimeStyle.left=rsLeft;}}return ret;},clean:function(elems,context){var ret=[];context=context||document;if(typeof context.createElement=='undefined')context=context.ownerDocument||context[0]&&context[0].ownerDocument||document;jQuery.each(elems,function(i,elem){if(!elem)return;if(elem.constructor==Number)elem+='';if(typeof 
elem=="string"){elem=elem.replace(/(<(\w+)[^>]*?)\/>/g,function(all,front,tag){return tag.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?all:front+"></"+tag+">";});var tags=jQuery.trim(elem).toLowerCase(),div=context.createElement("div");var wrap=!tags.indexOf("<opt")&&[1,"<select multiple='multiple'>","</select>"]||!tags.indexOf("<leg")&&[1,"<fieldset>","</fieldset>"]||tags.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,"<table>","</table>"]||!tags.indexOf("<tr")&&[2,"<table><tbody>","</tbody></table>"]||(!tags.indexOf("<td")||!tags.indexOf("<th"))&&[3,"<table><tbody><tr>","</tr></tbody></table>"]||!tags.indexOf("<col")&&[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"]||jQuery.browser.msie&&[1,"div<div>","</div>"]||[0,"",""];div.innerHTML=wrap[1]+elem+wrap[2];while(wrap[0]--)div=div.lastChild;if(jQuery.browser.msie){var tbody=!tags.indexOf("<table")&&tags.indexOf("<tbody")<0?div.firstChild&&div.firstChild.childNodes:wrap[1]=="<table>"&&tags.indexOf("<tbody")<0?div.childNodes:[];for(var j=tbody.length-1;j>=0;--j)if(jQuery.nodeName(tbody[j],"tbody")&&!tbody[j].childNodes.length)tbody[j].parentNode.removeChild(tbody[j]);if(/^\s/.test(elem))div.insertBefore(context.createTextNode(elem.match(/^\s*/)[0]),div.firstChild);}elem=jQuery.makeArray(div.childNodes);}if(elem.length===0&&(!jQuery.nodeName(elem,"form")&&!jQuery.nodeName(elem,"select")))return;if(elem[0]==undefined||jQuery.nodeName(elem,"form")||elem.options)ret.push(elem);else
+ret=jQuery.merge(ret,elem);});return ret;},attr:function(elem,name,value){if(!elem||elem.nodeType==3||elem.nodeType==8)return undefined;var notxml=!jQuery.isXMLDoc(elem),set=value!==undefined,msie=jQuery.browser.msie;name=notxml&&jQuery.props[name]||name;if(elem.tagName){var special=/href|src|style/.test(name);if(name=="selected"&&jQuery.browser.safari)elem.parentNode.selectedIndex;if(name in elem&&notxml&&!special){if(set){if(name=="type"&&jQuery.nodeName(elem,"input")&&elem.parentNode)throw"type property can't be changed";elem[name]=value;}if(jQuery.nodeName(elem,"form")&&elem.getAttributeNode(name))return elem.getAttributeNode(name).nodeValue;return elem[name];}if(msie&&notxml&&name=="style")return jQuery.attr(elem.style,"cssText",value);if(set)elem.setAttribute(name,""+value);var attr=msie&&notxml&&special?elem.getAttribute(name,2):elem.getAttribute(name);return attr===null?undefined:attr;}if(msie&&name=="opacity"){if(set){elem.zoom=1;elem.filter=(elem.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(value)+''=="NaN"?"":"alpha(opacity="+value*100+")");}return elem.filter&&elem.filter.indexOf("opacity=")>=0?(parseFloat(elem.filter.match(/opacity=([^)]*)/)[1])/100)+'':"";}name=name.replace(/-([a-z])/ig,function(all,letter){return letter.toUpperCase();});if(set)elem[name]=value;return elem[name];},trim:function(text){return(text||"").replace(/^\s+|\s+$/g,"");},makeArray:function(array){var ret=[];if(array!=null){var i=array.length;if(i==null||array.split||array.setInterval||array.call)ret[0]=array;else
+while(i)ret[--i]=array[i];}return ret;},inArray:function(elem,array){for(var i=0,length=array.length;i<length;i++)if(array[i]===elem)return i;return-1;},merge:function(first,second){var i=0,elem,pos=first.length;if(jQuery.browser.msie){while(elem=second[i++])if(elem.nodeType!=8)first[pos++]=elem;}else
+while(elem=second[i++])first[pos++]=elem;return first;},unique:function(array){var ret=[],done={};try{for(var i=0,length=array.length;i<length;i++){var id=jQuery.data(array[i]);if(!done[id]){done[id]=true;ret.push(array[i]);}}}catch(e){ret=array;}return ret;},grep:function(elems,callback,inv){var ret=[];for(var i=0,length=elems.length;i<length;i++)if(!inv!=!callback(elems[i],i))ret.push(elems[i]);return ret;},map:function(elems,callback){var ret=[];for(var i=0,length=elems.length;i<length;i++){var value=callback(elems[i],i);if(value!=null)ret[ret.length]=value;}return ret.concat.apply([],ret);}});var userAgent=navigator.userAgent.toLowerCase();jQuery.browser={version:(userAgent.match(/.+(?:rv|it|ra|ie)[\/: ]([\d.]+)/)||[])[1],safari:/webkit/.test(userAgent),opera:/opera/.test(userAgent),msie:/msie/.test(userAgent)&&!/opera/.test(userAgent),mozilla:/mozilla/.test(userAgent)&&!/(compatible|webkit)/.test(userAgent)};var styleFloat=jQuery.browser.msie?"styleFloat":"cssFloat";jQuery.extend({boxModel:!jQuery.browser.msie||document.compatMode=="CSS1Compat",props:{"for":"htmlFor","class":"className","float":styleFloat,cssFloat:styleFloat,styleFloat:styleFloat,readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing"}});jQuery.each({parent:function(elem){return elem.parentNode;},parents:function(elem){return jQuery.dir(elem,"parentNode");},next:function(elem){return jQuery.nth(elem,2,"nextSibling");},prev:function(elem){return jQuery.nth(elem,2,"previousSibling");},nextAll:function(elem){return jQuery.dir(elem,"nextSibling");},prevAll:function(elem){return jQuery.dir(elem,"previousSibling");},siblings:function(elem){return jQuery.sibling(elem.parentNode.firstChild,elem);},children:function(elem){return jQuery.sibling(elem.firstChild);},contents:function(elem){return jQuery.nodeName(elem,"iframe")?elem.contentDocument||elem.contentWindow.document:jQuery.makeArray(elem.childNodes);}},function(name,fn){jQuery.fn[name]=function(selector){var 
ret=jQuery.map(this,fn);if(selector&&typeof selector=="string")ret=jQuery.multiFilter(selector,ret);return this.pushStack(jQuery.unique(ret));};});jQuery.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(name,original){jQuery.fn[name]=function(){var args=arguments;return this.each(function(){for(var i=0,length=args.length;i<length;i++)jQuery(args[i])[original](this);});};});jQuery.each({removeAttr:function(name){jQuery.attr(this,name,"");if(this.nodeType==1)this.removeAttribute(name);},addClass:function(classNames){jQuery.className.add(this,classNames);},removeClass:function(classNames){jQuery.className.remove(this,classNames);},toggleClass:function(classNames){jQuery.className[jQuery.className.has(this,classNames)?"remove":"add"](this,classNames);},remove:function(selector){if(!selector||jQuery.filter(selector,[this]).r.length){jQuery("*",this).add(this).each(function(){jQuery.event.remove(this);jQuery.removeData(this);});if(this.parentNode)this.parentNode.removeChild(this);}},empty:function(){jQuery(">*",this).remove();while(this.firstChild)this.removeChild(this.firstChild);}},function(name,fn){jQuery.fn[name]=function(){return this.each(fn,arguments);};});jQuery.each(["Height","Width"],function(i,name){var type=name.toLowerCase();jQuery.fn[type]=function(size){return this[0]==window?jQuery.browser.opera&&document.body["client"+name]||jQuery.browser.safari&&window["inner"+name]||document.compatMode=="CSS1Compat"&&document.documentElement["client"+name]||document.body["client"+name]:this[0]==document?Math.max(Math.max(document.body["scroll"+name],document.documentElement["scroll"+name]),Math.max(document.body["offset"+name],document.documentElement["offset"+name])):size==undefined?(this.length?jQuery.css(this[0],type):null):this.css(type,size.constructor==String?size:size+"px");};});function num(elem,prop){return elem[0]&&parseInt(jQuery.curCSS(elem[0],prop,true),10)||0;}var 
chars=jQuery.browser.safari&&parseInt(jQuery.browser.version)<417?"(?:[\\w*_-]|\\\\.)":"(?:[\\w\u0128-\uFFFF*_-]|\\\\.)",quickChild=new RegExp("^>\\s*("+chars+"+)"),quickID=new RegExp("^("+chars+"+)(#)("+chars+"+)"),quickClass=new RegExp("^([#.]?)("+chars+"*)");jQuery.extend({expr:{"":function(a,i,m){return m[2]=="*"||jQuery.nodeName(a,m[2]);},"#":function(a,i,m){return a.getAttribute("id")==m[2];},":":{lt:function(a,i,m){return i<m[3]-0;},gt:function(a,i,m){return i>m[3]-0;},nth:function(a,i,m){return m[3]-0==i;},eq:function(a,i,m){return m[3]-0==i;},first:function(a,i){return i==0;},last:function(a,i,m,r){return i==r.length-1;},even:function(a,i){return i%2==0;},odd:function(a,i){return i%2;},"first-child":function(a){return a.parentNode.getElementsByTagName("*")[0]==a;},"last-child":function(a){return jQuery.nth(a.parentNode.lastChild,1,"previousSibling")==a;},"only-child":function(a){return!jQuery.nth(a.parentNode.lastChild,2,"previousSibling");},parent:function(a){return a.firstChild;},empty:function(a){return!a.firstChild;},contains:function(a,i,m){return(a.textContent||a.innerText||jQuery(a).text()||"").indexOf(m[3])>=0;},visible:function(a){return"hidden"!=a.type&&jQuery.css(a,"display")!="none"&&jQuery.css(a,"visibility")!="hidden";},hidden:function(a){return"hidden"==a.type||jQuery.css(a,"display")=="none"||jQuery.css(a,"visibility")=="hidden";},enabled:function(a){return!a.disabled;},disabled:function(a){return a.disabled;},checked:function(a){return a.checked;},selected:function(a){return 
a.selected||jQuery.attr(a,"selected");},text:function(a){return"text"==a.type;},radio:function(a){return"radio"==a.type;},checkbox:function(a){return"checkbox"==a.type;},file:function(a){return"file"==a.type;},password:function(a){return"password"==a.type;},submit:function(a){return"submit"==a.type;},image:function(a){return"image"==a.type;},reset:function(a){return"reset"==a.type;},button:function(a){return"button"==a.type||jQuery.nodeName(a,"button");},input:function(a){return/input|select|textarea|button/i.test(a.nodeName);},has:function(a,i,m){return jQuery.find(m[3],a).length;},header:function(a){return/h\d/i.test(a.nodeName);},animated:function(a){return jQuery.grep(jQuery.timers,function(fn){return a==fn.elem;}).length;}}},parse:[/^(\[) *@?([\w-]+) *([!*$^~=]*) *('?"?)(.*?)\4 *\]/,/^(:)([\w-]+)\("?'?(.*?(\(.*?\))?[^(]*?)"?'?\)/,new RegExp("^([:.#]*)("+chars+"+)")],multiFilter:function(expr,elems,not){var old,cur=[];while(expr&&expr!=old){old=expr;var f=jQuery.filter(expr,elems,not);expr=f.t.replace(/^\s*,\s*/,"");cur=not?elems=f.r:jQuery.merge(cur,f.r);}return cur;},find:function(t,context){if(typeof t!="string")return[t];if(context&&context.nodeType!=1&&context.nodeType!=9)return[];context=context||document;var ret=[context],done=[],last,nodeName;while(t&&last!=t){var r=[];last=t;t=jQuery.trim(t);var foundToken=false,re=quickChild,m=re.exec(t);if(m){nodeName=m[1].toUpperCase();for(var i=0;ret[i];i++)for(var c=ret[i].firstChild;c;c=c.nextSibling)if(c.nodeType==1&&(nodeName=="*"||c.nodeName.toUpperCase()==nodeName))r.push(c);ret=r;t=t.replace(re,"");if(t.indexOf(" ")==0)continue;foundToken=true;}else{re=/^([>+~])\s*(\w*)/i;if((m=re.exec(t))!=null){r=[];var merge={};nodeName=m[2].toUpperCase();m=m[1];for(var j=0,rl=ret.length;j<rl;j++){var n=m=="~"||m=="+"?ret[j].nextSibling:ret[j].firstChild;for(;n;n=n.nextSibling)if(n.nodeType==1){var 
id=jQuery.data(n);if(m=="~"&&merge[id])break;if(!nodeName||n.nodeName.toUpperCase()==nodeName){if(m=="~")merge[id]=true;r.push(n);}if(m=="+")break;}}ret=r;t=jQuery.trim(t.replace(re,""));foundToken=true;}}if(t&&!foundToken){if(!t.indexOf(",")){if(context==ret[0])ret.shift();done=jQuery.merge(done,ret);r=ret=[context];t=" "+t.substr(1,t.length);}else{var re2=quickID;var m=re2.exec(t);if(m){m=[0,m[2],m[3],m[1]];}else{re2=quickClass;m=re2.exec(t);}m[2]=m[2].replace(/\\/g,"");var elem=ret[ret.length-1];if(m[1]=="#"&&elem&&elem.getElementById&&!jQuery.isXMLDoc(elem)){var oid=elem.getElementById(m[2]);if((jQuery.browser.msie||jQuery.browser.opera)&&oid&&typeof oid.id=="string"&&oid.id!=m[2])oid=jQuery('[@id="'+m[2]+'"]',elem)[0];ret=r=oid&&(!m[3]||jQuery.nodeName(oid,m[3]))?[oid]:[];}else{for(var i=0;ret[i];i++){var tag=m[1]=="#"&&m[3]?m[3]:m[1]!=""||m[0]==""?"*":m[2];if(tag=="*"&&ret[i].nodeName.toLowerCase()=="object")tag="param";r=jQuery.merge(r,ret[i].getElementsByTagName(tag));}if(m[1]==".")r=jQuery.classFilter(r,m[2]);if(m[1]=="#"){var tmp=[];for(var i=0;r[i];i++)if(r[i].getAttribute("id")==m[2]){tmp=[r[i]];break;}r=tmp;}ret=r;}t=t.replace(re2,"");}}if(t){var val=jQuery.filter(t,r);ret=r=val.r;t=jQuery.trim(val.t);}}if(t)ret=[];if(ret&&context==ret[0])ret.shift();done=jQuery.merge(done,ret);return done;},classFilter:function(r,m,not){m=" "+m+" ";var tmp=[];for(var i=0;r[i];i++){var pass=(" "+r[i].className+" ").indexOf(m)>=0;if(!not&&pass||not&&!pass)tmp.push(r[i]);}return tmp;},filter:function(t,r,not){var last;while(t&&t!=last){last=t;var p=jQuery.parse,m;for(var i=0;p[i];i++){m=p[i].exec(t);if(m){t=t.substring(m[0].length);m[2]=m[2].replace(/\\/g,"");break;}}if(!m)break;if(m[1]==":"&&m[2]=="not")r=isSimple.test(m[3])?jQuery.filter(m[3],r,true).r:jQuery(r).not(m[3]);else if(m[1]==".")r=jQuery.classFilter(r,m[2],not);else if(m[1]=="["){var tmp=[],type=m[3];for(var i=0,rl=r.length;i<rl;i++){var 
a=r[i],z=a[jQuery.props[m[2]]||m[2]];if(z==null||/href|src|selected/.test(m[2]))z=jQuery.attr(a,m[2])||'';if((type==""&&!!z||type=="="&&z==m[5]||type=="!="&&z!=m[5]||type=="^="&&z&&!z.indexOf(m[5])||type=="$="&&z.substr(z.length-m[5].length)==m[5]||(type=="*="||type=="~=")&&z.indexOf(m[5])>=0)^not)tmp.push(a);}r=tmp;}else if(m[1]==":"&&m[2]=="nth-child"){var merge={},tmp=[],test=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(m[3]=="even"&&"2n"||m[3]=="odd"&&"2n+1"||!/\D/.test(m[3])&&"0n+"+m[3]||m[3]),first=(test[1]+(test[2]||1))-0,last=test[3]-0;for(var i=0,rl=r.length;i<rl;i++){var node=r[i],parentNode=node.parentNode,id=jQuery.data(parentNode);if(!merge[id]){var c=1;for(var n=parentNode.firstChild;n;n=n.nextSibling)if(n.nodeType==1)n.nodeIndex=c++;merge[id]=true;}var add=false;if(first==0){if(node.nodeIndex==last)add=true;}else if((node.nodeIndex-last)%first==0&&(node.nodeIndex-last)/first>=0)add=true;if(add^not)tmp.push(node);}r=tmp;}else{var fn=jQuery.expr[m[1]];if(typeof fn=="object")fn=fn[m[2]];if(typeof fn=="string")fn=eval("false||function(a,i){return "+fn+";}");r=jQuery.grep(r,function(elem,i){return fn(elem,i,m,r);},not);}}return{r:r,t:t};},dir:function(elem,dir){var matched=[],cur=elem[dir];while(cur&&cur!=document){if(cur.nodeType==1)matched.push(cur);cur=cur[dir];}return matched;},nth:function(cur,result,dir,elem){result=result||1;var num=0;for(;cur;cur=cur[dir])if(cur.nodeType==1&&++num==result)break;return cur;},sibling:function(n,elem){var r=[];for(;n;n=n.nextSibling){if(n.nodeType==1&&n!=elem)r.push(n);}return r;}});jQuery.event={add:function(elem,types,handler,data){if(elem.nodeType==3||elem.nodeType==8)return;if(jQuery.browser.msie&&elem.setInterval)elem=window;if(!handler.guid)handler.guid=this.guid++;if(data!=undefined){var fn=handler;handler=this.proxy(fn,function(){return fn.apply(this,arguments);});handler.data=data;}var 
events=jQuery.data(elem,"events")||jQuery.data(elem,"events",{}),handle=jQuery.data(elem,"handle")||jQuery.data(elem,"handle",function(){if(typeof jQuery!="undefined"&&!jQuery.event.triggered)return jQuery.event.handle.apply(arguments.callee.elem,arguments);});handle.elem=elem;jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];handler.type=parts[1];var handlers=events[type];if(!handlers){handlers=events[type]={};if(!jQuery.event.special[type]||jQuery.event.special[type].setup.call(elem)===false){if(elem.addEventListener)elem.addEventListener(type,handle,false);else if(elem.attachEvent)elem.attachEvent("on"+type,handle);}}handlers[handler.guid]=handler;jQuery.event.global[type]=true;});elem=null;},guid:1,global:{},remove:function(elem,types,handler){if(elem.nodeType==3||elem.nodeType==8)return;var events=jQuery.data(elem,"events"),ret,index;if(events){if(types==undefined||(typeof types=="string"&&types.charAt(0)=="."))for(var type in events)this.remove(elem,type+(types||""));else{if(types.type){handler=types.handler;types=types.type;}jQuery.each(types.split(/\s+/),function(index,type){var parts=type.split(".");type=parts[0];if(events[type]){if(handler)delete events[type][handler.guid];else
+for(handler in events[type])if(!parts[1]||events[type][handler].type==parts[1])delete events[type][handler];for(ret in events[type])break;if(!ret){if(!jQuery.event.special[type]||jQuery.event.special[type].teardown.call(elem)===false){if(elem.removeEventListener)elem.removeEventListener(type,jQuery.data(elem,"handle"),false);else if(elem.detachEvent)elem.detachEvent("on"+type,jQuery.data(elem,"handle"));}ret=null;delete events[type];}}});}for(ret in events)break;if(!ret){var handle=jQuery.data(elem,"handle");if(handle)handle.elem=null;jQuery.removeData(elem,"events");jQuery.removeData(elem,"handle");}}},trigger:function(type,data,elem,donative,extra){data=jQuery.makeArray(data);if(type.indexOf("!")>=0){type=type.slice(0,-1);var exclusive=true;}if(!elem){if(this.global[type])jQuery("*").add([window,document]).trigger(type,data);}else{if(elem.nodeType==3||elem.nodeType==8)return undefined;var val,ret,fn=jQuery.isFunction(elem[type]||null),event=!data[0]||!data[0].preventDefault;if(event){data.unshift({type:type,target:elem,preventDefault:function(){},stopPropagation:function(){},timeStamp:now()});data[0][expando]=true;}data[0].type=type;if(exclusive)data[0].exclusive=true;var handle=jQuery.data(elem,"handle");if(handle)val=handle.apply(elem,data);if((!fn||(jQuery.nodeName(elem,'a')&&type=="click"))&&elem["on"+type]&&elem["on"+type].apply(elem,data)===false)val=false;if(event)data.shift();if(extra&&jQuery.isFunction(extra)){ret=extra.apply(elem,val==null?data:data.concat(val));if(ret!==undefined)val=ret;}if(fn&&donative!==false&&val!==false&&!(jQuery.nodeName(elem,'a')&&type=="click")){this.triggered=true;try{elem[type]();}catch(e){}}this.triggered=false;}return val;},handle:function(event){var val,ret,namespace,all,handlers;event=arguments[0]=jQuery.event.fix(event||window.event);namespace=event.type.split(".");event.type=namespace[0];namespace=namespace[1];all=!namespace&&!event.exclusive;handlers=(jQuery.data(this,"events")||{})[event.type];for(var j in 
handlers){var handler=handlers[j];if(all||handler.type==namespace){event.handler=handler;event.data=handler.data;ret=handler.apply(this,arguments);if(val!==false)val=ret;if(ret===false){event.preventDefault();event.stopPropagation();}}}return val;},fix:function(event){if(event[expando]==true)return event;var originalEvent=event;event={originalEvent:originalEvent};var props="altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target timeStamp toElement type view wheelDelta which".split(" ");for(var i=props.length;i;i--)event[props[i]]=originalEvent[props[i]];event[expando]=true;event.preventDefault=function(){if(originalEvent.preventDefault)originalEvent.preventDefault();originalEvent.returnValue=false;};event.stopPropagation=function(){if(originalEvent.stopPropagation)originalEvent.stopPropagation();originalEvent.cancelBubble=true;};event.timeStamp=event.timeStamp||now();if(!event.target)event.target=event.srcElement||document;if(event.target.nodeType==3)event.target=event.target.parentNode;if(!event.relatedTarget&&event.fromElement)event.relatedTarget=event.fromElement==event.target?event.toElement:event.fromElement;if(event.pageX==null&&event.clientX!=null){var doc=document.documentElement,body=document.body;event.pageX=event.clientX+(doc&&doc.scrollLeft||body&&body.scrollLeft||0)-(doc.clientLeft||0);event.pageY=event.clientY+(doc&&doc.scrollTop||body&&body.scrollTop||0)-(doc.clientTop||0);}if(!event.which&&((event.charCode||event.charCode===0)?event.charCode:event.keyCode))event.which=event.charCode||event.keyCode;if(!event.metaKey&&event.ctrlKey)event.metaKey=event.ctrlKey;if(!event.which&&event.button)event.which=(event.button&1?1:(event.button&2?3:(event.button&4?2:0)));return 
event;},proxy:function(fn,proxy){proxy.guid=fn.guid=fn.guid||proxy.guid||this.guid++;return proxy;},special:{ready:{setup:function(){bindReady();return;},teardown:function(){return;}},mouseenter:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseover",jQuery.event.special.mouseenter.handler);return true;},teardown:function(){if(jQuery.browser.msie)return false;jQuery(this).unbind("mouseover",jQuery.event.special.mouseenter.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseenter";return jQuery.event.handle.apply(this,arguments);}},mouseleave:{setup:function(){if(jQuery.browser.msie)return false;jQuery(this).bind("mouseout",jQuery.event.special.mouseleave.handler);return true;},teardown:function(){if(jQuery.browser.msie)return false;jQuery(this).unbind("mouseout",jQuery.event.special.mouseleave.handler);return true;},handler:function(event){if(withinElement(event,this))return true;event.type="mouseleave";return jQuery.event.handle.apply(this,arguments);}}}};jQuery.fn.extend({bind:function(type,data,fn){return type=="unload"?this.one(type,data,fn):this.each(function(){jQuery.event.add(this,type,fn||data,fn&&data);});},one:function(type,data,fn){var one=jQuery.event.proxy(fn||data,function(event){jQuery(this).unbind(event,one);return(fn||data).apply(this,arguments);});return this.each(function(){jQuery.event.add(this,type,one,fn&&data);});},unbind:function(type,fn){return this.each(function(){jQuery.event.remove(this,type,fn);});},trigger:function(type,data,fn){return this.each(function(){jQuery.event.trigger(type,data,this,true,fn);});},triggerHandler:function(type,data,fn){return this[0]&&jQuery.event.trigger(type,data,this[0],false,fn);},toggle:function(fn){var args=arguments,i=1;while(i<args.length)jQuery.event.proxy(fn,args[i++]);return this.click(jQuery.event.proxy(fn,function(event){this.lastToggle=(this.lastToggle||0)%i;event.preventDefault();return 
args[this.lastToggle++].apply(this,arguments)||false;}));},hover:function(fnOver,fnOut){return this.bind('mouseenter',fnOver).bind('mouseleave',fnOut);},ready:function(fn){bindReady();if(jQuery.isReady)fn.call(document,jQuery);else
+jQuery.readyList.push(function(){return fn.call(this,jQuery);});return this;}});jQuery.extend({isReady:false,readyList:[],ready:function(){if(!jQuery.isReady){jQuery.isReady=true;if(jQuery.readyList){jQuery.each(jQuery.readyList,function(){this.call(document);});jQuery.readyList=null;}jQuery(document).triggerHandler("ready");}}});var readyBound=false;function bindReady(){if(readyBound)return;readyBound=true;if(document.addEventListener&&!jQuery.browser.opera)document.addEventListener("DOMContentLoaded",jQuery.ready,false);if(jQuery.browser.msie&&window==top)(function(){if(jQuery.isReady)return;try{document.documentElement.doScroll("left");}catch(error){setTimeout(arguments.callee,0);return;}jQuery.ready();})();if(jQuery.browser.opera)document.addEventListener("DOMContentLoaded",function(){if(jQuery.isReady)return;for(var i=0;i<document.styleSheets.length;i++)if(document.styleSheets[i].disabled){setTimeout(arguments.callee,0);return;}jQuery.ready();},false);if(jQuery.browser.safari){var numStyles;(function(){if(jQuery.isReady)return;if(document.readyState!="loaded"&&document.readyState!="complete"){setTimeout(arguments.callee,0);return;}if(numStyles===undefined)numStyles=jQuery("style, link[rel=stylesheet]").length;if(document.styleSheets.length!=numStyles){setTimeout(arguments.callee,0);return;}jQuery.ready();})();}jQuery.event.add(window,"load",jQuery.ready);}jQuery.each(("blur,focus,load,resize,scroll,unload,click,dblclick,"+"mousedown,mouseup,mousemove,mouseover,mouseout,change,select,"+"submit,keydown,keypress,keyup,error").split(","),function(i,name){jQuery.fn[name]=function(fn){return fn?this.bind(name,fn):this.trigger(name);};});var withinElement=function(event,elem){var parent=event.relatedTarget;while(parent&&parent!=elem)try{parent=parent.parentNode;}catch(error){parent=elem;}return 
parent==elem;};jQuery(window).bind("unload",function(){jQuery("*").add(document).unbind();});jQuery.fn.extend({_load:jQuery.fn.load,load:function(url,params,callback){if(typeof url!='string')return this._load(url);var off=url.indexOf(" ");if(off>=0){var selector=url.slice(off,url.length);url=url.slice(0,off);}callback=callback||function(){};var type="GET";if(params)if(jQuery.isFunction(params)){callback=params;params=null;}else{params=jQuery.param(params);type="POST";}var self=this;jQuery.ajax({url:url,type:type,dataType:"html",data:params,complete:function(res,status){if(status=="success"||status=="notmodified")self.html(selector?jQuery("<div/>").append(res.responseText.replace(/<script(.|\s)*?\/script>/g,"")).find(selector):res.responseText);self.each(callback,[res.responseText,status,res]);}});return this;},serialize:function(){return jQuery.param(this.serializeArray());},serializeArray:function(){return this.map(function(){return jQuery.nodeName(this,"form")?jQuery.makeArray(this.elements):this;}).filter(function(){return this.name&&!this.disabled&&(this.checked||/select|textarea/i.test(this.nodeName)||/text|hidden|password/i.test(this.type));}).map(function(i,elem){var val=jQuery(this).val();return val==null?null:val.constructor==Array?jQuery.map(val,function(val,i){return{name:elem.name,value:val};}):{name:elem.name,value:val};}).get();}});jQuery.each("ajaxStart,ajaxStop,ajaxComplete,ajaxError,ajaxSuccess,ajaxSend".split(","),function(i,o){jQuery.fn[o]=function(f){return this.bind(o,f);};});var jsc=now();jQuery.extend({get:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data=null;}return jQuery.ajax({type:"GET",url:url,data:data,success:callback,dataType:type});},getScript:function(url,callback){return jQuery.get(url,null,callback,"script");},getJSON:function(url,data,callback){return jQuery.get(url,data,callback,"json");},post:function(url,data,callback,type){if(jQuery.isFunction(data)){callback=data;data={};}return 
jQuery.ajax({type:"POST",url:url,data:data,success:callback,dataType:type});},ajaxSetup:function(settings){jQuery.extend(jQuery.ajaxSettings,settings);},ajaxSettings:{url:location.href,global:true,type:"GET",timeout:0,contentType:"application/x-www-form-urlencoded",processData:true,async:true,data:null,username:null,password:null,accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},ajax:function(s){s=jQuery.extend(true,s,jQuery.extend(true,{},jQuery.ajaxSettings,s));var jsonp,jsre=/=\?(&|$)/g,status,data,type=s.type.toUpperCase();if(s.data&&s.processData&&typeof s.data!="string")s.data=jQuery.param(s.data);if(s.dataType=="jsonp"){if(type=="GET"){if(!s.url.match(jsre))s.url+=(s.url.match(/\?/)?"&":"?")+(s.jsonp||"callback")+"=?";}else if(!s.data||!s.data.match(jsre))s.data=(s.data?s.data+"&":"")+(s.jsonp||"callback")+"=?";s.dataType="json";}if(s.dataType=="json"&&(s.data&&s.data.match(jsre)||s.url.match(jsre))){jsonp="jsonp"+jsc++;if(s.data)s.data=(s.data+"").replace(jsre,"="+jsonp+"$1");s.url=s.url.replace(jsre,"="+jsonp+"$1");s.dataType="script";window[jsonp]=function(tmp){data=tmp;success();complete();window[jsonp]=undefined;try{delete window[jsonp];}catch(e){}if(head)head.removeChild(script);};}if(s.dataType=="script"&&s.cache==null)s.cache=false;if(s.cache===false&&type=="GET"){var ts=now();var ret=s.url.replace(/(\?|&)_=.*?(&|$)/,"$1_="+ts+"$2");s.url=ret+((ret==s.url)?(s.url.match(/\?/)?"&":"?")+"_="+ts:"");}if(s.data&&type=="GET"){s.url+=(s.url.match(/\?/)?"&":"?")+s.data;s.data=null;}if(s.global&&!jQuery.active++)jQuery.event.trigger("ajaxStart");var remote=/^(?:\w+:)?\/\/([^\/?#]+)/;if(s.dataType=="script"&&type=="GET"&&remote.test(s.url)&&remote.exec(s.url)[1]!=location.host){var head=document.getElementsByTagName("head")[0];var 
script=document.createElement("script");script.src=s.url;if(s.scriptCharset)script.charset=s.scriptCharset;if(!jsonp){var done=false;script.onload=script.onreadystatechange=function(){if(!done&&(!this.readyState||this.readyState=="loaded"||this.readyState=="complete")){done=true;success();complete();head.removeChild(script);}};}head.appendChild(script);return undefined;}var requestDone=false;var xhr=window.ActiveXObject?new ActiveXObject("Microsoft.XMLHTTP"):new XMLHttpRequest();if(s.username)xhr.open(type,s.url,s.async,s.username,s.password);else
+xhr.open(type,s.url,s.async);try{if(s.data)xhr.setRequestHeader("Content-Type",s.contentType);if(s.ifModified)xhr.setRequestHeader("If-Modified-Since",jQuery.lastModified[s.url]||"Thu, 01 Jan 1970 00:00:00 GMT");xhr.setRequestHeader("X-Requested-With","XMLHttpRequest");xhr.setRequestHeader("Accept",s.dataType&&s.accepts[s.dataType]?s.accepts[s.dataType]+", */*":s.accepts._default);}catch(e){}if(s.beforeSend&&s.beforeSend(xhr,s)===false){s.global&&jQuery.active--;xhr.abort();return false;}if(s.global)jQuery.event.trigger("ajaxSend",[xhr,s]);var onreadystatechange=function(isTimeout){if(!requestDone&&xhr&&(xhr.readyState==4||isTimeout=="timeout")){requestDone=true;if(ival){clearInterval(ival);ival=null;}status=isTimeout=="timeout"&&"timeout"||!jQuery.httpSuccess(xhr)&&"error"||s.ifModified&&jQuery.httpNotModified(xhr,s.url)&&"notmodified"||"success";if(status=="success"){try{data=jQuery.httpData(xhr,s.dataType,s.dataFilter);}catch(e){status="parsererror";}}if(status=="success"){var modRes;try{modRes=xhr.getResponseHeader("Last-Modified");}catch(e){}if(s.ifModified&&modRes)jQuery.lastModified[s.url]=modRes;if(!jsonp)success();}else
+jQuery.handleError(s,xhr,status);complete();if(s.async)xhr=null;}};if(s.async){var ival=setInterval(onreadystatechange,13);if(s.timeout>0)setTimeout(function(){if(xhr){xhr.abort();if(!requestDone)onreadystatechange("timeout");}},s.timeout);}try{xhr.send(s.data);}catch(e){jQuery.handleError(s,xhr,null,e);}if(!s.async)onreadystatechange();function success(){if(s.success)s.success(data,status);if(s.global)jQuery.event.trigger("ajaxSuccess",[xhr,s]);}function complete(){if(s.complete)s.complete(xhr,status);if(s.global)jQuery.event.trigger("ajaxComplete",[xhr,s]);if(s.global&&!--jQuery.active)jQuery.event.trigger("ajaxStop");}return xhr;},handleError:function(s,xhr,status,e){if(s.error)s.error(xhr,status,e);if(s.global)jQuery.event.trigger("ajaxError",[xhr,s,e]);},active:0,httpSuccess:function(xhr){try{return!xhr.status&&location.protocol=="file:"||(xhr.status>=200&&xhr.status<300)||xhr.status==304||xhr.status==1223||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpNotModified:function(xhr,url){try{var xhrRes=xhr.getResponseHeader("Last-Modified");return xhr.status==304||xhrRes==jQuery.lastModified[url]||jQuery.browser.safari&&xhr.status==undefined;}catch(e){}return false;},httpData:function(xhr,type,filter){var ct=xhr.getResponseHeader("content-type"),xml=type=="xml"||!type&&ct&&ct.indexOf("xml")>=0,data=xml?xhr.responseXML:xhr.responseText;if(xml&&data.documentElement.tagName=="parsererror")throw"parsererror";if(filter)data=filter(data,type);if(type=="script")jQuery.globalEval(data);if(type=="json")data=eval("("+data+")");return data;},param:function(a){var s=[];if(a.constructor==Array||a.jquery)jQuery.each(a,function(){s.push(encodeURIComponent(this.name)+"="+encodeURIComponent(this.value));});else
+for(var j in a)if(a[j]&&a[j].constructor==Array)jQuery.each(a[j],function(){s.push(encodeURIComponent(j)+"="+encodeURIComponent(this));});else
+s.push(encodeURIComponent(j)+"="+encodeURIComponent(jQuery.isFunction(a[j])?a[j]():a[j]));return s.join("&").replace(/%20/g,"+");}});jQuery.fn.extend({show:function(speed,callback){return speed?this.animate({height:"show",width:"show",opacity:"show"},speed,callback):this.filter(":hidden").each(function(){this.style.display=this.oldblock||"";if(jQuery.css(this,"display")=="none"){var elem=jQuery("<"+this.tagName+" />").appendTo("body");this.style.display=elem.css("display");if(this.style.display=="none")this.style.display="block";elem.remove();}}).end();},hide:function(speed,callback){return speed?this.animate({height:"hide",width:"hide",opacity:"hide"},speed,callback):this.filter(":visible").each(function(){this.oldblock=this.oldblock||jQuery.css(this,"display");this.style.display="none";}).end();},_toggle:jQuery.fn.toggle,toggle:function(fn,fn2){return jQuery.isFunction(fn)&&jQuery.isFunction(fn2)?this._toggle.apply(this,arguments):fn?this.animate({height:"toggle",width:"toggle",opacity:"toggle"},fn,fn2):this.each(function(){jQuery(this)[jQuery(this).is(":hidden")?"show":"hide"]();});},slideDown:function(speed,callback){return this.animate({height:"show"},speed,callback);},slideUp:function(speed,callback){return this.animate({height:"hide"},speed,callback);},slideToggle:function(speed,callback){return this.animate({height:"toggle"},speed,callback);},fadeIn:function(speed,callback){return this.animate({opacity:"show"},speed,callback);},fadeOut:function(speed,callback){return this.animate({opacity:"hide"},speed,callback);},fadeTo:function(speed,to,callback){return this.animate({opacity:to},speed,callback);},animate:function(prop,speed,easing,callback){var optall=jQuery.speed(speed,easing,callback);return this[optall.queue===false?"each":"queue"](function(){if(this.nodeType!=1)return false;var opt=jQuery.extend({},optall),p,hidden=jQuery(this).is(":hidden"),self=this;for(p in prop){if(prop[p]=="hide"&&hidden||prop[p]=="show"&&!hidden)return 
opt.complete.call(this);if(p=="height"||p=="width"){opt.display=jQuery.css(this,"display");opt.overflow=this.style.overflow;}}if(opt.overflow!=null)this.style.overflow="hidden";opt.curAnim=jQuery.extend({},prop);jQuery.each(prop,function(name,val){var e=new jQuery.fx(self,opt,name);if(/toggle|show|hide/.test(val))e[val=="toggle"?hidden?"show":"hide":val](prop);else{var parts=val.toString().match(/^([+-]=)?([\d+-.]+)(.*)$/),start=e.cur(true)||0;if(parts){var end=parseFloat(parts[2]),unit=parts[3]||"px";if(unit!="px"){self.style[name]=(end||1)+unit;start=((end||1)/e.cur(true))*start;self.style[name]=start+unit;}if(parts[1])end=((parts[1]=="-="?-1:1)*end)+start;e.custom(start,end,unit);}else
+e.custom(start,val,"");}});return true;});},queue:function(type,fn){if(jQuery.isFunction(type)||(type&&type.constructor==Array)){fn=type;type="fx";}if(!type||(typeof type=="string"&&!fn))return queue(this[0],type);return this.each(function(){if(fn.constructor==Array)queue(this,type,fn);else{queue(this,type).push(fn);if(queue(this,type).length==1)fn.call(this);}});},stop:function(clearQueue,gotoEnd){var timers=jQuery.timers;if(clearQueue)this.queue([]);this.each(function(){for(var i=timers.length-1;i>=0;i--)if(timers[i].elem==this){if(gotoEnd)timers[i](true);timers.splice(i,1);}});if(!gotoEnd)this.dequeue();return this;}});var queue=function(elem,type,array){if(elem){type=type||"fx";var q=jQuery.data(elem,type+"queue");if(!q||array)q=jQuery.data(elem,type+"queue",jQuery.makeArray(array));}return q;};jQuery.fn.dequeue=function(type){type=type||"fx";return this.each(function(){var q=queue(this,type);q.shift();if(q.length)q[0].call(this);});};jQuery.extend({speed:function(speed,easing,fn){var opt=speed&&speed.constructor==Object?speed:{complete:fn||!fn&&easing||jQuery.isFunction(speed)&&speed,duration:speed,easing:fn&&easing||easing&&easing.constructor!=Function&&easing};opt.duration=(opt.duration&&opt.duration.constructor==Number?opt.duration:jQuery.fx.speeds[opt.duration])||jQuery.fx.speeds.def;opt.old=opt.complete;opt.complete=function(){if(opt.queue!==false)jQuery(this).dequeue();if(jQuery.isFunction(opt.old))opt.old.call(this);};return opt;},easing:{linear:function(p,n,firstNum,diff){return 
firstNum+diff*p;},swing:function(p,n,firstNum,diff){return((-Math.cos(p*Math.PI)/2)+0.5)*diff+firstNum;}},timers:[],timerId:null,fx:function(elem,options,prop){this.options=options;this.elem=elem;this.prop=prop;if(!options.orig)options.orig={};}});jQuery.fx.prototype={update:function(){if(this.options.step)this.options.step.call(this.elem,this.now,this);(jQuery.fx.step[this.prop]||jQuery.fx.step._default)(this);if(this.prop=="height"||this.prop=="width")this.elem.style.display="block";},cur:function(force){if(this.elem[this.prop]!=null&&this.elem.style[this.prop]==null)return this.elem[this.prop];var r=parseFloat(jQuery.css(this.elem,this.prop,force));return r&&r>-10000?r:parseFloat(jQuery.curCSS(this.elem,this.prop))||0;},custom:function(from,to,unit){this.startTime=now();this.start=from;this.end=to;this.unit=unit||this.unit||"px";this.now=this.start;this.pos=this.state=0;this.update();var self=this;function t(gotoEnd){return self.step(gotoEnd);}t.elem=this.elem;jQuery.timers.push(t);if(jQuery.timerId==null){jQuery.timerId=setInterval(function(){var timers=jQuery.timers;for(var i=0;i<timers.length;i++)if(!timers[i]())timers.splice(i--,1);if(!timers.length){clearInterval(jQuery.timerId);jQuery.timerId=null;}},13);}},show:function(){this.options.orig[this.prop]=jQuery.attr(this.elem.style,this.prop);this.options.show=true;this.custom(0,this.cur());if(this.prop=="width"||this.prop=="height")this.elem.style[this.prop]="1px";jQuery(this.elem).show();},hide:function(){this.options.orig[this.prop]=jQuery.attr(this.elem.style,this.prop);this.options.hide=true;this.custom(this.cur(),0);},step:function(gotoEnd){var t=now();if(gotoEnd||t>this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;var done=true;for(var i in 
this.options.curAnim)if(this.options.curAnim[i]!==true)done=false;if(done){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;this.elem.style.display=this.options.display;if(jQuery.css(this.elem,"display")=="none")this.elem.style.display="block";}if(this.options.hide)this.elem.style.display="none";if(this.options.hide||this.options.show)for(var p in this.options.curAnim)jQuery.attr(this.elem.style,p,this.options.orig[p]);}if(done)this.options.complete.call(this.elem);return false;}else{var n=t-this.startTime;this.state=n/this.options.duration;this.pos=jQuery.easing[this.options.easing||(jQuery.easing.swing?"swing":"linear")](this.state,n,0,1,this.options.duration);this.now=this.start+((this.end-this.start)*this.pos);this.update();}return true;}};jQuery.extend(jQuery.fx,{speeds:{slow:600,fast:200,def:400},step:{scrollLeft:function(fx){fx.elem.scrollLeft=fx.now;},scrollTop:function(fx){fx.elem.scrollTop=fx.now;},opacity:function(fx){jQuery.attr(fx.elem.style,"opacity",fx.now);},_default:function(fx){fx.elem.style[fx.prop]=fx.now+fx.unit;}}});jQuery.fn.offset=function(){var left=0,top=0,elem=this[0],results;if(elem)with(jQuery.browser){var parent=elem.parentNode,offsetChild=elem,offsetParent=elem.offsetParent,doc=elem.ownerDocument,safari2=safari&&parseInt(version)<522&&!/adobeair/i.test(userAgent),css=jQuery.curCSS,fixed=css(elem,"position")=="fixed";if(elem.getBoundingClientRect){var 
box=elem.getBoundingClientRect();add(box.left+Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),box.top+Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));add(-doc.documentElement.clientLeft,-doc.documentElement.clientTop);}else{add(elem.offsetLeft,elem.offsetTop);while(offsetParent){add(offsetParent.offsetLeft,offsetParent.offsetTop);if(mozilla&&!/^t(able|d|h)$/i.test(offsetParent.tagName)||safari&&!safari2)border(offsetParent);if(!fixed&&css(offsetParent,"position")=="fixed")fixed=true;offsetChild=/^body$/i.test(offsetParent.tagName)?offsetChild:offsetParent;offsetParent=offsetParent.offsetParent;}while(parent&&parent.tagName&&!/^body|html$/i.test(parent.tagName)){if(!/^inline|table.*$/i.test(css(parent,"display")))add(-parent.scrollLeft,-parent.scrollTop);if(mozilla&&css(parent,"overflow")!="visible")border(parent);parent=parent.parentNode;}if((safari2&&(fixed||css(offsetChild,"position")=="absolute"))||(mozilla&&css(offsetChild,"position")!="absolute"))add(-doc.body.offsetLeft,-doc.body.offsetTop);if(fixed)add(Math.max(doc.documentElement.scrollLeft,doc.body.scrollLeft),Math.max(doc.documentElement.scrollTop,doc.body.scrollTop));}results={top:top,left:left};}function border(elem){add(jQuery.curCSS(elem,"borderLeftWidth",true),jQuery.curCSS(elem,"borderTopWidth",true));}function add(l,t){left+=parseInt(l,10)||0;top+=parseInt(t,10)||0;}return results;};jQuery.fn.extend({position:function(){var left=0,top=0,results;if(this[0]){var offsetParent=this.offsetParent(),offset=this.offset(),parentOffset=/^body|html$/i.test(offsetParent[0].tagName)?{top:0,left:0}:offsetParent.offset();offset.top-=num(this,'marginTop');offset.left-=num(this,'marginLeft');parentOffset.top+=num(offsetParent,'borderTopWidth');parentOffset.left+=num(offsetParent,'borderLeftWidth');results={top:offset.top-parentOffset.top,left:offset.left-parentOffset.left};}return results;},offsetParent:function(){var 
offsetParent=this[0].offsetParent;while(offsetParent&&(!/^body|html$/i.test(offsetParent.tagName)&&jQuery.css(offsetParent,'position')=='static'))offsetParent=offsetParent.offsetParent;return jQuery(offsetParent);}});jQuery.each(['Left','Top'],function(i,name){var method='scroll'+name;jQuery.fn[method]=function(val){if(!this[0])return;return val!=undefined?this.each(function(){this==window||this==document?window.scrollTo(!i?val:jQuery(window).scrollLeft(),i?val:jQuery(window).scrollTop()):this[method]=val;}):this[0]==window||this[0]==document?self[i?'pageYOffset':'pageXOffset']||jQuery.boxModel&&document.documentElement[method]||document.body[method]:this[0][method];};});jQuery.each(["Height","Width"],function(i,name){var tl=i?"Left":"Top",br=i?"Right":"Bottom";jQuery.fn["inner"+name]=function(){return this[name.toLowerCase()]()+num(this,"padding"+tl)+num(this,"padding"+br);};jQuery.fn["outer"+name]=function(margin){return this["inner"+name]()+num(this,"border"+tl+"Width")+num(this,"border"+br+"Width")+(margin?num(this,"margin"+tl)+num(this,"margin"+br):0);};});})();
\ No newline at end of file
diff --git a/pages/radar_2_none.html b/pages/radar_2_none.html
new file mode 100644 (file)
index 0000000..c3b2e91
--- /dev/null
@@ -0,0 +1,29 @@
+<H1>Current Radar / Satellite:</H1><HR>
+<CENTER>
+<TABLE WIDTH=99%>
+<TR>
+<TD WIDTH=50%>
+<CENTER>
+  <IMG SRC="http://fin.bimedia.net/KOMO/komo_radar_seattle_metro_640.gif">
+</CENTER>
+</TD>
+<TD>
+<CENTER>
+  <IMG SRC="http://fin.bimedia.net/KOMO/komo_radar_westwa_640.gif">
+</CENTER>
+</TD>
+</TR>
+<TR>
+<TD>
+<CENTER>
+  <IMG SRC="http://fin.bimedia.net/KOMO/komo_sat_regional_640.gif">
+</CENTER>
+</TD>
+<TD>
+<CENTER>
+  <IMG SRC="http://fin.bimedia.net/KOMO/komo_sat_wide_640.gif">
+</CENTER>
+</TD>
+</TR>
+</TABLE>
+</CENTER>
diff --git a/pages/stevens_cams_1_none.html b/pages/stevens_cams_1_none.html
new file mode 100644 (file)
index 0000000..4f408c6
--- /dev/null
@@ -0,0 +1,23 @@
+<h1>Stevens Pass Webcams:</h1>
+<hr>
+<center>
+<table border=0>
+<tr>
+<td>
+<img alt="Web Cam Image" height="384" src="http://www.stevenspass.com/cams/base1/" style="BORDER-BOTTOM: #5270a3 1px solid; BORDER-LEFT: #5270a3 1px solid; BORDER-TOP: #5270a3 1px solid; BORDER-RIGHT: #5270a3 1px solid" width="512" />
+</td>
+<td>
+<img height="384" name="webCam2" src="http://www.stevenspass.com/cams/mountain/" style="BORDER-BOTTOM: #5270a3 1px solid; BORDER-LEFT: #5270a3 1px solid; BORDER-TOP: #5270a3 1px solid; BORDER-RIGHT: #5270a3 1px solid" width="512" />
+</td>
+</tr>
+<tr>
+<td>
+<img alt="Stevens Pass Summit" border="0" height="392" src="http://www.stevenspass.com/cams/summit-east/" width="512" />
+<br />
+</td>
+<td>
+<img alt="Stevens Pass Summit" border="0" height="392" src="http://www.stevenspass.com/cams/summit-west/" width="512" />
+</td>
+</tr>
+</table>
+</center>
diff --git a/pages/style.css b/pages/style.css
new file mode 100644 (file)
index 0000000..ccebbf5
--- /dev/null
@@ -0,0 +1,58 @@
+* {
+    font-family: helvetica, arial, sans-serif;
+    font-size: 2.5vmin;
+    line-height: 1.33;
+    font-style: normal;
+}
+p {
+    margin-top: 0em;
+    margin-bottom: 0em;
+}
+h1 {
+    color: maroon;
+    font-size: 3.8vmin;
+    padding-bottom: 0px;
+    margin-bottom: 0px;
+}
+h2 {
+    font-size: 115%;
+    font-size: 3.0vmin;
+}
+h3 {
+    font-size: 2.5vmin;
+}
+h4 {
+    font-size: 2.5vmin;
+}
+h5 {
+    font-size: 2.5vmin;
+}
+hr {
+    padding-top: 1px;
+    padding-bottom: 1px;
+    margin: 0px;
+}
+td {
+    vertical-align: top;
+}
+div#time {
+    color: green;
+    font-size: 2.5vmin;
+    font-weight: bold;
+}
+div#date {
+    color: green;
+    font-size: 2.5vmin;
+    font-weight: bold;
+}
+.fit {
+    max-width: 100%;
+    max-height: 100%;
+}
+.center {
+    display: block;
+    margin: auto;
+}
+.longonly {
+    font-size: 90%;
+}
diff --git a/pages/wenatchee-cams_3_none.html b/pages/wenatchee-cams_3_none.html
new file mode 100644 (file)
index 0000000..92eb9b2
--- /dev/null
@@ -0,0 +1,23 @@
+<h1>Lake Wenatchee Webcams:</h1>
+<hr>
+<center>
+<table border=0>
+<tr>
+<td>
+<img alt="Fish Lake" width="512" height="384" src="http://lakewenatcheeinfo.com/images/cam2/Fishlake.jpg" />
+</td>
+<td>
+<img alt="Lake Wenatchee" width="512" height="384" src="http://home.comcast.net/~rmakela/jpeg/lake.jpg" />
+</td>
+</tr>
+<tr>
+<td>
+<img alt="Cabin Driveway" border="0" height="392" width="512" src="http://webcam:[email protected]/webcam/videostream.cgi"/> <!-- NOTE(review): plaintext camera credentials are embedded in this committed URL; consider serving the stream through an authenticating proxy instead -->
+<br />
+</td>
+<td>
+<img alt="Midway" border="0" height="392" width="512" src="http://lakewenatcheeinfo.com/images/cam2/midwaylatest.jpg" />
+</td>
+</tr>
+</table>
+</center>
diff --git a/pages/wsdot-bridges_3_none.html b/pages/wsdot-bridges_3_none.html
new file mode 100644 (file)
index 0000000..8c254f0
--- /dev/null
@@ -0,0 +1,13 @@
+<BODY>
+  <h1>WSDOT Traffic</h1>
+  <HR>
+  <CENTER>
+    <TABLE BORDER=0 WIDTH=96%>
+      <TR>
+        <TD ALIGN=CENTER>
+          <IMG SRC="http://images.wsdot.wa.gov/nwflow/flowmaps/bridges.gif" HEIGHT=850>
+        </TD>
+      </TR>
+    </TABLE>
+  </CENTER>
+</BODY>
diff --git a/picasa_renderer.py b/picasa_renderer.py
new file mode 100644 (file)
index 0000000..d15bd7b
--- /dev/null
@@ -0,0 +1,169 @@
+import httplib
+import gdata_oauth
+import file_writer
+import renderer
+import gdata
+import secrets
+import sets
+import random
+from oauth2client.client import AccessTokenRefreshError
+
+class picasa_renderer(renderer.debuggable_abstaining_renderer):
+    """A renderer to fetch photos from picasaweb.google.com"""
+
+    album_whitelist = sets.ImmutableSet([
+        'Alex',
+        'Alex 6.0..8.0 years old',
+        'Alex 3.0..4.0 years old',
+        'Barn',
+        'Bangkok and Phukey, 2003',
+        'Blue Angels... Seafair',
+        'Carol Ann and Owen',
+        'Chahuly Glass',
+        'Dunn Gardens',
+        'East Coast, 2011',
+        'East Coast, 2013',
+        'Friends',
+        'Gasches',
+        'Gasch Wedding',
+        'Hiking and Ohme Gardens',
+        'Hiking',
+        'Karen\'s Wedding',
+        'Key West 2019',
+        'Krakow 2009',
+        'Munich, July 2018',
+        'NJ 2015',
+        'NW Trek',
+        'Oahu 2010'
+        'Ocean Shores 2009',
+        'Ohme Gardens',
+        'Olympic Sculpture Park',
+        'Paintings',
+        'Puerto Vallarta',
+        'Photos from posts',
+        'Random',
+        'SFO 2014',
+        'Soccer',
+        'Skiing with Alex',
+        'Tuscany 2008',
+        "Trip to California '16",
+        "Trip to East Coast '16",
+        'Yosemite 2010',
+        'Zoo',
+    ])
+
+    def __init__(self, name_to_timeout_dict, oauth):
+        super(picasa_renderer, self).__init__(name_to_timeout_dict, False)
+        self.oauth = oauth
+        self.photo_urls = {}
+        self.width = {}
+        self.height = {}
+        self.is_video = {}
+
+    def debug_prefix(self):
+        return "picasa"
+
+    def periodic_render(self, key):
+        if (key == 'Fetch Photos'):
+            return self.fetch_photos()
+        elif (key == 'Shuffle Cached Photos'):
+            return self.shuffle_cached()
+        else:
+            raise error('Unexpected operation')
+
+    # Just fetch and cache the photo URLs in memory.
+    def fetch_photos(self):
+        try:
+            temp_photo_urls = {}
+            temp_width = {}
+            temp_height = {}
+            temp_is_video = {}
+            conn = httplib.HTTPSConnection("photoslibrary.googleapis.com")
+            conn.request("GET",
+                         "/v1/albums",
+                         None,
+                         { "Authorization": "%s %s" % (self.oauth.token['token_type'], self.oauth.token['access_token'])
+                         })
+            response = conn.getresponse()
+            if response.status != 200:
+                print("Failed to fetch albums, status %d\n" % response.status)
+            print response.read()
+            albums = self.pws.GetUserFeed().entry
+            for album in albums:
+                if (album.title.text not in picasa_renderer.album_whitelist):
+                    continue
+                photos = self.pws.GetFeed(
+                    '/data/feed/api/user/%s/albumid/%s?kind=photo&imgmax=1024u' %
+                    (secrets.google_username, album.gphoto_id.text))
+                for photo in photos.entry:
+                    id = '%s/%s' % (photo.albumid.text, photo.gphoto_id.text)
+                    temp_is_video[id] = False
+                    resolution = 999999
+                    for x in photo.media.content:
+                        if "video" in x.type and int(x.height) < resolution:
+                            url = x.url
+                            resolution = int(x.height)
+                            temp_width[id] = x.width
+                            temp_height[id] = x.height
+                            temp_is_video[id] = True
+                        else:
+                            if resolution == 999999:
+                                url = x.url
+                                temp_width[id] = x.width
+                                temp_height[id] = x.height
+                                temp_is_video[id] = False
+                    temp_photo_urls[id] = url
+            self.photo_urls = temp_photo_urls
+            self.width = temp_width
+            self.height = temp_height
+            self.is_video = temp_is_video
+            return True
+        except (gdata.service.RequestError,
+                gdata.photos.service.GooglePhotosException,
+                AccessTokenRefreshError):
+            print("******** TRYING TO REFRESH PHOTOS CLIENT *********")
+            self.oauth.refresh_token()
+            self.client = self.oauth.photos_service()
+            return False
+
+    # Pick one of the cached URLs and build a page.
+    def shuffle_cached(self):
+        if len(self.photo_urls) == 0:
+            print("No photos!")
+            return False
+        pid = random.sample(self.photo_urls, 1)
+        id = pid[0]
+        refresh = 15
+        if (self.is_video[id]): refresh = 60
+
+        f = file_writer.file_writer('photo_23_none.html')
+        f.write("""
+<style>
+body{background-color:#303030;}
+div#time{color:#dddddd;}
+div#date{color:#dddddd;}
+</style>
+<center>""")
+        if self.is_video[id]:
+            f.write('<iframe src="%s" seamless width=%s height=%s></iframe>' % (self.photo_urls[id], self.width[id], self.height[id]))
+        else:
+            f.write('<img src="%s" width=%s alt="%s">' % (self.photo_urls[id], self.width[id], self.photo_urls[id]))
+        f.write("</center>")
+        f.close()
+        return True
+
+# Test code
+oauth = gdata_oauth.OAuth(secrets.google_client_id,
+                          secrets.google_client_secret)
+oauth.get_new_token()
+if not oauth.has_token():
+    user_code = oauth.get_user_code()
+    print('------------------------------------------------------------')
+    print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
+        oauth.verification_url, user_code))
+    oauth.get_new_token()
+x = picasa_renderer({"Fetch Photos": (60 * 60 * 12),
+                     "Shuffle Cached Photos": (1)},
+                    oauth)
+x.fetch_photos()
+
diff --git a/pollen_renderer.py b/pollen_renderer.py
new file mode 100644 (file)
index 0000000..bcc6c38
--- /dev/null
@@ -0,0 +1,138 @@
+import file_writer
+from bs4 import BeautifulSoup
+import renderer
+import httplib
+import re
+
class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
    """Scrapes the NW Asthma & Allergy Center pollen-count page and
    renders tree/grass/weed counts as an HTML bar graph for the kiosk."""

    def __init__(self, name_to_timeout_dict):
        super(pollen_count_renderer, self).__init__(name_to_timeout_dict, False)
        self.site = 'www.nwasthma.com'      # scrape target host...
        self.uri = '/pollen/pollen-count/'  # ...and path
        # NOTE(review): these three lists are never populated or read;
        # counts are parsed straight into HTML in munge().
        self.trees = []
        self.grasses = []
        self.weeds = []

    def debug_prefix(self):
        return "pollen"

    def fetch_html(self):
        """GET the pollen page; return its body, or False on non-200."""
        conn = httplib.HTTPConnection(self.site)
        conn.request("GET", self.uri, None, {})
        response = conn.getresponse()
        if response.status != 200:
            print('Connection to %s/%s failed, status %d' % (self.site,
                                                             self.uri,
                                                             response.status))
            return False
        return response.read()

    def append_crap(self, text, tc, tr, tcomment, kind, maximum):
        """Append one <TR> bar-graph row for a pollen category.

        tc/tr/tcomment are the <td> cells holding the numeric count, the
        Low/Medium/High rating, and a free-text comment.  kind labels the
        row; maximum scales the bar so count == maximum spans 600px.
        """
        desc = ""
        color = "#00d000"  # green unless the rating says otherwise
        if tr is not None and tr.string is not None:
            desc = tr.string.encode('utf-8')
            if "edium" in desc:    # matches "Medium" / "medium"
                color = "#a0a000"
            elif "igh" in desc:    # matches "High" / "high"
                color = "#d00000"

        count = 0
        if tc is not None and tc.string is not None:
            try:
                count = int(tc.string.encode('utf-8'))
            except (ValueError, TypeError):
                # Non-numeric cell (e.g. "n/a"); treat as zero.
                count = 0
        # Counts above maximum will overspill the 600px budget; tolerated.
        proportion = float(count) / float(maximum)
        width = int(proportion * 600.0)

        comment = ""
        if tcomment is not None and tcomment.string is not None:
            comment = "%s" % (tcomment.string.encode('utf-8'))

        # Label:
        text = text + '<TR><TD WIDTH=10%% STYLE="font-size: 22pt">%s:</TD>' % (kind)

        # Bar graph with text in it (possibly overspilling):
        text = text + '<TD HEIGHT=80><DIV STYLE="width: %d; height: 80; overflow: visible; background-color: %s; font-size: 16pt">' % (width, color)
        text = text + 'count=%d,&nbsp;%s&nbsp;%s</DIV>' % (count, desc, comment)
        return text

    def munge(self, raw):
        """Parse the raw page HTML and build the kiosk fragment."""
        soup = BeautifulSoup(raw, "html.parser")

        # Fix: attribute was misspelled "BODER" (silently ignored by
        # browsers); BORDER was clearly intended.
        text = """
<H1>Pollen Count, Seattle</H1>
<HR>
<CENTER>
<TABLE BORDER WIDTH=800>"""
        date = "<CENTER><B>Unknown Date</B></CENTER>"
        # The report date (dd.mm.20yy-ish) and an optional note live in
        # adjacent <p> tags.
        for x in soup.find_all('p'):
            if x is None or x.string is None:
                continue
            txt = x.string.encode('utf-8')
            m = re.match("[0-9][0-9].[0-9][0-9].20[0-9][0-9]", txt)
            if m is not None:
                date = "<CENTER><B>%s</B></CENTER>" % (txt)
                y = x.find_next_sibling('p')
                if y is not None and y.string is not None:
                    txt = y.string.encode('utf-8')
                    date = date + "<BR>%s<HR>" % txt
        text = text + '<TR><TD COLSPAN=3 STYLE="font-size:16pt">%s</TD></TR>\n' % (
            date)

        # Each category is a label <td> followed by sibling <td>s:
        # count, rating, comment.
        trees = soup.find('td', text=re.compile('[Tt]rees:'))
        if trees is not None:
            tc = trees.find_next_sibling('td')
            tr = tc.find_next_sibling('td')
            tcomment = tr.find_next_sibling('td')
            text = self.append_crap(text, tc, tr, tcomment, "Trees", 650)

        grasses = soup.find('td', text=re.compile('[Gg]rasses:'))
        if grasses is not None:
            gc = grasses.find_next_sibling('td')
            gr = gc.find_next_sibling('td')
            gcomment = gr.find_next_sibling('td')
            text = self.append_crap(text, gc, gr, gcomment, "Grasses", 35)

        weeds = soup.find('td', text=re.compile('[Ww]eeds:'))
        if weeds is not None:
            wc = weeds.find_next_sibling('td')
            wr = wc.find_next_sibling('td')
            wcomment = wr.find_next_sibling('td')
            text = self.append_crap(text, wc, wr, wcomment, "Weeds", 25)
        text = text + """
<TR>
  <TD COLSPAN=3 STYLE="font-size:16pt">
<HR>
<B>Absent:</B> No symptoms.<BR>
<B>Low:</B> Only individuals extremely sensitive to these pollens will experience symptoms.<BR>
<B>Moderate:</B> Many individuals sensitive to these pollens will experience symptoms<BR>
<B>High:</B> Most individuals with any sensitivity to these pollens will experience symptoms.<BR>
<B>Very High:</B> Almost all individuals with any sensitivity at all to these pollens will experience symptoms. Extremely sensitive people could have severe problems.
  </TD>
</TR>
</TABLE>
</CENTER>"""
        return text

    def poll_pollen(self):
        """Fetch, render and write the pollen page; True on success."""
        raw = self.fetch_html()
        if raw is False:
            # Fix: previously munge(False) would crash BeautifulSoup;
            # report failure so the render loop retries instead.
            return False
        cooked = self.munge(raw)
        f = file_writer.file_writer('pollen_4_360.html')
        f.write(cooked)
        f.close()
        return True

    def periodic_render(self, key):
        self.debug_print("executing action %s" % key)
        if key == "Poll":
            return self.poll_pollen()
        # Fix: previously `raise error(...)` -- "error" was undefined, so
        # this raised NameError (and misspelled "operation").
        raise ValueError("Unknown operation: %s" % key)
+
+#test = pollen_count_renderer({"Test", 123})
diff --git a/profanity_filter.py b/profanity_filter.py
new file mode 100644 (file)
index 0000000..7b378cc
--- /dev/null
@@ -0,0 +1,453 @@
+import string
+import re
+
class profanity_filter:
    """Detects and masks profanity in free text.

    Every word, bigram, and trigram of the input is checked against a
    fixed blocklist -- both verbatim and after normalize() (lowercased,
    punctuation stripped, trailing plural removed).
    """

    def __init__(self):
        # Blocklist of words and short phrases.  self.arrBad stays a
        # list for backward compatibility; membership tests use the
        # frozenset _bad_set below (O(1) instead of O(n) per lookup).
        self.arrBad = [
            'acrotomophilia', 'anal', 'anally', 'anilingus', 'anus',
            'arsehole', 'ass', 'asses', 'asshole', 'assmunch',
            'auto erotic', 'autoerotic', 'babeland', 'baby batter',
            'ball gag', 'ball gravy', 'ball kicking', 'ball licking',
            'ball sack', 'ball zack', 'ball sucking', 'bangbros',
            'bareback', 'barely legal', 'barenaked', 'bastardo',
            'bastinado', 'bbw', 'bdsm', 'beaver cleaver', 'beaver lips',
            'bestiality', 'bi curious', 'big black', 'big breasts',
            'big knockers', 'big tits', 'bimbos', 'birdlock', 'bitch',
            'bitches', 'black cock', 'blonde action',
            'blonde on blonde', 'blow j', 'blow your l',
            'blow ourselves', 'blow m', 'blue waffle', 'blumpkin',
            'bollocks', 'bondage', 'boner', 'boob', 'boobs',
            'booty call', 'breasts', 'brown showers', 'brunette action',
            'bukkake', 'bulldyke', 'bullshit', 'bullet vibe',
            'bung hole', 'bunghole', 'busty', 'butt', 'buttcheeks',
            'butthole', 'camel toe', 'camgirl', 'camslut', 'camwhore',
            'carpet muncher', 'carpetmuncher', 'chocolate rosebuds',
            'circlejerk', 'cleveland steamer', 'clit', 'clitoris',
            'clover clamps', 'clusterfuck', 'cock', 'cocks',
            'coprolagnia', 'coprophilia', 'cornhole', 'creampie',
            'cream pie', 'cum', 'cumming', 'cunnilingus', 'cunt',
            'damn', 'darkie', 'date rape', 'daterape', 'deep throat',
            'deepthroat', 'dick', 'dildo', 'dirty pillows',
            'dirty sanchez', 'dog style', 'doggie style', 'doggiestyle',
            'doggy style', 'doggystyle', 'dolcett', 'domination',
            'dominatrix', 'dommes', 'donkey punch', 'double dick',
            'double dong', 'double penetration', 'dp action', 'dtf',
            'eat my ass', 'ecchi', 'ejaculation', 'erotic', 'erotism',
            'escort', 'ethical slut', 'eunuch', 'faggot',
            'posts each week', 'fecal', 'felch', 'fellatio', 'feltch',
            'female squirting', 'femdom', 'figging', 'fingering',
            'fisting', 'foot fetish', 'footjob', 'frotting', 'fuck',
            'fucking', 'fuckin', "fuckin'", 'fucked', 'fuckers',
            'fuck buttons', 'fuckhead', 'fudge packer', 'fudgepacker',
            'futanari', 'g-spot', 'gspot', 'gang bang', 'gay sex',
            'genitals', 'giant cock', 'girl on', 'girl on top',
            'girls gone wild', 'goatcx', 'goatse', 'goddamn', 'gokkun',
            'golden shower', 'goo girl', 'goodpoop', 'goregasm',
            'grope', 'group sex', 'guro', 'hand job', 'handjob',
            'hard core', 'hardcore', 'hentai', 'homoerotic', 'honkey',
            'hooker', 'horny', 'hot chick', 'how to kill',
            'how to murder', 'huge fat', 'humping', 'incest',
            'intercourse', 'jack off', 'jail bait', 'jailbait',
            'jerk off', 'jigaboo', 'jiggaboo', 'jiggerboo', 'jizz',
            'juggs', 'kike', 'kinbaku', 'kinkster', 'kinky', 'knobbing',
            'leather restraint', 'lemon party', 'lolita', 'lovemaking',
            'lpt request', 'make me come', 'male squirting',
            'masturbate', 'masturbated', 'masturbating',
            'menage a trois', 'milf', 'milfs', 'missionary position',
            'motherfucker', 'mound of venus', 'mr hands', 'muff diver',
            'muffdiving', 'nambla', 'nawashi', 'negro', 'neonazi',
            'nig nog', 'nigga', 'nigger', 'nimphomania', 'nipple',
            'not safe for', 'nsfw', 'nsfw images', 'nude', 'nudity',
            'nutsack', 'nut sack', 'nympho', 'nymphomania',
            'octopussy', 'omorashi', 'one night stand', 'orgasm',
            'orgy', 'paedophile', 'panties', 'panty', 'pedobear',
            'pedophile', 'pegging', 'pee', 'penis', 'phone sex',
            'piss pig', 'pissing', 'pisspig', 'playboy',
            'pleasure chest', 'pole smoker', 'ponyplay', 'poof',
            'poop chute', 'poopchute', 'porn', 'pornhub', 'porno',
            'pornography', 'prince albert', 'pthc', 'pube', 'pubes',
            'pussy', 'pussies', 'queaf', 'queer', 'raghead',
            'raging boner', 'rape', 'raping', 'rapist', 'rectum',
            'reverse cowgirl', 'rimjob', 'rimming', 'rosy palm',
            'rusty trombone', 's&m', 'sadism', 'scat', 'schlong',
            'scissoring', 'semen', 'sex', 'sexo', 'sexy',
            'shaved beaver', 'shaved pussy', 'shemale', 'shibari',
            'shit', 'shota', 'shrimping', 'slanteye', 'slut', 'smut',
            'snatch', 'snowballing', 'sodomize', 'sodomy', 'spic',
            'spooge', 'spread legs', 'strap on', 'strapon', 'strappado',
            'strip club', 'style doggy', 'suck', 'sucks',
            'suicide girls', 'sultry women', 'swastika', 'swinger',
            'tainted love', 'taste my', 'tea bagging', 'threesome',
            'throating', 'tied up', 'tight white', 'tit', 'tits',
            'titties', 'titty', 'tongue in a', 'topless', 'tosser',
            'towelhead', 'tranny', 'tribadism', 'tub girl', 'tubgirl',
            'tushy', 'twat', 'twink', 'twinkie', 'undressing',
            'upskirt', 'urethra play', 'urophilia', 'vagina',
            'venus mound', 'vibrator', 'violet blue', 'violet wand',
            'vorarephilia', 'voyeur', 'vulva', 'wank', 'wet dream',
            'wetback', 'white power', 'whore', 'women rapping',
            'wrapping men', 'wrinkled starfish', 'xx', 'xxx', 'yaoi',
            'yellow showers', 'yiffy', 'zoophilia',
        ]
        self._bad_set = frozenset(self.arrBad)

    def normalize(self, text):
        """Return a canonical form of text for blocklist lookups.

        Lowercases, maps underscores to spaces, strips all ASCII
        punctuation, and removes one trailing "s"/"es" as a crude
        de-pluralization (e.g. "Dickes" -> "dick").
        """
        result = text.lower().replace('_', ' ')
        for ch in string.punctuation:
            result = result.replace(ch, '')
        return re.sub(r"e?s$", "", result)

    def _is_bad(self, phrase):
        # Flag a phrase if either its normalized or verbatim form is on
        # the blocklist.
        return (self.normalize(phrase) in self._bad_set or
                phrase in self._bad_set)

    def _ngrams(self, words):
        """Yield (phrase, constituent_words) for every unigram, then
        bigram, then trigram in words -- the historical scan order."""
        for word in words:
            yield word, (word,)
        if len(words) > 1:
            for a, b in zip(words, words[1:]):
                yield "%s %s" % (a, b), (a, b)
        if len(words) > 2:
            for a, b, c in zip(words, words[1:], words[2:]):
                yield "%s %s %s" % (a, b, c), (a, b, c)

    def filter_bad_words(self, text):
        """Return text with every flagged word/phrase masked out.

        Each offending token is replaced (everywhere it occurs) with a
        punctuation mask of the same length.
        """
        badWordMask = '!@#$%!@#$%^~!@%^~@#$%!@#$%^~!'
        words = text.split()
        for phrase, parts in self._ngrams(words):
            if self._is_bad(phrase):
                if len(parts) == 1:
                    print('***** PROFANITY WORD="%s"' % phrase)
                else:
                    print('***** PROFANITY PHRASE="%s"' % phrase)
                for part in parts:
                    text = text.replace(part, badWordMask[:len(part)])
        return text

    def contains_bad_words(self, text):
        """Return True iff any word, bigram, or trigram of text is on
        the blocklist (verbatim or normalized)."""
        for phrase, parts in self._ngrams(text.split()):
            if self._is_bad(phrase):
                if len(parts) == 1:
                    print('***** PROFANITY WORD="%s"' % phrase)
                else:
                    print('***** PROFANITY PHRASE="%s"' % phrase)
                return True
        return False
+
+#x = profanity_filter()
+#print(x.filter_bad_words("Fuck this auto erotic shit, it's not safe for work."))
+#print(x.contains_bad_words("cream pie their daughter."))
+#print(x.contains_bad_words("If you tell someone your penis is 6 inches it's pretty believable.  If you say it's half a foot no one will believe you."))
+#print(x.normalize("dickes"));
diff --git a/reddit_renderer.py b/reddit_renderer.py
new file mode 100644 (file)
index 0000000..05b641d
--- /dev/null
@@ -0,0 +1,156 @@
+import constants
+import file_writer
+import grab_bag
+import renderer
+import secrets
+import page_builder
+import praw
+import profanity_filter
+import random
+
class reddit_renderer(renderer.debuggable_abstaining_renderer):
    """A renderer to pull text content from reddit."""

    def __init__(self, name_to_timeout_dict, subreddit_list, min_votes, font_size):
        """subreddit_list: subreddit names to scrape; min_votes: minimum
        upvotes a post needs to be shown; font_size: pt size for the
        rendered layout."""
        super(reddit_renderer, self).__init__(name_to_timeout_dict, True)
        self.subreddit_list = subreddit_list
        self.praw = praw.Reddit(client_id=secrets.reddit_client_id,
                                client_secret=secrets.reddit_client_secret,
                                user_agent="Yoshiatsu's Kitchen Kiosk by u/yoshiatsu ver 0.1, See http://wannabe.guru.org/svn/kiosk/trunk/reddit_renderer.py")
        self.min_votes = min_votes
        self.font_size = font_size
        self.messages = grab_bag.grab_bag()  # pool of rendered posts
        self.filter = profanity_filter.profanity_filter()
        self.deduper = set()                 # titles already accepted

    def debug_prefix(self):
        return "reddit(%s)" % " ".join(self.subreddit_list)

    def periodic_render(self, key):
        self.debug_print('called for "%s"' % key)
        if key == "Scrape":
            return self.scrape_reddit()
        elif key == "Shuffle":
            return self.shuffle_messages()
        # Fix: previously `raise error(...)` -- "error" was undefined,
        # so this raised NameError instead of a real exception.
        raise ValueError('Unexpected operation')

    def append_message(self, messages):
        """Render eligible posts as HTML snippets and pool them.

        A post is skipped when it trips the profanity filter, has too
        few upvotes, or its title was already seen this scrape.
        """
        for msg in messages:
            if (not self.filter.contains_bad_words(msg.title)
                and msg.ups > self.min_votes
                and msg.title not in self.deduper):
                try:
                    self.deduper.add(msg.title)
                    # Show the upvote count, or the thumbnail when the
                    # post has a real one.
                    content = "%d" % msg.ups
                    if (msg.thumbnail != "self" and
                        msg.thumbnail != "default" and
                        msg.thumbnail != ""):
                        content = '<IMG SRC="%s">' % msg.thumbnail
                    x = u"""
<TABLE STYLE="font-size:%dpt;">
  <TR>
    <!-- The number of upvotes or item image: -->
    <TD STYLE="font-weight:900; padding:8px;">
      <FONT COLOR="maroon" SIZE=40>%s</FONT>
    </TD>

    <!-- The content and author: -->
    <TD>
      <B>%s</B><BR><FONT COLOR=#bbbbbb>(%s)</FONT>
    </TD>
  </TR>
</TABLE>""" % (self.font_size, content, msg.title, msg.author)
                    self.messages.add(x.encode('utf8'))
                except Exception:
                    # Fix: was a bare except; keep best-effort behavior
                    # but no longer swallow SystemExit/KeyboardInterrupt.
                    self.debug_print('Unexpected exception, skipping message.')
            else:
                self.debug_print('skipped message "%s" for profanity or low score' % (
                    msg.title.encode('utf8')))

    def scrape_reddit(self):
        """Refresh the message pool from every configured subreddit."""
        self.deduper.clear()
        self.messages.clear()
        for subreddit in self.subreddit_list:
            try:
                sub = self.praw.subreddit(subreddit)
            except Exception:
                continue
            # Pull several listings; each one is best-effort since any
            # can fail transiently on the reddit side.
            for listing in (lambda: sub.hot(),
                            lambda: sub.new(),
                            lambda: sub.rising(),
                            lambda: sub.controversial('week'),
                            lambda: sub.top('day')):
                try:
                    self.append_message(listing())
                except Exception:
                    pass
            self.debug_print("There are now %d messages" % self.messages.size())
        return True

    def shuffle_messages(self):
        """Pick four pooled messages at random and write the page.

        Returns False when the pool has fewer than four messages.
        """
        layout = page_builder.page_builder()
        layout.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
        x = ""
        for subreddit in self.subreddit_list:
            x += ("%s " % subreddit)
        # Long subreddit lists get an abbreviated title.
        if len(x) > 30:
            if "SeaWA" in x:
                x = "[local interests]"
            else:
                x = "Unknown, fixme"
        layout.set_title("Reddit /r/%s" % x.strip())
        subset = self.messages.subset(4)
        if subset is None:
            self.debug_print("Not enough messages to pick from.")
            return False
        for msg in subset:
            layout.add_item(msg)
        f = file_writer.file_writer("%s_4_10800.html" % self.subreddit_list[0])
        layout.render_html(f)
        f.close()
        return True
+
class til_reddit_renderer(reddit_renderer):
    """/r/todayilearned: min 200 upvotes, 20pt font."""
    def __init__(self, name_to_timeout_dict):
        super(til_reddit_renderer, self).__init__(
            name_to_timeout_dict, ["todayilearned"], 200, 20)
+
class quotes_reddit_renderer(reddit_renderer):
    """/r/quotes: min 200 upvotes, 20pt font."""
    def __init__(self, name_to_timeout_dict):
        super(quotes_reddit_renderer, self).__init__(
            name_to_timeout_dict, ["quotes"], 200, 20)
+
class showerthoughts_reddit_renderer(reddit_renderer):
    """/r/showerthoughts: min 350 upvotes, 24pt font."""
    def __init__(self, name_to_timeout_dict):
        super(showerthoughts_reddit_renderer, self).__init__(
            name_to_timeout_dict, ["showerthoughts"], 350, 24)
+
class seattle_reddit_renderer(reddit_renderer):
    """Local-interest subreddits: min 50 upvotes, 24pt font."""
    def __init__(self, name_to_timeout_dict):
        super(seattle_reddit_renderer, self).__init__(
            name_to_timeout_dict, ["seattle","seattleWA","SeaWA","bellevue","kirkland", "CoronavirusWA"], 50, 24)
+
class lifeprotips_reddit_renderer(reddit_renderer):
    """/r/lifeprotips: min 100 upvotes, 24pt font."""
    def __init__(self, name_to_timeout_dict):
        super(lifeprotips_reddit_renderer, self).__init__(
            name_to_timeout_dict, ["lifeprotips"], 100, 24)
+
+#x = reddit_renderer({"Test", 1234}, ["seattle","bellevue"], 50, 24)
+#x.periodic_render("Scrape")
+#x.periodic_render("Shuffle")
diff --git a/renderer.py b/renderer.py
new file mode 100644 (file)
index 0000000..721b374
--- /dev/null
@@ -0,0 +1,77 @@
+import time
+from datetime import datetime
+
class renderer(object):
    """Base class for something that can render."""
    def render(self):
        # Subclasses produce/refresh their output here; base is a no-op.
        pass
+
class abstaining_renderer(renderer):
    """A renderer that doesn't do it all the time.

    Each named sub-task has a timeout in seconds; render() fires only
    the sub-tasks whose timeout has elapsed since their last successful
    run, retrying failures a bounded number of times.
    """
    def __init__(self, name_to_timeout_dict):
        # Maps task name -> minimum seconds between successful runs.
        self.name_to_timeout_dict = name_to_timeout_dict
        # Maps task name -> time.time() of its last success (0 = never).
        self.last_runs = {key: 0 for key in name_to_timeout_dict}

    def should_render(self, keys_to_skip):
        """Return one task name whose timeout has expired (ignoring any
        in keys_to_skip), or None when everything is fresh."""
        now = time.time()
        for key in self.name_to_timeout_dict:
            if key in keys_to_skip:
                continue
            if (now - self.last_runs[key]) > self.name_to_timeout_dict[key]:
                return key
        return None

    def render(self):
        """Run every due task; give up on a task after too many retries."""
        tries = {}
        keys_to_skip = set()
        while True:
            key = self.should_render(keys_to_skip)
            if key is None:
                break
            # First attempt is recorded as "try 0" to preserve the
            # historical retry budget and "(try N)" labeling.
            tries[key] = tries.get(key, -1) + 1

            if tries[key] > 5:
                print('Too many retries for "%s", giving up for now' % key)
                keys_to_skip.add(key)
            else:
                msg = 'renderer: periodic render event for "%s"' % key
                if tries[key] > 1:
                    msg = msg + " (try %d)" % tries[key]
                print(msg)
                # Only a truthy return counts as success and stamps
                # last_runs; a falsy return causes a retry.
                if self.periodic_render(key):
                    self.last_runs[key] = time.time()

    def periodic_render(self, key):
        """Subclasses do the work for one named task; return truthy on
        success."""
        pass
+
class debuggable_abstaining_renderer(abstaining_renderer):
    """An abstaining_renderer with an optional debug-logging channel."""
    def __init__(self, name_to_timeout_dict, debug):
        super(debuggable_abstaining_renderer, self).__init__(name_to_timeout_dict)
        self.debug = debug  # truthy enables debug_print output

    def debug_prefix(self):
        """Tag prepended to debug output; subclasses override."""
        return "none"

    def being_debugged(self):
        return self.debug

    def debug_print(self, template, *args):
        """Print a timestamped, prefixed message when debugging is on.

        template may contain str.format placeholders filled from args.
        """
        try:
            if self.being_debugged():
                if args:
                    # Fix: was template.format(args), which passed the
                    # whole tuple as a single positional argument.
                    msg = template.format(*args)
                else:
                    msg = template

                # current date and time
                now = datetime.now()
                timestamp = now.strftime("%d-%b-%Y (%H:%M:%S.%f)")
                print("%s(%s): %s" % (self.debug_prefix(), timestamp, msg))
        except Exception as e:
            # Logging must never take down the renderer itself.
            print("Exception in debug_print!")
            print(e)
diff --git a/renderer_catalog.py b/renderer_catalog.py
new file mode 100644 (file)
index 0000000..c789aaa
--- /dev/null
@@ -0,0 +1,165 @@
+import bellevue_reporter_rss_renderer
+import constants
+import cnn_rss_renderer
+import gdata_oauth
+import gcal_renderer
+import gkeep_renderer
+import health_renderer
+import local_photos_mirror_renderer
+import mynorthwest_rss_renderer
+import myq_renderer
+import pollen_renderer
+import reddit_renderer
+import renderer
+import seattletimes_rss_renderer
+import secrets
+import stevens_renderer
+import stranger_renderer
+import stock_renderer
+import twitter_renderer
+import weather_renderer
+import wsj_rss_renderer
+
# One shared OAuth session used by all the Google (gdata) renderers in
# the registry below.
oauth = gdata_oauth.OAuth(secrets.google_client_id,
                          secrets.google_client_secret)
if not oauth.has_token():
    # Device flow: the operator enters a short code at the printed URL.
    user_code = oauth.get_user_code()
    print('------------------------------------------------------------')
    print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
        oauth.verification_url, user_code))
    oauth.get_new_token()

# Convenience multipliers for the timeout dictionaries below.
seconds = 1
minutes = 60
hours = constants.seconds_per_hour
always = seconds * 1
+
# Note: the 1-second ("always") entries don't actually run every second;
# the renderer thread polls roughly once a minute, so they are simply
# always stale and fire on every poll.
__registry = [
                 stranger_renderer.stranger_events_renderer(
                     {"Fetch Events" : (hours * 12),
                      "Shuffle Events" : (always)}),
#                 pollen_renderer.pollen_count_renderer(
#                     {"Poll" : (hours * 1)}),
                 myq_renderer.garage_door_renderer(
                     {"Poll MyQ" : (minutes * 5),
                      "Update Page" : (minutes * 5)}),
                 bellevue_reporter_rss_renderer.bellevue_reporter_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "www.bellevuereporter.com",
                     [ "/feed/" ],
                     "Bellevue Reporter" ),
                 mynorthwest_rss_renderer.mynorthwest_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "mynorthwest.com",
                     [ "/feed/" ],
                     "MyNorthwest News" ),
                 cnn_rss_renderer.cnn_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "rss.cnn.com",
                     [ "/rss/money_latest.rss",
                       "/rss/money_mostpopular.rss",
                       "/rss/money_news_economy.rss",
                       "/rss/money_news_companies.rss" ],
                     "CNNMoney" ),
                 cnn_rss_renderer.cnn_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "rss.cnn.com",
                     [ "/rss/cnn_tech.rss",
                       "/rss/money_technology.rss" ],
                     "CNNTechnology" ),
                 cnn_rss_renderer.cnn_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "rss.cnn.com",
                     [ "/rss/cnn_topstories.rss",
                       "/rss/cnn_world.rss",
                       "/rss/cnn_us.rss" ],
                     "CNNNews" ),
                 wsj_rss_renderer.wsj_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "feeds.a.dj.com",
                     [ "/rss/RSSWorldNews.xml" ],
                     "WSJNews" ),
                 wsj_rss_renderer.wsj_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "feeds.a.dj.com",
                     [ "/rss/RSSMarketsMain.xml",
                       "/rss/WSJcomUSBusiness.xml"],
                     "WSJBusiness" ),
                 health_renderer.periodic_health_renderer(
                     # NOTE(review): "Perioidic" is a typo, but this key
                     # string is presumably matched by name inside
                     # health_renderer -- fix both places together or
                     # not at all.
                     {"Update Perioidic Job Health" : (seconds * 45)}),
                 stock_renderer.stock_quote_renderer(
                     {"Update Prices" : (hours * 1)},
                     [ "MSFT",
                       "SPY",
                       "GBTC",
                       "IEMG",
                       "OPTAX",
                       "SPAB",
                       "SPHD",
                       "SGOL",
                       "VDC",
                       "VYMI",
                       "VNQ",
                       "VNQI" ]),
                 stevens_renderer.stevens_pass_conditions_renderer(
                     {"Fetch Pass Conditions" : (hours * 1)},
                     "www.wsdot.com",
                     [ "/traffic/rssfeeds/stevens/Default.aspx" ]),
                 seattletimes_rss_renderer.seattletimes_rss_renderer(
                     {"Fetch News" : (hours * 1),
                      "Shuffle News" : (always)},
                     "www.seattletimes.com",
                     [ "/pacific-nw-magazine/feed/",
                       "/life/feed/",
                       "/outdoors/feed/" ],
                     "Seattle Times Segments"),
                 weather_renderer.weather_renderer(
                     {"Fetch Weather (Bellevue)": (hours * 4)},
                     "home"),
                 weather_renderer.weather_renderer(
                     {"Fetch Weather (Stevens)": (hours * 4)},
                     "stevens"),
                 weather_renderer.weather_renderer(
                     {"Fetch Weather (Telma)" : (hours * 4)},
                     "telma"),
                 local_photos_mirror_renderer.local_photos_mirror_renderer(
                     {"Index Photos": (hours * 24),
                      "Choose Photo": (always)}),
                 gkeep_renderer.gkeep_renderer(
                     {"Update": (minutes * 10)}),
                 gcal_renderer.gcal_renderer(
                     {"Render Upcoming Events": (hours * 2),
                      "Look For Triggered Events": (always)},
                     oauth),
                 reddit_renderer.showerthoughts_reddit_renderer(
                     {"Scrape": (hours * 6),
                      "Shuffle": (always)} ),
                 reddit_renderer.til_reddit_renderer(
                     {"Scrape": (hours * 6),
                      "Shuffle": (always)} ),
                 reddit_renderer.seattle_reddit_renderer(
                     {"Scrape": (hours * 6),
                      "Shuffle": (always)}),
                 reddit_renderer.quotes_reddit_renderer(
                     {"Scrape": (hours * 6),
                      "Shuffle": (always)}),
                 reddit_renderer.lifeprotips_reddit_renderer(
                     {"Scrape": (hours * 6),
                      "Shuffle": (always)}),
                 twitter_renderer.twitter_renderer(
                     {"Fetch Tweets": (minutes * 15),
                      "Shuffle Tweets": (always)})
]
+
def get_renderers():
    """Return the shared list of all configured renderers."""
    return __registry
diff --git a/reuters_rss_renderer.py b/reuters_rss_renderer.py
new file mode 100644 (file)
index 0000000..d78102f
--- /dev/null
@@ -0,0 +1,129 @@
+import constants
+import file_writer
+import grab_bag
+import renderer
+import datetime
+import httplib
+import page_builder
+import profanity_filter
+import random
+import re
+import sets
+import xml.etree.ElementTree as ET
+
class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
    """Fetches one or more Reuters RSS feeds and renders two pages:
    a four-headline page and a one-story details page, each refreshed
    from a random subset of the banked stories."""

    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page):
        """Args:
            name_to_timeout_dict: operation name -> refresh period (sec).
            feed_site: hostname serving the RSS feeds.
            feed_uris: list of URI paths to fetch from feed_site.
            page: short label used in output filenames and page titles.
        """
        super(reuters_rss_renderer, self).__init__(name_to_timeout_dict, False)
        self.debug = 1
        self.feed_site = feed_site
        self.feed_uris = feed_uris
        self.page = page
        self.news = grab_bag.grab_bag()     # short headline blurbs
        self.details = grab_bag.grab_bag()  # longer blurbs with description
        self.filter = profanity_filter.profanity_filter()

    def debug_prefix(self):
        return "reuters(%s)" % (self.page)

    def periodic_render(self, key):
        if key == "Fetch News":
            return self.fetch_news()
        elif key == "Shuffle News":
            return self.shuffle_news()
        else:
            # BUGFIX: was "raise error(...)" -- an undefined name that
            # would itself NameError; raise a real exception type.
            raise Exception('Unexpected operation')

    def shuffle_news(self):
        """Re-render the headline and details pages from random subsets
        of the fetched stories.  Returns False when there aren't enough
        stories banked yet."""
        headlines = page_builder.page_builder()
        headlines.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
        headlines.set_title("%s" % self.page)
        subset = self.news.subset(4)
        if subset is None:
            self.debug_print("Not enough messages to choose from.")
            return False
        for msg in subset:
            headlines.add_item(msg)
        f = file_writer.file_writer('reuters-%s_4_none.html' % self.page)
        headlines.render_html(f)
        f.close()

        details = page_builder.page_builder()
        details.set_layout(page_builder.page_builder.LAYOUT_ONE_ITEM)
        details.set_title("%s" % self.page)
        subset = self.details.subset(1)
        if subset is None:
            self.debug_print("Not enough details to choose from.")
            return False
        for msg in subset:
            blurb = msg
            blurb += "</TD>\n"
            details.add_item(blurb)
        g = file_writer.file_writer('reuters-details-%s_6_none.html' % self.page)
        details.render_html(g)
        g.close()
        return True

    def fetch_news(self):
        """Fetch every configured feed and refill self.news /
        self.details.  Returns True if at least one story was kept."""
        count = 0
        self.news.clear()
        self.details.clear()
        # Ignore stories more than two weeks old.
        oldest = datetime.datetime.now() - datetime.timedelta(14)

        for uri in self.feed_uris:
            self.conn = httplib.HTTPConnection(self.feed_site)
            self.conn.request(
                "GET",
                uri,
                None,
                {"Accept-Charset": "utf-8"})
            response = self.conn.getresponse()
            if response.status != 200:
                print("%s: RSS fetch_news error, response: %d" % (self.page,
                                                                  response.status))
                self.debug_print(response.read())
                return False

            rss = ET.fromstring(response.read())
            channel = rss[0]
            for item in channel.getchildren():
                title = item.findtext('title')
                # Skip Reuters self-promotion, the "Editor's Choice"
                # pseudo-story, and anything that trips the profanity
                # filter.
                if (title is None or
                    "euters" in title or
                    title == "Editor's Choice" or
                    self.filter.contains_bad_words(title)):
                    continue
                pubdate = item.findtext('pubDate')
                image = item.findtext('image')
                descr = item.findtext('description')
                if descr is not None:
                    # Strip embedded HTML tags from the description.
                    descr = re.sub('<[^>]+>', '', descr)

                blurb = """<DIV style="padding:8px;
                                       font-size:34pt;
                                       -webkit-column-break-inside:avoid;">"""
                if image is not None:
                    blurb += '<IMG SRC=\"%s\" ALIGN=LEFT HEIGHT=115" style="padding:8px;">\n' % image
                blurb += '<P><B>%s</B>' % title

                if pubdate is not None:
                    # e.g. "Thu, 04 Jun 2015 08:16:35 GMT|-0400" --
                    # chop the trailing timezone token before parsing.
                    pubdate = pubdate.rsplit(' ', 1)[0]
                    dt = datetime.datetime.strptime(pubdate,
                                                    '%a, %d %b %Y %H:%M:%S')
                    if dt < oldest:
                        continue
                    blurb += dt.strftime(" <FONT COLOR=#bbbbbb>(%a&nbsp;%b&nbsp;%d)</FONT>")

                # BUGFIX: only bank a details blurb when this item has a
                # description.  Previously self.details.add(longblurb)
                # ran unconditionally, so an item without a description
                # either re-added a stale longblurb from an earlier
                # iteration or raised NameError on the first item.
                if descr is not None:
                    longblurb = blurb
                    longblurb += "<BR>"
                    longblurb += descr
                    longblurb += "</DIV>"
                    longblurb = longblurb.replace("font-size:34pt",
                                                  "font-size:44pt")
                    self.details.add(longblurb.encode('utf8'))
                blurb += "</DIV>"
                self.news.add(blurb.encode('utf8'))
                count += 1
        return count > 0
diff --git a/seattletimes_rss_renderer.py b/seattletimes_rss_renderer.py
new file mode 100644 (file)
index 0000000..906e00e
--- /dev/null
@@ -0,0 +1,90 @@
+import datetime
+import generic_news_rss_renderer as gnrss
+import sets
+
class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
    """Renders "non news" stories from Seattle Times RSS feeds,
    filtered to a fixed set of interesting categories and to stories no
    more than two weeks old."""

    # Categories worth showing.  frozenset replaces the long-deprecated
    # sets.ImmutableSet (same semantics, works on modern Pythons too).
    interesting_categories = frozenset([
        'Nation',
        'World',
        'Life',
        'Technology',  # BUGFIX: the comma was missing here, which fused
                       # this entry with 'Local News' into one bogus
                       # 'TechnologyLocal News' string.
        'Local News',
        'Food',
        'Drink',
        'Today File',
        'Seahawks',
        'Oddities',
        'Packfic NW',  # NOTE(review): probable typo for 'Pacific NW' --
                       # confirm against the feed's category names.
        'Home',
        'Garden',
        'Travel',
        'Outdoors',
    ])

    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
        super(seattletimes_rss_renderer, self).__init__(
            name_to_timeout_dict,
            feed_site,
            feed_uris,
            page_title)
        # Ignore stories more than two weeks old.
        self.oldest = datetime.datetime.now() - datetime.timedelta(14)
        self.debug_print("oldest story we'll keep: %s" % self.oldest)

    def debug_prefix(self):
        return "seattletimes"

    def get_headlines_page_prefix(self):
        return "seattletimes-nonnews"

    def get_details_page_prefix(self):
        return "seattletimes-details-nonnews"

    def should_use_https(self):
        return True

    def item_is_interesting_for_headlines(self, title, description, item):
        """Keep an RSS <item> only when it belongs to one of the
        interesting categories and (when datable) is recent enough."""
        if item.tag != "item":
            self.debug_print("Item.tag isn't item?!")
            return False

        # Flatten the item's children into a tag -> text map.
        # (Iterating an Element yields its children; works on both old
        # and new ElementTree, unlike the removed getchildren().)
        details = {}
        for detail in item:
            self.debug_print("detail %s => %s (%s)" % (detail.tag,
                                                       detail.attrib,
                                                       detail.text))
            if detail.text is not None:
                details[detail.tag] = detail.text
        if "category" not in details:
            self.debug_print("No category in details?!")
            self.debug_print(details)
            return False

        interesting = False
        for category in seattletimes_rss_renderer.interesting_categories:
            if category in details["category"]:
                self.debug_print("%s looks like a good category." % category)
                interesting = True
        if not interesting:
            return False

        # NOTE(review): the freshness check only runs for items that
        # carry an <enclosure>; confirm that is intentional.
        if 'enclosure' in details:
            if 'pubDate' in details:
                # e.g. "Fri, 13 Nov 2015 10:07:00 -0800" -- drop the
                # trailing timezone token before parsing.
                when = details['pubDate'].rsplit(' ', 1)[0]
                dt = datetime.datetime.strptime(when, '%a, %d %b %Y %H:%M:%S')
                if dt < self.oldest:
                    self.debug_print("%s is too old." % (
                        details["pubDate"]))
                    return False
        return True

    def item_is_interesting_for_article(self, title, description, item):
        # Articles need a reasonably meaty description to be worth a
        # details page.
        return len(description) >= 65
+
+#x = seattletimes_rss_renderer({"Test", 123},
+#                              "www.seattletimes.com",
+#                              [ "/life/feed/" ],
+#                              "nonnews")
+#x.periodic_render("Fetch News")
+#x.periodic_render("Shuffle News")
diff --git a/secrets.py b/secrets.py
new file mode 100644 (file)
index 0000000..b25f1fa
--- /dev/null
@@ -0,0 +1,22 @@
#!/usr/local/bin/python

# Placeholder secrets/credentials for the kiosk.  Copy in real values
# locally; never commit them.
#
# NOTE(review): this module's name collides with Python 3's standard
# library "secrets" module.
#
# NOTE(review): stock_renderer.py reads secrets.alphavantage_keys (a
# list of AlphaVantage API keys), which this template does not define.

# This can be generated at wunderground.com.
wunderground_key = "<your key here>"

# This is your google account and secrets from the pantheon (GCP)
# console for your app.
google_username = "<you>@gmail.com"
google_key = "<your key here>"
google_client_id = '<your app here>.apps.googleusercontent.com'
google_client_secret = '<your secret here>'

# These are from your myq mobile app login.
myq_username = "<you>@gmail.com"
myq_password = "<your password here>"
myq_appid = "<your appid here>"

# These can be generated on the developer console at Twitter.
twitter_consumer_key = "<your twitter app consumer key here>"
twitter_consumer_secret = "<you guessed it>"
twitter_access_token = "<your twitter app access token here>"
twitter_access_token_secret = "<your twitter app access token secret>"
diff --git a/stdin_trigger.py b/stdin_trigger.py
new file mode 100644 (file)
index 0000000..584f2b1
--- /dev/null
@@ -0,0 +1,24 @@
+import select
+import sys
+import trigger
+import logger
+
+log = logger.logger(__name__).get()
+
class stdin_trigger(trigger.trigger):
    """Trigger that drains pending lines from stdin.

    Non-blockingly polls stdin and logs whatever complete lines are
    waiting (at most ten per call).  It never activates a page itself:
    get_triggered_page_list always returns None.
    """

    def get_triggered_page_list(self):
        drained = 0
        while True:
            # Zero-timeout select: just peek whether stdin has data.
            ready, _, _ = select.select([sys.stdin], [], [], 0)
            if len(ready) == 0:
                break
            drained += 1
            if drained > 10:
                # Cap work per call so a firehose on stdin can't wedge us.
                break
            for stream in ready:
                if stream == sys.stdin:
                    line = sys.stdin.readline().rstrip()
                    if line == "":
                        break
                    log.info("***** stdin trigger saw: \"%s\" *****" % line)
        return None
diff --git a/stevens_renderer.py b/stevens_renderer.py
new file mode 100644 (file)
index 0000000..ab904ce
--- /dev/null
@@ -0,0 +1,45 @@
+import renderer
+import file_writer
+import httplib
+import xml.etree.ElementTree as ET
+
class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
    """Fetches the WSDOT Stevens Pass conditions RSS feed and renders
    it as a single HTML page."""

    def __init__(self, name_to_timeout_dict, feed_site, feed_uris):
        super(stevens_pass_conditions_renderer, self).__init__(
            name_to_timeout_dict, False)
        self.feed_site = feed_site
        self.feed_uris = feed_uris

    def debug_prefix(self):
        return "stevens"

    def periodic_render(self, key):
        """Write the conditions page from the first feed URI that
        answers 200.  Returns True on success, False if every feed
        failed."""
        f = file_writer.file_writer('stevens-conditions_1_none.html')
        try:
            for uri in self.feed_uris:
                self.conn = httplib.HTTPSConnection(self.feed_site)
                self.conn.request(
                    "GET",
                    uri,
                    None,
                    {"Accept-Charset": "utf-8"})
                response = self.conn.getresponse()
                if response.status == 200:
                    raw = response.read()
                    rss = ET.fromstring(raw)
                    channel = rss[0]
                    for item in channel.getchildren():
                        if item.tag == "title":
                            f.write("<h1>%s</h1><hr>" % item.text)
                            f.write('<IMG WIDTH=512 ALIGN=RIGHT HEIGHT=382 SRC="https://images.wsdot.wa.gov/nc/002vc06430.jpg?t=637059938785646824" style="padding:8px;">')
                        elif item.tag == "item":
                            for x in item.getchildren():
                                if x.tag == "description":
                                    # Strip WSDOT boilerplate from the
                                    # description markup.
                                    text = x.text
                                    text = text.replace("<strong>Stevens Pass US2</strong><br/>", "")
                                    text = text.replace("<br/><br/>", "<BR>")
                                    text = text.replace("<strong>Elevation Meters:</strong>1238<BR>", "")
                                    f.write('<P>\n%s\n' % text)
                    return True
            return False
        finally:
            # BUGFIX: close via finally so a network/XML exception can't
            # leak the output file handle (previously close was only on
            # the two normal return paths).
            f.close()
diff --git a/stock_renderer.py b/stock_renderer.py
new file mode 100644 (file)
index 0000000..5a9be49
--- /dev/null
@@ -0,0 +1,161 @@
+from bs4 import BeautifulSoup
+from threading import Thread
+import datetime
+import file_writer
+import json
+import re
+import renderer
+import random
+import secrets
+import time
+import urllib2
+
class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
    """Renders a 4-wide HTML grid of stock quotes fetched from the
    AlphaVantage GLOBAL_QUOTE API on a background thread.

    NOTE(review): Python 2 code (print statements, urllib2); depends on
    secrets.alphavantage_keys, which the secrets.py template does not
    define.
    """
    # format exchange:symbol
    def __init__(self, name_to_timeout_dict, symbols):
        # symbols: list of ticker symbol strings to render.
        super(stock_quote_renderer, self).__init__(name_to_timeout_dict, False)
        self.symbols = symbols
        self.prefix = "https://www.alphavantage.co/query?"
        self.thread = None  # background fetch/render worker, started lazily

    def debug_prefix(self):
        return "stock"

    def get_random_key(self):
        # Rotate among several API keys to spread out rate limiting.
        return random.choice(secrets.alphavantage_keys)

    def periodic_render(self, key):
        """Spawn (at most one) background render, but only during what
        looks like market hours.  Always returns True."""
        now = datetime.datetime.now()
        # NOTE(review): the "- 3" offsets look like a hardcoded
        # conversion of 9am-5pm Eastern market hours into local
        # (presumably Pacific) time -- confirm.  Market holidays are not
        # considered.
        if (now.hour < (9 - 3) or
            now.hour >= (17 - 3) or
            datetime.datetime.today().weekday() > 4):
            self.debug_print("The stock market is closed so not re-rendering")
            return True

        # Only one worker at a time; the API calls can be very slow.
        if (self.thread is None or not self.thread.is_alive()):
            self.debug_print("Spinning up a background thread...")
            self.thread = Thread(target = self.thread_internal_render, args=())
            self.thread.start()
        return True

    def thread_internal_render(self):
        """Worker thread: fetch one quote per symbol (with exponential
        backoff while the API throttles us) and write the page."""
        symbols_finished = 0
        f = file_writer.file_writer('stock_3_86400.html')
        f.write("<H1>Stock Quotes</H1><HR>")
        f.write("<TABLE WIDTH=99%>")
        for symbol in self.symbols:
#            print "---------- Working on %s\n" % symbol

            # https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=5min&apikey=<key>

            # https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=MSFT&apikey=<key>

            # Retry with exponential backoff (1, 2, 4 ... 512 sec) while
            # the response lacks the 'Global Quote' payload, which is
            # what a rate-limited reply looks like.
            attempts = 0
            cooked = ""
            while True:
                key = self.get_random_key()
                url = self.prefix + "function=GLOBAL_QUOTE&symbol=%s&apikey=%s" % (symbol, key)
                raw = urllib2.urlopen(url).read()
                cooked = json.loads(raw)
                if u'Global Quote' not in cooked:
#                    print "%s\n" % cooked
                    print "Failure %d, sleep %d sec...\n" % (attempts + 1,
                                                             2 ** attempts)
                    time.sleep(2 ** attempts)
                    attempts += 1
                    if attempts > 10: # we'll wait up to 512 seconds per symbol
                        break
                else:
                    break

            # These fuckers...
            if u'Global Quote' not in cooked:
                print "Can't get data for symbol %s: %s\n" % (
                    symbol, raw)
                continue
            cooked = cooked[u'Global Quote']

            # Sample payload:
            # {
            #   u'Global Quote':
            #     {
            #       u'01. symbol': u'MSFT',
            #       u'02. open': u'151.2900',
            #       u'03. high': u'151.8900',
            #       u'04. low': u'150.7650',
            #       u'05. price': u'151.1300',
            #       u'06. volume': u'16443559',
            #       u'07. latest trading day': u'2019-12-10',
            #       u'08. previous close': u'151.3600',
            #       u'09. change': u'-0.2300'
            #       u'10. change percent': u'-0.1520%',
            #     }
            # }

            # The [:-2] slices drop the last two digits of the
            # 4-decimal strings (e.g. u'151.1300' -> '151.13').
            price = "?????"
            if u'05. price' in cooked:
                price = cooked[u'05. price']
                price = price[:-2]

            percent_change = "?????"
            if u'10. change percent' in cooked:
                percent_change = cooked[u'10. change percent']
                if not '-' in percent_change:
                    percent_change = "+" + percent_change

            # Red cell for a down day, green for up, grey when unknown.
            change = "?????"
            cell_color = "#bbbbbb"
            if u'09. change' in cooked:
                change = cooked[u'09. change']
                if "-" in change:
                    cell_color = "#b00000"
                else:
                    cell_color = "#009000"
                change = change[:-2]

            # Four symbols per table row.
            if symbols_finished % 4 == 0:
                if (symbols_finished > 0):
                    f.write("</TR>")
                f.write("<TR>")
            symbols_finished += 1

            f.write("""
<TD WIDTH=20%% HEIGHT=150 BGCOLOR="%s">
  <!-- Container -->
  <DIV style="position:relative;
              height:150px;">
    <!-- Symbol -->
    <DIV style="position:absolute;
                bottom:50;
                right:-20;
                -webkit-transform:rotate(-90deg);
                font-size:28pt;
                font-family: helvetica, arial, sans-serif;
                font-weight:900;
                -webkit-text-stroke: 2px black;
                color: #ddd">
      %s
    </DIV>
    <!-- Current price, Change today and percent change today -->
    <DIV style="position:absolute;
                left:10;
                top:20;
                font-size:23pt;
                font-family: helvetica, arial, sans-serif;
                width:70%%">
            $%s<BR>
            <I>(%s)</I><BR>
            <B>$%s</B>
    </DIV>
  </DIV>
</TD>""" % (cell_color,
            symbol,
            price,
            percent_change,
            change))
        f.write("</TR></TABLE>")
        f.close()
        return True
+
+#x = stock_quote_renderer({}, ["MSFT", "GOOG", "GOOGL", "OPTAX", "VNQ"])
+#x.periodic_render(None)
+#x.periodic_render(None)
diff --git a/stranger_renderer.py b/stranger_renderer.py
new file mode 100644 (file)
index 0000000..c0389f6
--- /dev/null
@@ -0,0 +1,183 @@
+from bs4 import BeautifulSoup
+import datetime
+import file_writer
+import grab_bag
+import httplib
+import page_builder
+import profanity_filter
+import random
+import re
+import renderer
+import sets
+
class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
    """Scrapes the Stranger's EverOut events calendar and renders a
    rotating page of staff-picked events for today, tomorrow and the
    upcoming weekend."""

    def __init__(self, name_to_timeout_dict):
        super(stranger_events_renderer, self).__init__(name_to_timeout_dict, True)
        self.feed_site = "everout.thestranger.com"
        self.events = grab_bag.grab_bag()

    def debug_prefix(self):
        return "stranger"

    def periodic_render(self, key):
        self.debug_print("called for action %s" % key)
        if key == "Fetch Events":
            return self.fetch_events()
        elif key == "Shuffle Events":
            return self.shuffle_events()
        else:
            # BUGFIX: was 'raise error("Unknown operaiton")' -- an
            # undefined name (with a typo'd message) that would itself
            # NameError; raise a real exception type instead.
            raise Exception("Unknown operation")

    def get_style(self):
        """CSS injected into the rendered events page."""
        return """
<STYLE>
.calendar-post {
  line-height: 96%;
}
.calendar-post-title a {
  text-decoration: none;
  color:black;
  font-size:125%
  line-height:104%;
}
.calendar-post-date {
}
.calendar-post-category {
}
.calendar-post-location {
}
.calendar-post-price {
}
.calendar-touch-link {
}
.calendar-category {
  background-color:lightyellow;
  color:black;
  border:none;
  font-size:50%;
  padding:1px;
}
.calendar-post-price-mobile {
  visibility: hidden;
}
.img-responsive {
  float: left;
  margin: 10px 10px 10px 10px;
}
</STYLE>"""

    def shuffle_events(self):
        """Render four randomly-chosen banked events.  Returns False
        when not enough events have been fetched yet."""
        layout = page_builder.page_builder()
        layout.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
        layout.set_title("Stranger Events")
        layout.set_style(self.get_style())
        subset = self.events.subset(4)
        if subset is None:
            self.debug_print("Not enough events to build page.")
            return False

        for msg in subset:
            layout.add_item(msg)
        f = file_writer.file_writer('stranger-events_2_none.html')
        layout.render_html(f)
        f.close()
        return True

    def fetch_events(self):
        """Scrape several pages of staff picks (today, tomorrow, and the
        coming weekend) into self.events.  Returns True if anything was
        collected."""
        self.events.clear()
        feed_uris = [
            "/events/?page=1&picks=true",
            "/events/?page=2&picks=true",
            "/events/?page=3&picks=true",
        ]
        now = datetime.datetime.now()
        ts = now + datetime.timedelta(1)
        tomorrow = datetime.datetime.strftime(ts, "%Y-%m-%d")
        feed_uris.append("/events/?start-date=%s&picks=true" % tomorrow)
        # Add pages for the next Saturday (weekday 5) and Sunday unless
        # today/tomorrow already cover them.
        delta = 5 - now.weekday()
        if delta <= 0:
            delta += 7
        if delta > 1:
            ts = now + datetime.timedelta(delta)
            next_sat = datetime.datetime.strftime(ts, "%Y-%m-%d")
            feed_uris.append("/events/?start-date=%s&page=1&picks=true" % next_sat)
            feed_uris.append("/events/?start-date=%s&page=2&picks=true" % next_sat)
        delta += 1
        if delta > 1:
            ts = now + datetime.timedelta(delta)
            next_sun = datetime.datetime.strftime(ts, "%Y-%m-%d")
            feed_uris.append("/events/?start-date=%s&page=1&picks=true" % next_sun)
            feed_uris.append("/events/?start-date=%s&page=2&picks=true" % next_sun)

        for uri in feed_uris:
            self.debug_print("fetching '%s'" % uri)
            self.conn = httplib.HTTPSConnection(self.feed_site)
            self.conn.request(
                "GET",
                uri,
                None,
                {"Accept-Charset": "utf-8"})
            response = self.conn.getresponse()
            if response.status != 200:
                print("stranger: Failed, status %d" % (response.status))
                continue

            raw = response.read()
            soup = BeautifulSoup(raw, "html.parser")
            # Renamed from "filter" to avoid shadowing the builtin.
            pf = profanity_filter.profanity_filter()
            # Each event lives in a <div class="row event list-item mb-3
            # py-3"> holding a category tag, thumbnail, title/date/time
            # and a location/price column; we keep the whole chunk and
            # just patch up relative links/images below.
            for x in soup.find_all('div', class_='row event list-item mb-3 py-3'):
                text = x.get_text()
                if (pf.contains_bad_words(text)):
                    continue
                raw = unicode(x)
                raw = raw.replace('src="/',
                                  'align="left" src="https://www.thestranger.com/')
                raw = raw.replace('href="/',
                                  'href="https://www.thestranger.com/')
                raw = raw.replace('FREE', 'Free')
                raw = raw.replace('Save Event', '')
                raw = re.sub('^\s*$', '', raw, 0, re.MULTILINE)
                raw = re.sub('\n+', '\n', raw)
                # NOTE(review): '</#span>' below looks malformed -- as
                # written this pattern can never match a real closing
                # tag, so the substitution is a no-op.  Confirm intent.
                raw = re.sub('<span[^<>]*class="calendar-post-ticket"[^<>]*>.*</#span>', '', raw, 0, re.DOTALL | re.IGNORECASE)
                self.events.add(raw.encode('utf-8'))
            self.debug_print("fetched %d events so far." % self.events.size())
        return self.events.size() > 0
+
# Test code, disabled: it previously ran at import time, kicking off
# network scraping whenever this module was loaded; it also passes a
# set literal ({"Test", 123}) where __init__ expects a dict, so it
# would crash.  Kept commented out like the samples in sibling files.
#x = stranger_events_renderer({"Test": 123})
#x.periodic_render("Fetch Events")
#x.periodic_render("Shuffle Events")
diff --git a/trigger.py b/trigger.py
new file mode 100644 (file)
index 0000000..d2c3163
--- /dev/null
@@ -0,0 +1,10 @@
+
class trigger(object):
    """Base class for something that can trigger a page becoming active."""

    # Relative importance of a triggered page; higher wins.
    PRIORITY_HIGH = 100
    PRIORITY_NORMAL = 50
    PRIORITY_LOW = 0

    def get_triggered_page_list(self):
        """Return the pages this trigger wants activated, or None when
        nothing is triggered (the default)."""
        return None
diff --git a/trigger_catalog.py b/trigger_catalog.py
new file mode 100644 (file)
index 0000000..0d36224
--- /dev/null
@@ -0,0 +1,10 @@
+import camera_trigger
+import gcal_trigger
+import myq_trigger
+
# Every trigger instance the kiosk polls; see trigger.trigger for the
# interface they implement.
__registry = [ camera_trigger.any_camera_trigger(),
               myq_trigger.myq_trigger(),
               gcal_trigger.gcal_trigger() ]

def get_triggers():
    # Accessor for the module-private trigger list.
    return __registry
diff --git a/twitter_renderer.py b/twitter_renderer.py
new file mode 100644 (file)
index 0000000..49c39da
--- /dev/null
@@ -0,0 +1,108 @@
+import file_writer
+import random
+import renderer
+import profanity_filter
+import re
+import secrets
+import tweepy
+
class twitter_renderer(renderer.debuggable_abstaining_renderer):
    """Fetches the authenticated user's home timeline via tweepy and
    renders a page of recent tweets from one randomly-chosen author."""

    def __init__(self, name_to_timeout_dict):
        super(twitter_renderer, self).__init__(name_to_timeout_dict, False)
        self.debug = 1
        self.tweets_by_author = dict()
        self.handles_by_author = dict()
        self.filter = profanity_filter.profanity_filter()
        # Matches http(s) URLs so they can be turned into anchors.
        self.urlfinder = re.compile(
            "((http|https)://[\-A-Za-z0-9\\.]+/[\?\&\-A-Za-z0-9_\\.]+)")

        # == OAuth Authentication ==
        #
        # Consumer keys and access tokens come from the application's
        # Details page at https://dev.twitter.com/apps; see secrets.py.
        consumer_key = secrets.twitter_consumer_key
        consumer_secret = secrets.twitter_consumer_secret
        access_token = secrets.twitter_access_token
        access_token_secret = secrets.twitter_access_token_secret

        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        self.api = tweepy.API(auth)

    def debug_prefix(self):
        return "twitter"

    def linkify(self, value):
        """Wrap any URL appearing in value in an HTML anchor tag."""
        return self.urlfinder.sub(r'<a href="\1">\1</a>', value)

    def periodic_render(self, key):
        if key == "Fetch Tweets":
            return self.fetch_tweets()
        elif key == "Shuffle Tweets":
            return self.shuffle_tweets()
        else:
            # BUGFIX: was "raise error(...)" -- an undefined name that
            # would itself NameError; raise a real exception type.
            raise Exception('Unexpected operation')

    def fetch_tweets(self):
        """Pull up to 200 recent home-timeline tweets, bucketed by
        author.  Returns False if the API call fails."""
        try:
            tweets = self.api.home_timeline(tweet_mode='extended', count=200)
        except Exception:
            # Was a bare "except:"; keep the best-effort behavior but no
            # longer swallow KeyboardInterrupt/SystemExit.  (print(...)
            # matches the style used elsewhere in this repo.)
            print("Exception while fetching tweets!")
            return False
        for tweet in tweets:
            author = tweet.author.name
            self.handles_by_author[author] = tweet.author.screen_name
            self.tweets_by_author.setdefault(author, list()).append(tweet)
        return True

    def shuffle_tweets(self):
        """Pick one author at random and render up to four of their
        clean, deduplicated tweets.  Returns False if nothing is
        banked."""
        authors = list(self.tweets_by_author.keys())
        if len(authors) == 0:
            # BUGFIX: random.choice on an empty sequence raises; bail
            # politely when fetch_tweets hasn't succeeded yet.
            return False
        author = random.choice(authors)
        handle = self.handles_by_author[author]
        tweets = self.tweets_by_author[author]
        already_seen = set()
        f = file_writer.file_writer('twitter_5_none.html')
        f.write('<TABLE WIDTH=96%><TR><TD WIDTH=80%>')
        f.write('<H2>%s (@%s)</H2></TD>\n' % (author, handle))
        f.write('<TD ALIGN="right" VALIGN="top">')
        f.write('<IMG SRC="twitter.png" WIDTH=42></TD></TR></TABLE>\n')
        f.write('<HR>\n<UL>\n')
        count = 0
        length = 0
        for tweet in tweets:
            text = tweet.full_text
            if ((text not in already_seen) and
                (not self.filter.contains_bad_words(text))):
                already_seen.add(text)
                text = self.linkify(text)
                f.write(u'<LI><B>%s</B>\n' % text)
                count += 1
                length += len(text)
                # Stop after four tweets or roughly a page's worth of text.
                if count > 3 or length > 270:
                    break
        f.write('</UL>\n')
        f.close()
        return True
+
# Test code, disabled: it previously ran at import time, constructing a
# renderer and hitting the Twitter API whenever this module was loaded.
# Kept commented out like the samples in sibling files.
#t = twitter_renderer(
#    {"Fetch Tweets" : 1,
#     "Shuffle Tweets" : 1})
#x = "bla bla bla https://t.co/EjWnT3UA9U bla bla"
#x = t.linkify(x)
#print x
#if t.fetch_tweets() == 0:
#    print "Error fetching tweets, none fetched."
#else:
#    t.shuffle_tweets()
diff --git a/utils.py b/utils.py
new file mode 100644 (file)
index 0000000..51a29e7
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,63 @@
+import time
+import os
+import constants
+from datetime import datetime
+
def timestamp():
    """Return the current local time formatted like an access-log
    stamp, e.g. "01/Jul/2020:14:01:40" (the %Z field renders empty for
    a naive datetime)."""
    return datetime.fromtimestamp(time.time()).strftime('%d/%b/%Y:%H:%M:%S%Z')
+
def describe_age_of_file(filename):
    """Describe in prose how long ago filename changed (per st_ctime),
    e.g. "2 hours, and 5 minutes"; "?????" if it can't be examined."""
    try:
        seconds_old = time.time() - os.stat(filename).st_ctime
        return describe_duration(seconds_old)
    except Exception:
        return "?????"
+
def describe_age_of_file_briefly(filename):
    """Compact version of describe_age_of_file, e.g. "2h 5m";
    "?????" if the file can't be examined."""
    try:
        seconds_old = time.time() - os.stat(filename).st_ctime
        return describe_duration_briefly(seconds_old)
    except Exception:
        return "?????"
+
def describe_duration(age):
    """Convert a duration in seconds into prose such as
    "2 days, 3 hours, and 5 minutes".  Seconds are truncated; period
    lengths come from the constants module."""
    days, remainder = divmod(age, constants.seconds_per_day)
    hours, remainder = divmod(remainder, constants.seconds_per_hour)
    minutes, _ = divmod(remainder, constants.seconds_per_minute)

    descr = ""
    if days > 1:
        descr = "%d days, " % days
    elif days == 1:
        descr = "1 day, "
    if hours > 1:
        descr += "%d hours, " % hours
    elif hours == 1:
        descr += "1 hour, "
    if descr:
        descr += "and "
    if minutes == 1:
        descr += "1 minute"
    else:
        descr += "%d minutes" % minutes
    return descr
+
def describe_duration_briefly(age):
    """Compact form of describe_duration, e.g. "2d 3h 5m".  Zero-valued
    day/hour fields are omitted; minutes always appear."""
    days, remainder = divmod(age, constants.seconds_per_day)
    hours, remainder = divmod(remainder, constants.seconds_per_hour)
    minutes = remainder // constants.seconds_per_minute
    out = ""
    if days > 0:
        out = "%dd " % days
    if hours > 0:
        out += "%dh " % hours
    return out + ("%dm" % minutes)
+
+#x = describe_age_of_file_briefly("pages/clock_10_none.html")
+#print x
diff --git a/weather_renderer.py b/weather_renderer.py
new file mode 100644 (file)
index 0000000..26c49ca
--- /dev/null
@@ -0,0 +1,390 @@
+from datetime import datetime
+import file_writer
+import renderer
+import json
+import re
+import secrets
+import urllib2
+import random
+
class weather_renderer(renderer.debuggable_abstaining_renderer):
    """Fetch a 5-day / 3-hour forecast and render it as an HTML page
    fragment (via file_writer) for the kiosk.

    NOTE(review): the original docstring said "wunderground" but the
    code below calls api.openweathermap.org.
    """

    def __init__(self,
                 name_to_timeout_dict,
                 file_prefix):
        # file_prefix selects the forecast location (see fetch_weather)
        # and is embedded in the output page's filename.
        super(weather_renderer, self).__init__(name_to_timeout_dict, False)
        self.file_prefix = file_prefix

    def debug_prefix(self):
        # Tag prepended to debug messages by the base class.
        return "weather(%s)" % (self.file_prefix)

    def periodic_render(self, key):
        # Only one kind of periodic work to do; the key is ignored.
        return self.fetch_weather()

    def describe_time(self, index):
        # Map a 3-hour forecast slot index (0..7 within one day) onto a
        # coarse time-of-day label.
        if (index <= 1):
            return "overnight"
        elif (index <= 3):
            return "morning"
        elif (index <= 5):
            return "afternoon"
        else:
            return "evening"

    def describe_wind(self, mph):
        # Bucket a wind speed (mph, per the imperial-units API call
        # below) into an adjective.
        if mph <= 0.3:
            return "calm"
        elif mph <= 5.0:
            return "light"
        elif mph < 15.0:
            return "breezy"
        elif mph <= 25.0:
            return "gusty"
        else:
            return "heavy"

    def describe_magnitude(self, mm):
        # Bucket a precipitation amount (the API's 3h values, presumably
        # mm -- TODO confirm) into an adjective.
        if (mm < 2):
            return "light"
        elif (mm < 10):
            return "moderate"
        else:
            return "heavy"

    def describe_precip(self, rain, snow):
        # English description of one slot's rain/snow amounts.
        # NOTE(review): falls off the end (returns None) if rain or snow
        # is negative; upstream data presumably never is -- verify.
        if rain == 0 and snow == 0:
            return "no precipitation"
        magnitude = rain + snow
        if rain > 0 and snow > 0:
            return "a %s mix of rain and snow" % self.describe_magnitude(magnitude)
        elif rain > 0:
            return "%s rain" % self.describe_magnitude(magnitude)
        elif snow > 0:
            return "%s snow" % self.describe_magnitude(magnitude)

    def fix_caps(self, s):
        # Lowercase everything, then re-capitalize the first letter of
        # each "."-delimited sentence.  The final replace cleans up the
        # ". ." artifact produced by a trailing period.
        r = ""
        s = s.lower()
        for x in s.split("."):
            x = x.strip()
            r += x.capitalize() + ". "
        r = r.replace(". .", ".")
        return r

    def pick_icon(self, conditions, rain, snow):
        """Choose a weather icon filename for one day from its per-slot
        condition strings and rain/snow amounts."""
        #                     rain     snow    clouds    sun
        # fog.gif
        # hazy.gif
        # clear.gif
        # mostlycloudy.gif     F         F        6+      X
        # partlycloudy.gif     F         F        4+      4-
        # cloudy.gif
        # partlysunny.gif      F         F        X       5+
        # mostlysunny.gif      F         F        X       6+
        # rain.gif             T         F        X       X
        # sleet.gif            T         T        X       X
        # flurries.gif         F         T        X       X    (<1")
        # snow.gif             F         T        X       X    (else)
        # sunny.gif            F         F        X       7+
        # tstorms.gif
        seen_rain = False
        seen_snow = False
        cloud_count = 0
        clear_count = 0
        total_snow = 0
        count = min(len(conditions), len(rain), len(snow))
        for x in xrange(0, count):
            # NOTE(review): these two assignments OVERWRITE on every
            # iteration rather than accumulate, so only the final slot
            # decides whether the day counts as rainy/snowy.  Probably
            # intended "seen_rain = seen_rain or rain[x] > 0" (and the
            # same for snow).  Also note the stray C-style semicolons.
            seen_rain = rain[x] > 0;
            seen_snow = snow[x] > 0;
            total_snow += snow[x]
            txt = conditions[x].lower()
            if ("cloud" in txt):
                cloud_count += 1
            if ("clear" in txt or "sun" in txt):
                clear_count += 1

        if (seen_rain and seen_snow):
            if (total_snow < 10):
                return "sleet.gif"
            else:
                return "snow.gif"
        if (seen_snow):
            if (total_snow < 10):
                return "flurries.gif"
            else:
                return "snow.gif"
        if (seen_rain):
            return "rain.gif"
        if (cloud_count >= 6):
            return "mostlycloudy.gif"
        elif (cloud_count >= 4):
            return "partlycloudy.gif"
        if (clear_count >= 7):
            return "sunny.gif"
        elif (clear_count >= 6):
            return "mostlysunny.gif"
        elif (clear_count >= 4):
            return "partlysunny.gif"
        return "clear.gif"

    def describe_weather(self,
                         high, low,
                         wind, conditions, rain, snow):
        """Build an English paragraph describing one day's forecast from
        its per-3h-slot wind/condition/rain/snow lists, mentioning each
        attribute only when it changes between time-of-day buckets."""
        # High temp: 65
        # Low temp: 44
        #             -onight------  -morning----- -afternoon--  -evening----
        #             12a-3a  3a-6a  6a-9a  9a-12p 12p-3p 3p-6p  6p-9p 9p-12p
        # Wind:       [12.1   3.06   3.47   4.12   3.69   3.31   2.73  2.1]
        # Conditions: [Clouds Clouds Clouds Clouds Clouds Clouds Clear Clear]
        # Rain:       [0.4    0.2    0      0      0      0      0     0]
        # Snow:       [0      0      0      0      0      0      0     0]
        # NOTE(review): high/low are converted to int but never used
        # after that in this method.
        high = int(high)
        low = int(low)
        count = min(len(wind), len(conditions), len(rain), len(snow))
        descr = ""

        # The "l"-prefixed variables remember the previous slot's values
        # so repeated attributes are not re-stated.
        lcondition = ""
        lwind = ""
        lprecip = ""
        ltime = ""
        for x in xrange(0, count):
            time = self.describe_time(x)
            current = ""
            chunks = 0

            txt = conditions[x]
            if txt == "Clouds":
                txt = "cloudy"
            elif txt == "Rain":
                txt = "rainy"

            if (txt != lcondition):
                if txt != "Snow" and txt != "Rain":
                    current += txt
                    chunks += 1
                lcondition = txt

            txt = self.describe_wind(wind[x])
            if (txt != lwind):
                if (len(current) > 0):
                    current += " with "
                current += txt + " winds"
                lwind = txt
                chunks += 1

            txt = self.describe_precip(rain[x], snow[x])
            if (txt != lprecip):
                if (len(current) > 0):
                    if (chunks > 1):
                        current += " and "
                    else:
                        current += " with "
                chunks += 1
                current += txt
                lprecip = txt

            if (len(current)):
                if (ltime != time):
                    # Randomly vary the sentence shape for readability.
                    if (random.randint(0, 3) == 0):
                        if (time != "overnight"):
                            descr += current + " in the " + time + ". "
                        # NOTE(review): no "else" here -- when time is
                        # not "overnight" BOTH the sentence above and
                        # the "overnight" sentence below are emitted for
                        # the same slot.  Looks like a missing else.
                        descr += current + " overnight. "
                    else:
                        if (time != "overnight"):
                            descr += "In the "
                        descr += time + ", " + current + ". "
                else:
                    current = current.replace("cloudy", "clouds")
                    descr += current + " developing. "
                ltime = time
        if (ltime == "overnight" or ltime == "morning"):
            descr += "Conditions continuing the rest of the day. "
        descr = descr.replace("with breezy winds", "and breezy")
        descr = descr.replace("Clear developing", "Skies clearing")
        descr = self.fix_caps(descr)
        return descr

    def fetch_weather(self):
        """Fetch the 5-day / 3-hour forecast for the location implied by
        self.file_prefix, aggregate it per day, and write a one-row HTML
        table of per-day summaries via file_writer.

        Returns True on completion; network/JSON errors propagate.
        """
        # Map the configured prefix onto an openweathermap query.
        if self.file_prefix == "stevens":
            text_location = "Stevens Pass, WA"
            param = "lat=47.74&lon=-121.08"
        elif self.file_prefix == "telma":
            text_location = "Telma, WA"
            param = "lat=47.84&lon=-120.81"
        else:
            text_location = "Bellevue, WA"
            param = "id=5786882"

        # NOTE(review): Python 2 only (urllib2 here, xrange below).  The
        # response handle is not closed if read() raises.
        www = urllib2.urlopen('http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial' % (
            param, secrets.openweather_key))
        response = www.read()
        www.close()
        parsed_json = json.loads(response)

        # https://openweathermap.org/forecast5
        # {"cod":"200",
        #  "message":0.0036,
        #  "cnt":40,
        #  "list":[
        #     {"dt":1485799200,
        #      "main":{"temp":261.45,"temp_min":259.086,"temp_max":261.45,"pressure":1023.48,"sea_level":1045.39,"grnd_level":1023.48,"humidity":79,"temp_kf":2.37},
        #      "weather":[
        #         {"id":800,"main":"Clear","description":"clear sky","icon":"02n"}
        #      ],
        #     "clouds":{"all":8},
        #     "wind":{"speed":4.77,"deg":232.505},
        #     "snow":{},
        #     "sys":{"pod":"n"},
        #     "dt_txt":"2017-01-30 18:00:00"
        #     },
        #     {"dt":1485810000,....
        f = file_writer.file_writer('weather-%s_3_10800.html' % self.file_prefix)
        f.write("""
<h1>Weather at %s:</h1>
<hr>
<center>
<table width=99%% cellspacing=10 border=0>
        <tr>""" % text_location)
        count = parsed_json['cnt']

        # Per-date accumulators, keyed by the "YYYY-MM-DD" prefix of
        # each 3h slot's dt_txt.
        ts = {}
        highs = {}
        lows = {}
        wind = {}
        conditions = {}
        rain = {}
        snow = {}
        # First pass: initialize an accumulator entry for every date.
        for x in xrange(0, count):
            data = parsed_json['list'][x]
            dt = data['dt_txt']  # 2019-10-07 18:00:00
            date = dt.split(" ")[0]
            time = dt.split(" ")[1]  # unused
            wind[date] = []
            conditions[date] = []
            highs[date] = -99999
            lows[date] = +99999
            rain[date] = []
            snow[date] = []
            ts[date] = 0

        # Second pass: fold each 3h slot into its date's aggregates.
        for x in xrange(0, count):
            data = parsed_json['list'][x]
            dt = data['dt_txt']  # 2019-10-07 18:00:00
            date = dt.split(" ")[0]
            time = dt.split(" ")[1]  # unused
            _ = data['dt']
            # Keep the latest epoch timestamp seen for this date; used
            # below to produce the display date.
            if (_ > ts[date]):
                ts[date] = _
            temp = data["main"]["temp"]
            if (highs[date] < temp):
                highs[date] = temp
            if (temp < lows[date]):
                lows[date] = temp
            wind[date].append(data["wind"]["speed"])
            conditions[date].append(data["weather"][0]["main"])
            # "rain"/"3h" (and "snow"/"3h") are optional in the API
            # response; treat absence as zero precipitation.
            if "rain" in data and "3h" in data["rain"]:
                rain[date].append(data["rain"]["3h"])
            else:
                rain[date].append(0)
            if "snow" in data and "3h" in data["snow"]:
                snow[date].append(data["snow"]["3h"])
            else:
                snow[date].append(0)

            # {u'clouds': {u'all': 0},
            #  u'sys': {u'pod': u'd'},
            #  u'dt_txt': u'2019-10-09 21:00:00',
            #  u'weather': [
            #      {u'main': u'Clear',
            #       u'id': 800,
            #       u'icon': u'01d',
            #       u'description': u'clear sky'}
            #  ],
            #  u'dt': 1570654800,
            #  u'main': {
            #       u'temp_kf': 0,
            #       u'temp': 54.74,
            #       u'grnd_level': 1018.95,
            #       u'temp_max': 54.74,
            #       u'sea_level': 1026.46,
            #       u'humidity': 37,
            #       u'pressure': 1026.46,
            #       u'temp_min': 54.74
            #  },
            #  u'wind': {u'speed': 6.31, u'deg': 10.09}}

        # Next 5 half-days
        #for x in xrange(0, 5):
        #    fcast = parsed_json['forecast']['txt_forecast']['forecastday'][x]
        #    text = fcast['fcttext']
        #    text = re.subn(r' ([0-9]+)F', r' \1&deg;F', text)[0]
        #    f.write('<td style="vertical-align:top;font-size:75%%"><P STYLE="padding:8px;">%s</P></td>' % text)
        #f.write('</tr></table>')
        #f.close()
        #return True

        #f.write("<table border=0 cellspacing=10>\n")
        # Count distinct display days first so the column widths below
        # divide the table evenly.  %e (space-padded day-of-month) is a
        # glibc strftime extension -- not portable everywhere.
        days_seen = {}
        for date in sorted(highs.keys()):
            today = datetime.fromtimestamp(ts[date])
            formatted_date = today.strftime('%a %e %b')
            if (formatted_date in days_seen):
                continue;
            days_seen[formatted_date] = True
        num_days = len(days_seen.keys())

        # Emit one <td> (itself a small table) per distinct day.
        days_seen = {}
        for date in sorted(highs.keys()):
            precip = 0.0
            for _ in rain[date]:
                precip += _
            for _ in snow[date]:
                precip += _

            today = datetime.fromtimestamp(ts[date])
            formatted_date = today.strftime('%a %e %b')
            if (formatted_date in days_seen):
                continue;
            days_seen[formatted_date] = True
            # NOTE(review): under Python 2 this is integer division;
            # Python 3 would emit a float width attribute here.
            f.write('<td width=%d%% style="vertical-align:top;">\n' % (100 / num_days))
            f.write('<table border=0>\n')

            # Date
            f.write('  <tr><td colspan=3 height=50><b><center><font size=6>' + formatted_date + '</font></center></b></td></tr>\n')

            # Icon
            f.write('  <tr><td colspan=3 height=100><center><img src="/icons/weather/%s" height=125></center></td></tr>\n' %
                    self.pick_icon(conditions[date], rain[date], snow[date]))

            # Low temp (highlighted when at/below freezing)
            color = "#000099"
            if (lows[date] <= 32.5):
                color = "#009999"
            f.write('  <tr><td width=33%% align=left><font color="%s"><b>%d&deg;F&nbsp;&nbsp;</b></font></td>\n' % (
                color, int(lows[date])))

            # Total precip, converted from mm to inches; only shown when
            # it rounds to something visible.
            precip *= 0.0393701
            if (precip > 0.025):
                f.write('      <td width=33%%><center><b><font style="background-color:#dfdfff; color:#003355">%3.1f"</font></b></center></td>\n' % precip)
            else:
                f.write('      <td width=33%>&nbsp;</td>\n')

            # High temp (highlighted when hot)
            color = "#800000"
            if (highs[date] >= 80):
                color = "#AA0000"
            f.write('      <td align=right><font color="%s"><b>&nbsp;&nbsp;%d&deg;F</b></font></td></tr>\n' % (
                color, int(highs[date])))

            # Text "description"
            f.write('<tr><td colspan=3 style="vertical-align:top;font-size:75%%">%s</td></tr>\n' %
                    self.describe_weather(highs[date], lows[date], wind[date], conditions[date], rain[date], snow[date]))
            f.write('</table>\n</td>\n')
        # NOTE(review): f is never closed here (the commented-out older
        # code above did call f.close()); verify file_writer flushes on
        # destruction.
        f.write("</tr></table></center>")
        return True
+
+#x = weather_renderer({"Stevens": 1000},
+#                     "stevens")
+#x.periodic_render("Stevens")
diff --git a/wsj_rss_renderer.py b/wsj_rss_renderer.py
new file mode 100644 (file)
index 0000000..8e2b0cc
--- /dev/null
@@ -0,0 +1,41 @@
+import generic_news_rss_renderer
+
class wsj_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
    """Renders Wall Street Journal RSS headlines and article details,
    filtering out the feed's "WSJ.com" self-promotional boilerplate."""

    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
        super(wsj_rss_renderer, self).__init__(
            name_to_timeout_dict, feed_site, feed_uris, page_title)
        self.debug = 1

    def debug_prefix(self):
        # Tag prepended to debug messages by the base class.
        return "wsj(%s)" % (self.page_title)

    def get_headlines_page_prefix(self):
        # Filename prefix for the headlines page.
        return "wsj-%s" % (self.page_title)

    def get_details_page_prefix(self):
        # Filename prefix for the article-details page.
        return "wsj-details-%s" % (self.page_title)

    def should_use_https(self):
        return True

    def _is_real_news(self, title, description):
        # Items that mention "WSJ.com" are site boilerplate, not news.
        return all("WSJ.com" not in text for text in (title, description))

    def item_is_interesting_for_headlines(self, title, description, item):
        return self._is_real_news(title, description)

    def item_is_interesting_for_article(self, title, description, item):
        return self._is_real_news(title, description)
+
+# Test
+#x = wsj_rss_renderer(
+#    {"Fetch News" : 1,
+#     "Shuffle News" : 1},
+#    "feeds.a.dj.com",
+#    [ "/rss/RSSWorldNews.xml" ],
+#    "Test" )
+#if x.fetch_news() == 0:
+#    print "Error fetching news, no items fetched."
+#x.shuffle_news()