Format codebase w/ black.
authorScott Gasch <[email protected]>
Mon, 14 Dec 2020 19:27:34 +0000 (11:27 -0800)
committerScott Gasch <[email protected]>
Mon, 14 Dec 2020 19:27:34 +0000 (11:27 -0800)
39 files changed:
bellevue_reporter_rss_renderer.py
camera_trigger.py
chooser.py
cnn_rss_renderer.py
decorators.py
file_writer.py
gcal_renderer.py
gcal_trigger.py
gdata_oauth.py
gdocs_renderer.py
generic_news_rss_renderer.py
gkeep_renderer.py
globals.py
google_news_rss_renderer.py
grab_bag.py
health_renderer.py
kiosk.py
local_photos_mirror_renderer.py
mynorthwest_rss_renderer.py
myq_renderer.py
myq_trigger.py
page_builder.py
picasa_renderer.py
pollen_renderer.py
profanity_filter.py
reddit_renderer.py
renderer.py
renderer_catalog.py
reuters_rss_renderer.py
seattletimes_rss_renderer.py
stevens_renderer.py
stock_renderer.py
stranger_renderer.py
trigger.py
trigger_catalog.py
twitter_renderer.py
utils.py
weather_renderer.py
wsj_rss_renderer.py

index 78ec69484069901e8e67292d22c25333700658c5..1bd351475a16e20af841e84a2cf06d3c0200a2cb 100644 (file)
@@ -1,13 +1,12 @@
 import generic_news_rss_renderer as gnrss
 import re
 
+
 class bellevue_reporter_rss_renderer(gnrss.generic_news_rss_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
         super(bellevue_reporter_rss_renderer, self).__init__(
-            name_to_timeout_dict,
-            feed_site,
-            feed_uris,
-            page_title)
+            name_to_timeout_dict, feed_site, feed_uris, page_title
+        )
         self.debug = 1
 
     def debug_prefix(self):
@@ -23,20 +22,23 @@ class bellevue_reporter_rss_renderer(gnrss.generic_news_rss_renderer):
         return True
 
     def munge_description(self, description):
-        description = re.sub('<[^>]+>', '', description)
-        description = re.sub('Bellevue\s+Reporter\s+Bellevue\s+Reporter', '',
-                             description)
-        description = re.sub('\s*\-\s*Your local homepage\.\s*', '', description)
+        description = re.sub("<[^>]+>", "", description)
+        description = re.sub(
+            "Bellevue\s+Reporter\s+Bellevue\s+Reporter", "", description
+        )
+        description = re.sub("\s*\-\s*Your local homepage\.\s*", "", description)
         return description
 
     def item_is_interesting_for_headlines(self, title, description, item):
         if self.is_item_older_than_n_days(item, 10):
             self.debug_print("%s: is too old!" % title)
             return False
-        if (title.find("NFL") != -1 or
-            re.search("[Ll]ive [Ss]tream", title) != None or
-            re.search("[Ll]ive[Ss]tream", title) != None or
-            re.search("[Ll]ive [Ss]tream", description) != None):
+        if (
+            title.find("NFL") != -1
+            or re.search("[Ll]ive [Ss]tream", title) != None
+            or re.search("[Ll]ive[Ss]tream", title) != None
+            or re.search("[Ll]ive [Ss]tream", description) != None
+        ):
             self.debug_print("%s: looks like it's about football." % title)
             return False
         return True
@@ -45,33 +47,36 @@ class bellevue_reporter_rss_renderer(gnrss.generic_news_rss_renderer):
         if self.is_item_older_than_n_days(item, 10):
             self.debug_print("%s: is too old!" % title)
             return False
-        if (title.find(" NFL") != -1 or
-            re.search("[Ll]ive [Ss]tream", title) != None or
-            re.search("[Ll]ive[Ss]tream", title) != None or
-            re.search("[Ll]ive [Ss]tream", description) != None):
+        if (
+            title.find(" NFL") != -1
+            or re.search("[Ll]ive [Ss]tream", title) != None
+            or re.search("[Ll]ive[Ss]tream", title) != None
+            or re.search("[Ll]ive [Ss]tream", description) != None
+        ):
             self.debug_print("%s: looks like it's about football." % title)
             return False
         return True
 
+
 # Test
-#x = bellevue_reporter_rss_renderer(
+# x = bellevue_reporter_rss_renderer(
 #    {"Fetch News" : 1,
 #     "Shuffle News" : 1},
 #    "www.bellevuereporter.com",
 #    [ "/feed/" ],
 #    "Test" )
-#d = """
-#<DIV style="padding:8px;
+# d = """
+# <DIV style="padding:8px;
 #     font-size:44pt;
 #     -webkit-column-break-inside:avoid;"><P>
-#<B>Task force will tackle issues of racial justice, police reform</B>
-#<BR>Bellevue Reporter
-#Bellevue Reporter - Your local homepage.
-#Inslee names civil rights activists, pastors, and cops to panel that may forge ideas f#or new laws Task force will tackle issues of racial justice, police reform
-#Wire Service
-#</DIV>"""
-#d = x.munge_description(d)
-#print d
-#if x.fetch_news() == 0:
+# <B>Task force will tackle issues of racial justice, police reform</B>
+# <BR>Bellevue Reporter
+# Bellevue Reporter - Your local homepage.
+# Inslee names civil rights activists, pastors, and cops to panel that may forge ideas f#or new laws Task force will tackle issues of racial justice, police reform
+# Wire Service
+# </DIV>"""
+# d = x.munge_description(d)
+# print d
+# if x.fetch_news() == 0:
 #    print "Error fetching news, no items fetched."
-#x.shuffle_news()
+# x.shuffle_news()
index 64cb6383f375c6fdd58e067abc7e308311d3333c..0f42ca20782cd06ecf46e23d68acd017a5d9f9a2 100644 (file)
@@ -5,27 +5,28 @@ import trigger
 import utils
 from datetime import datetime
 
+
 class any_camera_trigger(trigger.trigger):
     def __init__(self):
         self.triggers_in_the_past_seven_min = {
-            "driveway" :       0,
-            "frontdoor" :      0,
-            "cabin_driveway" : 0,
-            "backyard" :       0,
+            "driveway": 0,
+            "frontdoor": 0,
+            "cabin_driveway": 0,
+            "backyard": 0,
         }
         self.last_trigger = {
-            "driveway" :       0,
-            "frontdoor" :      0,
-            "cabin_driveway" : 0,
-            "backyard" :       0,
+            "driveway": 0,
+            "frontdoor": 0,
+            "cabin_driveway": 0,
+            "backyard": 0,
         }
 
     def choose_priority(self, camera, age):
         base_priority_by_camera = {
-            "driveway" : 1,
-            "frontdoor" : 2,
-            "cabin_driveway" : 1,
-            "backyard" : 0,
+            "driveway": 1,
+            "frontdoor": 2,
+            "cabin_driveway": 1,
+            "backyard": 0,
         }
         priority = base_priority_by_camera[camera]
         if age < 10:
@@ -39,10 +40,7 @@ class any_camera_trigger(trigger.trigger):
     def get_triggered_page_list(self):
         triggers = []
         cameras_with_recent_triggers = 0
-        camera_list = [ "driveway",
-                        "frontdoor",
-                        "cabin_driveway",
-                        "backyard" ]
+        camera_list = ["driveway", "frontdoor", "cabin_driveway", "backyard"]
 
         now = time.time()
         try:
@@ -51,8 +49,7 @@ class any_camera_trigger(trigger.trigger):
             for camera in camera_list:
                 file = "/timestamps/last_camera_motion_%s" % camera
                 ts = os.stat(file).st_ctime
-                if (ts != self.last_trigger[camera] and
-                    (now - ts) < 10):
+                if ts != self.last_trigger[camera] and (now - ts) < 10:
                     print("Camera: %s, age %s" % (camera, (now - ts)))
                     self.last_trigger[camera] = ts
                     cameras_with_recent_triggers += 1
@@ -74,17 +71,28 @@ class any_camera_trigger(trigger.trigger):
             # triggered at the same time.
             for camera in camera_list:
                 if (now - self.last_trigger[camera]) < 10:
-                    if (self.triggers_in_the_past_seven_min[camera] <= 4 or
-                        cameras_with_recent_triggers > 1):
+                    if (
+                        self.triggers_in_the_past_seven_min[camera] <= 4
+                        or cameras_with_recent_triggers > 1
+                    ):
                         ts = utils.timestamp()
                         p = self.choose_priority(camera, age)
-                        print(("%s: ****** %s[%d] CAMERA TRIGGER ******" % (
-                            ts, camera, p)))
-                        triggers.append( ( "hidden/%s.html" % camera,
-                                           self.choose_priority(camera, age)) )
+                        print(
+                            (
+                                "%s: ****** %s[%d] CAMERA TRIGGER ******"
+                                % (ts, camera, p)
+                            )
+                        )
+                        triggers.append(
+                            (
+                                "hidden/%s.html" % camera,
+                                self.choose_priority(camera, age),
+                            )
+                        )
                     else:
-                        print(("%s: Camera %s too spammy, squelching it" % (
-                            ts, camera)))
+                        print(
+                            ("%s: Camera %s too spammy, squelching it" % (ts, camera))
+                        )
         except Exception as e:
             print(e)
             pass
@@ -94,5 +102,6 @@ class any_camera_trigger(trigger.trigger):
         else:
             return triggers
 
-#x = any_camera_trigger()
-#print(x.get_triggered_page_list())
+
+# x = any_camera_trigger()
+# print(x.get_triggered_page_list())
index 9bf98e303a91d95c7aba32272f0351da4e7c1e0d..ac8948a9a8df6958b99c32642151ed613baef9f9 100644 (file)
@@ -8,24 +8,30 @@ import glob
 import constants
 import trigger
 
+
 class chooser(object):
     """Base class of a thing that chooses pages"""
+
     def get_page_list(self):
         now = time.time()
         valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
         filenames = []
-        pages = [ f for f in os.listdir(constants.pages_dir)
-                  if os.path.isfile(os.path.join(constants.pages_dir, f))]
+        pages = [
+            f
+            for f in os.listdir(constants.pages_dir)
+            if os.path.isfile(os.path.join(constants.pages_dir, f))
+        ]
         for page in pages:
             result = re.match(valid_filename, page)
             if result != None:
                 print(('chooser: candidate page: "%s"' % page))
-                if (result.group(3) != "none"):
+                if result.group(3) != "none":
                     freshness_requirement = int(result.group(3))
-                    last_modified = int(os.path.getmtime(
-                        os.path.join(constants.pages_dir, page)))
-                    age = (now - last_modified)
-                    if (age > freshness_requirement):
+                    last_modified = int(
+                        os.path.getmtime(os.path.join(constants.pages_dir, page))
+                    )
+                    age = now - last_modified
+                    if age > freshness_requirement:
                         print(('chooser: "%s" is too old.' % page))
                         continue
                 filenames.append(page)
@@ -34,8 +40,10 @@ class chooser(object):
     def choose_next_page(self):
         pass
 
+
 class weighted_random_chooser(chooser):
     """Chooser that does it via weighted RNG."""
+
     def dont_choose_page_twice_in_a_row_filter(self, choice):
         if choice == self.last_choice:
             return False
@@ -53,8 +61,7 @@ class weighted_random_chooser(chooser):
         self.filter_list.append(self.dont_choose_page_twice_in_a_row_filter)
 
     def choose_next_page(self):
-        if (self.pages == None or
-            self.count % 100 == 0):
+        if self.pages == None or self.count % 100 == 0:
             self.pages = self.get_page_list()
 
         total_weight = 0
@@ -65,7 +72,7 @@ class weighted_random_chooser(chooser):
                 weight = int(result.group(2))
                 weights.append(weight)
                 total_weight += weight
-        if (total_weight <= 0):
+        if total_weight <= 0:
             raise error
 
         while True:
@@ -91,8 +98,10 @@ class weighted_random_chooser(chooser):
             self.count += 1
             return choice
 
+
 class weighted_random_chooser_with_triggers(weighted_random_chooser):
     """Same as WRC but has trigger events"""
+
     def __init__(self, trigger_list, filter_list):
         weighted_random_chooser.__init__(self, filter_list)
         self.trigger_list = trigger_list
@@ -111,14 +120,13 @@ class weighted_random_chooser_with_triggers(weighted_random_chooser):
         return triggered
 
     def choose_next_page(self):
-        if (self.pages == None or
-            self.count % 100 == 0):
+        if self.pages == None or self.count % 100 == 0:
             self.pages = self.get_page_list()
 
         triggered = self.check_for_triggers()
 
         # First try to satisfy from the page queue.
-        if (len(self.page_queue) > 0):
+        if len(self.page_queue) > 0:
             print("chooser: Pulling page from queue...")
             page = None
             priority = None
@@ -133,8 +141,10 @@ class weighted_random_chooser_with_triggers(weighted_random_chooser):
         else:
             return weighted_random_chooser.choose_next_page(self), False
 
+
 class rotating_chooser(chooser):
     """Chooser that does it in a rotation"""
+
     def __init__(self):
         self.valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
         self.pages = None
@@ -142,14 +152,13 @@ class rotating_chooser(chooser):
         self.count = 0
 
     def choose_next_page(self):
-        if (self.pages == None or
-            self.count % 100 == 0):
+        if self.pages == None or self.count % 100 == 0:
             self.pages = self.get_page_list()
 
         if len(self.pages) == 0:
             raise error
 
-        if (self.current >= len(self.pages)):
+        if self.current >= len(self.pages):
             self.current = 0
 
         page = self.pages[self.current]
@@ -157,18 +166,21 @@ class rotating_chooser(chooser):
         self.count += 1
         return page
 
+
 # Test
 def filter_news_during_dinnertime(page):
     now = datetime.datetime.now()
     is_dinnertime = now.hour >= 17 and now.hour <= 20
-    return (not is_dinnertime or
-            not ("cnn" in page or
-                 "news" in page or
-                 "mynorthwest" in page or
-                 "seattle" in page or
-                 "stranger" in page or
-                 "twitter" in page or
-                 "wsj" in page))
-
-#x = weighted_random_chooser_with_triggers([], [ filter_news_during_dinnertime ])
-#print(x.choose_next_page())
+    return not is_dinnertime or not (
+        "cnn" in page
+        or "news" in page
+        or "mynorthwest" in page
+        or "seattle" in page
+        or "stranger" in page
+        or "twitter" in page
+        or "wsj" in page
+    )
+
+
+# x = weighted_random_chooser_with_triggers([], [ filter_news_during_dinnertime ])
+# print(x.choose_next_page())
index 413b58a81da19129a75ce60547a2d996e6013b39..c1ae7fdacbb49fccf0ab55233462508e48abfe70 100644 (file)
@@ -1,13 +1,12 @@
 import generic_news_rss_renderer
 import re
 
+
 class cnn_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
         super(cnn_rss_renderer, self).__init__(
-            name_to_timeout_dict,
-            feed_site,
-            feed_uris,
-            page_title)
+            name_to_timeout_dict, feed_site, feed_uris, page_title
+        )
         self.debug = 1
 
     def debug_prefix(self):
@@ -20,16 +19,14 @@ class cnn_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
         return "cnn-details-%s" % (self.page_title)
 
     def munge_description(self, description):
-        description = re.sub('[Rr]ead full story for latest details.',
-                             '',
-                             description)
-        description = re.sub('<[^>]+>', '', description)
+        description = re.sub("[Rr]ead full story for latest details.", "", description)
+        description = re.sub("<[^>]+>", "", description)
         return description
 
     def find_image(self, item):
-        image = item.findtext('media:thumbnail')
+        image = item.findtext("media:thumbnail")
         if image is not None:
-            image_url = image.get('url')
+            image_url = image.get("url")
             return image_url
         return None
 
@@ -40,17 +37,20 @@ class cnn_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
         if self.is_item_older_than_n_days(item, 14):
             self.debug_print("%s: is too old!" % title)
             return False
-        return re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None
+        return re.search(r"[Cc][Nn][Nn][A-Za-z]*\.com", title) is None
 
     def item_is_interesting_for_article(self, title, description, item):
         if self.is_item_older_than_n_days(item, 7):
             self.debug_print("%s: is too old!" % title)
             return False
-        return (re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None and
-                len(description) >= 65)
+        return (
+            re.search(r"[Cc][Nn][Nn][A-Za-z]*\.com", title) is None
+            and len(description) >= 65
+        )
+
 
 # Test
-#x = cnn_rss_renderer(
+# x = cnn_rss_renderer(
 #    {"Fetch News" : 1,
 #     "Shuffle News" : 1},
 #    "rss.cnn.com",
@@ -59,6 +59,6 @@ class cnn_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
 #     "/rss/cnn_tech.rss",
 #    ],
 #    "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
 #    print("Error fetching news, no items fetched.")
-#x.shuffle_news()
+# x.shuffle_news()
index 7a088796a9cce63616412a8cbfccf16c2f8bb2fa..1f50bf8a60731c793fb49e62dc1d105a93a42b2e 100644 (file)
@@ -1,6 +1,7 @@
 from datetime import datetime
 import functools
 
+
 def invokation_logged(func):
     @functools.wraps(func)
     def wrapper(*args, **kwargs):
@@ -12,13 +13,15 @@ def invokation_logged(func):
         timestamp = now.strftime("%d-%b-%Y (%H:%M:%S.%f)")
         print("%s(%s): Exited function" % (func.__name__, timestamp))
         return ret
+
     return wrapper
 
+
 # Test
-#@invokation_logged
-#def f(x):
+# @invokation_logged
+# def f(x):
 #    print(x * x)
 #    return x * x
 #
-#q = f(10)
-#print(q)
+# q = f(10)
+# print(q)
index 0d95f71d505e4736aef184935e0b3e0f22bce185..988d0a03ac49378c06079b6901cb29dc11fa5633 100644 (file)
@@ -1,9 +1,10 @@
 import constants
 import os
 
+
 def remove_tricky_unicode(x):
     try:
-        x = x.decode('utf-8')
+        x = x.decode("utf-8")
         x = x.replace("\u2018", "'").replace("\u2019", "'")
         x = x.replace("\u201c", '"').replace("\u201d", '"')
         x = x.replace("\u2e3a", "-").replace("\u2014", "-")
@@ -11,12 +12,12 @@ def remove_tricky_unicode(x):
         pass
     return x
 
+
 class file_writer:
     def __init__(self, filename):
-        self.full_filename = os.path.join(constants.pages_dir,
-                                          filename)
-        self.f = open(self.full_filename, 'wb')
-        self.xforms = [ remove_tricky_unicode ]
+        self.full_filename = os.path.join(constants.pages_dir, filename)
+        self.f = open(self.full_filename, "wb")
+        self.xforms = [remove_tricky_unicode]
 
     def add_xform(self, xform):
         self.xforms.append(xform)
@@ -24,7 +25,7 @@ class file_writer:
     def write(self, data):
         for xform in self.xforms:
             data = xform(data)
-        self.f.write(data.encode('utf-8'))
+        self.f.write(data.encode("utf-8"))
 
     def done(self):
         self.f.close()
@@ -32,11 +33,12 @@ class file_writer:
     def close(self):
         self.done()
 
+
 # Test
-#def toupper(x):
+# def toupper(x):
 #    return x.upper()
 #
-#fw = file_writer("test")
-#fw.add_xform(toupper)
-#fw.write(u"This is a \u201ctest\u201d. \n")
-#fw.done()
+# fw = file_writer("test")
+# fw.add_xform(toupper)
+# fw.write(u"This is a \u201ctest\u201d. \n")
+# fw.done()
index a248d1d93ff13fc3d8ecda322f9720ab8b240395..e6657795ed91d1c057364b7f8f1ada105bf2bb1f 100644 (file)
@@ -8,25 +8,29 @@ import os
 import renderer
 import time
 
+
 class gcal_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer to fetch upcoming events from www.google.com/calendar"""
 
-    calendar_whitelist = frozenset([
-        'Alex\'s calendar',
-        'Family',
-        'Holidays in United States',
-        'Lynn Gasch',
-        'Lynn\'s Work',
-        '[email protected]',
-        'Scott Gasch External - Misc',
-        'Birthdays',  # <-- from g+ contacts
-    ])
+    calendar_whitelist = frozenset(
+        [
+            "Alex's calendar",
+            "Family",
+            "Holidays in United States",
+            "Lynn Gasch",
+            "Lynn's Work",
+            "[email protected]",
+            "Scott Gasch External - Misc",
+            "Birthdays",  # <-- from g+ contacts
+        ]
+    )
 
     class comparable_event(object):
         """A helper class to sort events."""
+
         def __init__(self, start_time, end_time, summary, calendar):
             if start_time is None:
-                assert(end_time is None)
+                assert end_time is None
             self.start_time = start_time
             self.end_time = end_time
             self.summary = summary
@@ -37,16 +41,15 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
                 return self.summary < that.summary
             if self.start_time is None or that.start_time is None:
                 return self.start_time is None
-            return (self.start_time,
-                    self.end_time,
-                    self.summary,
-                    self.calendar) < (that.start_time,
-                                      that.end_time,
-                                      that.summary,
-                                      that.calendar)
+            return (self.start_time, self.end_time, self.summary, self.calendar) < (
+                that.start_time,
+                that.end_time,
+                that.summary,
+                that.calendar,
+            )
 
         def __str__(self):
-            return '[%s]&nbsp;%s' % (self.timestamp(), self.friendly_name())
+            return "[%s]&nbsp;%s" % (self.timestamp(), self.friendly_name())
 
         def friendly_name(self):
             name = self.summary
@@ -56,12 +59,12 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
         def timestamp(self):
             if self.start_time is None:
                 return "None"
-            elif (self.start_time.hour == 0):
-                return datetime.datetime.strftime(self.start_time,
-                                                   '%a %b %d %Y')
+            elif self.start_time.hour == 0:
+                return datetime.datetime.strftime(self.start_time, "%a %b %d %Y")
             else:
-                return datetime.datetime.strftime(self.start_time,
-                                                  '%a %b %d %Y %H:%M%p')
+                return datetime.datetime.strftime(
+                    self.start_time, "%a %b %d %Y %H:%M%p"
+                )
 
     def __init__(self, name_to_timeout_dict, oauth):
         super(gcal_renderer, self).__init__(name_to_timeout_dict, True)
@@ -75,17 +78,19 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
 
     def periodic_render(self, key):
         self.debug_print('called for "%s"' % key)
-        if (key == "Render Upcoming Events"):
+        if key == "Render Upcoming Events":
             return self.render_upcoming_events()
-        elif (key == "Look For Triggered Events"):
+        elif key == "Look For Triggered Events":
             return self.look_for_triggered_events()
         else:
-            raise error('Unexpected operation')
+            raise error("Unexpected operation")
 
     def render_upcoming_events(self):
         page_token = None
+
         def format_datetime(x):
-            return datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%SZ')
+            return datetime.datetime.strftime(x, "%Y-%m-%dT%H:%M:%SZ")
+
         now = datetime.datetime.now()
         time_min = now - datetime.timedelta(1)
         time_max = now + datetime.timedelta(95)
@@ -96,74 +101,87 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
         # Writes 2 files:
         #  + "upcoming events",
         #  + a countdown timer for a subser of events,
-        f = file_writer.file_writer('gcal_3_86400.html')
-        f.write('<h1>Upcoming Calendar Events:</h1><hr>\n')
-        f.write('<center><table width=96%>\n')
+        f = file_writer.file_writer("gcal_3_86400.html")
+        f.write("<h1>Upcoming Calendar Events:</h1><hr>\n")
+        f.write("<center><table width=96%>\n")
 
-        g = file_writer.file_writer('countdown_3_7200.html')
-        g.write('<h1>Countdowns:</h1><hr><ul>\n')
+        g = file_writer.file_writer("countdown_3_7200.html")
+        g.write("<h1>Countdowns:</h1><hr><ul>\n")
 
         try:
             self.sortable_events = []
             self.countdown_events = []
             while True:
-                calendar_list = self.client.calendarList().list(
-                    pageToken=page_token).execute()
-                for calendar in calendar_list['items']:
-                    if (calendar['summary'] in gcal_renderer.calendar_whitelist):
-                        events = self.client.events().list(
-                            calendarId=calendar['id'],
-                            singleEvents=True,
-                            timeMin=time_min,
-                            timeMax=time_max,
-                            maxResults=50).execute()
+                calendar_list = (
+                    self.client.calendarList().list(pageToken=page_token).execute()
+                )
+                for calendar in calendar_list["items"]:
+                    if calendar["summary"] in gcal_renderer.calendar_whitelist:
+                        events = (
+                            self.client.events()
+                            .list(
+                                calendarId=calendar["id"],
+                                singleEvents=True,
+                                timeMin=time_min,
+                                timeMax=time_max,
+                                maxResults=50,
+                            )
+                            .execute()
+                        )
 
                         def parse_date(x):
-                            y = x.get('date')
+                            y = x.get("date")
                             if y:
-                                y = datetime.datetime.strptime(y, '%Y-%m-%d')
+                                y = datetime.datetime.strptime(y, "%Y-%m-%d")
                             else:
-                                y = x.get('dateTime')
+                                y = x.get("dateTime")
                                 if y:
-                                    y = datetime.datetime.strptime(y[:-6],
-                                                         '%Y-%m-%dT%H:%M:%S')
+                                    y = datetime.datetime.strptime(
+                                        y[:-6], "%Y-%m-%dT%H:%M:%S"
+                                    )
                                 else:
                                     y = None
                             return y
 
-                        for event in events['items']:
+                        for event in events["items"]:
                             try:
-                                summary = event['summary']
-                                self.debug_print("event '%s' (%s to %s)" % (
-                                    summary, event['start'], event['end']))
-                                start = parse_date(event['start'])
-                                end = parse_date(event['end'])
+                                summary = event["summary"]
+                                self.debug_print(
+                                    "event '%s' (%s to %s)"
+                                    % (summary, event["start"], event["end"])
+                                )
+                                start = parse_date(event["start"])
+                                end = parse_date(event["end"])
                                 self.sortable_events.append(
-                                    gcal_renderer.comparable_event(start,
-                                                                   end,
-                                                                   summary,
-                                                                   calendar['summary']))
-                                if ('countdown' in summary or
-                                    'Holidays' in calendar['summary'] or
-                                    'Countdown' in summary):
+                                    gcal_renderer.comparable_event(
+                                        start, end, summary, calendar["summary"]
+                                    )
+                                )
+                                if (
+                                    "countdown" in summary
+                                    or "Holidays" in calendar["summary"]
+                                    or "Countdown" in summary
+                                ):
                                     self.debug_print("event is countdown worthy")
                                     self.countdown_events.append(
-                                        gcal_renderer.comparable_event(start,
-                                                                       end,
-                                                                       summary,
-                                                                       calendar['summary']))
+                                        gcal_renderer.comparable_event(
+                                            start, end, summary, calendar["summary"]
+                                        )
+                                    )
                             except Exception as e:
-                                print("gcal unknown exception, skipping event.");
+                                print("gcal unknown exception, skipping event.")
                     else:
-                        self.debug_print("Skipping calendar '%s'" % calendar['summary'])
-                page_token = calendar_list.get('nextPageToken')
-                if not page_token: break
+                        self.debug_print("Skipping calendar '%s'" % calendar["summary"])
+                page_token = calendar_list.get("nextPageToken")
+                if not page_token:
+                    break
 
             self.sortable_events.sort()
             upcoming_sortable_events = self.sortable_events[:12]
             for event in upcoming_sortable_events:
                 self.debug_print("sorted event: %s" % event.friendly_name())
-                f.write("""
+                f.write(
+                    """
 <tr>
   <td style="padding-right: 1em;">
     %s
@@ -171,15 +189,17 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
   <td style="padding-left: 1em;">
     %s
   </td>
-</tr>\n""" % (event.timestamp(), event.friendly_name()))
-            f.write('</table></center>\n')
+</tr>\n"""
+                    % (event.timestamp(), event.friendly_name())
+                )
+            f.write("</table></center>\n")
             f.close()
 
             self.countdown_events.sort()
             upcoming_countdown_events = self.countdown_events[:12]
             now = datetime.datetime.now()
             count = 0
-            timestamps = { }
+            timestamps = {}
             for event in upcoming_countdown_events:
                 eventstamp = event.start_time
                 delta = eventstamp - now
@@ -190,17 +210,23 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
                     days = divmod(x, constants.seconds_per_day)
                     hours = divmod(days[1], constants.seconds_per_hour)
                     minutes = divmod(hours[1], constants.seconds_per_minute)
-                    g.write('<li><SPAN id="%s">%d days, %02d:%02d</SPAN> until %s</li>\n' % (identifier, days[0], hours[0], minutes[0], name))
+                    g.write(
+                        '<li><SPAN id="%s">%d days, %02d:%02d</SPAN> until %s</li>\n'
+                        % (identifier, days[0], hours[0], minutes[0], name)
+                    )
                     timestamps[identifier] = time.mktime(eventstamp.timetuple())
                     count += 1
-                    self.debug_print("countdown to %s is %dd %dh %dm" % (
-                        name, days[0], hours[0], minutes[0]))
-            g.write('</ul>')
-            g.write('<SCRIPT>\nlet timestampMap = new Map([')
+                    self.debug_print(
+                        "countdown to %s is %dd %dh %dm"
+                        % (name, days[0], hours[0], minutes[0])
+                    )
+            g.write("</ul>")
+            g.write("<SCRIPT>\nlet timestampMap = new Map([")
             for x in list(timestamps.keys()):
                 g.write('    ["%s", %f],\n' % (x, timestamps[x] * 1000.0))
-            g.write(']);\n\n')
-            g.write("""
+            g.write("]);\n\n")
+            g.write(
+                """
 // Pad things with a leading zero if necessary.
 function pad(n) {
     return (n < 10) ? ("0" + n) : n;
@@ -231,7 +257,8 @@ var fn = setInterval(function() {
         }
     }
 }, 1000);
-</script>""");
+</script>"""
+            )
             g.close()
             return True
         except (gdata.service.RequestError, AccessTokenRefreshError):
@@ -244,8 +271,8 @@ var fn = setInterval(function() {
 
     def look_for_triggered_events(self):
         f = file_writer.file_writer(constants.gcal_imminent_pagename)
-        f.write('<h1>Imminent Upcoming Calendar Events:</h1>\n<hr>\n')
-        f.write('<center><table width=99%>\n')
+        f.write("<h1>Imminent Upcoming Calendar Events:</h1>\n<hr>\n")
+        f.write("<center><table width=99%>\n")
         now = datetime.datetime.now()
         count = 0
         for event in self.sortable_events:
@@ -259,7 +286,10 @@ var fn = setInterval(function() {
                 eventstamp = event.start_time
                 name = event.friendly_name()
                 calendar = event.calendar
-                f.write("<LI> %s (%s) upcoming in %d minutes.\n" % (name, calendar, minutes[0]))
+                f.write(
+                    "<LI> %s (%s) upcoming in %d minutes.\n"
+                    % (name, calendar, minutes[0])
+                )
                 count += 1
         f.write("</table>")
         f.close()
index 870020adb553b60d0ab62cda6ce214802311612a..de19d1a1b2a5a28dce7a3e0f7265984b638d2e0e 100644 (file)
@@ -2,6 +2,7 @@ import constants
 import globals
 import trigger
 
+
 class gcal_trigger(trigger.trigger):
     def get_triggered_page_list(self):
         if globals.get("gcal_triggered") == True:
@@ -10,6 +11,7 @@ class gcal_trigger(trigger.trigger):
         else:
             return None
 
-#globals.put('gcal_triggered', True)
-#x = gcal_trigger()
-#x.get_triggered_page_list()
+
+# globals.put('gcal_triggered', True)
+# x = gcal_trigger()
+# x.get_triggered_page_list()
index f88b2f5cd60a89874cf4845f69e9e5a866ea11a6..1f9cd67b1e59e9188f4f9486c4923493e5b84a8b 100644 (file)
@@ -5,10 +5,11 @@
 
 import sys
 import urllib.request, urllib.parse, urllib.error
+
 try:
-    import http.client     # python2
+    import http.client  # python2
 except ImportError:
-    import http.client # python3
+    import http.client  # python3
 import os.path
 import json
 import time
@@ -22,29 +23,30 @@ from googleapiclient.discovery import build
 import datetime
 import ssl
 
+
 class OAuth:
     def __init__(self, client_id, client_secret):
         print("gdata: initializing oauth token...")
         self.client_id = client_id
         self.client_secret = client_secret
         self.user_code = None
-        #print 'Client id: %s' % (client_id)
-        #print 'Client secret: %s' % (client_secret)
+        # print 'Client id: %s' % (client_id)
+        # print 'Client secret: %s' % (client_secret)
         self.token = None
         self.device_code = None
         self.verfication_url = None
-        self.token_file = 'client_secrets.json'
+        self.token_file = "client_secrets.json"
         self.scope = [
             #'https://www.googleapis.com/auth/calendar',
             #'https://www.googleapis.com/auth/drive',
             #'https://docs.google.com/feeds',
             #'https://www.googleapis.com/auth/calendar.readonly',
             #'https://picasaweb.google.com/data/',
-            'https://www.googleapis.com/auth/photoslibrary.readonly',
+            "https://www.googleapis.com/auth/photoslibrary.readonly",
             #'http://picasaweb.google.com/data/',
             #'https://www.google.com/calendar/feeds/',
         ]
-        self.host = 'accounts.google.com'
+        self.host = "accounts.google.com"
         self.reset_connection()
         self.load_token()
         self.last_action = 0
@@ -54,7 +56,7 @@ class OAuth:
     # exception, after which we always get httplib.CannotSendRequest errors.
     # When this happens, we try re-creating the exception.
     def reset_connection(self):
-        self.ssl_ctx = ssl.create_default_context(cafile='/usr/local/etc/ssl/cert.pem')
+        self.ssl_ctx = ssl.create_default_context(cafile="/usr/local/etc/ssl/cert.pem")
         http.client.HTTPConnection.debuglevel = 2
         self.conn = http.client.HTTPSConnection(self.host, context=self.ssl_ctx)
 
@@ -67,7 +69,7 @@ class OAuth:
             f.close()
 
     def save_token(self):
-        f = open(self.token_file, 'w')
+        f = open(self.token_file, "w")
         f.write(json.dumps(self.token))
         f.close()
 
@@ -82,18 +84,18 @@ class OAuth:
         self.conn.request(
             "POST",
             "/o/oauth2/device/code",
-            urllib.parse.urlencode({
-                'client_id': self.client_id,
-                'scope'    : ' '.join(self.scope)
-            }),
-            {"Content-type": "application/x-www-form-urlencoded"})
+            urllib.parse.urlencode(
+                {"client_id": self.client_id, "scope": " ".join(self.scope)}
+            ),
+            {"Content-type": "application/x-www-form-urlencoded"},
+        )
         response = self.conn.getresponse()
         if response.status == 200:
             data = json.loads(response.read())
-            self.device_code = data['device_code']
-            self.user_code = data['user_code']
-            self.verification_url = data['verification_url']
-            self.retry_interval = data['interval']
+            self.device_code = data["device_code"]
+            self.user_code = data["user_code"]
+            self.verification_url = data["verification_url"]
+            self.retry_interval = data["interval"]
         else:
             print(("gdata: %d" % response.status))
             print((response.read()))
@@ -110,17 +112,20 @@ class OAuth:
             self.conn.request(
                 "POST",
                 "/o/oauth2/token",
-                urllib.parse.urlencode({
-                    'client_id'     : self.client_id,
-                    'client_secret' : self.client_secret,
-                    'code'          : self.device_code,
-                    'grant_type'    : 'http://oauth.net/grant_type/device/1.0'
-                    }),
-                {"Content-type": "application/x-www-form-urlencoded"})
+                urllib.parse.urlencode(
+                    {
+                        "client_id": self.client_id,
+                        "client_secret": self.client_secret,
+                        "code": self.device_code,
+                        "grant_type": "http://oauth.net/grant_type/device/1.0",
+                    }
+                ),
+                {"Content-type": "application/x-www-form-urlencoded"},
+            )
             response = self.conn.getresponse()
             if response.status == 200:
                 data = json.loads(response.read())
-                if 'access_token' in data:
+                if "access_token" in data:
                     self.token = data
                     self.save_token()
                 else:
@@ -135,29 +140,32 @@ class OAuth:
             print("gdata: not refreshing yet, too soon...")
             return False
         else:
-            print('gdata: trying to refresh oauth token...')
+            print("gdata: trying to refresh oauth token...")
         self.reset_connection()
-        refresh_token = self.token['refresh_token']
+        refresh_token = self.token["refresh_token"]
         self.conn.request(
             "POST",
             "/o/oauth2/token",
-            urllib.parse.urlencode({
-                'client_id'     : self.client_id,
-                'client_secret' : self.client_secret,
-                'refresh_token' : refresh_token,
-                'grant_type'    : 'refresh_token'
-                }),
-            {"Content-type": "application/x-www-form-urlencoded"})
+            urllib.parse.urlencode(
+                {
+                    "client_id": self.client_id,
+                    "client_secret": self.client_secret,
+                    "refresh_token": refresh_token,
+                    "grant_type": "refresh_token",
+                }
+            ),
+            {"Content-type": "application/x-www-form-urlencoded"},
+        )
 
         response = self.conn.getresponse()
         self.last_action = time.time()
         if response.status == 200:
             data = json.loads(response.read())
-            if 'access_token' in data:
+            if "access_token" in data:
                 self.token = data
                 # in fact we NEVER get a new refresh token at this point
-                if not 'refresh_token' in self.token:
-                    self.token['refresh_token'] = refresh_token
+                if not "refresh_token" in self.token:
+                    self.token["refresh_token"] = refresh_token
                     self.save_token()
                 return True
         print(("gdata: unexpected response %d to renewal request" % response.status))
@@ -171,35 +179,40 @@ class OAuth:
     # https://developers.google.com/picasa-web/
     def photos_service(self):
         headers = {
-            "Authorization": "%s %s"  % (self.token['token_type'], self.token['access_token'])
+            "Authorization": "%s %s"
+            % (self.token["token_type"], self.token["access_token"])
         }
         client = gdata.photos.service.PhotosService(additional_headers=headers)
         return client
 
     # https://developers.google.com/drive/
     def docs_service(self):
-        cred = OAuth2Credentials(self.token['access_token'],
-                                 self.client_id,
-                                 self.client_secret,
-                                 self.token['refresh_token'],
-                                 datetime.datetime.now(),
-                                 'http://accounts.google.com/o/oauth2/token',
-                                 'KitchenKiosk/0.9')
+        cred = OAuth2Credentials(
+            self.token["access_token"],
+            self.client_id,
+            self.client_secret,
+            self.token["refresh_token"],
+            datetime.datetime.now(),
+            "http://accounts.google.com/o/oauth2/token",
+            "KitchenKiosk/0.9",
+        )
         http = httplib2.Http(disable_ssl_certificate_validation=True)
         http = cred.authorize(http)
-        service = build('drive', 'v2', http)
+        service = build("drive", "v2", http)
         return service
 
     # https://developers.google.com/google-apps/calendar/
     def calendar_service(self):
-        cred = OAuth2Credentials(self.token['access_token'],
-                                 self.client_id,
-                                 self.client_secret,
-                                 self.token['refresh_token'],
-                                 datetime.datetime.now(),
-                                 'http://accounts.google.com/o/oauth2/token',
-                                 'KitchenKiosk/0.9')
+        cred = OAuth2Credentials(
+            self.token["access_token"],
+            self.client_id,
+            self.client_secret,
+            self.token["refresh_token"],
+            datetime.datetime.now(),
+            "http://accounts.google.com/o/oauth2/token",
+            "KitchenKiosk/0.9",
+        )
         http = httplib2.Http(disable_ssl_certificate_validation=True)
         http = cred.authorize(http)
-        service = build('calendar', 'v3', http)
+        service = build("calendar", "v3", http)
         return service
index d734a2deb90c4051db50d9d1fb97af2492be9f9b..44203e539094f12468189aba8bde52e520edcdf0 100644 (file)
@@ -5,6 +5,7 @@ import sets
 import gdata_oauth
 import secrets
 
+
 class gdocs_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer to fetches and munge docs from drive.google.com"""
 
@@ -25,13 +26,13 @@ class gdocs_renderer(renderer.debuggable_abstaining_renderer):
             try:
                 param = {}
                 if page_token:
-                    param['pageToken'] = page_token
-                param['q'] = self.query
-                print("QUERY: %s" % param['q'])
+                    param["pageToken"] = page_token
+                param["q"] = self.query
+                print("QUERY: %s" % param["q"])
 
                 files = self.client.files().list(**param).execute()
-                result.extend(files['items'])
-                page_token = files.get('nextPageToken')
+                result.extend(files["items"])
+                page_token = files.get("nextPageToken")
                 if not page_token:
                     break
             except:
@@ -47,11 +48,11 @@ class gdocs_renderer(renderer.debuggable_abstaining_renderer):
             return "font-size:%dpt" % (x)
 
         for f in result:
-            print(f['title'])
-            print(f['id'])
-            self.debug_print("%s (%s)\n" % (f['title'], f['id']))
-            title = f['title']
-            url = f['exportLinks']['text/html']
+            print(f["title"])
+            print(f["id"])
+            self.debug_print("%s (%s)\n" % (f["title"], f["id"]))
+            title = f["title"]
+            url = f["exportLinks"]["text/html"]
             print(f)
             print("Fetching %s..." % url)
             resp, contents = self.client._http.request(url)
@@ -59,18 +60,21 @@ class gdocs_renderer(renderer.debuggable_abstaining_renderer):
             print(contents)
             if resp.status == 200:
                 print("Got contents.")
-                contents = re.sub('<body class="..">', '', contents)
-                contents = contents.replace('</body>', '')
-                contents = re.sub('font-size:([0-9]+)pt', boost_font_size, contents)
-                f = file_writer.file_writer('%s_2_3600.html' % title)
+                contents = re.sub('<body class="..">', "", contents)
+                contents = contents.replace("</body>", "")
+                contents = re.sub("font-size:([0-9]+)pt", boost_font_size, contents)
+                f = file_writer.file_writer("%s_2_3600.html" % title)
                 now = datetime.datetime.now()
-                f.write("""
+                f.write(
+                    """
 <H1>%s</H1>
 <!-- Last updated at %s -->
 <HR>
 <DIV STYLE="-webkit-column-count: 2; -moz-column-count: 2; column-count: 2;">
 %s
-</DIV>""" % (title, now, contents))
+</DIV>"""
+                    % (title, now, contents)
+                )
                 f.close()
             else:
                 self.debug_print("error: %s" % resp)
@@ -78,8 +82,8 @@ class gdocs_renderer(renderer.debuggable_abstaining_renderer):
         return True
 
 
-#oauth = gdata_oauth.OAuth(secrets.google_client_id,
+# oauth = gdata_oauth.OAuth(secrets.google_client_id,
 #                          secrets.google_client_secret)
-#x = gdocs_renderer({"Testing", 12345},
+# x = gdocs_renderer({"Testing", 12345},
 #                   oauth)
-#x.periodic_render("Test")
+# x.periodic_render("Test")
index 698f7aa5bd6e4ca14f1cb71b19d80ca7907dc9f7..3bc5f1be147026b7cac5f95eddfc569951f6e506 100644 (file)
@@ -10,10 +10,10 @@ import random
 import re
 import xml.etree.ElementTree as ET
 
+
 class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
-        super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict,
-                                                        False)
+        super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict, False)
         self.debug = 1
         self.feed_site = feed_site
         self.feed_uris = feed_uris
@@ -44,32 +44,32 @@ class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
         return False
 
     def find_title(self, item):
-        return item.findtext('title')
+        return item.findtext("title")
 
     def munge_title(self, title):
         return title
 
     def find_description(self, item):
-        return item.findtext('description')
+        return item.findtext("description")
 
     def munge_description(self, description):
-        description = re.sub('<[^>]+>', '', description)
+        description = re.sub("<[^>]+>", "", description)
         return description
 
     def find_link(self, item):
-        return item.findtext('link')
+        return item.findtext("link")
 
     def munge_link(self, link):
         return link
 
     def find_image(self, item):
-        return item.findtext('image')
+        return item.findtext("image")
 
     def munge_image(self, image):
         return image
 
     def find_pubdate(self, item):
-        return item.findtext('pubDate')
+        return item.findtext("pubDate")
 
     def munge_pubdate(self, pubdate):
         return pubdate
@@ -84,7 +84,7 @@ class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
             tzinfo = pubdate.tzinfo
             now = datetime.datetime.now(tzinfo)
             delta = (now - pubdate).total_seconds() / (60 * 60 * 24)
-            if (delta > n):
+            if delta > n:
                 return True
         return False
 
@@ -97,7 +97,7 @@ class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
         elif key == "Shuffle News":
             return self.shuffle_news()
         else:
-            raise error('Unexpected operation')
+            raise error("Unexpected operation")
 
     def shuffle_news(self):
         headlines = page_builder.page_builder()
@@ -109,7 +109,8 @@ class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
             return False
         for msg in subset:
             headlines.add_item(msg)
-        headlines.set_custom_html("""
+        headlines.set_custom_html(
+            """
 <STYLE>
 a:link {
   color: black;
@@ -126,16 +127,19 @@ a:active {
   text-decoration: none;
   font-weight: bold;
 }
-</STYLE>""")
-        f = file_writer.file_writer('%s_%s_25900.html' % (
-            self.get_headlines_page_prefix(),
-            self.get_headlines_page_priority()))
+</STYLE>"""
+        )
+        f = file_writer.file_writer(
+            "%s_%s_25900.html"
+            % (self.get_headlines_page_prefix(), self.get_headlines_page_priority())
+        )
         headlines.render_html(f)
         f.close()
 
         details = page_builder.page_builder()
         details.set_layout(page_builder.page_builder.LAYOUT_ONE_ITEM)
-        details.set_custom_html("""
+        details.set_custom_html(
+            """
 <STYLE>
 a:link {
   color: black;
@@ -152,19 +156,21 @@ a:active {
   text-decoration: none;
   font-weight: bold;
 }
-</STYLE>""")
+</STYLE>"""
+        )
         details.set_title("%s" % self.page_title)
         subset = self.details.subset(1)
         if subset is None:
-            self.debug_print("Not enough details to choose from.");
+            self.debug_print("Not enough details to choose from.")
             return False
         for msg in subset:
             blurb = msg
-            blurb += u'</TD>'
+            blurb += u"</TD>"
             details.add_item(blurb)
-        g = file_writer.file_writer('%s_%s_86400.html' % (
-            self.get_details_page_prefix(),
-            self.get_details_page_priority()))
+        g = file_writer.file_writer(
+            "%s_%s_86400.html"
+            % (self.get_details_page_prefix(), self.get_details_page_priority())
+        )
         details.render_html(g)
         g.close()
         return True
@@ -185,9 +191,12 @@ a:active {
                 "GET",
                 uri,
                 None,
-                { "Accept": "*/*",
-                  "Cache-control": "max-age=59",
-                  "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"})
+                {
+                    "Accept": "*/*",
+                    "Cache-control": "max-age=59",
+                    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
+                },
+            )
             try:
                 response = self.conn.getresponse()
             except:
@@ -195,8 +204,12 @@ a:active {
                 return False
 
             if response.status != 200:
-                print(("%s: RSS fetch_news error, response: %d" % (self.page_title,
-                                                                  response.status)))
+                print(
+                    (
+                        "%s: RSS fetch_news error, response: %d"
+                        % (self.page_title, response.status)
+                    )
+                )
                 self.debug_print(response.read())
                 return False
 
@@ -206,26 +219,26 @@ a:active {
                 title = self.find_title(item)
                 if title is not None:
                     title = self.munge_title(title)
-                description = item.findtext('description')
+                description = item.findtext("description")
                 if description is not None:
                     description = self.munge_description(description)
                 image = self.find_image(item)
                 if image is not None:
                     image = self.munge_image(image)
-                link = item.findtext('link')
+                link = item.findtext("link")
                 if link is not None:
                     link = self.munge_link(link)
 
-                if (title is None or
-                    not self.item_is_interesting_for_headlines(title,
-                                                               description,
-                                                               item)):
+                if title is None or not self.item_is_interesting_for_headlines(
+                    title, description, item
+                ):
                     self.debug_print('Item "%s" is not interesting' % title)
                     continue
 
-                if (self.should_profanity_filter() and
-                    (self.filter.contains_bad_words(title) or
-                    self.filter.contains_bad_words(description))):
+                if self.should_profanity_filter() and (
+                    self.filter.contains_bad_words(title)
+                    or self.filter.contains_bad_words(description)
+                ):
                     self.debug_print('Found bad words in item "%s"' % title)
                     continue
 
@@ -237,7 +250,7 @@ a:active {
                     blurb += u'style="padding:8px;">'
 
                 if link is None:
-                    blurb += u'<P><B>%s</B>' % title
+                    blurb += u"<P><B>%s</B>" % title
                 else:
                     blurb += u'<P><B><A HREF="%s">%s</A></B>' % (link, title)
 
@@ -246,19 +259,18 @@ a:active {
                     pubdate = self.munge_pubdate(pubdate)
                     ts = parse(pubdate)
                     blurb += u"  <FONT COLOR=#cccccc>%s</FONT>" % (
-                        ts.strftime("%b&nbsp;%d"))
+                        ts.strftime("%b&nbsp;%d")
+                    )
 
-                if (description is not None and
-                    self.item_is_interesting_for_article(title,
-                                                         description,
-                                                         item)):
+                if description is not None and self.item_is_interesting_for_article(
+                    title, description, item
+                ):
                     longblurb = blurb
 
                     longblurb += u"<BR>"
                     longblurb += description
                     longblurb += u"</DIV>"
-                    longblurb = longblurb.replace("font-size:34pt",
-                                                  "font-size:44pt")
+                    longblurb = longblurb.replace("font-size:34pt", "font-size:44pt")
                     self.details.add(longblurb)
 
                 blurb += u"</DIV>"
index de1116d1ef04dd2e47a6eff1b96450c21ce8897d..cba8596777aeb5689ed15e4b5f9a2e21aabf7d53 100644 (file)
@@ -8,40 +8,42 @@ import re
 import renderer
 import secrets
 
+
 class gkeep_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
         super(gkeep_renderer, self).__init__(name_to_timeout_dict, True)
         self.keep = gkeepapi.Keep()
-        success = self.keep.login(secrets.google_keep_username,
-                                  secrets.google_keep_password)
+        success = self.keep.login(
+            secrets.google_keep_username, secrets.google_keep_password
+        )
         if success:
             self.debug_print("Connected with gkeep.")
         else:
             self.debug_print("Error connecting with gkeep.")
         self.colors_by_name = {
-            'white' : '#002222',
-            'green' : '#345920',
-            'darkblue' : '#1F3A5F',
-            'blue' : '#2D545E',
-            'orange' : '#604A19',
-            'red' : '#5C2B29',
-            'purple' : '#42275E',
-            'pink' : '#5B2245',
-            'yellow' : '#635D19',
-            'brown' : '#442F19',
-            'gray' : '#3c3f4c',
-            'teal' : '#16504B'
+            "white": "#002222",
+            "green": "#345920",
+            "darkblue": "#1F3A5F",
+            "blue": "#2D545E",
+            "orange": "#604A19",
+            "red": "#5C2B29",
+            "purple": "#42275E",
+            "pink": "#5B2245",
+            "yellow": "#635D19",
+            "brown": "#442F19",
+            "gray": "#3c3f4c",
+            "teal": "#16504B",
         }
 
     def debug_prefix(self):
         return "gkeep"
 
     def periodic_render(self, key):
-        strikethrough = re.compile('(\u2611[^\n]*)\n', re.UNICODE)
-        linkify = re.compile(r'.*(https?:\/\/\S+).*')
+        strikethrough = re.compile("(\u2611[^\n]*)\n", re.UNICODE)
+        linkify = re.compile(r".*(https?:\/\/\S+).*")
 
         self.keep.sync()
-        result_list = self.keep.find(labels=[self.keep.findLabel('kiosk')])
+        result_list = self.keep.find(labels=[self.keep.findLabel("kiosk")])
         for note in result_list:
             title = note.title
             title = title.replace(" ", "-")
@@ -50,11 +52,12 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
             filename = "%s_2_3600.html" % title
             contents = note.text + "\n"
             self.debug_print("Note title '%s'" % title)
-            if contents != '' and not contents.isspace():
-                contents = strikethrough.sub('', contents)
+            if contents != "" and not contents.isspace():
+                contents = strikethrough.sub("", contents)
                 self.debug_print("Note contents:\n%s" % contents)
-                contents = contents.replace(u'\u2610 ',
-                                            u'<LI><INPUT TYPE="checkbox">&nbsp;')
+                contents = contents.replace(
+                    u"\u2610 ", u'<LI><INPUT TYPE="checkbox">&nbsp;'
+                )
                 contents = linkify.sub(r'<a href="\1">\1</a>', contents)
 
                 individual_lines = contents.split("\n")
@@ -65,10 +68,10 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
                     length = len(x)
                     if length > max_length:
                         max_length = length
-                    leading_spaces = len(x) - len(x.lstrip(' '))
+                    leading_spaces = len(x) - len(x.lstrip(" "))
                     leading_spaces /= 2
                     leading_spaces = int(leading_spaces)
-                    x = x.lstrip(' ')
+                    x = x.lstrip(" ")
                     # self.debug_print(" * (%d) '%s'" % (leading_spaces, x))
                     for y in range(0, leading_spaces):
                         x = "<UL>" + x
@@ -83,19 +86,23 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
                 else:
                     self.debug_print("Unknown color '%s'" % color)
                 f = file_writer.file_writer(filename)
-                f.write("""
+                f.write(
+                    """
 <STYLE type="text/css">
   a:link { color:#88bfbf; }
   ul { list-style-type:none; }
 </STYLE>
 <DIV STYLE="border-radius: 25px; border-style: solid; padding: 20px; background-color: %s; color: #eeeeee; font-size: x-large;">
 <p style="color: #ffffff; font-size:larger"><B>%s</B></p>
-<HR style="border-top: 3px solid white;">""" % (color, note.title))
+<HR style="border-top: 3px solid white;">"""
+                    % (color, note.title)
+                )
                 if num_lines >= 12 and max_length < 120:
-                    self.debug_print("%d lines (max=%d chars): two columns" %
-                                     (num_lines, max_length))
-                    f.write("<TABLE BORDER=0 WIDTH=100%%><TR valign=\"top\">")
-                    f.write("<TD WIDTH=50%% style=\"color:#eeeeee; font-size:large\">\n")
+                    self.debug_print(
+                        "%d lines (max=%d chars): two columns" % (num_lines, max_length)
+                    )
+                    f.write('<TABLE BORDER=0 WIDTH=100%%><TR valign="top">')
+                    f.write('<TD WIDTH=50%% style="color:#eeeeee; font-size:large">\n')
                     f.write("<FONT><UL STYLE='list-style-type:none'>")
                     count = 0
                     for x in individual_lines:
@@ -103,12 +110,15 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
                         count += 1
                         if count == num_lines / 2:
                             f.write("</UL></FONT></TD>\n")
-                            f.write("<TD WIDTH=50%% style=\"color:#eeeeee; font-size:large\">\n")
+                            f.write(
+                                '<TD WIDTH=50%% style="color:#eeeeee; font-size:large">\n'
+                            )
                             f.write("<FONT><UL STYLE='list-style-type:none'>")
-                    f.write("</UL></FONT></TD></TR></TABLE></DIV>\n");
+                    f.write("</UL></FONT></TD></TR></TABLE></DIV>\n")
                 else:
-                    self.debug_print("%d lines (max=%d chars): one column" %
-                                     (num_lines, max_length))
+                    self.debug_print(
+                        "%d lines (max=%d chars): one column" % (num_lines, max_length)
+                    )
                     f.write("<FONT><UL>%s</UL></FONT>" % contents)
                 f.write("</DIV>")
                 f.close()
@@ -121,6 +131,7 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
                     pass
         return True
 
+
 # Test
-#x = gkeep_renderer({"Test", 1234})
-#x.periodic_render("Test")
+# x = gkeep_renderer({"Test", 1234})
+# x.periodic_render("Test")
index 8420ebd9913d5fefb5c1e1161f52657d7ca60cf7..f992574ef77511a471512020c006385e7adf7206 100644 (file)
@@ -1,11 +1,12 @@
 data = {}
 
+
 def put(key, value):
     data[key] = value
 
+
 def get(key):
     if key in data:
         return data[key]
     else:
         return None
-
index b4290f3f6c0c9628ebf1b61150fe044e31abdbef..ad92c26523cf690c0062ee00e1a0ab9332635b97 100644 (file)
@@ -2,13 +2,12 @@ from bs4 import BeautifulSoup
 import generic_news_rss_renderer
 import re
 
+
 class google_news_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
         super(google_news_rss_renderer, self).__init__(
-            name_to_timeout_dict,
-            feed_site,
-            feed_uris,
-            page_title)
+            name_to_timeout_dict, feed_site, feed_uris, page_title
+        )
         self.debug = 1
 
     def debug_prefix(self):
@@ -21,8 +20,8 @@ class google_news_rss_renderer(generic_news_rss_renderer.generic_news_rss_render
         return "google-news-details"
 
     def find_description(self, item):
-        descr = item.findtext('description')
-        source = item.findtext('source')
+        descr = item.findtext("description")
+        source = item.findtext("source")
         if source is not None:
             descr = descr + " (%s)" % source
         return descr
@@ -37,8 +36,8 @@ class google_news_rss_renderer(generic_news_rss_renderer.generic_news_rss_render
 
     def munge_description(self, description):
         soup = BeautifulSoup(description)
-        for a in soup.findAll('a'):
-            del a['href']
+        for a in soup.findAll("a"):
+            del a["href"]
         descr = str(soup)
         return munge_description_internal(descr)
 
@@ -54,18 +53,18 @@ class google_news_rss_renderer(generic_news_rss_renderer.generic_news_rss_render
     def item_is_interesting_for_article(self, title, description, item):
         return not self.is_item_older_than_n_days(item, 2)
 
+
 # Test
-#x = google_news_rss_renderer(
+# x = google_news_rss_renderer(
 #    {"Fetch News" : 1,
 #     "Shuffle News" : 1},
 #    "news.google.com",
 #    [ "/rss?hl=en-US&gl=US&ceid=US:en" ],
 #    "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
 #    print("Error fetching news, no items fetched.")
-#x.shuffle_news()
+# x.shuffle_news()
 #
-#descr = "this is a lot of really long text about nothign in particular.  It's pretty interesting, don't you think?  I hope that the munge description method works by both truncating it and remembering to close any open <LI>items as well as making sure not to truncate in the middle of a <A HREF=\"whatever\" these are a bunch of useless arguments to the A tag that make it really long so that the truncate will happen in the middle of it.  I'm getting kind of tired of typing shit so I'm going to revert to copy pasta now.  Sorry if you were getting into this story.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.</A></LI> Out!"
-#d = x.munge_description_internal(descr)
-#print(d)
-
+# descr = "this is a lot of really long text about nothign in particular.  It's pretty interesting, don't you think?  I hope that the munge description method works by both truncating it and remembering to close any open <LI>items as well as making sure not to truncate in the middle of a <A HREF=\"whatever\" these are a bunch of useless arguments to the A tag that make it really long so that the truncate will happen in the middle of it.  I'm getting kind of tired of typing shit so I'm going to revert to copy pasta now.  Sorry if you were getting into this story.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.</A></LI> Out!"
+# d = x.munge_description_internal(descr)
+# print(d)
index 49582fb16e6fafe2688d952abccdfe6d098fff7b..a427256ca0f1b1530a0e757391dc027b1391099a 100644 (file)
@@ -1,5 +1,6 @@
 import random
 
+
 class grab_bag(object):
     def __init__(self):
         self.contents = set()
@@ -24,6 +25,7 @@ class grab_bag(object):
     def size(self):
         return len(self.contents)
 
-#x = grab_bag()
-#x.add_all([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
-#print x.subset(3)
+
+# x = grab_bag()
+# x.add_all([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+# print x.subset(3)
index 63f923fe74e04b6c7274c76edd8061b1bdfbefd9..74819a52327dccf0b73302188d043e95086466c9 100644 (file)
@@ -4,6 +4,7 @@ import os
 import renderer
 import time
 
+
 class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
         super(periodic_health_renderer, self).__init__(name_to_timeout_dict, False)
@@ -12,50 +13,44 @@ class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
         return "health"
 
     def periodic_render(self, key):
-        f = file_writer.file_writer('periodic-health_6_300.html')
-        timestamps = '/timestamps/'
+        f = file_writer.file_writer("periodic-health_6_300.html")
+        timestamps = "/timestamps/"
         days = constants.seconds_per_day
         hours = constants.seconds_per_hour
         mins = constants.seconds_per_minute
         minutes = mins
         limits = {
-            timestamps + 'last_http_probe_wannabe_house'  : mins * 10,
-            timestamps + 'last_http_probe_meerkat_cabin'  : mins * 10,
-            timestamps + 'last_http_probe_dns_house'      : mins * 10,
-            timestamps + 'last_http_probe_rpi_cabin'      : mins * 10,
-            timestamps + 'last_http_probe_rpi_house'      : mins * 10,
-            timestamps + 'last_http_probe_therm_house'    : mins * 10,
-
-            timestamps + 'last_rsnapshot_hourly'          : hours * 24,
-            timestamps + 'last_rsnapshot_daily'           : days * 3,
-            timestamps + 'last_rsnapshot_weekly'          : days * 14,
-            timestamps + 'last_rsnapshot_monthly'         : days * 70,
-
-            timestamps + 'last_zfssnapshot_hourly'        : hours * 5,
-            timestamps + 'last_zfssnapshot_daily'         : hours * 36,
-            timestamps + 'last_zfssnapshot_weekly'        : days * 9,
-            timestamps + 'last_zfssnapshot_monthly'       : days * 70,
-            timestamps + 'last_zfssnapshot_cleanup'       : hours * 24,
-
-            timestamps + 'last_zfs_scrub'                 : days * 9,
-            timestamps + 'last_backup_zfs_scrub'          : days * 9,
-            timestamps + 'last_cabin_zfs_scrub'           : days * 9,
-
-            timestamps + 'last_zfsxfer_backup.house'      : hours * 36,
-            timestamps + 'last_zfsxfer_ski.dyn.guru.org'  : days * 7,
-            timestamps + 'last_photos_sync'               : hours * 8,
-
-            timestamps + 'last_disk_selftest_short'       : days * 14,
-            timestamps + 'last_disk_selftest_long'        : days * 31,
-            timestamps + 'last_backup_disk_selftest_short': days * 14,
-            timestamps + 'last_backup_disk_selftest_long' : days * 31,
-            timestamps + 'last_cabin_disk_selftest_short' : days * 14,
-            timestamps + 'last_cabin_disk_selftest_long'  : days * 31,
-
-            timestamps + 'last_cabin_rpi_ping'            : mins * 10,
-            timestamps + 'last_healthy_wifi'              : mins * 10,
-            timestamps + 'last_healthy_network'           : mins * 10,
-            timestamps + 'last_scott_sync'                : days * 2,
+            timestamps + "last_http_probe_wannabe_house": mins * 10,
+            timestamps + "last_http_probe_meerkat_cabin": mins * 10,
+            timestamps + "last_http_probe_dns_house": mins * 10,
+            timestamps + "last_http_probe_rpi_cabin": mins * 10,
+            timestamps + "last_http_probe_rpi_house": mins * 10,
+            timestamps + "last_http_probe_therm_house": mins * 10,
+            timestamps + "last_rsnapshot_hourly": hours * 24,
+            timestamps + "last_rsnapshot_daily": days * 3,
+            timestamps + "last_rsnapshot_weekly": days * 14,
+            timestamps + "last_rsnapshot_monthly": days * 70,
+            timestamps + "last_zfssnapshot_hourly": hours * 5,
+            timestamps + "last_zfssnapshot_daily": hours * 36,
+            timestamps + "last_zfssnapshot_weekly": days * 9,
+            timestamps + "last_zfssnapshot_monthly": days * 70,
+            timestamps + "last_zfssnapshot_cleanup": hours * 24,
+            timestamps + "last_zfs_scrub": days * 9,
+            timestamps + "last_backup_zfs_scrub": days * 9,
+            timestamps + "last_cabin_zfs_scrub": days * 9,
+            timestamps + "last_zfsxfer_backup.house": hours * 36,
+            timestamps + "last_zfsxfer_ski.dyn.guru.org": days * 7,
+            timestamps + "last_photos_sync": hours * 8,
+            timestamps + "last_disk_selftest_short": days * 14,
+            timestamps + "last_disk_selftest_long": days * 31,
+            timestamps + "last_backup_disk_selftest_short": days * 14,
+            timestamps + "last_backup_disk_selftest_long": days * 31,
+            timestamps + "last_cabin_disk_selftest_short": days * 14,
+            timestamps + "last_cabin_disk_selftest_long": days * 31,
+            timestamps + "last_cabin_rpi_ping": mins * 10,
+            timestamps + "last_healthy_wifi": mins * 10,
+            timestamps + "last_healthy_network": mins * 10,
+            timestamps + "last_scott_sync": days * 2,
         }
         self.write_header(f)
 
@@ -66,9 +61,13 @@ class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
             age = now - ts
             self.debug_print("%s -- age is %ds, limit is %ds" % (x, age, limits[x]))
             if age < limits[x]:
-                f.write('<TD BGCOLOR="#007010" HEIGHT=100 WIDTH=33% STYLE="text-size:60%; vertical-align: middle;">\n')
+                f.write(
+                    '<TD BGCOLOR="#007010" HEIGHT=100 WIDTH=33% STYLE="text-size:60%; vertical-align: middle;">\n'
+                )
             else:
-                f.write('<TD BGCOLOR="#990000" HEIGHT=100 WIDTH=33% CLASS="invalid" STYLE="text-size:60%; vertical-align:middle;">\n')
+                f.write(
+                    '<TD BGCOLOR="#990000" HEIGHT=100 WIDTH=33% CLASS="invalid" STYLE="text-size:60%; vertical-align:middle;">\n'
+                )
             f.write("  <CENTER><FONT SIZE=-2>\n")
 
             name = x.replace(timestamps, "")
@@ -78,10 +77,13 @@ class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
             hours = divmod(days[1], constants.seconds_per_hour)
             minutes = divmod(hours[1], constants.seconds_per_minute)
 
-            self.debug_print("%s is %d days %02d:%02d old." % (
-                name, days[0], hours[0], minutes[0]))
-            f.write("%s<BR>\n<B>%d</b> days <B>%02d</B>:<B>%02d</B> old.\n" % (
-                name, days[0], hours[0], minutes[0]))
+            self.debug_print(
+                "%s is %d days %02d:%02d old." % (name, days[0], hours[0], minutes[0])
+            )
+            f.write(
+                "%s<BR>\n<B>%d</b> days <B>%02d</B>:<B>%02d</B> old.\n"
+                % (name, days[0], hours[0], minutes[0])
+            )
             f.write("</FONT></CENTER>\n</TD>\n\n")
             n += 1
             if n % 3 == 0:
@@ -91,7 +93,8 @@ class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
         return True
 
     def write_header(self, f):
-        f.write("""
+        f.write(
+            """
 <HTML>
 <HEAD>
 <STYLE>
@@ -138,14 +141,18 @@ class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
 <CENTER>
 <TABLE BORDER=0 WIDTH=99% style="font-size:16pt">
 <TR>
-""")
+"""
+        )
 
     def write_footer(self, f):
-        f.write("""
+        f.write(
+            """
 </TR>
 </TABLE>
 </BODY>
-</HTML>""")
+</HTML>"""
+        )
+
 
 test = periodic_health_renderer({"Test", 123})
 test.periodic_render("Test")
index d9f607e3447b08e7616cfe66ef94e6793ce78d55..c5b09138a139c45364c54b2ea80c0b60dd4b786b 100755 (executable)
--- a/kiosk.py
+++ b/kiosk.py
@@ -15,22 +15,25 @@ import logging
 import trigger_catalog
 import utils
 
+
 def filter_news_during_dinnertime(page):
     now = datetime.now()
     is_dinnertime = now.hour >= 17 and now.hour <= 20
-    return (not is_dinnertime or
-            not ("cnn" in page or
-                 "news" in page or
-                 "mynorthwest" in page or
-                 "seattle" in page or
-                 "stranger" in page or
-                 "twitter" in page or
-                 "wsj" in page))
+    return not is_dinnertime or not (
+        "cnn" in page
+        or "news" in page
+        or "mynorthwest" in page
+        or "seattle" in page
+        or "stranger" in page
+        or "twitter" in page
+        or "wsj" in page
+    )
+
 
 def thread_change_current():
     page_chooser = chooser.weighted_random_chooser_with_triggers(
-        trigger_catalog.get_triggers(),
-        [ filter_news_during_dinnertime ])
+        trigger_catalog.get_triggers(), [filter_news_during_dinnertime]
+    )
     swap_page_target = 0
     last_page = ""
     while True:
@@ -38,27 +41,26 @@ def thread_change_current():
         (page, triggered) = page_chooser.choose_next_page()
 
         if triggered:
-            print('chooser[%s] - WE ARE TRIGGERED.' % utils.timestamp())
+            print("chooser[%s] - WE ARE TRIGGERED." % utils.timestamp())
             if page != last_page:
-                print('chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED' % (
-                    utils.timestamp(), page))
+                print(
+                    "chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED"
+                    % (utils.timestamp(), page)
+                )
                 try:
-                    f = open(os.path.join(constants.pages_dir,
-                                          'current.shtml'), 'w')
+                    f = open(os.path.join(constants.pages_dir, "current.shtml"), "w")
                     emit_wrapped(f, page)
                     f.close()
                 except:
-                    print('chooser[%s] - page does not exist?!' % (
-                        utils.timestamp()))
+                    print("chooser[%s] - page does not exist?!" % (utils.timestamp()))
                     continue
                 last_page = page
                 swap_page_target = now + constants.refresh_period_sec
 
                 # Also notify XMLHTTP clients that they need to refresh now.
-                path = os.path.join(constants.pages_dir,
-                                    'reload_immediately.html')
-                f = open(path, 'w')
-                f.write('Reload, suckers!')
+                path = os.path.join(constants.pages_dir, "reload_immediately.html")
+                f = open(path, "w")
+                f.write("Reload, suckers!")
                 f.close()
 
                 # Fix this hack... maybe read the webserver logs and see if it
@@ -67,23 +69,27 @@ def thread_change_current():
                 os.remove(path)
 
         elif now >= swap_page_target:
-            if (page == last_page):
-                print(('chooser[%s] - nominal choice got the same page...' % (
-                    utils.timestamp())))
+            if page == last_page:
+                print(
+                    (
+                        "chooser[%s] - nominal choice got the same page..."
+                        % (utils.timestamp())
+                    )
+                )
                 continue
-            print('chooser[%s] - nominal choice of %s' % (utils.timestamp(), page))
+            print("chooser[%s] - nominal choice of %s" % (utils.timestamp(), page))
             try:
-                f = open(os.path.join(constants.pages_dir,
-                                      'current.shtml'), 'w')
+                f = open(os.path.join(constants.pages_dir, "current.shtml"), "w")
                 emit_wrapped(f, page)
                 f.close()
             except:
-                print('chooser[%s] - page does not exist?!' % (utils.timestamp()))
+                print("chooser[%s] - page does not exist?!" % (utils.timestamp()))
                 continue
             last_page = page
             swap_page_target = now + constants.refresh_period_sec
         time.sleep(1)
 
+
 def pick_background_color():
     now = datetime.now()
     if now.hour <= 6 or now.hour >= 21:
@@ -93,10 +99,12 @@ def pick_background_color():
     else:
         return "FFFFFF"
 
+
 def emit_wrapped(f, filename):
     age = utils.describe_age_of_file_briefly("pages/%s" % filename)
     bgcolor = pick_background_color()
-    f.write("""
+    f.write(
+        """
 <HEAD>
   <TITLE>Kitchen Kiosk</TITLE>
   <LINK rel="stylesheet" type="text/css" href="style.css">
@@ -280,53 +288,66 @@ def emit_wrapped(f, filename):
         </TD>
     </TR>
     </TABLE>
-</BODY>""" % (bgcolor,
-              constants.refresh_period_sec * 1000,
-              bgcolor,
-              filename,
-              filename,
-              age))
+</BODY>"""
+        % (
+            bgcolor,
+            constants.refresh_period_sec * 1000,
+            bgcolor,
+            filename,
+            filename,
+            age,
+        )
+    )
+
 
 def thread_invoke_renderers():
     while True:
-        print("renderer[%s]: invoking all renderers in catalog..." % (
-            utils.timestamp()))
+        print(
+            "renderer[%s]: invoking all renderers in catalog..." % (utils.timestamp())
+        )
         for r in renderer_catalog.get_renderers():
             now = time.time()
             try:
                 r.render()
             except Exception as e:
                 traceback.print_exc()
-                print("renderer[%s] unknown exception in %s, swallowing it." % (
-                    utils.timestamp(), r.get_name()))
+                print(
+                    "renderer[%s] unknown exception in %s, swallowing it."
+                    % (utils.timestamp(), r.get_name())
+                )
             except Error as e:
                 traceback.print_exc()
-                print("renderer[%s] unknown error in %s, swallowing it." % (
-                    utils.timestamp(), r.get_name()))
+                print(
+                    "renderer[%s] unknown error in %s, swallowing it."
+                    % (utils.timestamp(), r.get_name())
+                )
             delta = time.time() - now
-            if (delta > 1.0):
-                print("renderer[%s]: Warning: %s's rendering took %5.2fs." % (
-                    utils.timestamp(), r.get_name(), delta))
-        print("renderer[%s]: thread having a little break for %ds..." % (
-            utils.timestamp(), constants.render_period_sec))
+            if delta > 1.0:
+                print(
+                    "renderer[%s]: Warning: %s's rendering took %5.2fs."
+                    % (utils.timestamp(), r.get_name(), delta)
+                )
+        print(
+            "renderer[%s]: thread having a little break for %ds..."
+            % (utils.timestamp(), constants.render_period_sec)
+        )
         time.sleep(constants.render_period_sec)
 
+
 if __name__ == "__main__":
     logging.basicConfig()
     changer_thread = None
     renderer_thread = None
     while True:
-        if (changer_thread == None or
-            not changer_thread.is_alive()):
-            print("MAIN[%s] - (Re?)initializing chooser thread..." % (
-                utils.timestamp()))
-            changer_thread = Thread(target = thread_change_current, args=())
+        if changer_thread == None or not changer_thread.is_alive():
+            print(
+                "MAIN[%s] - (Re?)initializing chooser thread..." % (utils.timestamp())
+            )
+            changer_thread = Thread(target=thread_change_current, args=())
             changer_thread.start()
-        if (renderer_thread == None or
-            not renderer_thread.is_alive()):
-            print("MAIN[%s] - (Re?)initializing render thread..." % (
-                utils.timestamp()))
-            renderer_thread = Thread(target = thread_invoke_renderers, args=())
+        if renderer_thread == None or not renderer_thread.is_alive():
+            print("MAIN[%s] - (Re?)initializing render thread..." % (utils.timestamp()))
+            renderer_thread = Thread(target=thread_invoke_renderers, args=())
             renderer_thread.start()
         time.sleep(60)
     print("Should never get here.")
index 0b8f7fc0a4b9e8724b8dc257e1df35724c121785..2e5499dcc4a472559633ac88fc75aa64fdfbc0f2 100644 (file)
@@ -4,53 +4,58 @@ import renderer
 import random
 import re
 
+
 class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer that uses a local mirror of Google photos"""
 
     album_root_directory = "/usr/local/export/www/gphotos/albums"
 
-    album_whitelist = frozenset([
-        '8-Mile Lake Hike',
-        'Bangkok and Phuket, 2003',
-        'Barn',
-        'Blue Angels... Seafair',
-        'Chihuly Glass',
-        'Dunn Gardens',
-        'East Coast 2018',
-        'Fall \'17',
-        'Friends',
-        'Hiking',
-        'Key West 2019',
-        'Krakow 2009',
-        'Kubota Gardens',
-        'Las Vegas, 2017',
-        'London, 2018',
-        'Munich, July 2018',
-        'NJ 2015',
-        'Newer Alex Photos',
-        'Ohme Gardens',
-        'Olympic Sculpture Park',
-        'Prague and Munich 2019',
-        'Random',
-        'Scott and Lynn',
-        'SFO 2014',
-        'Skiing with Alex',
-        'Sonoma',
-        'Trip to California, \'16',
-        'Trip to San Francisco',
-        'Trip to East Coast \'16',
-        'Tuscany 2008',
-        'Yosemite 2010',
-        'Zoo',
-    ])
+    album_whitelist = frozenset(
+        [
+            "8-Mile Lake Hike",
+            "Bangkok and Phuket, 2003",
+            "Barn",
+            "Blue Angels... Seafair",
+            "Chihuly Glass",
+            "Dunn Gardens",
+            "East Coast 2018",
+            "Fall '17",
+            "Friends",
+            "Hiking",
+            "Key West 2019",
+            "Krakow 2009",
+            "Kubota Gardens",
+            "Las Vegas, 2017",
+            "London, 2018",
+            "Munich, July 2018",
+            "NJ 2015",
+            "Newer Alex Photos",
+            "Ohme Gardens",
+            "Olympic Sculpture Park",
+            "Prague and Munich 2019",
+            "Random",
+            "Scott and Lynn",
+            "SFO 2014",
+            "Skiing with Alex",
+            "Sonoma",
+            "Trip to California, '16",
+            "Trip to San Francisco",
+            "Trip to East Coast '16",
+            "Tuscany 2008",
+            "Yosemite 2010",
+            "Zoo",
+        ]
+    )
 
-    extension_whitelist = frozenset([
-        'jpg',
-        'gif',
-        'JPG',
-        'jpeg',
-        'GIF',
-    ])
+    extension_whitelist = frozenset(
+        [
+            "jpg",
+            "gif",
+            "JPG",
+            "jpeg",
+            "GIF",
+        ]
+    )
 
     def __init__(self, name_to_timeout_dict):
         super(local_photos_mirror_renderer, self).__init__(name_to_timeout_dict, False)
@@ -60,16 +65,16 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
         return "local_photos_mirror"
 
     def periodic_render(self, key):
-        if (key == 'Index Photos'):
+        if key == "Index Photos":
             return self.index_photos()
-        elif (key == 'Choose Photo'):
+        elif key == "Choose Photo":
             return self.choose_photo()
         else:
-            raise error('Unexpected operation')
+            raise error("Unexpected operation")
 
     def album_is_in_whitelist(self, name):
         for wlalbum in self.album_whitelist:
-            if re.search('\d+ %s' % wlalbum, name) != None:
+            if re.search("\d+ %s" % wlalbum, name) != None:
                 return True
         return False
 
@@ -77,16 +82,15 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
     # keep their paths in memory.
     def index_photos(self):
         for root, subdirs, files in os.walk(self.album_root_directory):
-            last_dir = root.rsplit('/', 1)[1]
+            last_dir = root.rsplit("/", 1)[1]
             if self.album_is_in_whitelist(last_dir):
                 for x in files:
-                    extension = x.rsplit('.', 1)[1]
+                    extension = x.rsplit(".", 1)[1]
                     if extension in self.extension_whitelist:
                         photo_path = os.path.join(root, x)
                         photo_url = photo_path.replace(
-                            "/usr/local/export/www/",
-                            "http://10.0.0.18/",
-                            1)
+                            "/usr/local/export/www/", "http://10.0.0.18/", 1
+                        )
                         self.candidate_photos.add(photo_url)
         return True
 
@@ -96,21 +100,27 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
             print("No photos!")
             return False
         path = random.sample(self.candidate_photos, 1)[0]
-        f = file_writer.file_writer('photo_23_3600.html')
-        f.write("""
+        f = file_writer.file_writer("photo_23_3600.html")
+        f.write(
+            """
 <style>
 body{background-color:#303030;}
 div#time{color:#dddddd;}
 div#date{color:#dddddd;}
 </style>
-<center>""")
-        f.write('<img src="%s" style="display:block;max-width=800;max-height:600;width:auto;height:auto">' % path)
+<center>"""
+        )
+        f.write(
+            '<img src="%s" style="display:block;max-width=800;max-height:600;width:auto;height:auto">'
+            % path
+        )
         f.write("</center>")
         f.close()
         return True
 
+
 # Test code
-#x = local_photos_mirror_renderer({"Index Photos": (60 * 60 * 12),
+# x = local_photos_mirror_renderer({"Index Photos": (60 * 60 * 12),
 #                                  "Choose Photo": (1)})
-#x.index_photos()
-#x.choose_photo()
+# x.index_photos()
+# x.choose_photo()
index cfd3cf254e9bbb4968fa53988d40b6375890d6ea..fbe73bbd84ef3095c258f1c596509df9f7ef97c7 100644 (file)
@@ -1,12 +1,11 @@
 import generic_news_rss_renderer
 
+
 class mynorthwest_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
         super(mynorthwest_rss_renderer, self).__init__(
-            name_to_timeout_dict,
-            feed_site,
-            feed_uris,
-            page_title)
+            name_to_timeout_dict, feed_site, feed_uris, page_title
+        )
         self.debug = 1
 
     def debug_prefix(self):
@@ -19,9 +18,9 @@ class mynorthwest_rss_renderer(generic_news_rss_renderer.generic_news_rss_render
         return "mynorthwest-details-%s" % (self.page_title)
 
     def find_image(self, item):
-        image = item.findtext('media:content')
+        image = item.findtext("media:content")
         if image is not None:
-            image_url = image.get('url')
+            image_url = image.get("url")
             return image_url
         return None
 
@@ -40,14 +39,14 @@ class mynorthwest_rss_renderer(generic_news_rss_renderer.generic_news_rss_render
             return False
         return True
 
+
 # Test
-#x = mynorthwest_rss_renderer(
+# x = mynorthwest_rss_renderer(
 #    {"Fetch News" : 1,
 #     "Shuffle News" : 1},
 #    "mynorthwest.com",
 #    [ "/feed/" ],
 #    "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
 #    print "Error fetching news, no items fetched."
-#x.shuffle_news()
-
+# x.shuffle_news()
index ca405346dc10f88d98689886ae198ecd98195e8b..1e666489e66cf0366532e3658a6f9a6bc2763a01 100644 (file)
@@ -8,6 +8,7 @@ import file_writer
 import renderer
 import secrets
 
+
 class garage_door_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
         super(garage_door_renderer, self).__init__(name_to_timeout_dict, False)
@@ -28,21 +29,24 @@ class garage_door_renderer(renderer.debuggable_abstaining_renderer):
 
     async def poll_myq(self):
         async with ClientSession() as websession:
-            myq = await pymyq.login(secrets.myq_username,
-                                    secrets.myq_password,
-                                    websession)
+            myq = await pymyq.login(
+                secrets.myq_username, secrets.myq_password, websession
+            )
             self.doors = myq.devices
             return len(self.doors) > 0
 
     def update_page(self):
         f = file_writer.file_writer(constants.myq_pagename)
-        f.write("""
+        f.write(
+            """
 <H1>Garage Door Status</H1>
 <!-- Last updated at %s -->
 <HR>
 <TABLE BORDER=0 WIDTH=99%%>
   <TR>
-""" % self.last_update)
+"""
+            % self.last_update
+        )
         html = self.do_door("Near House")
         if html == None:
             return False
@@ -52,9 +56,11 @@ class garage_door_renderer(renderer.debuggable_abstaining_renderer):
         if html == None:
             return False
         f.write(html)
-        f.write("""
+        f.write(
+            """
   </TR>
-</TABLE>""")
+</TABLE>"""
+        )
         f.close()
         return True
 
@@ -106,15 +112,20 @@ class garage_door_renderer(renderer.debuggable_abstaining_renderer):
   <B>%s</B></FONT><BR>
   for %d day(s), %02d:%02d.
   </CENTER>
-</TD>""" % (name,
-            self.get_state_icon(state),
-            width,
-            color,
-            state,
-            days[0], hours[0], minutes[0])
+</TD>""" % (
+                    name,
+                    self.get_state_icon(state),
+                    width,
+                    color,
+                    state,
+                    days[0],
+                    hours[0],
+                    minutes[0],
+                )
         return None
 
+
 # Test
-x = garage_door_renderer({"Test" : 1})
+x = garage_door_renderer({"Test": 1})
 x.periodic_render("Poll MyQ")
 x.periodic_render("Update Page")
index 838f51a545dd981af699086bc5057750b7f89295..255091e7c1f93569d52336bbbc15fc2a57307619 100644 (file)
@@ -2,6 +2,7 @@ import constants
 import globals
 import trigger
 
+
 class myq_trigger(trigger.trigger):
     def get_triggered_page_list(self):
         if globals.get("myq_triggered") == True:
index 4aa72fe85f9851f9d221ce6be4fb73971834f952..fa800d8cf4fb3f6c0f1d5bd1ea3958b6c8dab1c9 100644 (file)
@@ -1,5 +1,6 @@
 import sys
 
+
 class page_builder(object):
     LAYOUT_AUTO = 0
     LAYOUT_ONE_ITEM = 1
@@ -96,5 +97,6 @@ class page_builder(object):
     def set_custom_html(self, html):
         self.custom_html = html
 
-#x = page_builder()
-#x.set_title("title").add_item("item1").add_item("item2").add_item("item3").render_html(sys.stdout)
+
+# x = page_builder()
+# x.set_title("title").add_item("item1").add_item("item2").add_item("item3").render_html(sys.stdout)
index 9de0c2d543acc7b617f1fc4e173cc8d837460a15..c3c45b757c2a8bb3ef9c8897bc6d9b29a68f60be 100644 (file)
@@ -8,49 +8,51 @@ import sets
 import random
 from oauth2client.client import AccessTokenRefreshError
 
+
 class picasa_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer to fetch photos from picasaweb.google.com"""
 
-    album_whitelist = sets.ImmutableSet([
-        'Alex',
-        'Alex 6.0..8.0 years old',
-        'Alex 3.0..4.0 years old',
-        'Barn',
-        'Bangkok and Phukey, 2003',
-        'Blue Angels... Seafair',
-        'Carol Ann and Owen',
-        'Chahuly Glass',
-        'Dunn Gardens',
-        'East Coast, 2011',
-        'East Coast, 2013',
-        'Friends',
-        'Gasches',
-        'Gasch Wedding',
-        'Hiking and Ohme Gardens',
-        'Hiking',
-        'Karen\'s Wedding',
-        'Key West 2019',
-        'Krakow 2009',
-        'Munich, July 2018',
-        'NJ 2015',
-        'NW Trek',
-        'Oahu 2010'
-        'Ocean Shores 2009',
-        'Ohme Gardens',
-        'Olympic Sculpture Park',
-        'Paintings',
-        'Puerto Vallarta',
-        'Photos from posts',
-        'Random',
-        'SFO 2014',
-        'Soccer',
-        'Skiing with Alex',
-        'Tuscany 2008',
-        "Trip to California '16",
-        "Trip to East Coast '16",
-        'Yosemite 2010',
-        'Zoo',
-    ])
+    album_whitelist = sets.ImmutableSet(
+        [
+            "Alex",
+            "Alex 6.0..8.0 years old",
+            "Alex 3.0..4.0 years old",
+            "Barn",
+            "Bangkok and Phukey, 2003",
+            "Blue Angels... Seafair",
+            "Carol Ann and Owen",
+            "Chahuly Glass",
+            "Dunn Gardens",
+            "East Coast, 2011",
+            "East Coast, 2013",
+            "Friends",
+            "Gasches",
+            "Gasch Wedding",
+            "Hiking and Ohme Gardens",
+            "Hiking",
+            "Karen's Wedding",
+            "Key West 2019",
+            "Krakow 2009",
+            "Munich, July 2018",
+            "NJ 2015",
+            "NW Trek",
+            "Oahu 2010", "Ocean Shores 2009",
+            "Ohme Gardens",
+            "Olympic Sculpture Park",
+            "Paintings",
+            "Puerto Vallarta",
+            "Photos from posts",
+            "Random",
+            "SFO 2014",
+            "Soccer",
+            "Skiing with Alex",
+            "Tuscany 2008",
+            "Trip to California '16",
+            "Trip to East Coast '16",
+            "Yosemite 2010",
+            "Zoo",
+        ]
+    )
 
     def __init__(self, name_to_timeout_dict, oauth):
         super(picasa_renderer, self).__init__(name_to_timeout_dict, False)
@@ -64,12 +66,12 @@ class picasa_renderer(renderer.debuggable_abstaining_renderer):
         return "picasa"
 
     def periodic_render(self, key):
-        if (key == 'Fetch Photos'):
+        if key == "Fetch Photos":
             return self.fetch_photos()
-        elif (key == 'Shuffle Cached Photos'):
+        elif key == "Shuffle Cached Photos":
             return self.shuffle_cached()
         else:
-            raise error('Unexpected operation')
+            raise Exception("Unexpected operation")
 
     # Just fetch and cache the photo URLs in memory.
     def fetch_photos(self):
@@ -79,24 +81,29 @@ class picasa_renderer(renderer.debuggable_abstaining_renderer):
             temp_height = {}
             temp_is_video = {}
             conn = http.client.HTTPSConnection("photoslibrary.googleapis.com")
-            conn.request("GET",
-                         "/v1/albums",
-                         None,
-                         { "Authorization": "%s %s" % (self.oauth.token['token_type'], self.oauth.token['access_token'])
-                         })
+            conn.request(
+                "GET",
+                "/v1/albums",
+                None,
+                {
+                    "Authorization": "%s %s"
+                    % (self.oauth.token["token_type"], self.oauth.token["access_token"])
+                },
+            )
             response = conn.getresponse()
             if response.status != 200:
                 print(("Failed to fetch albums, status %d\n" % response.status))
             print(response.read())
             albums = self.pws.GetUserFeed().entry
             for album in albums:
-                if (album.title.text not in picasa_renderer.album_whitelist):
+                if album.title.text not in picasa_renderer.album_whitelist:
                     continue
                 photos = self.pws.GetFeed(
-                    '/data/feed/api/user/%s/albumid/%s?kind=photo&imgmax=1024u' %
-                    (secrets.google_username, album.gphoto_id.text))
+                    "/data/feed/api/user/%s/albumid/%s?kind=photo&imgmax=1024u"
+                    % (secrets.google_username, album.gphoto_id.text)
+                )
                 for photo in photos.entry:
-                    id = '%s/%s' % (photo.albumid.text, photo.gphoto_id.text)
+                    id = "%s/%s" % (photo.albumid.text, photo.gphoto_id.text)
                     temp_is_video[id] = False
                     resolution = 999999
                     for x in photo.media.content:
@@ -118,9 +125,11 @@ class picasa_renderer(renderer.debuggable_abstaining_renderer):
             self.height = temp_height
             self.is_video = temp_is_video
             return True
-        except (gdata.service.RequestError,
-                gdata.photos.service.GooglePhotosException,
-                AccessTokenRefreshError):
+        except (
+            gdata.service.RequestError,
+            gdata.photos.service.GooglePhotosException,
+            AccessTokenRefreshError,
+        ):
             print("******** TRYING TO REFRESH PHOTOS CLIENT *********")
             self.oauth.refresh_token()
             self.client = self.oauth.photos_service()
@@ -134,36 +143,48 @@ class picasa_renderer(renderer.debuggable_abstaining_renderer):
         pid = random.sample(self.photo_urls, 1)
         id = pid[0]
         refresh = 15
-        if (self.is_video[id]): refresh = 60
+        if self.is_video[id]:
+            refresh = 60
 
-        f = file_writer.file_writer('photo_23_none.html')
-        f.write("""
+        f = file_writer.file_writer("photo_23_none.html")
+        f.write(
+            """
 <style>
 body{background-color:#303030;}
 div#time{color:#dddddd;}
 div#date{color:#dddddd;}
 </style>
-<center>""")
+<center>"""
+        )
         if self.is_video[id]:
-            f.write('<iframe src="%s" seamless width=%s height=%s></iframe>' % (self.photo_urls[id], self.width[id], self.height[id]))
+            f.write(
+                '<iframe src="%s" seamless width=%s height=%s></iframe>'
+                % (self.photo_urls[id], self.width[id], self.height[id])
+            )
         else:
-            f.write('<img src="%s" width=%s alt="%s">' % (self.photo_urls[id], self.width[id], self.photo_urls[id]))
+            f.write(
+                '<img src="%s" width=%s alt="%s">'
+                % (self.photo_urls[id], self.width[id], self.photo_urls[id])
+            )
         f.write("</center>")
         f.close()
         return True
 
+
 # Test code
-oauth = gdata_oauth.OAuth(secrets.google_client_id,
-                          secrets.google_client_secret)
+oauth = gdata_oauth.OAuth(secrets.google_client_id, secrets.google_client_secret)
 oauth.get_new_token()
 if not oauth.has_token():
     user_code = oauth.get_user_code()
-    print('------------------------------------------------------------')
-    print(('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
-        oauth.verification_url, user_code)))
+    print("------------------------------------------------------------")
+    print(
+        (
+            'Go to %s and enter the code "%s" (no quotes, case-sensitive)'
+            % (oauth.verification_url, user_code)
+        )
+    )
     oauth.get_new_token()
-x = picasa_renderer({"Fetch Photos": (60 * 60 * 12),
-                     "Shuffle Cached Photos": (1)},
-                    oauth)
+x = picasa_renderer(
+    {"Fetch Photos": (60 * 60 * 12), "Shuffle Cached Photos": (1)}, oauth
+)
 x.fetch_photos()
-
index 745ad52a5e50465f0fda1f124bc0d059b1fe2815..a8f6170492b0ea99c0fa4317f71d41be8d72b610 100644 (file)
@@ -4,11 +4,12 @@ import renderer
 import http.client
 import re
 
+
 class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
         super(pollen_count_renderer, self).__init__(name_to_timeout_dict, False)
-        self.site = 'www.nwasthma.com'
-        self.uri = '/pollen/pollen-count/'
+        self.site = "www.nwasthma.com"
+        self.uri = "/pollen/pollen-count/"
         self.trees = []
         self.grasses = []
         self.weeds = []
@@ -18,16 +19,15 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
 
     def fetch_html(self):
         conn = http.client.HTTPConnection(self.site)
-        conn.request(
-                "GET",
-                self.uri,
-                None,
-                {})
+        conn.request("GET", self.uri, None, {})
         response = conn.getresponse()
         if response.status != 200:
-            print(('Connection to %s/%s failed, status %d' % (self.site,
-                                                             self.uri,
-                                                             response.status)))
+            print(
+                (
+                    "Connection to %s/%s failed, status %d"
+                    % (self.site, self.uri, response.status)
+                )
+            )
             return False
         return response.read()
 
@@ -35,7 +35,7 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
         desc = ""
         color = "#00d000"
         if tr != None and tr.string != None:
-            desc = tr.string.encode('utf-8')
+            desc = tr.string.encode("utf-8")
             if "edium" in desc:
                 color = "#a0a000"
             elif "igh" in desc:
@@ -44,7 +44,7 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
         count = 0
         if tc != None and tc.string != None:
             try:
-                count = int(tc.string.encode('utf-8'))
+                count = int(tc.string.encode("utf-8"))
             except:
                 count = 0
         proportion = float(count) / float(maximum)
@@ -52,14 +52,18 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
 
         comment = ""
         if tcomment != None and tcomment.string != None:
-            comment = "%s" % (tcomment.string.encode('utf-8'))
+            comment = "%s" % (tcomment.string.encode("utf-8"))
 
         # Label:
         text = text + '<TR><TD WIDTH=10%% STYLE="font-size: 22pt">%s:</TD>' % (kind)
 
         # Bar graph with text in it (possibly overspilling):
-        text = text + '<TD HEIGHT=80><DIV STYLE="width: %d; height: 80; overflow: visible; background-color: %s; font-size: 16pt">' % (width, color)
-        text = text + 'count=%d,&nbsp;%s&nbsp;%s</DIV>' % (count, desc, comment)
+        text = (
+            text
+            + '<TD HEIGHT=80><DIV STYLE="width: %d; height: 80; overflow: visible; background-color: %s; font-size: 16pt">'
+            % (width, color)
+        )
+        text = text + "count=%d,&nbsp;%s&nbsp;%s</DIV>" % (count, desc, comment)
         return text
 
     def munge(self, raw):
@@ -71,41 +75,42 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
 <CENTER>
 <TABLE BODER WIDTH=800>"""
         date = "<CENTER><B>Unknown Date</B></CENTER>"
-        for x in soup.find_all('p'):
+        for x in soup.find_all("p"):
             if x == None or x.string == None:
                 continue
-            txt = x.string.encode('utf-8')
+            txt = x.string.encode("utf-8")
             m = re.match("[0-9][0-9].[0-9][0-9].20[0-9][0-9]", txt)
             if m != None:
                 date = "<CENTER><B>%s</B></CENTER>" % (txt)
-                y = x.find_next_sibling('p')
+                y = x.find_next_sibling("p")
                 if y != None and y.string != None:
-                    txt = y.string.encode('utf-8')
+                    txt = y.string.encode("utf-8")
                     date = date + "<BR>%s<HR>" % txt
-        text = text + '<TR><TD COLSPAN=3 STYLE="font-size:16pt">%s</TD></TR>\n' % (
-            date)
+        text = text + '<TR><TD COLSPAN=3 STYLE="font-size:16pt">%s</TD></TR>\n' % (date)
 
-        trees = soup.find('td', text=re.compile('[Tt]rees:'))
+        trees = soup.find("td", text=re.compile("[Tt]rees:"))
         if trees != None:
-            tc = trees.find_next_sibling('td')
-            tr = tc.find_next_sibling('td')
-            tcomment = tr.find_next_sibling('td')
+            tc = trees.find_next_sibling("td")
+            tr = tc.find_next_sibling("td")
+            tcomment = tr.find_next_sibling("td")
             text = self.append_crap(text, tc, tr, tcomment, "Trees", 650)
 
-        grasses = soup.find('td', text=re.compile('[Gg]rasses:'))
+        grasses = soup.find("td", text=re.compile("[Gg]rasses:"))
         if grasses != None:
-            gc = grasses.find_next_sibling('td')
-            gr = gc.find_next_sibling('td')
-            gcomment = gr.find_next_sibling('td')
+            gc = grasses.find_next_sibling("td")
+            gr = gc.find_next_sibling("td")
+            gcomment = gr.find_next_sibling("td")
             text = self.append_crap(text, gc, gr, gcomment, "Grasses", 35)
 
-        weeds = soup.find('td', text=re.compile('[Ww]eeds:'))
+        weeds = soup.find("td", text=re.compile("[Ww]eeds:"))
         if weeds != None:
-            wc = weeds.find_next_sibling('td')
-            wr = wc.find_next_sibling('td')
-            wcomment = wr.find_next_sibling('td')
+            wc = weeds.find_next_sibling("td")
+            wr = wc.find_next_sibling("td")
+            wcomment = wr.find_next_sibling("td")
             text = self.append_crap(text, wc, wr, wcomment, "Weeds", 25)
-        text = text + """
+        text = (
+            text
+            + """
 <TR>
   <TD COLSPAN=3 STYLE="font-size:16pt">
 <HR>
@@ -118,12 +123,13 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
 </TR>
 </TABLE>
 </CENTER>"""
+        )
         return text
 
     def poll_pollen(self):
         raw = self.fetch_html()
         cooked = self.munge(raw)
-        f = file_writer.file_writer('pollen_4_360.html')
+        f = file_writer.file_writer("pollen_4_360.html")
         f.write(cooked)
         f.close()
         return True
@@ -135,4 +141,5 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
         else:
             raise error("Unknown operaiton")
 
-#test = pollen_count_renderer({"Test", 123})
+
+# test = pollen_count_renderer({"Test", 123})
index 894855845fc3c318d66923b03ed78aa2a9099362..0925e67f7397f4aafe6be52345e4c2a3d9f8d993 100644 (file)
 import string
 import re
 
+
 class profanity_filter:
     def __init__(self):
         self.arrBad = [
-            'acrotomophilia',
-            'anal',
-            'anally',
-            'anilingus',
-            'anus',
-            'arsehole',
-            'ass',
-            'asses',
-            'asshole',
-            'assmunch',
-            'auto erotic',
-            'autoerotic',
-            'babeland',
-            'baby batter',
-            'ball gag',
-            'ball gravy',
-            'ball kicking',
-            'ball licking',
-            'ball sack',
-            'ball zack',
-            'ball sucking',
-            'bangbros',
-            'bareback',
-            'barely legal',
-            'barenaked',
-            'bastardo',
-            'bastinado',
-            'bbw',
-            'bdsm',
-            'beaver cleaver',
-            'beaver lips',
-            'bestiality',
-            'bi curious',
-            'big black',
-            'big breasts',
-            'big knockers',
-            'big tits',
-            'bimbos',
-            'birdlock',
-            'bitch',
-            'bitches',
-            'black cock',
-            'blonde action',
-            'blonde on blonde',
-            'blow j',
-            'blow your l',
-            'blow ourselves',
-            'blow m',
-            'blue waffle',
-            'blumpkin',
-            'bollocks',
-            'bondage',
-            'boner',
-            'boob',
-            'boobs',
-            'booty call',
-            'breasts',
-            'brown showers',
-            'brunette action',
-            'bukkake',
-            'bulldyke',
-            'bullshit',
-            'bullet vibe',
-            'bung hole',
-            'bunghole',
-            'busty',
-            'butt',
-            'buttcheeks',
-            'butthole',
-            'camel toe',
-            'camgirl',
-            'camslut',
-            'camwhore',
-            'carpet muncher',
-            'carpetmuncher',
-            'chocolate rosebuds',
-            'circlejerk',
-            'cleveland steamer',
-            'clit',
-            'clitoris',
-            'clover clamps',
-            'clusterfuck',
-            'cock',
-            'cocks',
-            'coprolagnia',
-            'coprophilia',
-            'cornhole',
-            'creampie',
-            'cream pie',
-            'cum',
-            'cumming',
-            'cunnilingus',
-            'cunt',
-            'damn',
-            'darkie',
-            'date rape',
-            'daterape',
-            'deep throat',
-            'deepthroat',
-            'dick',
-            'dildo',
-            'dirty pillows',
-            'dirty sanchez',
-            'dog style',
-            'doggie style',
-            'doggiestyle',
-            'doggy style',
-            'doggystyle',
-            'dolcett',
-            'domination',
-            'dominatrix',
-            'dommes',
-            'donkey punch',
-            'double dick',
-            'double dong',
-            'double penetration',
-            'dp action',
-            'dtf',
-            'eat my ass',
-            'ecchi',
-            'ejaculation',
-            'erection',
-            'erotic',
-            'erotism',
-            'escort',
-            'ethical slut',
-            'eunuch',
-            'faggot',
-            'posts each week',
-            'fecal',
-            'felch',
-            'fellatio',
-            'feltch',
-            'female squirting',
-            'femdom',
-            'figging',
-            'fingering',
-            'fisting',
-            'foot fetish',
-            'footjob',
-            'frotting',
-            'fuck',
-            'fucking',
-            'fuckin',
-            'fuckin\'',
-            'fucked',
-            'fuckers',
-            'fuck buttons',
-            'fuckhead',
-            'fudge packer',
-            'fudgepacker',
-            'futanari',
-            'g-spot',
-            'gspot',
-            'gang bang',
-            'gay sex',
-            'genitals',
-            'giant cock',
-            'girl on',
-            'girl on top',
-            'girls gone wild',
-            'goatcx',
-            'goatse',
-            'goddamn',
-            'gokkun',
-            'golden shower',
-            'goo girl',
-            'goodpoop',
-            'goregasm',
-            'grope',
-            'group sex',
-            'guro',
-            'hand job',
-            'handjob',
-            'hard core',
-            'hardcore',
-            'hentai',
-            'homoerotic',
-            'honkey',
-            'hooker',
-            'horny',
-            'hot chick',
-            'how to kill',
-            'how to murder',
-            'huge fat',
-            'humping',
-            'incest',
-            'intercourse',
-            'jack off',
-            'jail bait',
-            'jailbait',
-            'jerk off',
-            'jerking off',
-            'jigaboo',
-            'jiggaboo',
-            'jiggerboo',
-            'jizz',
-            'juggs',
-            'kike',
-            'kinbaku',
-            'kinkster',
-            'kinky',
-            'knobbing',
-            'leather restraint',
-            'lemon party',
-            'lolita',
-            'lovemaking',
-            'lpt request',
-            'make me come',
-            'male squirting',
-            'masturbate',
-            'masturbated',
-            'masturbating',
-            'menage a trois',
-            'milf',
-            'milfs',
-            'missionary position',
-            'motherfucker',
-            'mound of venus',
-            'mr hands',
-            'muff diver',
-            'muffdiving',
-            'nambla',
-            'nawashi',
-            'negro',
-            'neonazi',
-            'nig nog',
-            'nigga',
-            'nigger',
-            'nimphomania',
-            'nipple',
-            'not safe for',
-            'nsfw',
-            'nsfw images',
-            'nude',
-            'nudity',
-            'nutsack',
-            'nut sack',
-            'nympho',
-            'nymphomania',
-            'octopussy',
-            'omorashi',
-            'one night stand',
-            'orgasm',
-            'orgy',
-            'paedophile',
-            'panties',
-            'panty',
-            'pedobear',
-            'pedophile',
-            'pegging',
-            'pee',
-            'penis',
-            'phone sex',
-            'piss pig',
-            'pissing',
-            'pisspig',
-            'playboy',
-            'pleasure chest',
-            'pole smoker',
-            'ponyplay',
-            'poof',
-            'poop chute',
-            'poopchute',
-            'porn',
-            'pornhub',
-            'porno',
-            'pornography',
-            'prince albert',
-            'pthc',
-            'pube',
-            'pubes',
-            'pussy',
-            'pussies',
-            'queaf',
-            'queer',
-            'raghead',
-            'raging boner',
-            'rape',
-            'raping',
-            'rapist',
-            'rectum',
-            'reverse cowgirl',
-            'rimjob',
-            'rimming',
-            'rosy palm',
-            'rusty trombone',
-            's&m',
-            'sadism',
-            'scat',
-            'schlong',
-            'scissoring',
-            'semen',
-            'sex',
-            'sexo',
-            'sexy',
-            'shaved beaver',
-            'shaved pussy',
-            'shemale',
-            'shibari',
-            'shit',
-            'shota',
-            'shrimping',
-            'slanteye',
-            'slut',
-            'smut',
-            'snatch',
-            'snowballing',
-            'sodomize',
-            'sodomy',
-            'spic',
-            'spooge',
-            'spread legs',
-            'strap on',
-            'strapon',
-            'strappado',
-            'strip club',
-            'style doggy',
-            'suck',
-            'sucks',
-            'suicide girls',
-            'sultry women',
-            'swastika',
-            'swinger',
-            'tainted love',
-            'taste my',
-            'tea bagging',
-            'threesome',
-            'throating',
-            'tied up',
-            'tight white',
-            'tit',
-            'tits',
-            'titties',
-            'titty',
-            'tongue in a',
-            'topless',
-            'tosser',
-            'towelhead',
-            'tranny',
-            'tribadism',
-            'tub girl',
-            'tubgirl',
-            'tushy',
-            'twat',
-            'twink',
-            'twinkie',
-            'undressing',
-            'upskirt',
-            'urethra play',
-            'urophilia',
-            'vagina',
-            'venus mound',
-            'vibrator',
-            'violet blue',
-            'violet wand',
-            'vorarephilia',
-            'voyeur',
-            'vulva',
-            'wank',
-            'wet dream',
-            'wetback',
-            'white power',
-            'whore',
-            'women rapping',
-            'wrapping men',
-            'wrinkled starfish',
-            'xx',
-            'xxx',
-            'yaoi',
-            'yellow showers',
-            'yiffy',
-            'zoophilia',
+            "acrotomophilia",
+            "anal",
+            "anally",
+            "anilingus",
+            "anus",
+            "arsehole",
+            "ass",
+            "asses",
+            "asshole",
+            "assmunch",
+            "auto erotic",
+            "autoerotic",
+            "babeland",
+            "baby batter",
+            "ball gag",
+            "ball gravy",
+            "ball kicking",
+            "ball licking",
+            "ball sack",
+            "ball zack",
+            "ball sucking",
+            "bangbros",
+            "bareback",
+            "barely legal",
+            "barenaked",
+            "bastardo",
+            "bastinado",
+            "bbw",
+            "bdsm",
+            "beaver cleaver",
+            "beaver lips",
+            "bestiality",
+            "bi curious",
+            "big black",
+            "big breasts",
+            "big knockers",
+            "big tits",
+            "bimbos",
+            "birdlock",
+            "bitch",
+            "bitches",
+            "black cock",
+            "blonde action",
+            "blonde on blonde",
+            "blow j",
+            "blow your l",
+            "blow ourselves",
+            "blow m",
+            "blue waffle",
+            "blumpkin",
+            "bollocks",
+            "bondage",
+            "boner",
+            "boob",
+            "boobs",
+            "booty call",
+            "breasts",
+            "brown showers",
+            "brunette action",
+            "bukkake",
+            "bulldyke",
+            "bullshit",
+            "bullet vibe",
+            "bung hole",
+            "bunghole",
+            "busty",
+            "butt",
+            "buttcheeks",
+            "butthole",
+            "camel toe",
+            "camgirl",
+            "camslut",
+            "camwhore",
+            "carpet muncher",
+            "carpetmuncher",
+            "chocolate rosebuds",
+            "circlejerk",
+            "cleveland steamer",
+            "clit",
+            "clitoris",
+            "clover clamps",
+            "clusterfuck",
+            "cock",
+            "cocks",
+            "coprolagnia",
+            "coprophilia",
+            "cornhole",
+            "creampie",
+            "cream pie",
+            "cum",
+            "cumming",
+            "cunnilingus",
+            "cunt",
+            "damn",
+            "darkie",
+            "date rape",
+            "daterape",
+            "deep throat",
+            "deepthroat",
+            "dick",
+            "dildo",
+            "dirty pillows",
+            "dirty sanchez",
+            "dog style",
+            "doggie style",
+            "doggiestyle",
+            "doggy style",
+            "doggystyle",
+            "dolcett",
+            "domination",
+            "dominatrix",
+            "dommes",
+            "donkey punch",
+            "double dick",
+            "double dong",
+            "double penetration",
+            "dp action",
+            "dtf",
+            "eat my ass",
+            "ecchi",
+            "ejaculation",
+            "erection",
+            "erotic",
+            "erotism",
+            "escort",
+            "ethical slut",
+            "eunuch",
+            "faggot",
+            "posts each week",
+            "fecal",
+            "felch",
+            "fellatio",
+            "feltch",
+            "female squirting",
+            "femdom",
+            "figging",
+            "fingering",
+            "fisting",
+            "foot fetish",
+            "footjob",
+            "frotting",
+            "fuck",
+            "fucking",
+            "fuckin",
+            "fuckin'",
+            "fucked",
+            "fuckers",
+            "fuck buttons",
+            "fuckhead",
+            "fudge packer",
+            "fudgepacker",
+            "futanari",
+            "g-spot",
+            "gspot",
+            "gang bang",
+            "gay sex",
+            "genitals",
+            "giant cock",
+            "girl on",
+            "girl on top",
+            "girls gone wild",
+            "goatcx",
+            "goatse",
+            "goddamn",
+            "gokkun",
+            "golden shower",
+            "goo girl",
+            "goodpoop",
+            "goregasm",
+            "grope",
+            "group sex",
+            "guro",
+            "hand job",
+            "handjob",
+            "hard core",
+            "hardcore",
+            "hentai",
+            "homoerotic",
+            "honkey",
+            "hooker",
+            "horny",
+            "hot chick",
+            "how to kill",
+            "how to murder",
+            "huge fat",
+            "humping",
+            "incest",
+            "intercourse",
+            "jack off",
+            "jail bait",
+            "jailbait",
+            "jerk off",
+            "jerking off",
+            "jigaboo",
+            "jiggaboo",
+            "jiggerboo",
+            "jizz",
+            "juggs",
+            "kike",
+            "kinbaku",
+            "kinkster",
+            "kinky",
+            "knobbing",
+            "leather restraint",
+            "lemon party",
+            "lolita",
+            "lovemaking",
+            "lpt request",
+            "make me come",
+            "male squirting",
+            "masturbate",
+            "masturbated",
+            "masturbating",
+            "menage a trois",
+            "milf",
+            "milfs",
+            "missionary position",
+            "motherfucker",
+            "mound of venus",
+            "mr hands",
+            "muff diver",
+            "muffdiving",
+            "nambla",
+            "nawashi",
+            "negro",
+            "neonazi",
+            "nig nog",
+            "nigga",
+            "nigger",
+            "nimphomania",
+            "nipple",
+            "not safe for",
+            "nsfw",
+            "nsfw images",
+            "nude",
+            "nudity",
+            "nutsack",
+            "nut sack",
+            "nympho",
+            "nymphomania",
+            "octopussy",
+            "omorashi",
+            "one night stand",
+            "orgasm",
+            "orgy",
+            "paedophile",
+            "panties",
+            "panty",
+            "pedobear",
+            "pedophile",
+            "pegging",
+            "pee",
+            "penis",
+            "phone sex",
+            "piss pig",
+            "pissing",
+            "pisspig",
+            "playboy",
+            "pleasure chest",
+            "pole smoker",
+            "ponyplay",
+            "poof",
+            "poop chute",
+            "poopchute",
+            "porn",
+            "pornhub",
+            "porno",
+            "pornography",
+            "prince albert",
+            "pthc",
+            "pube",
+            "pubes",
+            "pussy",
+            "pussies",
+            "queaf",
+            "queer",
+            "raghead",
+            "raging boner",
+            "rape",
+            "raping",
+            "rapist",
+            "rectum",
+            "reverse cowgirl",
+            "rimjob",
+            "rimming",
+            "rosy palm",
+            "rusty trombone",
+            "s&m",
+            "sadism",
+            "scat",
+            "schlong",
+            "scissoring",
+            "semen",
+            "sex",
+            "sexo",
+            "sexy",
+            "shaved beaver",
+            "shaved pussy",
+            "shemale",
+            "shibari",
+            "shit",
+            "shota",
+            "shrimping",
+            "slanteye",
+            "slut",
+            "smut",
+            "snatch",
+            "snowballing",
+            "sodomize",
+            "sodomy",
+            "spic",
+            "spooge",
+            "spread legs",
+            "strap on",
+            "strapon",
+            "strappado",
+            "strip club",
+            "style doggy",
+            "suck",
+            "sucks",
+            "suicide girls",
+            "sultry women",
+            "swastika",
+            "swinger",
+            "tainted love",
+            "taste my",
+            "tea bagging",
+            "threesome",
+            "throating",
+            "tied up",
+            "tight white",
+            "tit",
+            "tits",
+            "titties",
+            "titty",
+            "tongue in a",
+            "topless",
+            "tosser",
+            "towelhead",
+            "tranny",
+            "tribadism",
+            "tub girl",
+            "tubgirl",
+            "tushy",
+            "twat",
+            "twink",
+            "twinkie",
+            "undressing",
+            "upskirt",
+            "urethra play",
+            "urophilia",
+            "vagina",
+            "venus mound",
+            "vibrator",
+            "violet blue",
+            "violet wand",
+            "vorarephilia",
+            "voyeur",
+            "vulva",
+            "wank",
+            "wet dream",
+            "wetback",
+            "white power",
+            "whore",
+            "women rapping",
+            "wrapping men",
+            "wrinkled starfish",
+            "xx",
+            "xxx",
+            "yaoi",
+            "yellow showers",
+            "yiffy",
+            "zoophilia",
         ]
 
     def normalize(self, text):
         result = text.lower()
-        result = result.replace('_', ' ')
+        result = result.replace("_", " ")
         for x in string.punctuation:
-            result = result.replace(x, '')
-        result = re.sub(
-            r"e?s$", "", result)
+            result = result.replace(x, "")
+        result = re.sub(r"e?s$", "", result)
         return result
 
     def filter_bad_words(self, text):
-        badWordMask = '!@#$%!@#$%^~!@%^~@#$%!@#$%^~!'
+        badWordMask = "!@#$%!@#$%^~!@%^~@#$%!@#$%^~!"
 
         brokenStr1 = text.split()
         for word in brokenStr1:
-            if (self.normalize(word) in self.arrBad or
-                word in self.arrBad):
+            if self.normalize(word) in self.arrBad or word in self.arrBad:
                 print(('***** PROFANITY WORD="%s"' % word))
-                text = text.replace(word, badWordMask[:len(word)])
+                text = text.replace(word, badWordMask[: len(word)])
 
         if len(brokenStr1) > 1:
             bigrams = list(zip(brokenStr1, brokenStr1[1:]))
             for bigram in bigrams:
                 phrase = "%s %s" % (bigram[0], bigram[1])
-                if (self.normalize(phrase) in self.arrBad or
-                    phrase in self.arrBad):
+                if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
                     print(('***** PROFANITY PHRASE="%s"' % phrase))
-                    text = text.replace(bigram[0], badWordMask[:len(bigram[0])])
-                    text = text.replace(bigram[1], badWordMask[:len(bigram[1])])
+                    text = text.replace(bigram[0], badWordMask[: len(bigram[0])])
+                    text = text.replace(bigram[1], badWordMask[: len(bigram[1])])
 
         if len(brokenStr1) > 2:
             trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
             for trigram in trigrams:
                 phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
-                if (self.normalize(phrase) in self.arrBad or
-                    phrase in self.arrBad):
+                if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
                     print(('***** PROFANITY PHRASE="%s"' % phrase))
-                    text = text.replace(trigram[0], badWordMask[:len(trigram[0])])
-                    text = text.replace(trigram[1], badWordMask[:len(trigram[1])])
-                    text = text.replace(trigram[2], badWordMask[:len(trigram[2])])
+                    text = text.replace(trigram[0], badWordMask[: len(trigram[0])])
+                    text = text.replace(trigram[1], badWordMask[: len(trigram[1])])
+                    text = text.replace(trigram[2], badWordMask[: len(trigram[2])])
         return text
 
     def contains_bad_words(self, text):
         brokenStr1 = text.split()
         for word in brokenStr1:
-            if (self.normalize(word) in self.arrBad or
-                word in self.arrBad):
+            if self.normalize(word) in self.arrBad or word in self.arrBad:
                 print(('***** PROFANITY WORD="%s"' % word))
                 return True
 
@@ -432,8 +428,7 @@ class profanity_filter:
             bigrams = list(zip(brokenStr1, brokenStr1[1:]))
             for bigram in bigrams:
                 phrase = "%s %s" % (bigram[0], bigram[1])
-                if (self.normalize(phrase) in self.arrBad or
-                    phrase in self.arrBad):
+                if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
                     print(('***** PROFANITY PHRASE="%s"' % phrase))
                     return True
 
@@ -441,15 +436,15 @@ class profanity_filter:
             trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
             for trigram in trigrams:
                 phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
-                if (self.normalize(phrase) in self.arrBad or
-                    phrase in self.arrBad):
+                if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
                     print(('***** PROFANITY PHRASE="%s"' % phrase))
                     return True
 
         return False
 
-#x = profanity_filter()
-#print(x.filter_bad_words("Fuck this auto erotic shit, it's not safe for work."))
-#print(x.contains_bad_words("cream pie their daughter."))
-#print(x.contains_bad_words("If you tell someone your penis is 6 inches it's pretty believable.  If you say it's half a foot no one will believe you."))
-#print(x.normalize("dickes"));
+
+# x = profanity_filter()
+# print(x.filter_bad_words("Fuck this auto erotic shit, it's not safe for work."))
+# print(x.contains_bad_words("cream pie their daughter."))
+# print(x.contains_bad_words("If you tell someone your penis is 6 inches it's pretty believable.  If you say it's half a foot no one will believe you."))
+# print(x.normalize("dickes"));
index 91cd33a43a1a0c78558981091d89b524b5deebb0..cae9b6f0058be939453404684946a957d014a650 100644 (file)
@@ -9,15 +9,18 @@ import profanity_filter
 import random
 import renderer_catalog
 
+
 class reddit_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer to pull text content from reddit."""
 
     def __init__(self, name_to_timeout_dict, subreddit_list, min_votes, font_size):
         super(reddit_renderer, self).__init__(name_to_timeout_dict, True)
         self.subreddit_list = subreddit_list
-        self.praw = praw.Reddit(client_id=secrets.reddit_client_id,
-                                client_secret=secrets.reddit_client_secret,
-                                user_agent=secrets.reddit_user_agent)
+        self.praw = praw.Reddit(
+            client_id=secrets.reddit_client_id,
+            client_secret=secrets.reddit_client_secret,
+            user_agent=secrets.reddit_user_agent,
+        )
         self.min_votes = min_votes
         self.font_size = font_size
         self.messages = grab_bag.grab_bag()
@@ -27,7 +30,7 @@ class reddit_renderer(renderer.debuggable_abstaining_renderer):
     def debug_prefix(self):
         x = ""
         for subreddit in self.subreddit_list:
-            x += ("%s " % subreddit)
+            x += "%s " % subreddit
         return "reddit(%s)" % x.strip()
 
     def periodic_render(self, key):
@@ -37,19 +40,23 @@ class reddit_renderer(renderer.debuggable_abstaining_renderer):
         elif key == "Shuffle":
             return self.shuffle_messages()
         else:
-            raise error('Unexpected operation')
+            raise error("Unexpected operation")
 
     def append_message(self, messages):
         for msg in messages:
-            if (not self.filter.contains_bad_words(msg.title)
+            if (
+                not self.filter.contains_bad_words(msg.title)
                 and msg.ups > self.min_votes
-                and not msg.title in self.deduper):
+                and not msg.title in self.deduper
+            ):
                 try:
                     self.deduper.add(msg.title)
                     content = "%d" % msg.ups
-                    if (msg.thumbnail != "self" and
-                        msg.thumbnail != "default" and
-                        msg.thumbnail != ""):
+                    if (
+                        msg.thumbnail != "self"
+                        and msg.thumbnail != "default"
+                        and msg.thumbnail != ""
+                    ):
                         content = '<IMG SRC="%s">' % msg.thumbnail
                     x = """
 <TABLE STYLE="font-size:%dpt;">
@@ -64,13 +71,19 @@ class reddit_renderer(renderer.debuggable_abstaining_renderer):
       <B>%s</B><BR><FONT COLOR=#bbbbbb>(%s)</FONT>
     </TD>
   </TR>
-</TABLE>""" % (self.font_size, content, msg.title, msg.author)
+</TABLE>""" % (
+                        self.font_size,
+                        content,
+                        msg.title,
+                        msg.author,
+                    )
                     self.messages.add(x)
                 except:
-                    self.debug_print('Unexpected exception, skipping message.')
+                    self.debug_print("Unexpected exception, skipping message.")
             else:
-                self.debug_print('skipped message "%s" for profanity or low score' % (
-                    msg.title))
+                self.debug_print(
+                    'skipped message "%s" for profanity or low score' % (msg.title)
+                )
 
     def scrape_reddit(self):
         self.deduper.clear()
@@ -92,12 +105,12 @@ class reddit_renderer(renderer.debuggable_abstaining_renderer):
             except:
                 pass
             try:
-                msg = self.praw.subreddit(subreddit).controversial('week')
+                msg = self.praw.subreddit(subreddit).controversial("week")
                 self.append_message(msg)
             except:
                 pass
             try:
-                msg = self.praw.subreddit(subreddit).top('day')
+                msg = self.praw.subreddit(subreddit).top("day")
                 self.append_message(msg)
             except:
                 pass
@@ -109,7 +122,7 @@ class reddit_renderer(renderer.debuggable_abstaining_renderer):
         layout.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
         x = ""
         for subreddit in self.subreddit_list:
-            x += ("%s " % subreddit)
+            x += "%s " % subreddit
         if len(x) > 30:
             if "SeaWA" in x:
                 x = "[local interests]"
@@ -127,31 +140,45 @@ class reddit_renderer(renderer.debuggable_abstaining_renderer):
         f.close()
         return True
 
+
 class til_reddit_renderer(reddit_renderer):
     def __init__(self, name_to_timeout_dict):
         super(til_reddit_renderer, self).__init__(
-            name_to_timeout_dict, ["todayilearned"], 200, 20)
+            name_to_timeout_dict, ["todayilearned"], 200, 20
+        )
+
 
 class quotes_reddit_renderer(reddit_renderer):
     def __init__(self, name_to_timeout_dict):
         super(quotes_reddit_renderer, self).__init__(
-            name_to_timeout_dict, ["quotes"], 200, 20)
+            name_to_timeout_dict, ["quotes"], 200, 20
+        )
+
 
 class showerthoughts_reddit_renderer(reddit_renderer):
     def __init__(self, name_to_timeout_dict):
         super(showerthoughts_reddit_renderer, self).__init__(
-            name_to_timeout_dict, ["showerthoughts"], 350, 24)
+            name_to_timeout_dict, ["showerthoughts"], 350, 24
+        )
+
 
 class seattle_reddit_renderer(reddit_renderer):
     def __init__(self, name_to_timeout_dict):
         super(seattle_reddit_renderer, self).__init__(
-            name_to_timeout_dict, ["seattle","seattleWA","SeaWA","bellevue","kirkland", "CoronavirusWA"], 50, 24)
+            name_to_timeout_dict,
+            ["seattle", "seattleWA", "SeaWA", "bellevue", "kirkland", "CoronavirusWA"],
+            50,
+            24,
+        )
+
 
 class lifeprotips_reddit_renderer(reddit_renderer):
     def __init__(self, name_to_timeout_dict):
         super(lifeprotips_reddit_renderer, self).__init__(
-            name_to_timeout_dict, ["lifeprotips"], 100, 24)
+            name_to_timeout_dict, ["lifeprotips"], 100, 24
+        )
+
 
-#x = reddit_renderer({"Test", 1234}, ["seattle","bellevue"], 50, 24)
-#x.periodic_render("Scrape")
-#x.periodic_render("Shuffle")
+# x = reddit_renderer({"Test", 1234}, ["seattle","bellevue"], 50, 24)
+# x.periodic_render("Scrape")
+# x.periodic_render("Shuffle")
index b78eb2b713e3b4901a912f8e763a90ee818953f3..2be7780c1c85ec02808abea8eb55fbd00fdbea57 100644 (file)
@@ -2,6 +2,7 @@ import time
 from datetime import datetime
 from decorators import invokation_logged
 
+
 class renderer(object):
     """Base class for something that can render."""
 
@@ -12,10 +13,12 @@ class renderer(object):
     def get_name(self):
         return self.__class__.__name__
 
+
 class abstaining_renderer(renderer):
     """A renderer that doesn't do it all the time."""
+
     def __init__(self, name_to_timeout_dict):
-        self.name_to_timeout_dict = name_to_timeout_dict;
+        self.name_to_timeout_dict = name_to_timeout_dict
         self.last_runs = {}
         for key in name_to_timeout_dict:
             self.last_runs[key] = 0
@@ -23,8 +26,9 @@ class abstaining_renderer(renderer):
     def should_render(self, keys_to_skip):
         now = time.time()
         for key in self.name_to_timeout_dict:
-            if (((now - self.last_runs[key]) > self.name_to_timeout_dict[key]) and
-                key not in keys_to_skip):
+            if (
+                (now - self.last_runs[key]) > self.name_to_timeout_dict[key]
+            ) and key not in keys_to_skip:
                 return key
         return None
 
@@ -42,24 +46,27 @@ class abstaining_renderer(renderer):
                 tries_per_key[key] = 0
 
             if tries_per_key[key] >= 3:
-                print('renderer: Too many failures for "%s.%s", giving up' % (
-                    self.get_name(), key))
+                print(
+                    'renderer: Too many failures for "%s.%s", giving up'
+                    % (self.get_name(), key)
+                )
                 keys_to_skip.add(key)
             else:
                 msg = 'renderer: executing "%s.%s"' % (self.get_name(), key)
-                if (tries_per_key[key] > 1):
+                if tries_per_key[key] > 1:
                     msg = msg + " (retry #%d)" % tries_per_key[key]
                 print(msg)
-                if (self.periodic_render(key)):
+                if self.periodic_render(key):
                     self.last_runs[key] = time.time()
 
     @invokation_logged
     def periodic_render(self, key):
         pass
 
+
 class debuggable_abstaining_renderer(abstaining_renderer):
     def __init__(self, name_to_timeout_dict, debug):
-        super(debuggable_abstaining_renderer, self).__init__(name_to_timeout_dict);
+        super(debuggable_abstaining_renderer, self).__init__(name_to_timeout_dict)
         self.debug = debug
 
     def debug_prefix(self):
index 794bd6f59f7aa0bedc461aa7823c94fd88c2972a..7e0bf834981b176faba50caf264d293e8e9d2ffe 100644 (file)
@@ -21,13 +21,16 @@ import twitter_renderer
 import weather_renderer
 import wsj_rss_renderer
 
-oauth = gdata_oauth.OAuth(secrets.google_client_id,
-                          secrets.google_client_secret)
+oauth = gdata_oauth.OAuth(secrets.google_client_id, secrets.google_client_secret)
 if not oauth.has_token():
     user_code = oauth.get_user_code()
-    print('------------------------------------------------------------')
-    print(('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
-        oauth.verification_url, user_code)))
+    print("------------------------------------------------------------")
+    print(
+        (
+            'Go to %s and enter the code "%s" (no quotes, case-sensitive)'
+            % (oauth.verification_url, user_code)
+        )
+    )
     oauth.get_new_token()
 
 seconds = 1
@@ -39,125 +42,120 @@ always = seconds * 1
 # frequency in the renderer thread of ~once a minute.  It just means that
 # everytime it check these will be stale and happen.
 __registry = [
-                 stranger_renderer.stranger_events_renderer(
-                     {"Fetch Events" : (hours * 12),
-                      "Shuffle Events" : (always)}),
-#                 pollen_renderer.pollen_count_renderer(
-#                     {"Poll" : (hours * 1)}),
-                 myq_renderer.garage_door_renderer(
-                     {"Poll MyQ" : (minutes * 5),
-                      "Update Page" : (always)}),
-                 bellevue_reporter_rss_renderer.bellevue_reporter_rss_renderer(
-                     {"Fetch News" : (hours * 1),
-                      "Shuffle News" : (always)},
-                     "www.bellevuereporter.com",
-                     [ "/feed/" ],
-                     "Bellevue Reporter" ),
-                 mynorthwest_rss_renderer.mynorthwest_rss_renderer(
-                     {"Fetch News" : (hours * 1),
-                      "Shuffle News" : (always)},
-                     "mynorthwest.com",
-                     [ "/feed/" ],
-                     "MyNorthwest News" ),
-                 cnn_rss_renderer.cnn_rss_renderer(
-                     {"Fetch News" : (hours * 1),
-                      "Shuffle News" : (always)},
-                     "rss.cnn.com",
-                     [ "/rss/cnn_tech.rss",
-                       "/rss/money_technology.rss" ],
-                     "CNNTechnology" ),
-                 cnn_rss_renderer.cnn_rss_renderer(
-                     {"Fetch News" : (hours * 1),
-                      "Shuffle News" : (always)},
-                     "rss.cnn.com",
-                     [ "/rss/cnn_topstories.rss",
-                       "/rss/cnn_world.rss",
-                       "/rss/cnn_us.rss" ],
-                     "CNNNews" ),
-                 wsj_rss_renderer.wsj_rss_renderer(
-                     {"Fetch News" : (hours * 1),
-                      "Shuffle News" : (always)},
-                     "feeds.a.dj.com",
-                     [ "/rss/RSSWorldNews.xml" ],
-                     "WSJNews" ),
-                 wsj_rss_renderer.wsj_rss_renderer(
-                     {"Fetch News" : (hours * 1),
-                      "Shuffle News" : (always)},
-                     "feeds.a.dj.com",
-                     [ "/rss/RSSMarketsMain.xml",
-                       "/rss/WSJcomUSBusiness.xml"],
-                     "WSJBusiness" ),
-                 google_news_rss_renderer.google_news_rss_renderer(
-                     {"Fetch News" : (minutes * 30),
-                      "Shuffle News" : (always)},
-                      "news.google.com",
-                      [ "/rss?hl=en-US&gl=US&ceid=US:en" ],
-                     "Google News" ),
-                 health_renderer.periodic_health_renderer(
-                     {"Update Perioidic Job Health" : (seconds * 45)}),
-                 stock_renderer.stock_quote_renderer(
-                     {"Update Prices" : (hours * 1)},
-                     [ "MSFT",
-                       "SPY",
-                       "GBTC",
-                       "IEMG",
-                       "OPTAX",
-                       "SPAB",
-                       "SPHD",
-                       "SGOL",
-                       "VDC",
-                       "VYMI",
-                       "VNQ",
-                       "VNQI" ]),
-                 stevens_renderer.stevens_pass_conditions_renderer(
-                     {"Fetch Pass Conditions" : (hours * 1)},
-                     "www.wsdot.com",
-                     [ "/traffic/rssfeeds/stevens/Default.aspx" ]),
-                 seattletimes_rss_renderer.seattletimes_rss_renderer(
-                     {"Fetch News" : (hours * 4),
-                      "Shuffle News" : (always)},
-                     "www.seattletimes.com",
-                     [ "/pacific-nw-magazine/feed/",
-                       "/life/feed/",
-                       "/outdoors/feed/" ],
-                     "Seattle Times Segments"),
-                 weather_renderer.weather_renderer(
-                     {"Fetch Weather (Bellevue)": (hours * 4)},
-                     "home"),
-                 weather_renderer.weather_renderer(
-                     {"Fetch Weather (Stevens)": (hours * 4)},
-                     "stevens"),
-                 weather_renderer.weather_renderer(
-                     {"Fetch Weather (Telma)" : (hours * 4)},
-                     "telma"),
-                 local_photos_mirror_renderer.local_photos_mirror_renderer(
-                     {"Index Photos": (hours * 24),
-                      "Choose Photo": (always)}),
-                 gkeep_renderer.gkeep_renderer(
-                     {"Update": (minutes * 10)}),
-                 gcal_renderer.gcal_renderer(
-                     {"Render Upcoming Events": (hours * 2),
-                      "Look For Triggered Events": (always)},
-                     oauth),
-                 reddit_renderer.showerthoughts_reddit_renderer(
-                     {"Scrape": (hours * 6),
-                      "Shuffle": (always)} ),
-                 reddit_renderer.til_reddit_renderer(
-                     {"Scrape": (hours * 6),
-                      "Shuffle": (always)} ),
-                 reddit_renderer.seattle_reddit_renderer(
-                     {"Scrape": (hours * 6),
-                      "Shuffle": (always)}),
-                 reddit_renderer.quotes_reddit_renderer(
-                     {"Scrape": (hours * 6),
-                      "Shuffle": (always)}),
-                 reddit_renderer.lifeprotips_reddit_renderer(
-                     {"Scrape": (hours * 6),
-                      "Shuffle": (always)}),
-                 twitter_renderer.twitter_renderer(
-                     {"Fetch Tweets": (minutes * 15),
-                      "Shuffle Tweets": (always)})
+    stranger_renderer.stranger_events_renderer(
+        {"Fetch Events": (hours * 12), "Shuffle Events": (always)}
+    ),
+    #                 pollen_renderer.pollen_count_renderer(
+    #                     {"Poll" : (hours * 1)}),
+    myq_renderer.garage_door_renderer(
+        {"Poll MyQ": (minutes * 5), "Update Page": (always)}
+    ),
+    bellevue_reporter_rss_renderer.bellevue_reporter_rss_renderer(
+        {"Fetch News": (hours * 1), "Shuffle News": (always)},
+        "www.bellevuereporter.com",
+        ["/feed/"],
+        "Bellevue Reporter",
+    ),
+    mynorthwest_rss_renderer.mynorthwest_rss_renderer(
+        {"Fetch News": (hours * 1), "Shuffle News": (always)},
+        "mynorthwest.com",
+        ["/feed/"],
+        "MyNorthwest News",
+    ),
+    cnn_rss_renderer.cnn_rss_renderer(
+        {"Fetch News": (hours * 1), "Shuffle News": (always)},
+        "rss.cnn.com",
+        ["/rss/cnn_tech.rss", "/rss/money_technology.rss"],
+        "CNNTechnology",
+    ),
+    cnn_rss_renderer.cnn_rss_renderer(
+        {"Fetch News": (hours * 1), "Shuffle News": (always)},
+        "rss.cnn.com",
+        ["/rss/cnn_topstories.rss", "/rss/cnn_world.rss", "/rss/cnn_us.rss"],
+        "CNNNews",
+    ),
+    wsj_rss_renderer.wsj_rss_renderer(
+        {"Fetch News": (hours * 1), "Shuffle News": (always)},
+        "feeds.a.dj.com",
+        ["/rss/RSSWorldNews.xml"],
+        "WSJNews",
+    ),
+    wsj_rss_renderer.wsj_rss_renderer(
+        {"Fetch News": (hours * 1), "Shuffle News": (always)},
+        "feeds.a.dj.com",
+        ["/rss/RSSMarketsMain.xml", "/rss/WSJcomUSBusiness.xml"],
+        "WSJBusiness",
+    ),
+    google_news_rss_renderer.google_news_rss_renderer(
+        {"Fetch News": (minutes * 30), "Shuffle News": (always)},
+        "news.google.com",
+        ["/rss?hl=en-US&gl=US&ceid=US:en"],
+        "Google News",
+    ),
+    health_renderer.periodic_health_renderer(
+        {"Update Perioidic Job Health": (seconds * 45)}
+    ),
+    stock_renderer.stock_quote_renderer(
+        {"Update Prices": (hours * 1)},
+        [
+            "MSFT",
+            "SPY",
+            "GBTC",
+            "IEMG",
+            "OPTAX",
+            "SPAB",
+            "SPHD",
+            "SGOL",
+            "VDC",
+            "VYMI",
+            "VNQ",
+            "VNQI",
+        ],
+    ),
+    stevens_renderer.stevens_pass_conditions_renderer(
+        {"Fetch Pass Conditions": (hours * 1)},
+        "www.wsdot.com",
+        ["/traffic/rssfeeds/stevens/Default.aspx"],
+    ),
+    seattletimes_rss_renderer.seattletimes_rss_renderer(
+        {"Fetch News": (hours * 4), "Shuffle News": (always)},
+        "www.seattletimes.com",
+        ["/pacific-nw-magazine/feed/", "/life/feed/", "/outdoors/feed/"],
+        "Seattle Times Segments",
+    ),
+    weather_renderer.weather_renderer(
+        {"Fetch Weather (Bellevue)": (hours * 4)}, "home"
+    ),
+    weather_renderer.weather_renderer(
+        {"Fetch Weather (Stevens)": (hours * 4)}, "stevens"
+    ),
+    weather_renderer.weather_renderer({"Fetch Weather (Telma)": (hours * 4)}, "telma"),
+    local_photos_mirror_renderer.local_photos_mirror_renderer(
+        {"Index Photos": (hours * 24), "Choose Photo": (always)}
+    ),
+    gkeep_renderer.gkeep_renderer({"Update": (minutes * 10)}),
+    gcal_renderer.gcal_renderer(
+        {"Render Upcoming Events": (hours * 2), "Look For Triggered Events": (always)},
+        oauth,
+    ),
+    reddit_renderer.showerthoughts_reddit_renderer(
+        {"Scrape": (hours * 6), "Shuffle": (always)}
+    ),
+    reddit_renderer.til_reddit_renderer({"Scrape": (hours * 6), "Shuffle": (always)}),
+    reddit_renderer.seattle_reddit_renderer(
+        {"Scrape": (hours * 6), "Shuffle": (always)}
+    ),
+    reddit_renderer.quotes_reddit_renderer(
+        {"Scrape": (hours * 6), "Shuffle": (always)}
+    ),
+    reddit_renderer.lifeprotips_reddit_renderer(
+        {"Scrape": (hours * 6), "Shuffle": (always)}
+    ),
+    twitter_renderer.twitter_renderer(
+        {"Fetch Tweets": (minutes * 15), "Shuffle Tweets": (always)}
+    ),
 ]
 
+
 def get_renderers():
     return __registry
index e84b3cc3834ec9bd798f24e4be476dd6c1132a25..5525e2e1d14935aadd4c91bd15668ca89daecf3c 100644 (file)
@@ -11,6 +11,7 @@ import re
 import sets
 import xml.etree.ElementTree as ET
 
+
 class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page):
         super(reuters_rss_renderer, self).__init__(name_to_timeout_dict, False)
@@ -31,7 +32,7 @@ class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
         elif key == "Shuffle News":
             return self.shuffle_news()
         else:
-            raise error('Unexpected operation')
+            raise error("Unexpected operation")
 
     def shuffle_news(self):
         headlines = page_builder.page_builder()
@@ -43,7 +44,7 @@ class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
             return False
         for msg in subset:
             headlines.add_item(msg)
-        f = file_writer.file_writer('reuters-%s_4_none.html' % self.page)
+        f = file_writer.file_writer("reuters-%s_4_none.html" % self.page)
         headlines.render_html(f)
         f.close()
 
@@ -52,13 +53,13 @@ class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
         details.set_title("%s" % self.page)
         subset = self.details.subset(1)
         if subset is None:
-            self.debug_print("Not enough details to choose from.");
+            self.debug_print("Not enough details to choose from.")
             return False
         for msg in subset:
             blurb = msg
             blurb += "</TD>\n"
             details.add_item(blurb)
-        g = file_writer.file_writer('reuters-details-%s_6_none.html' % self.page)
+        g = file_writer.file_writer("reuters-details-%s_6_none.html" % self.page)
         details.render_html(g)
         g.close()
         return True
@@ -71,59 +72,64 @@ class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
 
         for uri in self.feed_uris:
             self.conn = http.client.HTTPConnection(self.feed_site)
-            self.conn.request(
-                "GET",
-                uri,
-                None,
-                {"Accept-Charset": "utf-8"})
+            self.conn.request("GET", uri, None, {"Accept-Charset": "utf-8"})
             response = self.conn.getresponse()
             if response.status != 200:
-                print(("%s: RSS fetch_news error, response: %d" % (self.page,
-                                                                  response.status)))
+                print(
+                    (
+                        "%s: RSS fetch_news error, response: %d"
+                        % (self.page, response.status)
+                    )
+                )
                 self.debug_print(response.read())
                 return False
 
             rss = ET.fromstring(response.read())
             channel = rss[0]
             for item in channel.getchildren():
-                title = item.findtext('title')
-                if (title is None or
-                    "euters" in title or
-                    title == "Editor's Choice" or
-                    self.filter.contains_bad_words(title)):
+                title = item.findtext("title")
+                if (
+                    title is None
+                    or "euters" in title
+                    or title == "Editor's Choice"
+                    or self.filter.contains_bad_words(title)
+                ):
                     continue
-                pubdate = item.findtext('pubDate')
-                image = item.findtext('image')
-                descr = item.findtext('description')
+                pubdate = item.findtext("pubDate")
+                image = item.findtext("image")
+                descr = item.findtext("description")
                 if descr is not None:
-                    descr = re.sub('<[^>]+>', '', descr)
+                    descr = re.sub("<[^>]+>", "", descr)
 
                 blurb = """<DIV style="padding:8px;
                                        font-size:34pt;
                                        -webkit-column-break-inside:avoid;">"""
                 if image is not None:
-                    blurb += '<IMG SRC=\"%s\" ALIGN=LEFT HEIGHT=115" style="padding:8px;">\n' % image
-                blurb += '<P><B>%s</B>' % title
+                    blurb += (
+                        '<IMG SRC="%s" ALIGN=LEFT HEIGHT=115" style="padding:8px;">\n'
+                        % image
+                    )
+                blurb += "<P><B>%s</B>" % title
 
                 if pubdate != None:
                     # Thu, 04 Jun 2015 08:16:35 GMT|-0400
-                    pubdate = pubdate.rsplit(' ', 1)[0]
-                    dt = datetime.datetime.strptime(pubdate,
-                                                    '%a, %d %b %Y %H:%M:%S')
+                    pubdate = pubdate.rsplit(" ", 1)[0]
+                    dt = datetime.datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S")
                     if dt < oldest:
                         continue
-                    blurb += dt.strftime(" <FONT COLOR=#bbbbbb>(%a&nbsp;%b&nbsp;%d)</FONT>")
+                    blurb += dt.strftime(
+                        " <FONT COLOR=#bbbbbb>(%a&nbsp;%b&nbsp;%d)</FONT>"
+                    )
 
                 if descr is not None:
                     longblurb = blurb
                     longblurb += "<BR>"
                     longblurb += descr
                     longblurb += "</DIV>"
-                    longblurb = longblurb.replace("font-size:34pt",
-                                                  "font-size:44pt")
+                    longblurb = longblurb.replace("font-size:34pt", "font-size:44pt")
 
-                self.details.add(longblurb.encode('utf8'))
+                self.details.add(longblurb.encode("utf8"))
                 blurb += "</DIV>"
-                self.news.add(blurb.encode('utf8'))
+                self.news.add(blurb.encode("utf8"))
                 count += 1
         return count > 0
index 8a36f4f93622c11ebc3253559dba365bc536152e..18ed2fc3b97e9de3c64e32538a7349d160be6bf1 100644 (file)
@@ -1,31 +1,31 @@
 import datetime
 import generic_news_rss_renderer as gnrss
 
+
 class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
-    interesting_categories = frozenset([
-        'Nation',
-        'World',
-        'Life',
-        'Technology'
-        'Local News',
-        'Food',
-        'Drink',
-        'Today File',
-        'Seahawks',
-        'Oddities',
-        'Packfic NW',
-        'Home',
-        'Garden',
-        'Travel',
-        'Outdoors',
-    ])
+    interesting_categories = frozenset(
+        [
+            "Nation",
+            "World",
+            "Life",
+            "Technology", "Local News",
+            "Food",
+            "Drink",
+            "Today File",
+            "Seahawks",
+            "Oddities",
+            "Packfic NW",
+            "Home",
+            "Garden",
+            "Travel",
+            "Outdoors",
+        ]
+    )
 
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
         super(seattletimes_rss_renderer, self).__init__(
-            name_to_timeout_dict,
-            feed_site,
-            feed_uris,
-            page_title)
+            name_to_timeout_dict, feed_site, feed_uris, page_title
+        )
 
     def debug_prefix(self):
         return "seattletimes"
@@ -49,9 +49,9 @@ class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
 
         details = {}
         for detail in item.getchildren():
-            self.debug_print("detail %s => %s (%s)" % (detail.tag,
-                                                       detail.attrib,
-                                                       detail.text))
+            self.debug_print(
+                "detail %s => %s (%s)" % (detail.tag, detail.attrib, detail.text)
+            )
             if detail.text != None:
                 details[detail.tag] = detail.text
         if "category" not in details:
@@ -74,10 +74,11 @@ class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
             return False
         return len(description) >= 65
 
+
 # Test
-#x = seattletimes_rss_renderer({"Test", 123},
+# x = seattletimes_rss_renderer({"Test", 123},
 #                              "www.seattletimes.com",
 #                              [ "/life/feed/" ],
 #                              "nonnews")
-#x.periodic_render("Fetch News")
-#x.periodic_render("Shuffle News")
+# x.periodic_render("Fetch News")
+# x.periodic_render("Shuffle News")
index 18f300b2e1b7aed53bdb412ec975e0c9eeab891e..ed2afa4c5ff7f67cf07094baaf035bfe471543f9 100644 (file)
@@ -3,10 +3,12 @@ import file_writer
 import http.client
 import xml.etree.ElementTree as ET
 
+
 class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris):
         super(stevens_pass_conditions_renderer, self).__init__(
-            name_to_timeout_dict, False)
+            name_to_timeout_dict, False
+        )
         self.feed_site = feed_site
         self.feed_uris = feed_uris
 
@@ -14,14 +16,10 @@ class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
         return "stevens"
 
     def periodic_render(self, key):
-        f = file_writer.file_writer('stevens-conditions_1_86400.html')
+        f = file_writer.file_writer("stevens-conditions_1_86400.html")
         for uri in self.feed_uris:
             self.conn = http.client.HTTPSConnection(self.feed_site)
-            self.conn.request(
-                "GET",
-                uri,
-                None,
-                {"Accept-Charset": "utf-8"})
+            self.conn.request("GET", uri, None, {"Accept-Charset": "utf-8"})
             response = self.conn.getresponse()
             if response.status == 200:
                 raw = response.read()
@@ -30,15 +28,21 @@ class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
                 for item in channel.getchildren():
                     if item.tag == "title":
                         f.write("<h1>%s</h1><hr>" % item.text)
-                        f.write('<IMG WIDTH=512 ALIGN=RIGHT HEIGHT=382 SRC="https://images.wsdot.wa.gov/nc/002vc06430.jpg?t=637059938785646824" style="padding:8px;">')
+                        f.write(
+                            '<IMG WIDTH=512 ALIGN=RIGHT HEIGHT=382 SRC="https://images.wsdot.wa.gov/nc/002vc06430.jpg?t=637059938785646824" style="padding:8px;">'
+                        )
                     elif item.tag == "item":
                         for x in item.getchildren():
                             if x.tag == "description":
                                 text = x.text
-                                text = text.replace("<strong>Stevens Pass US2</strong><br/>", "")
+                                text = text.replace(
+                                    "<strong>Stevens Pass US2</strong><br/>", ""
+                                )
                                 text = text.replace("<br/><br/>", "<BR>")
-                                text = text.replace("<strong>Elevation Meters:</strong>1238<BR>", "")
-                                f.write('<P>\n%s\n' % text)
+                                text = text.replace(
+                                    "<strong>Elevation Meters:</strong>1238<BR>", ""
+                                )
+                                f.write("<P>\n%s\n" % text)
                 f.close()
                 return True
         f.close()
index f8491e6a9eb73b8ba06ea35ffd1eb96f5ca0b5c2..7b34455eb610283946eeb37a9db571449b9e3c22 100644 (file)
@@ -10,6 +10,7 @@ import secrets
 import time
 import urllib.request, urllib.error, urllib.parse
 
+
 class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
     # format exchange:symbol
     def __init__(self, name_to_timeout_dict, symbols):
@@ -26,25 +27,27 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
 
     def periodic_render(self, key):
         now = datetime.datetime.now()
-        if (now.hour < (9 - 3) or
-            now.hour >= (17 - 3) or
-            datetime.datetime.today().weekday() > 4):
+        if (
+            now.hour < (9 - 3)
+            or now.hour >= (17 - 3)
+            or datetime.datetime.today().weekday() > 4
+        ):
             self.debug_print("The stock market is closed so not re-rendering")
             return True
 
-        if (self.thread is None or not self.thread.is_alive()):
+        if self.thread is None or not self.thread.is_alive():
             self.debug_print("Spinning up a background thread...")
-            self.thread = Thread(target = self.thread_internal_render, args=())
+            self.thread = Thread(target=self.thread_internal_render, args=())
             self.thread.start()
         return True
 
     def thread_internal_render(self):
         symbols_finished = 0
-        f = file_writer.file_writer('stock_3_86400.html')
+        f = file_writer.file_writer("stock_3_86400.html")
         f.write("<H1>Stock Quotes</H1><HR>")
         f.write("<TABLE WIDTH=99%>")
         for symbol in self.symbols:
-#            print "---------- Working on %s\n" % symbol
+            #            print "---------- Working on %s\n" % symbol
 
             # https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=5min&apikey=<key>
 
@@ -54,26 +57,29 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
             cooked = ""
             while True:
                 key = self.get_random_key()
-                url = self.prefix + "function=GLOBAL_QUOTE&symbol=%s&apikey=%s" % (symbol, key)
+                url = self.prefix + "function=GLOBAL_QUOTE&symbol=%s&apikey=%s" % (
+                    symbol,
+                    key,
+                )
                 raw = urllib.request.urlopen(url).read()
                 cooked = json.loads(raw)
-                if 'Global Quote' not in cooked:
-#                    print "%s\n" % cooked
-                    print("Failure %d, sleep %d sec...\n" % (attempts + 1,
-                                                             2 ** attempts))
+                if "Global Quote" not in cooked:
+                    #                    print "%s\n" % cooked
+                    print(
+                        "Failure %d, sleep %d sec...\n" % (attempts + 1, 2 ** attempts)
+                    )
                     time.sleep(2 ** attempts)
                     attempts += 1
-                    if attempts > 10: # we'll wait up to 512 seconds per symbol
+                    if attempts > 10:  # we'll wait up to 512 seconds per symbol
                         break
                 else:
                     break
 
             # These fuckers...
-            if 'Global Quote' not in cooked:
-                print("Can't get data for symbol %s: %s\n" % (
-                    symbol, raw))
+            if "Global Quote" not in cooked:
+                print("Can't get data for symbol %s: %s\n" % (symbol, raw))
                 continue
-            cooked = cooked['Global Quote']
+            cooked = cooked["Global Quote"]
 
             # {
             #   u'Global Quote':
@@ -92,20 +98,20 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
             # }
 
             price = "?????"
-            if '05. price' in cooked:
-                price = cooked['05. price']
+            if "05. price" in cooked:
+                price = cooked["05. price"]
                 price = price[:-2]
 
             percent_change = "?????"
-            if '10. change percent' in cooked:
-                percent_change = cooked['10. change percent']
-                if not '-' in percent_change:
+            if "10. change percent" in cooked:
+                percent_change = cooked["10. change percent"]
+                if not "-" in percent_change:
                     percent_change = "+" + percent_change
 
             change = "?????"
             cell_color = "#bbbbbb"
-            if '09. change' in cooked:
-                change = cooked['09. change']
+            if "09. change" in cooked:
+                change = cooked["09. change"]
                 if "-" in change:
                     cell_color = "#b00000"
                 else:
@@ -113,12 +119,13 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
                 change = change[:-2]
 
             if symbols_finished % 4 == 0:
-                if (symbols_finished > 0):
+                if symbols_finished > 0:
                     f.write("</TR>")
                 f.write("<TR>")
             symbols_finished += 1
 
-            f.write("""
+            f.write(
+                """
 <TD WIDTH=20%% HEIGHT=150 BGCOLOR="%s">
   <!-- Container -->
   <DIV style="position:relative;
@@ -147,15 +154,14 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
             <B>$%s</B>
     </DIV>
   </DIV>
-</TD>""" % (cell_color,
-            symbol,
-            price,
-            percent_change,
-            change))
+</TD>"""
+                % (cell_color, symbol, price, percent_change, change)
+            )
         f.write("</TR></TABLE>")
         f.close()
         return True
 
-#x = stock_quote_renderer({}, ["MSFT", "GOOG", "GOOGL", "OPTAX", "VNQ"])
-#x.periodic_render(None)
-#x.periodic_render(None)
+
+# x = stock_quote_renderer({}, ["MSFT", "GOOG", "GOOGL", "OPTAX", "VNQ"])
+# x.periodic_render(None)
+# x.periodic_render(None)
index 2084c395a4fa4502612abeb32b26d2abffe0b65e..4020353c4e4e5001c22d5f99cd996d41e099bd23 100644 (file)
@@ -10,6 +10,7 @@ import re
 import renderer
 import renderer_catalog
 
+
 class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
         super(stranger_events_renderer, self).__init__(name_to_timeout_dict, True)
@@ -78,7 +79,7 @@ class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
 
         for msg in subset:
             layout.add_item(msg)
-        f = file_writer.file_writer('stranger-events_2_36000.html')
+        f = file_writer.file_writer("stranger-events_2_36000.html")
         layout.render_html(f)
         f.close()
         return True
@@ -100,28 +101,31 @@ class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
         if delta > 1:
             ts = now + datetime.timedelta(delta)
             next_sat = datetime.datetime.strftime(ts, "%Y-%m-%d")
-            feed_uris.append("/stranger-seattle/events/?start-date=%s&page=1" % next_sat)
-            feed_uris.append("/stranger-seattle/events/?start-date=%s&page=2" % next_sat)
+            feed_uris.append(
+                "/stranger-seattle/events/?start-date=%s&page=1" % next_sat
+            )
+            feed_uris.append(
+                "/stranger-seattle/events/?start-date=%s&page=2" % next_sat
+            )
         delta += 1
         if delta > 1:
             ts = now + datetime.timedelta(delta)
             next_sun = datetime.datetime.strftime(ts, "%Y-%m-%d")
-            feed_uris.append("/stranger-seattle/events/?start-date=%s&page=1" % next_sun)
-            feed_uris.append("/stranger-seattle/events/?start-date=%s&page=2" % next_sun)
+            feed_uris.append(
+                "/stranger-seattle/events/?start-date=%s&page=1" % next_sun
+            )
+            feed_uris.append(
+                "/stranger-seattle/events/?start-date=%s&page=2" % next_sun
+            )
 
         for uri in feed_uris:
             try:
                 self.debug_print("fetching 'https://%s%s'" % (self.feed_site, uri))
                 self.conn = http.client.HTTPSConnection(self.feed_site)
-                self.conn.request(
-                    "GET",
-                    uri,
-                    None,
-                    {"Accept-Charset": "utf-8"})
+                self.conn.request("GET", uri, None, {"Accept-Charset": "utf-8"})
                 response = self.conn.getresponse()
                 if response.status != 200:
-                    self.debug_print("Connection failed, status %d" % (
-                        response.status))
+                    self.debug_print("Connection failed, status %d" % (response.status))
                     self.debug_print(response.getheaders())
                     continue
                 raw = response.read()
@@ -131,25 +135,32 @@ class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
 
             soup = BeautifulSoup(raw, "html.parser")
             filter = profanity_filter.profanity_filter()
-            for x in soup.find_all('div', class_='row event list-item mb-3 py-3'):
-                text = x.get_text();
-                if (filter.contains_bad_words(text)):
+            for x in soup.find_all("div", class_="row event list-item mb-3 py-3"):
+                text = x.get_text()
+                if filter.contains_bad_words(text):
                     continue
                 raw = str(x)
-                raw = raw.replace('src="/',
-                                  'align="left" src="https://www.thestranger.com/')
-                raw = raw.replace('href="/',
-                                  'href="https://www.thestranger.com/')
-                raw = raw.replace('FREE', 'Free')
-                raw = raw.replace('Save Event', '')
-                raw = re.sub('^\s*$', '', raw, 0, re.MULTILINE)
-                #raw = re.sub('\n+', '\n', raw)
-                raw = re.sub('<span[^<>]*class="calendar-post-ticket"[^<>]*>.*</#span>', '', raw, 0, re.DOTALL | re.IGNORECASE)
+                raw = raw.replace(
+                    'src="/', 'align="left" src="https://www.thestranger.com/'
+                )
+                raw = raw.replace('href="/', 'href="https://www.thestranger.com/')
+                raw = raw.replace("FREE", "Free")
+                raw = raw.replace("Save Event", "")
+                raw = re.sub("^\s*$", "", raw, 0, re.MULTILINE)
+                # raw = re.sub('\n+', '\n', raw)
+                raw = re.sub(
+                    '<span[^<>]*class="calendar-post-ticket"[^<>]*>.*</#span>',
+                    "",
+                    raw,
+                    0,
+                    re.DOTALL | re.IGNORECASE,
+                )
                 self.events.add(raw)
             self.debug_print("fetched %d events so far." % self.events.size())
         return self.events.size() > 0
 
+
 # Test
-#x = stranger_events_renderer({"Test", 123})
-#x.periodic_render("Fetch Events")
-#x.periodic_render("Shuffle Events")
+# x = stranger_events_renderer({"Test", 123})
+# x.periodic_render("Fetch Events")
+# x.periodic_render("Shuffle Events")
index d2c31635d084888736fb7712f65d8054f1e5c630..9bb7ec5b155a22c257aaa2e0d1f1f33b42005e73 100644 (file)
@@ -1,4 +1,3 @@
-
 class trigger(object):
     """Base class for something that can trigger a page becomming active."""
 
index 0d362240161a53760c1a80db8611b6322df1bff8..cf8c82aa61032a7a03c92f8fd8017da43c4d39cb 100644 (file)
@@ -2,9 +2,12 @@ import camera_trigger
 import gcal_trigger
 import myq_trigger
 
-__registry = [ camera_trigger.any_camera_trigger(),
-               myq_trigger.myq_trigger(),
-               gcal_trigger.gcal_trigger() ]
+__registry = [
+    camera_trigger.any_camera_trigger(),
+    myq_trigger.myq_trigger(),
+    gcal_trigger.gcal_trigger(),
+]
+
 
 def get_triggers():
     return __registry
index 173842ef0ea2801182760ff7a1d0111f1739bff7..1c9dbeebcf31e924f0ffb75e730921f3d04a3940 100644 (file)
@@ -6,6 +6,7 @@ import re
 import secrets
 import tweepy
 
+
 class twitter_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
         super(twitter_renderer, self).__init__(name_to_timeout_dict, False)
@@ -14,7 +15,8 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
         self.handles_by_author = dict()
         self.filter = profanity_filter.profanity_filter()
         self.urlfinder = re.compile(
-            "((http|https)://[\-A-Za-z0-9\\.]+/[\?\&\-A-Za-z0-9_\\.]+)")
+            "((http|https)://[\-A-Za-z0-9\\.]+/[\?\&\-A-Za-z0-9_\\.]+)"
+        )
 
         # == OAuth Authentication ==
         #
@@ -23,14 +25,14 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
 
         # The consumer keys can be found on your application's Details
         # page located at https://dev.twitter.com/apps (under "OAuth settings")
-        consumer_key=secrets.twitter_consumer_key
-        consumer_secret=secrets.twitter_consumer_secret
+        consumer_key = secrets.twitter_consumer_key
+        consumer_secret = secrets.twitter_consumer_secret
 
         # The access tokens can be found on your applications's Details
         # page located at https://dev.twitter.com/apps (located
         # under "Your access token")
-        access_token=secrets.twitter_access_token
-        access_token_secret=secrets.twitter_access_token_secret
+        access_token = secrets.twitter_access_token
+        access_token_secret = secrets.twitter_access_token_secret
 
         auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
         auth.set_access_token(access_token, access_token_secret)
@@ -48,11 +50,11 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
         elif key == "Shuffle Tweets":
             return self.shuffle_tweets()
         else:
-            raise error('Unexpected operation')
+            raise error("Unexpected operation")
 
     def fetch_tweets(self):
         try:
-            tweets = self.api.home_timeline(tweet_mode='extended', count=200)
+            tweets = self.api.home_timeline(tweet_mode="extended", count=200)
         except:
             print("Exception while fetching tweets!")
             return False
@@ -72,37 +74,39 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
         handle = self.handles_by_author[author]
         tweets = self.tweets_by_author[author]
         already_seen = set()
-        f = file_writer.file_writer('twitter_10_3600.html')
-        f.write('<TABLE WIDTH=96%><TR><TD WIDTH=86%>')
-        f.write('<H2>%s (@%s)</H2></TD>\n' % (author, handle))
+        f = file_writer.file_writer("twitter_10_3600.html")
+        f.write("<TABLE WIDTH=96%><TR><TD WIDTH=86%>")
+        f.write("<H2>%s (@%s)</H2></TD>\n" % (author, handle))
         f.write('<TD ALIGN="right" VALIGN="top">')
         f.write('<IMG SRC="twitter.png" WIDTH=42></TD></TR></TABLE>\n')
-        f.write('<HR>\n<UL>\n')
+        f.write("<HR>\n<UL>\n")
         count = 0
         length = 0
         for tweet in tweets:
             text = tweet.full_text
-            if ((text not in already_seen) and
-                (not self.filter.contains_bad_words(text))):
+            if (text not in already_seen) and (
+                not self.filter.contains_bad_words(text)
+            ):
                 already_seen.add(text)
                 text = self.linkify(text)
-                f.write('<LI><B>%s</B>\n' % text)
+                f.write("<LI><B>%s</B>\n" % text)
                 count += 1
                 length += len(text)
                 if count > 3 or length > 270:
                     break
-        f.write('</UL>\n')
+        f.write("</UL>\n")
         f.close()
         return True
 
+
 # Test
-#t = twitter_renderer(
+# t = twitter_renderer(
 #    {"Fetch Tweets" : 1,
 #     "Shuffle Tweets" : 1})
-#x = "bla bla bla https://t.co/EjWnT3UA9U bla bla"
-#x = t.linkify(x)
-#print x
-#if t.fetch_tweets() == 0:
+# x = "bla bla bla https://t.co/EjWnT3UA9U bla bla"
+# x = t.linkify(x)
+# print x
+# if t.fetch_tweets() == 0:
 #    print("Error fetching tweets, none fetched.")
-#else:
+# else:
 #    t.shuffle_tweets()
index 51a29e7680400268ea09ecda867df6ad41105545..fb22e4dd052acfa4731dc62a30b928084cb00051 100644 (file)
--- a/utils.py
+++ b/utils.py
@@ -3,9 +3,11 @@ import os
 import constants
 from datetime import datetime
 
+
 def timestamp():
     t = datetime.fromtimestamp(time.time())
-    return t.strftime('%d/%b/%Y:%H:%M:%S%Z')
+    return t.strftime("%d/%b/%Y:%H:%M:%S%Z")
+
 
 def describe_age_of_file(filename):
     try:
@@ -16,6 +18,7 @@ def describe_age_of_file(filename):
     except Exception as e:
         return "?????"
 
+
 def describe_age_of_file_briefly(filename):
     try:
         now = time.time()
@@ -25,39 +28,42 @@ def describe_age_of_file_briefly(filename):
     except Exception as e:
         return "?????"
 
+
 def describe_duration(age):
     days = divmod(age, constants.seconds_per_day)
     hours = divmod(days[1], constants.seconds_per_hour)
     minutes = divmod(hours[1], constants.seconds_per_minute)
 
     descr = ""
-    if (days[0] > 1):
+    if days[0] > 1:
         descr = "%d days, " % days[0]
-    elif (days[0] == 1):
+    elif days[0] == 1:
         descr = "1 day, "
-    if (hours[0] > 1):
+    if hours[0] > 1:
         descr = descr + ("%d hours, " % hours[0])
-    elif (hours[0] == 1):
+    elif hours[0] == 1:
         descr = descr + "1 hour, "
-    if (len(descr) > 0):
+    if len(descr) > 0:
         descr = descr + "and "
-    if (minutes[0] == 1):
+    if minutes[0] == 1:
         descr = descr + "1 minute"
     else:
         descr = descr + ("%d minutes" % minutes[0])
     return descr
 
+
 def describe_duration_briefly(age):
     days = divmod(age, constants.seconds_per_day)
     hours = divmod(days[1], constants.seconds_per_hour)
     minutes = divmod(hours[1], constants.seconds_per_minute)
     descr = ""
-    if (days[0] > 0):
+    if days[0] > 0:
         descr = "%dd " % days[0]
-    if (hours[0] > 0):
+    if hours[0] > 0:
         descr = descr + ("%dh " % hours[0])
     descr = descr + ("%dm" % minutes[0])
     return descr
 
-#x = describe_age_of_file_briefly("pages/clock_10_none.html")
-#print x
+
+# x = describe_age_of_file_briefly("pages/clock_10_none.html")
+# print x
index fdd4fe125d3257a1303797b338d6981dcaca56fc..e11703bd5cdc3980530ada67d0658226d43daa66 100644 (file)
@@ -7,12 +7,11 @@ import secrets
 import urllib.request, urllib.error, urllib.parse
 import random
 
+
 class weather_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer to fetch forecast from wunderground."""
 
-    def __init__(self,
-                 name_to_timeout_dict,
-                 file_prefix):
+    def __init__(self, name_to_timeout_dict, file_prefix):
         super(weather_renderer, self).__init__(name_to_timeout_dict, False)
         self.file_prefix = file_prefix
 
@@ -23,11 +22,11 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
         return self.fetch_weather()
 
     def describe_time(self, index):
-        if (index <= 1):
+        if index <= 1:
             return "overnight"
-        elif (index <= 3):
+        elif index <= 3:
             return "morning"
-        elif (index <= 5):
+        elif index <= 5:
             return "afternoon"
         else:
             return "evening"
@@ -45,9 +44,9 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             return "heavy"
 
     def describe_magnitude(self, mm):
-        if (mm < 2):
+        if mm < 2:
             return "light"
-        elif (mm < 10):
+        elif mm < 10:
             return "moderate"
         else:
             return "heavy"
@@ -95,42 +94,40 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
         total_snow = 0
         count = min(len(conditions), len(rain), len(snow))
         for x in range(0, count):
-            seen_rain = rain[x] > 0;
-            seen_snow = snow[x] > 0;
+            seen_rain = rain[x] > 0
+            seen_snow = snow[x] > 0
             total_snow += snow[x]
             txt = conditions[x].lower()
-            if ("cloud" in txt):
+            if "cloud" in txt:
                 cloud_count += 1
-            if ("clear" in txt or "sun" in txt):
+            if "clear" in txt or "sun" in txt:
                 clear_count += 1
 
-        if (seen_rain and seen_snow):
-            if (total_snow < 10):
+        if seen_rain and seen_snow:
+            if total_snow < 10:
                 return "sleet.gif"
             else:
                 return "snow.gif"
-        if (seen_snow):
-            if (total_snow < 10):
+        if seen_snow:
+            if total_snow < 10:
                 return "flurries.gif"
             else:
                 return "snow.gif"
-        if (seen_rain):
+        if seen_rain:
             return "rain.gif"
-        if (cloud_count >= 6):
+        if cloud_count >= 6:
             return "mostlycloudy.gif"
-        elif (cloud_count >= 4):
+        elif cloud_count >= 4:
             return "partlycloudy.gif"
-        if (clear_count >= 7):
+        if clear_count >= 7:
             return "sunny.gif"
-        elif (clear_count >= 6):
+        elif clear_count >= 6:
             return "mostlysunny.gif"
-        elif (clear_count >= 4):
+        elif clear_count >= 4:
             return "partlysunny.gif"
         return "clear.gif"
 
-    def describe_weather(self,
-                         high, low,
-                         wind, conditions, rain, snow):
+    def describe_weather(self, high, low, wind, conditions, rain, snow):
         # High temp: 65
         # Low temp: 44
         #             -onight------  -morning----- -afternoon--  -evening----
@@ -159,24 +156,24 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             elif txt == "Rain":
                 txt = "rainy"
 
-            if (txt != lcondition):
+            if txt != lcondition:
                 if txt != "Snow" and txt != "Rain":
                     current += txt
                     chunks += 1
                 lcondition = txt
 
             txt = self.describe_wind(wind[x])
-            if (txt != lwind):
-                if (len(current) > 0):
+            if txt != lwind:
+                if len(current) > 0:
                     current += " with "
                 current += txt + " winds"
                 lwind = txt
                 chunks += 1
 
             txt = self.describe_precip(rain[x], snow[x])
-            if (txt != lprecip):
-                if (len(current) > 0):
-                    if (chunks > 1):
+            if txt != lprecip:
+                if len(current) > 0:
+                    if chunks > 1:
                         current += " and "
                     else:
                         current += " with "
@@ -184,21 +181,21 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
                 current += txt
                 lprecip = txt
 
-            if (len(current)):
-                if (ltime != time):
-                    if (random.randint(0, 3) == 0):
-                        if (time != "overnight"):
+            if len(current):
+                if ltime != time:
+                    if random.randint(0, 3) == 0:
+                        if time != "overnight":
                             descr += current + " in the " + time + ". "
                         descr += current + " overnight. "
                     else:
-                        if (time != "overnight"):
+                        if time != "overnight":
                             descr += "In the "
                         descr += time + ", " + current + ". "
                 else:
                     current = current.replace("cloudy", "clouds")
                     descr += current + " developing. "
                 ltime = time
-        if (ltime == "overnight" or ltime == "morning"):
+        if ltime == "overnight" or ltime == "morning":
             descr += "Conditions continuing the rest of the day. "
         descr = descr.replace("with breezy winds", "and breezy")
         descr = descr.replace("Clear developing", "Skies clearing")
@@ -216,8 +213,10 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             text_location = "Bellevue, WA"
             param = "id=5786882"
 
-        www = urllib.request.urlopen('http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial' % (
-            param, secrets.openweather_key))
+        www = urllib.request.urlopen(
+            "http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial"
+            % (param, secrets.openweather_key)
+        )
         response = www.read()
         www.close()
         parsed_json = json.loads(response)
@@ -239,14 +238,17 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
         #     "dt_txt":"2017-01-30 18:00:00"
         #     },
         #     {"dt":1485810000,....
-        f = file_writer.file_writer('weather-%s_3_10800.html' % self.file_prefix)
-        f.write("""
+        f = file_writer.file_writer("weather-%s_3_10800.html" % self.file_prefix)
+        f.write(
+            """
 <h1>Weather at %s:</h1>
 <hr>
 <center>
 <table width=99%% cellspacing=10 border=0>
-        <tr>""" % text_location)
-        count = parsed_json['cnt']
+        <tr>"""
+            % text_location
+        )
+        count = parsed_json["cnt"]
 
         ts = {}
         highs = {}
@@ -256,8 +258,8 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
         rain = {}
         snow = {}
         for x in range(0, count):
-            data = parsed_json['list'][x]
-            dt = data['dt_txt']  # 2019-10-07 18:00:00
+            data = parsed_json["list"][x]
+            dt = data["dt_txt"]  # 2019-10-07 18:00:00
             date = dt.split(" ")[0]
             time = dt.split(" ")[1]
             wind[date] = []
@@ -269,17 +271,17 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             ts[date] = 0
 
         for x in range(0, count):
-            data = parsed_json['list'][x]
-            dt = data['dt_txt']  # 2019-10-07 18:00:00
+            data = parsed_json["list"][x]
+            dt = data["dt_txt"]  # 2019-10-07 18:00:00
             date = dt.split(" ")[0]
             time = dt.split(" ")[1]
-            _ = data['dt']
-            if (_ > ts[date]):
+            _ = data["dt"]
+            if _ > ts[date]:
                 ts[date] = _
             temp = data["main"]["temp"]
-            if (highs[date] < temp):
+            if highs[date] < temp:
                 highs[date] = temp
-            if (temp < lows[date]):
+            if temp < lows[date]:
                 lows[date] = temp
             wind[date].append(data["wind"]["speed"])
             conditions[date].append(data["weather"][0]["main"])
@@ -315,22 +317,22 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             #  u'wind': {u'speed': 6.31, u'deg': 10.09}}
 
         # Next 5 half-days
-        #for x in xrange(0, 5):
+        # for x in xrange(0, 5):
         #    fcast = parsed_json['forecast']['txt_forecast']['forecastday'][x]
         #    text = fcast['fcttext']
         #    text = re.subn(r' ([0-9]+)F', r' \1&deg;F', text)[0]
         #    f.write('<td style="vertical-align:top;font-size:75%%"><P STYLE="padding:8px;">%s</P></td>' % text)
-        #f.write('</tr></table>')
-        #f.close()
-        #return True
+        # f.write('</tr></table>')
+        # f.close()
+        # return True
 
-        #f.write("<table border=0 cellspacing=10>\n")
+        # f.write("<table border=0 cellspacing=10>\n")
         days_seen = {}
         for date in sorted(highs.keys()):
             today = datetime.fromtimestamp(ts[date])
-            formatted_date = today.strftime('%a %e %b')
-            if (formatted_date in days_seen):
-                continue;
+            formatted_date = today.strftime("%a %e %b")
+            if formatted_date in days_seen:
+                continue
             days_seen[formatted_date] = True
         num_days = len(list(days_seen.keys()))
 
@@ -343,48 +345,71 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
                 precip += _
 
             today = datetime.fromtimestamp(ts[date])
-            formatted_date = today.strftime('%a %e %b')
-            if (formatted_date in days_seen):
-                continue;
+            formatted_date = today.strftime("%a %e %b")
+            if formatted_date in days_seen:
+                continue
             days_seen[formatted_date] = True
             f.write('<td width=%d%% style="vertical-align:top;">\n' % (100 / num_days))
-            f.write('<table border=0>\n')
+            f.write("<table border=0>\n")
 
             # Date
-            f.write('  <tr><td colspan=3 height=50><b><center><font size=6>' + formatted_date + '</font></center></b></td></tr>\n')
+            f.write(
+                "  <tr><td colspan=3 height=50><b><center><font size=6>"
+                + formatted_date
+                + "</font></center></b></td></tr>\n"
+            )
 
             # Icon
-            f.write('  <tr><td colspan=3 height=100><center><img src="/icons/weather/%s" height=125></center></td></tr>\n' %
-                    self.pick_icon(conditions[date], rain[date], snow[date]))
+            f.write(
+                '  <tr><td colspan=3 height=100><center><img src="/icons/weather/%s" height=125></center></td></tr>\n'
+                % self.pick_icon(conditions[date], rain[date], snow[date])
+            )
 
             # Low temp
             color = "#000099"
-            if (lows[date] <= 32.5):
+            if lows[date] <= 32.5:
                 color = "#009999"
-            f.write('  <tr><td width=33%% align=left><font color="%s"><b>%d&deg;F&nbsp;&nbsp;</b></font></td>\n' % (
-                color, int(lows[date])))
+            f.write(
+                '  <tr><td width=33%% align=left><font color="%s"><b>%d&deg;F&nbsp;&nbsp;</b></font></td>\n'
+                % (color, int(lows[date]))
+            )
 
             # Total precip
             precip *= 0.0393701
-            if (precip > 0.025):
-                f.write('      <td width=33%%><center><b><font style="background-color:#dfdfff; color:#003355">%3.1f"</font></b></center></td>\n' % precip)
+            if precip > 0.025:
+                f.write(
+                    '      <td width=33%%><center><b><font style="background-color:#dfdfff; color:#003355">%3.1f"</font></b></center></td>\n'
+                    % precip
+                )
             else:
-                f.write('      <td width=33%>&nbsp;</td>\n')
+                f.write("      <td width=33%>&nbsp;</td>\n")
 
             # High temp
             color = "#800000"
-            if (highs[date] >= 80):
+            if highs[date] >= 80:
                 color = "#AA0000"
-            f.write('      <td align=right><font color="%s"><b>&nbsp;&nbsp;%d&deg;F</b></font></td></tr>\n' % (
-                color, int(highs[date])))
+            f.write(
+                '      <td align=right><font color="%s"><b>&nbsp;&nbsp;%d&deg;F</b></font></td></tr>\n'
+                % (color, int(highs[date]))
+            )
 
             # Text "description"
-            f.write('<tr><td colspan=3 style="vertical-align:top;font-size:75%%">%s</td></tr>\n' %
-                    self.describe_weather(highs[date], lows[date], wind[date], conditions[date], rain[date], snow[date]))
-            f.write('</table>\n</td>\n')
+            f.write(
+                '<tr><td colspan=3 style="vertical-align:top;font-size:75%%">%s</td></tr>\n'
+                % self.describe_weather(
+                    highs[date],
+                    lows[date],
+                    wind[date],
+                    conditions[date],
+                    rain[date],
+                    snow[date],
+                )
+            )
+            f.write("</table>\n</td>\n")
         f.write("</tr></table></center>")
         return True
 
-#x = weather_renderer({"Stevens": 1000},
+
+# x = weather_renderer({"Stevens": 1000},
 #                     "stevens")
-#x.periodic_render("Stevens")
+# x.periodic_render("Stevens")
index a8ccf29c871e54dd4c09a915e0796e539710ed4e..f9410186b5014fd000f692212de03dd2ee6afe37 100644 (file)
@@ -1,12 +1,11 @@
 import generic_news_rss_renderer
 
+
 class wsj_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
     def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
         super(wsj_rss_renderer, self).__init__(
-            name_to_timeout_dict,
-            feed_site,
-            feed_uris,
-            page_title)
+            name_to_timeout_dict, feed_site, feed_uris, page_title
+        )
         self.debug = 1
 
     def debug_prefix(self):
@@ -19,9 +18,9 @@ class wsj_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
         return "wsj-details-%s" % (self.page_title)
 
     def find_image(self, item):
-        image = item.findtext('image')
+        image = item.findtext("image")
         if image is not None:
-            url = image.get('url')
+            url = image.get("url")
             return url
         return None
 
@@ -32,23 +31,22 @@ class wsj_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
         if self.is_item_older_than_n_days(item, 7):
             self.debug_print("%s: is too old!" % title)
             return False
-        return ("WSJ.com" not in title and
-                "WSJ.com" not in description)
+        return "WSJ.com" not in title and "WSJ.com" not in description
 
     def item_is_interesting_for_article(self, title, description, item):
         if self.is_item_older_than_n_days(item, 7):
             self.debug_print("%s: is too old!" % title)
             return False
-        return ("WSJ.com" not in title and
-                "WSJ.com" not in description)
+        return "WSJ.com" not in title and "WSJ.com" not in description
+
 
 # Test
-#x = wsj_rss_renderer(
+# x = wsj_rss_renderer(
 #    {"Fetch News" : 1,
 #     "Shuffle News" : 1},
 #    "feeds.a.dj.com",
 #    [ "/rss/RSSWorldNews.xml" ],
 #    "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
 #    print "Error fetching news, no items fetched."
-#x.shuffle_news()
+# x.shuffle_news()