Various changes: filter football/live-stream news, add chooser page filters, add RSS fetch timeouts, and move page expirations into filenames
author    Scott Gasch <[email protected]>
Sat, 28 Nov 2020 21:25:21 +0000 (13:25 -0800)
committer Scott Gasch <[email protected]>
Sat, 28 Nov 2020 21:25:21 +0000 (13:25 -0800)
19 files changed:
bellevue_reporter_rss_renderer.py
chooser.py
constants.py
gcal_renderer.py
generic_news_rss_renderer.py
gkeep_renderer.py
google_news_rss_renderer.py
health_renderer.py
kiosk.py
local_photos_mirror_renderer.py
myq_renderer.py
profanity_filter.py
reddit_renderer.py
renderer.py
renderer_catalog.py
seattletimes_rss_renderer.py
stevens_renderer.py
stranger_renderer.py
twitter_renderer.py

diff --git a/bellevue_reporter_rss_renderer.py b/bellevue_reporter_rss_renderer.py
index b71a34bafcdc60d80957da7ed2c01e9d8084cca8..78ec69484069901e8e67292d22c25333700658c5 100644
@@ -33,12 +33,24 @@ class bellevue_reporter_rss_renderer(gnrss.generic_news_rss_renderer):
         if self.is_item_older_than_n_days(item, 10):
             self.debug_print("%s: is too old!" % title)
             return False
+        if (title.find("NFL") != -1 or
+            re.search("[Ll]ive [Ss]tream", title) is not None or
+            re.search("[Ll]ive[Ss]tream", title) is not None or
+            re.search("[Ll]ive [Ss]tream", description) is not None):
+            self.debug_print("%s: looks like it's about football." % title)
+            return False
         return True
 
     def item_is_interesting_for_article(self, title, description, item):
         if self.is_item_older_than_n_days(item, 10):
             self.debug_print("%s: is too old!" % title)
             return False
+        if (title.find(" NFL") != -1 or
+            re.search("[Ll]ive [Ss]tream", title) is not None or
+            re.search("[Ll]ive[Ss]tream", title) is not None or
+            re.search("[Ll]ive [Ss]tream", description) is not None):
+            self.debug_print("%s: looks like it's about football." % title)
+            return False
         return True
 
 # Test
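
Aside: a standalone sketch of the football filter added above (function name hypothetical; the two title patterns collapse into one regex since "[Ll]ive ?[Ss]tream" matches both spellings):

import re

def looks_like_football(title, description):
    # "[Ll]ive ?[Ss]tream" covers both "live stream" and "livestream",
    # which the diff above tests with two separate patterns.
    if "NFL" in title:
        return True
    if re.search("[Ll]ive ?[Ss]tream", title) is not None:
        return True
    return re.search("[Ll]ive [Ss]tream", description) is not None

assert looks_like_football("Seahawks live stream tonight", "")
assert not looks_like_football("City council approves new park", "")
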
diff --git a/chooser.py b/chooser.py
index df662da35a930a83d5d41860b24f44145f023d66..9bf98e303a91d95c7aba32272f0351da4e7c1e0d 100644
@@ -1,3 +1,4 @@
+import datetime
 import os
 import random
 import re
@@ -34,12 +35,22 @@ class chooser(object):
         pass
 
 class weighted_random_chooser(chooser):
-    """Chooser that does it via weighted RNG"""
-    def __init__(self):
+    """Chooser that picks the next page via weighted random selection."""
+    def dont_choose_page_twice_in_a_row_filter(self, choice):
+        if choice == self.last_choice:
+            return False
+        self.last_choice = choice
+        return True
+
+    def __init__(self, filter_list):
         self.last_choice = ""
         self.valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
         self.pages = None
         self.count = 0
+        if filter_list is None:
+            filter_list = []
+        self.filter_list = filter_list
+        self.filter_list.append(self.dont_choose_page_twice_in_a_row_filter)
 
     def choose_next_page(self):
         if (self.pages == None or
@@ -54,26 +65,39 @@ class weighted_random_chooser(chooser):
                 weight = int(result.group(2))
                 weights.append(weight)
                 total_weight += weight
-
         if (total_weight <= 0):
             raise error
 
         while True:
-            pick = random.randrange(0, total_weight - 1)
+            random_pick = random.randrange(0, total_weight)  # stop is exclusive
             so_far = 0
             for x in range(0, len(weights)):
                 so_far += weights[x]
-                if (so_far > pick and
-                    self.pages[x] != self.last_choice):
-                    self.last_choice = self.pages[x]
-                    self.count += 1
-                    return self.pages[x]
+                if so_far > random_pick:
+                    break
+            choice = self.pages[x]
+
+            # Allow filter list to suppress pages.
+            choice_is_filtered = False
+            for f in self.filter_list:
+                if not f(choice):
+                    print("chooser: %s filtered by %s" % (choice, f.__name__))
+                    choice_is_filtered = True
+                    break
+            if choice_is_filtered:
+                continue
+
+            # We're good...
+            self.count += 1
+            return choice
 
 class weighted_random_chooser_with_triggers(weighted_random_chooser):
     """Same as WRC but has trigger events"""
-    def __init__(self, trigger_list):
-        weighted_random_chooser.__init__(self)
+    def __init__(self, trigger_list, filter_list):
+        weighted_random_chooser.__init__(self, filter_list)
         self.trigger_list = trigger_list
+        if trigger_list is None:
+            self.trigger_list = []
         self.page_queue = set(())
 
     def check_for_triggers(self):
@@ -93,9 +117,9 @@ class weighted_random_chooser_with_triggers(weighted_random_chooser):
 
         triggered = self.check_for_triggers()
 
-        # First try to satisfy from the page queue
+        # First try to satisfy from the page queue.
         if (len(self.page_queue) > 0):
-            print("Pulling page from queue")
+            print("chooser: Pulling page from queue...")
             page = None
             priority = None
             for t in self.page_queue:
@@ -133,4 +157,18 @@ class rotating_chooser(chooser):
         self.count += 1
         return page
 
-#x = weighted_random_chooser_with_triggers(None)
+# Test
+def filter_news_during_dinnertime(page):
+    now = datetime.datetime.now()
+    is_dinnertime = 17 <= now.hour <= 20
+    return (not is_dinnertime or
+            not ("cnn" in page or
+                 "news" in page or
+                 "mynorthwest" in page or
+                 "seattle" in page or
+                 "stranger" in page or
+                 "twitter" in page or
+                 "wsj" in page))
+
+#x = weighted_random_chooser_with_triggers([], [ filter_news_during_dinnertime ])
+#print(x.choose_next_page())
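
Aside: the valid_filename regex above encodes the kiosk's page-naming convention that several renames in this commit rely on (e.g. gcal_3_none.html becoming gcal_3_86400.html). A minimal parse sketch, assuming the fields are prefix, weight, and expiration in seconds:

import re

PAGE_RE = re.compile(r"([^_]+)_(\d+)_([^.]+)\.html")

def parse_page_name(filename):
    # Returns (prefix, weight, expiration) or None for malformed names.
    m = PAGE_RE.match(filename)
    if m is None:
        return None
    return m.group(1), int(m.group(2)), m.group(3)

print(parse_page_name("gcal_3_86400.html"))  # ('gcal', 3, '86400')
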
diff --git a/constants.py b/constants.py
index 880fb854075af48612ff4f7e84b75c8d359a8cbf..3dfa4a3b7eec82bfa6720ce7690681e62f3f589a 100644
@@ -1,5 +1,3 @@
-#!/usr/local/bin/python
-
 refresh_period_sec = 22
 render_period_sec = 30
 pages_dir = "/usr/local/export/www/kiosk/pages"
diff --git a/gcal_renderer.py b/gcal_renderer.py
index c3be3d70164bbf9c55f4450c16c64a41cf185fd8..a248d1d93ff13fc3d8ecda322f9720ab8b240395 100644
@@ -86,8 +86,9 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
         page_token = None
         def format_datetime(x):
             return datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%SZ')
-        time_min = datetime.datetime.now()
-        time_max = time_min + datetime.timedelta(95)
+        now = datetime.datetime.now()
+        time_min = now - datetime.timedelta(1)
+        time_max = now + datetime.timedelta(95)
         time_min, time_max = list(map(format_datetime, (time_min, time_max)))
         self.debug_print("time_min is %s" % time_min)
         self.debug_print("time_max is %s" % time_max)
@@ -95,7 +96,7 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
         # Writes 2 files:
         #  + "upcoming events",
         #  + a countdown timer for a subset of events,
-        f = file_writer.file_writer('gcal_3_none.html')
+        f = file_writer.file_writer('gcal_3_86400.html')
         f.write('<h1>Upcoming Calendar Events:</h1><hr>\n')
         f.write('<center><table width=96%>\n')
 
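
Aside: the widened query window above, reproduced standalone (the one-day lookback presumably keeps in-progress events visible; function name hypothetical):

import datetime

def gcal_query_window(now=None):
    # One day back, 95 days forward, in the timestamp format the hunk
    # above formats with format_datetime().
    now = now or datetime.datetime.now()
    fmt = lambda t: datetime.datetime.strftime(t, '%Y-%m-%dT%H:%M:%SZ')
    return fmt(now - datetime.timedelta(1)), fmt(now + datetime.timedelta(95))

print(gcal_query_window())
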
diff --git a/generic_news_rss_renderer.py b/generic_news_rss_renderer.py
index 5cf6e6c897109bb810b615e39e58560f9f091fe2..698f7aa5bd6e4ca14f1cb71b19d80ca7907dc9f7 100644
@@ -127,7 +127,7 @@ a:active {
   font-weight: bold;
 }
 </STYLE>""")
-        f = file_writer.file_writer('%s_%s_none.html' % (
+        f = file_writer.file_writer('%s_%s_25900.html' % (
             self.get_headlines_page_prefix(),
             self.get_headlines_page_priority()))
         headlines.render_html(f)
@@ -162,7 +162,7 @@ a:active {
             blurb = msg
             blurb += u'</TD>'
             details.add_item(blurb)
-        g = file_writer.file_writer('%s_%s_none.html' % (
+        g = file_writer.file_writer('%s_%s_86400.html' % (
             self.get_details_page_prefix(),
             self.get_details_page_priority()))
         details.render_html(g)
@@ -177,16 +177,23 @@ a:active {
         for uri in self.feed_uris:
             if self.should_use_https():
                 self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
-                self.conn = http.client.HTTPSConnection(self.feed_site)
+                self.conn = http.client.HTTPSConnection(self.feed_site, timeout=20)
             else:
                 self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
-                self.conn = http.client.HTTPConnection(self.feed_site)
+                self.conn = http.client.HTTPConnection(self.feed_site, timeout=20)
             self.conn.request(
                 "GET",
                 uri,
                 None,
-                {"Accept-Charset": "utf-8"})
-            response = self.conn.getresponse()
+                { "Accept": "*/*",
+                  "Cache-control": "max-age=59",
+                  "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"})
+            try:
+                response = self.conn.getresponse()
+            except Exception as e:
+                print("Exception in generic RSS renderer HTTP connection: %s" % e)
+                return False
+
             if response.status != 200:
                 print(("%s: RSS fetch_news error, response: %d" % (self.page_title,
                                                                   response.status)))
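
Aside: a self-contained sketch of the hardened fetch above, with placeholder host/URI and the bare except narrowed to Exception:

import http.client

def fetch_feed(site, uri, use_https=True):
    # Placeholder host/URI; mirrors the hardened fetch in the hunk above.
    cls = (http.client.HTTPSConnection if use_https
           else http.client.HTTPConnection)
    conn = cls(site, timeout=20)  # a hung feed can't stall the render loop
    try:
        conn.request("GET", uri, None,
                     {"Accept": "*/*", "Cache-control": "max-age=59"})
        response = conn.getresponse()
    except Exception as e:
        print("RSS fetch failed: %s" % e)
        return None
    if response.status != 200:
        print("RSS fetch error, response: %d" % response.status)
        return None
    return response.read()

# e.g. fetch_feed("www.example.com", "/feed/")
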
diff --git a/gkeep_renderer.py b/gkeep_renderer.py
index f8313bd64f239a49b6ca741b0af7b88090a46e56..de1116d1ef04dd2e47a6eff1b96450c21ce8897d 100644
@@ -52,10 +52,9 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
             self.debug_print("Note title '%s'" % title)
             if contents != '' and not contents.isspace():
                 contents = strikethrough.sub('', contents)
+                self.debug_print("Note contents:\n%s" % contents)
                 contents = contents.replace(u'\u2610 ',
                                             u'<LI><INPUT TYPE="checkbox">&nbsp;')
-
-                #self.debug_print("Note contents:\n%s" % contents)
                 contents = linkify.sub(r'<a href="\1">\1</a>', contents)
 
                 individual_lines = contents.split("\n")
@@ -66,13 +65,14 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
                     length = len(x)
                     if length > max_length:
                         max_length = length
-                    spaces = len(x) - len(x.lstrip(' '))
-                    spaces /= 2
-                    spaces = int(spaces)
+                    leading_spaces = len(x) - len(x.lstrip(' '))
+                    leading_spaces /= 2
+                    leading_spaces = int(leading_spaces)
                     x = x.lstrip(' ')
-                    for y in range(0, spaces):
+                    # self.debug_print(" * (%d) '%s'" % (leading_spaces, x))
+                    for y in range(0, leading_spaces):
                         x = "<UL>" + x
-                    for y in range(0, spaces):
+                    for y in range(0, leading_spaces):
                         x = x + "</UL>"
                     contents = contents + x + "\n"
 
@@ -122,5 +122,5 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
         return True
 
 # Test
-x = gkeep_renderer({"Test", 1234})
-x.periodic_render("Test")
+#x = gkeep_renderer({"Test", 1234})
+#x.periodic_render("Test")
diff --git a/google_news_rss_renderer.py b/google_news_rss_renderer.py
index 7ca37e880666b139cbbca1e5cfdf7c9da7e1b62f..b4290f3f6c0c9628ebf1b61150fe044e31abdbef 100644
@@ -27,15 +27,20 @@ class google_news_rss_renderer(generic_news_rss_renderer.generic_news_rss_render
             descr = descr + " (%s)" % source
         return descr
 
+    def munge_description_internal(self, descr):
+        if len(descr) > 450:
+            descr = descr[:450]
+            descr = re.sub(r"<[^>]*$", "", descr)
+            descr = descr + " [...]"
+        descr += "</A></LI></UL></OL></P>"
+        return descr
+
     def munge_description(self, description):
         soup = BeautifulSoup(description)
         for a in soup.findAll('a'):
             del a['href']
         descr = str(soup)
-        if len(descr) > 400:
-            descr = descr[:400]
-            descr = descr + " [...]"
-        return descr
+        return self.munge_description_internal(descr)
 
     def find_image(self, item):
         return None
@@ -59,3 +64,8 @@ class google_news_rss_renderer(generic_news_rss_renderer.generic_news_rss_render
 #if x.fetch_news() == 0:
 #    print("Error fetching news, no items fetched.")
 #x.shuffle_news()
+#
+#descr = "this is a lot of really long text about nothign in particular.  It's pretty interesting, don't you think?  I hope that the munge description method works by both truncating it and remembering to close any open <LI>items as well as making sure not to truncate in the middle of a <A HREF=\"whatever\" these are a bunch of useless arguments to the A tag that make it really long so that the truncate will happen in the middle of it.  I'm getting kind of tired of typing shit so I'm going to revert to copy pasta now.  Sorry if you were getting into this story.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.  The quick brown fox jumps over the lazy dog.</A></LI> Out!"
+#d = x.munge_description_internal(descr)
+#print(d)
+
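
Aside: the key trick in munge_description_internal is stripping a dangling partial tag after the hard 450-character cut; a standalone sketch (helper name hypothetical):

import re

def truncate_html_blurb(descr, limit=450):
    if len(descr) > limit:
        descr = descr[:limit]
        # The cut may land mid-tag; strip a trailing unterminated "<...".
        descr = re.sub(r"<[^>]*$", "", descr)
        descr += " [...]"
    # Close anything the blurb may have left open, as the method above does.
    descr += "</A></LI></UL></OL></P>"
    return descr

print(truncate_html_blurb("x" * 445 + '<A HREF="https://example.com/long-url">text</A>'))
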
diff --git a/health_renderer.py b/health_renderer.py
index 4eeba985163d8d512ef8f60ce3151e09ddc6262c..63f923fe74e04b6c7274c76edd8061b1bdfbefd9 100644
@@ -17,7 +17,15 @@ class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
         days = constants.seconds_per_day
         hours = constants.seconds_per_hour
         mins = constants.seconds_per_minute
+        minutes = mins
         limits = {
+            timestamps + 'last_http_probe_wannabe_house'  : mins * 10,
+            timestamps + 'last_http_probe_meerkat_cabin'  : mins * 10,
+            timestamps + 'last_http_probe_dns_house'      : mins * 10,
+            timestamps + 'last_http_probe_rpi_cabin'      : mins * 10,
+            timestamps + 'last_http_probe_rpi_house'      : mins * 10,
+            timestamps + 'last_http_probe_therm_house'    : mins * 10,
+
             timestamps + 'last_rsnapshot_hourly'          : hours * 24,
             timestamps + 'last_rsnapshot_daily'           : days * 3,
             timestamps + 'last_rsnapshot_weekly'          : days * 14,
@@ -29,17 +37,22 @@ class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
             timestamps + 'last_zfssnapshot_monthly'       : days * 70,
             timestamps + 'last_zfssnapshot_cleanup'       : hours * 24,
 
-            timestamps + 'last_disk_selftest_short'       : days * 14,
-            timestamps + 'last_disk_selftest_long'        : days * 31,
             timestamps + 'last_zfs_scrub'                 : days * 9,
-            timestamps + 'last_zfs_scrub_backup'          : days * 9,
+            timestamps + 'last_backup_zfs_scrub'          : days * 9,
+            timestamps + 'last_cabin_zfs_scrub'           : days * 9,
 
             timestamps + 'last_zfsxfer_backup.house'      : hours * 36,
             timestamps + 'last_zfsxfer_ski.dyn.guru.org'  : days * 7,
             timestamps + 'last_photos_sync'               : hours * 8,
-            timestamps + 'last_disk_selftest_backup_short': days * 14,
-            timestamps + 'last_disk_selftest_backup_long' : days * 31,
 
+            timestamps + 'last_disk_selftest_short'       : days * 14,
+            timestamps + 'last_disk_selftest_long'        : days * 31,
+            timestamps + 'last_backup_disk_selftest_short': days * 14,
+            timestamps + 'last_backup_disk_selftest_long' : days * 31,
+            timestamps + 'last_cabin_disk_selftest_short' : days * 14,
+            timestamps + 'last_cabin_disk_selftest_long'  : days * 31,
+
+            timestamps + 'last_cabin_rpi_ping'            : mins * 10,
             timestamps + 'last_healthy_wifi'              : mins * 10,
             timestamps + 'last_healthy_network'           : mins * 10,
             timestamps + 'last_scott_sync'                : days * 2,
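
Aside: this hunk only declares the limits; a hedged sketch of how such a table is typically consumed (get_timestamp is a stand-in for however the renderer actually reads the values):

import time

def find_stale(limits, get_timestamp):
    # get_timestamp(key) -> epoch seconds of the event's last success;
    # returns (key, age, allowed) tuples for anything past its limit.
    now = time.time()
    stale = []
    for key, max_age in limits.items():
        age = now - get_timestamp(key)
        if age > max_age:
            stale.append((key, age, max_age))
    return stale
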
diff --git a/kiosk.py b/kiosk.py
index 6003165a674e1f2ed76f18f8ffe8812f7c67b6e5..d9f607e3447b08e7616cfe66ef94e6793ce78d55 100755
--- a/kiosk.py
+++ b/kiosk.py
@@ -1,4 +1,4 @@
-#!/usr/local/bin/python3.7
+#!/usr/bin/env python3
 
 import sys
 import traceback
@@ -15,9 +15,22 @@ import logging
 import trigger_catalog
 import utils
 
+def filter_news_during_dinnertime(page):
+    now = datetime.now()
+    is_dinnertime = 17 <= now.hour <= 20
+    return (not is_dinnertime or
+            not ("cnn" in page or
+                 "news" in page or
+                 "mynorthwest" in page or
+                 "seattle" in page or
+                 "stranger" in page or
+                 "twitter" in page or
+                 "wsj" in page))
+
 def thread_change_current():
     page_chooser = chooser.weighted_random_chooser_with_triggers(
-        trigger_catalog.get_triggers())
+        trigger_catalog.get_triggers(),
+        [ filter_news_during_dinnertime ])
     swap_page_target = 0
     last_page = ""
     while True:
@@ -246,13 +259,14 @@ def emit_wrapped(f, filename):
     <TR STYLE="vertical-align:top">
         <TD COLSPAN=3>
             <DIV ID="content" STYLE="zoom: 1; visibility: hidden;">
-                <!-- BEGIN main page contents. -->
+              <!-- BEGIN main page contents. -->
 <!--#include virtual=\"%s\"-->
-                <!-- END main page contents. -->
+              <!-- END main page contents. -->
             </DIV>
             <BR>
+            <DIV STYLE="position: absolute; top:1030px; width:99%%">
             <P ALIGN="right">
-                <FONT SIZE=2 COLOR=#bbbbbb>%s @ %s ago.</FONT>
+              <FONT SIZE=2 COLOR=#bbbbbb>%s @ %s ago.</FONT>
             </P>
             <HR id="countdown" STYLE="width:0px;
                                       text-align:left;
@@ -262,6 +276,7 @@ def emit_wrapped(f, filename):
                                       height:5px;
                                       visibility:hidden;
                                       background-color:#ffffff;">
+            </DIV>
         </TD>
     </TR>
     </TABLE>
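
Aside: the dinnertime window used by the new filter is 17:00 through 20:59 inclusive; a trivial standalone check (helper name hypothetical):

import datetime

def is_dinnertime(when=None):
    when = when or datetime.datetime.now()
    return 17 <= when.hour <= 20

assert is_dinnertime(datetime.datetime(2020, 11, 28, 18, 30))
assert not is_dinnertime(datetime.datetime(2020, 11, 28, 21, 0))
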
diff --git a/local_photos_mirror_renderer.py b/local_photos_mirror_renderer.py
index 32e0c1e3e06a60c53acbb495864933908ee72d07..0b8f7fc0a4b9e8724b8dc257e1df35724c121785 100644
@@ -2,6 +2,7 @@ import os
 import file_writer
 import renderer
 import random
+import re
 
 class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer that uses a local mirror of Google photos"""
@@ -9,28 +10,38 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
     album_root_directory = "/usr/local/export/www/gphotos/albums"
 
     album_whitelist = frozenset([
-        '1208 Newer Alex Photos',
-        '1013 Scott and Lynn',
-        '0106 Key West 2019',
-        '1017 Olympic Sculpture Park',
-        '0212 Chihuly Glass',
-        '0730 Trip to East Coast \'16',
-        '0715 Barn',
-        '1009 East Coast 2018',
-        '0819 Skiing with Alex',
-        '0819 Friends',
-        '0227 Trip to California, \'16',
-        '0407 London, 2018',
-        '0528 Ohme Gardens',
-        '0809 Bangkok and Phuket, 2003',
-        '0803 Blue Angels... Seafair',
-        '0719 Dunn Gardens',
-        '0514 Krakow 2009',
-        '0515 Tuscany 2008',
-        '0508 Yosemite 2010',
-        '0611 Sonoma',
-        '1025 NJ 2015',
-        '0407 Las Vegas, 2017',
+        '8-Mile Lake Hike',
+        'Bangkok and Phuket, 2003',
+        'Barn',
+        'Blue Angels... Seafair',
+        'Chihuly Glass',
+        'Dunn Gardens',
+        'East Coast 2018',
+        'Fall \'17',
+        'Friends',
+        'Hiking',
+        'Key West 2019',
+        'Krakow 2009',
+        'Kubota Gardens',
+        'Las Vegas, 2017',
+        'London, 2018',
+        'Munich, July 2018',
+        'NJ 2015',
+        'Newer Alex Photos',
+        'Ohme Gardens',
+        'Olympic Sculpture Park',
+        'Prague and Munich 2019',
+        'Random',
+        'Scott and Lynn',
+        'SFO 2014',
+        'Skiing with Alex',
+        'Sonoma',
+        'Trip to California, \'16',
+        'Trip to San Francisco',
+        'Trip to East Coast \'16',
+        'Tuscany 2008',
+        'Yosemite 2010',
+        'Zoo',
     ])
 
     extension_whitelist = frozenset([
@@ -56,12 +67,18 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
         else:
             raise error('Unexpected operation')
 
+    def album_is_in_whitelist(self, name):
+        for wlalbum in self.album_whitelist:
+            if re.search(r'\d+ %s' % wlalbum, name) is not None:
+                return True
+        return False
+
     # Walk the filesystem looking for photos in whitelisted albums and
     # keep their paths in memory.
     def index_photos(self):
         for root, subdirs, files in os.walk(self.album_root_directory):
             last_dir = root.rsplit('/', 1)[1]
-            if last_dir in self.album_whitelist:
+            if self.album_is_in_whitelist(last_dir):
                 for x in files:
                     extension = x.rsplit('.', 1)[1]
                     if extension in self.extension_whitelist:
@@ -79,7 +96,7 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
             print("No photos!")
             return False
         path = random.sample(self.candidate_photos, 1)[0]
-        f = file_writer.file_writer('photo_23_none.html')
+        f = file_writer.file_writer('photo_23_3600.html')
         f.write("""
 <style>
 body{background-color:#303030;}
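
Aside: the new whitelist match means entries no longer carry the on-disk numeric date prefix; a standalone sketch (re.escape is an extra safety step the hunk omits, since album names contain characters like '.'):

import re

def album_is_whitelisted(dirname, whitelist):
    # On-disk albums look like "1208 Newer Alex Photos"; the whitelist
    # now stores just "Newer Alex Photos".
    for album in whitelist:
        if re.search(r"\d+ %s" % re.escape(album), dirname):
            return True
    return False

print(album_is_whitelisted("1208 Newer Alex Photos", {"Newer Alex Photos"}))  # True
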
diff --git a/myq_renderer.py b/myq_renderer.py
index 91e946b9a621291046e56f4e681f2eb80ce9bd74..ca405346dc10f88d98689886ae198ecd98195e8b 100644
@@ -89,7 +89,7 @@ class garage_door_renderer(renderer.debuggable_abstaining_renderer):
                 hours = divmod(days[1], constants.seconds_per_hour)
                 minutes = divmod(hours[1], constants.seconds_per_minute)
                 width = 0
-                if is_night and door.get_status() == "open":
+                if is_night and door.state == "open":
                     color = "border-color: #ff0000;"
                     width = 15
                 else:
@@ -115,6 +115,6 @@ class garage_door_renderer(renderer.debuggable_abstaining_renderer):
         return None
 
 # Test
-#x = garage_door_renderer({"Test" : 1})
-#x.periodic_render("Poll MyQ")
-#x.periodic_render("Update Page")
+x = garage_door_renderer({"Test" : 1})
+x.periodic_render("Poll MyQ")
+x.periodic_render("Update Page")
diff --git a/profanity_filter.py b/profanity_filter.py
index 1c862eb5f54f3769008ec4a73944f15c61bf60e0..894855845fc3c318d66923b03ed78aa2a9099362 100644
@@ -125,6 +125,7 @@ class profanity_filter:
             'eat my ass',
             'ecchi',
             'ejaculation',
+            'erection',
             'erotic',
             'erotism',
             'escort',
@@ -195,6 +196,7 @@ class profanity_filter:
             'jail bait',
             'jailbait',
             'jerk off',
+            'jerking off',
             'jigaboo',
             'jiggaboo',
             'jiggerboo',
diff --git a/reddit_renderer.py b/reddit_renderer.py
index c0b31aef26aec1c376b303bab8d0fd4492aa0b58..91cd33a43a1a0c78558981091d89b524b5deebb0 100644
@@ -7,6 +7,7 @@ import page_builder
 import praw
 import profanity_filter
 import random
+import renderer_catalog
 
 class reddit_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer to pull text content from reddit."""
diff --git a/renderer.py b/renderer.py
index e5fbbaa2d3cd59f278baa4bc369fbfa3705bc520..b78eb2b713e3b4901a912f8e763a90ee818953f3 100644
@@ -1,8 +1,11 @@
 import time
 from datetime import datetime
+from decorators import invokation_logged
 
 class renderer(object):
     """Base class for something that can render."""
+
+    @invokation_logged
     def render(self):
         pass
 
@@ -50,6 +53,7 @@ class abstaining_renderer(renderer):
                 if (self.periodic_render(key)):
                     self.last_runs[key] = time.time()
 
+    @invokation_logged
     def periodic_render(self, key):
         pass
 
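
Aside: the decorators module isn't part of this commit; a plausible minimal invokation_logged, offered as an assumption rather than the actual implementation:

import functools

def invokation_logged(func):
    # Assumed behavior: log entry and return value around the wrapped call.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print("%s: invoked" % func.__name__)
        result = func(*args, **kwargs)
        print("%s: returned %s" % (func.__name__, result))
        return result
    return wrapper
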
diff --git a/renderer_catalog.py b/renderer_catalog.py
index 738b4d15d77c6153ccc44e652f4472803e496f9e..794bd6f59f7aa0bedc461aa7823c94fd88c2972a 100644
@@ -114,7 +114,7 @@ __registry = [
                      "www.wsdot.com",
                      [ "/traffic/rssfeeds/stevens/Default.aspx" ]),
                  seattletimes_rss_renderer.seattletimes_rss_renderer(
-                     {"Fetch News" : (hours * 1),
+                     {"Fetch News" : (hours * 4),
                       "Shuffle News" : (always)},
                      "www.seattletimes.com",
                      [ "/pacific-nw-magazine/feed/",
diff --git a/seattletimes_rss_renderer.py b/seattletimes_rss_renderer.py
index fae27b1260bfb17ac0f9988dec27b5aec915a946..8a36f4f93622c11ebc3253559dba365bc536152e 100644
@@ -74,6 +74,7 @@ class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
             return False
         return len(description) >= 65
 
+# Test
 #x = seattletimes_rss_renderer({"Test", 123},
 #                              "www.seattletimes.com",
 #                              [ "/life/feed/" ],
diff --git a/stevens_renderer.py b/stevens_renderer.py
index eca0dcb517dac6f2a71e41734400f93268e44fbb..18f300b2e1b7aed53bdb412ec975e0c9eeab891e 100644
@@ -14,7 +14,7 @@ class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
         return "stevens"
 
     def periodic_render(self, key):
-        f = file_writer.file_writer('stevens-conditions_1_none.html')
+        f = file_writer.file_writer('stevens-conditions_1_86400.html')
         for uri in self.feed_uris:
             self.conn = http.client.HTTPSConnection(self.feed_site)
             self.conn.request(
diff --git a/stranger_renderer.py b/stranger_renderer.py
index a8698e2be325ab9e5e94554d7d44429165816406..2084c395a4fa4502612abeb32b26d2abffe0b65e 100644
@@ -8,6 +8,7 @@ import profanity_filter
 import random
 import re
 import renderer
+import renderer_catalog
 
 class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
@@ -77,7 +78,7 @@ class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
 
         for msg in subset:
             layout.add_item(msg)
-        f = file_writer.file_writer('stranger-events_2_none.html')
+        f = file_writer.file_writer('stranger-events_2_36000.html')
         layout.render_html(f)
         f.close()
         return True
diff --git a/twitter_renderer.py b/twitter_renderer.py
index 8a82e5e645731ba43eacb517db5c10891c138ca3..173842ef0ea2801182760ff7a1d0111f1739bff7 100644
@@ -72,7 +72,7 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
         handle = self.handles_by_author[author]
         tweets = self.tweets_by_author[author]
         already_seen = set()
-        f = file_writer.file_writer('twitter_10_none.html')
+        f = file_writer.file_writer('twitter_10_3600.html')
         f.write('<TABLE WIDTH=96%><TR><TD WIDTH=86%>')
         f.write('<H2>%s (@%s)</H2></TD>\n' % (author, handle))
         f.write('<TD ALIGN="right" VALIGN="top">')
@@ -96,13 +96,13 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
         return True
 
 # Test
-t = twitter_renderer(
-    {"Fetch Tweets" : 1,
-     "Shuffle Tweets" : 1})
+#t = twitter_renderer(
+#    {"Fetch Tweets" : 1,
+#     "Shuffle Tweets" : 1})
 #x = "bla bla bla https://t.co/EjWnT3UA9U bla bla"
 #x = t.linkify(x)
 #print x
-if t.fetch_tweets() == 0:
-    print("Error fetching tweets, none fetched.")
-else:
-    t.shuffle_tweets()
+#if t.fetch_tweets() == 0:
+#    print("Error fetching tweets, none fetched.")
+#else:
+#    t.shuffle_tweets()