if self.is_item_older_than_n_days(item, 10):
self.debug_print("%s: is too old!" % title)
return False
+ if (title.find("NFL") != -1 or
+ re.search("[Ll]ive [Ss]tream", title) != None or
+ re.search("[Ll]ive[Ss]tream", title) != None or
+ re.search("[Ll]ive [Ss]tream", description) != None):
+ self.debug_print("%s: looks like it's about football." % title)
+ return False
return True
def item_is_interesting_for_article(self, title, description, item):
if self.is_item_older_than_n_days(item, 10):
self.debug_print("%s: is too old!" % title)
return False
+ if (title.find(" NFL") != -1 or
+ re.search("[Ll]ive [Ss]tream", title) != None or
+ re.search("[Ll]ive[Ss]tream", title) != None or
+ re.search("[Ll]ive [Ss]tream", description) != None):
+ self.debug_print("%s: looks like it's about football." % title)
+ return False
return True
# Test
+import datetime
import os
import random
import re
pass
class weighted_random_chooser(chooser):
- """Chooser that does it via weighted RNG"""
- def __init__(self):
+ """Chooser that does it via weighted RNG."""
+ def dont_choose_page_twice_in_a_row_filter(self, choice):
+ if choice == self.last_choice:
+ return False
+ self.last_choice = choice
+ return True
+
+ def __init__(self, filter_list):
self.last_choice = ""
self.valid_filename = re.compile(r"([^_]+)_(\d+)_([^\.]+)\.html")
self.pages = None
self.count = 0
+ self.filter_list = filter_list
+ if filter_list is None:
+ self.filter_list = []
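+ # Always install the repeat-suppression filter on top of any caller-supplied
+ # filters.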
+ self.filter_list.append(self.dont_choose_page_twice_in_a_row_filter)
def choose_next_page(self):
if (self.pages == None or
weight = int(result.group(2))
weights.append(weight)
total_weight += weight
-
if (total_weight <= 0):
raise error
while True:
- pick = random.randrange(0, total_weight - 1)
+ random_pick = random.randrange(0, total_weight)
so_far = 0
for x in range(0, len(weights)):
so_far += weights[x]
- if (so_far > pick and
- self.pages[x] != self.last_choice):
- self.last_choice = self.pages[x]
- self.count += 1
- return self.pages[x]
+ if so_far > random_pick:
+ break
+ choice = self.pages[x]
+
+ # Allow filter list to suppress pages.
+ choice_is_filtered = False
+ for f in self.filter_list:
+ if not f(choice):
+ print("chooser: %s filtered by %s" % (choice, f.__name__))
+ choice_is_filtered = True
+ break
+ if choice_is_filtered:
+ continue
+
+ # We're good...
+ self.count += 1
+ return choice
class weighted_random_chooser_with_triggers(weighted_random_chooser):
"""Same as WRC but has trigger events"""
- def __init__(self, trigger_list):
- weighted_random_chooser.__init__(self)
+ def __init__(self, trigger_list, filter_list):
+ weighted_random_chooser.__init__(self, filter_list)
self.trigger_list = trigger_list
+ if trigger_list is None:
+ self.trigger_list = []
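+ # Pages queued by triggers; entries appear to be (page, priority) pairs and
+ # are drained before falling back to the weighted random choice.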
self.page_queue = set(())
def check_for_triggers(self):
triggered = self.check_for_triggers()
- # First try to satisfy from the page queue
+ # First try to satisfy from the page queue.
if (len(self.page_queue) > 0):
- print("Pulling page from queue")
+ print("chooser: Pulling page from queue...")
page = None
priority = None
for t in self.page_queue:
self.count += 1
return page
-#x = weighted_random_chooser_with_triggers(None)
+# Test
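+# Example filter: veto news-ish pages during dinnertime (hours 17-20).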
+def filter_news_during_dinnertime(page):
+ now = datetime.datetime.now()
+ is_dinnertime = now.hour >= 17 and now.hour <= 20
+ return (not is_dinnertime or
+ not ("cnn" in page or
+ "news" in page or
+ "mynorthwest" in page or
+ "seattle" in page or
+ "stranger" in page or
+ "twitter" in page or
+ "wsj" in page))
+
+#x = weighted_random_chooser_with_triggers([], [ filter_news_during_dinnertime ])
+#print(x.choose_next_page())
-#!/usr/local/bin/python
-
refresh_period_sec = 22
render_period_sec = 30
pages_dir = "/usr/local/export/www/kiosk/pages"
page_token = None
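+ # Format a datetime as an ISO-8601 style timestamp with a literal trailing "Z".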
def format_datetime(x):
return datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%SZ')
- time_min = datetime.datetime.now()
- time_max = time_min + datetime.timedelta(95)
+ now = datetime.datetime.now()
+ time_min = now - datetime.timedelta(1)
+ time_max = now + datetime.timedelta(95)
time_min, time_max = list(map(format_datetime, (time_min, time_max)))
self.debug_print("time_min is %s" % time_min)
self.debug_print("time_max is %s" % time_max)
# Writes 2 files:
# + "upcoming events",
# + a countdown timer for a subset of events,
- f = file_writer.file_writer('gcal_3_none.html')
+ f = file_writer.file_writer('gcal_3_86400.html')
f.write('<h1>Upcoming Calendar Events:</h1><hr>\n')
f.write('<center><table width=96%>\n')
font-weight: bold;
}
</STYLE>""")
- f = file_writer.file_writer('%s_%s_none.html' % (
+ f = file_writer.file_writer('%s_%s_25900.html' % (
self.get_headlines_page_prefix(),
self.get_headlines_page_priority()))
headlines.render_html(f)
blurb = msg
blurb += u'</TD>'
details.add_item(blurb)
- g = file_writer.file_writer('%s_%s_none.html' % (
+ g = file_writer.file_writer('%s_%s_86400.html' % (
self.get_details_page_prefix(),
self.get_details_page_priority()))
details.render_html(g)
for uri in self.feed_uris:
if self.should_use_https():
self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
- self.conn = http.client.HTTPSConnection(self.feed_site)
+ self.conn = http.client.HTTPSConnection(self.feed_site, timeout=20)
else:
self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
- self.conn = http.client.HTTPConnection(self.feed_site)
+ self.conn = http.client.HTTPConnection(self.feed_site, timeout=20)
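+ # Send a browser-like User-Agent and allow brief caching; some feeds appear
+ # to reject or throttle requests that look like bots.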
self.conn.request(
"GET",
uri,
None,
- {"Accept-Charset": "utf-8"})
- response = self.conn.getresponse()
+ { "Accept": "*/*",
+ "Cache-control": "max-age=59",
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"})
+ try:
+ response = self.conn.getresponse()
+ except Exception as e:
+ print("Exception in generic RSS renderer HTTP connection: %s" % e)
+ return False
+
if response.status != 200:
print(("%s: RSS fetch_news error, response: %d" % (self.page_title,
response.status)))
self.debug_print("Note title '%s'" % title)
if contents != '' and not contents.isspace():
contents = strikethrough.sub('', contents)
+ self.debug_print("Note contents:\n%s" % contents)
contents = contents.replace(u'\u2610 ',
u'<LI><INPUT TYPE="checkbox"> ')
-
- #self.debug_print("Note contents:\n%s" % contents)
contents = linkify.sub(r'<a href="\1">\1</a>', contents)
individual_lines = contents.split("\n")
length = len(x)
if length > max_length:
max_length = length
- spaces = len(x) - len(x.lstrip(' '))
- spaces /= 2
- spaces = int(spaces)
+ leading_spaces = len(x) - len(x.lstrip(' '))
+ leading_spaces /= 2
+ leading_spaces = int(leading_spaces)
x = x.lstrip(' ')
- for y in range(0, spaces):
+ # self.debug_print(" * (%d) '%s'" % (leading_spaces, x))
+ for y in range(0, leading_spaces):
x = "<UL>" + x
- for y in range(0, spaces):
+ for y in range(0, leading_spaces):
x = x + "</UL>"
contents = contents + x + "\n"
return True
# Test
-x = gkeep_renderer({"Test", 1234})
-x.periodic_render("Test")
+#x = gkeep_renderer({"Test", 1234})
+#x.periodic_render("Test")
descr = descr + " (%s)" % source
return descr
+ def munge_description_internal(self, descr):
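+ # Truncate an over-long description, strip any HTML tag the cut left
+ # half-open, add an ellipsis, and close tags that may still be open.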
+ if len(descr) > 450:
+ descr = descr[:450]
+ descr = re.sub(r"\<[^\>]*$", "", descr)
+ descr = descr + " [...]"
+ descr += "</A></LI></UL></OL></P>"
+ return descr
+
def munge_description(self, description):
soup = BeautifulSoup(description)
for a in soup.findAll('a'):
del a['href']
descr = str(soup)
- if len(descr) > 400:
- descr = descr[:400]
- descr = descr + " [...]"
- return descr
+ return self.munge_description_internal(descr)
def find_image(self, item):
return None
#if x.fetch_news() == 0:
# print("Error fetching news, no items fetched.")
#x.shuffle_news()
+#
+#descr = "this is a lot of really long text about nothign in particular. It's pretty interesting, don't you think? I hope that the munge description method works by both truncating it and remembering to close any open <LI>items as well as making sure not to truncate in the middle of a <A HREF=\"whatever\" these are a bunch of useless arguments to the A tag that make it really long so that the truncate will happen in the middle of it. I'm getting kind of tired of typing shit so I'm going to revert to copy pasta now. Sorry if you were getting into this story. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog.</A></LI> Out!"
+#d = x.munge_description_internal(descr)
+#print(d)
+
days = constants.seconds_per_day
hours = constants.seconds_per_hour
mins = constants.seconds_per_minute
+ minutes = mins
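+ # Maximum tolerated age, in seconds, for each monitored timestamp; anything
+ # older than its limit is presumably flagged as stale.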
limits = {
+ timestamps + 'last_http_probe_wannabe_house' : mins * 10,
+ timestamps + 'last_http_probe_meerkat_cabin' : mins * 10,
+ timestamps + 'last_http_probe_dns_house' : mins * 10,
+ timestamps + 'last_http_probe_rpi_cabin' : mins * 10,
+ timestamps + 'last_http_probe_rpi_house' : mins * 10,
+ timestamps + 'last_http_probe_therm_house' : mins * 10,
+
timestamps + 'last_rsnapshot_hourly' : hours * 24,
timestamps + 'last_rsnapshot_daily' : days * 3,
timestamps + 'last_rsnapshot_weekly' : days * 14,
timestamps + 'last_zfssnapshot_monthly' : days * 70,
timestamps + 'last_zfssnapshot_cleanup' : hours * 24,
- timestamps + 'last_disk_selftest_short' : days * 14,
- timestamps + 'last_disk_selftest_long' : days * 31,
timestamps + 'last_zfs_scrub' : days * 9,
- timestamps + 'last_zfs_scrub_backup' : days * 9,
+ timestamps + 'last_backup_zfs_scrub' : days * 9,
+ timestamps + 'last_cabin_zfs_scrub' : days * 9,
timestamps + 'last_zfsxfer_backup.house' : hours * 36,
timestamps + 'last_zfsxfer_ski.dyn.guru.org' : days * 7,
timestamps + 'last_photos_sync' : hours * 8,
- timestamps + 'last_disk_selftest_backup_short': days * 14,
- timestamps + 'last_disk_selftest_backup_long' : days * 31,
+ timestamps + 'last_disk_selftest_short' : days * 14,
+ timestamps + 'last_disk_selftest_long' : days * 31,
+ timestamps + 'last_backup_disk_selftest_short': days * 14,
+ timestamps + 'last_backup_disk_selftest_long' : days * 31,
+ timestamps + 'last_cabin_disk_selftest_short' : days * 14,
+ timestamps + 'last_cabin_disk_selftest_long' : days * 31,
+
+ timestamps + 'last_cabin_rpi_ping' : mins * 10,
timestamps + 'last_healthy_wifi' : mins * 10,
timestamps + 'last_healthy_network' : mins * 10,
timestamps + 'last_scott_sync' : days * 2,
-#!/usr/local/bin/python3.7
+#!/usr/bin/env python3
import sys
import traceback
import trigger_catalog
import utils
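+# Page filter handed to the chooser below: veto news-ish pages during
+# dinnertime (hours 17-20).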
+def filter_news_during_dinnertime(page):
+ now = datetime.now()
+ is_dinnertime = now.hour >= 17 and now.hour <= 20
+ return (not is_dinnertime or
+ not ("cnn" in page or
+ "news" in page or
+ "mynorthwest" in page or
+ "seattle" in page or
+ "stranger" in page or
+ "twitter" in page or
+ "wsj" in page))
+
def thread_change_current():
page_chooser = chooser.weighted_random_chooser_with_triggers(
- trigger_catalog.get_triggers())
+ trigger_catalog.get_triggers(),
+ [ filter_news_during_dinnertime ])
swap_page_target = 0
last_page = ""
while True:
<TR STYLE="vertical-align:top">
<TD COLSPAN=3>
<DIV ID="content" STYLE="zoom: 1; visibility: hidden;">
- <!-- BEGIN main page contents. -->
+ <!-- BEGIN main page contents. -->
<!--#include virtual=\"%s\"-->
- <!-- END main page contents. -->
+ <!-- END main page contents. -->
</DIV>
<BR>
+ <DIV STYLE="position: absolute; top:1030px; width:99%%">
<P ALIGN="right">
- <FONT SIZE=2 COLOR=#bbbbbb>%s @ %s ago.</FONT>
+ <FONT SIZE=2 COLOR=#bbbbbb>%s @ %s ago.</FONT>
</P>
<HR id="countdown" STYLE="width:0px;
text-align:left;
height:5px;
visibility:hidden;
background-color:#ffffff;">
+ </DIV>
</TD>
</TR>
</TABLE>
import file_writer
import renderer
import random
+import re
class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer that uses a local mirror of Google photos"""
album_root_directory = "/usr/local/export/www/gphotos/albums"
album_whitelist = frozenset([
- '1208 Newer Alex Photos',
- '1013 Scott and Lynn',
- '0106 Key West 2019',
- '1017 Olympic Sculpture Park',
- '0212 Chihuly Glass',
- '0730 Trip to East Coast \'16',
- '0715 Barn',
- '1009 East Coast 2018',
- '0819 Skiing with Alex',
- '0819 Friends',
- '0227 Trip to California, \'16',
- '0407 London, 2018',
- '0528 Ohme Gardens',
- '0809 Bangkok and Phuket, 2003',
- '0803 Blue Angels... Seafair',
- '0719 Dunn Gardens',
- '0514 Krakow 2009',
- '0515 Tuscany 2008',
- '0508 Yosemite 2010',
- '0611 Sonoma',
- '1025 NJ 2015',
- '0407 Las Vegas, 2017',
+ '8-Mile Lake Hike',
+ 'Bangkok and Phuket, 2003',
+ 'Barn',
+ 'Blue Angels... Seafair',
+ 'Chihuly Glass',
+ 'Dunn Gardens',
+ 'East Coast 2018',
+ 'Fall \'17',
+ 'Friends',
+ 'Hiking',
+ 'Key West 2019',
+ 'Krakow 2009',
+ 'Kubota Gardens',
+ 'Las Vegas, 2017',
+ 'London, 2018',
+ 'Munich, July 2018',
+ 'NJ 2015',
+ 'Newer Alex Photos',
+ 'Ohme Gardens',
+ 'Olympic Sculpture Park',
+ 'Prague and Munich 2019',
+ 'Random',
+ 'Scott and Lynn',
+ 'SFO 2014',
+ 'Skiing with Alex',
+ 'Sonoma',
+ 'Trip to California, \'16',
+ 'Trip to San Francisco',
+ 'Trip to East Coast \'16',
+ 'Tuscany 2008',
+ 'Yosemite 2010',
+ 'Zoo',
])
extension_whitelist = frozenset([
else:
raise error('Unexpected operation')
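+ # Album directories on disk appear to carry a numeric prefix (e.g.
+ # "0407 London, 2018"); match whitelist entries regardless of that prefix.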
+ def album_is_in_whitelist(self, name):
+ for wlalbum in self.album_whitelist:
+ if re.search(r'\d+ %s' % wlalbum, name) is not None:
+ return True
+ return False
+
# Walk the filesystem looking for photos in whitelisted albums and
# keep their paths in memory.
def index_photos(self):
for root, subdirs, files in os.walk(self.album_root_directory):
last_dir = root.rsplit('/', 1)[1]
- if last_dir in self.album_whitelist:
+ if self.album_is_in_whitelist(last_dir):
for x in files:
extension = x.rsplit('.', 1)[1]
if extension in self.extension_whitelist:
print("No photos!")
return False
path = random.sample(self.candidate_photos, 1)[0]
- f = file_writer.file_writer('photo_23_none.html')
+ f = file_writer.file_writer('photo_23_3600.html')
f.write("""
<style>
body{background-color:#303030;}
hours = divmod(days[1], constants.seconds_per_hour)
minutes = divmod(hours[1], constants.seconds_per_minute)
width = 0
- if is_night and door.get_status() == "open":
+ if is_night and door.state == "open":
color = "border-color: #ff0000;"
width = 15
else:
return None
# Test
-#x = garage_door_renderer({"Test" : 1})
-#x.periodic_render("Poll MyQ")
-#x.periodic_render("Update Page")
+x = garage_door_renderer({"Test" : 1})
+x.periodic_render("Poll MyQ")
+x.periodic_render("Update Page")
'eat my ass',
'ecchi',
'ejaculation',
+ 'erection',
'erotic',
'erotism',
'escort',
'jail bait',
'jailbait',
'jerk off',
+ 'jerking off',
'jigaboo',
'jiggaboo',
'jiggerboo',
import praw
import profanity_filter
import random
+import renderer_catalog
class reddit_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer to pull text content from reddit."""
import time
from datetime import datetime
+from decorators import invokation_logged
class renderer(object):
"""Base class for something that can render."""
+
+ @invokation_logged
def render(self):
pass
if (self.periodic_render(key)):
self.last_runs[key] = time.time()
+ @invokation_logged
def periodic_render(self, key):
pass
"www.wsdot.com",
[ "/traffic/rssfeeds/stevens/Default.aspx" ]),
seattletimes_rss_renderer.seattletimes_rss_renderer(
- {"Fetch News" : (hours * 1),
+ {"Fetch News" : (hours * 4),
"Shuffle News" : (always)},
"www.seattletimes.com",
[ "/pacific-nw-magazine/feed/",
return False
return len(description) >= 65
+# Test
#x = seattletimes_rss_renderer({"Test", 123},
# "www.seattletimes.com",
# [ "/life/feed/" ],
return "stevens"
def periodic_render(self, key):
- f = file_writer.file_writer('stevens-conditions_1_none.html')
+ f = file_writer.file_writer('stevens-conditions_1_86400.html')
for uri in self.feed_uris:
self.conn = http.client.HTTPSConnection(self.feed_site)
self.conn.request(
import random
import re
import renderer
+import renderer_catalog
class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
for msg in subset:
layout.add_item(msg)
- f = file_writer.file_writer('stranger-events_2_none.html')
+ f = file_writer.file_writer('stranger-events_2_36000.html')
layout.render_html(f)
f.close()
return True
handle = self.handles_by_author[author]
tweets = self.tweets_by_author[author]
already_seen = set()
- f = file_writer.file_writer('twitter_10_none.html')
+ f = file_writer.file_writer('twitter_10_3600.html')
f.write('<TABLE WIDTH=96%><TR><TD WIDTH=86%>')
f.write('<H2>%s (@%s)</H2></TD>\n' % (author, handle))
f.write('<TD ALIGN="right" VALIGN="top">')
return True
# Test
-t = twitter_renderer(
- {"Fetch Tweets" : 1,
- "Shuffle Tweets" : 1})
+#t = twitter_renderer(
+# {"Fetch Tweets" : 1,
+# "Shuffle Tweets" : 1})
#x = "bla bla bla https://t.co/EjWnT3UA9U bla bla"
#x = t.linkify(x)
#print x
-if t.fetch_tweets() == 0:
- print("Error fetching tweets, none fetched.")
-else:
- t.shuffle_tweets()
+#if t.fetch_tweets() == 0:
+# print("Error fetching tweets, none fetched.")
+#else:
+# t.shuffle_tweets()