import generic_news_rss_renderer as gnrss
import re
+
class bellevue_reporter_rss_renderer(gnrss.generic_news_rss_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
super(bellevue_reporter_rss_renderer, self).__init__(
- name_to_timeout_dict,
- feed_site,
- feed_uris,
- page_title)
+ name_to_timeout_dict, feed_site, feed_uris, page_title
+ )
self.debug = 1
def debug_prefix(self):
return True
def munge_description(self, description):
- description = re.sub('<[^>]+>', '', description)
- description = re.sub('Bellevue\s+Reporter\s+Bellevue\s+Reporter', '',
- description)
- description = re.sub('\s*\-\s*Your local homepage\.\s*', '', description)
+ description = re.sub("<[^>]+>", "", description)
+ description = re.sub(
+ "Bellevue\s+Reporter\s+Bellevue\s+Reporter", "", description
+ )
+ description = re.sub("\s*\-\s*Your local homepage\.\s*", "", description)
return description
def item_is_interesting_for_headlines(self, title, description, item):
if self.is_item_older_than_n_days(item, 10):
self.debug_print("%s: is too old!" % title)
return False
- if (title.find("NFL") != -1 or
- re.search("[Ll]ive [Ss]tream", title) != None or
- re.search("[Ll]ive[Ss]tream", title) != None or
- re.search("[Ll]ive [Ss]tream", description) != None):
+ if (
+ title.find("NFL") != -1
+ or re.search("[Ll]ive [Ss]tream", title) != None
+ or re.search("[Ll]ive[Ss]tream", title) != None
+ or re.search("[Ll]ive [Ss]tream", description) != None
+ ):
self.debug_print("%s: looks like it's about football." % title)
return False
return True
    def item_is_interesting_for_article(self, title, description, item):
        if self.is_item_older_than_n_days(item, 10):
self.debug_print("%s: is too old!" % title)
return False
- if (title.find(" NFL") != -1 or
- re.search("[Ll]ive [Ss]tream", title) != None or
- re.search("[Ll]ive[Ss]tream", title) != None or
- re.search("[Ll]ive [Ss]tream", description) != None):
+ if (
+ title.find(" NFL") != -1
+ or re.search("[Ll]ive [Ss]tream", title) != None
+ or re.search("[Ll]ive[Ss]tream", title) != None
+ or re.search("[Ll]ive [Ss]tream", description) != None
+ ):
self.debug_print("%s: looks like it's about football." % title)
return False
return True
+
# Test
-#x = bellevue_reporter_rss_renderer(
+# x = bellevue_reporter_rss_renderer(
# {"Fetch News" : 1,
# "Shuffle News" : 1},
# "www.bellevuereporter.com",
# [ "/feed/" ],
# "Test" )
-#d = """
-#<DIV style="padding:8px;
+# d = """
+# <DIV style="padding:8px;
# font-size:44pt;
# -webkit-column-break-inside:avoid;"><P>
-#<B>Task force will tackle issues of racial justice, police reform</B>
-#<BR>Bellevue Reporter
-#Bellevue Reporter - Your local homepage.
-#Inslee names civil rights activists, pastors, and cops to panel that may forge ideas f#or new laws Task force will tackle issues of racial justice, police reform
-#Wire Service
-#</DIV>"""
-#d = x.munge_description(d)
-#print d
-#if x.fetch_news() == 0:
+# <B>Task force will tackle issues of racial justice, police reform</B>
+# <BR>Bellevue Reporter
+# Bellevue Reporter - Your local homepage.
+# Inslee names civil rights activists, pastors, and cops to panel that may forge ideas for new laws Task force will tackle issues of racial justice, police reform
+# Wire Service
+# </DIV>"""
+# d = x.munge_description(d)
+# print(d)
+# if x.fetch_news() == 0:
# print "Error fetching news, no items fetched."
-#x.shuffle_news()
+# x.shuffle_news()
import utils
from datetime import datetime
+
class any_camera_trigger(trigger.trigger):
def __init__(self):
self.triggers_in_the_past_seven_min = {
- "driveway" : 0,
- "frontdoor" : 0,
- "cabin_driveway" : 0,
- "backyard" : 0,
+ "driveway": 0,
+ "frontdoor": 0,
+ "cabin_driveway": 0,
+ "backyard": 0,
}
self.last_trigger = {
- "driveway" : 0,
- "frontdoor" : 0,
- "cabin_driveway" : 0,
- "backyard" : 0,
+ "driveway": 0,
+ "frontdoor": 0,
+ "cabin_driveway": 0,
+ "backyard": 0,
}
def choose_priority(self, camera, age):
base_priority_by_camera = {
- "driveway" : 1,
- "frontdoor" : 2,
- "cabin_driveway" : 1,
- "backyard" : 0,
+ "driveway": 1,
+ "frontdoor": 2,
+ "cabin_driveway": 1,
+ "backyard": 0,
}
priority = base_priority_by_camera[camera]
if age < 10:
def get_triggered_page_list(self):
triggers = []
cameras_with_recent_triggers = 0
- camera_list = [ "driveway",
- "frontdoor",
- "cabin_driveway",
- "backyard" ]
+ camera_list = ["driveway", "frontdoor", "cabin_driveway", "backyard"]
now = time.time()
try:
for camera in camera_list:
file = "/timestamps/last_camera_motion_%s" % camera
ts = os.stat(file).st_ctime
- if (ts != self.last_trigger[camera] and
- (now - ts) < 10):
+ if ts != self.last_trigger[camera] and (now - ts) < 10:
print("Camera: %s, age %s" % (camera, (now - ts)))
self.last_trigger[camera] = ts
cameras_with_recent_triggers += 1
# triggered at the same time.
for camera in camera_list:
if (now - self.last_trigger[camera]) < 10:
- if (self.triggers_in_the_past_seven_min[camera] <= 4 or
- cameras_with_recent_triggers > 1):
+ if (
+ self.triggers_in_the_past_seven_min[camera] <= 4
+ or cameras_with_recent_triggers > 1
+ ):
ts = utils.timestamp()
p = self.choose_priority(camera, age)
- print(("%s: ****** %s[%d] CAMERA TRIGGER ******" % (
- ts, camera, p)))
- triggers.append( ( "hidden/%s.html" % camera,
- self.choose_priority(camera, age)) )
+ print(
+ (
+ "%s: ****** %s[%d] CAMERA TRIGGER ******"
+ % (ts, camera, p)
+ )
+ )
+ triggers.append(
+ (
+ "hidden/%s.html" % camera,
+ self.choose_priority(camera, age),
+ )
+ )
else:
- print(("%s: Camera %s too spammy, squelching it" % (
- ts, camera)))
+                    ts = utils.timestamp()
+                    print(
+                        "%s: Camera %s too spammy, squelching it" % (ts, camera)
+                    )
except Exception as e:
print(e)
pass
else:
return triggers
-#x = any_camera_trigger()
-#print(x.get_triggered_page_list())
+
+# x = any_camera_trigger()
+# print(x.get_triggered_page_list())
import constants
import trigger
+
class chooser(object):
"""Base class of a thing that chooses pages"""
+
def get_page_list(self):
now = time.time()
valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
filenames = []
- pages = [ f for f in os.listdir(constants.pages_dir)
- if os.path.isfile(os.path.join(constants.pages_dir, f))]
+ pages = [
+ f
+ for f in os.listdir(constants.pages_dir)
+ if os.path.isfile(os.path.join(constants.pages_dir, f))
+ ]
for page in pages:
result = re.match(valid_filename, page)
if result != None:
print(('chooser: candidate page: "%s"' % page))
- if (result.group(3) != "none"):
+ if result.group(3) != "none":
freshness_requirement = int(result.group(3))
- last_modified = int(os.path.getmtime(
- os.path.join(constants.pages_dir, page)))
- age = (now - last_modified)
- if (age > freshness_requirement):
+ last_modified = int(
+ os.path.getmtime(os.path.join(constants.pages_dir, page))
+ )
+ age = now - last_modified
+ if age > freshness_requirement:
print(('chooser: "%s" is too old.' % page))
continue
filenames.append(page)
def choose_next_page(self):
pass
+
class weighted_random_chooser(chooser):
"""Chooser that does it via weighted RNG."""
+
def dont_choose_page_twice_in_a_row_filter(self, choice):
if choice == self.last_choice:
return False
self.filter_list.append(self.dont_choose_page_twice_in_a_row_filter)
def choose_next_page(self):
- if (self.pages == None or
- self.count % 100 == 0):
+ if self.pages == None or self.count % 100 == 0:
self.pages = self.get_page_list()
total_weight = 0
weight = int(result.group(2))
weights.append(weight)
total_weight += weight
- if (total_weight <= 0):
+ if total_weight <= 0:
raise error
while True:
self.count += 1
return choice
+
class weighted_random_chooser_with_triggers(weighted_random_chooser):
"""Same as WRC but has trigger events"""
+
def __init__(self, trigger_list, filter_list):
weighted_random_chooser.__init__(self, filter_list)
self.trigger_list = trigger_list
return triggered
def choose_next_page(self):
- if (self.pages == None or
- self.count % 100 == 0):
+ if self.pages == None or self.count % 100 == 0:
self.pages = self.get_page_list()
triggered = self.check_for_triggers()
# First try to satisfy from the page queue.
- if (len(self.page_queue) > 0):
+ if len(self.page_queue) > 0:
print("chooser: Pulling page from queue...")
page = None
priority = None
else:
return weighted_random_chooser.choose_next_page(self), False
+
class rotating_chooser(chooser):
"""Chooser that does it in a rotation"""
+
def __init__(self):
self.valid_filename = re.compile("([^_]+)_(\d+)_([^\.]+)\.html")
self.pages = None
self.count = 0
def choose_next_page(self):
- if (self.pages == None or
- self.count % 100 == 0):
+ if self.pages == None or self.count % 100 == 0:
self.pages = self.get_page_list()
if len(self.pages) == 0:
raise error
- if (self.current >= len(self.pages)):
+ if self.current >= len(self.pages):
self.current = 0
page = self.pages[self.current]
self.count += 1
return page
+
# Test
def filter_news_during_dinnertime(page):
now = datetime.datetime.now()
is_dinnertime = now.hour >= 17 and now.hour <= 20
- return (not is_dinnertime or
- not ("cnn" in page or
- "news" in page or
- "mynorthwest" in page or
- "seattle" in page or
- "stranger" in page or
- "twitter" in page or
- "wsj" in page))
-
-#x = weighted_random_chooser_with_triggers([], [ filter_news_during_dinnertime ])
-#print(x.choose_next_page())
+ return not is_dinnertime or not (
+ "cnn" in page
+ or "news" in page
+ or "mynorthwest" in page
+ or "seattle" in page
+ or "stranger" in page
+ or "twitter" in page
+ or "wsj" in page
+ )
+
+
+# x = weighted_random_chooser_with_triggers([], [ filter_news_during_dinnertime ])
+# print(x.choose_next_page())
import generic_news_rss_renderer
import re
+
class cnn_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
super(cnn_rss_renderer, self).__init__(
- name_to_timeout_dict,
- feed_site,
- feed_uris,
- page_title)
+ name_to_timeout_dict, feed_site, feed_uris, page_title
+ )
self.debug = 1
def debug_prefix(self):
return "cnn-details-%s" % (self.page_title)
def munge_description(self, description):
- description = re.sub('[Rr]ead full story for latest details.',
- '',
- description)
- description = re.sub('<[^>]+>', '', description)
+ description = re.sub("[Rr]ead full story for latest details.", "", description)
+ description = re.sub("<[^>]+>", "", description)
return description
def find_image(self, item):
- image = item.findtext('media:thumbnail')
+ image = item.findtext("media:thumbnail")
if image is not None:
- image_url = image.get('url')
+ image_url = image.get("url")
return image_url
return None
    def item_is_interesting_for_headlines(self, title, description, item):
        if self.is_item_older_than_n_days(item, 14):
self.debug_print("%s: is too old!" % title)
return False
- return re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None
+ return re.search(r"[Cc][Nn][Nn][A-Za-z]*\.com", title) is None
def item_is_interesting_for_article(self, title, description, item):
if self.is_item_older_than_n_days(item, 7):
self.debug_print("%s: is too old!" % title)
return False
- return (re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None and
- len(description) >= 65)
+ return (
+ re.search(r"[Cc][Nn][Nn][A-Za-z]*\.com", title) is None
+ and len(description) >= 65
+ )
+
# Test
-#x = cnn_rss_renderer(
+# x = cnn_rss_renderer(
# {"Fetch News" : 1,
# "Shuffle News" : 1},
# "rss.cnn.com",
# "/rss/cnn_tech.rss",
# ],
# "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
# print("Error fetching news, no items fetched.")
-#x.shuffle_news()
+# x.shuffle_news()
from datetime import datetime
import functools
+
def invokation_logged(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
timestamp = now.strftime("%d-%b-%Y (%H:%M:%S.%f)")
print("%s(%s): Exited function" % (func.__name__, timestamp))
return ret
+
return wrapper
+
# Test
-#@invokation_logged
-#def f(x):
+# @invokation_logged
+# def f(x):
# print(x * x)
# return x * x
#
-#q = f(10)
-#print(q)
+# q = f(10)
+# print(q)
import constants
import os
+
def remove_tricky_unicode(x):
try:
- x = x.decode('utf-8')
+ x = x.decode("utf-8")
x = x.replace("\u2018", "'").replace("\u2019", "'")
x = x.replace("\u201c", '"').replace("\u201d", '"')
x = x.replace("\u2e3a", "-").replace("\u2014", "-")
pass
return x
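+
+# Example (illustrative, not from the original file): smart quotes and long
+# dashes come back as plain ASCII, assuming the elided except above only
+# guards the decode() call:
+# remove_tricky_unicode(u"\u201chi\u201d \u2014 \u2018there\u2019")
+# ...yields: "hi" - 'there'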
+
class file_writer:
def __init__(self, filename):
- self.full_filename = os.path.join(constants.pages_dir,
- filename)
- self.f = open(self.full_filename, 'wb')
- self.xforms = [ remove_tricky_unicode ]
+ self.full_filename = os.path.join(constants.pages_dir, filename)
+ self.f = open(self.full_filename, "wb")
+ self.xforms = [remove_tricky_unicode]
def add_xform(self, xform):
self.xforms.append(xform)
def write(self, data):
for xform in self.xforms:
data = xform(data)
- self.f.write(data.encode('utf-8'))
+ self.f.write(data.encode("utf-8"))
def done(self):
self.f.close()
def close(self):
self.done()
+
# Test
-#def toupper(x):
+# def toupper(x):
# return x.upper()
#
-#fw = file_writer("test")
-#fw.add_xform(toupper)
-#fw.write(u"This is a \u201ctest\u201d. \n")
-#fw.done()
+# fw = file_writer("test")
+# fw.add_xform(toupper)
+# fw.write(u"This is a \u201ctest\u201d. \n")
+# fw.done()
import renderer
import time
+
class gcal_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer to fetch upcoming events from www.google.com/calendar"""
- calendar_whitelist = frozenset([
- 'Alex\'s calendar',
- 'Family',
- 'Holidays in United States',
- 'Lynn Gasch',
- 'Lynn\'s Work',
- 'scott.gasch@gmail.com',
- 'Scott Gasch External - Misc',
- 'Birthdays', # <-- from g+ contacts
- ])
+ calendar_whitelist = frozenset(
+ [
+ "Alex's calendar",
+ "Family",
+ "Holidays in United States",
+ "Lynn Gasch",
+ "Lynn's Work",
+ "scott.gasch@gmail.com",
+ "Scott Gasch External - Misc",
+ "Birthdays", # <-- from g+ contacts
+ ]
+ )
class comparable_event(object):
"""A helper class to sort events."""
+
def __init__(self, start_time, end_time, summary, calendar):
if start_time is None:
- assert(end_time is None)
+ assert end_time is None
self.start_time = start_time
self.end_time = end_time
self.summary = summary
return self.summary < that.summary
if self.start_time is None or that.start_time is None:
return self.start_time is None
- return (self.start_time,
- self.end_time,
- self.summary,
- self.calendar) < (that.start_time,
- that.end_time,
- that.summary,
- that.calendar)
+ return (self.start_time, self.end_time, self.summary, self.calendar) < (
+ that.start_time,
+ that.end_time,
+ that.summary,
+ that.calendar,
+ )
def __str__(self):
- return '[%s] %s' % (self.timestamp(), self.friendly_name())
+ return "[%s] %s" % (self.timestamp(), self.friendly_name())
def friendly_name(self):
name = self.summary
def timestamp(self):
if self.start_time is None:
return "None"
- elif (self.start_time.hour == 0):
- return datetime.datetime.strftime(self.start_time,
- '%a %b %d %Y')
+ elif self.start_time.hour == 0:
+ return datetime.datetime.strftime(self.start_time, "%a %b %d %Y")
else:
- return datetime.datetime.strftime(self.start_time,
- '%a %b %d %Y %H:%M%p')
+ return datetime.datetime.strftime(
+ self.start_time, "%a %b %d %Y %H:%M%p"
+ )
def __init__(self, name_to_timeout_dict, oauth):
super(gcal_renderer, self).__init__(name_to_timeout_dict, True)
def periodic_render(self, key):
self.debug_print('called for "%s"' % key)
- if (key == "Render Upcoming Events"):
+ if key == "Render Upcoming Events":
return self.render_upcoming_events()
- elif (key == "Look For Triggered Events"):
+ elif key == "Look For Triggered Events":
return self.look_for_triggered_events()
else:
- raise error('Unexpected operation')
+ raise error("Unexpected operation")
def render_upcoming_events(self):
page_token = None
+
def format_datetime(x):
- return datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%SZ')
+ return datetime.datetime.strftime(x, "%Y-%m-%dT%H:%M:%SZ")
+
now = datetime.datetime.now()
time_min = now - datetime.timedelta(1)
time_max = now + datetime.timedelta(95)
# Writes 2 files:
# + "upcoming events",
-    # + a countdown timer for a subser of events,
+    # + a countdown timer for a subset of events,
- f = file_writer.file_writer('gcal_3_86400.html')
- f.write('<h1>Upcoming Calendar Events:</h1><hr>\n')
- f.write('<center><table width=96%>\n')
+ f = file_writer.file_writer("gcal_3_86400.html")
+ f.write("<h1>Upcoming Calendar Events:</h1><hr>\n")
+ f.write("<center><table width=96%>\n")
- g = file_writer.file_writer('countdown_3_7200.html')
- g.write('<h1>Countdowns:</h1><hr><ul>\n')
+ g = file_writer.file_writer("countdown_3_7200.html")
+ g.write("<h1>Countdowns:</h1><hr><ul>\n")
try:
self.sortable_events = []
self.countdown_events = []
while True:
- calendar_list = self.client.calendarList().list(
- pageToken=page_token).execute()
- for calendar in calendar_list['items']:
- if (calendar['summary'] in gcal_renderer.calendar_whitelist):
- events = self.client.events().list(
- calendarId=calendar['id'],
- singleEvents=True,
- timeMin=time_min,
- timeMax=time_max,
- maxResults=50).execute()
+ calendar_list = (
+ self.client.calendarList().list(pageToken=page_token).execute()
+ )
+ for calendar in calendar_list["items"]:
+ if calendar["summary"] in gcal_renderer.calendar_whitelist:
+ events = (
+ self.client.events()
+ .list(
+ calendarId=calendar["id"],
+ singleEvents=True,
+ timeMin=time_min,
+ timeMax=time_max,
+ maxResults=50,
+ )
+ .execute()
+ )
def parse_date(x):
- y = x.get('date')
+ y = x.get("date")
if y:
- y = datetime.datetime.strptime(y, '%Y-%m-%d')
+ y = datetime.datetime.strptime(y, "%Y-%m-%d")
else:
- y = x.get('dateTime')
+ y = x.get("dateTime")
if y:
- y = datetime.datetime.strptime(y[:-6],
- '%Y-%m-%dT%H:%M:%S')
+ y = datetime.datetime.strptime(
+ y[:-6], "%Y-%m-%dT%H:%M:%S"
+ )
else:
y = None
return y
- for event in events['items']:
+ for event in events["items"]:
try:
- summary = event['summary']
- self.debug_print("event '%s' (%s to %s)" % (
- summary, event['start'], event['end']))
- start = parse_date(event['start'])
- end = parse_date(event['end'])
+ summary = event["summary"]
+ self.debug_print(
+ "event '%s' (%s to %s)"
+ % (summary, event["start"], event["end"])
+ )
+ start = parse_date(event["start"])
+ end = parse_date(event["end"])
self.sortable_events.append(
- gcal_renderer.comparable_event(start,
- end,
- summary,
- calendar['summary']))
- if ('countdown' in summary or
- 'Holidays' in calendar['summary'] or
- 'Countdown' in summary):
+ gcal_renderer.comparable_event(
+ start, end, summary, calendar["summary"]
+ )
+ )
+ if (
+ "countdown" in summary
+ or "Holidays" in calendar["summary"]
+ or "Countdown" in summary
+ ):
self.debug_print("event is countdown worthy")
self.countdown_events.append(
- gcal_renderer.comparable_event(start,
- end,
- summary,
- calendar['summary']))
+ gcal_renderer.comparable_event(
+ start, end, summary, calendar["summary"]
+ )
+ )
except Exception as e:
- print("gcal unknown exception, skipping event.");
+ print("gcal unknown exception, skipping event.")
else:
- self.debug_print("Skipping calendar '%s'" % calendar['summary'])
- page_token = calendar_list.get('nextPageToken')
- if not page_token: break
+ self.debug_print("Skipping calendar '%s'" % calendar["summary"])
+ page_token = calendar_list.get("nextPageToken")
+ if not page_token:
+ break
self.sortable_events.sort()
upcoming_sortable_events = self.sortable_events[:12]
for event in upcoming_sortable_events:
self.debug_print("sorted event: %s" % event.friendly_name())
- f.write("""
+ f.write(
+ """
<tr>
<td style="padding-right: 1em;">
%s
<td style="padding-left: 1em;">
%s
</td>
-</tr>\n""" % (event.timestamp(), event.friendly_name()))
- f.write('</table></center>\n')
+</tr>\n"""
+ % (event.timestamp(), event.friendly_name())
+ )
+ f.write("</table></center>\n")
f.close()
self.countdown_events.sort()
upcoming_countdown_events = self.countdown_events[:12]
now = datetime.datetime.now()
count = 0
- timestamps = { }
+ timestamps = {}
for event in upcoming_countdown_events:
eventstamp = event.start_time
delta = eventstamp - now
days = divmod(x, constants.seconds_per_day)
hours = divmod(days[1], constants.seconds_per_hour)
minutes = divmod(hours[1], constants.seconds_per_minute)
- g.write('<li><SPAN id="%s">%d days, %02d:%02d</SPAN> until %s</li>\n' % (identifier, days[0], hours[0], minutes[0], name))
+ g.write(
+ '<li><SPAN id="%s">%d days, %02d:%02d</SPAN> until %s</li>\n'
+ % (identifier, days[0], hours[0], minutes[0], name)
+ )
timestamps[identifier] = time.mktime(eventstamp.timetuple())
count += 1
- self.debug_print("countdown to %s is %dd %dh %dm" % (
- name, days[0], hours[0], minutes[0]))
- g.write('</ul>')
- g.write('<SCRIPT>\nlet timestampMap = new Map([')
+ self.debug_print(
+ "countdown to %s is %dd %dh %dm"
+ % (name, days[0], hours[0], minutes[0])
+ )
+ g.write("</ul>")
+ g.write("<SCRIPT>\nlet timestampMap = new Map([")
for x in list(timestamps.keys()):
g.write(' ["%s", %f],\n' % (x, timestamps[x] * 1000.0))
- g.write(']);\n\n')
- g.write("""
+ g.write("]);\n\n")
+ g.write(
+ """
// Pad things with a leading zero if necessary.
function pad(n) {
return (n < 10) ? ("0" + n) : n;
}
}
}, 1000);
-</script>""");
+</script>"""
+ )
g.close()
return True
except (gdata.service.RequestError, AccessTokenRefreshError):
def look_for_triggered_events(self):
f = file_writer.file_writer(constants.gcal_imminent_pagename)
- f.write('<h1>Imminent Upcoming Calendar Events:</h1>\n<hr>\n')
- f.write('<center><table width=99%>\n')
+ f.write("<h1>Imminent Upcoming Calendar Events:</h1>\n<hr>\n")
+ f.write("<center><table width=99%>\n")
now = datetime.datetime.now()
count = 0
for event in self.sortable_events:
eventstamp = event.start_time
name = event.friendly_name()
calendar = event.calendar
- f.write("<LI> %s (%s) upcoming in %d minutes.\n" % (name, calendar, minutes[0]))
+ f.write(
+ "<LI> %s (%s) upcoming in %d minutes.\n"
+ % (name, calendar, minutes[0])
+ )
count += 1
f.write("</table>")
f.close()
import globals
import trigger
+
class gcal_trigger(trigger.trigger):
def get_triggered_page_list(self):
if globals.get("gcal_triggered") == True:
else:
return None
-#globals.put('gcal_triggered', True)
-#x = gcal_trigger()
-#x.get_triggered_page_list()
+
+# globals.put('gcal_triggered', True)
+# x = gcal_trigger()
+# x.get_triggered_page_list()
import sys
import urllib.request, urllib.parse, urllib.error
+
-try:
-    import http.client # python2
-except ImportError:
-    import http.client # python3
+import http.client  # both try/except branches imported the same module
import os.path
import json
import time
import datetime
import ssl
+
class OAuth:
def __init__(self, client_id, client_secret):
print("gdata: initializing oauth token...")
self.client_id = client_id
self.client_secret = client_secret
self.user_code = None
- #print 'Client id: %s' % (client_id)
- #print 'Client secret: %s' % (client_secret)
+ # print 'Client id: %s' % (client_id)
+ # print 'Client secret: %s' % (client_secret)
self.token = None
self.device_code = None
-        self.verfication_url = None
+        self.verification_url = None
- self.token_file = 'client_secrets.json'
+ self.token_file = "client_secrets.json"
self.scope = [
#'https://www.googleapis.com/auth/calendar',
#'https://www.googleapis.com/auth/drive',
#'https://docs.google.com/feeds',
#'https://www.googleapis.com/auth/calendar.readonly',
#'https://picasaweb.google.com/data/',
- 'https://www.googleapis.com/auth/photoslibrary.readonly',
+ "https://www.googleapis.com/auth/photoslibrary.readonly",
#'http://picasaweb.google.com/data/',
#'https://www.google.com/calendar/feeds/',
]
- self.host = 'accounts.google.com'
+ self.host = "accounts.google.com"
self.reset_connection()
self.load_token()
self.last_action = 0
# exception, after which we always get httplib.CannotSendRequest errors.
-    # When this happens, we try re-creating the exception.
+    # When this happens, we try re-creating the connection.
def reset_connection(self):
- self.ssl_ctx = ssl.create_default_context(cafile='/usr/local/etc/ssl/cert.pem')
+ self.ssl_ctx = ssl.create_default_context(cafile="/usr/local/etc/ssl/cert.pem")
http.client.HTTPConnection.debuglevel = 2
self.conn = http.client.HTTPSConnection(self.host, context=self.ssl_ctx)
f.close()
def save_token(self):
- f = open(self.token_file, 'w')
+ f = open(self.token_file, "w")
f.write(json.dumps(self.token))
f.close()
self.conn.request(
"POST",
"/o/oauth2/device/code",
- urllib.parse.urlencode({
- 'client_id': self.client_id,
- 'scope' : ' '.join(self.scope)
- }),
- {"Content-type": "application/x-www-form-urlencoded"})
+ urllib.parse.urlencode(
+ {"client_id": self.client_id, "scope": " ".join(self.scope)}
+ ),
+ {"Content-type": "application/x-www-form-urlencoded"},
+ )
response = self.conn.getresponse()
if response.status == 200:
data = json.loads(response.read())
- self.device_code = data['device_code']
- self.user_code = data['user_code']
- self.verification_url = data['verification_url']
- self.retry_interval = data['interval']
+ self.device_code = data["device_code"]
+ self.user_code = data["user_code"]
+ self.verification_url = data["verification_url"]
+ self.retry_interval = data["interval"]
else:
print(("gdata: %d" % response.status))
print((response.read()))
self.conn.request(
"POST",
"/o/oauth2/token",
- urllib.parse.urlencode({
- 'client_id' : self.client_id,
- 'client_secret' : self.client_secret,
- 'code' : self.device_code,
- 'grant_type' : 'http://oauth.net/grant_type/device/1.0'
- }),
- {"Content-type": "application/x-www-form-urlencoded"})
+ urllib.parse.urlencode(
+ {
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "code": self.device_code,
+ "grant_type": "http://oauth.net/grant_type/device/1.0",
+ }
+ ),
+ {"Content-type": "application/x-www-form-urlencoded"},
+ )
response = self.conn.getresponse()
if response.status == 200:
data = json.loads(response.read())
- if 'access_token' in data:
+ if "access_token" in data:
self.token = data
self.save_token()
else:
print("gdata: not refreshing yet, too soon...")
return False
else:
- print('gdata: trying to refresh oauth token...')
+ print("gdata: trying to refresh oauth token...")
self.reset_connection()
- refresh_token = self.token['refresh_token']
+ refresh_token = self.token["refresh_token"]
self.conn.request(
"POST",
"/o/oauth2/token",
- urllib.parse.urlencode({
- 'client_id' : self.client_id,
- 'client_secret' : self.client_secret,
- 'refresh_token' : refresh_token,
- 'grant_type' : 'refresh_token'
- }),
- {"Content-type": "application/x-www-form-urlencoded"})
+ urllib.parse.urlencode(
+ {
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "refresh_token": refresh_token,
+ "grant_type": "refresh_token",
+ }
+ ),
+ {"Content-type": "application/x-www-form-urlencoded"},
+ )
response = self.conn.getresponse()
self.last_action = time.time()
if response.status == 200:
data = json.loads(response.read())
- if 'access_token' in data:
+ if "access_token" in data:
self.token = data
# in fact we NEVER get a new refresh token at this point
- if not 'refresh_token' in self.token:
- self.token['refresh_token'] = refresh_token
+ if not "refresh_token" in self.token:
+ self.token["refresh_token"] = refresh_token
self.save_token()
return True
print(("gdata: unexpected response %d to renewal request" % response.status))
# https://developers.google.com/picasa-web/
def photos_service(self):
headers = {
- "Authorization": "%s %s" % (self.token['token_type'], self.token['access_token'])
+ "Authorization": "%s %s"
+ % (self.token["token_type"], self.token["access_token"])
}
client = gdata.photos.service.PhotosService(additional_headers=headers)
return client
# https://developers.google.com/drive/
def docs_service(self):
- cred = OAuth2Credentials(self.token['access_token'],
- self.client_id,
- self.client_secret,
- self.token['refresh_token'],
- datetime.datetime.now(),
- 'http://accounts.google.com/o/oauth2/token',
- 'KitchenKiosk/0.9')
+ cred = OAuth2Credentials(
+ self.token["access_token"],
+ self.client_id,
+ self.client_secret,
+ self.token["refresh_token"],
+ datetime.datetime.now(),
+ "http://accounts.google.com/o/oauth2/token",
+ "KitchenKiosk/0.9",
+ )
http = httplib2.Http(disable_ssl_certificate_validation=True)
http = cred.authorize(http)
- service = build('drive', 'v2', http)
+ service = build("drive", "v2", http)
return service
# https://developers.google.com/google-apps/calendar/
def calendar_service(self):
- cred = OAuth2Credentials(self.token['access_token'],
- self.client_id,
- self.client_secret,
- self.token['refresh_token'],
- datetime.datetime.now(),
- 'http://accounts.google.com/o/oauth2/token',
- 'KitchenKiosk/0.9')
+ cred = OAuth2Credentials(
+ self.token["access_token"],
+ self.client_id,
+ self.client_secret,
+ self.token["refresh_token"],
+ datetime.datetime.now(),
+ "http://accounts.google.com/o/oauth2/token",
+ "KitchenKiosk/0.9",
+ )
http = httplib2.Http(disable_ssl_certificate_validation=True)
http = cred.authorize(http)
- service = build('calendar', 'v3', http)
+ service = build("calendar", "v3", http)
return service
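+
+# Test (sketch only -- assumes a secrets module exposing google_client_id
+# and google_client_secret, as the gdocs_renderer test below also does):
+# import secrets
+# oauth = OAuth(secrets.google_client_id, secrets.google_client_secret)
+# service = oauth.calendar_service()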
import gdata_oauth
import secrets
+
class gdocs_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer to fetches and munge docs from drive.google.com"""
try:
param = {}
if page_token:
- param['pageToken'] = page_token
- param['q'] = self.query
- print("QUERY: %s" % param['q'])
+ param["pageToken"] = page_token
+ param["q"] = self.query
+ print("QUERY: %s" % param["q"])
files = self.client.files().list(**param).execute()
- result.extend(files['items'])
- page_token = files.get('nextPageToken')
+ result.extend(files["items"])
+ page_token = files.get("nextPageToken")
if not page_token:
break
except:
return "font-size:%dpt" % (x)
for f in result:
- print(f['title'])
- print(f['id'])
- self.debug_print("%s (%s)\n" % (f['title'], f['id']))
- title = f['title']
- url = f['exportLinks']['text/html']
+ print(f["title"])
+ print(f["id"])
+ self.debug_print("%s (%s)\n" % (f["title"], f["id"]))
+ title = f["title"]
+ url = f["exportLinks"]["text/html"]
print(f)
print("Fetching %s..." % url)
resp, contents = self.client._http.request(url)
print(contents)
if resp.status == 200:
print("Got contents.")
- contents = re.sub('<body class="..">', '', contents)
- contents = contents.replace('</body>', '')
- contents = re.sub('font-size:([0-9]+)pt', boost_font_size, contents)
- f = file_writer.file_writer('%s_2_3600.html' % title)
+ contents = re.sub('<body class="..">', "", contents)
+ contents = contents.replace("</body>", "")
+ contents = re.sub("font-size:([0-9]+)pt", boost_font_size, contents)
+ f = file_writer.file_writer("%s_2_3600.html" % title)
now = datetime.datetime.now()
- f.write("""
+ f.write(
+ """
<H1>%s</H1>
<!-- Last updated at %s -->
<HR>
<DIV STYLE="-webkit-column-count: 2; -moz-column-count: 2; column-count: 2;">
%s
-</DIV>""" % (title, now, contents))
+</DIV>"""
+ % (title, now, contents)
+ )
f.close()
else:
self.debug_print("error: %s" % resp)
return True
-#oauth = gdata_oauth.OAuth(secrets.google_client_id,
+# oauth = gdata_oauth.OAuth(secrets.google_client_id,
# secrets.google_client_secret)
-#x = gdocs_renderer({"Testing", 12345},
+# x = gdocs_renderer({"Testing", 12345},
# oauth)
-#x.periodic_render("Test")
+# x.periodic_render("Test")
import re
import xml.etree.ElementTree as ET
+
class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
- super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict,
- False)
+ super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict, False)
self.debug = 1
self.feed_site = feed_site
self.feed_uris = feed_uris
return False
def find_title(self, item):
- return item.findtext('title')
+ return item.findtext("title")
def munge_title(self, title):
return title
def find_description(self, item):
- return item.findtext('description')
+ return item.findtext("description")
def munge_description(self, description):
- description = re.sub('<[^>]+>', '', description)
+ description = re.sub("<[^>]+>", "", description)
return description
def find_link(self, item):
- return item.findtext('link')
+ return item.findtext("link")
def munge_link(self, link):
return link
def find_image(self, item):
- return item.findtext('image')
+ return item.findtext("image")
def munge_image(self, image):
return image
def find_pubdate(self, item):
- return item.findtext('pubDate')
+ return item.findtext("pubDate")
def munge_pubdate(self, pubdate):
return pubdate
tzinfo = pubdate.tzinfo
now = datetime.datetime.now(tzinfo)
delta = (now - pubdate).total_seconds() / (60 * 60 * 24)
- if (delta > n):
+ if delta > n:
return True
return False
elif key == "Shuffle News":
return self.shuffle_news()
else:
- raise error('Unexpected operation')
+ raise error("Unexpected operation")
def shuffle_news(self):
headlines = page_builder.page_builder()
return False
for msg in subset:
headlines.add_item(msg)
- headlines.set_custom_html("""
+ headlines.set_custom_html(
+ """
<STYLE>
a:link {
color: black;
text-decoration: none;
font-weight: bold;
}
-</STYLE>""")
- f = file_writer.file_writer('%s_%s_25900.html' % (
- self.get_headlines_page_prefix(),
- self.get_headlines_page_priority()))
+</STYLE>"""
+ )
+ f = file_writer.file_writer(
+ "%s_%s_25900.html"
+ % (self.get_headlines_page_prefix(), self.get_headlines_page_priority())
+ )
headlines.render_html(f)
f.close()
details = page_builder.page_builder()
details.set_layout(page_builder.page_builder.LAYOUT_ONE_ITEM)
- details.set_custom_html("""
+ details.set_custom_html(
+ """
<STYLE>
a:link {
color: black;
text-decoration: none;
font-weight: bold;
}
-</STYLE>""")
+</STYLE>"""
+ )
details.set_title("%s" % self.page_title)
subset = self.details.subset(1)
if subset is None:
- self.debug_print("Not enough details to choose from.");
+ self.debug_print("Not enough details to choose from.")
return False
for msg in subset:
blurb = msg
- blurb += u'</TD>'
+ blurb += u"</TD>"
details.add_item(blurb)
- g = file_writer.file_writer('%s_%s_86400.html' % (
- self.get_details_page_prefix(),
- self.get_details_page_priority()))
+ g = file_writer.file_writer(
+ "%s_%s_86400.html"
+ % (self.get_details_page_prefix(), self.get_details_page_priority())
+ )
details.render_html(g)
g.close()
return True
"GET",
uri,
None,
- { "Accept": "*/*",
- "Cache-control": "max-age=59",
- "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"})
+ {
+ "Accept": "*/*",
+ "Cache-control": "max-age=59",
+ "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
+ },
+ )
try:
response = self.conn.getresponse()
except:
return False
if response.status != 200:
- print(("%s: RSS fetch_news error, response: %d" % (self.page_title,
- response.status)))
+ print(
+ (
+ "%s: RSS fetch_news error, response: %d"
+ % (self.page_title, response.status)
+ )
+ )
self.debug_print(response.read())
return False
title = self.find_title(item)
if title is not None:
title = self.munge_title(title)
- description = item.findtext('description')
+ description = item.findtext("description")
if description is not None:
description = self.munge_description(description)
image = self.find_image(item)
if image is not None:
image = self.munge_image(image)
- link = item.findtext('link')
+ link = item.findtext("link")
if link is not None:
link = self.munge_link(link)
- if (title is None or
- not self.item_is_interesting_for_headlines(title,
- description,
- item)):
+ if title is None or not self.item_is_interesting_for_headlines(
+ title, description, item
+ ):
self.debug_print('Item "%s" is not interesting' % title)
continue
- if (self.should_profanity_filter() and
- (self.filter.contains_bad_words(title) or
- self.filter.contains_bad_words(description))):
+ if self.should_profanity_filter() and (
+ self.filter.contains_bad_words(title)
+ or self.filter.contains_bad_words(description)
+ ):
self.debug_print('Found bad words in item "%s"' % title)
continue
blurb += u'style="padding:8px;">'
if link is None:
- blurb += u'<P><B>%s</B>' % title
+ blurb += u"<P><B>%s</B>" % title
else:
blurb += u'<P><B><A HREF="%s">%s</A></B>' % (link, title)
pubdate = self.munge_pubdate(pubdate)
ts = parse(pubdate)
blurb += u" <FONT COLOR=#cccccc>%s</FONT>" % (
- ts.strftime("%b %d"))
+ ts.strftime("%b %d")
+ )
- if (description is not None and
- self.item_is_interesting_for_article(title,
- description,
- item)):
+ if description is not None and self.item_is_interesting_for_article(
+ title, description, item
+ ):
longblurb = blurb
longblurb += u"<BR>"
longblurb += description
longblurb += u"</DIV>"
- longblurb = longblurb.replace("font-size:34pt",
- "font-size:44pt")
+ longblurb = longblurb.replace("font-size:34pt", "font-size:44pt")
self.details.add(longblurb)
blurb += u"</DIV>"
import renderer
import secrets
+
class gkeep_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
super(gkeep_renderer, self).__init__(name_to_timeout_dict, True)
self.keep = gkeepapi.Keep()
- success = self.keep.login(secrets.google_keep_username,
- secrets.google_keep_password)
+ success = self.keep.login(
+ secrets.google_keep_username, secrets.google_keep_password
+ )
if success:
self.debug_print("Connected with gkeep.")
else:
self.debug_print("Error connecting with gkeep.")
self.colors_by_name = {
- 'white' : '#002222',
- 'green' : '#345920',
- 'darkblue' : '#1F3A5F',
- 'blue' : '#2D545E',
- 'orange' : '#604A19',
- 'red' : '#5C2B29',
- 'purple' : '#42275E',
- 'pink' : '#5B2245',
- 'yellow' : '#635D19',
- 'brown' : '#442F19',
- 'gray' : '#3c3f4c',
- 'teal' : '#16504B'
+ "white": "#002222",
+ "green": "#345920",
+ "darkblue": "#1F3A5F",
+ "blue": "#2D545E",
+ "orange": "#604A19",
+ "red": "#5C2B29",
+ "purple": "#42275E",
+ "pink": "#5B2245",
+ "yellow": "#635D19",
+ "brown": "#442F19",
+ "gray": "#3c3f4c",
+ "teal": "#16504B",
}
def debug_prefix(self):
return "gkeep"
def periodic_render(self, key):
- strikethrough = re.compile('(\u2611[^\n]*)\n', re.UNICODE)
- linkify = re.compile(r'.*(https?:\/\/\S+).*')
+ strikethrough = re.compile("(\u2611[^\n]*)\n", re.UNICODE)
+ linkify = re.compile(r".*(https?:\/\/\S+).*")
self.keep.sync()
- result_list = self.keep.find(labels=[self.keep.findLabel('kiosk')])
+ result_list = self.keep.find(labels=[self.keep.findLabel("kiosk")])
for note in result_list:
title = note.title
title = title.replace(" ", "-")
filename = "%s_2_3600.html" % title
contents = note.text + "\n"
self.debug_print("Note title '%s'" % title)
- if contents != '' and not contents.isspace():
- contents = strikethrough.sub('', contents)
+ if contents != "" and not contents.isspace():
+ contents = strikethrough.sub("", contents)
self.debug_print("Note contents:\n%s" % contents)
- contents = contents.replace(u'\u2610 ',
- u'<LI><INPUT TYPE="checkbox"> ')
+ contents = contents.replace(
+ u"\u2610 ", u'<LI><INPUT TYPE="checkbox"> '
+ )
contents = linkify.sub(r'<a href="\1">\1</a>', contents)
individual_lines = contents.split("\n")
length = len(x)
if length > max_length:
max_length = length
- leading_spaces = len(x) - len(x.lstrip(' '))
+ leading_spaces = len(x) - len(x.lstrip(" "))
leading_spaces /= 2
leading_spaces = int(leading_spaces)
- x = x.lstrip(' ')
+ x = x.lstrip(" ")
# self.debug_print(" * (%d) '%s'" % (leading_spaces, x))
for y in range(0, leading_spaces):
x = "<UL>" + x
else:
self.debug_print("Unknown color '%s'" % color)
f = file_writer.file_writer(filename)
- f.write("""
+ f.write(
+ """
<STYLE type="text/css">
a:link { color:#88bfbf; }
ul { list-style-type:none; }
</STYLE>
<DIV STYLE="border-radius: 25px; border-style: solid; padding: 20px; background-color: %s; color: #eeeeee; font-size: x-large;">
<p style="color: #ffffff; font-size:larger"><B>%s</B></p>
-<HR style="border-top: 3px solid white;">""" % (color, note.title))
+<HR style="border-top: 3px solid white;">"""
+ % (color, note.title)
+ )
if num_lines >= 12 and max_length < 120:
- self.debug_print("%d lines (max=%d chars): two columns" %
- (num_lines, max_length))
- f.write("<TABLE BORDER=0 WIDTH=100%%><TR valign=\"top\">")
- f.write("<TD WIDTH=50%% style=\"color:#eeeeee; font-size:large\">\n")
+ self.debug_print(
+ "%d lines (max=%d chars): two columns" % (num_lines, max_length)
+ )
+ f.write('<TABLE BORDER=0 WIDTH=100%%><TR valign="top">')
+ f.write('<TD WIDTH=50%% style="color:#eeeeee; font-size:large">\n')
f.write("<FONT><UL STYLE='list-style-type:none'>")
count = 0
for x in individual_lines:
count += 1
-            if count == num_lines / 2:
+            if count == num_lines // 2:
f.write("</UL></FONT></TD>\n")
- f.write("<TD WIDTH=50%% style=\"color:#eeeeee; font-size:large\">\n")
+ f.write(
+ '<TD WIDTH=50%% style="color:#eeeeee; font-size:large">\n'
+ )
f.write("<FONT><UL STYLE='list-style-type:none'>")
- f.write("</UL></FONT></TD></TR></TABLE></DIV>\n");
+ f.write("</UL></FONT></TD></TR></TABLE></DIV>\n")
else:
- self.debug_print("%d lines (max=%d chars): one column" %
- (num_lines, max_length))
+ self.debug_print(
+ "%d lines (max=%d chars): one column" % (num_lines, max_length)
+ )
f.write("<FONT><UL>%s</UL></FONT>" % contents)
f.write("</DIV>")
f.close()
pass
return True
+
# Test
-#x = gkeep_renderer({"Test", 1234})
-#x.periodic_render("Test")
+# x = gkeep_renderer({"Test", 1234})
+# x.periodic_render("Test")
data = {}
+
def put(key, value):
data[key] = value
+
def get(key):
if key in data:
return data[key]
else:
return None
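+
+# Test (hypothetical key/value, mirroring the other modules' commented tests):
+# put("some_key", 123)
+# print(get("some_key"))  # -> 123
+# print(get("never_set"))  # -> None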
-
import generic_news_rss_renderer
import re
+
class google_news_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
super(google_news_rss_renderer, self).__init__(
- name_to_timeout_dict,
- feed_site,
- feed_uris,
- page_title)
+ name_to_timeout_dict, feed_site, feed_uris, page_title
+ )
self.debug = 1
def debug_prefix(self):
return "google-news-details"
def find_description(self, item):
- descr = item.findtext('description')
- source = item.findtext('source')
+ descr = item.findtext("description")
+ source = item.findtext("source")
if source is not None:
descr = descr + " (%s)" % source
return descr
def munge_description(self, description):
soup = BeautifulSoup(description)
- for a in soup.findAll('a'):
- del a['href']
+ for a in soup.findAll("a"):
+ del a["href"]
descr = str(soup)
-        return munge_description_internal(descr)
+        return self.munge_description_internal(descr)
def item_is_interesting_for_article(self, title, description, item):
return not self.is_item_older_than_n_days(item, 2)
+
# Test
-#x = google_news_rss_renderer(
+# x = google_news_rss_renderer(
# {"Fetch News" : 1,
# "Shuffle News" : 1},
# "news.google.com",
# [ "/rss?hl=en-US&gl=US&ceid=US:en" ],
# "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
# print("Error fetching news, no items fetched.")
-#x.shuffle_news()
+# x.shuffle_news()
#
-#descr = "this is a lot of really long text about nothign in particular. It's pretty interesting, don't you think? I hope that the munge description method works by both truncating it and remembering to close any open <LI>items as well as making sure not to truncate in the middle of a <A HREF=\"whatever\" these are a bunch of useless arguments to the A tag that make it really long so that the truncate will happen in the middle of it. I'm getting kind of tired of typing shit so I'm going to revert to copy pasta now. Sorry if you were getting into this story. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog.</A></LI> Out!"
-#d = x.munge_description_internal(descr)
-#print(d)
-
+# descr = "this is a lot of really long text about nothign in particular. It's pretty interesting, don't you think? I hope that the munge description method works by both truncating it and remembering to close any open <LI>items as well as making sure not to truncate in the middle of a <A HREF=\"whatever\" these are a bunch of useless arguments to the A tag that make it really long so that the truncate will happen in the middle of it. I'm getting kind of tired of typing shit so I'm going to revert to copy pasta now. Sorry if you were getting into this story. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog.</A></LI> Out!"
+# d = x.munge_description_internal(descr)
+# print(d)
import random
+
class grab_bag(object):
def __init__(self):
self.contents = set()
def size(self):
return len(self.contents)
-#x = grab_bag()
-#x.add_all([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
-#print x.subset(3)
+
+# x = grab_bag()
+# x.add_all([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+# print(x.subset(3))
import renderer
import time
+
class periodic_health_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
super(periodic_health_renderer, self).__init__(name_to_timeout_dict, False)
return "health"
def periodic_render(self, key):
- f = file_writer.file_writer('periodic-health_6_300.html')
- timestamps = '/timestamps/'
+ f = file_writer.file_writer("periodic-health_6_300.html")
+ timestamps = "/timestamps/"
days = constants.seconds_per_day
hours = constants.seconds_per_hour
mins = constants.seconds_per_minute
minutes = mins
limits = {
- timestamps + 'last_http_probe_wannabe_house' : mins * 10,
- timestamps + 'last_http_probe_meerkat_cabin' : mins * 10,
- timestamps + 'last_http_probe_dns_house' : mins * 10,
- timestamps + 'last_http_probe_rpi_cabin' : mins * 10,
- timestamps + 'last_http_probe_rpi_house' : mins * 10,
- timestamps + 'last_http_probe_therm_house' : mins * 10,
-
- timestamps + 'last_rsnapshot_hourly' : hours * 24,
- timestamps + 'last_rsnapshot_daily' : days * 3,
- timestamps + 'last_rsnapshot_weekly' : days * 14,
- timestamps + 'last_rsnapshot_monthly' : days * 70,
-
- timestamps + 'last_zfssnapshot_hourly' : hours * 5,
- timestamps + 'last_zfssnapshot_daily' : hours * 36,
- timestamps + 'last_zfssnapshot_weekly' : days * 9,
- timestamps + 'last_zfssnapshot_monthly' : days * 70,
- timestamps + 'last_zfssnapshot_cleanup' : hours * 24,
-
- timestamps + 'last_zfs_scrub' : days * 9,
- timestamps + 'last_backup_zfs_scrub' : days * 9,
- timestamps + 'last_cabin_zfs_scrub' : days * 9,
-
- timestamps + 'last_zfsxfer_backup.house' : hours * 36,
- timestamps + 'last_zfsxfer_ski.dyn.guru.org' : days * 7,
- timestamps + 'last_photos_sync' : hours * 8,
-
- timestamps + 'last_disk_selftest_short' : days * 14,
- timestamps + 'last_disk_selftest_long' : days * 31,
- timestamps + 'last_backup_disk_selftest_short': days * 14,
- timestamps + 'last_backup_disk_selftest_long' : days * 31,
- timestamps + 'last_cabin_disk_selftest_short' : days * 14,
- timestamps + 'last_cabin_disk_selftest_long' : days * 31,
-
- timestamps + 'last_cabin_rpi_ping' : mins * 10,
- timestamps + 'last_healthy_wifi' : mins * 10,
- timestamps + 'last_healthy_network' : mins * 10,
- timestamps + 'last_scott_sync' : days * 2,
+ timestamps + "last_http_probe_wannabe_house": mins * 10,
+ timestamps + "last_http_probe_meerkat_cabin": mins * 10,
+ timestamps + "last_http_probe_dns_house": mins * 10,
+ timestamps + "last_http_probe_rpi_cabin": mins * 10,
+ timestamps + "last_http_probe_rpi_house": mins * 10,
+ timestamps + "last_http_probe_therm_house": mins * 10,
+ timestamps + "last_rsnapshot_hourly": hours * 24,
+ timestamps + "last_rsnapshot_daily": days * 3,
+ timestamps + "last_rsnapshot_weekly": days * 14,
+ timestamps + "last_rsnapshot_monthly": days * 70,
+ timestamps + "last_zfssnapshot_hourly": hours * 5,
+ timestamps + "last_zfssnapshot_daily": hours * 36,
+ timestamps + "last_zfssnapshot_weekly": days * 9,
+ timestamps + "last_zfssnapshot_monthly": days * 70,
+ timestamps + "last_zfssnapshot_cleanup": hours * 24,
+ timestamps + "last_zfs_scrub": days * 9,
+ timestamps + "last_backup_zfs_scrub": days * 9,
+ timestamps + "last_cabin_zfs_scrub": days * 9,
+ timestamps + "last_zfsxfer_backup.house": hours * 36,
+ timestamps + "last_zfsxfer_ski.dyn.guru.org": days * 7,
+ timestamps + "last_photos_sync": hours * 8,
+ timestamps + "last_disk_selftest_short": days * 14,
+ timestamps + "last_disk_selftest_long": days * 31,
+ timestamps + "last_backup_disk_selftest_short": days * 14,
+ timestamps + "last_backup_disk_selftest_long": days * 31,
+ timestamps + "last_cabin_disk_selftest_short": days * 14,
+ timestamps + "last_cabin_disk_selftest_long": days * 31,
+ timestamps + "last_cabin_rpi_ping": mins * 10,
+ timestamps + "last_healthy_wifi": mins * 10,
+ timestamps + "last_healthy_network": mins * 10,
+ timestamps + "last_scott_sync": days * 2,
}
self.write_header(f)
age = now - ts
self.debug_print("%s -- age is %ds, limit is %ds" % (x, age, limits[x]))
if age < limits[x]:
- f.write('<TD BGCOLOR="#007010" HEIGHT=100 WIDTH=33% STYLE="text-size:60%; vertical-align: middle;">\n')
+ f.write(
+ '<TD BGCOLOR="#007010" HEIGHT=100 WIDTH=33% STYLE="text-size:60%; vertical-align: middle;">\n'
+ )
else:
- f.write('<TD BGCOLOR="#990000" HEIGHT=100 WIDTH=33% CLASS="invalid" STYLE="text-size:60%; vertical-align:middle;">\n')
+ f.write(
+ '<TD BGCOLOR="#990000" HEIGHT=100 WIDTH=33% CLASS="invalid" STYLE="text-size:60%; vertical-align:middle;">\n'
+ )
f.write(" <CENTER><FONT SIZE=-2>\n")
name = x.replace(timestamps, "")
hours = divmod(days[1], constants.seconds_per_hour)
minutes = divmod(hours[1], constants.seconds_per_minute)
- self.debug_print("%s is %d days %02d:%02d old." % (
- name, days[0], hours[0], minutes[0]))
- f.write("%s<BR>\n<B>%d</b> days <B>%02d</B>:<B>%02d</B> old.\n" % (
- name, days[0], hours[0], minutes[0]))
+ self.debug_print(
+ "%s is %d days %02d:%02d old." % (name, days[0], hours[0], minutes[0])
+ )
+ f.write(
+ "%s<BR>\n<B>%d</b> days <B>%02d</B>:<B>%02d</B> old.\n"
+ % (name, days[0], hours[0], minutes[0])
+ )
f.write("</FONT></CENTER>\n</TD>\n\n")
n += 1
if n % 3 == 0:
return True
def write_header(self, f):
- f.write("""
+ f.write(
+ """
<HTML>
<HEAD>
<STYLE>
<CENTER>
<TABLE BORDER=0 WIDTH=99% style="font-size:16pt">
<TR>
-""")
+"""
+ )
def write_footer(self, f):
- f.write("""
+ f.write(
+ """
</TR>
</TABLE>
</BODY>
-</HTML>""")
+</HTML>"""
+ )
+
-test = periodic_health_renderer({"Test", 123})
-test.periodic_render("Test")
+# Test
+# test = periodic_health_renderer({"Test", 123})
+# test.periodic_render("Test")
import trigger_catalog
import utils
+
def filter_news_during_dinnertime(page):
now = datetime.now()
is_dinnertime = now.hour >= 17 and now.hour <= 20
- return (not is_dinnertime or
- not ("cnn" in page or
- "news" in page or
- "mynorthwest" in page or
- "seattle" in page or
- "stranger" in page or
- "twitter" in page or
- "wsj" in page))
+ return not is_dinnertime or not (
+ "cnn" in page
+ or "news" in page
+ or "mynorthwest" in page
+ or "seattle" in page
+ or "stranger" in page
+ or "twitter" in page
+ or "wsj" in page
+ )
+
def thread_change_current():
page_chooser = chooser.weighted_random_chooser_with_triggers(
- trigger_catalog.get_triggers(),
- [ filter_news_during_dinnertime ])
+ trigger_catalog.get_triggers(), [filter_news_during_dinnertime]
+ )
swap_page_target = 0
last_page = ""
while True:
(page, triggered) = page_chooser.choose_next_page()
if triggered:
- print('chooser[%s] - WE ARE TRIGGERED.' % utils.timestamp())
+ print("chooser[%s] - WE ARE TRIGGERED." % utils.timestamp())
if page != last_page:
- print('chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED' % (
- utils.timestamp(), page))
+ print(
+ "chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED"
+ % (utils.timestamp(), page)
+ )
try:
- f = open(os.path.join(constants.pages_dir,
- 'current.shtml'), 'w')
+ f = open(os.path.join(constants.pages_dir, "current.shtml"), "w")
emit_wrapped(f, page)
f.close()
except:
- print('chooser[%s] - page does not exist?!' % (
- utils.timestamp()))
+ print("chooser[%s] - page does not exist?!" % (utils.timestamp()))
continue
last_page = page
swap_page_target = now + constants.refresh_period_sec
# Also notify XMLHTTP clients that they need to refresh now.
- path = os.path.join(constants.pages_dir,
- 'reload_immediately.html')
- f = open(path, 'w')
- f.write('Reload, suckers!')
+ path = os.path.join(constants.pages_dir, "reload_immediately.html")
+ f = open(path, "w")
+ f.write("Reload, suckers!")
f.close()
# Fix this hack... maybe read the webserver logs and see if it
os.remove(path)
elif now >= swap_page_target:
- if (page == last_page):
- print(('chooser[%s] - nominal choice got the same page...' % (
- utils.timestamp())))
+ if page == last_page:
+ print(
+ (
+ "chooser[%s] - nominal choice got the same page..."
+ % (utils.timestamp())
+ )
+ )
continue
- print('chooser[%s] - nominal choice of %s' % (utils.timestamp(), page))
+ print("chooser[%s] - nominal choice of %s" % (utils.timestamp(), page))
try:
- f = open(os.path.join(constants.pages_dir,
- 'current.shtml'), 'w')
+ f = open(os.path.join(constants.pages_dir, "current.shtml"), "w")
emit_wrapped(f, page)
f.close()
except:
- print('chooser[%s] - page does not exist?!' % (utils.timestamp()))
+ print("chooser[%s] - page does not exist?!" % (utils.timestamp()))
continue
last_page = page
swap_page_target = now + constants.refresh_period_sec
time.sleep(1)
+
def pick_background_color():
now = datetime.now()
if now.hour <= 6 or now.hour >= 21:
else:
return "FFFFFF"
+
def emit_wrapped(f, filename):
age = utils.describe_age_of_file_briefly("pages/%s" % filename)
bgcolor = pick_background_color()
- f.write("""
+ f.write(
+ """
<HEAD>
<TITLE>Kitchen Kiosk</TITLE>
<LINK rel="stylesheet" type="text/css" href="style.css">
</TD>
</TR>
</TABLE>
-</BODY>""" % (bgcolor,
- constants.refresh_period_sec * 1000,
- bgcolor,
- filename,
- filename,
- age))
+</BODY>"""
+ % (
+ bgcolor,
+ constants.refresh_period_sec * 1000,
+ bgcolor,
+ filename,
+ filename,
+ age,
+ )
+ )
+
def thread_invoke_renderers():
while True:
- print("renderer[%s]: invoking all renderers in catalog..." % (
- utils.timestamp()))
+ print(
+ "renderer[%s]: invoking all renderers in catalog..." % (utils.timestamp())
+ )
for r in renderer_catalog.get_renderers():
now = time.time()
try:
r.render()
except Exception as e:
traceback.print_exc()
- print("renderer[%s] unknown exception in %s, swallowing it." % (
- utils.timestamp(), r.get_name()))
+ print(
+ "renderer[%s] unknown exception in %s, swallowing it."
+ % (utils.timestamp(), r.get_name())
+ )
except Error as e:
traceback.print_exc()
- print("renderer[%s] unknown error in %s, swallowing it." % (
- utils.timestamp(), r.get_name()))
+ print(
+ "renderer[%s] unknown error in %s, swallowing it."
+ % (utils.timestamp(), r.get_name())
+ )
delta = time.time() - now
- if (delta > 1.0):
- print("renderer[%s]: Warning: %s's rendering took %5.2fs." % (
- utils.timestamp(), r.get_name(), delta))
- print("renderer[%s]: thread having a little break for %ds..." % (
- utils.timestamp(), constants.render_period_sec))
+ if delta > 1.0:
+ print(
+ "renderer[%s]: Warning: %s's rendering took %5.2fs."
+ % (utils.timestamp(), r.get_name(), delta)
+ )
+ print(
+ "renderer[%s]: thread having a little break for %ds..."
+ % (utils.timestamp(), constants.render_period_sec)
+ )
time.sleep(constants.render_period_sec)
+
if __name__ == "__main__":
logging.basicConfig()
changer_thread = None
renderer_thread = None
while True:
- if (changer_thread == None or
- not changer_thread.is_alive()):
- print("MAIN[%s] - (Re?)initializing chooser thread..." % (
- utils.timestamp()))
- changer_thread = Thread(target = thread_change_current, args=())
+ if changer_thread == None or not changer_thread.is_alive():
+ print(
+ "MAIN[%s] - (Re?)initializing chooser thread..." % (utils.timestamp())
+ )
+ changer_thread = Thread(target=thread_change_current, args=())
changer_thread.start()
- if (renderer_thread == None or
- not renderer_thread.is_alive()):
- print("MAIN[%s] - (Re?)initializing render thread..." % (
- utils.timestamp()))
- renderer_thread = Thread(target = thread_invoke_renderers, args=())
+ if renderer_thread == None or not renderer_thread.is_alive():
+ print("MAIN[%s] - (Re?)initializing render thread..." % (utils.timestamp()))
+ renderer_thread = Thread(target=thread_invoke_renderers, args=())
renderer_thread.start()
time.sleep(60)
print("Should never get here.")
import random
import re
+
class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer that uses a local mirror of Google photos"""
album_root_directory = "/usr/local/export/www/gphotos/albums"
- album_whitelist = frozenset([
- '8-Mile Lake Hike',
- 'Bangkok and Phuket, 2003',
- 'Barn',
- 'Blue Angels... Seafair',
- 'Chihuly Glass',
- 'Dunn Gardens',
- 'East Coast 2018',
- 'Fall \'17',
- 'Friends',
- 'Hiking',
- 'Key West 2019',
- 'Krakow 2009',
- 'Kubota Gardens',
- 'Las Vegas, 2017',
- 'London, 2018',
- 'Munich, July 2018',
- 'NJ 2015',
- 'Newer Alex Photos',
- 'Ohme Gardens',
- 'Olympic Sculpture Park',
- 'Prague and Munich 2019',
- 'Random',
- 'Scott and Lynn',
- 'SFO 2014',
- 'Skiing with Alex',
- 'Sonoma',
- 'Trip to California, \'16',
- 'Trip to San Francisco',
- 'Trip to East Coast \'16',
- 'Tuscany 2008',
- 'Yosemite 2010',
- 'Zoo',
- ])
+ album_whitelist = frozenset(
+ [
+ "8-Mile Lake Hike",
+ "Bangkok and Phuket, 2003",
+ "Barn",
+ "Blue Angels... Seafair",
+ "Chihuly Glass",
+ "Dunn Gardens",
+ "East Coast 2018",
+ "Fall '17",
+ "Friends",
+ "Hiking",
+ "Key West 2019",
+ "Krakow 2009",
+ "Kubota Gardens",
+ "Las Vegas, 2017",
+ "London, 2018",
+ "Munich, July 2018",
+ "NJ 2015",
+ "Newer Alex Photos",
+ "Ohme Gardens",
+ "Olympic Sculpture Park",
+ "Prague and Munich 2019",
+ "Random",
+ "Scott and Lynn",
+ "SFO 2014",
+ "Skiing with Alex",
+ "Sonoma",
+ "Trip to California, '16",
+ "Trip to San Francisco",
+ "Trip to East Coast '16",
+ "Tuscany 2008",
+ "Yosemite 2010",
+ "Zoo",
+ ]
+ )
- extension_whitelist = frozenset([
- 'jpg',
- 'gif',
- 'JPG',
- 'jpeg',
- 'GIF',
- ])
+ extension_whitelist = frozenset(
+ [
+ "jpg",
+ "gif",
+ "JPG",
+ "jpeg",
+ "GIF",
+ ]
+ )
def __init__(self, name_to_timeout_dict):
super(local_photos_mirror_renderer, self).__init__(name_to_timeout_dict, False)
return "local_photos_mirror"
def periodic_render(self, key):
- if (key == 'Index Photos'):
+ if key == "Index Photos":
return self.index_photos()
- elif (key == 'Choose Photo'):
+ elif key == "Choose Photo":
return self.choose_photo()
else:
- raise error('Unexpected operation')
+ raise error("Unexpected operation")
def album_is_in_whitelist(self, name):
for wlalbum in self.album_whitelist:
- if re.search('\d+ %s' % wlalbum, name) != None:
+        if re.search(r"\d+ %s" % wlalbum, name) != None:
return True
return False
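+    # (album directories in the mirror are assumed to be named
+    # "<numeric id> <album name>", e.g. "0123 Zoo", hence the \d+ prefix above)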
# keep their paths in memory.
def index_photos(self):
for root, subdirs, files in os.walk(self.album_root_directory):
- last_dir = root.rsplit('/', 1)[1]
+ last_dir = root.rsplit("/", 1)[1]
if self.album_is_in_whitelist(last_dir):
for x in files:
- extension = x.rsplit('.', 1)[1]
+                extension = x.rsplit(".", 1)[-1]
if extension in self.extension_whitelist:
photo_path = os.path.join(root, x)
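+                        # map the on-disk path to the URL the kiosk's local
+                        # webserver (assumed to be at 10.0.0.18) serves it from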
photo_url = photo_path.replace(
- "/usr/local/export/www/",
- "http://10.0.0.18/",
- 1)
+ "/usr/local/export/www/", "http://10.0.0.18/", 1
+ )
self.candidate_photos.add(photo_url)
return True
print("No photos!")
return False
-    path = random.sample(self.candidate_photos, 1)[0]
+    path = random.choice(list(self.candidate_photos))
- f = file_writer.file_writer('photo_23_3600.html')
- f.write("""
+ f = file_writer.file_writer("photo_23_3600.html")
+ f.write(
+ """
<style>
body{background-color:#303030;}
div#time{color:#dddddd;}
div#date{color:#dddddd;}
</style>
-<center>""")
- f.write('<img src="%s" style="display:block;max-width=800;max-height:600;width:auto;height:auto">' % path)
+<center>"""
+ )
+    f.write(
+        '<img src="%s" style="display:block;max-width:800px;max-height:600px;width:auto;height:auto">'
+        % path
+    )
f.write("</center>")
f.close()
return True
+
# Test code
-#x = local_photos_mirror_renderer({"Index Photos": (60 * 60 * 12),
+# x = local_photos_mirror_renderer({"Index Photos": (60 * 60 * 12),
# "Choose Photo": (1)})
-#x.index_photos()
-#x.choose_photo()
+# x.index_photos()
+# x.choose_photo()
import generic_news_rss_renderer
+
class mynorthwest_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
super(mynorthwest_rss_renderer, self).__init__(
- name_to_timeout_dict,
- feed_site,
- feed_uris,
- page_title)
+ name_to_timeout_dict, feed_site, feed_uris, page_title
+ )
self.debug = 1
def debug_prefix(self):
return "mynorthwest-details-%s" % (self.page_title)
def find_image(self, item):
- image = item.findtext('media:content')
+        image = item.find("media:content")
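+        # find() (rather than findtext()) so we get the element itself and can
+        # read its "url" attribute; assumes the parser maps the "media:" prefix.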
if image is not None:
- image_url = image.get('url')
+ image_url = image.get("url")
return image_url
return None
return False
return True
+
# Test
-#x = mynorthwest_rss_renderer(
+# x = mynorthwest_rss_renderer(
# {"Fetch News" : 1,
# "Shuffle News" : 1},
# "mynorthwest.com",
# [ "/feed/" ],
# "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
# print "Error fetching news, no items fetched."
-#x.shuffle_news()
-
+# x.shuffle_news()
import renderer
import secrets
+
class garage_door_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
super(garage_door_renderer, self).__init__(name_to_timeout_dict, False)
async def poll_myq(self):
async with ClientSession() as websession:
- myq = await pymyq.login(secrets.myq_username,
- secrets.myq_password,
- websession)
+ myq = await pymyq.login(
+ secrets.myq_username, secrets.myq_password, websession
+ )
self.doors = myq.devices
return len(self.doors) > 0
def update_page(self):
f = file_writer.file_writer(constants.myq_pagename)
- f.write("""
+ f.write(
+ """
<H1>Garage Door Status</H1>
<!-- Last updated at %s -->
<HR>
<TABLE BORDER=0 WIDTH=99%%>
<TR>
-""" % self.last_update)
+"""
+ % self.last_update
+ )
html = self.do_door("Near House")
if html == None:
return False
if html == None:
return False
f.write(html)
- f.write("""
+ f.write(
+ """
</TR>
-</TABLE>""")
+</TABLE>"""
+ )
f.close()
return True
<B>%s</B></FONT><BR>
for %d day(s), %02d:%02d.
</CENTER>
-</TD>""" % (name,
- self.get_state_icon(state),
- width,
- color,
- state,
- days[0], hours[0], minutes[0])
+</TD>""" % (
+ name,
+ self.get_state_icon(state),
+ width,
+ color,
+ state,
+ days[0],
+ hours[0],
+ minutes[0],
+ )
return None
+
# Test
-x = garage_door_renderer({"Test" : 1})
-x.periodic_render("Poll MyQ")
-x.periodic_render("Update Page")
+# x = garage_door_renderer({"Test": 1})
+# x.periodic_render("Poll MyQ")
+# x.periodic_render("Update Page")
import globals
import trigger
+
class myq_trigger(trigger.trigger):
def get_triggered_page_list(self):
if globals.get("myq_triggered") == True:
import sys
+
class page_builder(object):
LAYOUT_AUTO = 0
LAYOUT_ONE_ITEM = 1
def set_custom_html(self, html):
self.custom_html = html
-#x = page_builder()
-#x.set_title("title").add_item("item1").add_item("item2").add_item("item3").render_html(sys.stdout)
+
+# x = page_builder()
+# x.set_title("title").add_item("item1").add_item("item2").add_item("item3").render_html(sys.stdout)
import random
from oauth2client.client import AccessTokenRefreshError
+
class picasa_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer to fetch photos from picasaweb.google.com"""
- album_whitelist = sets.ImmutableSet([
- 'Alex',
- 'Alex 6.0..8.0 years old',
- 'Alex 3.0..4.0 years old',
- 'Barn',
- 'Bangkok and Phukey, 2003',
- 'Blue Angels... Seafair',
- 'Carol Ann and Owen',
- 'Chahuly Glass',
- 'Dunn Gardens',
- 'East Coast, 2011',
- 'East Coast, 2013',
- 'Friends',
- 'Gasches',
- 'Gasch Wedding',
- 'Hiking and Ohme Gardens',
- 'Hiking',
- 'Karen\'s Wedding',
- 'Key West 2019',
- 'Krakow 2009',
- 'Munich, July 2018',
- 'NJ 2015',
- 'NW Trek',
- 'Oahu 2010'
- 'Ocean Shores 2009',
- 'Ohme Gardens',
- 'Olympic Sculpture Park',
- 'Paintings',
- 'Puerto Vallarta',
- 'Photos from posts',
- 'Random',
- 'SFO 2014',
- 'Soccer',
- 'Skiing with Alex',
- 'Tuscany 2008',
- "Trip to California '16",
- "Trip to East Coast '16",
- 'Yosemite 2010',
- 'Zoo',
- ])
+    album_whitelist = frozenset(
+ [
+ "Alex",
+ "Alex 6.0..8.0 years old",
+ "Alex 3.0..4.0 years old",
+ "Barn",
+ "Bangkok and Phukey, 2003",
+ "Blue Angels... Seafair",
+ "Carol Ann and Owen",
+ "Chahuly Glass",
+ "Dunn Gardens",
+ "East Coast, 2011",
+ "East Coast, 2013",
+ "Friends",
+ "Gasches",
+ "Gasch Wedding",
+ "Hiking and Ohme Gardens",
+ "Hiking",
+ "Karen's Wedding",
+ "Key West 2019",
+ "Krakow 2009",
+ "Munich, July 2018",
+ "NJ 2015",
+ "NW Trek",
+ "Oahu 2010" "Ocean Shores 2009",
+ "Ohme Gardens",
+ "Olympic Sculpture Park",
+ "Paintings",
+ "Puerto Vallarta",
+ "Photos from posts",
+ "Random",
+ "SFO 2014",
+ "Soccer",
+ "Skiing with Alex",
+ "Tuscany 2008",
+ "Trip to California '16",
+ "Trip to East Coast '16",
+ "Yosemite 2010",
+ "Zoo",
+ ]
+ )
def __init__(self, name_to_timeout_dict, oauth):
super(picasa_renderer, self).__init__(name_to_timeout_dict, False)
return "picasa"
def periodic_render(self, key):
- if (key == 'Fetch Photos'):
+ if key == "Fetch Photos":
return self.fetch_photos()
- elif (key == 'Shuffle Cached Photos'):
+ elif key == "Shuffle Cached Photos":
return self.shuffle_cached()
else:
- raise error('Unexpected operation')
+ raise error("Unexpected operation")
# Just fetch and cache the photo URLs in memory.
def fetch_photos(self):
temp_height = {}
temp_is_video = {}
conn = http.client.HTTPSConnection("photoslibrary.googleapis.com")
- conn.request("GET",
- "/v1/albums",
- None,
- { "Authorization": "%s %s" % (self.oauth.token['token_type'], self.oauth.token['access_token'])
- })
+ conn.request(
+ "GET",
+ "/v1/albums",
+ None,
+ {
+ "Authorization": "%s %s"
+ % (self.oauth.token["token_type"], self.oauth.token["access_token"])
+ },
+ )
response = conn.getresponse()
if response.status != 200:
print(("Failed to fetch albums, status %d\n" % response.status))
print(response.read())
albums = self.pws.GetUserFeed().entry
for album in albums:
- if (album.title.text not in picasa_renderer.album_whitelist):
+ if album.title.text not in picasa_renderer.album_whitelist:
continue
photos = self.pws.GetFeed(
- '/data/feed/api/user/%s/albumid/%s?kind=photo&imgmax=1024u' %
- (secrets.google_username, album.gphoto_id.text))
+ "/data/feed/api/user/%s/albumid/%s?kind=photo&imgmax=1024u"
+ % (secrets.google_username, album.gphoto_id.text)
+ )
for photo in photos.entry:
- id = '%s/%s' % (photo.albumid.text, photo.gphoto_id.text)
+ id = "%s/%s" % (photo.albumid.text, photo.gphoto_id.text)
temp_is_video[id] = False
resolution = 999999
for x in photo.media.content:
self.height = temp_height
self.is_video = temp_is_video
return True
- except (gdata.service.RequestError,
- gdata.photos.service.GooglePhotosException,
- AccessTokenRefreshError):
+ except (
+ gdata.service.RequestError,
+ gdata.photos.service.GooglePhotosException,
+ AccessTokenRefreshError,
+ ):
print("******** TRYING TO REFRESH PHOTOS CLIENT *********")
self.oauth.refresh_token()
self.client = self.oauth.photos_service()
-        pid = random.sample(self.photo_urls, 1)
-        id = pid[0]
+        id = random.choice(list(self.photo_urls))
refresh = 15
- if (self.is_video[id]): refresh = 60
+ if self.is_video[id]:
+ refresh = 60
- f = file_writer.file_writer('photo_23_none.html')
- f.write("""
+ f = file_writer.file_writer("photo_23_none.html")
+ f.write(
+ """
<style>
body{background-color:#303030;}
div#time{color:#dddddd;}
div#date{color:#dddddd;}
</style>
-<center>""")
+<center>"""
+ )
if self.is_video[id]:
- f.write('<iframe src="%s" seamless width=%s height=%s></iframe>' % (self.photo_urls[id], self.width[id], self.height[id]))
+ f.write(
+ '<iframe src="%s" seamless width=%s height=%s></iframe>'
+ % (self.photo_urls[id], self.width[id], self.height[id])
+ )
else:
- f.write('<img src="%s" width=%s alt="%s">' % (self.photo_urls[id], self.width[id], self.photo_urls[id]))
+ f.write(
+ '<img src="%s" width=%s alt="%s">'
+ % (self.photo_urls[id], self.width[id], self.photo_urls[id])
+ )
f.write("</center>")
f.close()
return True
+
# Test code
-oauth = gdata_oauth.OAuth(secrets.google_client_id,
-                          secrets.google_client_secret)
-oauth.get_new_token()
-if not oauth.has_token():
-    user_code = oauth.get_user_code()
-    print('------------------------------------------------------------')
-    print(('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
-        oauth.verification_url, user_code)))
-    oauth.get_new_token()
-x = picasa_renderer({"Fetch Photos": (60 * 60 * 12),
-                     "Shuffle Cached Photos": (1)},
-                    oauth)
-x.fetch_photos()
+# oauth = gdata_oauth.OAuth(secrets.google_client_id, secrets.google_client_secret)
+# oauth.get_new_token()
+# if not oauth.has_token():
+#     user_code = oauth.get_user_code()
+#     print("------------------------------------------------------------")
+#     print(
+#         'Go to %s and enter the code "%s" (no quotes, case-sensitive)'
+#         % (oauth.verification_url, user_code)
+#     )
+#     oauth.get_new_token()
+# x = picasa_renderer(
+#     {"Fetch Photos": (60 * 60 * 12), "Shuffle Cached Photos": (1)}, oauth
+# )
+# x.fetch_photos()
-
import http.client
import re
+
class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
super(pollen_count_renderer, self).__init__(name_to_timeout_dict, False)
- self.site = 'www.nwasthma.com'
- self.uri = '/pollen/pollen-count/'
+ self.site = "www.nwasthma.com"
+ self.uri = "/pollen/pollen-count/"
self.trees = []
self.grasses = []
self.weeds = []
def fetch_html(self):
conn = http.client.HTTPConnection(self.site)
- conn.request(
- "GET",
- self.uri,
- None,
- {})
+ conn.request("GET", self.uri, None, {})
response = conn.getresponse()
if response.status != 200:
- print(('Connection to %s/%s failed, status %d' % (self.site,
- self.uri,
- response.status)))
+            print(
+                "Connection to %s/%s failed, status %d"
+                % (self.site, self.uri, response.status)
+            )
return False
return response.read()
desc = ""
color = "#00d000"
if tr != None and tr.string != None:
- desc = tr.string.encode('utf-8')
+            desc = tr.string
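+        # loose substring matching: "edium" hits "Medium"/"medium" and "igh"
+        # (below) hits "High"/"high", regardless of capitalization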
if "edium" in desc:
color = "#a0a000"
elif "igh" in desc:
count = 0
if tc != None and tc.string != None:
try:
- count = int(tc.string.encode('utf-8'))
+                count = int(tc.string)
except:
count = 0
proportion = float(count) / float(maximum)
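+        # the bar is scaled against the per-kind maximum passed in by munge()
+        # (650 for trees, 35 for grasses, 25 for weeds)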
comment = ""
if tcomment != None and tcomment.string != None:
- comment = "%s" % (tcomment.string.encode('utf-8'))
+ comment = "%s" % (tcomment.string.encode("utf-8"))
# Label:
text = text + '<TR><TD WIDTH=10%% STYLE="font-size: 22pt">%s:</TD>' % (kind)
# Bar graph with text in it (possibly overspilling):
- text = text + '<TD HEIGHT=80><DIV STYLE="width: %d; height: 80; overflow: visible; background-color: %s; font-size: 16pt">' % (width, color)
- text = text + 'count=%d, %s %s</DIV>' % (count, desc, comment)
+ text = (
+ text
+ + '<TD HEIGHT=80><DIV STYLE="width: %d; height: 80; overflow: visible; background-color: %s; font-size: 16pt">'
+ % (width, color)
+ )
+ text = text + "count=%d, %s %s</DIV>" % (count, desc, comment)
return text
def munge(self, raw):
<CENTER>
-<TABLE BODER WIDTH=800>"""
+<TABLE BORDER WIDTH=800>"""
date = "<CENTER><B>Unknown Date</B></CENTER>"
- for x in soup.find_all('p'):
+ for x in soup.find_all("p"):
if x == None or x.string == None:
continue
- txt = x.string.encode('utf-8')
+ txt = x.string.encode("utf-8")
m = re.match("[0-9][0-9].[0-9][0-9].20[0-9][0-9]", txt)
if m != None:
date = "<CENTER><B>%s</B></CENTER>" % (txt)
- y = x.find_next_sibling('p')
+ y = x.find_next_sibling("p")
if y != None and y.string != None:
- txt = y.string.encode('utf-8')
+ txt = y.string.encode("utf-8")
date = date + "<BR>%s<HR>" % txt
- text = text + '<TR><TD COLSPAN=3 STYLE="font-size:16pt">%s</TD></TR>\n' % (
- date)
+ text = text + '<TR><TD COLSPAN=3 STYLE="font-size:16pt">%s</TD></TR>\n' % (date)
- trees = soup.find('td', text=re.compile('[Tt]rees:'))
+ trees = soup.find("td", text=re.compile("[Tt]rees:"))
if trees != None:
- tc = trees.find_next_sibling('td')
- tr = tc.find_next_sibling('td')
- tcomment = tr.find_next_sibling('td')
+ tc = trees.find_next_sibling("td")
+ tr = tc.find_next_sibling("td")
+ tcomment = tr.find_next_sibling("td")
text = self.append_crap(text, tc, tr, tcomment, "Trees", 650)
- grasses = soup.find('td', text=re.compile('[Gg]rasses:'))
+ grasses = soup.find("td", text=re.compile("[Gg]rasses:"))
if grasses != None:
- gc = grasses.find_next_sibling('td')
- gr = gc.find_next_sibling('td')
- gcomment = gr.find_next_sibling('td')
+ gc = grasses.find_next_sibling("td")
+ gr = gc.find_next_sibling("td")
+ gcomment = gr.find_next_sibling("td")
text = self.append_crap(text, gc, gr, gcomment, "Grasses", 35)
- weeds = soup.find('td', text=re.compile('[Ww]eeds:'))
+ weeds = soup.find("td", text=re.compile("[Ww]eeds:"))
if weeds != None:
- wc = weeds.find_next_sibling('td')
- wr = wc.find_next_sibling('td')
- wcomment = wr.find_next_sibling('td')
+ wc = weeds.find_next_sibling("td")
+ wr = wc.find_next_sibling("td")
+ wcomment = wr.find_next_sibling("td")
text = self.append_crap(text, wc, wr, wcomment, "Weeds", 25)
- text = text + """
+ text = (
+ text
+ + """
<TR>
<TD COLSPAN=3 STYLE="font-size:16pt">
<HR>
</TR>
</TABLE>
</CENTER>"""
+ )
return text
def poll_pollen(self):
raw = self.fetch_html()
cooked = self.munge(raw)
- f = file_writer.file_writer('pollen_4_360.html')
+ f = file_writer.file_writer("pollen_4_360.html")
f.write(cooked)
f.close()
return True
else:
raise error("Unknown operaiton")
-#test = pollen_count_renderer({"Test", 123})
+
+# test = pollen_count_renderer({"Test", 123})
import string
import re
+
class profanity_filter:
def __init__(self):
self.arrBad = [
- 'acrotomophilia',
- 'anal',
- 'anally',
- 'anilingus',
- 'anus',
- 'arsehole',
- 'ass',
- 'asses',
- 'asshole',
- 'assmunch',
- 'auto erotic',
- 'autoerotic',
- 'babeland',
- 'baby batter',
- 'ball gag',
- 'ball gravy',
- 'ball kicking',
- 'ball licking',
- 'ball sack',
- 'ball zack',
- 'ball sucking',
- 'bangbros',
- 'bareback',
- 'barely legal',
- 'barenaked',
- 'bastardo',
- 'bastinado',
- 'bbw',
- 'bdsm',
- 'beaver cleaver',
- 'beaver lips',
- 'bestiality',
- 'bi curious',
- 'big black',
- 'big breasts',
- 'big knockers',
- 'big tits',
- 'bimbos',
- 'birdlock',
- 'bitch',
- 'bitches',
- 'black cock',
- 'blonde action',
- 'blonde on blonde',
- 'blow j',
- 'blow your l',
- 'blow ourselves',
- 'blow m',
- 'blue waffle',
- 'blumpkin',
- 'bollocks',
- 'bondage',
- 'boner',
- 'boob',
- 'boobs',
- 'booty call',
- 'breasts',
- 'brown showers',
- 'brunette action',
- 'bukkake',
- 'bulldyke',
- 'bullshit',
- 'bullet vibe',
- 'bung hole',
- 'bunghole',
- 'busty',
- 'butt',
- 'buttcheeks',
- 'butthole',
- 'camel toe',
- 'camgirl',
- 'camslut',
- 'camwhore',
- 'carpet muncher',
- 'carpetmuncher',
- 'chocolate rosebuds',
- 'circlejerk',
- 'cleveland steamer',
- 'clit',
- 'clitoris',
- 'clover clamps',
- 'clusterfuck',
- 'cock',
- 'cocks',
- 'coprolagnia',
- 'coprophilia',
- 'cornhole',
- 'creampie',
- 'cream pie',
- 'cum',
- 'cumming',
- 'cunnilingus',
- 'cunt',
- 'damn',
- 'darkie',
- 'date rape',
- 'daterape',
- 'deep throat',
- 'deepthroat',
- 'dick',
- 'dildo',
- 'dirty pillows',
- 'dirty sanchez',
- 'dog style',
- 'doggie style',
- 'doggiestyle',
- 'doggy style',
- 'doggystyle',
- 'dolcett',
- 'domination',
- 'dominatrix',
- 'dommes',
- 'donkey punch',
- 'double dick',
- 'double dong',
- 'double penetration',
- 'dp action',
- 'dtf',
- 'eat my ass',
- 'ecchi',
- 'ejaculation',
- 'erection',
- 'erotic',
- 'erotism',
- 'escort',
- 'ethical slut',
- 'eunuch',
- 'faggot',
- 'posts each week',
- 'fecal',
- 'felch',
- 'fellatio',
- 'feltch',
- 'female squirting',
- 'femdom',
- 'figging',
- 'fingering',
- 'fisting',
- 'foot fetish',
- 'footjob',
- 'frotting',
- 'fuck',
- 'fucking',
- 'fuckin',
- 'fuckin\'',
- 'fucked',
- 'fuckers',
- 'fuck buttons',
- 'fuckhead',
- 'fudge packer',
- 'fudgepacker',
- 'futanari',
- 'g-spot',
- 'gspot',
- 'gang bang',
- 'gay sex',
- 'genitals',
- 'giant cock',
- 'girl on',
- 'girl on top',
- 'girls gone wild',
- 'goatcx',
- 'goatse',
- 'goddamn',
- 'gokkun',
- 'golden shower',
- 'goo girl',
- 'goodpoop',
- 'goregasm',
- 'grope',
- 'group sex',
- 'guro',
- 'hand job',
- 'handjob',
- 'hard core',
- 'hardcore',
- 'hentai',
- 'homoerotic',
- 'honkey',
- 'hooker',
- 'horny',
- 'hot chick',
- 'how to kill',
- 'how to murder',
- 'huge fat',
- 'humping',
- 'incest',
- 'intercourse',
- 'jack off',
- 'jail bait',
- 'jailbait',
- 'jerk off',
- 'jerking off',
- 'jigaboo',
- 'jiggaboo',
- 'jiggerboo',
- 'jizz',
- 'juggs',
- 'kike',
- 'kinbaku',
- 'kinkster',
- 'kinky',
- 'knobbing',
- 'leather restraint',
- 'lemon party',
- 'lolita',
- 'lovemaking',
- 'lpt request',
- 'make me come',
- 'male squirting',
- 'masturbate',
- 'masturbated',
- 'masturbating',
- 'menage a trois',
- 'milf',
- 'milfs',
- 'missionary position',
- 'motherfucker',
- 'mound of venus',
- 'mr hands',
- 'muff diver',
- 'muffdiving',
- 'nambla',
- 'nawashi',
- 'negro',
- 'neonazi',
- 'nig nog',
- 'nigga',
- 'nigger',
- 'nimphomania',
- 'nipple',
- 'not safe for',
- 'nsfw',
- 'nsfw images',
- 'nude',
- 'nudity',
- 'nutsack',
- 'nut sack',
- 'nympho',
- 'nymphomania',
- 'octopussy',
- 'omorashi',
- 'one night stand',
- 'orgasm',
- 'orgy',
- 'paedophile',
- 'panties',
- 'panty',
- 'pedobear',
- 'pedophile',
- 'pegging',
- 'pee',
- 'penis',
- 'phone sex',
- 'piss pig',
- 'pissing',
- 'pisspig',
- 'playboy',
- 'pleasure chest',
- 'pole smoker',
- 'ponyplay',
- 'poof',
- 'poop chute',
- 'poopchute',
- 'porn',
- 'pornhub',
- 'porno',
- 'pornography',
- 'prince albert',
- 'pthc',
- 'pube',
- 'pubes',
- 'pussy',
- 'pussies',
- 'queaf',
- 'queer',
- 'raghead',
- 'raging boner',
- 'rape',
- 'raping',
- 'rapist',
- 'rectum',
- 'reverse cowgirl',
- 'rimjob',
- 'rimming',
- 'rosy palm',
- 'rusty trombone',
- 's&m',
- 'sadism',
- 'scat',
- 'schlong',
- 'scissoring',
- 'semen',
- 'sex',
- 'sexo',
- 'sexy',
- 'shaved beaver',
- 'shaved pussy',
- 'shemale',
- 'shibari',
- 'shit',
- 'shota',
- 'shrimping',
- 'slanteye',
- 'slut',
- 'smut',
- 'snatch',
- 'snowballing',
- 'sodomize',
- 'sodomy',
- 'spic',
- 'spooge',
- 'spread legs',
- 'strap on',
- 'strapon',
- 'strappado',
- 'strip club',
- 'style doggy',
- 'suck',
- 'sucks',
- 'suicide girls',
- 'sultry women',
- 'swastika',
- 'swinger',
- 'tainted love',
- 'taste my',
- 'tea bagging',
- 'threesome',
- 'throating',
- 'tied up',
- 'tight white',
- 'tit',
- 'tits',
- 'titties',
- 'titty',
- 'tongue in a',
- 'topless',
- 'tosser',
- 'towelhead',
- 'tranny',
- 'tribadism',
- 'tub girl',
- 'tubgirl',
- 'tushy',
- 'twat',
- 'twink',
- 'twinkie',
- 'undressing',
- 'upskirt',
- 'urethra play',
- 'urophilia',
- 'vagina',
- 'venus mound',
- 'vibrator',
- 'violet blue',
- 'violet wand',
- 'vorarephilia',
- 'voyeur',
- 'vulva',
- 'wank',
- 'wet dream',
- 'wetback',
- 'white power',
- 'whore',
- 'women rapping',
- 'wrapping men',
- 'wrinkled starfish',
- 'xx',
- 'xxx',
- 'yaoi',
- 'yellow showers',
- 'yiffy',
- 'zoophilia',
+ "acrotomophilia",
+ "anal",
+ "anally",
+ "anilingus",
+ "anus",
+ "arsehole",
+ "ass",
+ "asses",
+ "asshole",
+ "assmunch",
+ "auto erotic",
+ "autoerotic",
+ "babeland",
+ "baby batter",
+ "ball gag",
+ "ball gravy",
+ "ball kicking",
+ "ball licking",
+ "ball sack",
+ "ball zack",
+ "ball sucking",
+ "bangbros",
+ "bareback",
+ "barely legal",
+ "barenaked",
+ "bastardo",
+ "bastinado",
+ "bbw",
+ "bdsm",
+ "beaver cleaver",
+ "beaver lips",
+ "bestiality",
+ "bi curious",
+ "big black",
+ "big breasts",
+ "big knockers",
+ "big tits",
+ "bimbos",
+ "birdlock",
+ "bitch",
+ "bitches",
+ "black cock",
+ "blonde action",
+ "blonde on blonde",
+ "blow j",
+ "blow your l",
+ "blow ourselves",
+ "blow m",
+ "blue waffle",
+ "blumpkin",
+ "bollocks",
+ "bondage",
+ "boner",
+ "boob",
+ "boobs",
+ "booty call",
+ "breasts",
+ "brown showers",
+ "brunette action",
+ "bukkake",
+ "bulldyke",
+ "bullshit",
+ "bullet vibe",
+ "bung hole",
+ "bunghole",
+ "busty",
+ "butt",
+ "buttcheeks",
+ "butthole",
+ "camel toe",
+ "camgirl",
+ "camslut",
+ "camwhore",
+ "carpet muncher",
+ "carpetmuncher",
+ "chocolate rosebuds",
+ "circlejerk",
+ "cleveland steamer",
+ "clit",
+ "clitoris",
+ "clover clamps",
+ "clusterfuck",
+ "cock",
+ "cocks",
+ "coprolagnia",
+ "coprophilia",
+ "cornhole",
+ "creampie",
+ "cream pie",
+ "cum",
+ "cumming",
+ "cunnilingus",
+ "cunt",
+ "damn",
+ "darkie",
+ "date rape",
+ "daterape",
+ "deep throat",
+ "deepthroat",
+ "dick",
+ "dildo",
+ "dirty pillows",
+ "dirty sanchez",
+ "dog style",
+ "doggie style",
+ "doggiestyle",
+ "doggy style",
+ "doggystyle",
+ "dolcett",
+ "domination",
+ "dominatrix",
+ "dommes",
+ "donkey punch",
+ "double dick",
+ "double dong",
+ "double penetration",
+ "dp action",
+ "dtf",
+ "eat my ass",
+ "ecchi",
+ "ejaculation",
+ "erection",
+ "erotic",
+ "erotism",
+ "escort",
+ "ethical slut",
+ "eunuch",
+ "faggot",
+ "posts each week",
+ "fecal",
+ "felch",
+ "fellatio",
+ "feltch",
+ "female squirting",
+ "femdom",
+ "figging",
+ "fingering",
+ "fisting",
+ "foot fetish",
+ "footjob",
+ "frotting",
+ "fuck",
+ "fucking",
+ "fuckin",
+ "fuckin'",
+ "fucked",
+ "fuckers",
+ "fuck buttons",
+ "fuckhead",
+ "fudge packer",
+ "fudgepacker",
+ "futanari",
+ "g-spot",
+ "gspot",
+ "gang bang",
+ "gay sex",
+ "genitals",
+ "giant cock",
+ "girl on",
+ "girl on top",
+ "girls gone wild",
+ "goatcx",
+ "goatse",
+ "goddamn",
+ "gokkun",
+ "golden shower",
+ "goo girl",
+ "goodpoop",
+ "goregasm",
+ "grope",
+ "group sex",
+ "guro",
+ "hand job",
+ "handjob",
+ "hard core",
+ "hardcore",
+ "hentai",
+ "homoerotic",
+ "honkey",
+ "hooker",
+ "horny",
+ "hot chick",
+ "how to kill",
+ "how to murder",
+ "huge fat",
+ "humping",
+ "incest",
+ "intercourse",
+ "jack off",
+ "jail bait",
+ "jailbait",
+ "jerk off",
+ "jerking off",
+ "jigaboo",
+ "jiggaboo",
+ "jiggerboo",
+ "jizz",
+ "juggs",
+ "kike",
+ "kinbaku",
+ "kinkster",
+ "kinky",
+ "knobbing",
+ "leather restraint",
+ "lemon party",
+ "lolita",
+ "lovemaking",
+ "lpt request",
+ "make me come",
+ "male squirting",
+ "masturbate",
+ "masturbated",
+ "masturbating",
+ "menage a trois",
+ "milf",
+ "milfs",
+ "missionary position",
+ "motherfucker",
+ "mound of venus",
+ "mr hands",
+ "muff diver",
+ "muffdiving",
+ "nambla",
+ "nawashi",
+ "negro",
+ "neonazi",
+ "nig nog",
+ "nigga",
+ "nigger",
+ "nimphomania",
+ "nipple",
+ "not safe for",
+ "nsfw",
+ "nsfw images",
+ "nude",
+ "nudity",
+ "nutsack",
+ "nut sack",
+ "nympho",
+ "nymphomania",
+ "octopussy",
+ "omorashi",
+ "one night stand",
+ "orgasm",
+ "orgy",
+ "paedophile",
+ "panties",
+ "panty",
+ "pedobear",
+ "pedophile",
+ "pegging",
+ "pee",
+ "penis",
+ "phone sex",
+ "piss pig",
+ "pissing",
+ "pisspig",
+ "playboy",
+ "pleasure chest",
+ "pole smoker",
+ "ponyplay",
+ "poof",
+ "poop chute",
+ "poopchute",
+ "porn",
+ "pornhub",
+ "porno",
+ "pornography",
+ "prince albert",
+ "pthc",
+ "pube",
+ "pubes",
+ "pussy",
+ "pussies",
+ "queaf",
+ "queer",
+ "raghead",
+ "raging boner",
+ "rape",
+ "raping",
+ "rapist",
+ "rectum",
+ "reverse cowgirl",
+ "rimjob",
+ "rimming",
+ "rosy palm",
+ "rusty trombone",
+ "s&m",
+ "sadism",
+ "scat",
+ "schlong",
+ "scissoring",
+ "semen",
+ "sex",
+ "sexo",
+ "sexy",
+ "shaved beaver",
+ "shaved pussy",
+ "shemale",
+ "shibari",
+ "shit",
+ "shota",
+ "shrimping",
+ "slanteye",
+ "slut",
+ "smut",
+ "snatch",
+ "snowballing",
+ "sodomize",
+ "sodomy",
+ "spic",
+ "spooge",
+ "spread legs",
+ "strap on",
+ "strapon",
+ "strappado",
+ "strip club",
+ "style doggy",
+ "suck",
+ "sucks",
+ "suicide girls",
+ "sultry women",
+ "swastika",
+ "swinger",
+ "tainted love",
+ "taste my",
+ "tea bagging",
+ "threesome",
+ "throating",
+ "tied up",
+ "tight white",
+ "tit",
+ "tits",
+ "titties",
+ "titty",
+ "tongue in a",
+ "topless",
+ "tosser",
+ "towelhead",
+ "tranny",
+ "tribadism",
+ "tub girl",
+ "tubgirl",
+ "tushy",
+ "twat",
+ "twink",
+ "twinkie",
+ "undressing",
+ "upskirt",
+ "urethra play",
+ "urophilia",
+ "vagina",
+ "venus mound",
+ "vibrator",
+ "violet blue",
+ "violet wand",
+ "vorarephilia",
+ "voyeur",
+ "vulva",
+ "wank",
+ "wet dream",
+ "wetback",
+ "white power",
+ "whore",
+ "women rapping",
+ "wrapping men",
+ "wrinkled starfish",
+ "xx",
+ "xxx",
+ "yaoi",
+ "yellow showers",
+ "yiffy",
+ "zoophilia",
]
def normalize(self, text):
result = text.lower()
- result = result.replace('_', ' ')
+ result = result.replace("_", " ")
for x in string.punctuation:
- result = result.replace(x, '')
- result = re.sub(
- r"e?s$", "", result)
+ result = result.replace(x, "")
+ result = re.sub(r"e?s$", "", result)
return result
def filter_bad_words(self, text):
- badWordMask = '!@#$%!@#$%^~!@%^~@#$%!@#$%^~!'
+ badWordMask = "!@#$%!@#$%^~!@%^~@#$%!@#$%^~!"
brokenStr1 = text.split()
for word in brokenStr1:
- if (self.normalize(word) in self.arrBad or
- word in self.arrBad):
+ if self.normalize(word) in self.arrBad or word in self.arrBad:
print(('***** PROFANITY WORD="%s"' % word))
- text = text.replace(word, badWordMask[:len(word)])
+ text = text.replace(word, badWordMask[: len(word)])
if len(brokenStr1) > 1:
bigrams = list(zip(brokenStr1, brokenStr1[1:]))
for bigram in bigrams:
phrase = "%s %s" % (bigram[0], bigram[1])
- if (self.normalize(phrase) in self.arrBad or
- phrase in self.arrBad):
+ if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
print(('***** PROFANITY PHRASE="%s"' % phrase))
- text = text.replace(bigram[0], badWordMask[:len(bigram[0])])
- text = text.replace(bigram[1], badWordMask[:len(bigram[1])])
+ text = text.replace(bigram[0], badWordMask[: len(bigram[0])])
+ text = text.replace(bigram[1], badWordMask[: len(bigram[1])])
if len(brokenStr1) > 2:
trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
for trigram in trigrams:
phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
- if (self.normalize(phrase) in self.arrBad or
- phrase in self.arrBad):
+ if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
print(('***** PROFANITY PHRASE="%s"' % phrase))
- text = text.replace(trigram[0], badWordMask[:len(trigram[0])])
- text = text.replace(trigram[1], badWordMask[:len(trigram[1])])
- text = text.replace(trigram[2], badWordMask[:len(trigram[2])])
+ text = text.replace(trigram[0], badWordMask[: len(trigram[0])])
+ text = text.replace(trigram[1], badWordMask[: len(trigram[1])])
+ text = text.replace(trigram[2], badWordMask[: len(trigram[2])])
return text
def contains_bad_words(self, text):
brokenStr1 = text.split()
for word in brokenStr1:
- if (self.normalize(word) in self.arrBad or
- word in self.arrBad):
+ if self.normalize(word) in self.arrBad or word in self.arrBad:
print(('***** PROFANITY WORD="%s"' % word))
return True
bigrams = list(zip(brokenStr1, brokenStr1[1:]))
for bigram in bigrams:
phrase = "%s %s" % (bigram[0], bigram[1])
- if (self.normalize(phrase) in self.arrBad or
- phrase in self.arrBad):
+ if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
print(('***** PROFANITY PHRASE="%s"' % phrase))
return True
trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
for trigram in trigrams:
phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
- if (self.normalize(phrase) in self.arrBad or
- phrase in self.arrBad):
+ if self.normalize(phrase) in self.arrBad or phrase in self.arrBad:
print(('***** PROFANITY PHRASE="%s"' % phrase))
return True
return False
-#x = profanity_filter()
-#print(x.filter_bad_words("Fuck this auto erotic shit, it's not safe for work."))
-#print(x.contains_bad_words("cream pie their daughter."))
-#print(x.contains_bad_words("If you tell someone your penis is 6 inches it's pretty believable. If you say it's half a foot no one will believe you."))
-#print(x.normalize("dickes"));
+
+# x = profanity_filter()
+# print(x.filter_bad_words("Fuck this auto erotic shit, it's not safe for work."))
+# print(x.contains_bad_words("cream pie their daughter."))
+# print(x.contains_bad_words("If you tell someone your penis is 6 inches it's pretty believable. If you say it's half a foot no one will believe you."))
+# print(x.normalize("dickes"));
import random
import renderer_catalog
+
class reddit_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer to pull text content from reddit."""
def __init__(self, name_to_timeout_dict, subreddit_list, min_votes, font_size):
super(reddit_renderer, self).__init__(name_to_timeout_dict, True)
self.subreddit_list = subreddit_list
- self.praw = praw.Reddit(client_id=secrets.reddit_client_id,
- client_secret=secrets.reddit_client_secret,
- user_agent=secrets.reddit_user_agent)
+ self.praw = praw.Reddit(
+ client_id=secrets.reddit_client_id,
+ client_secret=secrets.reddit_client_secret,
+ user_agent=secrets.reddit_user_agent,
+ )
self.min_votes = min_votes
self.font_size = font_size
self.messages = grab_bag.grab_bag()
def debug_prefix(self):
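+        # e.g. subreddit_list ["seattle", "bellevue"] -> "reddit(seattle bellevue)"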
x = ""
for subreddit in self.subreddit_list:
- x += ("%s " % subreddit)
+ x += "%s " % subreddit
return "reddit(%s)" % x.strip()
def periodic_render(self, key):
elif key == "Shuffle":
return self.shuffle_messages()
else:
- raise error('Unexpected operation')
+ raise error("Unexpected operation")
def append_message(self, messages):
for msg in messages:
- if (not self.filter.contains_bad_words(msg.title)
+ if (
+ not self.filter.contains_bad_words(msg.title)
and msg.ups > self.min_votes
- and not msg.title in self.deduper):
+                and msg.title not in self.deduper
+ ):
try:
self.deduper.add(msg.title)
content = "%d" % msg.ups
- if (msg.thumbnail != "self" and
- msg.thumbnail != "default" and
- msg.thumbnail != ""):
+ if (
+ msg.thumbnail != "self"
+ and msg.thumbnail != "default"
+ and msg.thumbnail != ""
+ ):
content = '<IMG SRC="%s">' % msg.thumbnail
x = """
<TABLE STYLE="font-size:%dpt;">
<B>%s</B><BR><FONT COLOR=#bbbbbb>(%s)</FONT>
</TD>
</TR>
-</TABLE>""" % (self.font_size, content, msg.title, msg.author)
+</TABLE>""" % (
+ self.font_size,
+ content,
+ msg.title,
+ msg.author,
+ )
self.messages.add(x)
except:
- self.debug_print('Unexpected exception, skipping message.')
+ self.debug_print("Unexpected exception, skipping message.")
else:
- self.debug_print('skipped message "%s" for profanity or low score' % (
- msg.title))
+ self.debug_print(
+ 'skipped message "%s" for profanity or low score' % (msg.title)
+ )
def scrape_reddit(self):
self.deduper.clear()
except:
pass
try:
- msg = self.praw.subreddit(subreddit).controversial('week')
+ msg = self.praw.subreddit(subreddit).controversial("week")
self.append_message(msg)
except:
pass
try:
- msg = self.praw.subreddit(subreddit).top('day')
+ msg = self.praw.subreddit(subreddit).top("day")
self.append_message(msg)
except:
pass
layout.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
x = ""
for subreddit in self.subreddit_list:
- x += ("%s " % subreddit)
+ x += "%s " % subreddit
if len(x) > 30:
if "SeaWA" in x:
x = "[local interests]"
f.close()
return True
+
class til_reddit_renderer(reddit_renderer):
def __init__(self, name_to_timeout_dict):
super(til_reddit_renderer, self).__init__(
- name_to_timeout_dict, ["todayilearned"], 200, 20)
+ name_to_timeout_dict, ["todayilearned"], 200, 20
+ )
+
class quotes_reddit_renderer(reddit_renderer):
def __init__(self, name_to_timeout_dict):
super(quotes_reddit_renderer, self).__init__(
- name_to_timeout_dict, ["quotes"], 200, 20)
+ name_to_timeout_dict, ["quotes"], 200, 20
+ )
+
class showerthoughts_reddit_renderer(reddit_renderer):
def __init__(self, name_to_timeout_dict):
super(showerthoughts_reddit_renderer, self).__init__(
- name_to_timeout_dict, ["showerthoughts"], 350, 24)
+ name_to_timeout_dict, ["showerthoughts"], 350, 24
+ )
+
class seattle_reddit_renderer(reddit_renderer):
def __init__(self, name_to_timeout_dict):
super(seattle_reddit_renderer, self).__init__(
- name_to_timeout_dict, ["seattle","seattleWA","SeaWA","bellevue","kirkland", "CoronavirusWA"], 50, 24)
+ name_to_timeout_dict,
+ ["seattle", "seattleWA", "SeaWA", "bellevue", "kirkland", "CoronavirusWA"],
+ 50,
+ 24,
+ )
+
class lifeprotips_reddit_renderer(reddit_renderer):
def __init__(self, name_to_timeout_dict):
super(lifeprotips_reddit_renderer, self).__init__(
- name_to_timeout_dict, ["lifeprotips"], 100, 24)
+ name_to_timeout_dict, ["lifeprotips"], 100, 24
+ )
+
-#x = reddit_renderer({"Test", 1234}, ["seattle","bellevue"], 50, 24)
-#x.periodic_render("Scrape")
-#x.periodic_render("Shuffle")
+# x = reddit_renderer({"Test", 1234}, ["seattle","bellevue"], 50, 24)
+# x.periodic_render("Scrape")
+# x.periodic_render("Shuffle")
from datetime import datetime
from decorators import invokation_logged
+
class renderer(object):
"""Base class for something that can render."""
def get_name(self):
return self.__class__.__name__
+
class abstaining_renderer(renderer):
"""A renderer that doesn't do it all the time."""
+
def __init__(self, name_to_timeout_dict):
- self.name_to_timeout_dict = name_to_timeout_dict;
+ self.name_to_timeout_dict = name_to_timeout_dict
self.last_runs = {}
for key in name_to_timeout_dict:
self.last_runs[key] = 0
def should_render(self, keys_to_skip):
now = time.time()
for key in self.name_to_timeout_dict:
- if (((now - self.last_runs[key]) > self.name_to_timeout_dict[key]) and
- key not in keys_to_skip):
+ if (
+ (now - self.last_runs[key]) > self.name_to_timeout_dict[key]
+ ) and key not in keys_to_skip:
return key
return None
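+    # Example (hypothetical timeouts): with {"Fetch": 3600, "Shuffle": 1},
+    # should_render() returns "Fetch" at most once an hour but "Shuffle" on
+    # almost every poll, unless the key is in keys_to_skip.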
tries_per_key[key] = 0
if tries_per_key[key] >= 3:
- print('renderer: Too many failures for "%s.%s", giving up' % (
- self.get_name(), key))
+ print(
+ 'renderer: Too many failures for "%s.%s", giving up'
+ % (self.get_name(), key)
+ )
keys_to_skip.add(key)
else:
msg = 'renderer: executing "%s.%s"' % (self.get_name(), key)
- if (tries_per_key[key] > 1):
+ if tries_per_key[key] > 1:
msg = msg + " (retry #%d)" % tries_per_key[key]
print(msg)
- if (self.periodic_render(key)):
+ if self.periodic_render(key):
self.last_runs[key] = time.time()
@invokation_logged
def periodic_render(self, key):
pass
+
class debuggable_abstaining_renderer(abstaining_renderer):
def __init__(self, name_to_timeout_dict, debug):
- super(debuggable_abstaining_renderer, self).__init__(name_to_timeout_dict);
+ super(debuggable_abstaining_renderer, self).__init__(name_to_timeout_dict)
self.debug = debug
def debug_prefix(self):
import weather_renderer
import wsj_rss_renderer
-oauth = gdata_oauth.OAuth(secrets.google_client_id,
- secrets.google_client_secret)
+oauth = gdata_oauth.OAuth(secrets.google_client_id, secrets.google_client_secret)
if not oauth.has_token():
user_code = oauth.get_user_code()
- print('------------------------------------------------------------')
- print(('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
- oauth.verification_url, user_code)))
+ print("------------------------------------------------------------")
+    print(
+        'Go to %s and enter the code "%s" (no quotes, case-sensitive)'
+        % (oauth.verification_url, user_code)
+    )
oauth.get_new_token()
seconds = 1
# frequency in the renderer thread of ~once a minute. It just means that
-# everytime it check these will be stale and happen.
+# every time it checks, these will already be stale and so will run.
__registry = [
- stranger_renderer.stranger_events_renderer(
- {"Fetch Events" : (hours * 12),
- "Shuffle Events" : (always)}),
-# pollen_renderer.pollen_count_renderer(
-# {"Poll" : (hours * 1)}),
- myq_renderer.garage_door_renderer(
- {"Poll MyQ" : (minutes * 5),
- "Update Page" : (always)}),
- bellevue_reporter_rss_renderer.bellevue_reporter_rss_renderer(
- {"Fetch News" : (hours * 1),
- "Shuffle News" : (always)},
- "www.bellevuereporter.com",
- [ "/feed/" ],
- "Bellevue Reporter" ),
- mynorthwest_rss_renderer.mynorthwest_rss_renderer(
- {"Fetch News" : (hours * 1),
- "Shuffle News" : (always)},
- "mynorthwest.com",
- [ "/feed/" ],
- "MyNorthwest News" ),
- cnn_rss_renderer.cnn_rss_renderer(
- {"Fetch News" : (hours * 1),
- "Shuffle News" : (always)},
- "rss.cnn.com",
- [ "/rss/cnn_tech.rss",
- "/rss/money_technology.rss" ],
- "CNNTechnology" ),
- cnn_rss_renderer.cnn_rss_renderer(
- {"Fetch News" : (hours * 1),
- "Shuffle News" : (always)},
- "rss.cnn.com",
- [ "/rss/cnn_topstories.rss",
- "/rss/cnn_world.rss",
- "/rss/cnn_us.rss" ],
- "CNNNews" ),
- wsj_rss_renderer.wsj_rss_renderer(
- {"Fetch News" : (hours * 1),
- "Shuffle News" : (always)},
- "feeds.a.dj.com",
- [ "/rss/RSSWorldNews.xml" ],
- "WSJNews" ),
- wsj_rss_renderer.wsj_rss_renderer(
- {"Fetch News" : (hours * 1),
- "Shuffle News" : (always)},
- "feeds.a.dj.com",
- [ "/rss/RSSMarketsMain.xml",
- "/rss/WSJcomUSBusiness.xml"],
- "WSJBusiness" ),
- google_news_rss_renderer.google_news_rss_renderer(
- {"Fetch News" : (minutes * 30),
- "Shuffle News" : (always)},
- "news.google.com",
- [ "/rss?hl=en-US&gl=US&ceid=US:en" ],
- "Google News" ),
- health_renderer.periodic_health_renderer(
- {"Update Perioidic Job Health" : (seconds * 45)}),
- stock_renderer.stock_quote_renderer(
- {"Update Prices" : (hours * 1)},
- [ "MSFT",
- "SPY",
- "GBTC",
- "IEMG",
- "OPTAX",
- "SPAB",
- "SPHD",
- "SGOL",
- "VDC",
- "VYMI",
- "VNQ",
- "VNQI" ]),
- stevens_renderer.stevens_pass_conditions_renderer(
- {"Fetch Pass Conditions" : (hours * 1)},
- "www.wsdot.com",
- [ "/traffic/rssfeeds/stevens/Default.aspx" ]),
- seattletimes_rss_renderer.seattletimes_rss_renderer(
- {"Fetch News" : (hours * 4),
- "Shuffle News" : (always)},
- "www.seattletimes.com",
- [ "/pacific-nw-magazine/feed/",
- "/life/feed/",
- "/outdoors/feed/" ],
- "Seattle Times Segments"),
- weather_renderer.weather_renderer(
- {"Fetch Weather (Bellevue)": (hours * 4)},
- "home"),
- weather_renderer.weather_renderer(
- {"Fetch Weather (Stevens)": (hours * 4)},
- "stevens"),
- weather_renderer.weather_renderer(
- {"Fetch Weather (Telma)" : (hours * 4)},
- "telma"),
- local_photos_mirror_renderer.local_photos_mirror_renderer(
- {"Index Photos": (hours * 24),
- "Choose Photo": (always)}),
- gkeep_renderer.gkeep_renderer(
- {"Update": (minutes * 10)}),
- gcal_renderer.gcal_renderer(
- {"Render Upcoming Events": (hours * 2),
- "Look For Triggered Events": (always)},
- oauth),
- reddit_renderer.showerthoughts_reddit_renderer(
- {"Scrape": (hours * 6),
- "Shuffle": (always)} ),
- reddit_renderer.til_reddit_renderer(
- {"Scrape": (hours * 6),
- "Shuffle": (always)} ),
- reddit_renderer.seattle_reddit_renderer(
- {"Scrape": (hours * 6),
- "Shuffle": (always)}),
- reddit_renderer.quotes_reddit_renderer(
- {"Scrape": (hours * 6),
- "Shuffle": (always)}),
- reddit_renderer.lifeprotips_reddit_renderer(
- {"Scrape": (hours * 6),
- "Shuffle": (always)}),
- twitter_renderer.twitter_renderer(
- {"Fetch Tweets": (minutes * 15),
- "Shuffle Tweets": (always)})
+ stranger_renderer.stranger_events_renderer(
+ {"Fetch Events": (hours * 12), "Shuffle Events": (always)}
+ ),
+ # pollen_renderer.pollen_count_renderer(
+ # {"Poll" : (hours * 1)}),
+ myq_renderer.garage_door_renderer(
+ {"Poll MyQ": (minutes * 5), "Update Page": (always)}
+ ),
+ bellevue_reporter_rss_renderer.bellevue_reporter_rss_renderer(
+ {"Fetch News": (hours * 1), "Shuffle News": (always)},
+ "www.bellevuereporter.com",
+ ["/feed/"],
+ "Bellevue Reporter",
+ ),
+ mynorthwest_rss_renderer.mynorthwest_rss_renderer(
+ {"Fetch News": (hours * 1), "Shuffle News": (always)},
+ "mynorthwest.com",
+ ["/feed/"],
+ "MyNorthwest News",
+ ),
+ cnn_rss_renderer.cnn_rss_renderer(
+ {"Fetch News": (hours * 1), "Shuffle News": (always)},
+ "rss.cnn.com",
+ ["/rss/cnn_tech.rss", "/rss/money_technology.rss"],
+ "CNNTechnology",
+ ),
+ cnn_rss_renderer.cnn_rss_renderer(
+ {"Fetch News": (hours * 1), "Shuffle News": (always)},
+ "rss.cnn.com",
+ ["/rss/cnn_topstories.rss", "/rss/cnn_world.rss", "/rss/cnn_us.rss"],
+ "CNNNews",
+ ),
+ wsj_rss_renderer.wsj_rss_renderer(
+ {"Fetch News": (hours * 1), "Shuffle News": (always)},
+ "feeds.a.dj.com",
+ ["/rss/RSSWorldNews.xml"],
+ "WSJNews",
+ ),
+ wsj_rss_renderer.wsj_rss_renderer(
+ {"Fetch News": (hours * 1), "Shuffle News": (always)},
+ "feeds.a.dj.com",
+ ["/rss/RSSMarketsMain.xml", "/rss/WSJcomUSBusiness.xml"],
+ "WSJBusiness",
+ ),
+ google_news_rss_renderer.google_news_rss_renderer(
+ {"Fetch News": (minutes * 30), "Shuffle News": (always)},
+ "news.google.com",
+ ["/rss?hl=en-US&gl=US&ceid=US:en"],
+ "Google News",
+ ),
+ health_renderer.periodic_health_renderer(
+ {"Update Perioidic Job Health": (seconds * 45)}
+ ),
+ stock_renderer.stock_quote_renderer(
+ {"Update Prices": (hours * 1)},
+ [
+ "MSFT",
+ "SPY",
+ "GBTC",
+ "IEMG",
+ "OPTAX",
+ "SPAB",
+ "SPHD",
+ "SGOL",
+ "VDC",
+ "VYMI",
+ "VNQ",
+ "VNQI",
+ ],
+ ),
+ stevens_renderer.stevens_pass_conditions_renderer(
+ {"Fetch Pass Conditions": (hours * 1)},
+ "www.wsdot.com",
+ ["/traffic/rssfeeds/stevens/Default.aspx"],
+ ),
+ seattletimes_rss_renderer.seattletimes_rss_renderer(
+ {"Fetch News": (hours * 4), "Shuffle News": (always)},
+ "www.seattletimes.com",
+ ["/pacific-nw-magazine/feed/", "/life/feed/", "/outdoors/feed/"],
+ "Seattle Times Segments",
+ ),
+ weather_renderer.weather_renderer(
+ {"Fetch Weather (Bellevue)": (hours * 4)}, "home"
+ ),
+ weather_renderer.weather_renderer(
+ {"Fetch Weather (Stevens)": (hours * 4)}, "stevens"
+ ),
+ weather_renderer.weather_renderer({"Fetch Weather (Telma)": (hours * 4)}, "telma"),
+ local_photos_mirror_renderer.local_photos_mirror_renderer(
+ {"Index Photos": (hours * 24), "Choose Photo": (always)}
+ ),
+ gkeep_renderer.gkeep_renderer({"Update": (minutes * 10)}),
+ gcal_renderer.gcal_renderer(
+ {"Render Upcoming Events": (hours * 2), "Look For Triggered Events": (always)},
+ oauth,
+ ),
+ reddit_renderer.showerthoughts_reddit_renderer(
+ {"Scrape": (hours * 6), "Shuffle": (always)}
+ ),
+ reddit_renderer.til_reddit_renderer({"Scrape": (hours * 6), "Shuffle": (always)}),
+ reddit_renderer.seattle_reddit_renderer(
+ {"Scrape": (hours * 6), "Shuffle": (always)}
+ ),
+ reddit_renderer.quotes_reddit_renderer(
+ {"Scrape": (hours * 6), "Shuffle": (always)}
+ ),
+ reddit_renderer.lifeprotips_reddit_renderer(
+ {"Scrape": (hours * 6), "Shuffle": (always)}
+ ),
+ twitter_renderer.twitter_renderer(
+ {"Fetch Tweets": (minutes * 15), "Shuffle Tweets": (always)}
+ ),
]
+
def get_renderers():
return __registry
-import sets
import xml.etree.ElementTree as ET
+
class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page):
super(reuters_rss_renderer, self).__init__(name_to_timeout_dict, False)
elif key == "Shuffle News":
return self.shuffle_news()
else:
- raise error('Unexpected operation')
+ raise error("Unexpected operation")
def shuffle_news(self):
headlines = page_builder.page_builder()
return False
for msg in subset:
headlines.add_item(msg)
- f = file_writer.file_writer('reuters-%s_4_none.html' % self.page)
+ f = file_writer.file_writer("reuters-%s_4_none.html" % self.page)
headlines.render_html(f)
f.close()
details.set_title("%s" % self.page)
subset = self.details.subset(1)
if subset is None:
- self.debug_print("Not enough details to choose from.");
+ self.debug_print("Not enough details to choose from.")
return False
for msg in subset:
blurb = msg
blurb += "</TD>\n"
details.add_item(blurb)
- g = file_writer.file_writer('reuters-details-%s_6_none.html' % self.page)
+ g = file_writer.file_writer("reuters-details-%s_6_none.html" % self.page)
details.render_html(g)
g.close()
return True
for uri in self.feed_uris:
self.conn = http.client.HTTPConnection(self.feed_site)
- self.conn.request(
- "GET",
- uri,
- None,
- {"Accept-Charset": "utf-8"})
+ self.conn.request("GET", uri, None, {"Accept-Charset": "utf-8"})
response = self.conn.getresponse()
if response.status != 200:
- print(("%s: RSS fetch_news error, response: %d" % (self.page,
- response.status)))
+            print(
+                "%s: RSS fetch_news error, response: %d"
+                % (self.page, response.status)
+            )
self.debug_print(response.read())
return False
rss = ET.fromstring(response.read())
channel = rss[0]
-        for item in channel.getchildren():
+        for item in channel:
- title = item.findtext('title')
- if (title is None or
- "euters" in title or
- title == "Editor's Choice" or
- self.filter.contains_bad_words(title)):
+ title = item.findtext("title")
+ if (
+ title is None
+ or "euters" in title
+ or title == "Editor's Choice"
+ or self.filter.contains_bad_words(title)
+ ):
continue
- pubdate = item.findtext('pubDate')
- image = item.findtext('image')
- descr = item.findtext('description')
+ pubdate = item.findtext("pubDate")
+ image = item.findtext("image")
+ descr = item.findtext("description")
if descr is not None:
- descr = re.sub('<[^>]+>', '', descr)
+ descr = re.sub("<[^>]+>", "", descr)
blurb = """<DIV style="padding:8px;
font-size:34pt;
-webkit-column-break-inside:avoid;">"""
if image is not None:
- blurb += '<IMG SRC=\"%s\" ALIGN=LEFT HEIGHT=115" style="padding:8px;">\n' % image
- blurb += '<P><B>%s</B>' % title
+                blurb += (
+                    '<IMG SRC="%s" ALIGN=LEFT HEIGHT=115 style="padding:8px;">\n'
+                    % image
+                )
+ blurb += "<P><B>%s</B>" % title
if pubdate != None:
# Thu, 04 Jun 2015 08:16:35 GMT|-0400
- pubdate = pubdate.rsplit(' ', 1)[0]
- dt = datetime.datetime.strptime(pubdate,
- '%a, %d %b %Y %H:%M:%S')
+ pubdate = pubdate.rsplit(" ", 1)[0]
+ dt = datetime.datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S")
if dt < oldest:
continue
- blurb += dt.strftime(" <FONT COLOR=#bbbbbb>(%a %b %d)</FONT>")
+ blurb += dt.strftime(
+ " <FONT COLOR=#bbbbbb>(%a %b %d)</FONT>"
+ )
if descr is not None:
longblurb = blurb
longblurb += "<BR>"
longblurb += descr
longblurb += "</DIV>"
- longblurb = longblurb.replace("font-size:34pt",
- "font-size:44pt")
+ longblurb = longblurb.replace("font-size:34pt", "font-size:44pt")
- self.details.add(longblurb.encode('utf8'))
+ self.details.add(longblurb.encode("utf8"))
blurb += "</DIV>"
- self.news.add(blurb.encode('utf8'))
+ self.news.add(blurb.encode("utf8"))
count += 1
return count > 0
import datetime
import generic_news_rss_renderer as gnrss
+
class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
- interesting_categories = frozenset([
- 'Nation',
- 'World',
- 'Life',
- 'Technology'
- 'Local News',
- 'Food',
- 'Drink',
- 'Today File',
- 'Seahawks',
- 'Oddities',
- 'Packfic NW',
- 'Home',
- 'Garden',
- 'Travel',
- 'Outdoors',
- ])
+ interesting_categories = frozenset(
+ [
+ "Nation",
+ "World",
+ "Life",
+ "Technology" "Local News",
+ "Food",
+ "Drink",
+ "Today File",
+ "Seahawks",
+ "Oddities",
+ "Packfic NW",
+ "Home",
+ "Garden",
+ "Travel",
+ "Outdoors",
+ ]
+ )
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
super(seattletimes_rss_renderer, self).__init__(
- name_to_timeout_dict,
- feed_site,
- feed_uris,
- page_title)
+ name_to_timeout_dict, feed_site, feed_uris, page_title
+ )
def debug_prefix(self):
return "seattletimes"
details = {}
-        for detail in item.getchildren():
+        for detail in item:
- self.debug_print("detail %s => %s (%s)" % (detail.tag,
- detail.attrib,
- detail.text))
+ self.debug_print(
+ "detail %s => %s (%s)" % (detail.tag, detail.attrib, detail.text)
+ )
if detail.text != None:
details[detail.tag] = detail.text
if "category" not in details:
return False
return len(description) >= 65
+
# Test
-#x = seattletimes_rss_renderer({"Test", 123},
+# x = seattletimes_rss_renderer({"Test", 123},
# "www.seattletimes.com",
# [ "/life/feed/" ],
# "nonnews")
-#x.periodic_render("Fetch News")
-#x.periodic_render("Shuffle News")
+# x.periodic_render("Fetch News")
+# x.periodic_render("Shuffle News")
import http.client
import xml.etree.ElementTree as ET
+
class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris):
super(stevens_pass_conditions_renderer, self).__init__(
- name_to_timeout_dict, False)
+ name_to_timeout_dict, False
+ )
self.feed_site = feed_site
self.feed_uris = feed_uris
return "stevens"
def periodic_render(self, key):
- f = file_writer.file_writer('stevens-conditions_1_86400.html')
+ f = file_writer.file_writer("stevens-conditions_1_86400.html")
for uri in self.feed_uris:
self.conn = http.client.HTTPSConnection(self.feed_site)
- self.conn.request(
- "GET",
- uri,
- None,
- {"Accept-Charset": "utf-8"})
+ self.conn.request("GET", uri, None, {"Accept-Charset": "utf-8"})
response = self.conn.getresponse()
if response.status == 200:
raw = response.read()
-            for item in channel.getchildren():
+            for item in channel:
if item.tag == "title":
f.write("<h1>%s</h1><hr>" % item.text)
- f.write('<IMG WIDTH=512 ALIGN=RIGHT HEIGHT=382 SRC="https://images.wsdot.wa.gov/nc/002vc06430.jpg?t=637059938785646824" style="padding:8px;">')
+ f.write(
+ '<IMG WIDTH=512 ALIGN=RIGHT HEIGHT=382 SRC="https://images.wsdot.wa.gov/nc/002vc06430.jpg?t=637059938785646824" style="padding:8px;">'
+ )
elif item.tag == "item":
-                for x in item.getchildren():
+                for x in item:
if x.tag == "description":
text = x.text
- text = text.replace("<strong>Stevens Pass US2</strong><br/>", "")
+ text = text.replace(
+ "<strong>Stevens Pass US2</strong><br/>", ""
+ )
text = text.replace("<br/><br/>", "<BR>")
- text = text.replace("<strong>Elevation Meters:</strong>1238<BR>", "")
- f.write('<P>\n%s\n' % text)
+ text = text.replace(
+ "<strong>Elevation Meters:</strong>1238<BR>", ""
+ )
+ f.write("<P>\n%s\n" % text)
f.close()
return True
f.close()
import time
import urllib.request, urllib.error, urllib.parse
+
class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
# format exchange:symbol
def __init__(self, name_to_timeout_dict, symbols):
def periodic_render(self, key):
now = datetime.datetime.now()
- if (now.hour < (9 - 3) or
- now.hour >= (17 - 3) or
- datetime.datetime.today().weekday() > 4):
+ if (
+ now.hour < (9 - 3)
+ or now.hour >= (17 - 3)
+ or datetime.datetime.today().weekday() > 4
+ ):
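+            # (9 - 3) and (17 - 3) shift the ~9am..5pm NYSE (Eastern) trading
+            # hours into local time, assuming this box runs on US Pacific time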
self.debug_print("The stock market is closed so not re-rendering")
return True
- if (self.thread is None or not self.thread.is_alive()):
+ if self.thread is None or not self.thread.is_alive():
self.debug_print("Spinning up a background thread...")
- self.thread = Thread(target = self.thread_internal_render, args=())
+ self.thread = Thread(target=self.thread_internal_render, args=())
self.thread.start()
return True
def thread_internal_render(self):
symbols_finished = 0
- f = file_writer.file_writer('stock_3_86400.html')
+ f = file_writer.file_writer("stock_3_86400.html")
f.write("<H1>Stock Quotes</H1><HR>")
f.write("<TABLE WIDTH=99%>")
for symbol in self.symbols:
-# print "---------- Working on %s\n" % symbol
+ # print "---------- Working on %s\n" % symbol
# https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=5min&apikey=<key>
cooked = ""
while True:
key = self.get_random_key()
- url = self.prefix + "function=GLOBAL_QUOTE&symbol=%s&apikey=%s" % (symbol, key)
+ url = self.prefix + "function=GLOBAL_QUOTE&symbol=%s&apikey=%s" % (
+ symbol,
+ key,
+ )
raw = urllib.request.urlopen(url).read()
cooked = json.loads(raw)
- if 'Global Quote' not in cooked:
-# print "%s\n" % cooked
- print("Failure %d, sleep %d sec...\n" % (attempts + 1,
- 2 ** attempts))
+ if "Global Quote" not in cooked:
+ # print "%s\n" % cooked
+ print(
+ "Failure %d, sleep %d sec...\n" % (attempts + 1, 2 ** attempts)
+ )
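+                # back off exponentially between retries (1s, 2s, 4s, ...)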
time.sleep(2 ** attempts)
attempts += 1
- if attempts > 10: # we'll wait up to 512 seconds per symbol
+ if attempts > 10: # we'll wait up to 512 seconds per symbol
break
else:
break
# These fuckers...
- if 'Global Quote' not in cooked:
- print("Can't get data for symbol %s: %s\n" % (
- symbol, raw))
+ if "Global Quote" not in cooked:
+ print("Can't get data for symbol %s: %s\n" % (symbol, raw))
continue
- cooked = cooked['Global Quote']
+ cooked = cooked["Global Quote"]
# {
# u'Global Quote':
# }
price = "?????"
- if '05. price' in cooked:
- price = cooked['05. price']
+ if "05. price" in cooked:
+ price = cooked["05. price"]
price = price[:-2]
percent_change = "?????"
- if '10. change percent' in cooked:
- percent_change = cooked['10. change percent']
- if not '-' in percent_change:
+ if "10. change percent" in cooked:
+ percent_change = cooked["10. change percent"]
+ if not "-" in percent_change:
percent_change = "+" + percent_change
change = "?????"
cell_color = "#bbbbbb"
- if '09. change' in cooked:
- change = cooked['09. change']
+ if "09. change" in cooked:
+ change = cooked["09. change"]
if "-" in change:
cell_color = "#b00000"
else:
change = change[:-2]
if symbols_finished % 4 == 0:
- if (symbols_finished > 0):
+ if symbols_finished > 0:
f.write("</TR>")
f.write("<TR>")
symbols_finished += 1
- f.write("""
+ f.write(
+ """
<TD WIDTH=20%% HEIGHT=150 BGCOLOR="%s">
<!-- Container -->
<DIV style="position:relative;
<B>$%s</B>
</DIV>
</DIV>
-</TD>""" % (cell_color,
- symbol,
- price,
- percent_change,
- change))
+</TD>"""
+ % (cell_color, symbol, price, percent_change, change)
+ )
f.write("</TR></TABLE>")
f.close()
return True
-#x = stock_quote_renderer({}, ["MSFT", "GOOG", "GOOGL", "OPTAX", "VNQ"])
-#x.periodic_render(None)
-#x.periodic_render(None)
+
+# x = stock_quote_renderer({}, ["MSFT", "GOOG", "GOOGL", "OPTAX", "VNQ"])
+# x.periodic_render(None)
+# x.periodic_render(None)
import renderer
import renderer_catalog
+
class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
super(stranger_events_renderer, self).__init__(name_to_timeout_dict, True)
for msg in subset:
layout.add_item(msg)
- f = file_writer.file_writer('stranger-events_2_36000.html')
+ f = file_writer.file_writer("stranger-events_2_36000.html")
layout.render_html(f)
f.close()
return True
if delta > 1:
ts = now + datetime.timedelta(delta)
next_sat = datetime.datetime.strftime(ts, "%Y-%m-%d")
- feed_uris.append("/stranger-seattle/events/?start-date=%s&page=1" % next_sat)
- feed_uris.append("/stranger-seattle/events/?start-date=%s&page=2" % next_sat)
+ feed_uris.append(
+ "/stranger-seattle/events/?start-date=%s&page=1" % next_sat
+ )
+ feed_uris.append(
+ "/stranger-seattle/events/?start-date=%s&page=2" % next_sat
+ )
delta += 1
if delta > 1:
ts = now + datetime.timedelta(delta)
next_sun = datetime.datetime.strftime(ts, "%Y-%m-%d")
- feed_uris.append("/stranger-seattle/events/?start-date=%s&page=1" % next_sun)
- feed_uris.append("/stranger-seattle/events/?start-date=%s&page=2" % next_sun)
+ feed_uris.append(
+ "/stranger-seattle/events/?start-date=%s&page=1" % next_sun
+ )
+ feed_uris.append(
+ "/stranger-seattle/events/?start-date=%s&page=2" % next_sun
+ )
for uri in feed_uris:
try:
self.debug_print("fetching 'https://%s%s'" % (self.feed_site, uri))
self.conn = http.client.HTTPSConnection(self.feed_site)
- self.conn.request(
- "GET",
- uri,
- None,
- {"Accept-Charset": "utf-8"})
+ self.conn.request("GET", uri, None, {"Accept-Charset": "utf-8"})
response = self.conn.getresponse()
if response.status != 200:
- self.debug_print("Connection failed, status %d" % (
- response.status))
+ self.debug_print("Connection failed, status %d" % (response.status))
self.debug_print(response.getheaders())
continue
raw = response.read()
soup = BeautifulSoup(raw, "html.parser")
filter = profanity_filter.profanity_filter()
- for x in soup.find_all('div', class_='row event list-item mb-3 py-3'):
- text = x.get_text();
- if (filter.contains_bad_words(text)):
+ for x in soup.find_all("div", class_="row event list-item mb-3 py-3"):
+ text = x.get_text()
+ if filter.contains_bad_words(text):
continue
raw = str(x)
- raw = raw.replace('src="/',
- 'align="left" src="https://www.thestranger.com/')
- raw = raw.replace('href="/',
- 'href="https://www.thestranger.com/')
- raw = raw.replace('FREE', 'Free')
- raw = raw.replace('Save Event', '')
- raw = re.sub('^\s*$', '', raw, 0, re.MULTILINE)
- #raw = re.sub('\n+', '\n', raw)
- raw = re.sub('<span[^<>]*class="calendar-post-ticket"[^<>]*>.*</span>', '', raw, 0, re.DOTALL | re.IGNORECASE)
+ raw = raw.replace(
+ 'src="/', 'align="left" src="https://www.thestranger.com/'
+ )
+ raw = raw.replace('href="/', 'href="https://www.thestranger.com/')
+ raw = raw.replace("FREE", "Free")
+ raw = raw.replace("Save Event", "")
+ raw = re.sub("^\s*$", "", raw, 0, re.MULTILINE)
+ # raw = re.sub('\n+', '\n', raw)
+ raw = re.sub(
+ '<span[^<>]*class="calendar-post-ticket"[^<>]*>.*</#span>',
+ "",
+ raw,
+ 0,
+ re.DOTALL | re.IGNORECASE,
+ )
self.events.add(raw)
self.debug_print("fetched %d events so far." % self.events.size())
return self.events.size() > 0
+
# Test
-#x = stranger_events_renderer({"Test", 123})
-#x.periodic_render("Fetch Events")
-#x.periodic_render("Shuffle Events")
+# x = stranger_events_renderer({"Test", 123})
+# x.periodic_render("Fetch Events")
+# x.periodic_render("Shuffle Events")
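+
+ # A sketch of the scrape-and-rewrite step above on canned HTML; the
+ # markup here is illustrative, not a real thestranger.com page.
+ # from bs4 import BeautifulSoup
+ # html = '<div class="row event list-item mb-3 py-3"><a href="/e/1">x</a></div>'
+ # soup = BeautifulSoup(html, "html.parser")
+ # for div in soup.find_all("div", class_="row event list-item mb-3 py-3"):
+ #     raw = str(div).replace('href="/', 'href="https://www.thestranger.com/')
+ #     print(raw)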
-
class trigger(object):
"""Base class for something that can trigger a page becomming active."""
import gcal_trigger
import myq_trigger
-__registry = [ camera_trigger.any_camera_trigger(),
- myq_trigger.myq_trigger(),
- gcal_trigger.gcal_trigger() ]
+__registry = [
+ camera_trigger.any_camera_trigger(),
+ myq_trigger.myq_trigger(),
+ gcal_trigger.gcal_trigger(),
+]
+
def get_triggers():
return __registry
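+
+ # A sketch of consuming the registry; it assumes only the get_triggers()
+ # accessor above, and that this module is importable as trigger_catalog
+ # (a guess at its filename).
+ # import trigger_catalog
+ # for t in trigger_catalog.get_triggers():
+ #     print(type(t).__name__)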
import secrets
import tweepy
+
class twitter_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
super(twitter_renderer, self).__init__(name_to_timeout_dict, False)
self.handles_by_author = dict()
self.filter = profanity_filter.profanity_filter()
self.urlfinder = re.compile(
- "((http|https)://[\-A-Za-z0-9\\.]+/[\?\&\-A-Za-z0-9_\\.]+)")
+ "((http|https)://[\-A-Za-z0-9\\.]+/[\?\&\-A-Za-z0-9_\\.]+)"
+ )
# == OAuth Authentication ==
#
# The consumer keys can be found on your application's Details
# page located at https://dev.twitter.com/apps (under "OAuth settings")
- consumer_key=secrets.twitter_consumer_key
- consumer_secret=secrets.twitter_consumer_secret
+ consumer_key = secrets.twitter_consumer_key
+ consumer_secret = secrets.twitter_consumer_secret
# The access tokens can be found on your applications's Details
# page located at https://dev.twitter.com/apps (located
# under "Your access token")
- access_token=secrets.twitter_access_token
- access_token_secret=secrets.twitter_access_token_secret
+ access_token = secrets.twitter_access_token
+ access_token_secret = secrets.twitter_access_token_secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
elif key == "Shuffle Tweets":
return self.shuffle_tweets()
else:
- raise error('Unexpected operation')
+ raise Exception("Unexpected operation")
def fetch_tweets(self):
try:
- tweets = self.api.home_timeline(tweet_mode='extended', count=200)
+ tweets = self.api.home_timeline(tweet_mode="extended", count=200)
except:
print("Exception while fetching tweets!")
return False
handle = self.handles_by_author[author]
tweets = self.tweets_by_author[author]
already_seen = set()
- f = file_writer.file_writer('twitter_10_3600.html')
- f.write('<TABLE WIDTH=96%><TR><TD WIDTH=86%>')
- f.write('<H2>%s (@%s)</H2></TD>\n' % (author, handle))
+ f = file_writer.file_writer("twitter_10_3600.html")
+ f.write("<TABLE WIDTH=96%><TR><TD WIDTH=86%>")
+ f.write("<H2>%s (@%s)</H2></TD>\n" % (author, handle))
f.write('<TD ALIGN="right" VALIGN="top">')
f.write('<IMG SRC="twitter.png" WIDTH=42></TD></TR></TABLE>\n')
- f.write('<HR>\n<UL>\n')
+ f.write("<HR>\n<UL>\n")
count = 0
length = 0
for tweet in tweets:
text = tweet.full_text
- if ((text not in already_seen) and
- (not self.filter.contains_bad_words(text))):
+ if (text not in already_seen) and (
+ not self.filter.contains_bad_words(text)
+ ):
already_seen.add(text)
text = self.linkify(text)
- f.write('<LI><B>%s</B>\n' % text)
+ f.write("<LI><B>%s</B>\n" % text)
count += 1
length += len(text)
if count > 3 or length > 270:
break
- f.write('</UL>\n')
+ f.write("</UL>\n")
f.close()
return True
+
# Test
-#t = twitter_renderer(
+# t = twitter_renderer(
# {"Fetch Tweets" : 1,
# "Shuffle Tweets" : 1})
-#x = "bla bla bla https://t.co/EjWnT3UA9U bla bla"
-#x = t.linkify(x)
-#print x
-#if t.fetch_tweets() == 0:
+# x = "bla bla bla https://t.co/EjWnT3UA9U bla bla"
+# x = t.linkify(x)
+# print x
+# if t.fetch_tweets() == 0:
# print("Error fetching tweets, none fetched.")
-#else:
+# else:
# t.shuffle_tweets()
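+
+ # A sketch of linkify() built on the urlfinder pattern compiled in
+ # __init__ above; the <A HREF> substitution is an assumption about what
+ # the real method does, based on the test above.
+ # import re
+ # urlfinder = re.compile(r"((http|https)://[\-A-Za-z0-9\.]+/[\?\&\-A-Za-z0-9_\.]+)")
+ # def linkify(text):
+ #     return urlfinder.sub(r'<A HREF="\1">\1</A>', text)
+ # print(linkify("bla bla https://t.co/EjWnT3UA9U bla"))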
import constants
from datetime import datetime
+
def timestamp():
t = datetime.fromtimestamp(time.time())
- return t.strftime('%d/%b/%Y:%H:%M:%S%Z')
+ return t.strftime("%d/%b/%Y:%H:%M:%S%Z")
+
def describe_age_of_file(filename):
try:
except Exception as e:
return "?????"
+
def describe_age_of_file_briefly(filename):
try:
now = time.time()
except Exception as e:
return "?????"
+
def describe_duration(age):
days = divmod(age, constants.seconds_per_day)
hours = divmod(days[1], constants.seconds_per_hour)
minutes = divmod(hours[1], constants.seconds_per_minute)
descr = ""
- if (days[0] > 1):
+ if days[0] > 1:
descr = "%d days, " % days[0]
- elif (days[0] == 1):
+ elif days[0] == 1:
descr = "1 day, "
- if (hours[0] > 1):
+ if hours[0] > 1:
descr = descr + ("%d hours, " % hours[0])
- elif (hours[0] == 1):
+ elif hours[0] == 1:
descr = descr + "1 hour, "
- if (len(descr) > 0):
+ if len(descr) > 0:
descr = descr + "and "
- if (minutes[0] == 1):
+ if minutes[0] == 1:
descr = descr + "1 minute"
else:
descr = descr + ("%d minutes" % minutes[0])
return descr
+
def describe_duration_briefly(age):
days = divmod(age, constants.seconds_per_day)
hours = divmod(days[1], constants.seconds_per_hour)
minutes = divmod(hours[1], constants.seconds_per_minute)
descr = ""
- if (days[0] > 0):
+ if days[0] > 0:
descr = "%dd " % days[0]
- if (hours[0] > 0):
+ if hours[0] > 0:
descr = descr + ("%dh " % hours[0])
descr = descr + ("%dm" % minutes[0])
return descr
-#x = describe_age_of_file_briefly("pages/clock_10_none.html")
-#print x
+
+# x = describe_age_of_file_briefly("pages/clock_10_none.html")
+# print x
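+
+ # Worked example: 93784 seconds = 1*86400 + 2*3600 + 3*60 + 4, so
+ # describe_duration(93784)         => "1 day, 2 hours, and 3 minutes"
+ # describe_duration_briefly(93784) => "1d 2h 3m"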
import urllib.request, urllib.error, urllib.parse
import random
+
class weather_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer to fetch forecast from wunderground."""
- def __init__(self,
- name_to_timeout_dict,
- file_prefix):
+ def __init__(self, name_to_timeout_dict, file_prefix):
super(weather_renderer, self).__init__(name_to_timeout_dict, False)
self.file_prefix = file_prefix
return self.fetch_weather()
def describe_time(self, index):
- if (index <= 1):
+ if index <= 1:
return "overnight"
- elif (index <= 3):
+ elif index <= 3:
return "morning"
- elif (index <= 5):
+ elif index <= 5:
return "afternoon"
else:
return "evening"
return "heavy"
def describe_magnitude(self, mm):
- if (mm < 2):
+ if mm < 2:
return "light"
- elif (mm < 10):
+ elif mm < 10:
return "moderate"
else:
return "heavy"
total_snow = 0
count = min(len(conditions), len(rain), len(snow))
for x in range(0, count):
- seen_rain = rain[x] > 0;
- seen_snow = snow[x] > 0;
+ seen_rain = rain[x] > 0
+ seen_snow = snow[x] > 0
total_snow += snow[x]
txt = conditions[x].lower()
- if ("cloud" in txt):
+ if "cloud" in txt:
cloud_count += 1
- if ("clear" in txt or "sun" in txt):
+ if "clear" in txt or "sun" in txt:
clear_count += 1
- if (seen_rain and seen_snow):
- if (total_snow < 10):
+ if seen_rain and seen_snow:
+ if total_snow < 10:
return "sleet.gif"
else:
return "snow.gif"
- if (seen_snow):
- if (total_snow < 10):
+ if seen_snow:
+ if total_snow < 10:
return "flurries.gif"
else:
return "snow.gif"
- if (seen_rain):
+ if seen_rain:
return "rain.gif"
- if (cloud_count >= 6):
+ if cloud_count >= 6:
return "mostlycloudy.gif"
- elif (cloud_count >= 4):
+ elif cloud_count >= 4:
return "partlycloudy.gif"
- if (clear_count >= 7):
+ if clear_count >= 7:
return "sunny.gif"
- elif (clear_count >= 6):
+ elif clear_count >= 6:
return "mostlysunny.gif"
- elif (clear_count >= 4):
+ elif clear_count >= 4:
return "partlysunny.gif"
return "clear.gif"
- def describe_weather(self,
- high, low,
- wind, conditions, rain, snow):
+ def describe_weather(self, high, low, wind, conditions, rain, snow):
# High temp: 65
# Low temp: 44
# -onight------ -morning----- -afternoon-- -evening----
elif txt == "Rain":
txt = "rainy"
- if (txt != lcondition):
+ if txt != lcondition:
if txt != "Snow" and txt != "Rain":
current += txt
chunks += 1
lcondition = txt
txt = self.describe_wind(wind[x])
- if (txt != lwind):
- if (len(current) > 0):
+ if txt != lwind:
+ if len(current) > 0:
current += " with "
current += txt + " winds"
lwind = txt
chunks += 1
txt = self.describe_precip(rain[x], snow[x])
- if (txt != lprecip):
- if (len(current) > 0):
- if (chunks > 1):
+ if txt != lprecip:
+ if len(current) > 0:
+ if chunks > 1:
current += " and "
else:
current += " with "
current += txt
lprecip = txt
- if (len(current)):
- if (ltime != time):
- if (random.randint(0, 3) == 0):
- if (time != "overnight"):
+ if len(current):
+ if ltime != time:
+ if random.randint(0, 3) == 0:
+ if time != "overnight":
descr += current + " in the " + time + ". "
descr += current + " overnight. "
else:
- if (time != "overnight"):
+ if time != "overnight":
descr += "In the "
descr += time + ", " + current + ". "
else:
current = current.replace("cloudy", "clouds")
descr += current + " developing. "
ltime = time
- if (ltime == "overnight" or ltime == "morning"):
+ if ltime == "overnight" or ltime == "morning":
descr += "Conditions continuing the rest of the day. "
descr = descr.replace("with breezy winds", "and breezy")
descr = descr.replace("Clear developing", "Skies clearing")
text_location = "Bellevue, WA"
param = "id=5786882"
- www = urllib.request.urlopen('http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial' % (
- param, secrets.openweather_key))
+ www = urllib.request.urlopen(
+ "http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial"
+ % (param, secrets.openweather_key)
+ )
response = www.read()
www.close()
parsed_json = json.loads(response)
# "dt_txt":"2017-01-30 18:00:00"
# },
# {"dt":1485810000,....
- f = file_writer.file_writer('weather-%s_3_10800.html' % self.file_prefix)
- f.write("""
+ f = file_writer.file_writer("weather-%s_3_10800.html" % self.file_prefix)
+ f.write(
+ """
<h1>Weather at %s:</h1>
<hr>
<center>
<table width=99%% cellspacing=10 border=0>
- <tr>""" % text_location)
- count = parsed_json['cnt']
+ <tr>"""
+ % text_location
+ )
+ count = parsed_json["cnt"]
ts = {}
highs = {}
rain = {}
snow = {}
for x in range(0, count):
- data = parsed_json['list'][x]
- dt = data['dt_txt'] # 2019-10-07 18:00:00
+ data = parsed_json["list"][x]
+ dt = data["dt_txt"] # 2019-10-07 18:00:00
date = dt.split(" ")[0]
time = dt.split(" ")[1]
wind[date] = []
ts[date] = 0
for x in range(0, count):
- data = parsed_json['list'][x]
- dt = data['dt_txt'] # 2019-10-07 18:00:00
+ data = parsed_json["list"][x]
+ dt = data["dt_txt"] # 2019-10-07 18:00:00
date = dt.split(" ")[0]
time = dt.split(" ")[1]
- _ = data['dt']
- if (_ > ts[date]):
+ _ = data["dt"]
+ if _ > ts[date]:
ts[date] = _
temp = data["main"]["temp"]
- if (highs[date] < temp):
+ if highs[date] < temp:
highs[date] = temp
- if (temp < lows[date]):
+ if temp < lows[date]:
lows[date] = temp
wind[date].append(data["wind"]["speed"])
conditions[date].append(data["weather"][0]["main"])
# u'wind': {u'speed': 6.31, u'deg': 10.09}}
# Next 5 half-days
- #for x in xrange(0, 5):
+ # for x in xrange(0, 5):
# fcast = parsed_json['forecast']['txt_forecast']['forecastday'][x]
# text = fcast['fcttext']
# text = re.subn(r' ([0-9]+)F', r' \1°F', text)[0]
# f.write('<td style="vertical-align:top;font-size:75%%"><P STYLE="padding:8px;">%s</P></td>' % text)
- #f.write('</tr></table>')
- #f.close()
- #return True
+ # f.write('</tr></table>')
+ # f.close()
+ # return True
- #f.write("<table border=0 cellspacing=10>\n")
+ # f.write("<table border=0 cellspacing=10>\n")
days_seen = {}
for date in sorted(highs.keys()):
today = datetime.fromtimestamp(ts[date])
- formatted_date = today.strftime('%a %e %b')
- if (formatted_date in days_seen):
- continue;
+ formatted_date = today.strftime("%a %e %b")
+ if formatted_date in days_seen:
+ continue
days_seen[formatted_date] = True
num_days = len(list(days_seen.keys()))
precip += _
today = datetime.fromtimestamp(ts[date])
- formatted_date = today.strftime('%a %e %b')
- if (formatted_date in days_seen):
- continue;
+ formatted_date = today.strftime("%a %e %b")
+ if formatted_date in days_seen:
+ continue
days_seen[formatted_date] = True
f.write('<td width=%d%% style="vertical-align:top;">\n' % (100 / num_days))
- f.write('<table border=0>\n')
+ f.write("<table border=0>\n")
# Date
- f.write(' <tr><td colspan=3 height=50><b><center><font size=6>' + formatted_date + '</font></center></b></td></tr>\n')
+ f.write(
+ " <tr><td colspan=3 height=50><b><center><font size=6>"
+ + formatted_date
+ + "</font></center></b></td></tr>\n"
+ )
# Icon
- f.write(' <tr><td colspan=3 height=100><center><img src="/icons/weather/%s" height=125></center></td></tr>\n' %
- self.pick_icon(conditions[date], rain[date], snow[date]))
+ f.write(
+ ' <tr><td colspan=3 height=100><center><img src="/icons/weather/%s" height=125></center></td></tr>\n'
+ % self.pick_icon(conditions[date], rain[date], snow[date])
+ )
# Low temp
color = "#000099"
- if (lows[date] <= 32.5):
+ if lows[date] <= 32.5:
color = "#009999"
- f.write(' <tr><td width=33%% align=left><font color="%s"><b>%d°F </b></font></td>\n' % (
- color, int(lows[date])))
+ f.write(
+ ' <tr><td width=33%% align=left><font color="%s"><b>%d°F </b></font></td>\n'
+ % (color, int(lows[date]))
+ )
# Total precip
precip *= 0.0393701
- if (precip > 0.025):
- f.write(' <td width=33%%><center><b><font style="background-color:#dfdfff; color:#003355">%3.1f"</font></b></center></td>\n' % precip)
+ if precip > 0.025:
+ f.write(
+ ' <td width=33%%><center><b><font style="background-color:#dfdfff; color:#003355">%3.1f"</font></b></center></td>\n'
+ % precip
+ )
else:
- f.write(' <td width=33%> </td>\n')
+ f.write(" <td width=33%> </td>\n")
# High temp
color = "#800000"
- if (highs[date] >= 80):
+ if highs[date] >= 80:
color = "#AA0000"
- f.write(' <td align=right><font color="%s"><b> %d°F</b></font></td></tr>\n' % (
- color, int(highs[date])))
+ f.write(
+ ' <td align=right><font color="%s"><b> %d°F</b></font></td></tr>\n'
+ % (color, int(highs[date]))
+ )
# Text "description"
- f.write('<tr><td colspan=3 style="vertical-align:top;font-size:75%%">%s</td></tr>\n' %
- self.describe_weather(highs[date], lows[date], wind[date], conditions[date], rain[date], snow[date]))
- f.write('</table>\n</td>\n')
+ f.write(
+ '<tr><td colspan=3 style="vertical-align:top;font-size:75%%">%s</td></tr>\n'
+ % self.describe_weather(
+ highs[date],
+ lows[date],
+ wind[date],
+ conditions[date],
+ rain[date],
+ snow[date],
+ )
+ )
+ f.write("</table>\n</td>\n")
f.write("</tr></table></center>")
return True
-#x = weather_renderer({"Stevens": 1000},
+
+# x = weather_renderer({"Stevens": 1000},
# "stevens")
-#x.periodic_render("Stevens")
+# x.periodic_render("Stevens")
import generic_news_rss_renderer
+
class wsj_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
super(wsj_rss_renderer, self).__init__(
- name_to_timeout_dict,
- feed_site,
- feed_uris,
- page_title)
+ name_to_timeout_dict, feed_site, feed_uris, page_title
+ )
self.debug = 1
def debug_prefix(self):
return "wsj-details-%s" % (self.page_title)
def find_image(self, item):
- image = item.findtext('image')
+ image = item.find("image")
if image is not None:
- url = image.get('url')
+ url = image.get("url")
return url
return None
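+
+ # find_image() assumes the feed's <item> carries an <image> child whose
+ # url attribute points at the artwork, e.g. (hypothetical):
+ #   <item><image url="https://example.com/pic.jpg"/>...</item>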
if self.is_item_older_than_n_days(item, 7):
self.debug_print("%s: is too old!" % title)
return False
- return ("WSJ.com" not in title and
- "WSJ.com" not in description)
+ return "WSJ.com" not in title and "WSJ.com" not in description
def item_is_interesting_for_article(self, title, description, item):
if self.is_item_older_than_n_days(item, 7):
self.debug_print("%s: is too old!" % title)
return False
- return ("WSJ.com" not in title and
- "WSJ.com" not in description)
+ return "WSJ.com" not in title and "WSJ.com" not in description
+
# Test
-#x = wsj_rss_renderer(
+# x = wsj_rss_renderer(
# {"Fetch News" : 1,
# "Shuffle News" : 1},
# "feeds.a.dj.com",
# [ "/rss/RSSWorldNews.xml" ],
# "Test" )
-#if x.fetch_news() == 0:
+# if x.fetch_news() == 0:
# print "Error fetching news, no items fetched."
-#x.shuffle_news()
+# x.shuffle_news()