if (self.triggers_in_the_past_seven_min[camera] <= 4 or
cameras_with_recent_triggers > 1):
p = self.choose_priority(camera, age)
- print "%s: ****** %s[%d] CAMERA TRIGGER ******" % (
- ts, camera, p)
+ print("%s: ****** %s[%d] CAMERA TRIGGER ******" % (
+ ts, camera, p))
triggers.append( ( "hidden/%s.html" % camera,
self.choose_priority(camera, age)) )
else:
- print "%s: Camera %s too spammy, squelching it" % (
- ts, camera)
+ print("%s: Camera %s too spammy, squelching it" % (
+ ts, camera))
except Exception as e:
- print e
+ print(e)
pass
if len(triggers) == 0:
return triggers
#x = any_camera_trigger()
-#print x.get_triggered_page_list()
+#print(x.get_triggered_page_list())
for page in pages:
result = re.match(valid_filename, page)
if result != None:
- print('chooser: candidate page: "%s"' % page)
+ print('chooser: candidate page: "%s"' % page)
if (result.group(3) != "none"):
freshness_requirement = int(result.group(3))
last_modified = int(os.path.getmtime(
os.path.join(constants.pages_dir, page)))
age = (now - last_modified)
if (age > freshness_requirement):
- print ('"%s" is too old.' % page)
+ print('"%s" is too old.' % page)
continue
filenames.append(page)
return filenames
total_weight += weight
if (total_weight <= 0):
- raise(error("No valid candidate pages found!"))
+ raise error("No valid candidate pages found!")
while True:
pick = random.randrange(0, total_weight - 1)
so_far = 0
- for x in xrange(0, len(weights)):
+ for x in range(0, len(weights)):
so_far += weights[x]
if (so_far > pick and
self.pages[x] != self.last_choice):
# First try to satisfy from the page queue
if (len(self.page_queue) > 0):
- print "Pulling page from queue"
+ print("Pulling page from queue")
page = None
priority = None
for t in self.page_queue:
self.pages = self.get_page_list()
if len(self.pages) == 0:
- raise(error("No pages!"))
+ raise error("No pages!")
if (self.current >= len(self.pages)):
self.current = 0
return False
def item_is_interesting_for_headlines(self, title, description, item):
- return "CNN.com" not in title
+ return re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None
def item_is_interesting_for_article(self, title, description, item):
- return len(description) >= 65
+ return (re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None and
+ len(description) >= 65)
# Test
#x = cnn_rss_renderer(
def remove_tricky_unicode(x):
try:
- x = x.decode('utf-8')
+ if isinstance(x, bytes):
+     x = x.decode('utf-8')
- x = x.replace(u"\u2018", "'").replace(u"\u2019", "'")
- x = x.replace(u"\u201c", '"').replace(u"\u201d", '"')
- x = x.replace(u"\u2e3a", "-").replace(u"\u2014", "-")
+ x = x.replace("\u2018", "'").replace("\u2019", "'")
+ x = x.replace("\u201c", '"').replace("\u201d", '"')
+ x = x.replace("\u2e3a", "-").replace("\u2014", "-")
except:
pass
return x
def __init__(self, filename):
self.full_filename = os.path.join(constants.pages_dir,
filename)
- self.f = open(self.full_filename, 'w')
+ self.f = open(self.full_filename, 'wb')
self.xforms = [ remove_tricky_unicode ]
def add_xform(self, xform):
import globals
import os
import renderer
-import sets
import time
class gcal_renderer(renderer.debuggable_abstaining_renderer):
"""A renderer to fetch upcoming events from www.google.com/calendar"""
- calendar_whitelist = sets.ImmutableSet([
+ calendar_whitelist = frozenset([
'Alex\'s calendar',
'Family',
'Holidays in United States',
return datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%SZ')
time_min = datetime.datetime.now()
time_max = time_min + datetime.timedelta(95)
- time_min, time_max = map(format_datetime, (time_min, time_max))
+ time_min, time_max = map(format_datetime, (time_min, time_max))
self.debug_print("time_min is %s" % time_min)
self.debug_print("time_max is %s" % time_max)
name, days[0], hours[0], minutes[0]))
g.write('</ul>')
g.write('<SCRIPT>\nlet timestampMap = new Map([')
- for x in timestamps.keys():
+ for x in timestamps.keys():
g.write(' ["%s", %f],\n' % (x, timestamps[x] * 1000.0))
g.write(']);\n\n')
g.write("""
class gcal_trigger(trigger.trigger):
def get_triggered_page_list(self):
if globals.get("gcal_triggered") == True:
- print "****** gcal has an imminent upcoming event. ******"
+ print("****** gcal has an imminent upcoming event. ******")
return (constants.gcal_imminent_pagename, trigger.trigger.PRIORITY_HIGH)
else:
return None
# https://developers.google.com/picasa-web/
import sys
-import urllib
+import urllib.request, urllib.parse, urllib.error
try:
- import httplib # python2
+ import http.client
except ImportError:
import http.client # python3
import os.path
import gdata.calendar.service
import gdata.docs.service
import gdata.photos.service, gdata.photos
-from apiclient.discovery import build
+from googleapiclient.discovery import build
import httplib2
-from apiclient.discovery import build
+from googleapiclient.discovery import build
import datetime
import ssl
# When this happens, we try re-creating the exception.
def reset_connection(self):
self.ssl_ctx = ssl.create_default_context(cafile='/usr/local/etc/ssl/cert.pem')
- httplib.HTTPConnection.debuglevel = 2
- self.conn = httplib.HTTPSConnection(self.host, context=self.ssl_ctx)
+ http.client.HTTPConnection.debuglevel = 2
+ self.conn = http.client.HTTPSConnection(self.host, context=self.ssl_ctx)
def load_token(self):
token = None
self.conn.request(
"POST",
"/o/oauth2/device/code",
- urllib.urlencode({
+ urllib.parse.urlencode({
'client_id': self.client_id,
'scope' : ' '.join(self.scope)
}),
self.verification_url = data['verification_url']
self.retry_interval = data['interval']
else:
- print("gdata: %d" % response.status)
- print(response.read())
+ print("gdata: %d" % response.status)
+ print(response.read())
sys.exit()
return self.user_code
self.conn.request(
"POST",
"/o/oauth2/token",
- urllib.urlencode({
+ urllib.parse.urlencode({
'client_id' : self.client_id,
'client_secret' : self.client_secret,
'code' : self.device_code,
time.sleep(self.retry_interval + 2)
else:
print("gdata: failed to get token")
- print(response.status)
- print(response.read())
+ print(response.status)
+ print(response.read())
def refresh_token(self):
if self.checking_too_often():
self.conn.request(
"POST",
"/o/oauth2/token",
- urllib.urlencode({
+ urllib.parse.urlencode({
'client_id' : self.client_id,
'client_secret' : self.client_secret,
'refresh_token' : refresh_token,
self.token['refresh_token'] = refresh_token
self.save_token()
return True
- print("gdata: unexpected response %d to renewal request" % response.status)
- print(response.read())
+ print("gdata: unexpected response %d to renewal request" % response.status)
+ print(response.read())
return False
def checking_too_often(self):
if page_token:
param['pageToken'] = page_token
param['q'] = self.query
- print "QUERY: %s" % param['q']
+ print("QUERY: %s" % param['q'])
files = self.client.files().list(**param).execute()
result.extend(files['items'])
return "font-size:%dpt" % (x)
for f in result:
- print f['title']
- print f['id']
+ print(f['title'])
+ print(f['id'])
self.debug_print("%s (%s)\n" % (f['title'], f['id']))
title = f['title']
url = f['exportLinks']['text/html']
- print f
- print "Fetching %s..." % url
+ print(f)
+ print("Fetching %s..." % url)
resp, contents = self.client._http.request(url)
- print resp.status
- print contents
+ print(resp.status)
+ print(contents)
if resp.status == 200:
- print "Got contents."
+ print("Got contents.")
+ contents = contents.decode('utf-8')
contents = re.sub('<body class="..">', '', contents)
contents = contents.replace('</body>', '')
contents = re.sub('font-size:([0-9]+)pt', boost_font_size, contents)
import file_writer
import grab_bag
import renderer
-import httplib
+import http.client
import page_builder
import profanity_filter
import random
for uri in self.feed_uris:
if self.should_use_https():
self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
- self.conn = httplib.HTTPSConnection(self.feed_site)
+ self.conn = http.client.HTTPSConnection(self.feed_site)
else:
self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
- self.conn = httplib.HTTPConnection(self.feed_site)
+ self.conn = http.client.HTTPConnection(self.feed_site)
self.conn.request(
"GET",
uri,
{"Accept-Charset": "utf-8"})
response = self.conn.getresponse()
if response.status != 200:
- print("%s: RSS fetch_news error, response: %d" % (self.page_title,
- response.status))
+ print("%s: RSS fetch_news error, response: %d" % (self.page_title,
+ response.status))
self.debug_print(response.read())
return False
#print u"Title: %s\nDescription: %s\nLink: %s\nImage: %s\n" % (
# title, description, link, image)
- blurb = u"""<DIV style="padding:8px;
+ blurb = """<DIV style="padding:8px;
font-size:34pt;
-webkit-column-break-inside:avoid;">"""
if image is not None:
return "gkeep"
def periodic_render(self, key):
- strikethrough = re.compile(u'\u2611([^\n]*)\n', re.UNICODE)
+ strikethrough = re.compile('\u2611([^\n]*)\n', re.UNICODE)
linkify = re.compile(r'.*(https?:\/\/\S+).*')
self.keep.sync()
self.debug_print("Note title '%s'" % title)
if contents != '' and not contents.isspace():
contents = strikethrough.sub(r'<font color="#999999">` <del>\1</del></font>\n', contents)
- contents = contents.replace('`', u'\u2611')
+ contents = contents.replace('`', '\u2611')
#self.debug_print("Note contents:\n%s" % contents)
contents = linkify.sub(r'<a href="\1">\1</a>', contents)
individual_lines = contents.split("\n")
max_length = length
contents = contents.replace("\n", "<BR>\n")
color = note.color.name.lower()
- if color in self.colors_by_name.keys():
+ if color in self.colors_by_name:
color = self.colors_by_name[color]
else:
self.debug_print("Unknown color '%s'" % color)
-#!/usr/local/bin/python
+#!/usr/local/bin/python3.7
import sys
import traceback
(page, triggered) = page_chooser.choose_next_page()
if triggered:
- print('chooser[%s] - WE ARE TRIGGERED.' % utils.timestamp())
+ print('chooser[%s] - WE ARE TRIGGERED.' % utils.timestamp())
if page != last_page:
- print('chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED' % (
- utils.timestamp(), page))
+ print('chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED' % (
+ utils.timestamp(), page))
f = open(os.path.join(constants.pages_dir,
"current.shtml"), "w")
emit_wrapped(f, page)
elif now >= swap_page_target:
if (page == last_page):
- print('chooser[%s] - nominal choice got the same page...' % (
- utils.timestamp()))
+ print('chooser[%s] - nominal choice got the same page...' % (
+ utils.timestamp()))
continue
- print('chooser[%s] - nominal choice of %s' % (utils.timestamp(), page))
+ print('chooser[%s] - nominal choice of %s' % (utils.timestamp(), page))
try:
f = open(os.path.join(constants.pages_dir,
"current.shtml"), "w")
last_page = page
swap_page_target = now + constants.refresh_period_sec
except:
- print('chooser[%s] - page does not exist?!' % (utils.timestamp()))
+ print('chooser[%s] - page does not exist?!' % (utils.timestamp()))
continue
time.sleep(1.0)
def thread_invoke_renderers():
while True:
+ print("Renderer thread[%s]: invoking all renderers in catalog..." % (
+ utils.timestamp()))
for r in renderer_catalog.get_renderers():
try:
r.render()
except Exception as e:
traceback.print_exc()
- print("renderer[%s] unknown exception, swallowing it." % (
- utils.timestamp()))
+ print("renderer[%s] unknown exception, swallowing it." % (
+ utils.timestamp()))
except Error as e:
traceback.print_exc()
- print("renderer[%s] unknown error, swallowing it." % (
- utils.timestamp()))
+ print("renderer[%s] unknown error, swallowing it." % (
+ utils.timestamp()))
time.sleep(constants.render_period_sec)
if __name__ == "__main__":
while True:
if (changer_thread == None or
not changer_thread.is_alive()):
- print("chooser[%s] - (Re?)initializing chooser thread..." % utils.timestamp())
+ print("chooser[%s] - (Re?)initializing chooser thread..." % utils.timestamp())
changer_thread = Thread(target = thread_change_current, args=())
changer_thread.start()
if (renderer_thread == None or
not renderer_thread.is_alive()):
- print("renderer[%s] - (Re?)initializing render thread..." % utils.timestamp())
+ print("renderer[%s] - (Re?)initializing render thread..." % utils.timestamp())
renderer_thread = Thread(target = thread_invoke_renderers, args=())
renderer_thread.start()
time.sleep(10000)
import os
import file_writer
import renderer
-import sets
import random
class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
album_root_directory = "/usr/local/export/www/gphotos/albums"
- album_whitelist = sets.ImmutableSet([
+ album_whitelist = frozenset([
'1208 Newer Alex Photos',
'1013 Scott and Lynn',
'0106 Key West 2019',
'0407 Las Vegas, 2017',
])
- extension_whitelist = sets.ImmutableSet([
+ extension_whitelist = frozenset([
'jpg',
'gif',
'JPG',
import datetime
import file_writer
import globals
-import httplib
+import http.client
import json
import renderer
import secrets
class myq_trigger(trigger.trigger):
def get_triggered_page_list(self):
if globals.get("myq_triggered") == True:
- print "****** MyQ garage door is open page trigger ******"
+ print("****** MyQ garage door is open page trigger ******")
return (constants.myq_pagename, trigger.trigger.PRIORITY_HIGH)
else:
return None
items_per_row = 1
else:
- print "Error, unknown layout type: %d" % self.layout
+ print("Error, unknown layout type: %d" % self.layout)
count = 0
self.items.sort(key=len, reverse=True)
-import httplib
+import http.client
import gdata_oauth
import file_writer
import renderer
temp_width = {}
temp_height = {}
temp_is_video = {}
- conn = httplib.HTTPSConnection("photoslibrary.googleapis.com")
+ conn = http.client.HTTPSConnection("photoslibrary.googleapis.com")
conn.request("GET",
"/v1/albums",
None,
})
response = conn.getresponse()
if response.status != 200:
- print("Failed to fetch albums, status %d\n" % response.status)
- print response.read()
+ print("Failed to fetch albums, status %d\n" % response.status)
+ print(response.read())
albums = self.pws.GetUserFeed().entry
for album in albums:
if (album.title.text not in picasa_renderer.album_whitelist):
if not oauth.has_token():
user_code = oauth.get_user_code()
print('------------------------------------------------------------')
- print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
- oauth.verification_url, user_code))
+ print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
+ oauth.verification_url, user_code))
oauth.get_new_token()
x = picasa_renderer({"Fetch Photos": (60 * 60 * 12),
"Shuffle Cached Photos": (1)},
import file_writer
from bs4 import BeautifulSoup
import renderer
-import httplib
+import http.client
import re
class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
return "pollen"
def fetch_html(self):
- conn = httplib.HTTPConnection(self.site)
+ conn = http.client.HTTPConnection(self.site)
conn.request(
"GET",
self.uri,
{})
response = conn.getresponse()
if response.status != 200:
- print('Connection to %s/%s failed, status %d' % (self.site,
+ print('Connection to %s/%s failed, status %d' % (self.site,
self.uri,
- response.status))
+ response.status))
return False
return response.read()
for word in brokenStr1:
if (self.normalize(word) in self.arrBad or
word in self.arrBad):
- print('***** PROFANITY WORD="%s"' % word)
+ print('***** PROFANITY WORD="%s"' % word)
text = text.replace(word, badWordMask[:len(word)])
if len(brokenStr1) > 1:
- bigrams = zip(brokenStr1, brokenStr1[1:])
+ bigrams = list(zip(brokenStr1, brokenStr1[1:]))
for bigram in bigrams:
phrase = "%s %s" % (bigram[0], bigram[1])
if (self.normalize(phrase) in self.arrBad or
phrase in self.arrBad):
- print('***** PROFANITY PHRASE="%s"' % phrase)
+ print('***** PROFANITY PHRASE="%s"' % phrase)
text = text.replace(bigram[0], badWordMask[:len(bigram[0])])
text = text.replace(bigram[1], badWordMask[:len(bigram[1])])
if len(brokenStr1) > 2:
- trigrams = zip(brokenStr1, brokenStr1[1:], brokenStr1[2:])
+ trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
for trigram in trigrams:
phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
if (self.normalize(phrase) in self.arrBad or
phrase in self.arrBad):
- print('***** PROFANITY PHRASE="%s"' % phrase)
+ print('***** PROFANITY PHRASE="%s"' % phrase)
text = text.replace(trigram[0], badWordMask[:len(trigram[0])])
text = text.replace(trigram[1], badWordMask[:len(trigram[1])])
text = text.replace(trigram[2], badWordMask[:len(trigram[2])])
for word in brokenStr1:
if (self.normalize(word) in self.arrBad or
word in self.arrBad):
- print('***** PROFANITY WORD="%s"' % word)
+ print('***** PROFANITY WORD="%s"' % word)
return True
if len(brokenStr1) > 1:
- bigrams = zip(brokenStr1, brokenStr1[1:])
+ bigrams = list(zip(brokenStr1, brokenStr1[1:]))
for bigram in bigrams:
phrase = "%s %s" % (bigram[0], bigram[1])
if (self.normalize(phrase) in self.arrBad or
phrase in self.arrBad):
- print('***** PROFANITY PHRASE="%s"' % phrase)
+ print('***** PROFANITY PHRASE="%s"' % phrase)
return True
if len(brokenStr1) > 2:
- trigrams = zip(brokenStr1, brokenStr1[1:], brokenStr1[2:])
+ trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
for trigram in trigrams:
phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
if (self.normalize(phrase) in self.arrBad or
phrase in self.arrBad):
- print('***** PROFANITY PHRASE="%s"' % phrase)
+ print('***** PROFANITY PHRASE="%s"' % phrase)
return True
return False
msg.thumbnail != "default" and
msg.thumbnail != ""):
content = '<IMG SRC="%s">' % msg.thumbnail
- x = u"""
+ x = """
<TABLE STYLE="font-size:%dpt;">
<TR>
<!-- The number of upvotes or item image: -->
tries[key] = 0
if tries[key] > 5:
- print('Too many retries for "%s", giving up for now' % key)
+ print('Too many retries for "%s.%s", giving up for now' % (
+ key, self.__class__))
keys_to_skip.add(key)
else:
- msg = 'renderer: periodic render event for "%s"' % key
+ msg = 'renderer: periodic render event for "%s.%s"' % (
+ key, self.__class__)
if (tries[key] > 1):
msg = msg + " (try %d)" % tries[key]
print(msg)
# current date and time
now = datetime.now()
timestamp = now.strftime("%d-%b-%Y (%H:%M:%S.%f)")
- print "%s(%s): %s" % (self.debug_prefix(), timestamp, msg)
+ print("%s(%s): %s" % (self.debug_prefix(), timestamp, msg))
except Exception as e:
- print "Exception in debug_print!"
- print e
+ print("Exception in debug_print!")
+ print(e)
if not oauth.has_token():
user_code = oauth.get_user_code()
print('------------------------------------------------------------')
- print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
- oauth.verification_url, user_code))
+ print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
+ oauth.verification_url, user_code))
oauth.get_new_token()
seconds = 1
import grab_bag
import renderer
import datetime
-import httplib
+import http.client
import page_builder
import profanity_filter
import random
oldest = datetime.datetime.now() - datetime.timedelta(14)
for uri in self.feed_uris:
- self.conn = httplib.HTTPConnection(self.feed_site)
+ self.conn = http.client.HTTPConnection(self.feed_site)
self.conn.request(
"GET",
uri,
{"Accept-Charset": "utf-8"})
response = self.conn.getresponse()
if response.status != 200:
- print("%s: RSS fetch_news error, response: %d" % (self.page,
- response.status))
+ print("%s: RSS fetch_news error, response: %d" % (self.page,
+ response.status))
self.debug_print(response.read())
return False
import datetime
import generic_news_rss_renderer as gnrss
-import sets
class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
- interesting_categories = sets.ImmutableSet([
+ interesting_categories = frozenset([
'Nation',
'World',
'Life',
import renderer
import file_writer
-import httplib
+import http.client
import xml.etree.ElementTree as ET
class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
def periodic_render(self, key):
f = file_writer.file_writer('stevens-conditions_1_none.html')
for uri in self.feed_uris:
- self.conn = httplib.HTTPSConnection(self.feed_site)
+ self.conn = http.client.HTTPSConnection(self.feed_site)
self.conn.request(
"GET",
uri,
import random
import secrets
import time
-import urllib2
+import urllib.request, urllib.error, urllib.parse
class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
# format exchange:symbol
while True:
key = self.get_random_key()
url = self.prefix + "function=GLOBAL_QUOTE&symbol=%s&apikey=%s" % (symbol, key)
- raw = urllib2.urlopen(url).read()
+ raw = urllib.request.urlopen(url).read()
cooked = json.loads(raw)
- if u'Global Quote' not in cooked:
+ if 'Global Quote' not in cooked:
# print "%s\n" % cooked
- print "Failure %d, sleep %d sec...\n" % (attempts + 1,
- 2 ** attempts)
+ print("Failure %d, sleep %d sec...\n" % (attempts + 1,
+ 2 ** attempts))
time.sleep(2 ** attempts)
attempts += 1
if attempts > 10: # we'll wait up to 512 seconds per symbol
break
# These fuckers...
- if u'Global Quote' not in cooked:
- print "Can't get data for symbol %s: %s\n" % (
- symbol, raw)
+ if 'Global Quote' not in cooked:
+ print("Can't get data for symbol %s: %s\n" % (
+ symbol, raw))
continue
- cooked = cooked[u'Global Quote']
+ cooked = cooked['Global Quote']
# {
# u'Global Quote':
# }
price = "?????"
- if u'05. price' in cooked:
- price = cooked[u'05. price']
+ if '05. price' in cooked:
+ price = cooked['05. price']
price = price[:-2]
percent_change = "?????"
- if u'10. change percent' in cooked:
- percent_change = cooked[u'10. change percent']
+ if '10. change percent' in cooked:
+ percent_change = cooked['10. change percent']
if not '-' in percent_change:
percent_change = "+" + percent_change
change = "?????"
cell_color = "#bbbbbb"
- if u'09. change' in cooked:
- change = cooked[u'09. change']
+ if '09. change' in cooked:
+ change = cooked['09. change']
if "-" in change:
cell_color = "#b00000"
else:
import datetime
import file_writer
import grab_bag
-import httplib
+import http.client
import page_builder
import profanity_filter
import random
import re
import renderer
-import sets
class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
def __init__(self, name_to_timeout_dict):
feed_uris.append("/events/?start-date=%s&page=2&picks=true" % next_sun)
for uri in feed_uris:
- self.debug_print("fetching '%s'" % uri)
- self.conn = httplib.HTTPSConnection(self.feed_site)
- self.conn.request(
- "GET",
- uri,
- None,
- {"Accept-Charset": "utf-8"})
- response = self.conn.getresponse()
- if response.status != 200:
- print("stranger: Failed, status %d" % (response.status))
+ try:
+ self.debug_print("fetching '%s'" % uri)
+ self.conn = http.client.HTTPSConnection(self.feed_site)
+ self.conn.request(
+ "GET",
+ uri,
+ None,
+ {"Accept-Charset": "utf-8"})
+ response = self.conn.getresponse()
+ if response.status != 200:
+ self.debug_print("Connection failed, status %d" % (
+ response.status))
+ continue
+ raw = response.read()
+ except:
+ self.debug_print("Exception talking to the stranger, ignoring.")
continue
- raw = response.read()
soup = BeautifulSoup(raw, "html.parser")
filter = profanity_filter.profanity_filter()
for x in soup.find_all('div', class_='row event list-item mb-3 py-3'):
text = x.get_text();
if (filter.contains_bad_words(text)):
continue
-
-# <div class="row event list-item mb-3 py-3">
-# <div class="col-12">
-# <a class="category-tag" href="?category=on-demand">On Demand</a>
-# </div> // col-12
-# <div class="col-md-3 order-1 order-md-3">
-# <a href="https://everout.thestranger.com/events/spliff-2020-on-demand/e24125/">
-# <img class="img-responsive" src="https://d2i729k8wyri5w.cloudfront.net/eyJidWNrZXQiOiAiZXZlcm91dC1pbWFnZXMtcHJvZHVjdGlvbiIsICJrZXkiOiAiaW1hZ2UtMTU5MTA2NTQxODU5NzA5My1vcmlnaW5hbC1sb2dvLmpwZWciLCAiZWRpdHMiOiB7InJlc2l6ZSI6IHsiZml0IjogImNvdmVyIiwgIndpZHRoIjogNDAwLCAiaGVpZ2h0IjogMzAwfX19">
-# </a>
-# </div> // col-md-3 order-1 order-md-3
-# <div class="col-md-6 order-2 order-md-1 event-details">
-# <h3 class="mb-0 event-title">
-# <a href="https://everout.thestranger.com/events/spliff-2020-on-demand/e24125/"><span class="staff-pick fas fa-star" aria-hidden="true"></span></a>
-# <a href="https://everout.thestranger.com/events/spliff-2020-on-demand/e24125/">
-# <span class="title-link">SPLIFF 2020 - On Demand</span>
-# </a>
-# </h3>
-# <div class="event-date">
-# Every day
-# </div> // event-date
-# <div class="event-time">
-# </div> // event-time
-# </div> // col-md-6 order-2 order-md-1 event-details
-# <div class="col-md-3 order-3 order-md-2 location-column">
-# <div class="location-name">
-# <i class="fad fa-map-marker-alt"></i> <a href="https://everout.thestranger.com/locations/the-stranger-online/l27660/">The Stranger (Online)</a>
-# </div> // location-name
-# <div class="location-region">
-# </div> // location-region
-# <ul class="event-tags">
-# <li>$10 - $20</li>
-# </ul>
-# </div> // col-md-3 order-3 order-md-2 location-colum
-# </div> // row event list-item mb-3 py-3
-
- raw = unicode(x)
+ raw = str(x)
raw = raw.replace('src="/',
'align="left" src="https://www.thestranger.com/')
raw = raw.replace('href="/',
self.debug_print("fetched %d events so far." % self.events.size())
return self.events.size() > 0
-x = stranger_events_renderer({"Test", 123})
-x.periodic_render("Fetch Events")
-x.periodic_render("Shuffle Events")
+# Test
+#x = stranger_events_renderer({"Test", 123})
+#x.periodic_render("Fetch Events")
+#x.periodic_render("Shuffle Events")
try:
tweets = self.api.home_timeline(tweet_mode='extended', count=200)
except:
- print "Exception while fetching tweets!"
+ print("Exception while fetching tweets!")
return False
for tweet in tweets:
author = tweet.author.name
return True
def shuffle_tweets(self):
- authors = self.tweets_by_author.keys()
+ authors = list(self.tweets_by_author.keys())
author = random.choice(authors)
handle = self.handles_by_author[author]
tweets = self.tweets_by_author[author]
(not self.filter.contains_bad_words(text))):
already_seen.add(text)
text = self.linkify(text)
- f.write(u'<LI><B>%s</B>\n' % text)
+ f.write('<LI><B>%s</B>\n' % text)
count += 1
length += len(text)
if count > 3 or length > 270:
#x = t.linkify(x)
#print x
if t.fetch_tweets() == 0:
- print "Error fetching tweets, none fetched."
+ print("Error fetching tweets, none fetched.")
else:
t.shuffle_tweets()
import json
import re
import secrets
-import urllib2
+import urllib.request, urllib.error, urllib.parse
import random
class weather_renderer(renderer.debuggable_abstaining_renderer):
clear_count = 0
total_snow = 0
count = min(len(conditions), len(rain), len(snow))
- for x in xrange(0, count):
+ for x in range(0, count):
seen_rain = rain[x] > 0;
seen_snow = snow[x] > 0;
total_snow += snow[x]
lwind = ""
lprecip = ""
ltime = ""
- for x in xrange(0, count):
+ for x in range(0, count):
time = self.describe_time(x)
current = ""
chunks = 0
text_location = "Bellevue, WA"
param = "id=5786882"
- www = urllib2.urlopen('http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial' % (
+ www = urllib.request.urlopen('http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial' % (
param, secrets.openweather_key))
response = www.read()
www.close()
conditions = {}
rain = {}
snow = {}
- for x in xrange(0, count):
+ for x in range(0, count):
data = parsed_json['list'][x]
dt = data['dt_txt'] # 2019-10-07 18:00:00
date = dt.split(" ")[0]
snow[date] = []
ts[date] = 0
- for x in xrange(0, count):
+ for x in range(0, count):
data = parsed_json['list'][x]
dt = data['dt_txt'] # 2019-10-07 18:00:00
date = dt.split(" ")[0]
if (formatted_date in days_seen):
continue;
days_seen[formatted_date] = True
- num_days = len(days_seen.keys())
+ num_days = len(days_seen)
days_seen = {}
for date in sorted(highs.keys()):