Upgrade the kiosk to use Python 3.7.
author: Scott Gasch <[email protected]>
Fri, 3 Jul 2020 04:04:48 +0000 (21:04 -0700)
committer: Scott Gasch <[email protected]>
Fri, 3 Jul 2020 04:04:48 +0000 (21:04 -0700)
28 files changed:
camera_trigger.py
chooser.py
cnn_rss_renderer.py
file_writer.py
gcal_renderer.py
gcal_trigger.py
gdata_oauth.py
gdocs_renderer.py
generic_news_rss_renderer.py
gkeep_renderer.py
kiosk.py
local_photos_mirror_renderer.py
myq_renderer.py
myq_trigger.py
page_builder.py
picasa_renderer.py
pollen_renderer.py
profanity_filter.py
reddit_renderer.py
renderer.py
renderer_catalog.py
reuters_rss_renderer.py
seattletimes_rss_renderer.py
stevens_renderer.py
stock_renderer.py
stranger_renderer.py
twitter_renderer.py
weather_renderer.py

index 6e63dd1d2790bb9d963f33f5c09ce65973b3e67a..74829e32d8b54cd9612e3b9259f515f596be848d 100644 (file)
@@ -73,15 +73,15 @@ class any_camera_trigger(trigger.trigger):
                     if (self.triggers_in_the_past_seven_min[camera] <= 4 or
                         cameras_with_recent_triggers > 1):
                         p = self.choose_priority(camera, age)
-                        print "%s: ****** %s[%d] CAMERA TRIGGER ******" % (
-                            ts, camera, p)
+                        print(("%s: ****** %s[%d] CAMERA TRIGGER ******" % (
+                            ts, camera, p)))
                         triggers.append( ( "hidden/%s.html" % camera,
                                            self.choose_priority(camera, age)) )
                     else:
-                        print "%s: Camera %s too spammy, squelching it" % (
-                            ts, camera)
+                        print(("%s: Camera %s too spammy, squelching it" % (
+                            ts, camera)))
         except Exception as e:
-            print e
+            print(e)
             pass
 
         if len(triggers) == 0:
@@ -90,4 +90,4 @@ class any_camera_trigger(trigger.trigger):
             return triggers
 
 #x = any_camera_trigger()
-#print x.get_triggered_page_list()
+#print(x.get_triggered_page_list())
index 47a2cb71a065c35010d3411bffb58e7e9a0d981b..f6a9a497d3cd6c1205e6e31e81cee340c7056247 100644 (file)
@@ -18,14 +18,14 @@ class chooser(object):
         for page in pages:
             result = re.match(valid_filename, page)
             if result != None:
-                print('chooser: candidate page: "%s"' % page)
+                print(('chooser: candidate page: "%s"' % page))
                 if (result.group(3) != "none"):
                     freshness_requirement = int(result.group(3))
                     last_modified = int(os.path.getmtime(
                         os.path.join(constants.pages_dir, page)))
                     age = (now - last_modified)
                     if (age > freshness_requirement):
-                        print ('"%s" is too old.' % page)
+                        print(('"%s" is too old.' % page))
                         continue
                 filenames.append(page)
         return filenames
@@ -56,12 +56,12 @@ class weighted_random_chooser(chooser):
                 total_weight += weight
 
         if (total_weight <= 0):
-            raise(error("No valid candidate pages found!"))
+            raise error
 
         while True:
             pick = random.randrange(0, total_weight - 1)
             so_far = 0
-            for x in xrange(0, len(weights)):
+            for x in range(0, len(weights)):
                 so_far += weights[x]
                 if (so_far > pick and
                     self.pages[x] != self.last_choice):
@@ -95,7 +95,7 @@ class weighted_random_chooser_with_triggers(weighted_random_chooser):
 
         # First try to satisfy from the page queue
         if (len(self.page_queue) > 0):
-            print "Pulling page from queue"
+            print("Pulling page from queue")
             page = None
             priority = None
             for t in self.page_queue:
@@ -123,7 +123,7 @@ class rotating_chooser(chooser):
             self.pages = self.get_page_list()
 
         if len(self.pages) == 0:
-            raise(error("No pages!"))
+            raise error
 
         if (self.current >= len(self.pages)):
             self.current = 0
index 402d9878fa3f8caf608a839327f9164ce19a05e2..7ecfa19b851c11826ba1c52299c3dd691ab2e782 100644 (file)
@@ -28,10 +28,11 @@ class cnn_rss_renderer(generic_news_rss_renderer.generic_news_rss_renderer):
         return False
 
     def item_is_interesting_for_headlines(self, title, description, item):
-        return "CNN.com" not in title
+        return re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None
 
     def item_is_interesting_for_article(self, title, description, item):
-        return len(description) >= 65
+        return (re.search(r'[Cc][Nn][Nn][A-Za-z]*\.com', title) is None and
+                len(description) >= 65)
 
 # Test
 #x = cnn_rss_renderer(
index aba7d8f25f5b0efda0df5d5ef55548af0aeb0c26..0d95f71d505e4736aef184935e0b3e0f22bce185 100644 (file)
@@ -4,9 +4,9 @@ import os
 def remove_tricky_unicode(x):
     try:
         x = x.decode('utf-8')
-        x = x.replace(u"\u2018", "'").replace(u"\u2019", "'")
-        x = x.replace(u"\u201c", '"').replace(u"\u201d", '"')
-        x = x.replace(u"\u2e3a", "-").replace(u"\u2014", "-")
+        x = x.replace("\u2018", "'").replace("\u2019", "'")
+        x = x.replace("\u201c", '"').replace("\u201d", '"')
+        x = x.replace("\u2e3a", "-").replace("\u2014", "-")
     except:
         pass
     return x
@@ -15,7 +15,7 @@ class file_writer:
     def __init__(self, filename):
         self.full_filename = os.path.join(constants.pages_dir,
                                           filename)
-        self.f = open(self.full_filename, 'w')
+        self.f = open(self.full_filename, 'wb')
         self.xforms = [ remove_tricky_unicode ]
 
     def add_xform(self, xform):
index 1609e9228322c39d635f41c5c357e6c657efde8a..c3be3d70164bbf9c55f4450c16c64a41cf185fd8 100644 (file)
@@ -6,13 +6,12 @@ import gdata
 import globals
 import os
 import renderer
-import sets
 import time
 
 class gcal_renderer(renderer.debuggable_abstaining_renderer):
     """A renderer to fetch upcoming events from www.google.com/calendar"""
 
-    calendar_whitelist = sets.ImmutableSet([
+    calendar_whitelist = frozenset([
         'Alex\'s calendar',
         'Family',
         'Holidays in United States',
@@ -89,7 +88,7 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
             return datetime.datetime.strftime(x, '%Y-%m-%dT%H:%M:%SZ')
         time_min = datetime.datetime.now()
         time_max = time_min + datetime.timedelta(95)
-        time_min, time_max = map(format_datetime, (time_min, time_max))
+        time_min, time_max = list(map(format_datetime, (time_min, time_max)))
         self.debug_print("time_min is %s" % time_min)
         self.debug_print("time_max is %s" % time_max)
 
@@ -197,7 +196,7 @@ class gcal_renderer(renderer.debuggable_abstaining_renderer):
                         name, days[0], hours[0], minutes[0]))
             g.write('</ul>')
             g.write('<SCRIPT>\nlet timestampMap = new Map([')
-            for x in timestamps.keys():
+            for x in list(timestamps.keys()):
                 g.write('    ["%s", %f],\n' % (x, timestamps[x] * 1000.0))
             g.write(']);\n\n')
             g.write("""
index 4e2f65e3b12147e705f929e5f2569212670c3b1f..870020adb553b60d0ab62cda6ce214802311612a 100644 (file)
@@ -5,7 +5,7 @@ import trigger
 class gcal_trigger(trigger.trigger):
     def get_triggered_page_list(self):
         if globals.get("gcal_triggered") == True:
-            print "****** gcal has an imminent upcoming event. ******"
+            print("****** gcal has an imminent upcoming event. ******")
             return (constants.gcal_imminent_pagename, trigger.trigger.PRIORITY_HIGH)
         else:
             return None
index 64934ebf173831c93146d4a58cb9bb192b96c2a7..f88b2f5cd60a89874cf4845f69e9e5a866ea11a6 100644 (file)
@@ -4,9 +4,9 @@
 # https://developers.google.com/picasa-web/
 
 import sys
-import urllib
+import urllib.request, urllib.parse, urllib.error
 try:
-    import httplib     # python2
+    import http.client     # python2
 except ImportError:
     import http.client # python3
 import os.path
@@ -16,9 +16,9 @@ from oauth2client.client import OAuth2Credentials
 import gdata.calendar.service
 import gdata.docs.service
 import gdata.photos.service, gdata.photos
-from apiclient.discovery import build
+from googleapiclient.discovery import build
 import httplib2
-from apiclient.discovery import build
+from googleapiclient.discovery import build
 import datetime
 import ssl
 
@@ -55,8 +55,8 @@ class OAuth:
     # When this happens, we try re-creating the exception.
     def reset_connection(self):
         self.ssl_ctx = ssl.create_default_context(cafile='/usr/local/etc/ssl/cert.pem')
-        httplib.HTTPConnection.debuglevel = 2
-        self.conn = httplib.HTTPSConnection(self.host, context=self.ssl_ctx)
+        http.client.HTTPConnection.debuglevel = 2
+        self.conn = http.client.HTTPSConnection(self.host, context=self.ssl_ctx)
 
     def load_token(self):
         token = None
@@ -82,7 +82,7 @@ class OAuth:
         self.conn.request(
             "POST",
             "/o/oauth2/device/code",
-            urllib.urlencode({
+            urllib.parse.urlencode({
                 'client_id': self.client_id,
                 'scope'    : ' '.join(self.scope)
             }),
@@ -95,8 +95,8 @@ class OAuth:
             self.verification_url = data['verification_url']
             self.retry_interval = data['interval']
         else:
-            print("gdata: %d" % response.status)
-            print(response.read())
+            print(("gdata: %d" % response.status))
+            print((response.read()))
             sys.exit()
         return self.user_code
 
@@ -110,7 +110,7 @@ class OAuth:
             self.conn.request(
                 "POST",
                 "/o/oauth2/token",
-                urllib.urlencode({
+                urllib.parse.urlencode({
                     'client_id'     : self.client_id,
                     'client_secret' : self.client_secret,
                     'code'          : self.device_code,
@@ -127,8 +127,8 @@ class OAuth:
                     time.sleep(self.retry_interval + 2)
             else:
                 print("gdata: failed to get token")
-                print(response.status)
-                print(response.read())
+                print((response.status))
+                print((response.read()))
 
     def refresh_token(self):
         if self.checking_too_often():
@@ -141,7 +141,7 @@ class OAuth:
         self.conn.request(
             "POST",
             "/o/oauth2/token",
-            urllib.urlencode({
+            urllib.parse.urlencode({
                 'client_id'     : self.client_id,
                 'client_secret' : self.client_secret,
                 'refresh_token' : refresh_token,
@@ -160,8 +160,8 @@ class OAuth:
                     self.token['refresh_token'] = refresh_token
                     self.save_token()
                 return True
-        print("gdata: unexpected response %d to renewal request" % response.status)
-        print(response.read())
+        print(("gdata: unexpected response %d to renewal request" % response.status))
+        print((response.read()))
         return False
 
     def checking_too_often(self):
index 125d5b412208d2160f193e06b848d670dd547218..d734a2deb90c4051db50d9d1fb97af2492be9f9b 100644 (file)
@@ -27,7 +27,7 @@ class gdocs_renderer(renderer.debuggable_abstaining_renderer):
                 if page_token:
                     param['pageToken'] = page_token
                 param['q'] = self.query
-                print "QUERY: %s" % param['q']
+                print("QUERY: %s" % param['q'])
 
                 files = self.client.files().list(**param).execute()
                 result.extend(files['items'])
@@ -47,18 +47,18 @@ class gdocs_renderer(renderer.debuggable_abstaining_renderer):
             return "font-size:%dpt" % (x)
 
         for f in result:
-            print f['title']
-            print f['id']
+            print(f['title'])
+            print(f['id'])
             self.debug_print("%s (%s)\n" % (f['title'], f['id']))
             title = f['title']
             url = f['exportLinks']['text/html']
-            print f
-            print "Fetching %s..." % url
+            print(f)
+            print("Fetching %s..." % url)
             resp, contents = self.client._http.request(url)
-            print resp.status
-            print contents
+            print(resp.status)
+            print(contents)
             if resp.status == 200:
-                print "Got contents."
+                print("Got contents.")
                 contents = re.sub('<body class="..">', '', contents)
                 contents = contents.replace('</body>', '')
                 contents = re.sub('font-size:([0-9]+)pt', boost_font_size, contents)
index b87ab05caa49ed646ecff87266cfca482576aab5..849c1ad4a643bd724d952215c994567a609409cc 100644 (file)
@@ -1,7 +1,7 @@
 import file_writer
 import grab_bag
 import renderer
-import httplib
+import http.client
 import page_builder
 import profanity_filter
 import random
@@ -107,10 +107,10 @@ class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
         for uri in self.feed_uris:
             if self.should_use_https():
                 self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
-                self.conn = httplib.HTTPSConnection(self.feed_site)
+                self.conn = http.client.HTTPSConnection(self.feed_site)
             else:
                 self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
-                self.conn = httplib.HTTPConnection(self.feed_site)
+                self.conn = http.client.HTTPConnection(self.feed_site)
             self.conn.request(
                 "GET",
                 uri,
@@ -118,8 +118,8 @@ class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
                 {"Accept-Charset": "utf-8"})
             response = self.conn.getresponse()
             if response.status != 200:
-                print("%s: RSS fetch_news error, response: %d" % (self.page_title,
-                                                                  response.status))
+                print(("%s: RSS fetch_news error, response: %d" % (self.page_title,
+                                                                  response.status)))
                 self.debug_print(response.read())
                 return False
 
@@ -151,7 +151,7 @@ class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
                 #print u"Title: %s\nDescription: %s\nLink: %s\nImage: %s\n" % (
                 #    title, description, link, image)
 
-                blurb = u"""<DIV style="padding:8px;
+                blurb = """<DIV style="padding:8px;
                                  font-size:34pt;
                                  -webkit-column-break-inside:avoid;">"""
                 if image is not None:
index c882526409d1f9a698385c53c642c931b7d717bb..0285cf13fa70b64af3e2eda480013195014e237e 100644 (file)
@@ -37,7 +37,7 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
         return "gkeep"
 
     def periodic_render(self, key):
-        strikethrough = re.compile(u'\u2611([^\n]*)\n', re.UNICODE)
+        strikethrough = re.compile('\u2611([^\n]*)\n', re.UNICODE)
         linkify = re.compile(r'.*(https?:\/\/\S+).*')
 
         self.keep.sync()
@@ -52,7 +52,7 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
             self.debug_print("Note title '%s'" % title)
             if contents != '' and not contents.isspace():
                 contents = strikethrough.sub(r'<font color="#999999">` <del>\1</del></font>\n', contents)
-                contents = contents.replace('`', u'\u2611')
+                contents = contents.replace('`', '\u2611')
                 #self.debug_print("Note contents:\n%s" % contents)
                 contents = linkify.sub(r'<a href="\1">\1</a>', contents)
                 individual_lines = contents.split("\n")
@@ -64,7 +64,7 @@ class gkeep_renderer(renderer.debuggable_abstaining_renderer):
                         max_length = length
                 contents = contents.replace("\n", "<BR>\n")
                 color = note.color.name.lower()
-                if color in self.colors_by_name.keys():
+                if color in list(self.colors_by_name.keys()):
                     color = self.colors_by_name[color]
                 else:
                     self.debug_print("Unknown color '%s'" % color)
index 379e196cbdd0563a950cf772692b441b94ee1054..b7fb9d96cc728625cfe635bf242d075979397be0 100755 (executable)
--- a/kiosk.py
+++ b/kiosk.py
@@ -1,4 +1,4 @@
-#!/usr/local/bin/python
+#!/usr/local/bin/python3.7
 
 import sys
 import traceback
@@ -25,10 +25,10 @@ def thread_change_current():
         (page, triggered) = page_chooser.choose_next_page()
 
         if triggered:
-            print('chooser[%s] - WE ARE TRIGGERED.' % utils.timestamp())
+            print(('chooser[%s] - WE ARE TRIGGERED.' % utils.timestamp()))
             if page != last_page:
-                print('chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED' % (
-                    utils.timestamp(), page))
+                print(('chooser[%s] - EMERGENCY PAGE %s LOAD NEEDED' % (
+                    utils.timestamp(), page)))
                 f = open(os.path.join(constants.pages_dir,
                                       "current.shtml"), "w")
                 emit_wrapped(f, page)
@@ -47,10 +47,10 @@ def thread_change_current():
 
         elif now >= swap_page_target:
             if (page == last_page):
-                print('chooser[%s] - nominal choice got the same page...' % (
-                    utils.timestamp()))
+                print(('chooser[%s] - nominal choice got the same page...' % (
+                    utils.timestamp())))
                 continue
-            print('chooser[%s] - nominal choice of %s' % (utils.timestamp(), page))
+            print(('chooser[%s] - nominal choice of %s' % (utils.timestamp(), page)))
             try:
                 f = open(os.path.join(constants.pages_dir,
                                       "current.shtml"), "w")
@@ -59,7 +59,7 @@ def thread_change_current():
                 last_page = page
                 swap_page_target = now + constants.refresh_period_sec
             except:
-                print('chooser[%s] - page does not exist?!' % (utils.timestamp()))
+                print(('chooser[%s] - page does not exist?!' % (utils.timestamp())))
                 continue
         time.sleep(1.0)
 
@@ -242,17 +242,19 @@ def emit_wrapped(f, filename):
 
 def thread_invoke_renderers():
     while True:
+        print("Renderer thread[%s]: invoking all renderers in catalog..." % (
+            utils.timestamp()))
         for r in renderer_catalog.get_renderers():
             try:
                 r.render()
             except Exception as e:
                 traceback.print_exc()
-                print("renderer[%s] unknown exception, swallowing it." % (
-                    utils.timestamp()))
+                print(("renderer[%s] unknown exception, swallowing it." % (
+                    utils.timestamp())))
             except Error as e:
                 traceback.print_exc()
-                print("renderer[%s] unknown error, swallowing it." % (
-                    utils.timestamp()))
+                print(("renderer[%s] unknown error, swallowing it." % (
+                    utils.timestamp())))
         time.sleep(constants.render_period_sec)
 
 if __name__ == "__main__":
@@ -262,12 +264,12 @@ if __name__ == "__main__":
     while True:
         if (changer_thread == None or
             not changer_thread.is_alive()):
-            print("chooser[%s] - (Re?)initializing chooser thread..." % utils.timestamp())
+            print(("chooser[%s] - (Re?)initializing chooser thread..." % utils.timestamp()))
             changer_thread = Thread(target = thread_change_current, args=())
             changer_thread.start()
         if (renderer_thread == None or
             not renderer_thread.is_alive()):
-            print("renderer[%s] - (Re?)initializing render thread..." % utils.timestamp())
+            print(("renderer[%s] - (Re?)initializing render thread..." % utils.timestamp()))
             renderer_thread = Thread(target = thread_invoke_renderers, args=())
             renderer_thread.start()
         time.sleep(10000)
index 020683d00e8b8c793aa500006708e463aff87744..32e0c1e3e06a60c53acbb495864933908ee72d07 100644 (file)
@@ -1,7 +1,6 @@
 import os
 import file_writer
 import renderer
-import sets
 import random
 
 class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
@@ -9,7 +8,7 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
 
     album_root_directory = "/usr/local/export/www/gphotos/albums"
 
-    album_whitelist = sets.ImmutableSet([
+    album_whitelist = frozenset([
         '1208 Newer Alex Photos',
         '1013 Scott and Lynn',
         '0106 Key West 2019',
@@ -34,7 +33,7 @@ class local_photos_mirror_renderer(renderer.debuggable_abstaining_renderer):
         '0407 Las Vegas, 2017',
     ])
 
-    extension_whitelist = sets.ImmutableSet([
+    extension_whitelist = frozenset([
         'jpg',
         'gif',
         'JPG',
index a4c29ff5160100ee24994f21dcf02596dedbcbc4..fa60642fb283ef080a50e2d52304450b29b7db5e 100644 (file)
@@ -8,7 +8,7 @@ import constants
 import datetime
 import file_writer
 import globals
-import httplib
+import http.client
 import json
 import renderer
 import secrets
index da5b2f1fab6ddf10c4ad346e39c1e1ce30846af9..838f51a545dd981af699086bc5057750b7f89295 100644 (file)
@@ -5,7 +5,7 @@ import trigger
 class myq_trigger(trigger.trigger):
     def get_triggered_page_list(self):
         if globals.get("myq_triggered") == True:
-            print "****** MyQ garage door is open page trigger ******"
+            print("****** MyQ garage door is open page trigger ******")
             return (constants.myq_pagename, trigger.trigger.PRIORITY_HIGH)
         else:
             return None
index 369d4426518f7b0a47301cfd652b2660ec93b2b8..65f5b93e354fc138d9dcbf390feaa2e0d1801fce 100644 (file)
@@ -77,7 +77,7 @@ class page_builder(object):
             items_per_row = 1
 
         else:
-            print "Error, unknown layout type: %d" % self.layout
+            print("Error, unknown layout type: %d" % self.layout)
 
         count = 0
         self.items.sort(key=len, reverse=True)
index d15bd7b1f88a6ae5500127c27ca47b44c5f6e6ed..9de0c2d543acc7b617f1fc4e173cc8d837460a15 100644 (file)
@@ -1,4 +1,4 @@
-import httplib
+import http.client
 import gdata_oauth
 import file_writer
 import renderer
@@ -78,7 +78,7 @@ class picasa_renderer(renderer.debuggable_abstaining_renderer):
             temp_width = {}
             temp_height = {}
             temp_is_video = {}
-            conn = httplib.HTTPSConnection("photoslibrary.googleapis.com")
+            conn = http.client.HTTPSConnection("photoslibrary.googleapis.com")
             conn.request("GET",
                          "/v1/albums",
                          None,
@@ -86,8 +86,8 @@ class picasa_renderer(renderer.debuggable_abstaining_renderer):
                          })
             response = conn.getresponse()
             if response.status != 200:
-                print("Failed to fetch albums, status %d\n" % response.status)
-            print response.read()
+                print(("Failed to fetch albums, status %d\n" % response.status))
+            print(response.read())
             albums = self.pws.GetUserFeed().entry
             for album in albums:
                 if (album.title.text not in picasa_renderer.album_whitelist):
@@ -159,8 +159,8 @@ oauth.get_new_token()
 if not oauth.has_token():
     user_code = oauth.get_user_code()
     print('------------------------------------------------------------')
-    print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
-        oauth.verification_url, user_code))
+    print(('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
+        oauth.verification_url, user_code)))
     oauth.get_new_token()
 x = picasa_renderer({"Fetch Photos": (60 * 60 * 12),
                      "Shuffle Cached Photos": (1)},
index bcc6c38b78592d3caaeef4c88db0f58fb363b857..745ad52a5e50465f0fda1f124bc0d059b1fe2815 100644 (file)
@@ -1,7 +1,7 @@
 import file_writer
 from bs4 import BeautifulSoup
 import renderer
-import httplib
+import http.client
 import re
 
 class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
@@ -17,7 +17,7 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
         return "pollen"
 
     def fetch_html(self):
-        conn = httplib.HTTPConnection(self.site)
+        conn = http.client.HTTPConnection(self.site)
         conn.request(
                 "GET",
                 self.uri,
@@ -25,9 +25,9 @@ class pollen_count_renderer(renderer.debuggable_abstaining_renderer):
                 {})
         response = conn.getresponse()
         if response.status != 200:
-            print('Connection to %s/%s failed, status %d' % (self.site,
+            print(('Connection to %s/%s failed, status %d' % (self.site,
                                                              self.uri,
-                                                             response.status))
+                                                             response.status)))
             return False
         return response.read()
 
index 7b378ccc1213866b516ad0a1723f2e914c207ec6..1c862eb5f54f3769008ec4a73944f15c61bf60e0 100644 (file)
@@ -393,26 +393,26 @@ class profanity_filter:
         for word in brokenStr1:
             if (self.normalize(word) in self.arrBad or
                 word in self.arrBad):
-                print('***** PROFANITY WORD="%s"' % word)
+                print(('***** PROFANITY WORD="%s"' % word))
                 text = text.replace(word, badWordMask[:len(word)])
 
         if len(brokenStr1) > 1:
-            bigrams = zip(brokenStr1, brokenStr1[1:])
+            bigrams = list(zip(brokenStr1, brokenStr1[1:]))
             for bigram in bigrams:
                 phrase = "%s %s" % (bigram[0], bigram[1])
                 if (self.normalize(phrase) in self.arrBad or
                     phrase in self.arrBad):
-                    print('***** PROFANITY PHRASE="%s"' % phrase)
+                    print(('***** PROFANITY PHRASE="%s"' % phrase))
                     text = text.replace(bigram[0], badWordMask[:len(bigram[0])])
                     text = text.replace(bigram[1], badWordMask[:len(bigram[1])])
 
         if len(brokenStr1) > 2:
-            trigrams = zip(brokenStr1, brokenStr1[1:], brokenStr1[2:])
+            trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
             for trigram in trigrams:
                 phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
                 if (self.normalize(phrase) in self.arrBad or
                     phrase in self.arrBad):
-                    print('***** PROFANITY PHRASE="%s"' % phrase)
+                    print(('***** PROFANITY PHRASE="%s"' % phrase))
                     text = text.replace(trigram[0], badWordMask[:len(trigram[0])])
                     text = text.replace(trigram[1], badWordMask[:len(trigram[1])])
                     text = text.replace(trigram[2], badWordMask[:len(trigram[2])])
@@ -423,25 +423,25 @@ class profanity_filter:
         for word in brokenStr1:
             if (self.normalize(word) in self.arrBad or
                 word in self.arrBad):
-                print('***** PROFANITY WORD="%s"' % word)
+                print(('***** PROFANITY WORD="%s"' % word))
                 return True
 
         if len(brokenStr1) > 1:
-            bigrams = zip(brokenStr1, brokenStr1[1:])
+            bigrams = list(zip(brokenStr1, brokenStr1[1:]))
             for bigram in bigrams:
                 phrase = "%s %s" % (bigram[0], bigram[1])
                 if (self.normalize(phrase) in self.arrBad or
                     phrase in self.arrBad):
-                    print('***** PROFANITY PHRASE="%s"' % phrase)
+                    print(('***** PROFANITY PHRASE="%s"' % phrase))
                     return True
 
         if len(brokenStr1) > 2:
-            trigrams = zip(brokenStr1, brokenStr1[1:], brokenStr1[2:])
+            trigrams = list(zip(brokenStr1, brokenStr1[1:], brokenStr1[2:]))
             for trigram in trigrams:
                 phrase = "%s %s %s" % (trigram[0], trigram[1], trigram[2])
                 if (self.normalize(phrase) in self.arrBad or
                     phrase in self.arrBad):
-                    print('***** PROFANITY PHRASE="%s"' % phrase)
+                    print(('***** PROFANITY PHRASE="%s"' % phrase))
                     return True
 
         return False
index 05b641d30f806f8850ea8fc62e8527d4c6ec93be..dc1a4c59ba6a681b0938ffb14480e313cae715ed 100644 (file)
@@ -50,7 +50,7 @@ class reddit_renderer(renderer.debuggable_abstaining_renderer):
                         msg.thumbnail != "default" and
                         msg.thumbnail != ""):
                         content = '<IMG SRC="%s">' % msg.thumbnail
-                    x = u"""
+                    x = """
 <TABLE STYLE="font-size:%dpt;">
   <TR>
     <!-- The number of upvotes or item image: -->
index 721b374529ea96f7804a8cd6077271b0bbbb6e8b..34e4459d429abda03a28da88048690cbc0834f0c 100644 (file)
@@ -36,10 +36,12 @@ class abstaining_renderer(renderer):
                 tries[key] = 0
 
             if tries[key] > 5:
-                print('Too many retries for "%s", giving up for now' % key)
+                print(('Too many retries for "%s.%s", giving up for now' % (
+                    key, self.__class__)))
                 keys_to_skip.add(key)
             else:
-                msg = 'renderer: periodic render event for "%s"' % key
+                msg = 'renderer: periodic render event for "%s.%s"' % (
+                    key, self.__class__)
                 if (tries[key] > 1):
                     msg = msg + " (try %d)" % tries[key]
                 print(msg)
@@ -71,7 +73,7 @@ class debuggable_abstaining_renderer(abstaining_renderer):
                 # current date and time
                 now = datetime.now()
                 timestamp = now.strftime("%d-%b-%Y (%H:%M:%S.%f)")
-                print "%s(%s): %s" % (self.debug_prefix(), timestamp, msg)
+                print("%s(%s): %s" % (self.debug_prefix(), timestamp, msg))
         except Exception as e:
-            print "Exception in debug_print!"
-            print e
+            print("Exception in debug_print!")
+            print(e)
index c789aaa2b6f6e2c10c500eaa04654d8ad9f85201..3cd8406b4d2b8e02770dc54682162d3874df829f 100644 (file)
@@ -25,8 +25,8 @@ oauth = gdata_oauth.OAuth(secrets.google_client_id,
 if not oauth.has_token():
     user_code = oauth.get_user_code()
     print('------------------------------------------------------------')
-    print('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
-        oauth.verification_url, user_code))
+    print(('Go to %s and enter the code "%s" (no quotes, case-sensitive)' % (
+        oauth.verification_url, user_code)))
     oauth.get_new_token()
 
 seconds = 1
index d78102fe51e4b928d2d94ff58eddaf98b7271a85..e84b3cc3834ec9bd798f24e4be476dd6c1132a25 100644 (file)
@@ -3,7 +3,7 @@ import file_writer
 import grab_bag
 import renderer
 import datetime
-import httplib
+import http.client
 import page_builder
 import profanity_filter
 import random
@@ -70,7 +70,7 @@ class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
         oldest = datetime.datetime.now() - datetime.timedelta(14)
 
         for uri in self.feed_uris:
-            self.conn = httplib.HTTPConnection(self.feed_site)
+            self.conn = http.client.HTTPConnection(self.feed_site)
             self.conn.request(
                 "GET",
                 uri,
@@ -78,8 +78,8 @@ class reuters_rss_renderer(renderer.debuggable_abstaining_renderer):
                 {"Accept-Charset": "utf-8"})
             response = self.conn.getresponse()
             if response.status != 200:
-                print("%s: RSS fetch_news error, response: %d" % (self.page,
-                                                                  response.status))
+                print(("%s: RSS fetch_news error, response: %d" % (self.page,
+                                                                  response.status)))
                 self.debug_print(response.read())
                 return False
 
index 906e00ef134a0e0959e0b8ee9b3fc2140d5809b1..c8d12ce17d6bcadef5a79c645f0b2cdae1121df9 100644 (file)
@@ -1,9 +1,8 @@
 import datetime
 import generic_news_rss_renderer as gnrss
-import sets
 
 class seattletimes_rss_renderer(gnrss.generic_news_rss_renderer):
-    interesting_categories = sets.ImmutableSet([
+    interesting_categories = frozenset([
         'Nation',
         'World',
         'Life',
index ab904ce0b97dbdaf620fbb58bfd0a9471ba85387..eca0dcb517dac6f2a71e41734400f93268e44fbb 100644 (file)
@@ -1,6 +1,6 @@
 import renderer
 import file_writer
-import httplib
+import http.client
 import xml.etree.ElementTree as ET
 
 class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
@@ -16,7 +16,7 @@ class stevens_pass_conditions_renderer(renderer.debuggable_abstaining_renderer):
     def periodic_render(self, key):
         f = file_writer.file_writer('stevens-conditions_1_none.html')
         for uri in self.feed_uris:
-            self.conn = httplib.HTTPSConnection(self.feed_site)
+            self.conn = http.client.HTTPSConnection(self.feed_site)
             self.conn.request(
                 "GET",
                 uri,
index 5a9be493e5aa9079d6c34ccd370ab9413a5be827..f8491e6a9eb73b8ba06ea35ffd1eb96f5ca0b5c2 100644 (file)
@@ -8,7 +8,7 @@ import renderer
 import random
 import secrets
 import time
-import urllib2
+import urllib.request, urllib.error, urllib.parse
 
 class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
     # format exchange:symbol
@@ -55,12 +55,12 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
             while True:
                 key = self.get_random_key()
                 url = self.prefix + "function=GLOBAL_QUOTE&symbol=%s&apikey=%s" % (symbol, key)
-                raw = urllib2.urlopen(url).read()
+                raw = urllib.request.urlopen(url).read()
                 cooked = json.loads(raw)
-                if u'Global Quote' not in cooked:
+                if 'Global Quote' not in cooked:
 #                    print "%s\n" % cooked
-                    print "Failure %d, sleep %d sec...\n" % (attempts + 1,
-                                                             2 ** attempts)
+                    print("Failure %d, sleep %d sec...\n" % (attempts + 1,
+                                                             2 ** attempts))
                     time.sleep(2 ** attempts)
                     attempts += 1
                     if attempts > 10: # we'll wait up to 512 seconds per symbol
@@ -69,11 +69,11 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
                     break
 
             # These fuckers...
-            if u'Global Quote' not in cooked:
-                print "Can't get data for symbol %s: %s\n" % (
-                    symbol, raw)
+            if 'Global Quote' not in cooked:
+                print("Can't get data for symbol %s: %s\n" % (
+                    symbol, raw))
                 continue
-            cooked = cooked[u'Global Quote']
+            cooked = cooked['Global Quote']
 
             # {
             #   u'Global Quote':
@@ -92,20 +92,20 @@ class stock_quote_renderer(renderer.debuggable_abstaining_renderer):
             # }
 
             price = "?????"
-            if u'05. price' in cooked:
-                price = cooked[u'05. price']
+            if '05. price' in cooked:
+                price = cooked['05. price']
                 price = price[:-2]
 
             percent_change = "?????"
-            if u'10. change percent' in cooked:
-                percent_change = cooked[u'10. change percent']
+            if '10. change percent' in cooked:
+                percent_change = cooked['10. change percent']
                 if not '-' in percent_change:
                     percent_change = "+" + percent_change
 
             change = "?????"
             cell_color = "#bbbbbb"
-            if u'09. change' in cooked:
-                change = cooked[u'09. change']
+            if '09. change' in cooked:
+                change = cooked['09. change']
                 if "-" in change:
                     cell_color = "#b00000"
                 else:
index c0389f6a68a994f886a3a45206402ffd5111b558..33ccfc05ca2203917bab3ceabc53259ed7c42a12 100644 (file)
@@ -2,13 +2,12 @@ from bs4 import BeautifulSoup
 import datetime
 import file_writer
 import grab_bag
-import httplib
+import http.client
 import page_builder
 import profanity_filter
 import random
 import re
 import renderer
-import sets
 
 class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
     def __init__(self, name_to_timeout_dict):
@@ -110,61 +109,31 @@ class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
             feed_uris.append("/events/?start-date=%s&page=2&picks=true" % next_sun)
 
         for uri in feed_uris:
-            self.debug_print("fetching '%s'" % uri)
-            self.conn = httplib.HTTPSConnection(self.feed_site)
-            self.conn.request(
-                "GET",
-                uri,
-                None,
-                {"Accept-Charset": "utf-8"})
-            response = self.conn.getresponse()
-            if response.status != 200:
-                print("stranger: Failed, status %d" % (response.status))
+            try:
+                self.debug_print("fetching '%s'" % uri)
+                self.conn = http.client.HTTPSConnection(self.feed_site)
+                self.conn.request(
+                    "GET",
+                    uri,
+                    None,
+                    {"Accept-Charset": "utf-8"})
+                response = self.conn.getresponse()
+                if response.status != 200:
+                    self.debug_print("Connection failed, status %d" % (
+                        response.status))
+                    continue
+                raw = response.read()
+            except:
+                self.debug_print("Exception talking to the stranger, ignoring.")
                 continue
 
-            raw = response.read()
             soup = BeautifulSoup(raw, "html.parser")
             filter = profanity_filter.profanity_filter()
             for x in soup.find_all('div', class_='row event list-item mb-3 py-3'):
                 text = x.get_text();
                 if (filter.contains_bad_words(text)):
                     continue
-
-#          <div class="row event list-item mb-3 py-3">
-#          <div class="col-12">
-#                <a class="category-tag" href="?category=on-demand">On Demand</a>
-#          </div> // col-12
-#          <div class="col-md-3 order-1 order-md-3">
-#              <a href="https://everout.thestranger.com/events/spliff-2020-on-demand/e24125/">
-#                  <img class="img-responsive" src="https://d2i729k8wyri5w.cloudfront.net/eyJidWNrZXQiOiAiZXZlcm91dC1pbWFnZXMtcHJvZHVjdGlvbiIsICJrZXkiOiAiaW1hZ2UtMTU5MTA2NTQxODU5NzA5My1vcmlnaW5hbC1sb2dvLmpwZWciLCAiZWRpdHMiOiB7InJlc2l6ZSI6IHsiZml0IjogImNvdmVyIiwgIndpZHRoIjogNDAwLCAiaGVpZ2h0IjogMzAwfX19">
-#              </a>
-#          </div> // col-md-3 order-1 order-md-3
-#          <div class="col-md-6 order-2 order-md-1 event-details">
-#             <h3 class="mb-0 event-title">
-#                 <a href="https://everout.thestranger.com/events/spliff-2020-on-demand/e24125/"><span class="staff-pick fas fa-star" aria-hidden="true"></span></a>
-#                 <a href="https://everout.thestranger.com/events/spliff-2020-on-demand/e24125/">
-#                 <span class="title-link">SPLIFF 2020 - On Demand</span>
-#                 </a>
-#             </h3>
-#             <div class="event-date">
-#               Every day
-#             </div> // event-date
-#             <div class="event-time">
-#             </div> // event-time
-#          </div> // col-md-6 order-2 order-md-1 event-details
-#          <div class="col-md-3 order-3 order-md-2 location-column">
-#            <div class="location-name">
-#              <i class="fad fa-map-marker-alt"></i> <a href="https://everout.thestranger.com/locations/the-stranger-online/l27660/">The Stranger (Online)</a>
-#            </div> // location-name
-#            <div class="location-region">
-#            </div> // location-region
-#            <ul class="event-tags">
-#              <li>$10 - $20</li>
-#            </ul>
-#          </div> // col-md-3 order-3 order-md-2 location-colum
-#        </div> // row event list-item mb-3 py-3
-
-                raw = unicode(x)
+                raw = str(x)
                 raw = raw.replace('src="/',
                                   'align="left" src="https://www.thestranger.com/')
                 raw = raw.replace('href="/',
@@ -178,6 +147,7 @@ class stranger_events_renderer(renderer.debuggable_abstaining_renderer):
             self.debug_print("fetched %d events so far." % self.events.size())
         return self.events.size() > 0
 
-x = stranger_events_renderer({"Test", 123})
-x.periodic_render("Fetch Events")
-x.periodic_render("Shuffle Events")
+# Test
+#x = stranger_events_renderer({"Test", 123})
+#x.periodic_render("Fetch Events")
+#x.periodic_render("Shuffle Events")
index 49c39da3b46029364c4f403b616f2549c157a296..304cf81b7b69aa1dbc2aa6f148a1ad825b61ecc3 100644 (file)
@@ -54,7 +54,7 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
         try:
             tweets = self.api.home_timeline(tweet_mode='extended', count=200)
         except:
-            print "Exception while fetching tweets!"
+            print("Exception while fetching tweets!")
             return False
         for tweet in tweets:
             author = tweet.author.name
@@ -67,7 +67,7 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
         return True
 
     def shuffle_tweets(self):
-        authors = self.tweets_by_author.keys()
+        authors = list(self.tweets_by_author.keys())
         author = random.choice(authors)
         handle = self.handles_by_author[author]
         tweets = self.tweets_by_author[author]
@@ -86,7 +86,7 @@ class twitter_renderer(renderer.debuggable_abstaining_renderer):
                 (not self.filter.contains_bad_words(text))):
                 already_seen.add(text)
                 text = self.linkify(text)
-                f.write(u'<LI><B>%s</B>\n' % text)
+                f.write('<LI><B>%s</B>\n' % text)
                 count += 1
                 length += len(text)
                 if count > 3 or length > 270:
@@ -103,6 +103,6 @@ t = twitter_renderer(
 #x = t.linkify(x)
 #print x
 if t.fetch_tweets() == 0:
-    print "Error fetching tweets, none fetched."
+    print("Error fetching tweets, none fetched.")
 else:
     t.shuffle_tweets()
index 26c49ca8a442b39374c5106159127a0ca25c630f..fdd4fe125d3257a1303797b338d6981dcaca56fc 100644 (file)
@@ -4,7 +4,7 @@ import renderer
 import json
 import re
 import secrets
-import urllib2
+import urllib.request, urllib.error, urllib.parse
 import random
 
 class weather_renderer(renderer.debuggable_abstaining_renderer):
@@ -94,7 +94,7 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
         clear_count = 0
         total_snow = 0
         count = min(len(conditions), len(rain), len(snow))
-        for x in xrange(0, count):
+        for x in range(0, count):
             seen_rain = rain[x] > 0;
             seen_snow = snow[x] > 0;
             total_snow += snow[x]
@@ -148,7 +148,7 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
         lwind = ""
         lprecip = ""
         ltime = ""
-        for x in xrange(0, count):
+        for x in range(0, count):
             time = self.describe_time(x)
             current = ""
             chunks = 0
@@ -216,7 +216,7 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             text_location = "Bellevue, WA"
             param = "id=5786882"
 
-        www = urllib2.urlopen('http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial' % (
+        www = urllib.request.urlopen('http://api.openweathermap.org/data/2.5/forecast?%s&APPID=%s&units=imperial' % (
             param, secrets.openweather_key))
         response = www.read()
         www.close()
@@ -255,7 +255,7 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
         conditions = {}
         rain = {}
         snow = {}
-        for x in xrange(0, count):
+        for x in range(0, count):
             data = parsed_json['list'][x]
             dt = data['dt_txt']  # 2019-10-07 18:00:00
             date = dt.split(" ")[0]
@@ -268,7 +268,7 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             snow[date] = []
             ts[date] = 0
 
-        for x in xrange(0, count):
+        for x in range(0, count):
             data = parsed_json['list'][x]
             dt = data['dt_txt']  # 2019-10-07 18:00:00
             date = dt.split(" ")[0]
@@ -332,7 +332,7 @@ class weather_renderer(renderer.debuggable_abstaining_renderer):
             if (formatted_date in days_seen):
                 continue;
             days_seen[formatted_date] = True
-        num_days = len(days_seen.keys())
+        num_days = len(list(days_seen.keys()))
 
         days_seen = {}
         for date in sorted(highs.keys()):