+import datetime
+from dateutil.parser import parse
import file_writer
import grab_bag
import renderer
-import httplib
+import http.client
import page_builder
import profanity_filter
import random
class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
    def __init__(self, name_to_timeout_dict, feed_site, feed_uris, page_title):
        # NOTE(review): diff fragment — '-' is the old one-line Python-2 super()
        # call, '+' is its replacement wrapped onto two lines.
-        super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict, False)
+        super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict,
+                                                        False)
        self.debug = 1
        self.feed_site = feed_site
        self.feed_uris = feed_uris
        # NOTE(review): page_title is accepted but not stored in the visible
        # lines, yet periodic_render reads self.page_title — presumably the
        # assignment lives in context lines elided from this diff; confirm.
def find_link(self, item):
return item.findtext('link')
+ def munge_link(self, link):
+ return link
+
def find_image(self, item):
return item.findtext('image')
+ def munge_image(self, image):
+ return image
+
def item_is_interesting_for_headlines(self, title, description, item):
- pass
+ return True
+
+ def is_item_older_than_n_days(self, item, n):
+ pubdate = item.findtext('pubDate')
+ if pubdate is not None:
+ pubdate = parse(pubdate)
+ tzinfo = pubdate.tzinfo
+ now = datetime.datetime.now(tzinfo)
+ delta = (now - pubdate).total_seconds() / (60 * 60 * 24)
+ if (delta > n):
+ return True
+ return False
def item_is_interesting_for_article(self, title, description, item):
- pass
+ return True
    def periodic_render(self, key):
        # NOTE(review): this method is a collage of diff hunks with their
        # unchanged context elided; 'subset', 'details', 'item', 'title' and
        # 'count' are bound in code not visible here — review against the
        # full file before editing further.
        if key == "Fetch News":
            return False
        # Append each message to the details page builder.
        for msg in subset:
            blurb = msg
-            blurb += "</TD>\n"
+            blurb += u'</TD>'
            details.add_item(blurb)
        g = file_writer.file_writer('%s_6_none.html' % (
            self.get_details_page_prefix()))
        # Fetch every configured RSS URI from the feed site.
        for uri in self.feed_uris:
            if self.should_use_https():
                self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
-                self.conn = httplib.HTTPSConnection(self.feed_site)
+                self.conn = http.client.HTTPSConnection(self.feed_site)
            else:
                self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
-                self.conn = httplib.HTTPConnection(self.feed_site)
+                self.conn = http.client.HTTPConnection(self.feed_site)
            # NOTE(review): HTTPConnection.request()'s third positional
            # argument is the request *body*, not headers; this dict looks
            # like it was meant to be passed as headers= — confirm and fix.
            self.conn.request(
                "GET",
                uri,
                {"Accept-Charset": "utf-8"})
            response = self.conn.getresponse()
            if response.status != 200:
-                print("%s: RSS fetch_news error, response: %d" % (self.page_title,
-                                                                  response.status))
+                print(("%s: RSS fetch_news error, response: %d" % (self.page_title,
+                                                                   response.status)))
                self.debug_print(response.read())
                return False
            # Extract and normalize the interesting fields of the RSS <item>.
            description = item.findtext('description')
            if description is not None:
                description = self.munge_description(description)
+            image = self.find_image(item)
+            if image is not None:
+                image = self.munge_image(image)
            link = item.findtext('link')
-            image = item.findtext('image')
+            if link is not None:
+                link = self.munge_link(link)
            # NOTE(review): the next two raw CSS lines are the tail of a
            # triple-quoted HTML/CSS template whose opening lies in elided
            # context — do not re-indent them.
            if (title is None or
                not self.item_is_interesting_for_headlines(title,
font-size:34pt;
-webkit-column-break-inside:avoid;">"""
            if image is not None:
-                blurb += '<IMG SRC="%s" ALIGN=LEFT HEIGHT=115 style="padding:8px;">\n' % image
-                blurb += '<P><B>%s</B>' % title
+                blurb += u'<IMG SRC="%s" ALIGN=LEFT HEIGHT=115 ' % image
+                blurb += u'style="padding:8px;">'
+
+            if link is None:
+                blurb += u'<P><B>%s</B>' % title
+            else:
+                blurb += u'<P><B><A HREF="%s">%s</A></B>' % (link, title)
            if (description is not None and
-                self.item_is_interesting_for_article(title, description, item)):
+                self.item_is_interesting_for_article(title,
+                                                     description,
+                                                     item)):
                # Interesting items also get a longer blurb, rendered in a
                # larger font, on the details page.
                longblurb = blurb
-                longblurb += "<BR>"
+                longblurb += u"<BR>"
                longblurb += description
-                longblurb += "</DIV>"
+                longblurb += u"</DIV>"
                longblurb = longblurb.replace("font-size:34pt",
                                              "font-size:44pt")
-                self.details.add(longblurb.encode('utf-8', errors='ignore'))
+                self.details.add(longblurb)
-            blurb += "</DIV>"
-            self.news.add(blurb.encode('utf-8', errors='ignore'))
+            blurb += u"</DIV>"
+            self.news.add(blurb)
            count += 1
        return count > 0
-
-# Test
-#x = generic_news_rss_renderer(
-# {"Fetch News" : 1,
-# "Shuffle News" : 1},
-# "rss.cnn.com",
-# [ "/rss/generic_news_topstories.rss",
-# "/rss/money_latest.rss",
-# "/rss/generic_news_tech.rss",
-# ],
-# "Test" )
-#if x.fetch_news() == 0:
-# print "Error fetching news, no items fetched."
-#x.shuffle_news()