#!/usr/bin/env python3

"""Abstract base renderer that fetches RSS feeds from a site, filters and
munges the items, and emits two kiosk pages: a headlines page (several
short blurbs) and a details page (one article with its description).

NOTE(review): this file arrived with its physical line structure and the
HTML fragments inside several string literals corrupted; the markup below
has been conservatively reconstructed and is flagged where guessed.
"""

from abc import abstractmethod
import datetime
import http.client
import random
import re
from typing import Dict, List, Optional, Union
import xml.etree.ElementTree as ET

from dateutil.parser import parse

import file_writer
import grab_bag
import page_builder
import profanity_filter
import renderer


class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
    """Periodically fetch one or more RSS feeds from a single site and
    render headline/detail HTML pages from the interesting items.

    Subclasses must implement the abstract hooks below and may override
    the find_*/munge_*/item_is_interesting_* hooks to customize parsing
    and filtering per feed.
    """

    def __init__(
        self,
        name_to_timeout_dict: Dict[str, int],
        feed_site: str,
        feed_uris: List[str],
        page_title: str,
    ):
        """Args:
        name_to_timeout_dict: operation name -> refresh period, passed
            through to the base renderer.
        feed_site: hostname to fetch feeds from (no scheme).
        feed_uris: URI paths of the feeds on that host.
        page_title: human-readable title for the rendered pages.
        """
        super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict, False)
        self.debug = True
        self.feed_site = feed_site
        self.feed_uris = feed_uris
        self.page_title = page_title
        self.news = grab_bag.grab_bag()     # short headline blurbs
        self.details = grab_bag.grab_bag()  # longer article blurbs
        self.filter = profanity_filter.profanity_filter()

    @abstractmethod
    def debug_prefix(self) -> str:
        pass

    @abstractmethod
    def get_headlines_page_prefix(self) -> str:
        pass

    @abstractmethod
    def get_details_page_prefix(self) -> str:
        pass

    def get_headlines_page_priority(self) -> str:
        return "4"

    def get_details_page_priority(self) -> str:
        return "6"

    @abstractmethod
    def should_use_https(self) -> bool:
        pass

    def should_profanity_filter(self) -> bool:
        """Subclasses return True to drop items containing profanity."""
        return False

    def find_title(self, item: ET.Element) -> Optional[str]:
        return item.findtext("title")

    def munge_title(self, title: str) -> str:
        return title

    def find_description(self, item: ET.Element) -> Optional[str]:
        return item.findtext("description")

    def munge_description(self, description: str) -> str:
        # Strip any embedded HTML tags out of the description text.
        description = re.sub("<[^>]+>", "", description)
        return description

    def find_link(self, item: ET.Element) -> Optional[str]:
        return item.findtext("link")

    def munge_link(self, link: str) -> str:
        return link

    def find_image(self, item: ET.Element) -> Optional[str]:
        return item.findtext("image")

    def munge_image(self, image: str) -> str:
        return image

    def find_pubdate(self, item: ET.Element) -> Optional[str]:
        return item.findtext("pubDate")

    def munge_pubdate(self, pubdate: str) -> str:
        # BUGFIX: the original's `return` had become separated from its
        # value, making this method return None.
        return pubdate

    def item_is_interesting_for_headlines(
        self, title: str, description: str, item: ET.Element
    ) -> bool:
        """Filter hook: should this item appear on the headlines page?"""
        return True

    def is_item_older_than_n_days(self, item: ET.Element, n: int) -> bool:
        """True iff the item's pubDate is more than n days in the past.

        Items with no pubDate are treated as not-old (returns False).
        """
        pubdate = self.find_pubdate(item)
        if pubdate is None:
            return False
        pubdatetime = parse(pubdate)
        # Compare in the pubDate's own timezone so naive and aware
        # datetimes are never mixed.
        tzinfo = pubdatetime.tzinfo
        now = datetime.datetime.now(tzinfo)
        delta = (now - pubdatetime).total_seconds() / (60 * 60 * 24)
        return delta > n

    def item_is_interesting_for_article(
        self, title: str, description: str, item: ET.Element
    ) -> bool:
        """Filter hook: should this item appear on the details page?"""
        return True

    def periodic_render(self, key: str) -> bool:
        """Dispatch a periodic operation by name.

        Raises:
            Exception: if key is not a recognized operation.
        """
        if key == "Fetch News":
            return self.fetch_news()
        elif key == "Shuffle News":
            return self.shuffle_news()
        else:
            # BUGFIX: the original raised the bare Exception class with no
            # message; include the offending key for diagnosability.
            raise Exception(f"Unexpected operation key: {key}")

    def shuffle_news(self) -> bool:
        """Rebuild the headlines page (4 random items) and the details page
        (1 random article) from previously fetched content.

        Returns False when there is not enough fetched content to render.
        """
        headlines = page_builder.page_builder()
        headlines.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
        headlines.set_title("%s" % self.page_title)
        subset = self.news.subset(4)
        if subset is None:
            self.debug_print("Not enough messages to choose from.")
            return False
        for msg in subset:
            headlines.add_item(msg)
        # NOTE(review): the original custom HTML/CSS payload appears to have
        # been stripped from this literal; restore it if it can be recovered.
        headlines.set_custom_html(
            """
"""
        )
        filename = (
            f"{self.get_headlines_page_prefix()}_"
            f"{self.get_headlines_page_priority()}_25900.html"
        )
        with file_writer.file_writer(filename) as f:
            headlines.render_html(f)

        details = page_builder.page_builder()
        details.set_layout(page_builder.page_builder.LAYOUT_ONE_ITEM)
        # NOTE(review): see custom-HTML note above; payload likely lost.
        details.set_custom_html(
            """
"""
        )
        details.set_title(f"{self.page_title}")
        subset = self.details.subset(1)
        if subset is None:
            self.debug_print("Not enough details to choose from.")
            return False
        for msg in subset:
            details.add_item(msg)
        filename = (
            f"{self.get_details_page_prefix()}_"
            f"{self.get_details_page_priority()}_86400.html"
        )
        with file_writer.file_writer(filename) as g:
            details.render_html(g)
        return True

    def fetch_news(self) -> bool:
        """Fetch every configured feed URI, parse the RSS, and refill the
        news/details grab bags with interesting, filtered items.

        Returns True iff at least one item was collected.
        """
        count = 0
        self.news.clear()
        self.details.clear()
        self.conn: Optional[
            Union[http.client.HTTPConnection, http.client.HTTPSConnection]
        ] = None

        for uri in self.feed_uris:
            if self.should_use_https():
                self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
                self.conn = http.client.HTTPSConnection(self.feed_site, timeout=20)
            else:
                self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
                self.conn = http.client.HTTPConnection(self.feed_site, timeout=20)
            assert self.conn is not None
            self.conn.request(
                "GET",
                uri,
                None,
                {
                    "Accept": "*/*",
                    "Cache-control": "max-age=59",
                    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
                },
            )
            try:
                response = self.conn.getresponse()
            except Exception:
                # BUGFIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.
                print("Exception in generic RSS renderer HTTP connection")
                return False
            if response.status != 200:
                print(
                    f"{self.page_title}: RSS fetch_news error, response: {response.status}"
                )
                self.debug_print(str(response.read()))
                return False
            rss = ET.fromstring(response.read())
            # BUGFIX: close each per-URI connection instead of leaking it.
            self.conn.close()
            channel = rss[0]
            # BUGFIX: Element.getchildren() was removed in Python 3.9;
            # iterating the element directly is the supported equivalent.
            for item in channel:
                title = self.find_title(item)
                if title is not None:
                    title = self.munge_title(title)
                # Consistency: use the overridable find_* hooks rather than
                # calling item.findtext() directly (original bypassed them).
                description = self.find_description(item)
                if description is not None:
                    description = self.munge_description(description)
                else:
                    description = ""
                image = self.find_image(item)
                if image is not None:
                    image = self.munge_image(image)
                link = self.find_link(item)
                if link is not None:
                    link = self.munge_link(link)
                if title is None or not self.item_is_interesting_for_headlines(
                    title, description, item
                ):
                    self.debug_print(f'Item "{title}" is not interesting')
                    continue
                if self.should_profanity_filter() and (
                    self.filter.contains_bad_words(title)
                    or self.filter.contains_bad_words(description)
                ):
                    self.debug_print(f'Found bad words in item "{title}"')
                    continue

                # NOTE(review): the HTML tags in the blurb literals below
                # were stripped from the original file; this markup is a
                # reconstruction (the font-size:34pt style is grounded by
                # the .replace() call further down) -- confirm against the
                # original rendering.
                blurb = """<DIV style="font-size:34pt;
-webkit-column-break-inside:avoid;">"""
                if image is not None:
                    blurb += f'<IMG SRC="{image}" ALIGN=LEFT HEIGHT=115 style="padding:8px;">'
                    blurb += f"<B>{title}</B>"
                else:
                    blurb += f"<B>{title}</B>"
                pubdate = self.find_pubdate(item)
                if pubdate is not None:
                    pubdate = self.munge_pubdate(pubdate)
                    ts = parse(pubdate)
                    blurb += f' <FONT SIZE=-2>{ts.strftime("%b %d")}</FONT>'
                if self.item_is_interesting_for_article(title, description, item):
                    # The details page gets the same blurb plus the
                    # description, at a larger font size.
                    longblurb = blurb
                    longblurb += "<BR>"
                    longblurb += description
                    longblurb += "</DIV>"
                    longblurb = longblurb.replace("font-size:34pt", "font-size:44pt")
                    self.details.add(longblurb)
                blurb += "</DIV>"
                self.news.add(blurb)
                count += 1
        return count > 0