3 from abc import abstractmethod
5 from dateutil.parser import parse
9 from typing import Dict, List, Optional, Union
10 import xml.etree.ElementTree as ET
16 import profanity_filter
# NOTE(review): this is a sampled excerpt -- each line carries its original
# file line number and several lines (including the "def __init__" header)
# are missing from view, so the comments below annotate only what is visible.
19 class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
# Constructor parameter fragment: mapping of named periodic tasks -> timeout.
22 name_to_timeout_dict: Dict[str, int],
# Initialize the debuggable-renderer base; the literal False presumably
# disables a base-class option -- TODO confirm against renderer.py.
27 super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict, False)
29 self.feed_site = feed_site  # hostname used for the HTTP(S) connection
30 self.feed_uris = feed_uris  # feed paths fetched from feed_site (see fetch_news)
31 self.page_title = page_title  # human-readable title for the rendered pages
32 self.news = grab_bag.grab_bag()  # pool of headline blurbs (project type)
33 self.details = grab_bag.grab_bag()  # pool of long-form article blurbs
34 self.filter = profanity_filter.profanity_filter()  # word filter (see fetch_news)
# NOTE(review): only the method headers are visible in this sampled view;
# bodies are missing, so these notes describe apparent contracts only.
# Tag prepended to this renderer's debug output -- presumably; body not visible.
37 def debug_prefix(self) -> str:
# Filename prefix for the generated headlines page (used in shuffle_news).
41 def get_headlines_page_prefix(self) -> str:
# Filename prefix for the generated details page (used in shuffle_news).
45 def get_details_page_prefix(self) -> str:
# Priority component embedded in the headlines page filename.
48 def get_headlines_page_priority(self) -> str:
# Priority component embedded in the details page filename.
51 def get_details_page_priority(self) -> str:
# True when fetch_news should use HTTPS rather than plain HTTP.
55 def should_use_https(self) -> bool:
# True when fetched items should be dropped if they contain bad words.
58 def should_profanity_filter(self) -> bool:
def find_title(self, item: ET.Element) -> Optional[str]:
    """Text of the item's <title> child; None when no such child exists."""
    text = item.findtext("title")
    return text
# Hook to normalize/clean a raw title; body not visible in this excerpt.
64 def munge_title(self, title: str) -> str:
def find_description(self, item: ET.Element) -> Optional[str]:
    """Text of the item's <description> child.

    Mirrors Element.findtext semantics: None when the child is absent,
    "" when it is present but empty.
    """
    node = item.find("description")
    if node is None:
        return None
    return node.text if node.text is not None else ""
# Strip HTML markup from a feed description; the trailing return statement
# falls outside this sampled view.
70 def munge_description(self, description: str) -> str:
# Naive tag-stripper: drops any <...> span (not a full HTML parser).
71 description = re.sub("<[^>]+>", "", description)
def find_link(self, item: ET.Element) -> Optional[str]:
    """Text of the item's first <link> child, or None when the item has none
    (empty string when the tag is present but empty, per findtext)."""
    for child in item:
        if child.tag == "link":
            return child.text if child.text is not None else ""
    return None
# Hook to normalize an item's link URL; body not visible in this excerpt.
77 def munge_link(self, link: str) -> str:
def find_image(self, item: ET.Element) -> Optional[str]:
    """Text of the item's <image> child (findtext semantics: None if absent,
    "" if present but empty)."""
    found = item.find("image")
    return None if found is None else (found.text or "")
# Hook to normalize an item's image URL; body not visible in this excerpt.
83 def munge_image(self, image: str) -> str:
def find_pubdate(self, item: ET.Element) -> Optional[str]:
    """Raw date string from the item's <pubDate> child, or None if absent.

    The value is later parsed with dateutil in is_item_older_than_n_days.
    """
    pubdate_text = item.findtext("pubDate")
    return pubdate_text
# Hook to normalize an item's pubDate string; body not visible in this excerpt.
89 def munge_pubdate(self, pubdate: str) -> str:
# Subclass hook: decide whether an item belongs on the headlines page
# (signature fragment only; return annotation and body are outside this view).
92 def item_is_interesting_for_headlines(
93 self, title: str, description: str, item: ET.Element
# True when the item's <pubDate> is more than n days old. NOTE(review): the
# guard for a missing pubdate and the final comparison/return are not
# visible in this sampled excerpt.
97 def is_item_older_than_n_days(self, item: ET.Element, n: int) -> bool:
98 pubdate = self.find_pubdate(item)
# dateutil copes with the loose date formats RSS feeds emit.
101 pubdatetime = parse(pubdate)
# Take "now" in the pubdate's own timezone so both datetimes are aware (or
# both naive) -- subtracting a mix would raise TypeError.
102 tzinfo = pubdatetime.tzinfo
103 now = datetime.datetime.now(tzinfo)
# Age of the item in fractional days.
104 delta = (now - pubdatetime).total_seconds() / (60 * 60 * 24)
# Subclass hook: decide whether an item merits a long-form details blurb
# (signature fragment only; return annotation and body are outside this view).
107 def item_is_interesting_for_article(
108 self, title: str, description: str, item: ET.Element
# Dispatch a named periodic task (keys match name_to_timeout_dict) to its
# implementation; the fallthrough for unknown keys is outside this view.
112 def periodic_render(self, key: str) -> bool:
113 if key == "Fetch News":
114 return self.fetch_news()
115 elif key == "Shuffle News":
116 return self.shuffle_news()
# Rebuild the headlines page (four random blurbs) and the details page (one
# random long-form blurb) from previously fetched news. NOTE(review):
# sampled view -- loop headers, parts of the CSS string literals, early
# returns and the final return are missing.
120 def shuffle_news(self) -> bool:
121 headlines = page_builder.page_builder()
122 headlines.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
123 headlines.set_title("%s" % self.page_title)
# Draw up to four headline blurbs at random from the grab bag.
124 subset = self.news.subset(4)
126 self.debug_print("Not enough messages to choose from.")
129 headlines.add_item(msg)
# Custom CSS follows; the surrounding string literal is only partly visible,
# so no annotations are placed inside it.
130 headlines.set_custom_html(
135 text-decoration: none;
140 text-decoration: none;
145 text-decoration: none;
# Output filename encodes page prefix, priority and a numeric suffix
# (25900 here, 86400 below -- likely seconds; TODO confirm the convention).
150 _ = f"{self.get_headlines_page_prefix()}_{self.get_headlines_page_priority()}_25900.html"
151 with file_writer.file_writer(_) as f:
152 headlines.render_html(f)
# Same procedure for the single-item details page.
154 details = page_builder.page_builder()
155 details.set_layout(page_builder.page_builder.LAYOUT_ONE_ITEM)
156 details.set_custom_html(
161 text-decoration: none;
166 text-decoration: none;
171 text-decoration: none;
176 details.set_title(f"{self.page_title}")
177 subset = self.details.subset(1)
179 self.debug_print("Not enough details to choose from.")
184 details.add_item(blurb)
185 _ = f"{self.get_details_page_prefix()}_{self.get_details_page_priority()}_86400.html"
186 with file_writer.file_writer(_) as g:
187 details.render_html(g)
# Fetch and parse each configured RSS feed, turning interesting items into
# headline blurbs (self.news) and long-form blurbs (self.details).
# NOTE(review): sampled view -- try/except headers, request call, loop
# bodies, "continue" statements and the return are partly missing; this
# definition also runs past the end of the visible excerpt.
190 def fetch_news(self) -> bool:
# One connection attribute, replaced on every URI iteration.
194 self.conn: Optional[Union[http.client.HTTPConnection,
195 http.client.HTTPSConnection]] = None
197 for uri in self.feed_uris:
198 if self.should_use_https():
199 self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
200 self.conn = http.client.HTTPSConnection(self.feed_site, timeout=20)
202 self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
203 self.conn = http.client.HTTPConnection(self.feed_site, timeout=20)
# NOTE(review): assert is stripped under "python -O"; an explicit raise
# would be safer for this invariant.
204 assert(self.conn is not None)
211 "Cache-control": "max-age=59",
212 "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
216 response = self.conn.getresponse()
218 print("Exception in generic RSS renderer HTTP connection")
# Non-200 responses are logged (body dumped for debugging) rather than raised.
221 if response.status != 200:
223 f"{self.page_title}: RSS fetch_news error, response: {response.status}"
225 self.debug_print(str(response.read()))
# Parse the XML payload; ET.fromstring raises ParseError on malformed feeds.
228 rss = ET.fromstring(response.read())
# NOTE(review): Element.getchildren() was removed in Python 3.9; iterating
# the element directly ("for item in channel:") is the modern equivalent.
230 for item in channel.getchildren():
231 title = self.find_title(item)
232 if title is not None:
233 title = self.munge_title(title)
# NOTE(review): calls item.findtext("description") directly instead of the
# overridable self.find_description() hook -- looks unintentional; confirm.
234 description = item.findtext("description")
235 if description is not None:
236 description = self.munge_description(description)
239 image = self.find_image(item)
240 if image is not None:
241 image = self.munge_image(image)
# NOTE(review): likewise bypasses the self.find_link() hook.
242 link = item.findtext("link")
244 link = self.munge_link(link)
# Drop items a subclass deems uninteresting for the headlines page.
246 if title is None or not self.item_is_interesting_for_headlines(
247 title, description, item
249 self.debug_print(f'Item "{title}" is not interesting')
# Optionally drop items whose title or description contains bad words.
252 if self.should_profanity_filter() and (
253 self.filter.contains_bad_words(title)
254 or self.filter.contains_bad_words(description)
256 self.debug_print(f'Found bad words in item "{title}"')
# Assemble the headline blurb as raw HTML (string literal partly elided).
259 blurb = """<DIV style="padding:8px;
261 -webkit-column-break-inside:avoid;">"""
262 if image is not None:
263 blurb += f'<IMG SRC="{image}" ALIGN=LEFT HEIGHT=115 '
264 blurb += 'style="padding:8px;">'
# Title rendered plain, or as a hyperlink when the item carried a link.
267 blurb += f"<P><B>{title}</B>"
269 blurb += f'<P><B><A HREF="{link}">{title}</A></B>'
271 pubdate = self.find_pubdate(item)
272 if pubdate is not None:
273 pubdate = self.munge_pubdate(pubdate)
# "ts" is presumably the parsed pubdate; its assignment is not visible here.
275 blurb += f' <FONT COLOR=#cccccc>{ts.strftime("%b %d")}</FONT>'
# Interesting articles additionally get a long-form blurb with description.
277 if self.item_is_interesting_for_article(title, description, item):
280 longblurb += description
281 longblurb += "</DIV>"
# Bump the font size for the one-item details page.
282 longblurb = longblurb.replace("font-size:34pt", "font-size:44pt")
283 self.details.add(longblurb)