3 from abc import abstractmethod
5 from dateutil.parser import parse
9 from typing import Dict, List, Optional, Union
10 import xml.etree.ElementTree as ET
16 import profanity_filter
class generic_news_rss_renderer(renderer.debuggable_abstaining_renderer):
    """Base renderer that fetches RSS feeds (fetch_news) and renders
    headline/detail HTML pages (shuffle_news); subclasses supply the
    site-specific hooks below."""

    # NOTE(review): the `def __init__(...)` line and several parameters are
    # elided in this view — confirm the full signature against the file.
        name_to_timeout_dict: Dict[str, int],
        super(generic_news_rss_renderer, self).__init__(name_to_timeout_dict, False)
        # Feed coordinates used by fetch_news(): host plus a list of URI paths.
        self.feed_site = feed_site
        self.feed_uris = feed_uris
        self.page_title = page_title
        # grab_bag collections accumulate headline blurbs and long-form
        # detail blurbs; shuffle_news() samples from them.
        self.news = grab_bag.grab_bag()
        self.details = grab_bag.grab_bag()
        # Used by fetch_news() to skip items containing profanity.
        self.filter = profanity_filter.ProfanityFilter()
    # --- Subclass hooks; method bodies are elided in this view. ---

    # Tag prepended to debug output lines.
    def debug_prefix(self) -> str:

    # Filename prefix for the headlines page written by shuffle_news().
    def get_headlines_page_prefix(self) -> str:

    # Filename prefix for the details page written by shuffle_news().
    def get_details_page_prefix(self) -> str:

    # Priority component embedded in the headlines page filename.
    def get_headlines_page_priority(self) -> str:

    # Priority component embedded in the details page filename.
    def get_details_page_priority(self) -> str:

    # Selects HTTPSConnection vs HTTPConnection in fetch_news().
    def should_use_https(self) -> bool:

    # When True, fetch_news() drops items whose title/description
    # contain bad words (see self.filter).
    def should_profanity_filter(self) -> bool:
61 def find_title(self, item: ET.Element) -> Optional[str]:
62 return item.findtext("title")
64 def munge_title(self, title: str, item: ET.Element) -> str:
67 def find_description(self, item: ET.Element) -> Optional[str]:
68 return item.findtext("description")
    def munge_description(
        # NOTE(review): the remaining parameters and the return statement are
        # elided in this view; only the tag-stripping step is visible.
        # Strip all HTML/XML tags ("<...>") from the description text.
        description = re.sub("<[^>]+>", "", description)
78 def find_link(self, item: ET.Element) -> Optional[str]:
79 return item.findtext("link")
81 def munge_link(self, link: str) -> str:
84 def find_image(self, item: ET.Element) -> Optional[str]:
85 return item.findtext("image")
87 def munge_image(self, image: str) -> str:
90 def find_pubdate(self, item: ET.Element) -> Optional[str]:
91 return item.findtext("pubDate")
93 def munge_pubdate(self, pubdate: str) -> str:
    # Filter hook: fetch_news() skips items for which this returns falsy.
    # (Return annotation and body are elided in this view.)
    def item_is_interesting_for_headlines(
        self, title: str, description: str, item: ET.Element

    # Whether shuffle_news() should emit the headlines page (body elided).
    def do_headlines(self) -> bool:

    # Whether shuffle_news() should emit the details page (body elided).
    def do_details(self) -> bool:
    def is_item_older_than_n_days(self, item: ET.Element, n: int) -> bool:
        """Age test against the item's pubDate.  The final comparison/return
        is elided in this view — presumably `delta > n`; confirm in full file."""
        pubdate = self.find_pubdate(item)
        # NOTE(review): the None-guard for a missing pubDate is elided here.
        pubdatetime = parse(pubdate)
        # Use the feed's own tzinfo so `now` and `pubdatetime` are both aware
        # (or both naive) and can be subtracted safely.
        tzinfo = pubdatetime.tzinfo
        now = datetime.datetime.now(tzinfo)
        # Item age in fractional days.
        delta = (now - pubdatetime).total_seconds() / (60 * 60 * 24)
    # Filter hook: fetch_news() only adds a long-form detail blurb when this
    # returns truthy.  (Return annotation and body elided in this view.)
    def item_is_interesting_for_article(
        self, title: str, description: str, item: ET.Element
    def periodic_render(self, key: str) -> bool:
        """Dispatch a timer key (from name_to_timeout_dict) to the matching
        work function."""
        if key == "Fetch News":
            return self.fetch_news()
        elif key == "Shuffle News":
            return self.shuffle_news()
        # NOTE(review): handling of unknown keys is elided in this view.
    def shuffle_news(self) -> bool:
        """Sample previously fetched items and write the headline and detail
        HTML pages.  Several guards, loop headers, and the CSS payloads are
        elided in this view — comments mark each gap."""
        if self.do_headlines():
            headlines = page_builder.page_builder()
            headlines.set_layout(page_builder.page_builder.LAYOUT_FOUR_ITEMS)
            headlines.set_title("%s" % self.page_title)
            # Pick 4 random headline blurbs accumulated by fetch_news().
            subset = self.news.subset(4)
            # NOTE(review): the failure guard (presumably `if subset is None:`
            # plus an early return) is elided here.
                self.debug_print("Not enough messages to choose from.")
            # (loop header over the chosen subset elided)
                headlines.add_item(msg)
            headlines.set_custom_html(
            # NOTE(review): the CSS string is mostly elided; only these
            # fragments are visible.
                    text-decoration: none;
                    text-decoration: none;
                    text-decoration: none;
            # Output name encodes page prefix, priority, and a period (25900).
            _ = f"{self.get_headlines_page_prefix()}_{self.get_headlines_page_priority()}_25900.html"
            with file_writer.file_writer(_) as f:
                headlines.render_html(f)
        if self.do_details():
            details = page_builder.page_builder()
            details.set_layout(page_builder.page_builder.LAYOUT_ONE_ITEM)
            details.set_custom_html(
            # (CSS string mostly elided; visible fragments below)
                    text-decoration: none;
                    text-decoration: none;
                    text-decoration: none;
            details.set_title(f"{self.page_title}")
            # One random long-form blurb from the details grab bag.
            subset = self.details.subset(1)
            # (failure guard elided, mirroring the headlines branch)
                self.debug_print("Not enough details to choose from.")
            # (loop header elided)
                details.add_item(blurb)
            _ = f"{self.get_details_page_prefix()}_{self.get_details_page_priority()}_86400.html"
            with file_writer.file_writer(_) as g:
                details.render_html(g)
        # NOTE(review): the method's return statement is elided in this view.
    def fetch_news(self) -> bool:
        """Fetch each configured RSS feed over HTTP(S), parse it, and fill
        self.news (headline blurbs) and self.details (long-form blurbs).
        Many guards/`continue`s and some statements are elided in this view —
        comments mark each gap."""
        # NOTE(review): initialization of `title_filter` (a set used below
        # for de-duplication) is elided in this view.
        self.conn: Optional[Union[http.client.HTTPConnection,
                                  http.client.HTTPSConnection]] = None
        for uri in self.feed_uris:
            if self.should_use_https():
                self.debug_print("Fetching: https://%s%s" % (self.feed_site, uri))
                self.conn = http.client.HTTPSConnection(self.feed_site, timeout=20)
            # (else branch header elided)
                self.debug_print("Fetching: http://%s%s" % (self.feed_site, uri))
                self.conn = http.client.HTTPConnection(self.feed_site, timeout=20)
            # NOTE(review): assert is stripped under -O; an explicit raise
            # would be sturdier here.
            assert(self.conn is not None)
            # (the GET request call wrapping these headers is elided)
                "Cache-control": "max-age=59",
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
            # (try: header elided)
                response = self.conn.getresponse()
            except Exception as e:
                # Log full traceback, then (per elided lines) presumably
                # debug_print the message below and continue to the next URI.
                traceback.print_exc(file=sys.stdout)
                    f"Exception in generic RSS renderer HTTP connection fetching {self.feed_site}{uri}"
            if response.status != 200:
                # (debug_print wrapper around this message elided)
                    f"{self.page_title}: RSS fetch_news error, response: {response.status}"
                self.debug_print(str(response.read()))
                # (continue elided)
            rss = ET.fromstring(response.read())
            # (extraction of `channel` from `rss` elided)
            # NOTE(review): Element.getchildren() was removed in Python 3.9;
            # iterating `channel` directly (`for item in channel:`) is the
            # supported replacement — confirm target Python version.
            for item in channel.getchildren():
                title = self.find_title(item)
                # NOTE(review): inconsistent with find_description()/find_link()
                # helpers, which are bypassed here and below.
                description = item.findtext("description")
                if title is not None:
                    title = self.munge_title(title, item)
                if description is not None:
                    description = self.munge_description(description, item)
                # (else branch elided)
                image = self.find_image(item)
                if image is not None:
                    image = self.munge_image(image)
                link = item.findtext("link")
                # (None-guard for link elided)
                    link = self.munge_link(link)
                # Skip items the subclass deems uninteresting.
                if title is None or not self.item_is_interesting_for_headlines(
                    title, description, item
                # (closing paren/colon elided)
                    self.debug_print(f'Item "{title}" is not interesting')
                    # (continue elided)
                # Optionally drop items containing profanity.
                if self.should_profanity_filter() and (
                    self.filter.contains_bad_word(title)
                    or self.filter.contains_bad_word(description)
                # (closing paren/colon elided)
                    self.debug_print(f'Found bad words in item "{title}"')
                    # (continue elided)
                # De-duplicate by title across feeds.
                if title in title_filter:
                    self.debug_print(f'Already saw title {title}, skipping.')
                title_filter.add(title)
                # Build the headline blurb as a self-contained HTML <DIV>.
                blurb = """<DIV style="padding:8px;
                -webkit-column-break-inside:avoid;">"""
                if image is not None:
                    blurb += f'<IMG SRC="{image}" ALIGN=LEFT HEIGHT=115 '
                    blurb += 'style="padding:8px;">'
                # (link None-check branches elided: plain title vs. hyperlink)
                    blurb += f"<P><B>{title}</B>"
                    blurb += f'<P><B><A HREF="{link}">{title}</A></B>'
                pubdate = self.find_pubdate(item)
                if pubdate is not None:
                    pubdate = self.munge_pubdate(pubdate)
                    # (assignment of `ts` — presumably parse(pubdate) — elided)
                    blurb += f' <FONT COLOR=#cccccc>{ts.strftime("%b %d")}</FONT>'
                # Long-form version goes to the details grab bag.
                if self.item_is_interesting_for_article(title, description, item):
                    # (initialization of `longblurb` from `blurb` elided)
                    longblurb += description
                    longblurb += "</DIV>"
                    # Bigger font for the one-item details layout.
                    longblurb = longblurb.replace("font-size:34pt", "font-size:44pt")
                    self.details.add(longblurb)