From 5cd909241fefc418e495e633243403592339991a Mon Sep 17 00:00:00 2001
From: Scott Gasch
Date: Wed, 1 Mar 2023 15:17:55 -0800
Subject: [PATCH] Use pyutils logging prepend message functionality.

---
 kiosk.py  | 76 ++++++++++++++++++++++++++++---------------------------
 listen.py |  2 ++
 2 files changed, 41 insertions(+), 37 deletions(-)

diff --git a/kiosk.py b/kiosk.py
index 5ff6e1c..2482160 100755
--- a/kiosk.py
+++ b/kiosk.py
@@ -22,6 +22,7 @@ import pytz
 from pyutils import (
     bootstrap,
     config,
+    logging_utils,
 )
 from pyutils.datetimes import datetime_utils
 from pyutils.files import file_utils
@@ -41,6 +42,7 @@ logger = logging.getLogger(__file__)
 
 
 def thread_janitor() -> None:
+    logging_utils.register_thread_logging_prefix("janitor:")
     tracemalloc.start()
     tracemalloc_target = 0.0
     gc_target = 0.0
@@ -61,26 +63,27 @@ def thread_janitor() -> None:
             key_type = "lineno"
             limit = 10
             top_stats = snapshot.statistics(key_type)
-            logger.info(f"janitor: Top {limit} lines")
+            logger.info(f"Top {limit} lines")
             for index, stat in enumerate(top_stats[:limit], 1):
                 frame = stat.traceback[0]
                 # replace "/path/to/module/file.py" with "module/file.py"
                 filename = os.sep.join(frame.filename.split(os.sep)[-2:])
                 logger.info(
-                    f"janitor: #{index}: {filename}:{frame.lineno}: {stat.size / 1024:.1f} KiB"
+                    f"#{index}: {filename}:{frame.lineno}: {stat.size / 1024:.1f} KiB"
                 )
 
             other = top_stats[limit:]
             if other:
                 size = sum(stat.size for stat in other)
-                logger.info(f"janitor: {len(other)} others: {size/1024:.1f} KiB")
+                logger.info(f"{len(other)} others: {size/1024:.1f} KiB")
 
             total = sum(stat.size for stat in top_stats)
-            logger.info(f"janitor: Total allocated size: {total / 1024:.1f} KiB")
+            logger.info(f"Total allocated size: {total / 1024:.1f} KiB")
 
         if now > gc_target:
-            logger.info("janitor: kicking off a manual gc operation now.")
+            logger.info("Kicking off a manual gc operation now.")
             gc.collect()
             gc_target = now + 120.0
 
+        logger.info("Having a little nap...")
         time.sleep(30.0)
@@ -147,7 +150,7 @@ def process_command(
         while True:
             (page, _) = page_chooser.choose_next_page()
             if page == page_history[0]:
-                logger.debug(f"chooser: {page} is the same as last time! Try again.")
+                logger.debug(f"{page} is the same as last time! Try again.")
             else:
                 break
     elif "internal" in command:
@@ -215,6 +218,7 @@ def process_command(
 
 
 def thread_change_current(command_queue: Queue) -> None:
+    logging_utils.register_thread_logging_prefix("chooser:")
     page_history = ["", ""]
     swap_page_target = 0.0
 
@@ -237,9 +241,7 @@ def thread_change_current(command_queue: Queue) -> None:
            command = None
 
        if command:
-            logger.info(
-                f'chooser: We got a verbal command ("{command}"), parsing it...'
-            )
+            logger.info(f'We got a verbal command ("{command}"), parsing it...')
            page = process_command(command, page_history, page_chooser)
            if page:
                return page, command
@@ -249,13 +251,11 @@ def thread_change_current(command_queue: Queue) -> None:
        while True:
            (page, triggered) = page_chooser.choose_next_page()
            if triggered:
-                logger.info("chooser: A trigger is active...")
+                logger.info("A trigger is active...")
                break
            else:
                if page == page_history[0]:
-                    logger.debug(
-                        f"chooser: {page} is the same as last time! Try again."
-                    )
+                    logger.debug(f"{page} is the same as last time! Try again.")
                else:
                    break
        return (page, triggered)
@@ -289,7 +289,7 @@ def thread_change_current(command_queue: Queue) -> None:
            # current page to the new page immediately.
            if page != page_history[0]:
                logger.info(
-                    f"chooser: An emergency page reload to {page} is needed at this time."
+                    f"An emergency page reload to {page} is needed at this time."
                )
                swap_page_target = now + kiosk_constants.emergency_refresh_period_sec
 
@@ -302,37 +302,37 @@ def thread_change_current(command_queue: Queue) -> None:
                        override_refresh_sec=kiosk_constants.emergency_refresh_period_sec,
                        command=command,
                    )
-                    logger.debug(f"chooser: Wrote {current_file}.")
+                    logger.debug(f"Wrote {current_file}.")
                except Exception:
                    logger.exception(
-                        f"chooser: Unexpected exception; assuming {page} doesn't exist?!"
+                        f"Unexpected exception; assuming {page} doesn't exist?!"
                    )
                    continue
 
                # Notify XMLHTTP clients that they need to refresh now.
                with open(emergency_file, "w") as f:
                    f.write(f"Reload, suckers... you HAVE to see {page}!")
-                logger.debug(f"chooser: Wrote {emergency_file}...")
+                logger.debug(f"Wrote {emergency_file}...")
 
                # Fix this hack... maybe read the webserver logs and see if it
                # actually was picked up?
                time.sleep(0.999)
                os.remove(emergency_file)
-                logger.debug(f"chooser: ...and removed {emergency_file}.")
+                logger.debug(f"...and removed {emergency_file}.")
                page_history.insert(0, page)
                page_history = page_history[0:10]
 
            # If we're not triggered, only render a new page if time has expired.
            elif now >= swap_page_target:
-                logger.info(f"chooser: Nominal choice of {page} as the next to show.")
+                logger.info(f"Nominal choice of {page} as the next to show.")
                swap_page_target = now + kiosk_constants.refresh_period_sec
                try:
                    with open(current_file, "w") as f:
                        emit(f, page)
-                    logger.debug(f"chooser: Wrote {current_file}.")
+                    logger.debug(f"Wrote {current_file}.")
                except Exception:
                    logger.exception(
-                        f"chooser: Unexpected exception; assuming {page} doesn't exist?!"
+                        f"Unexpected exception; assuming {page} doesn't exist?!"
                    )
                    continue
                page_history.insert(0, page)
@@ -608,7 +608,7 @@ def renderer_update_internal_stats_page(
     render_counts: collections.Counter,
     render_times: Dict[str, np.array],
 ) -> None:
-    logger.info("renderer: Updating internal render statistics page.")
+    logger.info("Updating internal render statistics page.")
     with file_writer.file_writer(kiosk_constants.render_stats_pagename) as f:
         f.write(
             """
@@ -658,6 +658,7 @@ def renderer_update_internal_stats_page(
 
 
 def thread_invoke_renderers() -> None:
+    logging_utils.register_thread_logging_prefix("renderer:")
     render_times: Dict[str, np.array] = {}
     render_counts: collections.Counter = collections.Counter()
     last_render: Dict[str, datetime] = {}
@@ -667,17 +668,17 @@ def thread_invoke_renderers() -> None:
 
     # Main renderer loop
     while True:
-        logger.info("renderer: invoking all overdue renderers in catalog...")
+        logger.info("invoking all overdue renderers in catalog...")
         for r in renderer_catalog.get_renderers():
             name = r.get_name()
             now = time.time()
-            logger.info(f"renderer: Invoking {name}'s render method.")
+            logger.info(f"Invoking {name}'s render method.")
             try:
                 r.render()
             except Exception as e:
                 logger.exception(e)
                 logger.error(
-                    f"renderer: Unexpected and unhandled exception ({e}) in {name}, swallowing it."
+                    f"Unexpected and unhandled exception ({e}) in {name}, swallowing it."
                 )
                 continue
 
@@ -693,17 +694,16 @@ def thread_invoke_renderers() -> None:
             times = np.insert(times, 0, delta)
             render_times[name] = times
             if delta > 1.0:
-                hdr = "renderer: "
                 logger.warning(
                     f"""
-{hdr} Warning: {name}'s rendering took {delta:5.2f}s.
-{hdr} FYI: {name}'s render times: p25={np.percentile(times, 25):5.2f}, p50={np.percentile(times, 50):5.2f}, p75={np.percentile(times, 75):5.2f}, p90={np.percentile(times, 90):5.2f}, p99={np.percentile(times, 99):5.2f}
+Warning: {name}'s rendering took {delta:5.2f}s.
+FYI: {name}'s render times: p25={np.percentile(times, 25):5.2f}, p50={np.percentile(times, 50):5.2f}, p75={np.percentile(times, 75):5.2f}, p90={np.percentile(times, 90):5.2f}, p99={np.percentile(times, 99):5.2f}
 """
                 )
 
         # Update a page about internal stats of renderers.
         renderer_update_internal_stats_page(last_render, render_counts, render_times)
-        logger.info("renderer: having a little nap...")
+        logger.info("Having a little nap...")
         time.sleep(kiosk_constants.render_period_sec)
 
 
@@ -714,13 +714,15 @@ def main() -> None:
     renderer_thread: Optional[Thread] = None
     janitor_thread: Optional[Thread] = None
     hotword_thread: Optional[Thread] = None
+
+    logging_utils.register_thread_logging_prefix("watchdog:")
     while True:
         if hotword_thread is None or not hotword_thread.is_alive():
             if hotword_thread is None:
                 logger.info("watchdog: Starting up the hotword detector thread...")
             else:
                 logger.warning(
-                    "watchdog: The hotword detector thread seems to have died; restarting it and hoping for the best."
+                    "The hotword detector thread seems to have died; restarting it and hoping for the best."
                 )
             keyword_paths = [pvporcupine.KEYWORD_PATHS[x] for x in ["bumblebee"]]
             sensitivities = [0.7] * len(keyword_paths)
@@ -734,36 +736,36 @@ def main() -> None:
 
         if changer_thread is None or not changer_thread.is_alive():
             if changer_thread is None:
-                logger.info("watchdog: Starting up the current page changer thread...")
+                logger.info("Starting up the current page changer thread...")
             else:
                 logger.warning(
-                    "watchdog: The current page changer thread seems to have died; restarting it and hoping for the best."
+                    "The current page changer thread seems to have died; restarting it and hoping for the best."
                 )
             changer_thread = Thread(target=thread_change_current, args=(command_queue,))
             changer_thread.start()
 
         if renderer_thread is None or not renderer_thread.is_alive():
             if renderer_thread is None:
-                logger.info("watchdog: Starting up the page renderer thread...")
+                logger.info("Starting up the page renderer thread...")
             else:
                 logger.warning(
-                    "watchdog: The page renderer thread seems to have died; restarting it and hoping for the best."
+                    "The page renderer thread seems to have died; restarting it and hoping for the best."
                 )
             renderer_thread = Thread(target=thread_invoke_renderers, args=())
             renderer_thread.start()
 
         if janitor_thread is None or not janitor_thread.is_alive():
             if janitor_thread is None:
-                logger.info("watchdog: Starting up the memory janitor thread...")
+                logger.info("Starting up the memory janitor thread...")
            else:
                 logger.warning(
-                    "watchdog: The memory janitor thread seems to have died; restarting it and hoping for the best."
+                    "The memory janitor thread seems to have died; restarting it and hoping for the best."
                 )
             janitor_thread = Thread(target=thread_janitor, args=())
             janitor_thread.start()
 
         # Have a little break and then check to make sure all threads are still alive.
-        logger.debug("watchdog: having a little nap.")
+        logger.debug("Having a little nap...")
         time.sleep(kiosk_constants.check_threads_period_sec)
 
 
diff --git a/listen.py b/listen.py
index 4fe2fc6..4ddd774 100755
--- a/listen.py
+++ b/listen.py
@@ -7,6 +7,7 @@ import struct
 import pvporcupine
 import pyaudio
 import speech_recognition as sr
+from pyutils import logging_utils
 
 logger = logging.getLogger(__file__)
 
@@ -29,6 +30,7 @@ class HotwordListener(object):
         self._input_device_index = input_device_index
 
     def listen_forever(self):
+        logging_utils.register_thread_logging_prefix("listener:")
         keywords = list()
         for x in self._keyword_paths:
             keywords.append(os.path.basename(x).replace(".ppn", "").split("_")[0])
-- 
2.45.0