2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level with :class:`LoggingContext`.
15 * Prepend or append a message to every log record also with
16 :class:`LoggingContext`.
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
* optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
37 * optionally append to system-wide records of non-zero exits
38 (:code:`--logging_non_zero_exits_record_path`) and unhandled exceptions
39 (:code:`--logging_unhandled_top_level_exceptions_record_path`) in
40 cooperation with :mod:`pyutils.bootstrap`.
41 * There are also :class:`LoggerAdapter` classes to implement prefix/suffix
42 functionality without using :class:`LoggingContext` by wrapping the
45 To use this functionality, call :meth:`initialize_logging` early
46 in your program entry point. If you use the
47 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
48 point, it will call this for you automatically.
60 from logging.config import fileConfig
61 from logging.handlers import RotatingFileHandler, SysLogHandler
62 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
65 from overrides import overrides
67 # This module is commonly used by others in here and should avoid
68 # taking any unnecessary dependencies back on them.
69 from pyutils import argparse_utils, config, misc_utils
# ---------------------------------------------------------------------------
# Commandline flags registered with pyutils.config.
# NOTE(review): this region is a subsampled extract -- most of the enclosing
# `cfg.add_argument(` opening lines (and some `default=`/`type=` lines) are
# missing from this view, so the argument fragments below are preserved
# verbatim.  Comments inside call argument lists are legal Python, so the
# group markers added here are safe even in the intact file.
# ---------------------------------------------------------------------------
cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
# --logging_config_file: delegate all logging setup to a fileConfig file.
"--logging_config_file",
type=argparse_utils.valid_filename,
help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
# --logging_level: the global default threshold below which records are dropped.
choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="The global default level below which to squelch log messages",
# --logging_format / --logging_date_format: record layout.
help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
"--logging_date_format",
default="%Y/%m/%dT%H:%M:%S.%f%z",
help="The format of any dates in --logging_format.",
# --logging_console: stderr StreamHandler on/off.
action=argparse_utils.ActionNoYes,
help="Should we log to the console (stderr)",
# --logging_filename (+ maxsize/count): rotating file handler.
"--logging_filename",
help="The filename of the logfile to write.",
"--logging_filename_maxsize",
type=argparse_utils.valid_byte_count,
default=(1024 * 1024),
help="The maximum size (in bytes) to write to the logging_filename.",
"--logging_filename_count",
help="The number of logging_filename copies to keep before deleting.",
# --logging_syslog (+ facility): UNIX syslog handler.
action=argparse_utils.ActionNoYes,
help="Should we log to localhost's syslog.",
"--logging_syslog_facility",
metavar="SYSLOG_FACILITY_LIST",
help="The default syslog message facility identifier",
# --logging_debug_threads / --logging_debug_modules: extra format fields.
"--logging_debug_threads",
action=argparse_utils.ActionNoYes,
help="Should we prepend pid/tid data to all log messages?",
"--logging_debug_modules",
action=argparse_utils.ActionNoYes,
help="Should we prepend module/function data to all log messages?",
# --logging_info_is_print: echo INFO records on stdout too.
"--logging_info_is_print",
action=argparse_utils.ActionNoYes,
help="logging.info also prints to stdout.",
# --logging_squelch_repeats / --logging_probabilistically: opt-in filters.
"--logging_squelch_repeats",
action=argparse_utils.ActionNoYes,
help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
"--logging_probabilistically",
action=argparse_utils.ActionNoYes,
help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
# See also: OutputMultiplexer
"--logging_captures_prints",
action=argparse_utils.ActionNoYes,
help="When calling print, also log.info automatically.",
"--logging_clear_preexisting_handlers",
action=argparse_utils.ActionNoYes,
# NOTE(review): typos in the runtime help text below -- "that is alone"
# should read "that it alone" and "might now want" should read "might not
# want"; left untouched here because this edit changes comments only.
"Should logging code clear preexisting global logging handlers and thus insist that is "
+ "alone can add handlers. Use this to work around annoying modules that insert global "
+ "handlers with formats and logging levels you might now want. Caveat emptor, this may "
+ "cause you to miss logging messages."
# --logging_*_record_path: system-wide abnormal-exit records (see bootstrap).
"--logging_non_zero_exits_record_path",
default="/var/log/abnormal_python_exits.log",
help="The filename in which to record non-zero exits.",
"--logging_unhandled_top_level_exceptions_record_path",
default="/var/log/abnormal_python_exits.log",
help="The filename in which to record unhandled top level exceptions.",
# The original builtin print, saved so that --logging_captures_prints can
# replace builtins.print while still being able to call the real one.
BUILT_IN_PRINT = print
# Latch ensuring initialize_logging's body runs at most once per process.
LOGGING_INITIALIZED = False
# A map from logging_callsite_id -> count of logged messages.
squelched_logging_counts: Dict[str, int] = {}
def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
    """
    A decorator that marks a function as interested in having the logging
    messages that it produces be squelched (ignored) after it logs the
    same message more than N times.

    .. note::

        This decorator affects *ALL* logging messages produced within the
        decorated function.  Messages must be identical in order to be
        squelched: the same line of code producing different text (e.g.
        via a format string) counts as different messages.

    Users can insist that all logged messages always be reflected in the
    logs using the :code:`--no_logging_squelch_repeats` flag, but the
    default behavior is to allow code to request it be squelched.
    :code:`--logging_squelch_repeats` only affects code with this
    decorator on it; it ignores all other code.

    Args:
        squelch_after_n_repeats: the number of repeated messages allowed to
            log before subsequent messages are silently dropped.
    """

    def squelch_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)

        # Get rid of module paths, e.g. pyutils.ansi:bg -> ansi:bg which
        # is the form SquelchRepeatedMessagesFilter keys on at filter time.
        identifier = re.sub(r"[^\.]+\.", "", identifier)
        squelched_logging_counts[identifier] = squelch_after_n_repeats
        # Return the original function unmodified; the decorator only
        # registers the callsite's squelch threshold.  (This return was
        # lost in the garbled extract; without it decorated functions
        # would become None.)
        return f

    return squelch_logging_wrapper
class SquelchRepeatedMessagesFilter(logging.Filter):
    """A filter that only logs messages from a given site with the same
    (exact) message at the same logging level N times and ignores
    subsequent attempts to log.

    This filter only affects logging messages that repeat more than a
    threshold number of times from functions that are tagged with the
    :meth:`squelch_repeated_log_messages` decorator (see above); all
    others pass through unconditionally.  (The docstring previously
    referenced a nonexistent ``squelched_logging_ok`` decorator.)

    This functionality is enabled by default but can be disabled via
    the :code:`--no_logging_squelch_repeats` commandline flag.
    """

    def __init__(self) -> None:
        super().__init__()
        # Per-callsite count of messages seen so far.
        self.counters: collections.Counter = collections.Counter()

    def filter(self, record: logging.LogRecord) -> bool:
        """Should we drop this log message?"""
        id1 = f"{record.module}:{record.funcName}"
        if id1 not in squelched_logging_counts:
            # Functions without the decorator are never squelched.
            return True
        threshold = squelched_logging_counts[id1]
        # Key on the exact callsite + level + raw message text.
        logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
        count = self.counters[logsite]
        self.counters[logsite] += 1
        return count < threshold
# A map from function_identifier -> probability of logging (0.0%..100.0%),
# populated by the @logging_is_probabilistic decorator and consulted by
# ProbabilisticFilter at filter time.
probabilistic_logging_levels: Dict[str, float] = {}
def logging_is_probabilistic(probability_of_logging: float) -> Callable:
    """A decorator that indicates that all logging statements within the
    scope of a particular (marked via decorator) function are not
    deterministic (i.e. they do not always unconditionally log) but rather
    are probabilistic (i.e. they log N% of the time, randomly) when the
    user passes the :code:`--logging_probabilistically` commandline flag
    (which is enabled by default).

    .. note::

        This affects *ALL* logging statements within the marked function.
        If you want it to only affect a subset of logging statements,
        log those statements in a separate function that you invoke
        from within the "too large" scope and mark that separate function
        with the :code:`logging_is_probabilistic` decorator instead.

    This functionality can be disabled (forcing all logged messages to
    produce output) via the :code:`--no_logging_probabilistically`
    cmdline argument.

    Args:
        probability_of_logging: the percent chance (0.0..100.0) that any
            single logging statement inside the decorated function emits.
    """

    def probabilistic_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        probabilistic_logging_levels[identifier] = probability_of_logging
        # Return the original function unmodified; registration is the
        # decorator's only job.  (This return was lost in the garbled
        # extract; without it decorated functions would become None.)
        return f

    return probabilistic_logging_wrapper
class ProbabilisticFilter(logging.Filter):
    """
    A filter that logs messages probabilistically (i.e. randomly at some
    percent chance). This filter is used with a decorator (see
    :meth:`logging_is_probabilistic`) to implement the
    :code:`--logging_probabilistically` commandline flag.

    This filter only affects logging messages from functions that have
    been tagged with the :meth:`logging_is_probabilistic` decorator;
    untagged callsites always log.  (The docstring previously referenced
    a nonexistent ``probabilistic_logging`` decorator name.)
    """

    def filter(self, record: logging.LogRecord) -> bool:
        """Should the message be logged?"""
        identifier = f"{record.module}:{record.funcName}"
        # Untagged callsites default to 100% -- i.e. always log.
        threshold = probabilistic_logging_levels.get(identifier, 100.0)
        return misc_utils.execute_probabilistically(threshold)
class OnlyInfoFilter(logging.Filter):
    """A filter that passes only records logged at exactly the INFO
    level.  This is used by the :code:`--logging_info_is_print`
    commandline option to select the subset of the logging stream that
    gets echoed to a stdout handler.
    """

    def filter(self, record: logging.LogRecord):
        """Return True only for INFO-level records."""
        return logging.INFO == record.levelno
class PrefixAddingFilter(logging.Filter):
    """A filter that adds a string prefix to the log record for the
    formatter to later fill in. Requires a %(prefix)s in the format
    string.
    """

    def __init__(self, prefix: str, klobber: bool = False):
        """
        Args:
            prefix: the prefix string to add
            klobber: should we overwrite other prefixes?
        """
        super().__init__()
        # Normalize a falsy prefix (e.g. None) to the empty string so the
        # %(prefix)s placeholder always renders cleanly.
        if prefix:
            self.prefix: str = prefix
        else:
            self.prefix = ""
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        if self.klobber:
            # Overwrite any prefix already stamped on the record.
            record.prefix = self.prefix
        elif "prefix" not in record.__dict__:
            # Only fill in a prefix when no other filter has set one.
            record.prefix = self.prefix
        # Never drop the record; this filter only annotates it.
        return True
class SuffixAddingFilter(logging.Filter):
    """A filter that adds a string suffix to the log record for the
    formatter to later fill in. Requires a %(suffix)s in the format
    string.
    """

    def __init__(self, suffix: str, klobber: bool = False):
        """
        Args:
            suffix: the suffix string to add
            klobber: should we overwrite other suffixes?
        """
        super().__init__()
        # Normalize a falsy suffix (e.g. None) to the empty string so the
        # %(suffix)s placeholder always renders cleanly.
        if suffix:
            self.suffix: str = suffix
        else:
            self.suffix = ""
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        if self.klobber:
            # Overwrite any suffix already stamped on the record.
            record.suffix = self.suffix
        elif "suffix" not in record.__dict__:
            # Only fill in a suffix when no other filter has set one.
            record.suffix = self.suffix
        # Never drop the record; this filter only annotates it.
        return True
class PrependingLogAdapter(logging.LoggerAdapter):
    """A :class:`logging.LoggerAdapter` that prepends a fixed string
    (stored under the "prefix" key of ``extra``) to every message logged
    through it.

    :class:`LoggingContext` adds prefixes/suffixes via logging.Filters
    and requires %(prefix)s / %(suffix)s placeholders in the format
    string.  This class is an alternate approach that simply wraps the
    logger and thunks calls down to it -- useful when you don't want to
    use :class:`LoggingContext` or its friends.  The wrapped logger
    continues to behave as usual; drop the adapter reference when it is
    no longer needed.
    """

    def process(self, msg, kwargs):
        """Prepend our configured prefix to the outgoing message."""
        return f'{self.extra.get("prefix", "")}{msg}', kwargs

    @staticmethod
    def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LogAdapter that prepends
        a given string to every log message produced.

        Args:
            prefix: the message to prepend to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return PrependingLogAdapter(logger, {"prefix": prefix})
class AppendingLogAdapter(logging.LoggerAdapter):
    """A :class:`logging.LoggerAdapter` that appends a fixed string
    (stored under the "suffix" key of ``extra``) to every message logged
    through it.

    :class:`LoggingContext` adds prefixes/suffixes via logging.Filters
    and requires %(prefix)s / %(suffix)s placeholders in the format
    string.  This class is an alternate approach that simply wraps the
    logger and thunks calls down to it -- useful when you don't want to
    use :class:`LoggingContext` or its friends.  The wrapped logger
    continues to behave as usual; drop the adapter reference when it is
    no longer needed.
    """

    def process(self, msg, kwargs):
        """Append our configured suffix to the outgoing message."""
        return f'{msg}{self.extra.get("suffix", "")}', kwargs

    @staticmethod
    def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LoggerAdapter that appends
        a given string to every log message produced.

        Args:
            suffix: the message to append to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return AppendingLogAdapter(logger, {"suffix": suffix})
class LoggingContext(contextlib.ContextDecorator):
    """A context manager (usable as a decorator too, courtesy of
    :class:`contextlib.ContextDecorator`) that temporarily changes the
    way a logger behaves within its scope.  Logging changes may include:

        - Changing the logging level (e.g. from INFO to DEBUG),
        - Adding a prefix or suffix to every log message produced,
        - Adding temporary Handlers to direct the logging output elsewhere.

    On exit the logger's previous level, handlers, and filters are
    restored.  Prefix/suffix support relies on %(prefix)s and %(suffix)s
    placeholders in the logger's format string (the default format
    installed by :meth:`initialize_logging` includes them); for example,
    this is useful to stamp an active request identifier onto the set of
    log messages produced while processing it.
    """

    def __init__(
        self,
        logger: logging.Logger,
        level: Optional[int] = None,
        handlers: Optional[List[logging.Handler]] = None,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
    ):
        """
        Args:
            logger: the logger whose behavior to temporarily change
            level: an optional new logging level to use inside the scope
            handlers: optional extra Handlers to add inside the scope
            prefix: an optional message to prepend to every log message
            suffix: an optional message to append to every log message
        """
        self.logger = logger
        self.level = level
        # Remembered so __exit__ can restore the pre-context level.
        self.old_level: Optional[int] = None
        self.handlers = handlers
        # klobber=True: inside the context our prefix/suffix overwrites
        # any preexisting one stamped by other filters.
        if prefix:
            self.prefix: Optional[logging.Filter] = PrefixAddingFilter(prefix, True)
        else:
            self.prefix = None
        if suffix:
            self.suffix: Optional[logging.Filter] = SuffixAddingFilter(suffix, True)
        else:
            self.suffix = None
669 def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
672 self.old_level = self.logger.level
673 self.logger.setLevel(self.level)
676 for handler in self.handlers:
677 self.logger.addHandler(handler)
680 self.logger.addFilter(self.prefix)
683 self.logger.addFilter(self.suffix)
686 def __exit__(self, et, ev, tb) -> None:
688 self.logger.removeFilter(self.suffix)
691 self.logger.removeFilter(self.prefix)
694 for handler in self.handlers:
695 self.logger.removeHandler(handler)
699 self.logger.setLevel(self.old_level)
700 return None # propagate exceptions out
class MillisecondAwareFormatter(logging.Formatter):
    """A formatter for adding milliseconds to log messages which, for
    whatever reason, the default Python logger doesn't do.

    .. note::

        You probably don't need to use this directly but it is
        wired in under :meth:`initialize_logging` so that the
        timestamps in log messages have millisecond level
        granularity.
    """

    # Convert record.created (epoch seconds) via datetime so strftime
    # gets a timezone-aware object.
    converter = datetime.datetime.fromtimestamp  # type: ignore

    def formatTime(self, record, datefmt=None):
        """Render the record's creation time; honors datefmt when given,
        otherwise appends zero-padded milliseconds to the default form."""
        ct = MillisecondAwareFormatter.converter(
            record.created, pytz.timezone("US/Pacific")
        )
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y-%m-%d %H:%M:%S")
            # Bugfix: "%03d" is not a valid f-string format spec (it raised
            # ValueError at runtime); zero-pad the integer msecs instead.
            s = f"{t},{int(record.msecs):03d}"
        return s
def _log_about_logging(
    logger,
    default_logging_level,
    preexisting_handlers_count,
    fmt,
    facility_name,
):
    """This is invoked automatically after logging is initialized such
    that the first messages in the log are about how logging itself
    was configured.
    """
    level_name = logging._levelToName.get(
        default_logging_level, str(default_logging_level)
    )
    logger.debug("Initialized global logging; logging level is %s.", level_name)
    if (
        config.config["logging_clear_preexisting_handlers"]
        and preexisting_handlers_count > 0
    ):
        logger.debug(
            "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
            preexisting_handlers_count,
        )
    logger.debug('Logging format specification is "%s"', fmt)
    if config.config["logging_debug_threads"]:
        logger.debug(
            "...Logging format spec captures tid/pid. (--logging_debug_threads)"
        )
    if config.config["logging_debug_modules"]:
        logger.debug(
            "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
        )
    if config.config["logging_syslog"]:
        logger.debug(
            "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
            facility_name,
        )
    if config.config["logging_filename"]:
        logger.debug(
            'Logging to file "%s". (--logging_filename)',
            config.config["logging_filename"],
        )
        logger.debug(
            "...with %d bytes max file size. (--logging_filename_maxsize)",
            config.config["logging_filename_maxsize"],
        )
        logger.debug(
            "...and %d rotating backup file count. (--logging_filename_count)",
            config.config["logging_filename_count"],
        )
    if config.config["logging_console"]:
        logger.debug("Logging to the console (stderr). (--logging_console)")
    if config.config["logging_info_is_print"]:
        logger.debug(
            "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
        )
    if config.config["logging_squelch_repeats"]:
        logger.debug(
            "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
        )
    else:
        logger.debug(
            "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
        )
    if config.config["logging_probabilistically"]:
        logger.debug(
            "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
        )
    else:
        logger.debug(
            "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
        )
    if config.config["logging_captures_prints"]:
        logger.debug(
            "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
        )
def _construct_logging_format() -> str:
    """Figure out the root logging format string based on commandline args."""

    # Custom or default --logging_format?
    if config.config["logging_format"]:
        fmt = config.config["logging_format"]
    elif config.config["logging_syslog"]:
        # syslogd stamps its own timestamps; include process id instead.
        fmt = "%(levelname).1s:%(filename)s[%(process)d]: "
    else:
        fmt = "%(levelname).1s:%(asctime)s: "

    # Placeholders filled in by PrefixAddingFilter / SuffixAddingFilter.
    fmt += "%(prefix)s%(message)s%(suffix)s"

    # --logging_debug_threads and --logging_debug_modules both affect
    # the format by prepending information about the pid/tid or
    # file/function/line to every record.
    if config.config["logging_debug_threads"]:
        fmt = f"%(process)d.%(thread)d|{fmt}"
    if config.config["logging_debug_modules"]:
        fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
    return fmt
def _construct_handlers_list(
    logging_format: str, facility_name: Optional[str]
) -> List[logging.Handler]:
    """Create the list of handlers to be added to the root logger
    based on commandline flags.
    """
    retval: List[logging.Handler] = []

    # --logging_syslog (optionally with --logging_syslog_facility)
    # sets up for logging to use the standard system syslogd as a
    # sink (not available on Windows).
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            # Unknown facility names fall back to LOG_USER.
            facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
            syslog_handler = SysLogHandler(facility=facility, address="/dev/log")
            syslog_handler.setFormatter(
                MillisecondAwareFormatter(
                    fmt=logging_format,
                    datefmt=config.config["logging_date_format"],
                )
            )
            retval.append(syslog_handler)

    # --logging_filename (with friends --logging_filename_count and
    # --logging_filename_maxsize) set up logging to a file on the
    # filesystem with automatic rotation when it gets too big.
    if config.config["logging_filename"]:
        max_bytes = config.config["logging_filename_maxsize"]
        assert max_bytes and isinstance(max_bytes, int)
        backup_count = config.config["logging_filename_count"]
        assert backup_count and isinstance(backup_count, int)
        file_handler = RotatingFileHandler(
            config.config["logging_filename"],
            maxBytes=max_bytes,
            backupCount=backup_count,
        )
        file_handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=logging_format,
                datefmt=config.config["logging_date_format"],
            )
        )
        retval.append(file_handler)

    # --logging_console is, ahem, logging to the console.
    if config.config["logging_console"]:
        console_handler = logging.StreamHandler(sys.stderr)
        console_handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=logging_format,
                datefmt=config.config["logging_date_format"],
            )
        )
        retval.append(console_handler)

    # --logging_info_is_print echoes any message to logger.info(x) as
    # a print statement on stdout.
    if config.config["logging_info_is_print"]:
        print_handler = logging.StreamHandler(sys.stdout)
        print_handler.addFilter(OnlyInfoFilter())
        retval.append(print_handler)

    # Ensure at least one handler so the logging module doesn't warn.
    if len(retval) == 0:
        retval.append(logging.NullHandler())
    return retval
def _add_logging_handlers_and_filters(
    logger: logging.Logger, handlers: List[logging.Handler]
) -> None:
    """Adds a list of handlers to the root logger."""

    for handler in handlers:
        # These two are always present and set %(prefix) and %(suffix)
        # if appropriate (see :class:`LoggingContext`) or make them
        # empty strings so the format string renders cleanly.
        handler.addFilter(PrefixAddingFilter("", False))
        handler.addFilter(SuffixAddingFilter("", False))

        # --logging_squelch_repeats allows code to request repeat logging
        # messages (identical log site and message contents) to be
        # silenced. Logging code must request this explicitly, it isn't
        # automatic. This option just allows the silencing to happen.
        if config.config["logging_squelch_repeats"]:
            handler.addFilter(SquelchRepeatedMessagesFilter())

        # --logging_probabilistically allows code to request
        # non-deterministic logging where messages have some probability
        # of being produced. Logging code must request this explicitly.
        # This option just allows the non-deterministic behavior to
        # happen. Disabling it will cause every log message to be
        # produced unconditionally.
        if config.config["logging_probabilistically"]:
            handler.addFilter(ProbabilisticFilter())
        logger.addHandler(handler)
def initialize_logging(logger=None) -> logging.Logger:
    """Initialize logging for the program. See module level comments
    for information about what functionality this provides and how to
    enable or disable functionality via the commandline.

    If you use the :meth:`bootstrap.initialize` decorator on your
    program's entry point, it will call this for you. See
    :meth:`pyutils.bootstrap.initialize` for more details.
    """
    global LOGGING_INITIALIZED
    # Idempotent: only the first call configures anything.
    if LOGGING_INITIALIZED:
        return logging.getLogger()
    LOGGING_INITIALIZED = True

    clear_preexisting = config.config["logging_clear_preexisting_handlers"]
    preexisting_handlers_count = 0
    if logger is None:
        logging.basicConfig(force=clear_preexisting)
        logger = logging.getLogger()

    # --logging_clear_preexisting_handlers removes logging handlers
    # that were registered by global statements during imported module
    # setup.
    if clear_preexisting:
        while logger.hasHandlers():
            logger.removeHandler(logger.handlers[0])
            preexisting_handlers_count += 1

    # --logging_config_file pulls logging settings from a config file
    # skipping the rest of this setup.
    if config.config["logging_config_file"] is not None:
        fileConfig(config.config["logging_config_file"])
        return logger

    # Global default logging level (--logging_level); messages below
    # this level will be silenced.
    logging_level = config.config["logging_level"]
    assert logging_level
    logging_level = logging_level.upper()
    default_logging_level = getattr(logging, logging_level, None)
    if not isinstance(default_logging_level, int):
        raise ValueError(f'Invalid level: {config.config["logging_level"]}')
    logger.setLevel(default_logging_level)

    logging_format = _construct_logging_format()
    facility_name = None
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            if config.config["logging_syslog_facility"]:
                facility_name = "LOG_" + config.config["logging_syslog_facility"]
    handlers: List[logging.Handler] = _construct_handlers_list(
        logging_format, facility_name
    )
    _add_logging_handlers_and_filters(logger, handlers)

    # --logging_captures_prints, if set, will capture and log.info
    # anything printed on stdout.
    if config.config["logging_captures_prints"]:
        import builtins

        def print_and_also_log(*arg, **kwarg):
            # Echo prints aimed at stderr as warnings, others as info,
            # then fall through to the real builtin print.
            f = kwarg.get("file", None)
            if f == sys.stderr:
                logger.warning(*arg)
            else:
                logger.info(*arg)
            BUILT_IN_PRINT(*arg, **kwarg)

        builtins.print = print_and_also_log

    # At this point the logger is ready, handlers are set up,
    # etc... so log about the logging configuration.
    _log_about_logging(
        logger,
        default_logging_level,
        preexisting_handlers_count,
        logging_format,
        facility_name,
    )
    return logger
def get_logger(name: str = ""):
    """Fetch the global logger (by optional name), ensuring that global
    logging has been initialized before returning it."""
    return initialize_logging(logging.getLogger(name))
class OutputMultiplexer(object):
    """A class that broadcasts printed messages to several sinks
    (including various logging levels, different files, different file
    handles, the house log, etc...). See also
    :class:`OutputMultiplexerContext` for an easy usage pattern.
    """

    class Destination(enum.IntEnum):
        """Bits in the destination_bitv bitvector. Used to indicate the
        output destination."""

        # NOTE(review): this excerpt has extraction gaps: a LOG_INFO member
        # (referenced by ALL_LOG_DESTINATIONS below, presumably 0x02) and an
        # HLOG member (referenced by print() below, presumably 0x80 given
        # ALL_OUTPUT_DESTINATIONS = 0x8F) are not visible here -- confirm
        # against the full source.
        LOG_DEBUG = 0x01  # ⎫
        LOG_WARNING = 0x04  # ⎬ Must provide logger to the c'tor.
        LOG_ERROR = 0x08  # ⎪
        LOG_CRITICAL = 0x10  # ⎭
        FILENAMES = 0x20  # Must provide a filename to the c'tor.
        FILEHANDLES = 0x40  # Must provide a handle to the c'tor.

        # Convenience mask covering all logger-based destinations at once.
        ALL_LOG_DESTINATIONS = (
            LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        # NOTE(review): the closing ")" of the expression above is elided in
        # this excerpt.
        # Convenience mask covering every output destination bit at once.
        ALL_OUTPUT_DESTINATIONS = 0x8F

    # NOTE(review): the "def __init__(self," header line is elided in this
    # excerpt; the parameter lines below belong to the constructor (a
    # `logger` parameter is also documented below but its line is elided).
        destination_bitv: int,
        filenames: Optional[Iterable[str]] = None,
        handles: Optional[Iterable[io.TextIOWrapper]] = None,
        """
        Constructs the OutputMultiplexer instance.

        Args:
            destination_bitv: a bitvector where each bit represents an
                output destination. Multiple bits may be set.
            logger: if LOG_* bits are set, you must pass a logger here.
            filenames: if FILENAMES bit is set, this should be a list of
                files you'd like to output into. This code handles opening
                and closing said files.
            handles: if FILEHANDLES bit is set, this should be a list of
                already opened filehandles you'd like to output into. The
                handles will remain open after the scope of the multiplexer.
        """
        # Fall back to the root logger when the caller supplied none.
        # NOTE(review): the guard line (presumably "if logger is None:") is
        # elided in this excerpt.
            logger = logging.getLogger(None)
        self.logger = logger

        # Files opened by this class for the FILENAMES destination; None
        # when that destination is unused. Closed later (see close()).
        self.f: Optional[List[Any]] = None
        if filenames is not None:
            # Unbuffered binary mode so output reaches the files immediately.
            self.f = [open(filename, "wb", buffering=0) for filename in filenames]
        # NOTE(review): an "else:" introducing the check below is elided in
        # this excerpt; presumably the raise applies only when filenames is
        # None -- confirm against the full source.
            if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
                raise ValueError("Filenames argument is required if bitv & FILENAMES")

        # Caller-owned filehandles for the FILEHANDLES destination; these
        # remain open after this instance is done with them.
        self.h: Optional[List[Any]] = None
        if handles is not None:
            self.h = list(handles)
        # NOTE(review): an "else:" introducing the check below is elided in
        # this excerpt; presumably the raise applies only when handles is
        # None -- confirm against the full source.
            if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
                raise ValueError("Handle argument is required if bitv & FILEHANDLES")

        # Validates the requested bits against the sinks provided above.
        self.set_destination_bitv(destination_bitv)

    def get_destination_bitv(self):
        """Where are we outputting?"""
        return self.destination_bitv

    def set_destination_bitv(self, destination_bitv: int):
        """Change the output destination_bitv to the one provided.

        Raises:
            ValueError: if the new bitvector requires filenames or handles
                that were not supplied to the constructor.
        """
        if destination_bitv & self.Destination.FILENAMES and self.f is None:
            raise ValueError("Filename argument is required if bitv & FILENAMES")
        if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
        self.destination_bitv = destination_bitv

    def print(self, *args, **kwargs):
        """Produce some output to all sinks."""
        from pyutils.string_utils import _sprintf, strip_escape_sequences

        end = kwargs.pop("end", None)
        # NOTE(review): a guard (presumably "if end is not None:") is elided
        # before this isinstance check in the excerpt.
        if not isinstance(end, str):
            raise TypeError("end must be None or a string")
        sep = kwargs.pop("sep", None)
        # NOTE(review): a guard (presumably "if sep is not None:") is elided
        # before this isinstance check in the excerpt.
        if not isinstance(sep, str):
            raise TypeError("sep must be None or a string")
        # NOTE(review): the condition guarding this raise (presumably
        # "if kwargs:", rejecting leftover keyword args like builtin print)
        # is elided in the excerpt.
        raise TypeError("invalid keyword arguments to print()")
        # Render all positional args into one string; the sep/end defaulting
        # lines that follow in the full source are elided here.
        buf = _sprintf(*args, end="", sep=sep)
        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
            # NOTE(review): the loop header (presumably "for _ in self.f:")
            # is elided in the excerpt.
            _.write(buf.encode("utf-8"))
        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
            # NOTE(review): the handle-writing loop body is elided here.
        # Logger sinks get plain text: strip ANSI escape sequences first.
        buf = strip_escape_sequences(buf)
        if self.logger is not None:
            if self.destination_bitv & self.Destination.LOG_DEBUG:
                self.logger.debug(buf)
            if self.destination_bitv & self.Destination.LOG_INFO:
                self.logger.info(buf)
            if self.destination_bitv & self.Destination.LOG_WARNING:
                self.logger.warning(buf)
            if self.destination_bitv & self.Destination.LOG_ERROR:
                self.logger.error(buf)
            if self.destination_bitv & self.Destination.LOG_CRITICAL:
                self.logger.critical(buf)
        if self.destination_bitv & self.Destination.HLOG:
            # NOTE(review): the house-log write (presumably hlog(buf)) is
            # elided in the excerpt.

    # NOTE(review): the "def close(self):" header line is elided in this
    # excerpt; the lines below are that method's body.
        """Close all open files."""
        if self.f is not None:
            # NOTE(review): the loop closing each file in self.f is elided.
class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
    """
    A context that uses an :class:`OutputMultiplexer`. e.g.::

        with OutputMultiplexerContext(
            OutputMultiplexer.LOG_INFO |
            OutputMultiplexer.LOG_DEBUG |
            OutputMultiplexer.FILENAMES |
            OutputMultiplexer.FILEHANDLES,
            filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
            mplex.print("This is a log message!")
    """

    # NOTE(review): the "def __init__(self," header and several parameter
    # lines (logger, filenames, handles) are elided in this excerpt; the
    # line below is the first constructor parameter.
        destination_bitv: OutputMultiplexer.Destination,
        """
        Args:
            destination_bitv: a bitvector that indicates where we should
                send output. See :class:`OutputMultiplexer` for options.
            logger: optional logger to use for log destination messages.
            filenames: optional filenames to write for filename destination
                messages.
            handles: optional open filehandles to write for filehandle
                destination messages.
        """
        # NOTE(review): the "super().__init__(" opening and its other
        # keyword arguments are elided around the line below.
            filenames=filenames,

    def __enter__(self):
        # NOTE(review): body elided in this excerpt; presumably returns self
        # so the "as" clause binds the multiplexer -- confirm against the
        # full source.

    def __exit__(self, etype, value, traceback) -> bool:
        # NOTE(review): a cleanup line (presumably self.close()) is elided
        # here.
        if etype is not None:
        # NOTE(review): the remainder of this method (exception logging and
        # the return value) is elided in this excerpt.
def _timestamp() -> str:
    """Render 'now' in US/Pacific time as a string for record-file lines."""
    pacific = pytz.timezone('US/Pacific')
    return datetime.datetime.now(pacific).strftime("%Y/%m/%dT%H:%M:%S.%f%z")
def non_zero_return_value(ret: Any):
    """
    Special method hooked from bootstrap.py to optionally keep a system-wide
    record of non-zero python program exits.

    Args:
        ret: the return value
    """
    # Path of the record file (set via --logging_non_zero_exits_record_path).
    record = config.config['logging_non_zero_exits_record_path']
    # NOTE(review): a guard line (presumably "if record:") is elided in this
    # excerpt; the indented lines below run only when a path was configured.
        program = config.PROGRAM_NAME
        args = config.ORIG_ARGV
        # Append one timestamped line per non-zero exit.
        with open(record, 'a') as af:
            # NOTE(review): the "print(" opening and its closing arguments
            # (presumably file=af) are elided around the line below.
            f'{_timestamp()}: {program} ({args}) exited with non-zero value {ret}.',
def unhandled_top_level_exception(exc_type: type):
    """
    Special method hooked from bootstrap.py to optionally keep a system-wide
    record of unhandled top level exceptions.

    Args:
        exc_type: the type of the unhandled exception
    """
    # Path of the record file (set via
    # --logging_unhandled_top_level_exceptions_record_path).
    record = config.config['logging_unhandled_top_level_exceptions_record_path']
    # NOTE(review): a guard line (presumably "if record:") is elided in this
    # excerpt; the indented lines below run only when a path was configured.
        program = config.PROGRAM_NAME
        args = config.ORIG_ARGV
        # Append one timestamped line per unhandled top-level exception.
        with open(record, 'a') as af:
            # NOTE(review): the "print(" opening and its closing arguments
            # (presumably file=af) are elided around the line below.
            f'{_timestamp()}: {program} ({args}) took unhandled top level exception {exc_type}',
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`. This is pretty hacky but used
    by a bunch of (my) code. Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    Args:
        message: the text to log; passed verbatim to logger(1).

    TODO: this needs to move.
    """
    import subprocess  # function-local: keeps the module's import surface unchanged

    try:
        # Pass the message as a discrete argv element (after "--") instead
        # of interpolating it into an os.system() shell string: no manual
        # single-quote escaping needed, and shell metacharacters in
        # `message` can never be interpreted by a shell.
        subprocess.run(
            ["/usr/bin/logger", "-p", "local7.info", "--", message],
            check=False,
        )
    except OSError:
        # Mirror os.system()'s best-effort behavior: a missing or
        # non-executable /usr/bin/logger must not crash the caller.
        pass
1268 if __name__ == "__main__":