2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level with :class:`LoggingContext`.
15 * Prepend or append a message to every log record also with
16 :class:`LoggingContext`.
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
33 * optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
37 * optionally append to system-wide records of non-zero exits
38 (:code:`--logging_non_zero_exits_record_path`) and unhandled exceptions
39 (:code:`--logging_unhandled_top_level_exceptions_record_path`) in
40 cooperation with :mod:`pyutils.bootstrap`.
41 * There are also :class:`LoggerAdapter` classes to implement prefix/suffix
42 functionality without using :class:`LoggingContext` by wrapping the
45 To use this functionality, call :meth:`initialize_logging` early
46 in your program entry point. If you use the
47 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
48 point, it will call this for you automatically.
60 from logging import FileHandler
61 from logging.config import fileConfig
62 from logging.handlers import RotatingFileHandler, SysLogHandler
63 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
66 from overrides import overrides
68 # This module is commonly used by others in here and should avoid
69 # taking any unnecessary dependencies back on them.
70 from pyutils import argparse_utils, config, misc_utils
72 cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
74 "--logging_config_file",
75 type=argparse_utils.valid_filename,
78 help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
84 choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
86 help="The global default level below which to squelch log messages",
92 help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
95 "--logging_date_format",
97 default="%Y/%m/%dT%H:%M:%S.%f%z",
99 help="The format of any dates in --logging_format.",
103 action=argparse_utils.ActionNoYes,
105 help="Should we log to the console (stderr)",
108 "--logging_filename",
112 help="The filename of the logfile to write.",
115 "--logging_filename_maxsize",
116 type=argparse_utils.valid_byte_count,
117 default=(1024 * 1024),
119 help="The maximum size (in bytes) to write to the logging_filename.",
122 "--logging_filename_count",
126 help="The number of logging_filename copies to keep before deleting.",
130 action=argparse_utils.ActionNoYes,
132 help="Should we log to localhost's syslog.",
135 "--logging_syslog_facility",
161 metavar="SYSLOG_FACILITY_LIST",
162 help="The default syslog message facility identifier",
165 "--logging_debug_threads",
166 action=argparse_utils.ActionNoYes,
168 help="Should we prepend pid/tid data to all log messages?",
171 "--logging_debug_modules",
172 action=argparse_utils.ActionNoYes,
174 help="Should we prepend module/function data to all log messages?",
177 "--logging_info_is_print",
178 action=argparse_utils.ActionNoYes,
180 help="logging.info also prints to stdout.",
183 "--logging_squelch_repeats",
184 action=argparse_utils.ActionNoYes,
186 help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
189 "--logging_probabilistically",
190 action=argparse_utils.ActionNoYes,
192 help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
194 # See also: OutputMultiplexer
196 "--logging_captures_prints",
197 action=argparse_utils.ActionNoYes,
199 help="When calling print, also log.info automatically.",
202 "--logging_clear_preexisting_handlers",
203 action=argparse_utils.ActionNoYes,
206 "Should logging code clear preexisting global logging handlers and thus insist that is "
207 + "alone can add handlers. Use this to work around annoying modules that insert global "
208 + "handlers with formats and logging levels you might now want. Caveat emptor, this may "
209 + "cause you to miss logging messages."
213 "--logging_non_zero_exits_record_path",
215 default="/var/log/abnormal_python_exits.log",
217 help="The filename in which to record non-zero exits.",
220 "--logging_unhandled_top_level_exceptions_record_path",
222 default="/var/log/abnormal_python_exits.log",
224 help="The filename in which to record unhandled top level exceptions.",
# Remember the real builtins.print so that --logging_captures_prints can
# still produce normal print output after builtins.print is monkeypatched
# (see initialize_logging).
BUILT_IN_PRINT = print

# Latch consulted by initialize_logging so that it is idempotent.
LOGGING_INITIALIZED = False


# A map from logging_callsite_id -> the number of identical messages a
# callsite may log before subsequent repeats are silently dropped.
# Populated by @squelch_repeated_log_messages; consumed by
# SquelchRepeatedMessagesFilter.
squelched_logging_counts: Dict[str, int] = {}
def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
    """A decorator that marks a function as interested in having the logging
    messages that it produces be squelched (ignored) after it logs the
    same message more than N times.

    .. note::

        This decorator affects *ALL* logging messages produced
        within the decorated function.  That said, messages must be
        identical in order to be squelched.  For example, if the same line
        of code produces different messages (because of, e.g., a format
        string), the messages are considered to be different.

    An example of this from the pyutils code itself can be found in
    :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg`: they log stuff
    like "Using 24-bit color strategy" which gets old really fast and
    fills up the logs.  Decorating them with
    :code:`@logging_utils.squelch_repeated_log_messages(1)` requests that
    their logged messages be dropped silently after the first one is
    produced.

    Users can insist that all logged messages always be reflected in the
    logs using the :code:`--no_logging_squelch_repeats` flag but the default
    behavior is to allow code to request it be squelched.

    :code:`--logging_squelch_repeats` only affects code with this decorator
    on it; it ignores all other code.

    Args:
        squelch_after_n_repeats: the number of repeated messages allowed to
            log before subsequent messages are silently dropped.

    Returns:
        A decorator that registers the wrapped function for squelching and
        returns it unchanged.
    """

    def squelch_logging_wrapper(f: Callable):
        # Deferred import: this module is imported by most of pyutils and
        # should avoid circular dependencies at import time.
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)

        # Get rid of module paths, e.g. pyutils.ansi:bg -> ansi:bg which
        # is what SquelchRepeatedMessagesFilter keys on (record.module
        # has no package path).
        identifier = re.sub(r"[^\.]+\.", "", identifier)
        squelched_logging_counts[identifier] = squelch_after_n_repeats
        return f

    return squelch_logging_wrapper
class SquelchRepeatedMessagesFilter(logging.Filter):
    """A filter that only logs messages from a given site with the same
    (exact) message at the same logging level N times and ignores
    subsequent attempts to log.

    This filter only affects logging messages that repeat more than a
    threshold number of times from functions that are tagged with the
    :code:`@logging_utils.squelch_repeated_log_messages` decorator (see
    above); all others are ignored.

    This functionality is enabled by default but can be disabled via
    the :code:`--no_logging_squelch_repeats` commandline flag.
    """

    def __init__(self) -> None:
        super().__init__()
        # Count of messages seen so far, keyed by exact logsite (see below).
        self.counters: collections.Counter = collections.Counter()

    def filter(self, record: logging.LogRecord) -> bool:
        """Should we drop this log message?"""
        # Only callsites registered by the decorator are subject to
        # squelching; everything else passes straight through.
        id1 = f"{record.module}:{record.funcName}"
        if id1 not in squelched_logging_counts:
            return True
        threshold = squelched_logging_counts[id1]
        # A "logsite" is the exact file/line/level/message tuple; only
        # byte-identical repeats are squelched.
        logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
        count = self.counters[logsite]
        self.counters[logsite] += 1
        return count < threshold
# A map from function_identifier -> probability of logging (0.0%..100.0%).
# Populated by @logging_is_probabilistic; consumed by ProbabilisticFilter.
probabilistic_logging_levels: Dict[str, float] = {}
def logging_is_probabilistic(probability_of_logging: float) -> Callable:
    """A decorator that indicates that all logging statements within the
    scope of a particular (marked via decorator) function are not
    deterministic (i.e. they do not always unconditionally log) but rather
    are probabilistic (i.e. they log N% of the time, randomly) when the
    user passes the :code:`--logging_probabilistically` commandline flag
    (which is enabled by default).

    .. note::

        This affects *ALL* logging statements within the marked function.
        If you want it to only affect a subset of logging statements,
        log those statements in a separate function that you invoke
        from within the "too large" scope and mark that separate function
        with the :code:`logging_is_probabilistic` decorator instead.

        This functionality can be disabled (forcing all logged
        messages to produce output) via the
        :code:`--no_logging_probabilistically` cmdline argument.

    Args:
        probability_of_logging: the percent chance (0.0 .. 100.0) that a
            logging statement in the decorated function produces output.

    Returns:
        A decorator that registers the wrapped function and returns it
        unchanged.
    """

    def probabilistic_logging_wrapper(f: Callable):
        # Deferred import to avoid circular dependencies at import time.
        from pyutils import function_utils

        # Register this callsite so ProbabilisticFilter can look up its
        # logging probability at filter time.
        identifier = function_utils.function_identifier(f)
        probabilistic_logging_levels[identifier] = probability_of_logging
        return f

    return probabilistic_logging_wrapper
class ProbabilisticFilter(logging.Filter):
    """
    A filter that logs messages probabilistically (i.e. randomly at some
    percent chance).  This filter is used with a decorator (see
    :meth:`logging_is_probabilistic`) to implement the
    :code:`--logging_probabilistically` commandline flag.

    This filter only affects logging messages from functions that have
    been tagged with the `@logging_utils.logging_is_probabilistic`
    decorator; all others are logged unconditionally.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        """Should the message be logged?"""
        # Unregistered callsites default to 100%, i.e. always log.
        identifier = f"{record.module}:{record.funcName}"
        threshold = probabilistic_logging_levels.get(identifier, 100.0)
        return misc_utils.execute_probabilistically(threshold)
class OnlyInfoFilter(logging.Filter):
    """A filter that passes only messages logged at exactly the INFO
    level.  This is used by the :code:`--logging_info_is_print`
    commandline option to select the subset of the logging stream that
    gets echoed to a stdout handler.
    """

    def filter(self, record: logging.LogRecord):
        # True iff the record was emitted at INFO (not above, not below).
        is_info = record.levelno == logging.INFO
        return is_info
class PrefixAddingFilter(logging.Filter):
    """A filter that adds a string prefix to the log record for the
    formatter to later fill in.  Requires a %(prefix)s in the format
    string to have any visible effect.
    """

    def __init__(self, prefix: str, klobber: bool = False):
        """
        Args:
            prefix: the prefix string to add (None/empty degrades to "")
            klobber: should we overwrite other prefixes?
        """
        super().__init__()
        # Normalize falsy prefixes (None, "") to the empty string so the
        # %(prefix)s format placeholder always has something to render.
        if prefix:
            self.prefix: str = prefix
        else:
            self.prefix = ""
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        if self.klobber:
            # Overwrite any prefix a previous filter installed.
            record.prefix = self.prefix
        elif "prefix" not in record.__dict__:
            # Only fill in a prefix if no other filter has set one.
            record.prefix = self.prefix
        return True
class SuffixAddingFilter(logging.Filter):
    """A filter that adds a string suffix to the log record for the
    formatter to later fill in.  Requires a %(suffix)s in the format
    string to have any visible effect.
    """

    def __init__(self, suffix: str, klobber: bool = False):
        """
        Args:
            suffix: the suffix string to add (None/empty degrades to "")
            klobber: should we overwrite other suffixes?
        """
        super().__init__()
        # Normalize falsy suffixes (None, "") to the empty string so the
        # %(suffix)s format placeholder always has something to render.
        if suffix:
            self.suffix: str = suffix
        else:
            self.suffix = ""
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        if self.klobber:
            # Overwrite any suffix a previous filter installed.
            record.suffix = self.suffix
        elif "suffix" not in record.__dict__:
            # Only fill in a suffix if no other filter has set one.
            record.suffix = self.suffix
        return True
class PrependingLogAdapter(logging.LoggerAdapter):
    """A :class:`logging.LoggerAdapter` that prepends a fixed string to
    every message logged through it.

    :class:`LoggingContext` adds prefixes and suffixes using a
    logging.Filter that must insert "prefix" or "suffix" members into
    each log record by using :class:`PrefixAddingFilter` and
    :class:`SuffixAddingFilter`.  That relies on the logging format
    string containing a %(prefix)s and a %(suffix)s to work correctly.

    This is an alternate approach that basically just wraps the logger
    in a class that has the same interface and thunks most calls down
    to the wrapped logger.  It works even when the logger has no format
    string / filters and might be useful if you don't want to use
    :class:`LoggingContext` or its friends.
    """

    def process(self, msg, kwargs):
        # self.extra is the dict given at construction; a missing
        # "prefix" key degrades gracefully to no prefix at all.
        return f'{self.extra.get("prefix", "")}{msg}', kwargs

    @staticmethod
    def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LogAdapter that prepends
        a given string to every log message produced.

        Args:
            prefix: the message to prepend to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return PrependingLogAdapter(logger, {"prefix": prefix})
class AppendingLogAdapter(logging.LoggerAdapter):
    """A :class:`logging.LoggerAdapter` that appends a fixed string to
    every message logged through it.

    :class:`LoggingContext` adds prefixes and suffixes using a
    logging.Filter that must insert "prefix" or "suffix" members into
    each log record by using :class:`PrefixAddingFilter` and
    :class:`SuffixAddingFilter`.  That relies on the logging format
    string containing a %(prefix)s and a %(suffix)s to work correctly.

    This is an alternate approach that basically just wraps the logger
    in a class that has the same interface and thunks most calls down
    to the wrapped logger.  It works even when the logger has no format
    string / filters and might be useful if you don't want to use
    :class:`LoggingContext` or its friends.
    """

    def process(self, msg, kwargs):
        # self.extra is the dict given at construction; a missing
        # "suffix" key degrades gracefully to no suffix at all.
        return f'{msg}{self.extra.get("suffix", "")}', kwargs

    @staticmethod
    def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LoggerAdapter that appends
        a given string to every log message produced.

        Args:
            suffix: the message to append to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return AppendingLogAdapter(logger, {"suffix": suffix})
class LoggingContext(contextlib.ContextDecorator):
    """A context manager (also usable as a decorator, via
    :class:`contextlib.ContextDecorator`) that temporarily changes the
    way we are logging within its scope.  Logging changes may include:

        - Changing the logging level (e.g. from INFO to DEBUG),
        - Adding a prefix or suffix to every log message produced,
        - Adding temporary Handlers to direct the logging output elsewhere.

    On exit all changes are undone and the logger's previous behavior is
    restored.

    .. note::

        The prefix / suffix feature requires %(prefix)s and %(suffix)s
        placeholders in the logger's format string (see
        :class:`PrefixAddingFilter` and :class:`SuffixAddingFilter`);
        :meth:`initialize_logging` normally sets this up for you.
    """

    def __init__(
        self,
        logger: logging.Logger,
        level: Optional[int] = None,
        handlers: Optional[List[logging.Handler]] = None,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
    ):
        """
        Args:
            logger: the logger on which to temporarily operate
            level: the new temporary logging level or None to leave alone
            handlers: temporary handlers to add or None to leave alone
            prefix: a string to prepend to each message or None
            suffix: a string to append to each message or None
        """
        self.logger = logger
        self.level = level
        self.old_level: Optional[int] = None
        self.handlers = handlers
        # Use klobbering filters so a nested context's prefix/suffix wins
        # over any filter already installed on the logger.
        if prefix:
            self.prefix: Optional[logging.Filter] = PrefixAddingFilter(prefix, True)
        else:
            self.prefix = None
        if suffix:
            self.suffix: Optional[logging.Filter] = SuffixAddingFilter(suffix, True)
        else:
            self.suffix = None

    def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
        assert self.logger
        if self.level:
            # Remember the old level so __exit__ can restore it.
            self.old_level = self.logger.level
            self.logger.setLevel(self.level)
        if self.handlers:
            for handler in self.handlers:
                self.logger.addHandler(handler)
        if self.prefix:
            self.logger.addFilter(self.prefix)
        if self.suffix:
            self.logger.addFilter(self.suffix)
        return self.logger

    def __exit__(self, et, ev, tb) -> None:
        # Undo our changes in roughly the reverse order they were applied.
        if self.suffix:
            self.logger.removeFilter(self.suffix)
        if self.prefix:
            self.logger.removeFilter(self.prefix)
        if self.handlers:
            for handler in self.handlers:
                self.logger.removeHandler(handler)
        if self.old_level:
            self.logger.setLevel(self.old_level)
        return None  # propagate exceptions out
class MillisecondAwareFormatter(logging.Formatter):
    """A formatter for adding milliseconds to log messages which, for
    whatever reason, the default Python logger doesn't do.

    .. note::

        You probably don't need to use this directly but it is
        wired in under :meth:`initialize_logging` so that the
        timestamps in log messages have millisecond level
        precision.
    """

    # Produces tz-aware datetimes; the tz is supplied at call time below.
    converter = datetime.datetime.fromtimestamp  # type: ignore

    def formatTime(self, record, datefmt=None):
        ct = MillisecondAwareFormatter.converter(
            record.created, pytz.timezone("US/Pacific")
        )
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y-%m-%d %H:%M:%S")
            # Bugfix: record.msecs is a float and "%03d" is not a valid
            # f-string format spec for floats (it raised ValueError);
            # truncate to int and zero-pad to three digits instead.
            s = f"{t},{int(record.msecs):03d}"
        return s
def _log_about_logging(
    logger,
    default_logging_level,
    preexisting_handlers_count,
    fmt,
    facility_name,
):
    """This is invoked automatically after logging is initialized such
    that the first messages in the log are about how logging itself
    was configured.

    Args:
        logger: the freshly configured root logger
        default_logging_level: the numeric global default level
        preexisting_handlers_count: count of handlers removed by
            --logging_clear_preexisting_handlers
        fmt: the format string in effect
        facility_name: the syslog facility name, if syslog is enabled
    """
    # Map the numeric level back to its name for readability; fall back
    # to the raw number for nonstandard levels.
    level_name = logging._levelToName.get(
        default_logging_level, str(default_logging_level)
    )
    logger.debug("Initialized global logging; logging level is %s.", level_name)
    if (
        config.config["logging_clear_preexisting_handlers"]
        and preexisting_handlers_count > 0
    ):
        logger.debug(
            "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
            preexisting_handlers_count,
        )
    logger.debug('Logging format specification is "%s"', fmt)
    if config.config["logging_debug_threads"]:
        logger.debug(
            "...Logging format spec captures tid/pid. (--logging_debug_threads)"
        )
    if config.config["logging_debug_modules"]:
        logger.debug(
            "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
        )
    if config.config["logging_syslog"]:
        logger.debug(
            "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
            facility_name,
        )
    if config.config["logging_filename"]:
        logger.debug(
            'Logging to file "%s". (--logging_filename)',
            config.config["logging_filename"],
        )
        logger.debug(
            "...with %d bytes max file size. (--logging_filename_maxsize)",
            config.config["logging_filename_maxsize"],
        )
        logger.debug(
            "...and %d rotating backup file count. (--logging_filename_count)",
            config.config["logging_filename_count"],
        )
    if config.config["logging_console"]:
        logger.debug("Logging to the console (stderr). (--logging_console)")
    if config.config["logging_info_is_print"]:
        logger.debug(
            "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
        )
    if config.config["logging_squelch_repeats"]:
        logger.debug(
            "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
        )
    else:
        logger.debug(
            "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
        )
    if config.config["logging_probabilistically"]:
        logger.debug(
            "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
        )
    else:
        logger.debug(
            "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
        )
    if config.config["logging_captures_prints"]:
        logger.debug(
            "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
        )
def _construct_logging_format() -> str:
    """Figure out the root logging format string based on commandline args.

    Returns:
        The %-style format string to hand to the root handlers'
        formatters.
    """

    # Custom or default --logging_format?
    if config.config["logging_format"]:
        fmt = config.config["logging_format"]
    elif config.config["logging_syslog"]:
        # syslogd adds its own timestamp; don't duplicate it.
        fmt = "%(levelname).1s:%(filename)s[%(process)d]: "
    else:
        fmt = "%(levelname).1s:%(asctime)s: "
    # %(prefix)s / %(suffix)s are filled in by PrefixAddingFilter /
    # SuffixAddingFilter (see LoggingContext).
    fmt += "%(prefix)s%(message)s%(suffix)s"

    # --logging_debug_threads and --logging_debug_modules both affect
    # the format by prepending information about the pid/tid or
    # file/function/line to every message.
    if config.config["logging_debug_threads"]:
        fmt = f"%(process)d.%(thread)d|{fmt}"
    if config.config["logging_debug_modules"]:
        fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
    return fmt
def _construct_handlers_list(
    logging_format: str, facility_name: Optional[str]
) -> List[logging.Handler]:
    """Create the list of handlers to be added to the root logger
    based on commandline flags.

    Args:
        logging_format: the format string for all handlers' formatters
        facility_name: the syslog facility attribute name (e.g.
            "LOG_USER") or None

    Returns:
        The handlers to install; never empty (a NullHandler is used as
        a last resort so the logging module doesn't complain).
    """

    retval: List[logging.Handler] = []

    # --logging_syslog (optionally with --logging_syslog_facility)
    # sets up for logging to use the standard system syslogd as a
    # sink.
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            # facility_name may be None; dict.get then falls back to
            # LOG_USER.
            facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
            syslog_handler = SysLogHandler(facility=facility, address="/dev/log")
            syslog_handler.setFormatter(
                MillisecondAwareFormatter(
                    fmt=logging_format,
                    datefmt=config.config["logging_date_format"],
                )
            )
            retval.append(syslog_handler)

    # --logging_filename (with friends --logging_filename_count and
    # --logging_filename_maxsize) set up logging to a file on the
    # filesystem with automatic rotation when it gets too big.
    if config.config["logging_filename"]:
        max_bytes = config.config["logging_filename_maxsize"]
        assert max_bytes and isinstance(max_bytes, int)
        backup_count = config.config["logging_filename_count"]
        assert backup_count and isinstance(backup_count, int)
        file_handler = RotatingFileHandler(
            config.config["logging_filename"],
            maxBytes=max_bytes,
            backupCount=backup_count,
        )
        file_handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=logging_format,
                datefmt=config.config["logging_date_format"],
            )
        )
        retval.append(file_handler)

    # --logging_console is, ahem, logging to the console.
    if config.config["logging_console"]:
        console_handler = logging.StreamHandler(sys.stderr)
        console_handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=logging_format,
                datefmt=config.config["logging_date_format"],
            )
        )
        retval.append(console_handler)

    # --logging_info_is_print echoes any message to logger.info(x) as
    # a print statement on stdout.
    if config.config["logging_info_is_print"]:
        print_handler = logging.StreamHandler(sys.stdout)
        print_handler.addFilter(OnlyInfoFilter())
        retval.append(print_handler)

    if len(retval) == 0:
        retval.append(logging.NullHandler())
    return retval
def _add_logging_handlers_and_filters(
    logger: logging.Logger, handlers: List[logging.Handler]
) -> None:
    """Adds a list of handlers to the root logger, installing the
    standard filters on each one first.

    Args:
        logger: the (root) logger to add handlers to
        handlers: the handlers to install
    """

    for handler in handlers:
        # These two are always present and set %(prefix) and %(suffix)
        # if appropriate (see :class:`LoggingContext`) or make them
        # empty strings so the format string renders cleanly.
        handler.addFilter(PrefixAddingFilter("", False))
        handler.addFilter(SuffixAddingFilter("", False))

        # --logging_squelch_repeats allows code to request repeat logging
        # messages (identical log site and message contents) to be
        # silenced.  Logging code must request this explicitly, it isn't
        # automatic.  This option just allows the silencing to happen.
        if config.config["logging_squelch_repeats"]:
            handler.addFilter(SquelchRepeatedMessagesFilter())

        # --logging_probabilistically allows code to request
        # non-deterministic logging where messages have some probability
        # of being produced.  Logging code must request this explicitly.
        # This option just allows the non-deterministic behavior to
        # happen.  Disabling it will cause every log message to be
        # produced.
        if config.config["logging_probabilistically"]:
            handler.addFilter(ProbabilisticFilter())
        logger.addHandler(handler)
def initialize_logging(logger=None) -> logging.Logger:
    """Initialize logging for the program.  See module level comments
    for information about what functionality this provides and how to
    enable or disable functionality via the commandline.

    If you use the :meth:`bootstrap.initialize` decorator on your program's
    entry point, it will call this for you.  See
    :meth:`pyutils.bootstrap.initialize` for more details.

    Args:
        logger: ignored; kept for backwards compatibility.  The root
            logger is always (re)configured and returned.

    Returns:
        The configured root logger.
    """
    global LOGGING_INITIALIZED
    # Idempotent: only the first call does any work.
    if LOGGING_INITIALIZED:
        return logging.getLogger()
    LOGGING_INITIALIZED = True

    clear_preexisting = config.config["logging_clear_preexisting_handlers"]
    preexisting_handlers_count = 0
    logging.basicConfig(force=clear_preexisting)
    logger = logging.getLogger()

    # --logging_clear_preexisting_handlers removes logging handlers
    # that were registered by global statements during imported module
    # setup.
    if clear_preexisting:
        while logger.hasHandlers():
            logger.removeHandler(logger.handlers[0])
            preexisting_handlers_count += 1

    # --logging_config_file pulls logging settings from a config file
    # skipping the rest of this setup.
    if config.config["logging_config_file"] is not None:
        fileConfig(config.config["logging_config_file"])
        return logger

    # Global default logging level (--logging_level); messages below
    # this level will be silenced.
    logging_level = config.config["logging_level"]
    assert logging_level
    logging_level = logging_level.upper()
    default_logging_level = getattr(logging, logging_level, None)
    if not isinstance(default_logging_level, int):
        raise ValueError(f'Invalid level: {config.config["logging_level"]}')
    logger.setLevel(default_logging_level)

    logging_format = _construct_logging_format()
    facility_name = None
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            if config.config["logging_syslog_facility"]:
                facility_name = "LOG_" + config.config["logging_syslog_facility"]
    handlers: List[logging.Handler] = _construct_handlers_list(
        logging_format, facility_name
    )
    _add_logging_handlers_and_filters(logger, handlers)

    # --logging_captures_prints, if set, will capture and log.info
    # anything printed on stdout.
    if config.config["logging_captures_prints"]:
        import builtins

        def print_and_also_log(*arg, **kwarg):
            f = kwarg.get("file", None)
            # Things printed to stderr are probably more interesting;
            # log those as warnings.
            if f == sys.stderr:
                logger.warning(*arg)
            else:
                logger.info(*arg)
            # Still do the real print via the saved builtin.
            BUILT_IN_PRINT(*arg, **kwarg)

        builtins.print = print_and_also_log

    # At this point the logger is ready, handlers are set up,
    # etc... so log about the logging configuration.
    _log_about_logging(
        logger,
        default_logging_level,
        preexisting_handlers_count,
        logging_format,
        facility_name,
    )
    return logger
def get_logger(name: str = ""):
    """Fetch the named logger, ensuring global logging is initialized.

    Args:
        name: the logger name; the default "" means the root logger.

    Returns:
        The global (initialized) logger.
    """
    named_logger = logging.getLogger(name)
    return initialize_logging(named_logger)
1022 class OutputMultiplexer(object):
1023 """A class that broadcasts printed messages to several sinks
1024 (including various logging levels, different files, different file
1025 handles, the house log, etc...). See also
1026 :class:`OutputMultiplexerContext` for an easy usage pattern.
# NOTE(review): this listing is elided; several original lines (e.g. the
# close of this docstring, the "def __init__" header, LOG_INFO and HLOG
# members) are missing.  Comments below describe only the visible lines.
# Destination: bit flags naming each sink print() can write to; flags
# may be OR'd together into a "destination_bitv" bitvector.
1029 class Destination(enum.IntEnum):
1030 """Bits in the destination_bitv bitvector. Used to indicate the
1031 output destination."""
1034 LOG_DEBUG = 0x01 # ⎫
1036 LOG_WARNING = 0x04 # ⎬ Must provide logger to the c'tor.
1037 LOG_ERROR = 0x08 # ⎪
1038 LOG_CRITICAL = 0x10 # ⎭
1039 FILENAMES = 0x20 # Must provide a filename to the c'tor.
1040 FILEHANDLES = 0x40 # Must provide a handle to the c'tor.
# Convenience mask covering all of the LOG_* bits at once.
1042 ALL_LOG_DESTINATIONS = (
1043 LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
# 0x8F = all LOG_* bits plus, presumably, an HLOG bit at 0x80 (that
# member's definition is elided but HLOG is referenced in print() below).
1045 ALL_OUTPUT_DESTINATIONS = 0x8F
# Constructor parameters ("def __init__(" header itself is elided here):
1050 destination_bitv: int,
1053 filenames: Optional[Iterable[str]] = None,
1054 handles: Optional[Iterable[io.TextIOWrapper]] = None,
1057 Constructs the OutputMultiplexer instance.
1060 destination_bitv: a bitvector where each bit represents an
1061 output destination. Multiple bits may be set.
1062 logger: if LOG_* bits are set, you must pass a logger here.
1063 filenames: if FILENAMES bit is set, this should be a list of
1064 files you'd like to output into. This code handles opening
1065 and closing said files.
1066 handles: if FILEHANDLES bit is set, this should be a list of
1067 already opened filehandles you'd like to output into. The
1068 handles will remain open after the scope of the multiplexer.
# Default to the root logger when the caller passed logger=None.
1071 logger = logging.getLogger(None)
1072 self.logger = logger
# self.f: files this instance opened (and therefore owns and closes);
# opened unbuffered in binary mode so output appears immediately.
1074 self.f: Optional[List[Any]] = None
1075 if filenames is not None:
1076 self.f = [open(filename, "wb", buffering=0) for filename in filenames]
# Fail fast if the FILENAMES bit was requested without any filenames.
1078 if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
1079 raise ValueError("Filenames argument is required if bitv & FILENAMES")
# self.h: caller-owned, already-open handles; never closed by this class.
1082 self.h: Optional[List[Any]] = None
1083 if handles is not None:
1084 self.h = list(handles)
1086 if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
1087 raise ValueError("Handle argument is required if bitv & FILEHANDLES")
# Re-validates destination_bitv against the sinks provided above.
1090 self.set_destination_bitv(destination_bitv)
1092 def get_destination_bitv(self):
1093 """Where are we outputting?"""
1094 return self.destination_bitv
1096 def set_destination_bitv(self, destination_bitv: int):
1097 """Change the output destination_bitv to the one provided."""
1098 if destination_bitv & self.Destination.FILENAMES and self.f is None:
1099 raise ValueError("Filename argument is required if bitv & FILENAMES")
1100 if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
1101 raise ValueError("Handle argument is required if bitv & FILEHANDLES")
1102 self.destination_bitv = destination_bitv
1104 def print(self, *args, **kwargs):
1105 """Produce some output to all sinks."""
# Deferred import, presumably to dodge a circular import at module load
# time -- TODO confirm against pyutils.string_utils.
1106 from pyutils.string_utils import _sprintf, strip_escape_sequences
# Emulate builtin print()'s keyword arguments: end and sep only.
1108 end = kwargs.pop("end", None)
1110 if not isinstance(end, str):
1111 raise TypeError("end must be None or a string")
1112 sep = kwargs.pop("sep", None)
1114 if not isinstance(sep, str):
1115 raise TypeError("sep must be None or a string")
# Any keyword other than end/sep is rejected, like builtin print().
1117 raise TypeError("invalid keyword arguments to print()")
1118 buf = _sprintf(*args, end="", sep=sep)
# File sinks receive the raw (unstripped) text, utf-8 encoded.
1125 if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
1127 _.write(buf.encode("utf-8"))
1130 if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
# Log sinks receive the text with ANSI escape sequences stripped out.
1135 buf = strip_escape_sequences(buf)
1136 if self.logger is not None:
1137 if self.destination_bitv & self.Destination.LOG_DEBUG:
1138 self.logger.debug(buf)
1139 if self.destination_bitv & self.Destination.LOG_INFO:
1140 self.logger.info(buf)
1141 if self.destination_bitv & self.Destination.LOG_WARNING:
1142 self.logger.warning(buf)
1143 if self.destination_bitv & self.Destination.LOG_ERROR:
1144 self.logger.error(buf)
1145 if self.destination_bitv & self.Destination.LOG_CRITICAL:
1146 self.logger.critical(buf)
# HLOG destination: handler body elided; presumably routes to hlog().
1147 if self.destination_bitv & self.Destination.HLOG:
# close(): only the files this instance opened (self.f) are closed; the
# caller-provided handles in self.h stay open by design.
1151 """Close all open files."""
1152 if self.f is not None:
1157 class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
# Context-manager / decorator wrapper around OutputMultiplexer; usable
# both as `with OutputMultiplexerContext(...) as mplex:` and, via
# contextlib.ContextDecorator, as a function decorator.
1159 A context that uses an :class:`OutputMultiplexer`. e.g.::
1161 with OutputMultiplexerContext(
1162 OutputMultiplexer.LOG_INFO |
1163 OutputMultiplexer.LOG_DEBUG |
1164 OutputMultiplexer.FILENAMES |
1165 OutputMultiplexer.FILEHANDLES,
1166 filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
1169 mplex.print("This is a log message!")
# Constructor parameters ("def __init__(" header elided in this listing):
1174 destination_bitv: OutputMultiplexer.Destination,
1182 destination_bitv: a bitvector that indicates where we should
1183 send output. See :class:`OutputMultiplexer` for options.
1184 logger: optional logger to use for log destination messages.
1185 filenames: optional filenames to write for filename destination
1187 handles: optional open filehandles to write for filehandle
1188 destination messages.
# Delegates to OutputMultiplexer.__init__ (call site partially elided).
1193 filenames=filenames,
1197 def __enter__(self):
# Returns True only when no exception occurred, so exceptions raised in
# the with-body propagate.  Body elided; presumably closes owned files.
1200 def __exit__(self, etype, value, traceback) -> bool:
1202 if etype is not None:
# Build a logging.FileHandler appending to `filename`, formatted with the
# program's configured --logging_format / --logging_date_format, for the
# system-wide abnormal-exit records written by the two hooks below.  The
# tail of this function (closing the setFormatter call and, presumably,
# `return handler`) is elided in this listing.
1207 def _get_systemwide_abnormal_exit_handler(filename: str) -> FileHandler:
1208 handler = FileHandler(filename)
1209 handler.setFormatter(
1210 MillisecondAwareFormatter(
1211 fmt=_construct_logging_format(),
1212 datefmt=config.config["logging_date_format"],
1218 def non_zero_return_value(ret: Any):
1220 Special method hooked from bootstrap.py to optionally keep a system-wide
1221 record of non-zero python program exits.
1224 ret: the return value
# Record-file path from --logging_non_zero_exits_record_path.  A guard
# skipping this when the option is unset (line 1228, presumably an
# "if record:") is elided in this listing -- confirm against full file.
1227 record = config.config['logging_non_zero_exits_record_path']
1229 logger = logging.getLogger()
1230 handler = _get_systemwide_abnormal_exit_handler(record)
1231 program = config.PROGRAM_NAME
1232 args = config.ORIG_ARGV
# Temporarily attach the record-file handler at INFO so exactly this one
# message lands in the system-wide record; LoggingContext restores the
# logger's previous handlers/level on exit.
1233 with LoggingContext(logger, handlers=[handler], level=logging.INFO):
1234 logger.info('%s (%s) exited with non-zero value %s', program, args, ret)
1239 def unhandled_top_level_exception(exc_type: type, exc_value, exc_tb):
1241 Special method hooked from bootstrap.py to optionally keep a system-wide
1242 record of unhandled top level exceptions.
1245 exc_type: the type of the unhandled exception
# Record-file path from --logging_unhandled_top_level_exceptions_record_path
# (the unset-option guard line is elided in this listing).
1248 record = config.config['logging_unhandled_top_level_exceptions_record_path']
1250 logger = logging.getLogger()
1251 handler = _get_systemwide_abnormal_exit_handler(record)
1252 program = config.PROGRAM_NAME
1253 args = config.ORIG_ARGV
# Where the exception was raised: source file and line number pulled from
# the traceback's frame.
1254 site_file = exc_tb.tb_frame.f_code.co_filename
1255 site_lineno = exc_tb.tb_lineno
# Log one record about the exception to the record file via a temporary
# handler; LoggingContext restores the logger's prior state afterwards.
1256 with LoggingContext(logger, handlers=[handler], level=logging.INFO):
1258 '%s (%s) took an unhandled top-level exception (type=%s(%s)) at %s:%s',
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`.  This is pretty hacky but used
    by a bunch of (my) code.  Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    Args:
        message: the message to append to the house log.

    TODO: this needs to move.
    """
    import subprocess

    # Pass argv as a list with shell=False (the subprocess.run default)
    # so `message` needs no shell quoting at all; the previous os.system()
    # implementation had to hand-escape embedded single quotes and was
    # fragile against shell metacharacters.  The `--` still guards
    # against messages that start with `-` being parsed as options.
    subprocess.run(
        ["/usr/bin/logger", "-p", "local7.info", "--", message],
        check=False,  # best-effort: preserve old behavior of ignoring failures
    )
1283 if __name__ == "__main__":