2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level with :class:`LoggingContext`.
15 * Prepend or append a message to every log record also with
16 :class:`LoggingContext`.
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
* optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * and optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
37 * There are also :class:`LoggerAdapter` classes to implement prefix/suffix
38 functionality without using :class:`LoggingContext` by wrapping the
41 To use this functionality, call :meth:`initialize_logging` early
42 in your program entry point. If you use the
43 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
44 point, it will call this for you automatically.
56 from logging.config import fileConfig
57 from logging.handlers import RotatingFileHandler, SysLogHandler
58 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
61 from overrides import overrides
63 # This module is commonly used by others in here and should avoid
64 # taking any unnecessary dependencies back on them.
65 from pyutils import argparse_utils, config, misc_utils
# Register this module's commandline flags with the central pyutils config
# system. Each flag below is (presumably) an argument to a cfg.add_argument()
# call whose opening line is elided from this excerpt.
cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
# --logging_config_file: delegate all logging setup to a standard
# fileConfig-style configuration file instead of the flags below.
    "--logging_config_file",
    type=argparse_utils.valid_filename,
    help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
# --logging_level: the global default threshold for the root logger.
    choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    help="The global default level below which to squelch log messages",
# --logging_format: the record format string handed to the Formatter.
    help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
# --logging_date_format: strftime format for %(asctime)s in --logging_format.
    "--logging_date_format",
    default="%Y/%m/%dT%H:%M:%S.%f%z",
    help="The format of any dates in --logging_format.",
# --logging_console: boolean; add a stderr StreamHandler.
    action=argparse_utils.ActionNoYes,
    help="Should we log to the console (stderr)",
# --logging_filename and friends: rotated logfile destination.
    "--logging_filename",
    help="The filename of the logfile to write.",
    "--logging_filename_maxsize",
    type=argparse_utils.valid_byte_count,
    default=(1024 * 1024),
    help="The maximum size (in bytes) to write to the logging_filename.",
    "--logging_filename_count",
    help="The number of logging_filename copies to keep before deleting.",
# --logging_syslog / --logging_syslog_facility: UNIX syslog destination.
    action=argparse_utils.ActionNoYes,
    help="Should we log to localhost's syslog.",
    "--logging_syslog_facility",
    metavar="SYSLOG_FACILITY_LIST",
    help="The default syslog message facility identifier",
# --logging_debug_threads / --logging_debug_modules: prepend pid/tid or
# file/function/lineno data to every record for multithreaded debugging.
    "--logging_debug_threads",
    action=argparse_utils.ActionNoYes,
    help="Should we prepend pid/tid data to all log messages?",
    "--logging_debug_modules",
    action=argparse_utils.ActionNoYes,
    help="Should we prepend module/function data to all log messages?",
# --logging_info_is_print: echo INFO-level records on stdout too.
    "--logging_info_is_print",
    action=argparse_utils.ActionNoYes,
    help="logging.info also prints to stdout.",
# --logging_squelch_repeats / --logging_probabilistically: opt-in record
# filtering requested by decorated functions (see decorators below).
    "--logging_squelch_repeats",
    action=argparse_utils.ActionNoYes,
    help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
    "--logging_probabilistically",
    action=argparse_utils.ActionNoYes,
    help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
# See also: OutputMultiplexer
    "--logging_captures_prints",
    action=argparse_utils.ActionNoYes,
    help="When calling print, also log.info automatically.",
# --logging_clear_preexisting_handlers: remove handlers installed by other
# imports before this module configures the root logger.
    "--logging_clear_preexisting_handlers",
    action=argparse_utils.ActionNoYes,
    "Should logging code clear preexisting global logging handlers and thus insist that is "
    + "alone can add handlers. Use this to work around annoying modules that insert global "
    + "handlers with formats and logging levels you might now want. Caveat emptor, this may "
    + "cause you to miss logging messages."
# Paths used to record abnormal program terminations.
    "--logging_non_zero_exits_record_path",
    default="/var/log/abnormal_python_exits.log",
    help="The filename in which to record non-zero exits.",
    "--logging_unhandled_top_level_exceptions_record_path",
    default="/var/log/abnormal_python_exits.log",
    help="The filename in which to record unhandled top level exceptions.",
# Saved reference to the builtin print so --logging_captures_prints can wrap
# it and still call through to the real implementation.
BUILT_IN_PRINT = print
# Guard so initialize_logging() is idempotent per-process.
LOGGING_INITIALIZED = False
# A map from logging_callsite_id -> count of logged messages.
squelched_logging_counts: Dict[str, int] = {}
def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
    # Decorator: registers the decorated function in squelched_logging_counts
    # so SquelchRepeatedMessagesFilter (below) can drop its repeated messages.
    A decorator that marks a function as interested in having the logging
    messages that it produces be squelched (ignored) after it logs the
    same message more than N times.
    This decorator affects *ALL* logging messages produced
    within the decorated function. That said, messages must be
    identical in order to be squelched. For example, if the same line
    of code produces different messages (because of, e.g., a format
    string), the messages are considered to be different.
    An example of this from the pyutils code itself can be found in
    :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg` methods::
        @logging_utils.squelch_repeated_log_messages(1)
        name: Optional[str] = "",
        red: Optional[int] = None,
        green: Optional[int] = None,
        blue: Optional[int] = None,
        force_16color: bool = False,
        force_216color: bool = False,
    These methods log stuff like "Using 24-bit color strategy" which
    gets old really fast and fills up the logs. By decorating the methods
    with :code:`@logging_utils.squelch_repeated_log_messages(1)` the code
    is requesting that its logged messages be dropped silently after the
    first one is produced (note the argument 1).
    Users can insist that all logged messages always be reflected in the
    logs using the :code:`--no_logging_squelch_repeats` flag but the default
    behavior is to allow code to request it be squelched.
    :code:`--logging_squelch_repeats` only affects code with this decorator
    on it; it ignores all other code.
    squelch_after_n_repeats: the number of repeated messages allowed to
    log before subsequent messages are silently dropped.
    def squelch_logging_wrapper(f: Callable):
        # Import here to avoid a module-level dependency cycle.
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        # Get rid of module paths, e.g. pyutils.ansi:bg -> ansi:bg which
        # is what we're going to need below.
        identifier = re.sub(r"[^\.]+\.", "", identifier)
        # Record the threshold; the wrapper presumably returns f unchanged
        # (the return statement is elided from this excerpt).
        squelched_logging_counts[identifier] = squelch_after_n_repeats
    return squelch_logging_wrapper
class SquelchRepeatedMessagesFilter(logging.Filter):
    """A filter that only logs messages from a given site with the same
    (exact) message at the same logging level N times and ignores
    subsequent attempts to log.
    This filter only affects logging messages that repeat more than a
    threshold number of times from functions that are tagged with the
    :code:`@logging_utils.squelched_logging_ok` decorator (see above);
    all others are ignored.
    This functionality is enabled by default but can be disabled via
    the :code:`--no_logging_squelch_repeats` commandline flag.

    def __init__(self) -> None:
        # Per-logsite occurrence counts, keyed by path+line+level+message.
        self.counters: collections.Counter = collections.Counter()

    def filter(self, record: logging.LogRecord) -> bool:
        """Should we drop this log message?"""
        id1 = f"{record.module}:{record.funcName}"
        # Call sites never registered via the decorator are presumably
        # always allowed through (that branch's body is elided here).
        if id1 not in squelched_logging_counts:
        threshold = squelched_logging_counts[id1]
        # Key includes level and message text: only *identical* repeats
        # at the same site are squelched.
        logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
        count = self.counters[logsite]
        self.counters[logsite] += 1
        return count < threshold
# A map from function_identifier -> probability of logging (0.0%..100.0%)
probabilistic_logging_levels: Dict[str, float] = {}


def logging_is_probabilistic(probability_of_logging: float) -> Callable:
    """A decorator that indicates that all logging statements within the
    scope of a particular (marked via decorator) function are not
    deterministic (i.e. they do not always unconditionally log) but rather
    are probabilistic (i.e. they log N% of the time, randomly) when the
    user passes the :code:`--logging_probabilistically` commandline flag
    (which is enabled by default).
    This affects *ALL* logging statements within the marked function.
    If you want it to only affect a subset of logging statements,
    log those statements in a separate function that you invoke
    from within the "too large" scope and mark that separate function
    with the :code:`logging_is_probabilistic` decorator instead.
    That this functionality can be disabled (forcing all logged
    messages to produce output) via the
    :code:`--no_logging_probabilistically` cmdline argument.

    def probabilistic_logging_wrapper(f: Callable):
        # Import here to avoid a module-level dependency cycle.
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        # Register the probability for ProbabilisticFilter (below); the
        # wrapper presumably returns f unchanged (elided in this excerpt).
        probabilistic_logging_levels[identifier] = probability_of_logging
    return probabilistic_logging_wrapper
class ProbabilisticFilter(logging.Filter):
    A filter that logs messages probabilistically (i.e. randomly at some
    percent chance). This filter is used with a decorator (see
    :meth:`logging_is_probabilistic`) to implement the
    :code:`--logging_probabilistically` commandline flag.
    This filter only affects logging messages from functions that have
    been tagged with the `@logging_utils.probabilistic_logging` decorator.

    def filter(self, record: logging.LogRecord) -> bool:
        """Should the message be logged?"""
        identifier = f"{record.module}:{record.funcName}"
        # Unregistered call sites default to 100.0 (always log).
        threshold = probabilistic_logging_levels.get(identifier, 100.0)
        return misc_utils.execute_probabilistically(threshold)
class OnlyInfoFilter(logging.Filter):
    """Pass through only records logged at exactly the INFO level.

    Used by the :code:`--logging_info_is_print` commandline option to
    select the subset of the logging stream that gets echoed to a
    stdout handler.
    """

    def filter(self, record: logging.LogRecord):
        """Return True iff this record should be emitted (INFO only)."""
        return logging.INFO == record.levelno
class PrefixAddingFilter(logging.Filter):
    """A filter that adds a string prefix to the log record for the
    formatter to later fill in. Requires a %(prefix)s in the format

    def __init__(self, prefix: str, klobber: bool = False):
            prefix: the prefix string to add
            klobber: should we overwrite other prefixes?
        # Whether to overwrite (clobber) a prefix already on the record.
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        # If klobbering, overwrite unconditionally (the `if self.klobber:`
        # guard is elided from this excerpt)...
            record.prefix = self.prefix
        # ...otherwise only set the prefix when the record has none yet.
        elif "prefix" not in record.__dict__:
            record.prefix = self.prefix
class SuffixAddingFilter(logging.Filter):
    """A filter that adds a string suffix to the log record for the
    formatter to later fill in. Requires a %(suffix)s in the format

    def __init__(self, suffix: str, klobber: bool = False):
            suffix: the suffix string to add
            klobber: should we overwrite other suffixes?
        # Whether to overwrite (clobber) a suffix already on the record.
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        # If klobbering, overwrite unconditionally (the `if self.klobber:`
        # guard is elided from this excerpt)...
            record.suffix = self.suffix
        # ...otherwise only set the suffix when the record has none yet.
        elif "suffix" not in record.__dict__:
            record.suffix = self.suffix
class PrependingLogAdapter(logging.LoggerAdapter):
    """:class:`LoggingContext` adds prefixes and suffixes using a
    logging.Filter that must insert "prefix" or "suffix" members into
    each log record by using :class:`PrefixAddingFilter` and
    :class:`SuffixAddingFilter`. This relies on the logging format
    string containing a %(prefix)s and a %(suffix)s to work correctly.
    This is an alternate approach that basically just wraps the logger
    in a class that has the same interface and thunks most calls down
    to the wrapped logger. It might be useful if you don't want to use
    :class:`LoggingContext` or its friends.
    >>> logger = logging.getLogger(__name__ + ".PrependingLogAdapter")
    >>> logger.setLevel(logging.INFO)
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))
    At this point logger doesn't have any format string and so it is
    missing %(prefix)s and %(suffix)s. It also doesn't have a
    :class:`PrefixAddingFilter` or :class:`SuffixAddingFilter` added.
    So using it in a :class:`LoggingContext` will not work.
    But we can still add a prefix or suffix by just wrapping it:
    >>> logger.info("TEST")
    >>> log = PrependingLogAdapter.wrap_logger('prefix> ', logger)

    def process(self, msg, kwargs):
        # LoggerAdapter hook: prepend our stored prefix to each message.
        return f'{self.extra.get("prefix", "")}{msg}', kwargs

    # NOTE(review): wrap_logger takes no self and is called on the class
    # above, so it is presumably decorated @staticmethod on an elided line.
    def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LogAdapter that prepends
        a given string to every log message produced.
            prefix: the message to prepend to every log message.
            logger: the logger whose messages to modify.
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        return PrependingLogAdapter(logger, {"prefix": prefix})
class AppendingLogAdapter(logging.LoggerAdapter):
    """:class:`LoggingContext` adds prefixes and suffixes using a
    logging.Filter that must insert "prefix" or "suffix" members into
    each log record by using :class:`PrefixAddingFilter` and
    :class:`SuffixAddingFilter`. This relies on the logging format
    string containing a %(prefix)s and a %(suffix)s to work correctly.
    This is an alternate approach that basically just wraps the logger
    in a class that has the same interface and thunks most calls down
    to the wrapped logger. It might be useful if you don't want to use
    :class:`LoggingContext` or its friends.
    >>> logger = logging.getLogger(__name__ + ".AppendingLogAdapter")
    >>> logger.setLevel(logging.INFO)
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))
    At this point logger doesn't have any format string and so it is
    missing %(prefix)s and %(suffix)s. It also doesn't have a
    :class:`PrefixAddingFilter` or :class:`SuffixAddingFilter` added.
    So using it in a :class:`LoggingContext` will not work.
    But we can still add a prefix or suffix by just wrapping it:
    >>> logger.info("TEST")
    >>> log = AppendingLogAdapter.wrap_logger('!!!', logger)

    def process(self, msg, kwargs):
        # LoggerAdapter hook: append our stored suffix to each message.
        return f'{msg}{self.extra.get("suffix", "")}', kwargs

    # NOTE(review): wrap_logger takes no self and is called on the class
    # above, so it is presumably decorated @staticmethod on an elided line.
    def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LoggerAdapter that appends
        a given string to every log message produced.
            suffix: the message to append to every log message.
            logger: the logger whose messages to modify.
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        return AppendingLogAdapter(logger, {"suffix": suffix})
class LoggingContext(contextlib.ContextDecorator):
    # Context manager / decorator that temporarily changes a logger's level,
    # handlers, and/or per-message prefix/suffix within its scope.
        logger: logging.Logger,
        level: Optional[int] = None,
        handlers: Optional[List[logging.Handler]] = None,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
        """This is a logging context that can be used to temporarily change the
        way we are logging within its scope. Logging changes may include:
        - Changing the logging level (e.g. from INFO to DEBUG)
        - Adding a prefix or suffix to every log message produced
        - Adding temporary Handlers to direct the logging output elsewhere
        Setup for doctest / examples. This will normally be taken care of
        by code in :meth:`initialize_logging` so you don't have to worry
        >>> logging_format = "%(prefix)s%(message)s%(suffix)s"
        >>> logger = logging.getLogger(__name__ + ".LoggingContext")
        >>> logger.setLevel(logging.INFO)
        >>> handler = logging.StreamHandler(sys.stdout)
        >>> handler.setFormatter(
        ...     MillisecondAwareFormatter(
        ...         fmt=logging_format,
        >>> logger.addHandler(handler)
        >>> logger.addFilter(PrefixAddingFilter(None))
        >>> logger.addFilter(SuffixAddingFilter(None))
        First, this logger should currently be configured to send
        INFO+ messages to sys.stdout. Let's see it in action:
        >>> logger.info("Hello world!")
        >>> logger.debug("You should not see this")
        The first example is to simply change the level of the logger.
        Here we temporarily change it to DEBUG within the body of the
        :class:`LoggingContext`:
        >>> with LoggingContext(logger, level=logging.DEBUG):
        ...     logger.debug("You should see this now")
        ...     logger.info("Of course you should still see this too")
        You should see this now
        Of course you should still see this too
        >>> logger.debug("Outside of the context we are at INFO again")
        >>> logger.debug("(which is why you don't see these)")
        >>> logger.info("But you should see this at INFO level")
        But you should see this at INFO level
        The prefix and suffix argument prepend or append a message to
        all log output. To do this, you need %(prefix)s and
        %(suffix)s placeholders in your logger format string
        indicating where to insert the data. This is useful, for
        example, to add an active request identifier to the set of log
        messages produced while processing it.
        >>> logger.info("About to work on a new request")
        About to work on a new request
        >>> with LoggingContext(logger, prefix='10.0.0.13> '):
        ...     logger.info("Working on it now")
        10.0.0.13> Working on it now
        logger.info("Done with that request")
        Done with that request
        LoggingContext can also be used to add temporary handler(s).
        This code temporarily uses two stdout handlers to double the
        output for testing purposes but you could also temporarily,
        e.g., add a RotatingFileHandler or SysLogHandler etc...
        >>> with LoggingContext(logger, handlers=[logging.StreamHandler(sys.stdout)]):
        ...     logger.info("TEST")
        Once leaving the context, logger's behavior is restored. In
        this case, the extra handler is removed so output will not
        >>> logger.info("OUTSIDE")
        LoggingContext can also be used as a decorator if that is more
        >>> @LoggingContext(logger, level=logging.DEBUG)
        ... def log_stuff(logger):
        ...     logger.debug("But inside, the decorator has changed us to DEBUG")
        >>> logger.debug("Outside, we're at INFO level and you don't see this")
        >>> log_stuff(logger)
        But inside, the decorator has changed us to DEBUG
        >>> logger.debug("And, of course, out here we're still at INFO afterwards")
        # Saved so __exit__ can restore the level it found on entry.
        self.old_level: Optional[int] = None
        self.handlers = handlers
        # klobber=True: our temporary prefix/suffix wins over existing ones.
        self.prefix: Optional[logging.Filter] = PrefixAddingFilter(prefix, True)
        self.suffix: Optional[logging.Filter] = SuffixAddingFilter(suffix, True)

    def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
        # Apply the requested temporary level/handlers/filters; each step is
        # presumably guarded by a None-check on an elided line.
        self.old_level = self.logger.level
        self.logger.setLevel(self.level)
        for handler in self.handlers:
            self.logger.addHandler(handler)
        self.logger.addFilter(self.prefix)
        self.logger.addFilter(self.suffix)

    def __exit__(self, et, ev, tb) -> None:
        # Undo __enter__ in reverse order, restoring the original level last.
        self.logger.removeFilter(self.suffix)
        self.logger.removeFilter(self.prefix)
        for handler in self.handlers:
            self.logger.removeHandler(handler)
        self.logger.setLevel(self.old_level)
        return None  # propagate exceptions out
class MillisecondAwareFormatter(logging.Formatter):
    """A formatter for adding milliseconds to log messages which, for
    whatever reason, the default Python logger doesn't do.

    .. note::
        You probably don't need to use this directly but it is
        wired in under :meth:`initialize_logging` so that the
        timestamps in log messages have millisecond level
        granularity.
    """

    # Turn record.created (a UNIX timestamp) into a tz-aware datetime.
    converter = datetime.datetime.fromtimestamp  # type: ignore

    def formatTime(self, record, datefmt=None):
        """Render the record's creation time.

        Args:
            record: the log record whose timestamp is being rendered.
            datefmt: optional strftime format string. When omitted, a
                default "YYYY-mm-dd HH:MM:SS,mmm" form (with zero-padded
                milliseconds) is used.

        Returns:
            The formatted timestamp string.
        """
        ct = MillisecondAwareFormatter.converter(
            record.created, pytz.timezone("US/Pacific")
        )
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y-%m-%d %H:%M:%S")
            # Bugfix: the previous f-string spec "{record.msecs:%03d}" is not
            # a valid format spec and raised ValueError at runtime. Truncate
            # the float msecs to an int and zero-pad to three digits.
            s = f"{t},{int(record.msecs):03d}"
        return s
def _log_about_logging(
    # (additional parameters — at least `logger` and `fmt` are referenced in
    # the body below — appear to be elided from this excerpt's signature)
    default_logging_level,
    preexisting_handlers_count,
    """This is invoked automatically after logging is initialized such
    that the first messages in the log are about how logging itself
    # Map the numeric level back to its name for human-readable output.
    level_name = logging._levelToName.get(
        default_logging_level, str(default_logging_level)
    logger.debug("Initialized global logging; logging level is %s.", level_name)
    # Describe each logging-related flag that is in effect.
    config.config["logging_clear_preexisting_handlers"]
    and preexisting_handlers_count > 0
        "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
        preexisting_handlers_count,
    logger.debug('Logging format specification is "%s"', fmt)
    if config.config["logging_debug_threads"]:
        "...Logging format spec captures tid/pid. (--logging_debug_threads)"
    if config.config["logging_debug_modules"]:
        "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
    if config.config["logging_syslog"]:
        "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
    if config.config["logging_filename"]:
        'Logging to file "%s". (--logging_filename)',
        config.config["logging_filename"],
        "...with %d bytes max file size. (--logging_filename_maxsize)",
        config.config["logging_filename_maxsize"],
        "...and %d rotating backup file count. (--logging_filename_count)",
        config.config["logging_filename_count"],
    if config.config["logging_console"]:
        logger.debug("Logging to the console (stderr). (--logging_console)")
    if config.config["logging_info_is_print"]:
        "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
    if config.config["logging_squelch_repeats"]:
        "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
        "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
    if config.config["logging_probabilistically"]:
        "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
        "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
    if config.config["logging_captures_prints"]:
        "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
def _construct_logging_format() -> str:
    """Figure out the root logging format string based on commandline args."""

    # Custom or default --logging_format?
    if config.config["logging_format"]:
        fmt = config.config["logging_format"]
    elif config.config["logging_syslog"]:
        # Syslog supplies its own timestamps; include filename/pid instead.
        fmt = "%(levelname).1s:%(filename)s[%(process)d]: "
    # (else branch elided in this excerpt; default includes %(asctime)s)
    fmt = "%(levelname).1s:%(asctime)s: "
    # %(prefix)s / %(suffix)s are filled in by the Prefix/SuffixAddingFilters.
    fmt += "%(prefix)s%(message)s%(suffix)s"

    # --logging_debug_threads and --logging_debug_modules both affect
    # the format by prepending information about the pid/tid or
    if config.config["logging_debug_threads"]:
        fmt = f"%(process)d.%(thread)d|{fmt}"
    if config.config["logging_debug_modules"]:
        fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
def _construct_handlers_list(
    logging_format: str, facility_name: Optional[str]
) -> List[logging.Handler]:
    """Create the list of handlers to be added to the root logger
    based on commandline flags.
    retval: List[logging.Handler] = []

    # --logging_syslog (optionally with --logging_syslog_facility)
    # sets up for logging to use the standard system syslogd as a
    if config.config["logging_syslog"]:
        # Syslog isn't available on Windows.
        if sys.platform not in ("win32", "cygwin"):
            # Look the facility name up on the SysLogHandler class itself;
            # fall back to LOG_USER for unknown names.
            facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
            syslog_handler = SysLogHandler(facility=facility, address="/dev/log")
            syslog_handler.setFormatter(
                MillisecondAwareFormatter(
                    datefmt=config.config["logging_date_format"],
            retval.append(syslog_handler)

    # --logging_filename (with friends --logging_filename_count and
    # --logging_filename_maxsize) set up logging to a file on the
    # filesystem with automatic rotation when it gets too big.
    if config.config["logging_filename"]:
        max_bytes = config.config["logging_filename_maxsize"]
        assert max_bytes and isinstance(max_bytes, int)
        backup_count = config.config["logging_filename_count"]
        assert backup_count and isinstance(backup_count, int)
        file_handler = RotatingFileHandler(
            config.config["logging_filename"],
            backupCount=backup_count,
        file_handler.setFormatter(
            MillisecondAwareFormatter(
                datefmt=config.config["logging_date_format"],
        retval.append(file_handler)

    # --logging_console is, ahem, logging to the console.
    if config.config["logging_console"]:
        console_handler = logging.StreamHandler(sys.stderr)
        console_handler.setFormatter(
            MillisecondAwareFormatter(
                datefmt=config.config["logging_date_format"],
        retval.append(console_handler)

    # --logging_info_is_print echoes any message to logger.info(x) as
    # a print statement on stdout.
    if config.config["logging_info_is_print"]:
        print_handler = logging.StreamHandler(sys.stdout)
        print_handler.addFilter(OnlyInfoFilter())
        retval.append(print_handler)

    # Ensure at least one handler exists so records aren't warned about.
    retval.append(logging.NullHandler())
def _add_logging_handlers_and_filters(
    logger: logging.Logger, handlers: List[logging.Handler]
    """Adds a list of handlers to the root logger."""

    for handler in handlers:
        # These two are always present and set %(prefix) and %(suffix)
        # if appropriate (see :class:`LoggingContext`) or make them
        # empty strings. klobber=False so a LoggingContext can override.
        handler.addFilter(PrefixAddingFilter("", False))
        handler.addFilter(SuffixAddingFilter("", False))

        # --logging_squelch_repeats allows code to request repeat logging
        # messages (identical log site and message contents) to be
        # silenced. Logging code must request this explicitly, it isn't
        # automatic. This option just allows the silencing to happen.
        if config.config["logging_squelch_repeats"]:
            handler.addFilter(SquelchRepeatedMessagesFilter())

        # --logging_probabilistically allows code to request
        # non-deterministic logging where messages have some probability
        # of being produced. Logging code must request this explicitly.
        # This option just allows the non-deterministic behavior to
        # happen. Disabling it will cause every log message to be
        if config.config["logging_probabilistically"]:
            handler.addFilter(ProbabilisticFilter())
        logger.addHandler(handler)
def initialize_logging(logger=None) -> logging.Logger:
    """Initialize logging for the program. See module level comments
    for information about what functionality this provides and how to
    enable or disable functionality via the commandline.
    :meth:`bootstrap.initialize` decorator on your program's entry point,
    it will call this for you. See :meth:`pyutils.bootstrap.initialize`
    # Idempotence guard: only ever configure logging once per process.
    global LOGGING_INITIALIZED
    if LOGGING_INITIALIZED:
        return logging.getLogger()
    LOGGING_INITIALIZED = True

    clear_preexisting = config.config["logging_clear_preexisting_handlers"]
    preexisting_handlers_count = 0
    logging.basicConfig(force=clear_preexisting)
    # Note: the `logger` parameter is replaced by the root logger here.
    logger = logging.getLogger()

    # --logging_clear_preexisting_handlers removes logging handlers
    # that were registered by global statements during imported module
    if clear_preexisting:
        while logger.hasHandlers():
            logger.removeHandler(logger.handlers[0])
            preexisting_handlers_count += 1

    # --logging_config_file pulls logging settings from a config file
    # skipping the rest of this setup.
    if config.config["logging_config_file"] is not None:
        fileConfig(config.config["logging_config_file"])

    # Global default logging level (--logging_level); messages below
    # this level will be silenced.
    logging_level = config.config["logging_level"]
    logging_level = logging_level.upper()
    default_logging_level = getattr(logging, logging_level, None)
    if not isinstance(default_logging_level, int):
        raise ValueError(f'Invalid level: {config.config["logging_level"]}')
    logger.setLevel(default_logging_level)

    logging_format = _construct_logging_format()
    # Translate a flag like "USER" into the SysLogHandler attribute name
    # ("LOG_USER") expected by _construct_handlers_list.
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            if config.config["logging_syslog_facility"]:
                facility_name = "LOG_" + config.config["logging_syslog_facility"]
    handlers: List[logging.Handler] = _construct_handlers_list(
        logging_format, facility_name
    _add_logging_handlers_and_filters(logger, handlers)

    # --logging_captures_prints, if set, will capture and log.info
    # anything printed on stdout.
    if config.config["logging_captures_prints"]:

        def print_and_also_log(*arg, **kwarg):
            f = kwarg.get("file", None)
            # Always call through to the real builtin print as well.
            BUILT_IN_PRINT(*arg, **kwarg)

        builtins.print = print_and_also_log

    # At this point the logger is ready, handlers are set up,
    # etc... so log about the logging configuration.
        default_logging_level,
        preexisting_handlers_count,
def get_logger(name: str = ""):
    """Fetch the logger named `name` (the root logger by default),
    ensuring global logging has been initialized first."""
    return initialize_logging(logging.getLogger(name))
class OutputMultiplexer(object):
    """A class that broadcasts printed messages to several sinks
    (including various logging levels, different files, different file
    handles, the house log, etc...).  See also
    :class:`OutputMultiplexerContext` for an easy usage pattern.
    """

    class Destination(enum.IntEnum):
        """Bits in the destination_bitv bitvector.  Used to indicate the
        output destination."""

        # fmt: off
        LOG_DEBUG = 0x01     # ⎫
        LOG_INFO = 0x02      # ⎪
        LOG_WARNING = 0x04   # ⎬ Must provide logger to the c'tor.
        LOG_ERROR = 0x08     # ⎪
        LOG_CRITICAL = 0x10  # ⎭
        FILENAMES = 0x20     # Must provide a filename to the c'tor.
        FILEHANDLES = 0x40   # Must provide a handle to the c'tor.
        HLOG = 0x80          # Sends output to the house log via hlog().
        ALL_LOG_DESTINATIONS = (
            LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        )
        # NOTE(review): 0x8F omits LOG_CRITICAL, FILENAMES and FILEHANDLES;
        # preserved as-is for backward compatibility -- confirm intent.
        ALL_OUTPUT_DESTINATIONS = 0x8F
        # fmt: on

    def __init__(
        self,
        destination_bitv: int,
        *,
        logger=None,
        filenames: Optional[Iterable[str]] = None,
        handles: Optional[Iterable[io.TextIOWrapper]] = None,
    ):
        """
        Constructs the OutputMultiplexer instance.

        Args:
            destination_bitv: a bitvector where each bit represents an
                output destination.  Multiple bits may be set.
            logger: if LOG_* bits are set, you must pass a logger here.
            filenames: if FILENAMES bit is set, this should be a list of
                files you'd like to output into.  This code handles opening
                and closing said files.
            handles: if FILEHANDLES bit is set, this should be a list of
                already opened filehandles you'd like to output into.  The
                handles will remain open after the scope of the multiplexer.

        Raises:
            ValueError: a bit was set in destination_bitv whose required
                constructor argument (filenames / handles) was not given.
        """
        if logger is None:
            # Fall back on the root logger rather than requiring one.
            logger = logging.getLogger(None)
        self.logger = logger

        # Files we open ourselves (unbuffered, binary) for FILENAMES;
        # these are the only sinks close() will close.
        self.f: Optional[List[Any]] = None
        if filenames is not None:
            self.f = [open(filename, "wb", buffering=0) for filename in filenames]
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
                raise ValueError("Filenames argument is required if bitv & FILENAMES")

        # Caller-owned handles for FILEHANDLES; never closed here.
        self.h: Optional[List[Any]] = None
        if handles is not None:
            self.h = list(handles)
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
                raise ValueError("Handle argument is required if bitv & FILEHANDLES")

        self.set_destination_bitv(destination_bitv)

    def get_destination_bitv(self):
        """Where are we outputting?"""
        return self.destination_bitv

    def set_destination_bitv(self, destination_bitv: int):
        """Change the output destination_bitv to the one provided.

        Raises:
            ValueError: a bit was set whose required constructor argument
                (filenames / handles) was not supplied at construction.
        """
        if destination_bitv & self.Destination.FILENAMES and self.f is None:
            raise ValueError("Filename argument is required if bitv & FILENAMES")
        if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
        self.destination_bitv = destination_bitv

    def print(self, *args, **kwargs):
        """Produce some output to all sinks.  Mirrors builtin print();
        only the ``end`` and ``sep`` keyword arguments are accepted.

        Raises:
            TypeError: end/sep are not strings, or an unsupported keyword
                argument was passed.
        """
        from pyutils.string_utils import _sprintf, strip_escape_sequences

        end = kwargs.pop("end", None)
        if end is not None:
            if not isinstance(end, str):
                raise TypeError("end must be None or a string")
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        buf = _sprintf(*args, end="", sep=sep)
        if sep is None:
            sep = " "
        if end is None:
            end = "\n"
        # NOTE(review): only a newline end is appended; a custom non-"\n"
        # end is dropped -- confirm this matches the intended contract.
        if end == "\n":
            buf += "\n"
        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
            for file in self.f:
                file.write(buf.encode("utf-8"))
                file.flush()

        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
            for handle in self.h:
                handle.write(buf)
                handle.flush()

        # Log/house-log sinks get the text with ANSI escapes stripped.
        buf = strip_escape_sequences(buf)
        if self.logger is not None:
            if self.destination_bitv & self.Destination.LOG_DEBUG:
                self.logger.debug(buf)
            if self.destination_bitv & self.Destination.LOG_INFO:
                self.logger.info(buf)
            if self.destination_bitv & self.Destination.LOG_WARNING:
                self.logger.warning(buf)
            if self.destination_bitv & self.Destination.LOG_ERROR:
                self.logger.error(buf)
            if self.destination_bitv & self.Destination.LOG_CRITICAL:
                self.logger.critical(buf)
        if self.destination_bitv & self.Destination.HLOG:
            hlog(buf)

    def close(self):
        """Close all open files.  Only files opened by this instance (the
        FILENAMES sinks) are closed; caller-provided handles are left open."""
        if self.f is not None:
            for file in self.f:
                file.close()
class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
    """
    A context that uses an :class:`OutputMultiplexer`. e.g.::

        with OutputMultiplexerContext(
                OutputMultiplexer.LOG_INFO |
                OutputMultiplexer.LOG_DEBUG |
                OutputMultiplexer.FILENAMES |
                OutputMultiplexer.FILEHANDLES,
                filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
                handles = [ f, g ]
            ) as mplex:
                mplex.print("This is a log message!")
    """

    def __init__(
        self,
        destination_bitv: OutputMultiplexer.Destination,
        *,
        logger=None,
        filenames=None,
        handles=None,
    ):
        """
        Args:
            destination_bitv: a bitvector that indicates where we should
                send output.  See :class:`OutputMultiplexer` for options.
            logger: optional logger to use for log destination messages.
            filenames: optional filenames to write for filename destination
                messages.
            handles: optional open filehandles to write for filehandle
                destination messages.
        """
        super().__init__(
            destination_bitv,
            logger=logger,
            filenames=filenames,
            handles=handles,
        )

    def __enter__(self):
        return self

    def __exit__(self, etype, value, traceback) -> bool:
        # Always release our opened files, then propagate any exception.
        super().close()
        if etype is not None:
            raise etype(value)
        return False
def _timestamp() -> str:
    """Return the current US/Pacific wall time formatted for the record
    files written below (e.g. 2023/01/31T12:34:56.789012-0800)."""
    zone = pytz.timezone('US/Pacific')
    return datetime.datetime.now(zone).strftime("%Y/%m/%dT%H:%M:%S.%f%z")
def non_zero_return_value(ret: Any):
    """
    Special method hooked from bootstrap.py to optionally keep a system-wide
    record of non-zero python program exits.

    Args:
        ret: the return value
    """
    record = config.config['logging_non_zero_exits_record_path']
    # Only write a record when a record path was configured.
    if record:
        program = config.PROGRAM_NAME
        args = config.ORIG_ARGV
        with open(record, 'a') as af:
            print(
                f'{_timestamp()}: {program} ({args}) exited with non-zero value {ret}.',
                file=af,
            )
def unhandled_top_level_exception(exc_type: type):
    """
    Special method hooked from bootstrap.py to optionally keep a system-wide
    record of unhandled top level exceptions.

    Args:
        exc_type: the type of the unhandled exception
    """
    record = config.config['logging_unhandled_top_level_exceptions_record_path']
    # Only write a record when a record path was configured.
    if record:
        program = config.PROGRAM_NAME
        args = config.ORIG_ARGV
        with open(record, 'a') as af:
            # BUGFIX: was f'{_timestamp}: ...' which interpolated the
            # function object itself; call it like non_zero_return_value does.
            print(
                f'{_timestamp()}: {program} ({args}) took unhandled top level exception {exc_type}',
                file=af,
            )
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`.  This is pretty hacky but used
    by a bunch of (my) code.  Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    Args:
        message: the message to send to the house log.

    TODO: this needs to move.
    """
    import subprocess

    # Invoke logger with an argv list (no shell) so the message needs no
    # quote escaping and cannot be interpreted as shell syntax.
    try:
        subprocess.run(
            ["/usr/bin/logger", "-p", "local7.info", "--", message],
            check=False,
        )
    except OSError:
        # Preserve os.system()'s best-effort behavior: a missing or
        # unrunnable /usr/bin/logger must not crash the caller.
        pass
1264 if __name__ == "__main__":