2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level with :class:`LoggingContext`.
15 * Prepend or append a message to every log record also with
16 :class:`LoggingContext`.
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
* optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * and optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
37 * There are also :class:`LoggerAdapter` classes to implement prefix/suffix
38 functionality without using :class:`LoggingContext` by wrapping the
41 To use this functionality, call :meth:`initialize_logging` early
42 in your program entry point. If you use the
43 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
44 point, it will call this for you automatically.
55 from logging.config import fileConfig
56 from logging.handlers import RotatingFileHandler, SysLogHandler
57 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
60 from overrides import overrides
62 # This module is commonly used by others in here and should avoid
63 # taking any unnecessary dependencies back on them.
64 from pyutils import argparse_utils, config, misc_utils
# Register this module's commandline flags with the project's central
# config machinery so any program that imports this module can control
# logging from its command line.
# NOTE(review): this region was damaged in extraction -- most of the
# cfg.add_argument(...) call boilerplate (openers, defaults, closing
# parens) is missing.  The surviving fragment lines are preserved
# verbatim below; restore them from version control before running.
cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
# --logging_config_file: hand full control to a standard logging config file.
    "--logging_config_file",
    type=argparse_utils.valid_filename,
    help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
# --logging_level: global default level below which messages are silenced.
    choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    help="The global default level below which to squelch log messages",
# --logging_format: the record format string for all handlers.
    help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
# --logging_date_format: how %(asctime)s is rendered.
    "--logging_date_format",
    default="%Y/%m/%dT%H:%M:%S.%f%z",
    help="The format of any dates in --logging_format.",
# --logging_console: yes/no flag for a stderr StreamHandler.
    action=argparse_utils.ActionNoYes,
    help="Should we log to the console (stderr)",
# --logging_filename and friends: rotated logfile destination.
    "--logging_filename",
    help="The filename of the logfile to write.",
    "--logging_filename_maxsize",
    type=argparse_utils.valid_byte_count,
    default=(1024 * 1024),
    help="The maximum size (in bytes) to write to the logging_filename.",
    "--logging_filename_count",
    help="The number of logging_filename copies to keep before deleting.",
# --logging_syslog (+ --logging_syslog_facility): UNIX syslog destination.
    action=argparse_utils.ActionNoYes,
    help="Should we log to localhost's syslog.",
    "--logging_syslog_facility",
    metavar="SYSLOG_FACILITY_LIST",
    help="The default syslog message facility identifier",
# --logging_debug_threads / --logging_debug_modules: extra format markers.
    "--logging_debug_threads",
    action=argparse_utils.ActionNoYes,
    help="Should we prepend pid/tid data to all log messages?",
    "--logging_debug_modules",
    action=argparse_utils.ActionNoYes,
    help="Should we prepend module/function data to all log messages?",
# --logging_info_is_print: tee INFO records onto stdout.
    "--logging_info_is_print",
    action=argparse_utils.ActionNoYes,
    help="logging.info also prints to stdout.",
# --logging_squelch_repeats: allow the squelch decorator to take effect.
    "--logging_squelch_repeats",
    action=argparse_utils.ActionNoYes,
    help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
# --logging_probabilistically: allow the probabilistic decorator to take effect.
    "--logging_probabilistically",
    action=argparse_utils.ActionNoYes,
    help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
# See also: OutputMultiplexer
    "--logging_captures_prints",
    action=argparse_utils.ActionNoYes,
    help="When calling print, also log.info automatically.",
# --logging_clear_preexisting_handlers: make this module the sole owner
# of the global handler list.  (Also fixes two typos in the
# user-visible help text: "that is" -> "that it", "now want" -> "not want".)
cfg.add_argument(
    "--logging_clear_preexisting_handlers",
    action=argparse_utils.ActionNoYes,
    default=False,
    help=(
        "Should logging code clear preexisting global logging handlers and thus insist that it "
        + "alone can add handlers. Use this to work around annoying modules that insert global "
        + "handlers with formats and logging levels you might not want. Caveat emptor, this may "
        + "cause you to miss logging messages."
    ),
)
# Keep a reference to the real builtin print; --logging_captures_prints
# replaces builtins.print but still needs to actually print.
BUILT_IN_PRINT = print
# Guard so that initialize_logging only does its work once per process.
LOGGING_INITIALIZED = False


# A map from logging_callsite_id -> count of logged messages.
# Populated by the @squelch_repeated_log_messages decorator; read by
# SquelchRepeatedMessagesFilter.
squelched_logging_counts: Dict[str, int] = {}
def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
    """A decorator that marks a function as interested in having the
    logging messages that it produces be squelched (ignored) after it
    logs the same message more than N times.

    Squelching affects *ALL* logging messages produced within the
    decorated function and is keyed on the exact message text: one line
    of code that produces different strings (e.g. via a format string)
    counts as different messages.

    Users can insist that all messages always be logged with the
    :code:`--no_logging_squelch_repeats` flag; the default behavior is
    to honor this decorator.  Code without this decorator is never
    affected either way.

    Args:
        squelch_after_n_repeats: the number of repeated messages allowed
            to log before subsequent messages are silently dropped.

    Returns:
        A decorator that registers its target function in
        :data:`squelched_logging_counts` and returns it unmodified.
    """

    def squelch_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        squelched_logging_counts[identifier] = squelch_after_n_repeats
        # Fix vs. the fragment under review: without returning f the
        # decorated function would be rebound to None.
        return f

    return squelch_logging_wrapper
class SquelchRepeatedMessagesFilter(logging.Filter):
    """A filter that only logs messages from a given site with the same
    (exact) message at the same logging level N times and ignores
    subsequent attempts to log.

    This filter only affects messages that repeat more than a threshold
    number of times from functions tagged with the
    :code:`@logging_utils.squelch_repeated_log_messages` decorator; all
    other messages pass through untouched.

    This functionality is enabled by default but can be disabled via
    the :code:`--no_logging_squelch_repeats` commandline flag.
    """

    def __init__(self) -> None:
        super().__init__()
        # Maps "path+line+level+message" -> number of times seen.
        self.counters: collections.Counter = collections.Counter()

    def filter(self, record: logging.LogRecord) -> bool:
        """Should we drop this log message?"""
        id1 = f"{record.module}:{record.funcName}"
        if id1 not in squelched_logging_counts:
            # Not a decorated function; always log.  (The fragment under
            # review fell through here, which would have raised KeyError
            # on the lookup below.)
            return True
        threshold = squelched_logging_counts[id1]
        logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
        count = self.counters[logsite]
        self.counters[logsite] += 1
        return count < threshold
# A map from function_identifier -> probability of logging (0.0%..100.0%)
# Populated by the @logging_is_probabilistic decorator; read by
# ProbabilisticFilter.
probabilistic_logging_levels: Dict[str, float] = {}
def logging_is_probabilistic(probability_of_logging: float) -> Callable:
    """A decorator that indicates that all logging statements within the
    scope of the decorated function are not deterministic: they log only
    some percent of the time, randomly, when the user allows it via the
    :code:`--logging_probabilistically` commandline flag (enabled by
    default).

    This affects *ALL* logging statements within the marked function.
    To narrow the effect, move the relevant logging into a separate
    helper function and decorate the helper instead.

    This functionality can be disabled (forcing all messages to be
    produced) via the :code:`--no_logging_probabilistically` argument.

    Args:
        probability_of_logging: percent chance (0.0..100.0) that any
            given message inside the decorated function is logged.

    Returns:
        A decorator that registers its target function in
        :data:`probabilistic_logging_levels` and returns it unmodified.
    """

    def probabilistic_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        probabilistic_logging_levels[identifier] = probability_of_logging
        # Fix vs. the fragment under review: return the original
        # function so the decorator doesn't replace it with None.
        return f

    return probabilistic_logging_wrapper
class ProbabilisticFilter(logging.Filter):
    """A filter that emits log messages only probabilistically (i.e.
    randomly, at some percent chance).  Used together with the
    :meth:`logging_is_probabilistic` decorator to implement the
    :code:`--logging_probabilistically` commandline flag.

    Only messages originating in functions tagged with that decorator
    are affected; all other messages pass through unconditionally.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        """Should the message be logged?"""
        caller_id = f"{record.module}:{record.funcName}"
        chance = probabilistic_logging_levels.get(caller_id, 100.0)
        return misc_utils.execute_probabilistically(chance)
class OnlyInfoFilter(logging.Filter):
    """A filter that passes only records logged at exactly the INFO
    level.  Used by the :code:`--logging_info_is_print` commandline
    option to tee the INFO slice of the logging stream onto a stdout
    handler.
    """

    def filter(self, record: logging.LogRecord):
        """Keep the record iff it was logged at INFO."""
        return logging.INFO == record.levelno
class PrefixAddingFilter(logging.Filter):
    """A filter that adds a string prefix to the log record for the
    formatter to later fill in.  Requires a %(prefix)s in the format
    string to be visible.
    """

    def __init__(self, prefix: str, klobber: bool = False):
        """
        Args:
            prefix: the prefix string to add (None is treated as "")
            klobber: should we overwrite other prefixes?
        """
        super().__init__()
        # Normalize falsy prefixes to "" so formatters never render the
        # literal string "None".
        if prefix:
            self.prefix: str = prefix
        else:
            self.prefix = ""
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        if self.klobber:
            record.prefix = self.prefix
        elif "prefix" not in record.__dict__:
            record.prefix = self.prefix
        # Never suppress the record.  (The fragment under review had no
        # return here; the implicit None would have dropped every
        # message this filter touched.)
        return True
class SuffixAddingFilter(logging.Filter):
    """A filter that adds a string suffix to the log record for the
    formatter to later fill in.  Requires a %(suffix)s in the format
    string to be visible.
    """

    def __init__(self, suffix: str, klobber: bool = False):
        """
        Args:
            suffix: the suffix string to add (None is treated as "")
            klobber: should we overwrite other suffixes?
        """
        super().__init__()
        # Normalize falsy suffixes to "" so formatters never render the
        # literal string "None".
        if suffix:
            self.suffix: str = suffix
        else:
            self.suffix = ""
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        if self.klobber:
            record.suffix = self.suffix
        elif "suffix" not in record.__dict__:
            record.suffix = self.suffix
        # Never suppress the record.  (The fragment under review had no
        # return here; the implicit None would have dropped every
        # message this filter touched.)
        return True
class PrependingLogAdapter(logging.LoggerAdapter):
    """A LoggerAdapter that prepends a fixed string to every message
    logged through it.

    :class:`LoggingContext` achieves prefix/suffix functionality with
    logging Filters and requires %(prefix)s / %(suffix)s in the format
    string.  This class is an alternate approach: it wraps a logger in
    an adapter with the same interface and rewrites each message before
    delegating to the wrapped logger.  The wrapped logger continues to
    behave as usual; simply drop the adapter when it's no longer needed.
    """

    def process(self, msg, kwargs):
        """Prepend self.extra['prefix'] (if present) to msg."""
        return f'{self.extra.get("prefix", "")}{msg}', kwargs

    @staticmethod
    def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper around the creation of a LogAdapter that prepends a
        given string to every log message produced.

        Args:
            prefix: the message to prepend to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return PrependingLogAdapter(logger, {"prefix": prefix})
class AppendingLogAdapter(logging.LoggerAdapter):
    """A LoggerAdapter that appends a fixed string to every message
    logged through it.

    :class:`LoggingContext` achieves prefix/suffix functionality with
    logging Filters and requires %(prefix)s / %(suffix)s in the format
    string.  This class is an alternate approach: it wraps a logger in
    an adapter with the same interface and rewrites each message before
    delegating to the wrapped logger.  The wrapped logger continues to
    behave as usual; simply drop the adapter when it's no longer needed.
    """

    def process(self, msg, kwargs):
        """Append self.extra['suffix'] (if present) to msg."""
        return f'{msg}{self.extra.get("suffix", "")}', kwargs

    @staticmethod
    def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper around the creation of a LoggerAdapter that appends a
        given string to every log message produced.

        Args:
            suffix: the message to append to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return AppendingLogAdapter(logger, {"suffix": suffix})
class LoggingContext(contextlib.ContextDecorator):
    """A context manager (and, via ContextDecorator, a decorator) that
    temporarily changes how we are logging within its scope.  Changes
    may include:

        - Changing the logging level (e.g. from INFO to DEBUG),
        - Adding a prefix or suffix to every log message produced
          (requires %(prefix)s / %(suffix)s placeholders in the format
          string and is useful, e.g., to tag all messages produced
          while handling one request with that request's id),
        - Adding temporary Handlers to direct the output elsewhere
          (e.g. an extra RotatingFileHandler or SysLogHandler).

    Everything is restored to its previous state when the context
    exits; exceptions raised in the body propagate out normally.
    """

    def __init__(
        self,
        logger: logging.Logger,
        *,
        level: Optional[int] = None,
        handlers: Optional[List[logging.Handler]] = None,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
    ):
        """
        Args:
            logger: the logger to temporarily modify
            level: an optional logging level to set while in scope
            handlers: optional extra Handlers to add while in scope
            prefix: optional prefix to prepend to messages in scope
                (overwrites any preexisting record prefix)
            suffix: optional suffix to append to messages in scope
                (overwrites any preexisting record suffix)
        """
        self.logger = logger
        self.level = level
        self.old_level: Optional[int] = None
        self.handlers = handlers
        if prefix:
            self.prefix: Optional[logging.Filter] = PrefixAddingFilter(prefix, True)
        else:
            self.prefix = None
        if suffix:
            self.suffix: Optional[logging.Filter] = SuffixAddingFilter(suffix, True)
        else:
            self.suffix = None

    def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
        assert self.logger
        if self.level:
            # Remember the old level so __exit__ can restore it.
            self.old_level = self.logger.level
            self.logger.setLevel(self.level)
        if self.handlers:
            for handler in self.handlers:
                self.logger.addHandler(handler)
        if self.prefix:
            self.logger.addFilter(self.prefix)
        if self.suffix:
            self.logger.addFilter(self.suffix)
        return self.logger

    def __exit__(self, et, ev, tb) -> None:
        # Undo everything __enter__ did, in reverse order.
        if self.suffix:
            self.logger.removeFilter(self.suffix)
        if self.prefix:
            self.logger.removeFilter(self.prefix)
        if self.handlers:
            for handler in self.handlers:
                self.logger.removeHandler(handler)
        if self.old_level:
            self.logger.setLevel(self.old_level)
        return None  # propagate exceptions out
class MillisecondAwareFormatter(logging.Formatter):
    """A formatter for adding milliseconds to log messages which, for
    whatever reason, the default Python logger doesn't do.

    You probably don't need to use this directly; it is wired in under
    :meth:`initialize_logging` so that log timestamps have millisecond
    precision.
    """

    # NOTE(review): timezone is hardcoded to US/Pacific below -- confirm
    # this is intended for all deployments.
    converter = datetime.datetime.fromtimestamp  # type: ignore

    def formatTime(self, record, datefmt=None):
        ct = MillisecondAwareFormatter.converter(
            record.created, pytz.timezone("US/Pacific")
        )
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y-%m-%d %H:%M:%S")
            # Bug fix: the previous f-string used the spec "%03d" which
            # is not a valid format specifier and raised ValueError
            # whenever datefmt was None.  Truncate msecs (a float) to an
            # int and zero-pad to three digits.
            s = f"{t},{int(record.msecs):03d}"
        return s
def _log_about_logging(
    logger,
    default_logging_level,
    preexisting_handlers_count,
    fmt,
    facility_name,
):
    """Invoked automatically after logging is initialized so that the
    first messages in the log describe how logging itself was
    configured.  Purely informational; emits only DEBUG records.
    """
    level_name = logging._levelToName.get(
        default_logging_level, str(default_logging_level)
    )
    logger.debug("Initialized global logging; logging level is %s.", level_name)
    if (
        config.config["logging_clear_preexisting_handlers"]
        and preexisting_handlers_count > 0
    ):
        logger.debug(
            "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
            preexisting_handlers_count,
        )
    logger.debug('Logging format specification is "%s"', fmt)
    if config.config["logging_debug_threads"]:
        logger.debug(
            "...Logging format spec captures tid/pid. (--logging_debug_threads)"
        )
    if config.config["logging_debug_modules"]:
        logger.debug(
            "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
        )
    if config.config["logging_syslog"]:
        logger.debug(
            "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
            facility_name,
        )
    if config.config["logging_filename"]:
        logger.debug(
            'Logging to file "%s". (--logging_filename)',
            config.config["logging_filename"],
        )
        logger.debug(
            "...with %d bytes max file size. (--logging_filename_maxsize)",
            config.config["logging_filename_maxsize"],
        )
        logger.debug(
            "...and %d rotating backup file count. (--logging_filename_count)",
            config.config["logging_filename_count"],
        )
    if config.config["logging_console"]:
        logger.debug("Logging to the console (stderr). (--logging_console)")
    if config.config["logging_info_is_print"]:
        logger.debug(
            "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
        )
    if config.config["logging_squelch_repeats"]:
        logger.debug(
            "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
        )
    else:
        logger.debug(
            "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
        )
    if config.config["logging_probabilistically"]:
        logger.debug(
            "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
        )
    else:
        logger.debug(
            "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
        )
    if config.config["logging_captures_prints"]:
        logger.debug(
            "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
        )
def _construct_logging_format() -> str:
    """Figure out the root logging format string based on commandline
    args.  Returns the format string to hand to each Formatter.
    """
    # Custom or default --logging_format?
    if config.config["logging_format"]:
        fmt = config.config["logging_format"]
    elif config.config["logging_syslog"]:
        # syslogd adds its own timestamp; don't duplicate it.
        fmt = "%(levelname).1s:%(filename)s[%(process)d]: "
    else:
        fmt = "%(levelname).1s:%(asctime)s: "
    # Always reserve slots for LoggingContext's prefix/suffix filters.
    fmt += "%(prefix)s%(message)s%(suffix)s"

    # --logging_debug_threads and --logging_debug_modules both affect
    # the format by prepending information about the pid/tid or
    # file/function/line to every message.
    if config.config["logging_debug_threads"]:
        fmt = f"%(process)d.%(thread)d|{fmt}"
    if config.config["logging_debug_modules"]:
        fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
    # Fix vs. the fragment under review: the constructed format must be
    # returned to the caller.
    return fmt
def _construct_handlers_list(
    logging_format: str, facility_name: Optional[str]
) -> List[logging.Handler]:
    """Create the list of handlers to be added to the root logger
    based on commandline flags.

    Args:
        logging_format: the format string for every handler's Formatter.
        facility_name: optional SysLogHandler facility attribute name
            (e.g. "LOG_USER"); only consulted when --logging_syslog.
    """
    retval: List[logging.Handler] = []

    # --logging_syslog (optionally with --logging_syslog_facility)
    # sets up for logging to use the standard system syslogd as a
    # destination.
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            # Map the facility name onto SysLogHandler's LOG_* constant,
            # defaulting to LOG_USER for unknown names.
            facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
            syslog_handler = SysLogHandler(facility=facility, address="/dev/log")
            syslog_handler.setFormatter(
                MillisecondAwareFormatter(
                    fmt=logging_format,
                    datefmt=config.config["logging_date_format"],
                )
            )
            retval.append(syslog_handler)

    # --logging_filename (with friends --logging_filename_count and
    # --logging_filename_maxsize) set up logging to a file on the
    # filesystem with automatic rotation when it gets too big.
    if config.config["logging_filename"]:
        max_bytes = config.config["logging_filename_maxsize"]
        assert max_bytes and isinstance(max_bytes, int)
        backup_count = config.config["logging_filename_count"]
        assert backup_count and isinstance(backup_count, int)
        file_handler = RotatingFileHandler(
            config.config["logging_filename"],
            maxBytes=max_bytes,
            backupCount=backup_count,
        )
        file_handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=logging_format,
                datefmt=config.config["logging_date_format"],
            )
        )
        retval.append(file_handler)

    # --logging_console is, ahem, logging to the console.
    if config.config["logging_console"]:
        console_handler = logging.StreamHandler(sys.stderr)
        console_handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=logging_format,
                datefmt=config.config["logging_date_format"],
            )
        )
        retval.append(console_handler)

    # --logging_info_is_print echoes any message to logger.info(x) as
    # a print statement on stdout.
    if config.config["logging_info_is_print"]:
        print_handler = logging.StreamHandler(sys.stdout)
        print_handler.addFilter(OnlyInfoFilter())
        retval.append(print_handler)

    # Never return an empty handler list; a NullHandler silences the
    # "No handlers could be found" complaint.
    if len(retval) == 0:
        retval.append(logging.NullHandler())
    return retval
def _add_logging_handlers_and_filters(
    logger: logging.Logger, handlers: List[logging.Handler]
) -> None:
    """Adds a list of handlers to the root logger, attaching the
    standard filters to each one first."""

    for handler in handlers:
        # These two are always present and set %(prefix) and %(suffix)
        # if appropriate (see :class:`LoggingContext`) or make them
        # empty strings so the format string renders cleanly.
        handler.addFilter(PrefixAddingFilter("", False))
        handler.addFilter(SuffixAddingFilter("", False))

        # --logging_squelch_repeats allows code to request repeat logging
        # messages (identical log site and message contents) to be
        # silenced. Logging code must request this explicitly, it isn't
        # automatic. This option just allows the silencing to happen.
        if config.config["logging_squelch_repeats"]:
            handler.addFilter(SquelchRepeatedMessagesFilter())

        # --logging_probabilistically allows code to request
        # non-deterministic logging where messages have some probability
        # of being produced. Logging code must request this explicitly.
        # This option just allows the non-deterministic behavior to
        # happen. Disabling it will cause every log message to be
        # produced.
        if config.config["logging_probabilistically"]:
            handler.addFilter(ProbabilisticFilter())
        logger.addHandler(handler)
def initialize_logging(logger=None) -> logging.Logger:
    """Initialize logging for the program.  See the module level
    comments for information about what functionality this provides and
    how to enable or disable functionality via the commandline.

    Idempotent: after the first call, subsequent calls simply return
    the root logger.  If you use the :meth:`bootstrap.initialize`
    decorator on your program's entry point, it will call this for you.
    See :meth:`pyutils.bootstrap.initialize`.

    Returns:
        The root logger, fully configured.
    """
    global LOGGING_INITIALIZED
    if LOGGING_INITIALIZED:
        return logging.getLogger()
    LOGGING_INITIALIZED = True

    preexisting_handlers_count = 0

    # --logging_clear_preexisting_handlers removes logging handlers
    # that were registered by global statements during imported module
    # setup before we got here.
    if config.config["logging_clear_preexisting_handlers"]:
        logging.basicConfig(force=True)
        logger = logging.getLogger()
        while logger.hasHandlers():
            logger.removeHandler(logger.handlers[0])
            preexisting_handlers_count += 1
    else:
        logging.basicConfig()
        logger = logging.getLogger()

    # --logging_config_file pulls logging settings from a config file
    # skipping the rest of this setup.
    if config.config["logging_config_file"] is not None:
        fileConfig(config.config["logging_config_file"])
        return logger

    # Global default logging level (--logging_level); messages below
    # this level will be silenced.
    logging_level = config.config["logging_level"]
    assert logging_level
    logging_level = logging_level.upper()
    default_logging_level = getattr(logging, logging_level, None)
    if not isinstance(default_logging_level, int):
        raise ValueError(f'Invalid level: {config.config["logging_level"]}')
    logger.setLevel(default_logging_level)

    # Construct the format string and handler list, then wire them up.
    logging_format = _construct_logging_format()
    facility_name = None
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            if config.config["logging_syslog_facility"]:
                facility_name = "LOG_" + config.config["logging_syslog_facility"]
    handlers: List[logging.Handler] = _construct_handlers_list(
        logging_format, facility_name
    )
    _add_logging_handlers_and_filters(logger, handlers)

    # --logging_captures_prints, if set, will capture and log.info
    # anything printed on stdout.
    if config.config["logging_captures_prints"]:
        import builtins

        def print_and_also_log(*arg, **kwarg):
            f = kwarg.get("file", None)
            # Anything printed to stderr is presumably a complaint.
            if f == sys.stderr:
                logger.warning(*arg)
            else:
                logger.info(*arg)
            BUILT_IN_PRINT(*arg, **kwarg)

        builtins.print = print_and_also_log

    # At this point the logger is ready, handlers are set up,
    # etc... so log about the logging configuration.
    _log_about_logging(
        logger,
        default_logging_level,
        preexisting_handlers_count,
        logging_format,
        facility_name,
    )
    return logger
def get_logger(name: str = ""):
    """Get the global logger, initializing program-wide logging first
    if that hasn't happened yet."""
    return initialize_logging(logging.getLogger(name))
class OutputMultiplexer(object):
    """A class that broadcasts printed messages to several sinks
    (including various logging levels, different files, different file
    handles, the house log, etc...).  See also
    :class:`OutputMultiplexerContext` for an easy usage pattern.
    """

    class Destination(enum.IntEnum):
        """Bits in the destination_bitv bitvector. Used to indicate the
        output destination."""

        # fmt: off
        LOG_DEBUG = 0x01         # ⎫
        LOG_INFO = 0x02          # ⎪
        LOG_WARNING = 0x04       # ⎬ Must provide logger to the c'tor.
        LOG_ERROR = 0x08         # ⎪
        LOG_CRITICAL = 0x10      # ⎭
        FILENAMES = 0x20         # Must provide a filename to the c'tor.
        FILEHANDLES = 0x40       # Must provide a handle to the c'tor.
        HLOG = 0x80              # The house log (see hlog, below).
        ALL_LOG_DESTINATIONS = (
            LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        )
        ALL_OUTPUT_DESTINATIONS = 0x8F
        # fmt: on

    def __init__(
        self,
        destination_bitv: int,
        *,
        logger=None,
        filenames: Optional[Iterable[str]] = None,
        handles: Optional[Iterable[io.TextIOWrapper]] = None,
    ):
        """
        Constructs the OutputMultiplexer instance.

        Args:
            destination_bitv: a bitvector where each bit represents an
                output destination. Multiple bits may be set.
            logger: if LOG_* bits are set, you must pass a logger here.
            filenames: if FILENAMES bit is set, this should be a list of
                files you'd like to output into. This code handles opening
                and closing said files.
            handles: if FILEHANDLES bit is set, this should be a list of
                already opened filehandles you'd like to output into. The
                handles will remain open after the scope of the multiplexer.

        Raises:
            ValueError: destination_bitv requires filenames or handles
                that were not provided.
        """
        if logger is None:
            logger = logging.getLogger(None)
        self.logger = logger

        self.f: Optional[List[Any]] = None
        if filenames is not None:
            # Open unbuffered so output is visible immediately.
            self.f = [open(filename, "wb", buffering=0) for filename in filenames]
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
                raise ValueError("Filenames argument is required if bitv & FILENAMES")

        self.h: Optional[List[Any]] = None
        if handles is not None:
            self.h = list(handles)
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
                raise ValueError("Handle argument is required if bitv & FILEHANDLES")

        self.set_destination_bitv(destination_bitv)

    def get_destination_bitv(self):
        """Where are we outputting?"""
        return self.destination_bitv

    def set_destination_bitv(self, destination_bitv: int):
        """Change the output destination_bitv to the one provided.

        Raises:
            ValueError: the requested destinations need filenames or
                handles that were not given at construction time.
        """
        if destination_bitv & self.Destination.FILENAMES and self.f is None:
            raise ValueError("Filename argument is required if bitv & FILENAMES")
        if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
        self.destination_bitv = destination_bitv

    def print(self, *args, **kwargs):
        """Produce some output to all sinks.  Supports print()-style
        `end` and `sep` keyword arguments."""
        from pyutils.string_utils import _sprintf, strip_escape_sequences

        end = kwargs.pop("end", None)
        if end is not None:
            if not isinstance(end, str):
                raise TypeError("end must be None or a string")
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        buf = _sprintf(*args, end="", sep=sep)
        if sep is None:
            sep = " "
        if end is None:
            end = "\n"
        if end == "\n":
            buf += "\n"

        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
            for _ in self.f:
                _.write(buf.encode("utf-8"))
                _.flush()

        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
            for _ in self.h:
                _.write(buf)
                _.flush()

        # Logging/syslog sinks shouldn't see ANSI color sequences.
        buf = strip_escape_sequences(buf)
        if self.logger is not None:
            if self.destination_bitv & self.Destination.LOG_DEBUG:
                self.logger.debug(buf)
            if self.destination_bitv & self.Destination.LOG_INFO:
                self.logger.info(buf)
            if self.destination_bitv & self.Destination.LOG_WARNING:
                self.logger.warning(buf)
            if self.destination_bitv & self.Destination.LOG_ERROR:
                self.logger.error(buf)
            if self.destination_bitv & self.Destination.LOG_CRITICAL:
                self.logger.critical(buf)
        if self.destination_bitv & self.Destination.HLOG:
            hlog(buf)

    def close(self):
        """Close all open files."""
        if self.f is not None:
            for _ in self.f:
                _.close()
class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
    """A context that uses an :class:`OutputMultiplexer`. e.g.::

        with OutputMultiplexerContext(
            OutputMultiplexer.LOG_INFO |
            OutputMultiplexer.LOG_DEBUG |
            OutputMultiplexer.FILENAMES |
            OutputMultiplexer.FILEHANDLES,
            filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
            handles = [ f, g ]
        ) as mplex:
            mplex.print("This is a log message!")
    """

    def __init__(
        self,
        destination_bitv: OutputMultiplexer.Destination,
        *,
        logger=None,
        filenames=None,
        handles=None,
    ):
        """
        Args:
            destination_bitv: a bitvector that indicates where we should
                send output. See :class:`OutputMultiplexer` for options.
            logger: optional logger to use for log destination messages.
            filenames: optional filenames to write for filename destination
                messages.
            handles: optional open filehandles to write for filehandle
                destination messages.
        """
        super().__init__(
            destination_bitv,
            logger=logger,
            filenames=filenames,
            handles=handles,
        )

    def __enter__(self):
        return self

    def __exit__(self, etype, value, traceback) -> bool:
        # Close any files we opened; re-raise exceptions from the body.
        super().close()
        if etype is not None:
            raise etype(value)
        return False
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`. This is pretty hacky but used
    by a bunch of (my) code. Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    Args:
        message: the message to send to the house log.

    TODO: this needs to move.
    """
    # Escape single quotes so the message survives the single-quoted
    # shell argument below (' -> '"'"').
    message = message.replace("'", "'\"'\"'")
    # NOTE(review): this builds a shell command string from the message.
    # The quote escaping above prevents breaking out of the quoted
    # argument, but subprocess.run with a list (shell=False) would be
    # safer -- consider when this "moves" per the TODO.
    os.system(f"/usr/bin/logger -p local7.info -- '{message}'")
1197 if __name__ == "__main__":