2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level with :class:`LoggingContext`.
15 * Prepend or append a message to every log record also with
16 :class:`LoggingContext`.
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
* optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * and optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
37 * There are also :class:`LoggerAdapter` classes to implement prefix/suffix
38 functionality without using :class:`LoggingContext` by wrapping the
41 To use this functionality, call :meth:`initialize_logging` early
42 in your program entry point. If you use the
43 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
44 point, it will call this for you automatically.
56 from logging.config import fileConfig
57 from logging.handlers import RotatingFileHandler, SysLogHandler
58 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
61 from overrides import overrides
63 # This module is commonly used by others in here and should avoid
64 # taking any unnecessary dependencies back on them.
65 from pyutils import argparse_utils, config, misc_utils
# Register all logging-related commandline flags with the central pyutils
# config module. NOTE(review): several `cfg.add_argument(` call openers and
# closing parens appear elided in this view; argument fragments below are
# reproduced as-is.
cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
    # Optional file-based logging configuration; when present it bypasses
    # most of the programmatic setup in initialize_logging().
    "--logging_config_file",
    type=argparse_utils.valid_filename,
    help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
    # --logging_level: global default level.
    choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    help="The global default level below which to squelch log messages",
    # --logging_format / --logging_date_format: record layout.
    help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
    "--logging_date_format",
    default="%Y/%m/%dT%H:%M:%S.%f%z",
    help="The format of any dates in --logging_format.",
    # --logging_console: stderr handler toggle.
    action=argparse_utils.ActionNoYes,
    help="Should we log to the console (stderr)",
    # --logging_filename and friends: rotated file handler.
    "--logging_filename",
    help="The filename of the logfile to write.",
    "--logging_filename_maxsize",
    type=argparse_utils.valid_byte_count,
    default=(1024 * 1024),
    help="The maximum size (in bytes) to write to the logging_filename.",
    "--logging_filename_count",
    help="The number of logging_filename copies to keep before deleting.",
    # --logging_syslog and facility: syslog handler.
    action=argparse_utils.ActionNoYes,
    help="Should we log to localhost's syslog.",
    "--logging_syslog_facility",
    metavar="SYSLOG_FACILITY_LIST",
    help="The default syslog message facility identifier",
    # Debug decorations prepended to the format string.
    "--logging_debug_threads",
    action=argparse_utils.ActionNoYes,
    help="Should we prepend pid/tid data to all log messages?",
    "--logging_debug_modules",
    action=argparse_utils.ActionNoYes,
    help="Should we prepend module/function data to all log messages?",
    # Behavior toggles.
    "--logging_info_is_print",
    action=argparse_utils.ActionNoYes,
    help="logging.info also prints to stdout.",
    "--logging_squelch_repeats",
    action=argparse_utils.ActionNoYes,
    help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
    "--logging_probabilistically",
    action=argparse_utils.ActionNoYes,
    help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
    # See also: OutputMultiplexer
    "--logging_captures_prints",
    action=argparse_utils.ActionNoYes,
    help="When calling print, also log.info automatically.",
    "--logging_clear_preexisting_handlers",
    action=argparse_utils.ActionNoYes,
    "Should logging code clear preexisting global logging handlers and thus insist that is "
    + "alone can add handlers. Use this to work around annoying modules that insert global "
    + "handlers with formats and logging levels you might now want. Caveat emptor, this may "
    + "cause you to miss logging messages."

# Saved reference to the real builtin print so --logging_captures_prints can
# still reach it after builtins.print is replaced in initialize_logging().
BUILT_IN_PRINT = print

# Guard so that initialize_logging() runs its setup at most once per process.
LOGGING_INITIALIZED = False

# A map from logging_callsite_id -> count of logged messages.
squelched_logging_counts: Dict[str, int] = {}
def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
    """
    A decorator that marks a function as interested in having the logging
    messages that it produces be squelched (ignored) after it logs the
    same message more than N times.

    This decorator affects *ALL* logging messages produced
    within the decorated function. That said, messages must be
    identical in order to be squelched. For example, if the same line
    of code produces different messages (because of, e.g., a format
    string), the messages are considered to be different.

    An example of this from the pyutils code itself can be found in
    :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg` methods::

        @logging_utils.squelch_repeated_log_messages(1)
        name: Optional[str] = "",
        red: Optional[int] = None,
        green: Optional[int] = None,
        blue: Optional[int] = None,
        force_16color: bool = False,
        force_216color: bool = False,

    These methods log stuff like "Using 24-bit color strategy" which
    gets old really fast and fills up the logs. By decorating the methods
    with :code:`@logging_utils.squelch_repeated_log_messages(1)` the code
    is requesting that its logged messages be dropped silently after the
    first one is produced (note the argument 1).

    Users can insist that all logged messages always be reflected in the
    logs using the :code:`--no_logging_squelch_repeats` flag but the default
    behavior is to allow code to request it be squelched.

    :code:`--logging_squelch_repeats` only affects code with this decorator
    on it; it ignores all other code.

    Args:
        squelch_after_n_repeats: the number of repeated messages allowed to
            log before subsequent messages are silently dropped.
    """

    def squelch_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)

        # Get rid of module paths, e.g. pyutils.ansi:bg -> ansi:bg which
        # is what we're going to need below.
        identifier = re.sub(r"[^\.]+\.", "", identifier)
        # Registering here is what SquelchRepeatedMessagesFilter.filter()
        # later consults; the decorated function itself is not modified.
        squelched_logging_counts[identifier] = squelch_after_n_repeats
        # NOTE(review): expected a `return f` here so the decorated function
        # is handed back unchanged -- confirm against the full source.

    return squelch_logging_wrapper
class SquelchRepeatedMessagesFilter(logging.Filter):
    """A filter that only logs messages from a given site with the same
    (exact) message at the same logging level N times and ignores
    subsequent attempts to log.

    This filter only affects logging messages that repeat more than a
    threshold number of times from functions that are tagged with the
    :code:`@logging_utils.squelched_logging_ok` decorator (see above);
    all others are ignored.

    This functionality is enabled by default but can be disabled via
    the :code:`--no_logging_squelch_repeats` commandline flag.
    """

    def __init__(self) -> None:
        # Per-logsite count of messages seen so far (keyed by
        # path+lineno+level+msg, see filter() below).
        self.counters: collections.Counter = collections.Counter()

    def filter(self, record: logging.LogRecord) -> bool:
        """Should we drop this log message?"""
        id1 = f"{record.module}:{record.funcName}"
        if id1 not in squelched_logging_counts:
            # NOTE(review): expected an early `return True` here (functions
            # not tagged by the decorator always log) -- confirm against
            # the full source.
        threshold = squelched_logging_counts[id1]
        logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
        count = self.counters[logsite]
        self.counters[logsite] += 1
        # Allow the first `threshold` occurrences; drop the rest.
        return count < threshold
# A map from function_identifier -> probability of logging (0.0%..100.0%)
probabilistic_logging_levels: Dict[str, float] = {}


def logging_is_probabilistic(probability_of_logging: float) -> Callable:
    """A decorator that indicates that all logging statements within the
    scope of a particular (marked via decorator) function are not
    deterministic (i.e. they do not always unconditionally log) but rather
    are probabilistic (i.e. they log N% of the time, randomly) when the
    user passes the :code:`--logging_probabilistically` commandline flag
    (which is enabled by default).

    This affects *ALL* logging statements within the marked function.
    If you want it to only affect a subset of logging statements,
    log those statements in a separate function that you invoke
    from within the "too large" scope and mark that separate function
    with the :code:`logging_is_probabilistic` decorator instead.

    That this functionality can be disabled (forcing all logged
    messages to produce output) via the
    :code:`--no_logging_probabilistically` cmdline argument.
    """

    def probabilistic_logging_wrapper(f: Callable):
        from pyutils import function_utils

        # Register the desired probability for ProbabilisticFilter to
        # consult; the decorated function itself is not modified.
        identifier = function_utils.function_identifier(f)
        probabilistic_logging_levels[identifier] = probability_of_logging
        # NOTE(review): expected a `return f` here so the decorated function
        # is handed back unchanged -- confirm against the full source.

    return probabilistic_logging_wrapper
class ProbabilisticFilter(logging.Filter):
    """
    A filter that logs messages probabilistically (i.e. randomly at some
    percent chance). This filter is used with a decorator (see
    :meth:`logging_is_probabilistic`) to implement the
    :code:`--logging_probabilistically` commandline flag.

    This filter only affects logging messages from functions that have
    been tagged with the `@logging_utils.probabilistic_logging` decorator.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        """Should the message be logged?"""
        identifier = f"{record.module}:{record.funcName}"
        # Functions not registered by the decorator default to 100.0,
        # i.e. always log.
        threshold = probabilistic_logging_levels.get(identifier, 100.0)
        return misc_utils.execute_probabilistically(threshold)
class OnlyInfoFilter(logging.Filter):
    """A filter that only logs messages produced at the INFO logging
    level. This is used by the :code:`--logging_info_is_print`
    commandline option to select a subset of the logging stream to
    send to a stdout handler.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        # Accept exactly INFO; every other level is rejected by the
        # handler this filter is attached to.
        return record.levelno == logging.INFO
class PrefixAddingFilter(logging.Filter):
    """A filter that adds a string prefix to the log record for the
    formatter to later fill in. Requires a %(prefix)s in the format
    string.
    """

    def __init__(self, prefix: str, klobber: bool = False):
        """
        Args:
            prefix: the prefix string to add
            klobber: should we overwrite other prefixes?
        """
        # NOTE(review): expected `self.prefix = prefix` (and likely a
        # super().__init__() call) in this constructor -- confirm against
        # the full source.
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        # NOTE(review): expected an `if self.klobber:` guard above the next
        # line (klobber mode overwrites any existing prefix) -- confirm.
            record.prefix = self.prefix
        elif "prefix" not in record.__dict__:
            # Non-klobber mode: only set a prefix if the record lacks one.
            record.prefix = self.prefix
        # NOTE(review): expected a trailing `return True` (this filter never
        # drops records) -- confirm against the full source.
class SuffixAddingFilter(logging.Filter):
    """A filter that adds a string suffix to the log record for the
    formatter to later fill in. Requires a %(suffix)s in the format
    string.
    """

    def __init__(self, suffix: str, klobber: bool = False):
        """
        Args:
            suffix: the suffix string to add
            klobber: should we overwrite other suffixes?
        """
        # NOTE(review): expected `self.suffix = suffix` (and likely a
        # super().__init__() call) in this constructor -- confirm against
        # the full source.
        self.klobber = klobber

    def filter(self, record: logging.LogRecord):
        # NOTE(review): expected an `if self.klobber:` guard above the next
        # line (klobber mode overwrites any existing suffix) -- confirm.
            record.suffix = self.suffix
        elif "suffix" not in record.__dict__:
            # Non-klobber mode: only set a suffix if the record lacks one.
            record.suffix = self.suffix
        # NOTE(review): expected a trailing `return True` (this filter never
        # drops records) -- confirm against the full source.
class PrependingLogAdapter(logging.LoggerAdapter):
    """:class:`LoggingContext` adds prefixes and suffixes using a
    logging.Filter that must insert "prefix" or "suffix" members into
    each log record by using :class:`PrefixAddingFilter` and
    :class:`SuffixAddingFilter`. This relies on the logging format
    string containing a %(prefix)s and a %(suffix)s to work correctly.

    This is an alternate approach that basically just wraps the logger
    in a class that has the same interface and thunks most calls down
    to the wrapped logger. It might be useful if you don't want to use
    :class:`LoggingContext` or its friends.

    >>> logger = logging.getLogger(__name__ + ".PrependingLogAdapter")
    >>> logger.setLevel(logging.INFO)
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))

    At this point logger doesn't have any format string and so it is
    missing %(prefix)s and %(suffix)s. It also doesn't have a
    :class:`PrefixAddingFilter` or :class:`SuffixAddingFilter` added.
    So using it in a :class:`LoggingContext` will not work.

    But we can still add a prefix or suffix by just wrapping it:

    >>> logger.info("TEST")

    >>> log = PrependingLogAdapter.wrap_logger('prefix> ', logger)
    """

    def process(self, msg, kwargs):
        # Prepend the adapter's configured prefix (if any) to each message.
        return f'{self.extra.get("prefix", "")}{msg}', kwargs

    # NOTE(review): presumably decorated @staticmethod in the full source
    # (no self parameter) -- confirm.
    def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LogAdapter that prepends
        a given string to every log message produced.

        Args:
            prefix: the message to prepend to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return PrependingLogAdapter(logger, {"prefix": prefix})
class AppendingLogAdapter(logging.LoggerAdapter):
    """:class:`LoggingContext` adds prefixes and suffixes using a
    logging.Filter that must insert "prefix" or "suffix" members into
    each log record by using :class:`PrefixAddingFilter` and
    :class:`SuffixAddingFilter`. This relies on the logging format
    string containing a %(prefix)s and a %(suffix)s to work correctly.

    This is an alternate approach that basically just wraps the logger
    in a class that has the same interface and thunks most calls down
    to the wrapped logger. It might be useful if you don't want to use
    :class:`LoggingContext` or its friends.

    >>> logger = logging.getLogger(__name__ + ".AppendingLogAdapter")
    >>> logger.setLevel(logging.INFO)
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))

    At this point logger doesn't have any format string and so it is
    missing %(prefix)s and %(suffix)s. It also doesn't have a
    :class:`PrefixAddingFilter` or :class:`SuffixAddingFilter` added.
    So using it in a :class:`LoggingContext` will not work.

    But we can still add a prefix or suffix by just wrapping it:

    >>> logger.info("TEST")

    >>> log = AppendingLogAdapter.wrap_logger('!!!', logger)
    """

    def process(self, msg, kwargs):
        # Append the adapter's configured suffix (if any) to each message.
        return f'{msg}{self.extra.get("suffix", "")}', kwargs

    # NOTE(review): presumably decorated @staticmethod in the full source
    # (no self parameter) -- confirm.
    def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LoggerAdapter that appends
        a given string to every log message produced.

        Args:
            suffix: the message to append to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return AppendingLogAdapter(logger, {"suffix": suffix})
class LoggingContext(contextlib.ContextDecorator):
    # NOTE(review): the `def __init__(self,` line, its closing `):`, and the
    # `if ...:` guards inside __init__/__enter__/__exit__ (which make level,
    # handlers, prefix and suffix all optional) appear elided in this view;
    # tokens below are reproduced as-is.
        logger: logging.Logger,
        level: Optional[int] = None,
        handlers: Optional[List[logging.Handler]] = None,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
        """This is a logging context that can be used to temporarily change the
        way we are logging within its scope. Logging changes may include:

            - Changing the logging level (e.g. from INFO to DEBUG)
            - Adding a prefix or suffix to every log message produced
            - Adding temporary Handlers to direct the logging output elsewhere

        Setup for doctest / examples. This will normally be taken care of
        by code in :meth:`initialize_logging` so you don't have to worry
        about it.

        >>> logging_format = "%(prefix)s%(message)s%(suffix)s"
        >>> logger = logging.getLogger(__name__ + ".LoggingContext")
        >>> logger.setLevel(logging.INFO)
        >>> handler = logging.StreamHandler(sys.stdout)
        >>> handler.setFormatter(
        ...     MillisecondAwareFormatter(
        ...         fmt=logging_format,
        >>> logger.addHandler(handler)
        >>> logger.addFilter(PrefixAddingFilter(None))
        >>> logger.addFilter(SuffixAddingFilter(None))

        First, this logger should currently be configured to send
        INFO+ messages to sys.stdout. Let's see it in action:

        >>> logger.info("Hello world!")
        >>> logger.debug("You should not see this")

        The first example is to simply change the level of the logger.
        Here we temporarily change it to DEBUG within the body of the
        :class:`LoggingContext`:

        >>> with LoggingContext(logger, level=logging.DEBUG):
        ...     logger.debug("You should see this now")
        ...     logger.info("Of course you should still see this too")
        You should see this now
        Of course you should still see this too

        >>> logger.debug("Outside of the context we are at INFO again")
        >>> logger.debug("(which is why you don't see these)")
        >>> logger.info("But you should see this at INFO level")
        But you should see this at INFO level

        The prefix and suffix argument prepend or append a message to
        all log output. To do this, you need %(prefix)s and
        %(suffix)s placeholders in your logger format string
        indicating where to insert the data. This is useful, for
        example, to add an active request identifier to the set of log
        messages produced while processing it.

        >>> logger.info("About to work on a new request")
        About to work on a new request

        >>> with LoggingContext(logger, prefix='10.0.0.13> '):
        ...     logger.info("Working on it now")
        10.0.0.13> Working on it now

        logger.info("Done with that request")
        Done with that request

        LoggingContext can also be used to add temporary handler(s).
        This code temporarily uses two stdout handlers to double the
        output for testing purposes but you could also temporarily,
        e.g., add a RotatingFileHandler or SysLogHandler etc...

        >>> with LoggingContext(logger, handlers=[logging.StreamHandler(sys.stdout)]):
        ...     logger.info("TEST")

        Once leaving the context, logger's behavior is restored. In
        this case, the extra handler is removed so output will not
        be doubled.

        >>> logger.info("OUTSIDE")

        LoggingContext can also be used as a decorator if that is more
        convenient:

        >>> @LoggingContext(logger, level=logging.DEBUG)
        ... def log_stuff(logger):
        ...     logger.debug("But inside, the decorator has changed us to DEBUG")

        >>> logger.debug("Outside, we're at INFO level and you don't see this")
        >>> log_stuff(logger)
        But inside, the decorator has changed us to DEBUG
        >>> logger.debug("And, of course, out here we're still at INFO afterwards")
        """
        # Remember no previous level until __enter__ saves one.
        self.old_level: Optional[int] = None
        self.handlers = handlers
        # Klobber=True: the context's prefix/suffix take precedence over any
        # already present on the record.
        self.prefix: Optional[logging.Filter] = PrefixAddingFilter(prefix, True)
        self.suffix: Optional[logging.Filter] = SuffixAddingFilter(suffix, True)

    def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
        # Save the old level so __exit__ can restore it.
        self.old_level = self.logger.level
        self.logger.setLevel(self.level)
        for handler in self.handlers:
            self.logger.addHandler(handler)
        self.logger.addFilter(self.prefix)
        self.logger.addFilter(self.suffix)

    def __exit__(self, et, ev, tb) -> None:
        # Undo everything __enter__ did, in reverse order.
        self.logger.removeFilter(self.suffix)
        self.logger.removeFilter(self.prefix)
        for handler in self.handlers:
            self.logger.removeHandler(handler)
        self.logger.setLevel(self.old_level)
        return None  # propagate exceptions out
class MillisecondAwareFormatter(logging.Formatter):
    """A formatter for adding milliseconds to log messages which, for
    whatever reason, the default Python logger doesn't do.

    You probably don't need to use this directly but it is
    wired in under :meth:`initialize_logging` so that the
    timestamps in log messages have millisecond level
    granularity.
    """

    # Use datetime's converter so we can render sub-second precision
    # (the logging default, time.localtime, cannot).
    converter = datetime.datetime.fromtimestamp  # type: ignore

    def formatTime(self, record, datefmt=None):
        """Render the record's creation time with millisecond precision.

        Args:
            record: the log record whose `.created` timestamp to render.
            datefmt: optional strftime format; when omitted the time is
                rendered as "YYYY-MM-DD HH:MM:SS,mmm".

        Returns:
            The formatted timestamp string.
        """
        # NOTE(review): timezone is hardcoded to US/Pacific -- confirm this
        # is intended rather than the machine's local timezone.
        ct = MillisecondAwareFormatter.converter(
            record.created, pytz.timezone("US/Pacific")
        )
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y-%m-%d %H:%M:%S")
            # Bug fix: the previous f-string used the printf-style spec
            # f"{record.msecs:%03d}" which is not a valid str.format spec
            # and raised ValueError at runtime. Use a real format spec
            # (msecs is a float; truncate to an int before zero-padding).
            s = f"{t},{int(record.msecs):03d}"
        return s
def _log_about_logging(
    # NOTE(review): additional parameters (at least `logger`, `fmt`, and the
    # syslog facility value referenced below), the closing `):` of this
    # signature, and several `logger.debug(` call openers appear elided in
    # this view; tokens below are reproduced as-is.
    default_logging_level,
    preexisting_handlers_count,
    """This is invoked automatically after logging is initialized such
    that the first messages in the log are about how logging itself
    was configured.
    """
    # Fall back to the numeric level when it has no registered name.
    level_name = logging._levelToName.get(
        default_logging_level, str(default_logging_level)
    logger.debug("Initialized global logging; logging level is %s.", level_name)
    config.config["logging_clear_preexisting_handlers"]
    and preexisting_handlers_count > 0
        "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
        preexisting_handlers_count,
    logger.debug('Logging format specification is "%s"', fmt)
    if config.config["logging_debug_threads"]:
            "...Logging format spec captures tid/pid. (--logging_debug_threads)"
    if config.config["logging_debug_modules"]:
            "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
    if config.config["logging_syslog"]:
            "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
    if config.config["logging_filename"]:
            'Logging to file "%s". (--logging_filename)',
            config.config["logging_filename"],
            "...with %d bytes max file size. (--logging_filename_maxsize)",
            config.config["logging_filename_maxsize"],
            "...and %d rotating backup file count. (--logging_filename_count)",
            config.config["logging_filename_count"],
    if config.config["logging_console"]:
        logger.debug("Logging to the console (stderr). (--logging_console)")
    if config.config["logging_info_is_print"]:
            "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
    if config.config["logging_squelch_repeats"]:
            "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
            "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
    if config.config["logging_probabilistically"]:
            "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
            "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
    if config.config["logging_captures_prints"]:
            "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
def _construct_logging_format() -> str:
    """Figure out the root logging format string based on commandline args.

    Returns:
        The format string for the root logger, always containing the
        %(prefix)s and %(suffix)s placeholders that
        :class:`PrefixAddingFilter` / :class:`SuffixAddingFilter` (and
        therefore :class:`LoggingContext`) rely upon.
    """
    # Custom or default --logging_format?
    if config.config["logging_format"]:
        fmt = config.config["logging_format"]
    elif config.config["logging_syslog"]:
        # No %(asctime)s here -- presumably because syslogd stamps
        # messages with its own timestamp.
        fmt = "%(levelname).1s:%(filename)s[%(process)d]: "
    else:
        fmt = "%(levelname).1s:%(asctime)s: "
    fmt += "%(prefix)s%(message)s%(suffix)s"

    # --logging_debug_threads and --logging_debug_modules both affect
    # the format by prepending information about the pid/tid or
    # file/function/line of the message's producer.
    if config.config["logging_debug_threads"]:
        fmt = f"%(process)d.%(thread)d|{fmt}"
    if config.config["logging_debug_modules"]:
        fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
    return fmt
def _construct_handlers_list(
    logging_format: str, facility_name: Optional[str]
) -> List[logging.Handler]:
    """Create the list of handlers to be added to the root logger
    based on commandline flags.
    """
    # NOTE(review): several lines (fmt= arguments to the formatters, closing
    # parens, and the trailing `return retval`) appear elided in this view;
    # tokens below are reproduced as-is.
    retval: List[logging.Handler] = []

    # --logging_syslog (optionally with --logging_syslog_facility)
    # sets up for logging to use the standard system syslogd as a
    # logging sink.
    if config.config["logging_syslog"]:
        # Windows has no syslog; skip silently there.
        if sys.platform not in ("win32", "cygwin"):
            # Unknown facility names fall back to LOG_USER.
            facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
            # NOTE(review): address="/dev/log" assumes a Linux-style local
            # syslog socket (macOS uses /var/run/syslog) -- confirm.
            syslog_handler = SysLogHandler(facility=facility, address="/dev/log")
            syslog_handler.setFormatter(
                MillisecondAwareFormatter(
                    datefmt=config.config["logging_date_format"],
            retval.append(syslog_handler)

    # --logging_filename (with friends --logging_filename_count and
    # --logging_filename_maxsize) set up logging to a file on the
    # filesystem with automatic rotation when it gets too big.
    if config.config["logging_filename"]:
        max_bytes = config.config["logging_filename_maxsize"]
        assert max_bytes and isinstance(max_bytes, int)
        backup_count = config.config["logging_filename_count"]
        assert backup_count and isinstance(backup_count, int)
        file_handler = RotatingFileHandler(
            config.config["logging_filename"],
            backupCount=backup_count,
        file_handler.setFormatter(
            MillisecondAwareFormatter(
                datefmt=config.config["logging_date_format"],
        retval.append(file_handler)

    # --logging_console is, ahem, logging to the console.
    if config.config["logging_console"]:
        console_handler = logging.StreamHandler(sys.stderr)
        console_handler.setFormatter(
            MillisecondAwareFormatter(
                datefmt=config.config["logging_date_format"],
        retval.append(console_handler)

    # --logging_info_is_print echoes any message to logger.info(x) as
    # a print statement on stdout.
    if config.config["logging_info_is_print"]:
        print_handler = logging.StreamHandler(sys.stdout)
        print_handler.addFilter(OnlyInfoFilter())
        retval.append(print_handler)

    # Ensure at least one handler exists so logging never warns about
    # "no handlers could be found".
    retval.append(logging.NullHandler())
def _add_logging_handlers_and_filters(
    logger: logging.Logger, handlers: List[logging.Handler]
    """Adds a list of handlers to the root logger."""
    # NOTE(review): the signature's closing `):` appears elided in this view.
    for handler in handlers:
        # These two are always present and set %(prefix) and %(suffix)
        # if appropriate (see :class:`LoggingContext`) or make them
        # empty strings so the format string still resolves.
        handler.addFilter(PrefixAddingFilter("", False))
        handler.addFilter(SuffixAddingFilter("", False))

        # --logging_squelch_repeats allows code to request repeat logging
        # messages (identical log site and message contents) to be
        # silenced. Logging code must request this explicitly, it isn't
        # automatic. This option just allows the silencing to happen.
        if config.config["logging_squelch_repeats"]:
            handler.addFilter(SquelchRepeatedMessagesFilter())

        # --logging_probabilistically allows code to request
        # non-deterministic logging where messages have some probability
        # of being produced. Logging code must request this explicitly.
        # This option just allows the non-deterministic behavior to
        # happen. Disabling it will cause every log message to be
        # produced.
        if config.config["logging_probabilistically"]:
            handler.addFilter(ProbabilisticFilter())
        logger.addHandler(handler)
def initialize_logging(logger=None) -> logging.Logger:
    """Initialize logging for the program. See module level comments
    for information about what functionality this provides and how to
    enable or disable functionality via the commandline.

    If you use the :meth:`bootstrap.initialize` decorator on your
    program's entry point, it will call this for you. See
    :meth:`pyutils.bootstrap.initialize` for more details.
    """
    # NOTE(review): several guard/assignment lines (an early `return logger`
    # after fileConfig, `facility_name = None`, the body of
    # print_and_also_log, and the function's final `return logger`) appear
    # elided in this view; tokens below are reproduced as-is.
    global LOGGING_INITIALIZED
    if LOGGING_INITIALIZED:
        # Idempotent: the second and subsequent calls are no-ops.
        return logging.getLogger()
    LOGGING_INITIALIZED = True

    clear_preexisting = config.config["logging_clear_preexisting_handlers"]
    preexisting_handlers_count = 0
    logging.basicConfig(force=clear_preexisting)
    logger = logging.getLogger()

    # --logging_clear_preexisting_handlers removes logging handlers
    # that were registered by global statements during imported module
    # setup.
    if clear_preexisting:
        while logger.hasHandlers():
            logger.removeHandler(logger.handlers[0])
            preexisting_handlers_count += 1

    # --logging_config_file pulls logging settings from a config file
    # skipping the rest of this setup.
    if config.config["logging_config_file"] is not None:
        fileConfig(config.config["logging_config_file"])

    # Global default logging level (--logging_level); messages below
    # this level will be silenced.
    logging_level = config.config["logging_level"]
    logging_level = logging_level.upper()
    default_logging_level = getattr(logging, logging_level, None)
    if not isinstance(default_logging_level, int):
        raise ValueError(f'Invalid level: {config.config["logging_level"]}')
    logger.setLevel(default_logging_level)

    logging_format = _construct_logging_format()
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            if config.config["logging_syslog_facility"]:
                # Map e.g. "USER" -> "LOG_USER", the SysLogHandler attr name.
                facility_name = "LOG_" + config.config["logging_syslog_facility"]
    handlers: List[logging.Handler] = _construct_handlers_list(
        logging_format, facility_name
    _add_logging_handlers_and_filters(logger, handlers)

    # --logging_captures_prints, if set, will capture and log.info
    # anything printed on stdout.
    if config.config["logging_captures_prints"]:

        def print_and_also_log(*arg, **kwarg):
            f = kwarg.get("file", None)
            # Chain to the saved builtin so output still appears.
            BUILT_IN_PRINT(*arg, **kwarg)

        builtins.print = print_and_also_log

    # At this point the logger is ready, handlers are set up,
    # etc... so log about the logging configuration.
        default_logging_level,
        preexisting_handlers_count,
def get_logger(name: str = ""):
    """Return the logger named `name` (the root logger by default),
    making sure global logging has been initialized first."""
    return initialize_logging(logging.getLogger(name))
class OutputMultiplexer(object):
    """A class that broadcasts printed messages to several sinks
    (including various logging levels, different files, different file
    handles, the house log, etc...). See also
    :class:`OutputMultiplexerContext` for an easy usage pattern.
    """

    class Destination(enum.IntEnum):
        """Bits in the destination_bitv bitvector. Used to indicate the
        output destination."""

        # NOTE(review): the LOG_INFO (presumably 0x02) and HLOG (presumably
        # 0x80, referenced in print() below) members appear elided in this
        # view -- confirm against the full source.
        LOG_DEBUG = 0x01  # ⎫
        LOG_WARNING = 0x04  # ⎬ Must provide logger to the c'tor.
        LOG_ERROR = 0x08  # ⎪
        LOG_CRITICAL = 0x10  # ⎭
        FILENAMES = 0x20  # Must provide a filename to the c'tor.
        FILEHANDLES = 0x40  # Must provide a handle to the c'tor.

        ALL_LOG_DESTINATIONS = (
            LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        # 0x8F == all log levels plus HLOG (but not files/handles).
        ALL_OUTPUT_DESTINATIONS = 0x8F

    # NOTE(review): the `def __init__(self,` line, its `logger` parameter,
    # and the guard lines pairing each optional argument with its required
    # destination bit appear elided in this view.
        destination_bitv: int,
        filenames: Optional[Iterable[str]] = None,
        handles: Optional[Iterable[io.TextIOWrapper]] = None,
        """
        Constructs the OutputMultiplexer instance.

        Args:
            destination_bitv: a bitvector where each bit represents an
                output destination. Multiple bits may be set.
            logger: if LOG_* bits are set, you must pass a logger here.
            filenames: if FILENAMES bit is set, this should be a list of
                files you'd like to output into. This code handles opening
                and closing said files.
            handles: if FILEHANDLES bit is set, this should be a list of
                already opened filehandles you'd like to output into. The
                handles will remain open after the scope of the multiplexer.
        """
        # Default to the root logger when none was provided.
        logger = logging.getLogger(None)
        self.logger = logger

        self.f: Optional[List[Any]] = None
        if filenames is not None:
            # Unbuffered binary mode so partial lines are flushed promptly.
            self.f = [open(filename, "wb", buffering=0) for filename in filenames]
            if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
                raise ValueError("Filenames argument is required if bitv & FILENAMES")

        self.h: Optional[List[Any]] = None
        if handles is not None:
            self.h = list(handles)
            if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
                raise ValueError("Handle argument is required if bitv & FILEHANDLES")

        # Validates the bitvector against the supplied sinks.
        self.set_destination_bitv(destination_bitv)

    def get_destination_bitv(self):
        """Where are we outputting?"""
        return self.destination_bitv

    def set_destination_bitv(self, destination_bitv: int):
        """Change the output destination_bitv to the one provided."""
        if destination_bitv & self.Destination.FILENAMES and self.f is None:
            raise ValueError("Filename argument is required if bitv & FILENAMES")
        if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
        self.destination_bitv = destination_bitv

    def print(self, *args, **kwargs):
        """Produce some output to all sinks."""
        from pyutils.string_utils import _sprintf, strip_escape_sequences

        end = kwargs.pop("end", None)
        if not isinstance(end, str):
            raise TypeError("end must be None or a string")
        sep = kwargs.pop("sep", None)
        if not isinstance(sep, str):
            raise TypeError("sep must be None or a string")
        raise TypeError("invalid keyword arguments to print()")
        # Render once; sinks below reuse this buffer.
        buf = _sprintf(*args, end="", sep=sep)
        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
            _.write(buf.encode("utf-8"))
        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
        # Strip ANSI color/escape codes before sending to the loggers.
        buf = strip_escape_sequences(buf)
        if self.logger is not None:
            if self.destination_bitv & self.Destination.LOG_DEBUG:
                self.logger.debug(buf)
            if self.destination_bitv & self.Destination.LOG_INFO:
                self.logger.info(buf)
            if self.destination_bitv & self.Destination.LOG_WARNING:
                self.logger.warning(buf)
            if self.destination_bitv & self.Destination.LOG_ERROR:
                self.logger.error(buf)
            if self.destination_bitv & self.Destination.LOG_CRITICAL:
                self.logger.critical(buf)
        if self.destination_bitv & self.Destination.HLOG:

        """Close all open files."""
        # NOTE(review): the `def close(self):` line and the loop closing
        # each file appear elided in this view.
        if self.f is not None:
class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
    """
    A context that uses an :class:`OutputMultiplexer`. e.g.::

        with OutputMultiplexerContext(
                OutputMultiplexer.LOG_INFO |
                OutputMultiplexer.LOG_DEBUG |
                OutputMultiplexer.FILENAMES |
                OutputMultiplexer.FILEHANDLES,
                filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
            ) as mplex:
            mplex.print("This is a log message!")
    """

    # NOTE(review): the `def __init__(self,` line, the logger/filenames/
    # handles parameters, and the `super().__init__(` opener appear elided
    # in this view; tokens below are reproduced as-is.
        destination_bitv: OutputMultiplexer.Destination,
        """
        Args:
            destination_bitv: a bitvector that indicates where we should
                send output. See :class:`OutputMultiplexer` for options.
            logger: optional logger to use for log destination messages.
            filenames: optional filenames to write for filename destination
                messages.
            handles: optional open filehandles to write for filehandle
                destination messages.
        """
            filenames=filenames,

    def __enter__(self):
        # NOTE(review): expected `return self` here -- confirm.

    def __exit__(self, etype, value, traceback) -> bool:
        # NOTE(review): expected cleanup (super().__exit__ / close) plus
        # exception re-raise handling here -- confirm against full source.
        if etype is not None:
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`. This is pretty hacky but used
    by a bunch of (my) code. Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    Args:
        message: the message to write to the house log.

    TODO: this needs to move.
    """
    import subprocess

    # Invoke logger with an argv list (shell=False) so no character in
    # `message` can ever be interpreted by a shell; this replaces the
    # previous os.system() call that relied on manual single-quote
    # escaping. Failures are ignored as before (os.system's return code
    # was never checked), hence check=False.
    subprocess.run(
        ["/usr/bin/logger", "-p", "local7.info", "--", message], check=False
    )
1201 if __name__ == "__main__":