2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level with :class:`LoggingContext`.
15 * Prepend or append a message to every log record also with
16 :class:`LoggingContext`.
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
33 * optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
37 * optionally append to system-wide records of non-zero exits
38 (:code:`--logging_non_zero_exits_record_path`) and unhandled exceptions
39 (:code:`--logging_unhandled_top_level_exceptions_record_path`) in
40 cooperation with :mod:`pyutils.bootstrap`.
41 * There are also :class:`LoggerAdapter` classes to implement prefix/suffix
42 functionality without using :class:`LoggingContext` by wrapping the
45 To use this functionality, call :meth:`initialize_logging` early
46 in your program entry point. If you use the
47 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
48 point, it will call this for you automatically.
60 from logging import FileHandler
61 from logging.config import fileConfig
62 from logging.handlers import RotatingFileHandler, SysLogHandler
63 from types import TracebackType
64 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
67 from overrides import overrides
69 # This module is commonly used by others in here and should avoid
70 # taking any unnecessary dependencies back on them.
71 from pyutils import argparse_utils, config, misc_utils
# Commandline configuration: registers every --logging_* flag with the
# shared pyutils.config mechanism so initialize_logging() can consult
# them later via config.config[...].
73 cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
75     "--logging_config_file",
76     type=argparse_utils.valid_filename,
79     help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
85     choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
87     help="The global default level below which to squelch log messages",
93     help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
96     "--logging_date_format",
98     default="%Y/%m/%dT%H:%M:%S.%f%z",
100     help="The format of any dates in --logging_format.",
104     action=argparse_utils.ActionNoYes,
106     help="Should we log to the console (stderr)",
109     "--logging_filename",
113     help="The filename of the logfile to write.",
116     "--logging_filename_maxsize",
117     type=argparse_utils.valid_byte_count,
118     default=(1024 * 1024),
120     help="The maximum size (in bytes) to write to the logging_filename.",
123     "--logging_filename_count",
127     help="The number of logging_filename copies to keep before deleting.",
131     action=argparse_utils.ActionNoYes,
133     help="Should we log to localhost's syslog.",
136     "--logging_syslog_facility",
162     metavar="SYSLOG_FACILITY_LIST",
163     help="The default syslog message facility identifier",
166     "--logging_debug_threads",
167     action=argparse_utils.ActionNoYes,
169     help="Should we prepend pid/tid data to all log messages?",
172     "--logging_debug_modules",
173     action=argparse_utils.ActionNoYes,
175     help="Should we prepend module/function data to all log messages?",
178     "--logging_info_is_print",
179     action=argparse_utils.ActionNoYes,
181     help="logging.info also prints to stdout.",
184     "--logging_squelch_repeats",
185     action=argparse_utils.ActionNoYes,
187     help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
190     "--logging_probabilistically",
191     action=argparse_utils.ActionNoYes,
193     help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
195 # See also: OutputMultiplexer
197     "--logging_captures_prints",
198     action=argparse_utils.ActionNoYes,
200     help="When calling print, also log.info automatically.",
203     "--logging_clear_preexisting_handlers",
204     action=argparse_utils.ActionNoYes,
    # NOTE(review): two typos in the user-visible help text below --
    # "insist that is alone" should read "insist that it alone" and
    # "levels you might now want" should read "might not want".  Fixing
    # them changes runtime strings, so they are only flagged here.
207     "Should logging code clear preexisting global logging handlers and thus insist that is "
208     + "alone can add handlers. Use this to work around annoying modules that insert global "
209     + "handlers with formats and logging levels you might now want. Caveat emptor, this may "
210     + "cause you to miss logging messages."
214     "--logging_non_zero_exits_record_path",
216     default="/var/log/abnormal_python_exits.log",
218     help="The filename in which to record non-zero exits.",
221     "--logging_unhandled_top_level_exceptions_record_path",
223     default="/var/log/abnormal_python_exits.log",
225     help="The filename in which to record unhandled top level exceptions.",
# Keep a reference to the real builtin print so the
# --logging_captures_prints replacement installed by
# initialize_logging() can still delegate to it.
228 BUILT_IN_PRINT = print
# One-shot guard: initialize_logging() checks/sets this so its setup
# work happens at most once per process.
229 LOGGING_INITIALIZED = False
232 # A map from logging_callsite_id -> count of logged messages.
# Written by @squelch_repeated_log_messages; read by
# SquelchRepeatedMessagesFilter.filter().
233 squelched_logging_counts: Dict[str, int] = {}
236 def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
238     A decorator that marks a function as interested in having the logging
239     messages that it produces be squelched (ignored) after it logs the
240     same message more than N times.
244         This decorator affects *ALL* logging messages produced
245         within the decorated function. That said, messages must be
246         identical in order to be squelched. For example, if the same line
247         of code produces different messages (because of, e.g., a format
248         string), the messages are considered to be different.
250     An example of this from the pyutils code itself can be found in
251     :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg` methods::
253         @logging_utils.squelch_repeated_log_messages(1)
255             name: Optional[str] = "",
256             red: Optional[int] = None,
257             green: Optional[int] = None,
258             blue: Optional[int] = None,
260             force_16color: bool = False,
261             force_216color: bool = False,
265     These methods log stuff like "Using 24-bit color strategy" which
266     gets old really fast and fills up the logs. By decorating the methods
267     with :code:`@logging_utils.squelch_repeated_log_messages(1)` the code
268     is requesting that its logged messages be dropped silently after the
269     first one is produced (note the argument 1).
271     Users can insist that all logged messages always be reflected in the
272     logs using the :code:`--no_logging_squelch_repeats` flag but the default
273     behavior is to allow code to request it be squelched.
275     :code:`--logging_squelch_repeats` only affects code with this decorator
276     on it; it ignores all other code.
279         squelch_after_n_repeats: the number of repeated messages allowed to
280             log before subsequent messages are silently dropped.
283     def squelch_logging_wrapper(f: Callable):
        # Imported here rather than at the top of the module -- see the
        # module-level note about avoiding dependencies back onto other
        # pyutils modules.
284         from pyutils import function_utils
286         identifier = function_utils.function_identifier(f)
288         # Get rid of module paths, e.g. pyutils.ansi:bg -> ansi:bg which
289         # is what we're going to need below.
290         identifier = re.sub(r"[^\.]+\.", "", identifier)
        # Register this site's squelch threshold; consulted later by
        # SquelchRepeatedMessagesFilter.filter().
291         squelched_logging_counts[identifier] = squelch_after_n_repeats
294     return squelch_logging_wrapper
297 class SquelchRepeatedMessagesFilter(logging.Filter):
298     """A filter that only logs messages from a given site with the same
299     (exact) message at the same logging level N times and ignores
300     subsequent attempts to log.
302     This filter only affects logging messages that repeat more than a
303     threshold number of times from functions that are tagged with the
304     :code:`@logging_utils.squelch_repeated_log_messages` decorator (see above);
305     all others are ignored.
307     This functionality is enabled by default but can be disabled via
308     the :code:`--no_logging_squelch_repeats` commandline flag.
311     def __init__(self) -> None:
        # Per message-identity counter of how many times each exact
        # message has been seen so far.
313         self.counters: collections.Counter = collections.Counter()
316     def filter(self, record: logging.LogRecord) -> bool:
317         """Should we drop this log message?"""
        # "module:funcName" matches the key format registered by the
        # @squelch_repeated_log_messages decorator.
318         id1 = f"{record.module}:{record.funcName}"
319         if id1 not in squelched_logging_counts:
321         threshold = squelched_logging_counts[id1]
        # Full identity of a message: path + line + level + literal text.
322         logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
323         count = self.counters[logsite]
324         self.counters[logsite] += 1
        # True -> keep the record; False -> squelch it.
325         return count < threshold
328 # A map from function_identifier -> probability of logging (0.0%..100.0%)
# Written by the @logging_is_probabilistic decorator; read by
# ProbabilisticFilter.filter().
329 probabilistic_logging_levels: Dict[str, float] = {}
332 def logging_is_probabilistic(probability_of_logging: float) -> Callable:
333     """A decorator that indicates that all logging statements within the
334     scope of a particular (marked via decorator) function are not
335     deterministic (i.e. they do not always unconditionally log) but rather
336     are probabilistic (i.e. they log N% of the time, randomly) when the
337     user passes the :code:`--logging_probabilistically` commandline flag
338     (which is enabled by default).
342         This affects *ALL* logging statements within the marked function.
343         If you want it to only affect a subset of logging statements,
344         log those statements in a separate function that you invoke
345         from within the "too large" scope and mark that separate function
346         with the :code:`logging_is_probabilistic` decorator instead.
348         That this functionality can be disabled (forcing all logged
349         messages to produce output) via the
350         :code:`--no_logging_probabilistically` cmdline argument.
353     def probabilistic_logging_wrapper(f: Callable):
        # Imported here rather than at the top of the module -- see the
        # module-level note about avoiding dependencies back onto other
        # pyutils modules.
354         from pyutils import function_utils
356         identifier = function_utils.function_identifier(f)
        # Register this function's logging probability; consulted later
        # by ProbabilisticFilter.filter().
357         probabilistic_logging_levels[identifier] = probability_of_logging
360     return probabilistic_logging_wrapper
363 class ProbabilisticFilter(logging.Filter):
365     A filter that logs messages probabilistically (i.e. randomly at some
366     percent chance). This filter is used with a decorator (see
367     :meth:`logging_is_probabilistic`) to implement the
368     :code:`--logging_probabilistically` commandline flag.
370     This filter only affects logging messages from functions that have
371     been tagged with the `@logging_utils.logging_is_probabilistic` decorator.
375     def filter(self, record: logging.LogRecord) -> bool:
376         """Should the message be logged?"""
377         identifier = f"{record.module}:{record.funcName}"
        # Functions with no registered probability default to 100.0,
        # i.e. they always log.
378         threshold = probabilistic_logging_levels.get(identifier, 100.0)
379         return misc_utils.execute_probabilistically(threshold)
382 class OnlyInfoFilter(logging.Filter):
383     """A filter that only logs messages produced at the INFO logging
384     level. This is used by the :code:`--logging_info_is_print`
385     commandline option to select a subset of the logging stream to
386     send to a stdout handler.
390     def filter(self, record: logging.LogRecord):
        # Note: exactly INFO, not "INFO and above".
391         return record.levelno == logging.INFO
394 class PrefixAddingFilter(logging.Filter):
395     """A filter that adds a string prefix to the log record for the
396     formatter to later fill in. Requires a %(prefix)s in the format
400     def __init__(self, prefix: str, klobber: bool = False):
403             prefix: the prefix string to add
404             klobber: should we overwrite other prefixes?
411         self.klobber = klobber
414     def filter(self, record: logging.LogRecord):
        # Overwrite any prefix already present (presumably guarded on
        # self.klobber; the guard line is elided in this view)...
416             record.prefix = self.prefix
        # ...otherwise only fill it in when nothing has set one yet.
417         elif "prefix" not in record.__dict__:
418             record.prefix = self.prefix
422 class SuffixAddingFilter(logging.Filter):
423     """A filter that adds a string suffix to the log record for the
424     formatter to later fill in. Requires a %(suffix)s in the format
428     def __init__(self, suffix: str, klobber: bool = False):
431             suffix: the suffix string to add
432             klobber: should we overwrite other suffixes?
439         self.klobber = klobber
442     def filter(self, record: logging.LogRecord):
        # Overwrite any suffix already present (presumably guarded on
        # self.klobber; the guard line is elided in this view)...
444             record.suffix = self.suffix
        # ...otherwise only fill it in when nothing has set one yet.
445         elif "suffix" not in record.__dict__:
446             record.suffix = self.suffix
450 class PrependingLogAdapter(logging.LoggerAdapter):
451     """:class:`LoggingContext` adds prefixes and suffixes using a
452     logging.Filter that must insert "prefix" or "suffix" members into
453     each log record by using :class:`PrefixAddingFilter` and
454     :class:`SuffixAddingFilter`. This relies on the logging format
455     string containing a %(prefix)s and a %(suffix)s to work correctly.
457     This is an alternate approach that basically just wraps the logger
458     in a class that has the same interface and thunks most calls down
459     to the wrapped logger. It might be useful if you don't want to use
460     :class:`LoggingContext` or its friends.
462     >>> logger = logging.getLogger(__name__ + ".PrependingLogAdapter")
463     >>> logger.setLevel(logging.INFO)
464     >>> logger.addHandler(logging.StreamHandler(sys.stdout))
466     At this point logger doesn't have any format string and so it is
467     missing %(prefix)s and %(suffix)s. It also doesn't have a
468     :class:`PrefixAddingFilter` or :class:`SuffixAddingFilter` added.
469     So using it in a :class:`LoggingContext` will not work.
471     But we can still add a prefix or suffix by just wrapping it:
473     >>> logger.info("TEST")
476     >>> log = PrependingLogAdapter.wrap_logger('prefix> ', logger)
    # LoggerAdapter hook invoked for every message: prepend the
    # configured prefix (if any) to the message text.
481     def process(self, msg, kwargs):
482         return f'{self.extra.get("prefix", "")}{msg}', kwargs
485     def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
486         """Helper method around the creation of a LogAdapter that prepends
487         a given string to every log message produced.
490             prefix: the message to prepend to every log message.
491             logger: the logger whose messages to modify.
494             A new logger wrapping the old one with the given behavior.
495             The old logger will continue to behave as usual; simply drop
496             the reference to this wrapper when it's no longer needed.
499         return PrependingLogAdapter(logger, {"prefix": prefix})
502 class AppendingLogAdapter(logging.LoggerAdapter):
503     """:class:`LoggingContext` adds prefixes and suffixes using a
504     logging.Filter that must insert "prefix" or "suffix" members into
505     each log record by using :class:`PrefixAddingFilter` and
506     :class:`SuffixAddingFilter`. This relies on the logging format
507     string containing a %(prefix)s and a %(suffix)s to work correctly.
509     This is an alternate approach that basically just wraps the logger
510     in a class that has the same interface and thunks most calls down
511     to the wrapped logger. It might be useful if you don't want to use
512     :class:`LoggingContext` or its friends.
514     >>> logger = logging.getLogger(__name__ + ".AppendingLogAdapter")
515     >>> logger.setLevel(logging.INFO)
516     >>> logger.addHandler(logging.StreamHandler(sys.stdout))
518     At this point logger doesn't have any format string and so it is
519     missing %(prefix)s and %(suffix)s. It also doesn't have a
520     :class:`PrefixAddingFilter` or :class:`SuffixAddingFilter` added.
521     So using it in a :class:`LoggingContext` will not work.
523     But we can still add a prefix or suffix by just wrapping it:
525     >>> logger.info("TEST")
528     >>> log = AppendingLogAdapter.wrap_logger('!!!', logger)
    # LoggerAdapter hook invoked for every message: append the
    # configured suffix (if any) to the message text.
533     def process(self, msg, kwargs):
534         return f'{msg}{self.extra.get("suffix", "")}', kwargs
537     def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
538         """Helper method around the creation of a LoggerAdapter that appends
539         a given string to every log message produced.
542             suffix: the message to append to every log message.
543             logger: the logger whose messages to modify.
546             A new logger wrapping the old one with the given behavior.
547             The old logger will continue to behave as usual; simply drop
548             the reference to this wrapper when it's no longer needed.
551         return AppendingLogAdapter(logger, {"suffix": suffix})
554 class LoggingContext(contextlib.ContextDecorator):
557         logger: logging.Logger,
559         level: Optional[int] = None,
560         handlers: Optional[List[logging.Handler]] = None,
561         prefix: Optional[str] = None,
562         suffix: Optional[str] = None,
564         """This is a logging context that can be used to temporarily change the
565         way we are logging within its scope. Logging changes may include:
567         - Changing the logging level (e.g. from INFO to DEBUG)
568         - Adding a prefix or suffix to every log message produced
569         - Adding temporary Handlers to direct the logging output elsewhere
571         Setup for doctest / examples. This will normally be taken care of
572         by code in :meth:`initialize_logging` so you don't have to worry
575         >>> logging_format = "%(prefix)s%(message)s%(suffix)s"
576         >>> logger = logging.getLogger(__name__ + ".LoggingContext")
577         >>> logger.setLevel(logging.INFO)
578         >>> handler = logging.StreamHandler(sys.stdout)
579         >>> handler.setFormatter(
580         ...     MillisecondAwareFormatter(
581         ...         fmt=logging_format,
585         >>> logger.addHandler(handler)
586         >>> logger.addFilter(PrefixAddingFilter(None))
587         >>> logger.addFilter(SuffixAddingFilter(None))
589         First, this logger should be currently be configured to send
590         INFO+ messages to sys.stdout. Let's see it in action:
592         >>> logger.info("Hello world!")
594         >>> logger.debug("You should not see this")
596         The first example is to simply change the level of the logger.
597         Here we temporarily change it to DEBUG within the body of the
598         :class:`LoggingContext`:
600         >>> with LoggingContext(logger, level=logging.DEBUG):
601         ...     logger.debug("You should see this now")
602         ...     logger.info("Of course you should still see this too")
603         You should see this now
604         Of course you should still see this too
606         >>> logger.debug("Outside of the context we are at INFO again")
607         >>> logger.debug("(which is why you don't see these)")
608         >>> logger.info("But you should see this at INFO level")
609         But you should see this at INFO level
611         The prefix and suffix argument prepend or append a message to
612         all log output. To do this, you need %(prefix)s and
613         %(suffix)s placeholders in your logger format string
614         indicating where to insert the data. This is useful, for
615         example, to add an active request identifier to the set of log
616         messages produced while processing it.
618         >>> logger.info("About to work on a new request")
619         About to work on a new request
621         >>> with LoggingContext(logger, prefix='10.0.0.13> '):
622         ...     logger.info("Working on it now")
623         10.0.0.13> Working on it now
625         >>> logger.info("Done with that request")
626         Done with that request
628         LoggingContext can also be used to add temporary handler(s).
629         This code temporarily uses two stdout handlers to double the
630         output for testing purposes but you could also temporarily,
631         e.g., add a RotatingFileHandler or SysLogHandler etc...
633         >>> with LoggingContext(logger, handlers=[logging.StreamHandler(sys.stdout)]):
634         ...     logger.info("TEST")
638         Once leaving the context, logger's behavior is restored. In
639         this case, the extra handler is removed so output will not
642         >>> logger.info("OUTSIDE")
645         LoggingContext can also be used as a decorator if that is more
648         >>> @LoggingContext(logger, level=logging.DEBUG)
649         ... def log_stuff(logger):
650         ...     logger.debug("But inside, the decorator has changed us to DEBUG")
652         >>> logger.debug("Outside, we're at INFO level and you don't see this")
653         >>> log_stuff(logger)
654         But inside, the decorator has changed us to DEBUG
655         >>> logger.debug("And, of course, out here we're still at INFO afterwards")
658             logger: the logger on which to operate
659             level: the new level to set for the duration of the context
660             handlers: additional handlers to add for the duration of the context
661             prefix: the prefix string to set for the duration of the context
662             suffix: the suffix string to set for the duration of the context
        # Previous level is captured in __enter__ and restored in __exit__.
670         self.old_level: Optional[int] = None
671         self.handlers = handlers
        # klobber=True: while this context is active its prefix/suffix
        # overwrite any set by an enclosing context or filter.
673             self.prefix: Optional[logging.Filter] = PrefixAddingFilter(prefix, True)
677             self.suffix: Optional[logging.Filter] = SuffixAddingFilter(suffix, True)
681     def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
        # Apply the requested temporary changes: level, handlers, filters.
684             self.old_level = self.logger.level
685             self.logger.setLevel(self.level)
688             for handler in self.handlers:
689                 self.logger.addHandler(handler)
692             self.logger.addFilter(self.prefix)
695             self.logger.addFilter(self.suffix)
698     def __exit__(self, et, ev, tb) -> None:
        # Undo everything __enter__ did, in reverse order.
700             self.logger.removeFilter(self.suffix)
703             self.logger.removeFilter(self.prefix)
706             for handler in self.handlers:
707                 self.logger.removeHandler(handler)
711             self.logger.setLevel(self.old_level)
712         return None  # propagate exceptions out
715 class MillisecondAwareFormatter(logging.Formatter):
716     """A formatter for adding milliseconds to log messages which, for
717     whatever reason, the default Python logger doesn't do.
721         You probably don't need to use this directly but it is
722         wired in under :meth:`initialize_logging` so that the
723         timestamps in log messages have millisecond level
    # Converts record.created (an epoch float) into a datetime.
    # NOTE(review): the timezone below is hardcoded to US/Pacific rather
    # than using the machine's local zone -- confirm this is intentional.
727     converter = datetime.datetime.fromtimestamp  # type: ignore
730     def formatTime(self, record, datefmt=None):
731         ct = MillisecondAwareFormatter.converter(
732             record.created, pytz.timezone("US/Pacific")
735             s = ct.strftime(datefmt)
737             t = ct.strftime("%Y-%m-%d %H:%M:%S")
        # NOTE(review): "%03d" is not a valid str.format() spec for a
        # float (record.msecs is a float), so this line looks like it
        # raises ValueError whenever no datefmt is supplied.  Probably
        # intended f"{t},{int(record.msecs):03d}" -- confirm and fix.
738             s = f"{t},{record.msecs:%03d}"
742 def _log_about_logging(
744     default_logging_level,
745     preexisting_handlers_count,
749     """This is invoked automatically after logging is initialized such
750     that the first messages in the log are about how logging itself
    # NOTE(review): logging._levelToName is a private CPython detail;
    # logging.getLevelName() is the public equivalent -- consider switching.
753     level_name = logging._levelToName.get(
754         default_logging_level, str(default_logging_level)
756     logger.debug("Initialized global logging; logging level is %s.", level_name)
    # Each enabled --logging_* feature gets one debug line below so the
    # head of the log documents the logging configuration itself.
758         config.config["logging_clear_preexisting_handlers"]
759         and preexisting_handlers_count > 0
762         "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
763         preexisting_handlers_count,
765     logger.debug('Logging format specification is "%s"', fmt)
766     if config.config["logging_debug_threads"]:
768         "...Logging format spec captures tid/pid. (--logging_debug_threads)"
770     if config.config["logging_debug_modules"]:
772         "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
774     if config.config["logging_syslog"]:
776         "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
779     if config.config["logging_filename"]:
781         'Logging to file "%s". (--logging_filename)',
782         config.config["logging_filename"],
785         "...with %d bytes max file size. (--logging_filename_maxsize)",
786         config.config["logging_filename_maxsize"],
789         "...and %d rotating backup file count. (--logging_filename_count)",
790         config.config["logging_filename_count"],
792     if config.config["logging_console"]:
793         logger.debug("Logging to the console (stderr). (--logging_console)")
794     if config.config["logging_info_is_print"]:
796         "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
798     if config.config["logging_squelch_repeats"]:
800         "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
804         "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
806     if config.config["logging_probabilistically"]:
808         "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
812         "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
814     if config.config["logging_captures_prints"]:
816         "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
820 def _construct_logging_format() -> str:
821     """Figure out the root logging format string based on commandline args."""
823     # Custom or default --logging_format?
824     if config.config["logging_format"]:
825         fmt = config.config["logging_format"]
826     elif config.config["logging_syslog"]:
        # The syslog variant omits %(asctime)s (presumably because
        # syslogd stamps messages itself -- confirm).
827         fmt = "%(levelname).1s:%(filename)s[%(process)d]: "
829         fmt = "%(levelname).1s:%(asctime)s: "
    # Placeholders filled in by PrefixAddingFilter / SuffixAddingFilter
    # (and thus by LoggingContext).
831     fmt += "%(prefix)s%(message)s%(suffix)s"
833     # --logging_debug_threads and --logging_debug_modules both affect
834     # the format by prepending information about the pid/tid or
836     if config.config["logging_debug_threads"]:
837         fmt = f"%(process)d.%(thread)d|{fmt}"
838     if config.config["logging_debug_modules"]:
839         fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
843 def _construct_handlers_list(
844     logging_format: str, facility_name: Optional[str]
845 ) -> List[logging.Handler]:
846     """Create the list of handlers to be added to the root logger
847     based on commandline flags.
850     retval: List[logging.Handler] = []
852     # --logging_syslog (optionally with --logging_syslog_facility)
853     # sets up for logging to use the standard system syslogd as a
855     if config.config["logging_syslog"]:
        # Syslog is unavailable on Windows platforms.
856         if sys.platform not in ("win32", "cygwin"):
            # Unknown facility names silently fall back to LOG_USER.
858             facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
            # NOTE(review): "/dev/log" is the Linux syslog socket; on
            # macOS the socket is /var/run/syslog -- confirm portability
            # requirements.
860             syslog_handler = SysLogHandler(facility=facility, address="/dev/log")
861             syslog_handler.setFormatter(
862                 MillisecondAwareFormatter(
864                     datefmt=config.config["logging_date_format"],
867             retval.append(syslog_handler)
869     # --logging_filename (with friends --logging_filename_count and
870     # --logging_filename_maxsize) set up logging to a file on the
871     # filesystem with automatic rotation when it gets too big.
872     if config.config["logging_filename"]:
873         max_bytes = config.config["logging_filename_maxsize"]
        # NOTE(review): assert is stripped under "python -O"; consider
        # raising ValueError for these argument checks instead.
874         assert max_bytes and isinstance(max_bytes, int)
875         backup_count = config.config["logging_filename_count"]
876         assert backup_count and isinstance(backup_count, int)
877         file_handler = RotatingFileHandler(
878             config.config["logging_filename"],
880             backupCount=backup_count,
882         file_handler.setFormatter(
883             MillisecondAwareFormatter(
885                 datefmt=config.config["logging_date_format"],
888         retval.append(file_handler)
890     # --logging_console is, ahem, logging to the console.
891     if config.config["logging_console"]:
892         console_handler = logging.StreamHandler(sys.stderr)
893         console_handler.setFormatter(
894             MillisecondAwareFormatter(
896                 datefmt=config.config["logging_date_format"],
899         retval.append(console_handler)
901     # --logging_info_is_print echoes any message to logger.info(x) as
902     # a print statement on stdout.
903     if config.config["logging_info_is_print"]:
904         print_handler = logging.StreamHandler(sys.stdout)
905         print_handler.addFilter(OnlyInfoFilter())
906         retval.append(print_handler)
    # Fallback (the guard line is elided in this view): ensure at least
    # one handler exists so the logging module doesn't complain.
909         retval.append(logging.NullHandler())
913 def _add_logging_handlers_and_filters(
914     logger: logging.Logger, handlers: List[logging.Handler]
916     """Adds a list of handlers to the root logger."""
918     for handler in handlers:
919         # These two are always present and set %(prefix) and %(suffix)
920         # if appropriate (see :class:`LoggingContext`) or make them
        # klobber=False: don't stomp on a prefix/suffix some other
        # filter (e.g. an active LoggingContext) already installed.
922         handler.addFilter(PrefixAddingFilter("", False))
923         handler.addFilter(SuffixAddingFilter("", False))
925         # --logging_squelch_repeats allows code to request repeat logging
926         # messages (identical log site and message contents) to be
927         # silenced. Logging code must request this explicitly, it isn't
928         # automatic. This option just allows the silencing to happen.
929         if config.config["logging_squelch_repeats"]:
930             handler.addFilter(SquelchRepeatedMessagesFilter())
932         # --logging_probabilistically allows code to request
933         # non-deterministic logging where messages have some probability
934         # of being produced. Logging code must request this explicitly.
935         # This option just allows the non-deterministic behavior to
936         # happen. Disabling it will cause every log message to be
938         if config.config["logging_probabilistically"]:
939             handler.addFilter(ProbabilisticFilter())
940         logger.addHandler(handler)
943 def initialize_logging(logger=None) -> logging.Logger:
944     """Initialize logging for the program. See module level comments
945     for information about what functionality this provides and how to
946     enable or disable functionality via the commandline.
949     :meth:`bootstrap.initialize` decorator on your program's entry point,
950     it will call this for you. See :meth:`pyutils.bootstrap.initialize`
954         ValueError: if logging level is invalid
    # Idempotent: only the first call per process does any work.
958     global LOGGING_INITIALIZED
959     if LOGGING_INITIALIZED:
960         return logging.getLogger()
961     LOGGING_INITIALIZED = True
963     clear_preexisting = config.config["logging_clear_preexisting_handlers"]
964     preexisting_handlers_count = 0
966     logging.basicConfig(force=clear_preexisting)
    # Note: the passed-in logger argument is discarded; all setup is
    # done on the root logger.
967     logger = logging.getLogger()
969     # --logging_clear_preexisting_handlers removes logging handlers
970     # that were registered by global statements during imported module
972     if clear_preexisting:
973         while logger.hasHandlers():
974             logger.removeHandler(logger.handlers[0])
975             preexisting_handlers_count += 1
977     # --logging_config_file pulls logging settings from a config file
978     # skipping the rest of this setup.
979     if config.config["logging_config_file"] is not None:
980         fileConfig(config.config["logging_config_file"])
983     # Global default logging level (--logging_level); messages below
984     # this level will be silenced.
985     logging_level = config.config["logging_level"]
987     logging_level = logging_level.upper()
988     default_logging_level = getattr(logging, logging_level, None)
989     if not isinstance(default_logging_level, int):
990         raise ValueError(f'Invalid level: {config.config["logging_level"]}')
991     logger.setLevel(default_logging_level)
993     logging_format = _construct_logging_format()
    # Translate --logging_syslog_facility ("USER") into the SysLogHandler
    # attribute name ("LOG_USER") looked up by _construct_handlers_list().
    # NOTE(review): facility_name looks like it could be referenced
    # before assignment when these conditions are false -- in the full
    # file it is presumably initialized earlier; confirm.
995     if config.config["logging_syslog"]:
996         if sys.platform not in ("win32", "cygwin"):
997             if config.config["logging_syslog_facility"]:
998                 facility_name = "LOG_" + config.config["logging_syslog_facility"]
999     handlers: List[logging.Handler] = _construct_handlers_list(
1000         logging_format, facility_name
1002     _add_logging_handlers_and_filters(logger, handlers)
1004     # --logging_captures_prints, if set, will capture and log.info
1005     # anything printed on stdout.
1006     if config.config["logging_captures_prints"]:
1009         def print_and_also_log(*arg, **kwarg):
1010             f = kwarg.get("file", None)
            # Presumably messages printed to stderr are logged as
            # warnings (the guard line is elided in this view).
1012                 logger.warning(*arg)
1015             BUILT_IN_PRINT(*arg, **kwarg)
        # Swap the builtin so that every print() also logs.
1017         builtins.print = print_and_also_log
1019     # At this point the logger is ready, handlers are set up,
1020     # etc... so log about the logging configuration.
1023         default_logging_level,
1024         preexisting_handlers_count,
def get_logger(name: str = ""):
    """Fetch the logger registered under ``name`` (the root logger by
    default), ensuring that global logging has been initialized first.
    """
    return initialize_logging(logging.getLogger(name))
class OutputMultiplexer(object):
    """A class that broadcasts printed messages to several sinks
    (including various logging levels, different files, different file
    handles, the house log, etc...). See also
    :class:`OutputMultiplexerContext` for an easy usage pattern.
    """

    class Destination(enum.IntEnum):
        """Bits in the destination_bitv bitvector. Used to indicate the
        output destination."""

        # NOTE(review): the excerpt reviewed was elided; LOG_INFO and HLOG
        # are referenced below (ALL_LOG_DESTINATIONS / print) so their
        # values 0x02 / 0x80 are reconstructed -- confirm against the full
        # source.
        # fmt: off
        LOG_DEBUG = 0x01     # ⎫
        LOG_INFO = 0x02      # ⎪
        LOG_WARNING = 0x04   # ⎬ Must provide logger to the c'tor.
        LOG_ERROR = 0x08     # ⎪
        LOG_CRITICAL = 0x10  # ⎭
        FILENAMES = 0x20     # Must provide a filename to the c'tor.
        FILEHANDLES = 0x40   # Must provide a handle to the c'tor.
        HLOG = 0x80          # Send to the house log (see hlog()).
        # fmt: on
        ALL_LOG_DESTINATIONS = (
            LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        )
        ALL_OUTPUT_DESTINATIONS = 0x8F

    def __init__(
        self,
        destination_bitv: int,
        *,
        logger=None,
        filenames: Optional[Iterable[str]] = None,
        handles: Optional[Iterable[io.TextIOWrapper]] = None,
    ):
        """
        Constructs the OutputMultiplexer instance.

        Args:
            destination_bitv: a bitvector where each bit represents an
                output destination. Multiple bits may be set.
            logger: if LOG_* bits are set, you must pass a logger here.
                If None, the root logger is used.
            filenames: if FILENAMES bit is set, this should be a list of
                files you'd like to output into. This code handles opening
                and closing said files.
            handles: if FILEHANDLES bit is set, this should be a list of
                already opened filehandles you'd like to output into. The
                handles will remain open after the scope of the multiplexer.

        Raises:
            ValueError: invalid combination of arguments (e.g. the filenames
                argument is present but the filenames bit isn't set, the handle
                argument is present but the handles bit isn't set, etc...)
        """
        if logger is None:
            logger = logging.getLogger(None)
        self.logger = logger

        self.f: Optional[List[Any]] = None
        if filenames is not None:
            # Unbuffered binary mode so output is visible immediately.
            self.f = [open(filename, "wb", buffering=0) for filename in filenames]
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
                raise ValueError("Filenames argument is required if bitv & FILENAMES")

        self.h: Optional[List[Any]] = None
        if handles is not None:
            self.h = list(handles)
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
                raise ValueError("Handle argument is required if bitv & FILEHANDLES")

        self.set_destination_bitv(destination_bitv)

    def get_destination_bitv(self):
        """Where are we outputting?"""
        return self.destination_bitv

    def set_destination_bitv(self, destination_bitv: int):
        """Change the output destination_bitv to the one provided.

        Args:
            destination_bitv: the new destination bitvector to set.

        Raises:
            ValueError: invalid combination of arguments (e.g. the filenames
                argument is present but the filenames bit isn't set, the handle
                argument is present but the handles bit isn't set, etc...)
        """
        if destination_bitv & self.Destination.FILENAMES and self.f is None:
            raise ValueError("Filename argument is required if bitv & FILENAMES")
        if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
        self.destination_bitv = destination_bitv

    def print(self, *args, **kwargs):
        """Produce some output to all sinks. Use the same arguments as the
        print-builtin.

        Raises:
            TypeError: Illegal argument types encountered
        """
        from pyutils.string_utils import _sprintf, strip_escape_sequences

        end = kwargs.pop("end", None)
        if end is not None:
            if not isinstance(end, str):
                raise TypeError("end must be None or a string")
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        buf = _sprintf(*args, end="", sep=sep)
        if end is None:
            end = "\n"
        # NOTE(review): end-handling reconstructed from an elided excerpt --
        # a trailing newline is appended when end is (or defaults to) "\n".
        if end == "\n":
            buf += "\n"
        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
            for _ in self.f:
                _.write(buf.encode("utf-8"))
                _.flush()

        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
            for _ in self.h:
                _.write(buf)
                _.flush()

        # Log sinks get the message with ANSI escape sequences removed.
        buf = strip_escape_sequences(buf)
        if self.logger is not None:
            if self.destination_bitv & self.Destination.LOG_DEBUG:
                self.logger.debug(buf)
            if self.destination_bitv & self.Destination.LOG_INFO:
                self.logger.info(buf)
            if self.destination_bitv & self.Destination.LOG_WARNING:
                self.logger.warning(buf)
            if self.destination_bitv & self.Destination.LOG_ERROR:
                self.logger.error(buf)
            if self.destination_bitv & self.Destination.LOG_CRITICAL:
                self.logger.critical(buf)
        if self.destination_bitv & self.Destination.HLOG:
            hlog(buf)

    def close(self):
        """Close all open files (only the ones this class opened itself;
        caller-provided handles are left open)."""
        if self.f is not None:
            for _ in self.f:
                _.close()
class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
    """
    A context that uses an :class:`OutputMultiplexer`. e.g.::

        with OutputMultiplexerContext(
            OutputMultiplexer.LOG_INFO |
            OutputMultiplexer.LOG_DEBUG |
            OutputMultiplexer.FILENAMES |
            OutputMultiplexer.FILEHANDLES,
            filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
            handles = [ f, g ]
        ) as mplex:
            mplex.print("This is a log message!")
    """

    def __init__(
        self,
        destination_bitv: OutputMultiplexer.Destination,
        *,
        logger=None,
        filenames=None,
        handles=None,
    ):
        """
        Args:
            destination_bitv: a bitvector that indicates where we should
                send output. See :class:`OutputMultiplexer` for options.
            logger: optional logger to use for log destination messages.
            filenames: optional filenames to write for filename destination
                messages.
            handles: optional open filehandles to write for filehandle
                destination messages.
        """
        super().__init__(
            destination_bitv,
            logger=logger,
            filenames=filenames,
            handles=handles,
        )

    def __enter__(self):
        return self

    def __exit__(self, etype, value, tb) -> bool:
        # Always close the files the multiplexer opened, even on error.
        super().close()
        # NOTE(review): the tail of this method was elided in the excerpt
        # reviewed; re-raising on error and returning False (do not
        # suppress exceptions) is reconstructed -- confirm against the
        # full source.
        if etype is not None:
            raise etype(value)
        return False
def _get_systemwide_abnormal_exit_handler(filename: str) -> FileHandler:
    """Build a :class:`FileHandler` appending to *filename*, formatted with
    this program's standard log format (see :code:`--logging_format` and
    :code:`--logging_date_format`). Used to write the system-wide records
    of abnormal exits and unhandled exceptions.

    Args:
        filename: path of the system-wide record file to append to.

    Returns:
        A configured logging FileHandler.
    """
    handler = FileHandler(filename)
    handler.setFormatter(
        MillisecondAwareFormatter(
            fmt=_construct_logging_format(),
            datefmt=config.config["logging_date_format"],
        )
    )
    return handler
def non_zero_return_value(ret: Any) -> bool:
    """
    Special method hooked from bootstrap.py to optionally keep a system-wide
    record of non-zero python program exits.

    Args:
        ret: the return value

    Returns:
        True if a record was appended, False when the feature is disabled
        (i.e. --logging_non_zero_exits_record_path is unset).
    """
    record = config.config['logging_non_zero_exits_record_path']
    if record:
        logger = logging.getLogger()
        handler = _get_systemwide_abnormal_exit_handler(record)
        program = config.PROGRAM_NAME
        args = config.ORIG_ARGV
        # Temporarily attach the record-file handler so this one message
        # lands in the system-wide record, then detach it on context exit.
        with LoggingContext(logger, handlers=[handler], level=logging.ERROR) as log:
            log.error('%s (%s) Exit %s', program, args, ret)
        return True
    return False
def unhandled_top_level_exception(
    exc_type: type, exc_value: type, exc_tb: TracebackType
) -> bool:
    """
    Special method hooked from bootstrap.py to optionally keep a system-wide
    record of unhandled top level exceptions.

    Args:
        exc_type: the type of the unhandled exception
        exc_value: the value passed to the exception's c'tor
        exc_tb: the stack from where the exception was raised

    Returns:
        True if a record was appended, False when the feature is disabled
        (i.e. --logging_unhandled_top_level_exceptions_record_path is unset).
    """
    record = config.config['logging_unhandled_top_level_exceptions_record_path']
    if record:
        logger = logging.getLogger()
        handler = _get_systemwide_abnormal_exit_handler(record)
        program = config.PROGRAM_NAME
        args = config.ORIG_ARGV
        # The last traceback frame is the actual raise site.
        tb = traceback.extract_tb(exc_tb)
        original_frame = tb[-1]
        raise_site_file = original_frame.filename
        raise_site_lineno = original_frame.lineno
        with LoggingContext(logger, handlers=[handler], level=logging.ERROR) as log:
            log.error(
                '%s (%s) unhandled top-level exception (type=%s(%s)) at %s:%s',
                program,
                args,
                exc_type.__name__,
                str(exc_value),
                raise_site_file,
                raise_site_lineno,
            )
        return True
    return False
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`. This is pretty hacky but used
    by a bunch of (my) code. Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    Args:
        message: the message to write; passed to logger verbatim.

    TODO: this needs to move.
    """
    import subprocess  # local import: keep this hacky helper self-contained

    # Pass the message as a distinct argv element with no shell involved
    # (replaces the old os.system + manual single-quote escaping); the
    # "--" guards against messages that begin with a dash.
    subprocess.run(
        ["/usr/bin/logger", "-p", "local7.info", "--", message],
        check=False,
    )
1329 if __name__ == "__main__":