2 # -*- coding: utf-8 -*-
3 # pylint: disable=redefined-variable-type
5 # © Copyright 2021-2023, Scott Gasch
8 This is a module that offers an opinionated take on how whole program
9 logging should be initialized and controlled. It uses the standard
10 Python :mod:`logging` but gives you control, via commandline config,
13 * Set the logging default level (debug, info, warning, error, critical)
14 of the whole program (see: :code:`--logging_level`)... and to override
15 the logging level for individual modules/functions based on their names
16 (see :code:`--lmodule`),
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
* optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * and optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
38 To use this functionality, call :meth:`initialize_logging` early
39 in your program entry point. If you use the
40 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
41 point, it will call this for you automatically.
52 from logging.config import fileConfig
53 from logging.handlers import RotatingFileHandler, SysLogHandler
54 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
57 from overrides import overrides
59 # This module is commonly used by others in here and should avoid
60 # taking any unnecessary dependencies back on them.
61 from pyutils import argparse_utils, config, misc_utils
# Commandline flags controlling this module's behavior.  Flag names, types,
# actions and help strings below are original; a few defaults were
# reconstructed and are marked with TODO(review) — confirm against history.
cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
cfg.add_argument(
    "--logging_config_file",
    type=argparse_utils.valid_filename,
    default=None,
    metavar="FILENAME",
    help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
)
cfg.add_argument(
    "--logging_level",
    type=str,
    default="INFO",
    choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    metavar="LEVEL",
    help="The global default level below which to squelch log messages; see also --lmodule",
)
cfg.add_argument(
    "--logging_format",
    type=str,
    default=None,
    help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
)
cfg.add_argument(
    "--logging_date_format",
    type=str,
    default="%Y/%m/%dT%H:%M:%S.%f%z",
    metavar="DATEFMT",
    help="The format of any dates in --logging_format.",
)
cfg.add_argument(
    "--logging_console",
    action=argparse_utils.ActionNoYes,
    default=True,
    help="Should we log to the console (stderr)",
)
cfg.add_argument(
    "--logging_filename",
    type=str,
    default=None,
    metavar="FILENAME",
    help="The filename of the logfile to write.",
)
cfg.add_argument(
    "--logging_filename_maxsize",
    type=int,
    default=(1024 * 1024),
    metavar="#BYTES",
    help="The maximum size (in bytes) to write to the logging_filename.",
)
cfg.add_argument(
    "--logging_filename_count",
    type=int,
    default=7,  # TODO(review): reconstructed default -- confirm
    metavar="COUNT",
    help="The number of logging_filename copies to keep before deleting.",
)
cfg.add_argument(
    "--logging_syslog",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="Should we log to localhost's syslog.",
)
cfg.add_argument(
    "--logging_syslog_facility",
    type=str,
    default="USER",
    # NOTE(review): choices reconstructed from the standard syslog
    # facility names accepted by logging.handlers.SysLogHandler.
    choices=[
        "NOTSET", "AUTH", "AUTH_PRIV", "CRON", "DAEMON", "FTP", "KERN",
        "LPR", "MAIL", "NEWS", "SYSLOG", "USER", "UUCP", "LOCAL0",
        "LOCAL1", "LOCAL2", "LOCAL3", "LOCAL4", "LOCAL5", "LOCAL6",
        "LOCAL7",
    ],
    metavar="SYSLOG_FACILITY_LIST",
    help="The default syslog message facility identifier",
)
cfg.add_argument(
    "--logging_debug_threads",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="Should we prepend pid/tid data to all log messages?",
)
cfg.add_argument(
    "--logging_debug_modules",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="Should we prepend module/function data to all log messages?",
)
cfg.add_argument(
    "--logging_info_is_print",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="logging.info also prints to stdout.",
)
cfg.add_argument(
    "--logging_squelch_repeats",
    action=argparse_utils.ActionNoYes,
    default=True,
    help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
)
cfg.add_argument(
    "--logging_probabilistically",
    action=argparse_utils.ActionNoYes,
    default=True,
    help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
)
# See also: OutputMultiplexer
cfg.add_argument(
    "--logging_captures_prints",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="When calling print, also log.info automatically.",
)
cfg.add_argument(
    "--lmodule",
    type=str,
    metavar="<SCOPE>=<LEVEL>[,<SCOPE>=<LEVEL>...]",
    help=(
        "Allows per-scope logging levels which override the global level set with --logging-level."
        + "Pass a space separated list of <scope>=<level> where <scope> is one of: module, "
        + "module:function, or :function and <level> is a logging level (e.g. INFO, DEBUG...)"
    ),
)
cfg.add_argument(
    "--logging_clear_preexisting_handlers",
    action=argparse_utils.ActionNoYes,
    default=True,  # TODO(review): reconstructed default -- confirm
    help=(
        "Should logging code clear preexisting global logging handlers and thus insist that is "
        + "alone can add handlers. Use this to work around annoying modules that insert global "
        + "handlers with formats and logging levels you might now want. Caveat emptor, this may "
        + "cause you to miss logging messages."
    ),
)
# Reference to the real print builtin, saved so that the
# --logging_captures_prints monkeypatch (see initialize_logging) can
# still delegate to it after builtins.print is replaced.
BUILT_IN_PRINT = print

# One-shot guard: initialize_logging() only does its setup the first
# time it is called in a process.
LOGGING_INITIALIZED = False


# A map from logging_callsite_id -> count of logged messages.
squelched_logging_counts: Dict[str, int] = {}
def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
    """
    A decorator that marks a function as interested in having the logging
    messages that it produces be squelched (ignored) after it logs the
    same message more than N times.

    .. note::

        This decorator affects *ALL* logging messages produced
        within the decorated function.  That said, messages must be
        identical in order to be squelched.  For example, if the same line
        of code produces different messages (because of, e.g., a format
        string), the messages are considered to be different.

    An example of this from the pyutils code itself can be found in
    :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg` methods.  These
    methods log stuff like "Using 24-bit color strategy" which gets old
    really fast and fills up the logs.  By decorating the methods with
    :code:`@logging_utils.squelch_repeated_log_messages(1)` the code is
    requesting that its logged messages be dropped silently after the
    first one is produced (note the argument 1).

    Users can insist that all logged messages always be reflected in the
    logs using the :code:`--no_logging_squelch_repeats` flag but the default
    behavior is to allow code to request it be squelched.

    :code:`--logging_squelch_repeats` only affects code with this decorator
    on it; it ignores all other code.

    Args:
        squelch_after_n_repeats: the number of repeated messages allowed to
            log before subsequent messages are silently dropped.
    """

    def squelch_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        # Register the threshold; SquelchRepeatedMessagesFilter consults
        # this map at log time to decide whether to drop a record.
        squelched_logging_counts[identifier] = squelch_after_n_repeats
        # Bug fix: return the original function.  Previously control fell
        # off the end and the decorator replaced the function with None.
        return f

    return squelch_logging_wrapper
class SquelchRepeatedMessagesFilter(logging.Filter):
    """A filter that only logs messages from a given site with the same
    (exact) message at the same logging level N times and ignores
    subsequent attempts to log.

    This filter only affects logging messages that repeat more than a
    threshold number of times from functions that are tagged with the
    :code:`@logging_utils.squelched_logging_ok` decorator (see above);
    all others are ignored.

    This functionality is enabled by default but can be disabled via
    the :code:`--no_logging_squelch_repeats` commandline flag.
    """

    def __init__(self) -> None:
        super().__init__()
        # Count of messages seen so far, keyed by exact logsite+message.
        self.counters: collections.Counter = collections.Counter()

    def filter(self, record: logging.LogRecord) -> bool:
        """Should we drop this log message?"""
        id1 = f"{record.module}:{record.funcName}"
        # Callsites that never opted in (via the decorator above) are
        # always logged -- this early return was missing.
        if id1 not in squelched_logging_counts:
            return True
        threshold = squelched_logging_counts[id1]
        logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
        count = self.counters[logsite]
        self.counters[logsite] += 1
        return count < threshold
class DynamicPerScopeLoggingLevelFilter(logging.Filter):
    """This filter only allows logging messages from an allow list of
    module names or `module:function` names.  Blocks all others.  This
    filter is used to implement the :code:`--lmodule` commandline option.

    .. note::

        You probably don't need to use this directly, just use
        :code:`--lmodule`.  For example, to set logging level to INFO
        everywhere except "module:function" where it should be DEBUG::

            # myprogram.py --logging_level=INFO --lmodule=module:function=DEBUG
    """

    @staticmethod
    def level_name_to_level(name: str) -> int:
        """Given a level name, return its numeric value."""
        numeric_level = getattr(logging, name, None)
        if not isinstance(numeric_level, int):
            raise ValueError(f"Invalid level: {name}")
        return numeric_level

    def __init__(
        self,
        default_logging_level: int,
        per_scope_logging_levels: Optional[str],
    ) -> None:
        """Construct the Filter.

        Args:
            default_logging_level: the logging level of the whole program
            per_scope_logging_levels: optional, comma separated overrides of
                logging level per scope of the format scope=level where
                scope is of the form "module:function" or ":function" and
                level is one of NOTSET, DEBUG, INFO, WARNING, ERROR or
                CRITICAL.
        """
        super().__init__()
        self.valid_levels = set(
            ["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
        )
        self.default_logging_level = default_logging_level
        self.level_by_scope: Dict[str, int] = {}
        if per_scope_logging_levels is not None:
            for chunk in per_scope_logging_levels.split(","):
                # Malformed directives are reported on stderr and skipped
                # rather than aborting program startup.
                if "=" not in chunk:
                    print(
                        f'Malformed lmodule directive: "{chunk}", missing "=". Ignored.',
                        file=sys.stderr,
                    )
                    continue
                try:
                    (scope, level) = chunk.split("=")
                except ValueError:
                    print(
                        f'Malformed lmodule directive: "{chunk}". Ignored.',
                        file=sys.stderr,
                    )
                    continue
                scope = scope.strip()
                level = level.strip().upper()
                if level not in self.valid_levels:
                    print(
                        f'Malformed lmodule directive: "{chunk}", bad level. Ignored.',
                        file=sys.stderr,
                    )
                    continue
                self.level_by_scope[
                    scope
                ] = DynamicPerScopeLoggingLevelFilter.level_name_to_level(level)

    def filter(self, record: logging.LogRecord) -> bool:
        """Decides whether or not to log based on an allow list."""

        # First try to find a logging level by scope (--lmodule); if
        # several scopes match, the most permissive (lowest) level wins.
        if len(self.level_by_scope) > 0:
            min_level = None
            for scope in (
                record.module,
                f"{record.module}:{record.funcName}",
                f":{record.funcName}",
            ):
                level = self.level_by_scope.get(scope, None)
                if level is not None:
                    if min_level is None or level < min_level:
                        min_level = level

            # If we found one, use it instead of the global default level.
            if min_level is not None:
                return record.levelno >= min_level

        # Otherwise, use the global logging level (--logging_level)
        return record.levelno >= self.default_logging_level
# A map from function_identifier -> probability of logging (0.0%..100.0%),
# populated by the @logging_is_probabilistic decorator below and consulted
# by ProbabilisticFilter at log time.
probabilistic_logging_levels: Dict[str, float] = {}
def logging_is_probabilistic(probability_of_logging: float) -> Callable:
    """A decorator that indicates that all logging statements within the
    scope of a particular (marked via decorator) function are not
    deterministic (i.e. they do not always unconditionally log) but rather
    are probabilistic (i.e. they log N% of the time, randomly) when the
    user passes the :code:`--logging_probabilistically` commandline flag
    (which is enabled by default).

    .. note::

        This affects *ALL* logging statements within the marked function.
        If you want it to only affect a subset of logging statements,
        log those statements in a separate function that you invoke
        from within the "too large" scope and mark that separate function
        with the :code:`logging_is_probabilistic` decorator instead.

        That this functionality can be disabled (forcing all logged
        messages to produce output) via the
        :code:`--no_logging_probabilistically` cmdline argument.

    Args:
        probability_of_logging: the probability (0.0 .. 100.0) that a
            message logged within the decorated function is actually
            emitted.
    """

    def probabilistic_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        # Register the per-callsite probability; ProbabilisticFilter
        # consults this map at log time.
        probabilistic_logging_levels[identifier] = probability_of_logging
        # Bug fix: return the original function.  Previously control fell
        # off the end and the decorator replaced the function with None.
        return f

    return probabilistic_logging_wrapper
class ProbabilisticFilter(logging.Filter):
    """
    A filter that logs messages probabilistically (i.e. randomly at some
    percent chance).  This filter is used with a decorator (see
    :meth:`logging_is_probabilistic`) to implement the
    :code:`--logging_probabilistically` commandline flag.

    This filter only affects logging messages from functions that have
    been tagged with the `@logging_utils.probabilistic_logging` decorator.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        """Should the message be logged?"""
        callsite = f"{record.module}:{record.funcName}"
        # Untagged callsites default to 100%, i.e. always log.
        chance = probabilistic_logging_levels.get(callsite, 100.0)
        return misc_utils.execute_probabilistically(chance)
class OnlyInfoFilter(logging.Filter):
    """A filter that only logs messages produced at the INFO logging
    level.  This is used by the :code:`--logging_info_is_print`
    commandline option to select a subset of the logging stream to
    send to a stdout handler.
    """

    def filter(self, record: logging.LogRecord):
        # Pass exactly the INFO-level records; everything else is dropped.
        is_info_record = record.levelno == logging.INFO
        return is_info_record
def prepend_all_logger_messages(
    prefix: str, logger: logging.Logger
) -> logging.LoggerAdapter:
    """Helper method around the creation of a LoggerAdapter that prepends
    a given string to every log message produced.

    Args:
        prefix: the message to prepend to every log message.
        logger: the logger whose messages to modify.

    Returns:
        A new logger wrapping the old one with the given behavior.
        The old logger will continue to behave as usual; simply drop
        the reference to this wrapper when it's no longer needed.
    """
    return PrependingLogAdapter.wrap_logger(prefix, logger)
class PrependingLogAdapter(logging.LoggerAdapter):
    """A :class:`logging.LoggerAdapter` that prepends the string stored
    under ``extra["prefix"]`` to every message logged through it."""

    def process(self, msg, kwargs):
        # Missing/absent prefix degrades gracefully to the empty string.
        return f'{self.extra.get("prefix", "")}{msg}', kwargs

    @staticmethod
    def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LogAdapter that prepends
        a given string to every log message produced.

        Args:
            prefix: the message to prepend to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return PrependingLogAdapter(logger, {"prefix": prefix})
def append_all_logger_messages(
    suffix: str, logger: logging.Logger
) -> logging.LoggerAdapter:
    """Helper method around the creation of a LoggerAdapter that appends
    a given string to every log message produced.

    Args:
        suffix: the message to append to every log message.
        logger: the logger whose messages to modify.

    Returns:
        A new logger wrapping the old one with the given behavior.
        The old logger will continue to behave as usual; simply drop
        the reference to this wrapper when it's no longer needed.
    """
    return AppendingLogAdapter.wrap_logger(suffix, logger)
class AppendingLogAdapter(logging.LoggerAdapter):
    """A :class:`logging.LoggerAdapter` that appends the string stored
    under ``extra["suffix"]`` to every message logged through it."""

    def process(self, msg, kwargs):
        # Missing/absent suffix degrades gracefully to the empty string.
        return f'{msg}{self.extra.get("suffix", "")}', kwargs

    @staticmethod
    def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
        """Helper method around the creation of a LoggerAdapter that appends
        a given string to every log message produced.

        Args:
            suffix: the message to append to every log message.
            logger: the logger whose messages to modify.

        Returns:
            A new logger wrapping the old one with the given behavior.
            The old logger will continue to behave as usual; simply drop
            the reference to this wrapper when it's no longer needed.
        """
        return AppendingLogAdapter(logger, {"suffix": suffix})
class LoggingContext(contextlib.ContextDecorator):
    def __init__(
        self,
        logger: logging.Logger,
        *,
        handlers: Optional[List[logging.Handler]] = None,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
    ):
        """This is a logging context that can be used to temporarily change logging:

        * Change the destination of log messages (by adding temporary handlers)
        * Add a prefix string to log messages
        * Add a suffix string to log messages

        .. note::

            Unfortunately this can't be used to dynamically change the
            default logging level because of a conflict with
            :class:`DynamicPerScopeLoggingLevelFilter` which, to work,
            must see every logging message.  I love the ability to set
            logging per module from the commandline and am not willing
            to lose it in return for the ability to dynamically change
            the logging level in code.

        Example usage::

            logger = logging.getLogger(__name__)
            with LoggingContext(logger, prefix="REQUEST#12345>") as log:
                log.info("This is a test %d", 123)
                # emits: REQUEST#12345>This is a test 123

        Args:
            logger: the logger to temporarily modify.
            handlers: optional handlers to add for the context's duration.
            prefix: optional string to prepend to each message.
            suffix: optional string to append to each message.
        """
        self.logger = logger
        self.handlers = handlers
        self.prefix = prefix
        self.suffix = suffix

    def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
        assert self.logger
        self.log: Union[logging.Logger, logging.LoggerAdapter] = self.logger
        if self.handlers:
            for handler in self.handlers:
                self.log.addHandler(handler)
        # Wrap with adapters (outermost-first application: prefix, then
        # suffix) only when requested.
        if self.prefix:
            self.log = PrependingLogAdapter(self.log, {"prefix": self.prefix})
        if self.suffix:
            self.log = AppendingLogAdapter(self.log, {"suffix": self.suffix})
        return self.log

    def __exit__(self, et, ev, tb) -> None:
        if self.handlers:
            for handler in self.handlers:
                self.logger.removeHandler(handler)
                # NOTE(review): temporary handlers are closed on exit --
                # confirm callers don't expect to reuse them afterwards.
                handler.close()
        return None  # propagate exceptions out
class MillisecondAwareFormatter(logging.Formatter):
    """A formatter for adding milliseconds to log messages which, for
    whatever reason, the default Python logger doesn't do.

    .. note::

        You probably don't need to use this directly but it is
        wired in under :meth:`initialize_logging` so that the
        timestamps in log messages have millisecond level
        precision.
    """

    converter = datetime.datetime.fromtimestamp  # type: ignore

    def formatTime(self, record, datefmt=None):
        """Format the record's creation time, honoring an explicit
        datefmt; the fallback format appends milliseconds."""
        # NOTE(review): timezone is hardcoded to US/Pacific -- confirm.
        ct = MillisecondAwareFormatter.converter(
            record.created, pytz.timezone("US/Pacific")
        )
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y-%m-%d %H:%M:%S")
            # Bug fix: the previous f-string spec "{record.msecs:%03d}"
            # is an invalid format specification and raises ValueError at
            # format time.  %-style "%03d" zero-pads the (float) msecs
            # after truncation, which is the intended ",123" suffix.
            s = "%s,%03d" % (t, record.msecs)
        return s
def _log_about_logging(
    logger,
    default_logging_level,
    preexisting_handlers_count,
    fmt,
):
    """This is invoked automatically after logging is initialized such
    that the first messages in the log are about how logging itself
    was configured.

    Args:
        logger: the freshly configured root logger.
        default_logging_level: numeric global level (--logging_level).
        preexisting_handlers_count: number of handlers removed by
            --logging_clear_preexisting_handlers, if any.
        fmt: the message format specification in effect.
    """
    level_name = logging._levelToName.get(
        default_logging_level, str(default_logging_level)
    )
    logger.debug("Initialized global logging; logging level is %s.", level_name)
    if (
        config.config["logging_clear_preexisting_handlers"]
        and preexisting_handlers_count > 0
    ):
        logger.debug(
            "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
            preexisting_handlers_count,
        )
    logger.debug('Logging format specification is "%s"', fmt)
    if config.config["logging_debug_threads"]:
        logger.debug(
            "...Logging format spec captures tid/pid. (--logging_debug_threads)"
        )
    if config.config["logging_debug_modules"]:
        logger.debug(
            "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
        )
    if config.config["logging_syslog"]:
        logger.debug(
            "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
            config.config["logging_syslog_facility"],
        )
    if config.config["logging_filename"]:
        logger.debug(
            'Logging to file "%s". (--logging_filename)',
            config.config["logging_filename"],
        )
        logger.debug(
            "...with %d bytes max file size. (--logging_filename_maxsize)",
            config.config["logging_filename_maxsize"],
        )
        logger.debug(
            "...and %d rotating backup file count. (--logging_filename_count)",
            config.config["logging_filename_count"],
        )
    if config.config["logging_console"]:
        logger.debug("Logging to the console (stderr). (--logging_console)")
    if config.config["logging_info_is_print"]:
        logger.debug(
            "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
        )
    if config.config["logging_squelch_repeats"]:
        logger.debug(
            "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
        )
    else:
        logger.debug(
            "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
        )
    if config.config["logging_probabilistically"]:
        logger.debug(
            "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
        )
    else:
        logger.debug(
            "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
        )
    if config.config["lmodule"]:
        logger.debug(
            f'Logging dynamic per-module logging enabled. (--lmodule={config.config["lmodule"]})'
        )
    if config.config["logging_captures_prints"]:
        logger.debug(
            "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
        )
def initialize_logging(logger=None) -> logging.Logger:
    """Initialize logging for the program.  See module level comments
    for information about what functionality this provides and how to
    enable or disable functionality via the commandline.

    If you use the :meth:`bootstrap.initialize` decorator on your program's
    entry point, it will call this for you.  See
    :meth:`pyutils.bootstrap.initialize` for more details.

    Args:
        logger: optional logger to configure; defaults to the root logger.

    Returns:
        The configured logger.

    Raises:
        ValueError: if --logging_level names an unknown level.
    """
    global LOGGING_INITIALIZED
    # Only do this setup once per process.
    if LOGGING_INITIALIZED:
        return logging.getLogger()
    LOGGING_INITIALIZED = True

    if logger is None:
        logger = logging.getLogger()

    # --logging_clear_preexisting_handlers removes logging handlers
    # that were registered by global statements during imported module
    # setup.
    preexisting_handlers_count = 0
    assert config.has_been_parsed()
    if config.config["logging_clear_preexisting_handlers"]:
        while logger.hasHandlers():
            logger.removeHandler(logger.handlers[0])
            preexisting_handlers_count += 1

    # --logging_config_file pulls logging settings from a config file
    # skipping the rest of this setup.
    if config.config["logging_config_file"] is not None:
        fileConfig(config.config["logging_config_file"])
        return logger

    handlers: List[logging.Handler] = []
    handler: Optional[logging.Handler] = None

    # Global default logging level (--logging_level); messages below
    # this level will be silenced.
    logging_level = config.config["logging_level"]
    assert logging_level
    logging_level = logging_level.upper()
    default_logging_level = getattr(logging, logging_level, None)
    if not isinstance(default_logging_level, int):
        raise ValueError(f'Invalid level: {config.config["logging_level"]}')

    # Custom or default --logging_format?
    if config.config["logging_format"]:
        fmt = config.config["logging_format"]
    elif config.config["logging_syslog"]:
        # syslogd adds its own timestamp; don't duplicate it.
        fmt = "%(levelname).1s:%(filename)s[%(process)d]: %(message)s"
    else:
        fmt = "%(levelname).1s:%(asctime)s: %(message)s"

    # --logging_debug_threads and --logging_debug_modules both affect
    # the format by prepending information about the pid/tid or
    # file/function/line to each message.
    if config.config["logging_debug_threads"]:
        fmt = f"%(process)d.%(thread)d|{fmt}"
    if config.config["logging_debug_modules"]:
        fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"

    # --logging_syslog (optionally with --logging_syslog_facility)
    # sets up for logging to use the standard system syslogd as a
    # sink.
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            # Robustness fix: initialize facility_name so the lookup below
            # cannot raise NameError when no facility flag was given.
            facility_name = None
            if config.config["logging_syslog_facility"]:
                facility_name = "LOG_" + config.config["logging_syslog_facility"]
            facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)  # type: ignore
            assert facility is not None
            handler = SysLogHandler(facility=facility, address="/dev/log")
            handler.setFormatter(
                MillisecondAwareFormatter(
                    fmt=fmt,
                    datefmt=config.config["logging_date_format"],
                )
            )
            handlers.append(handler)

    # --logging_filename (with friends --logging_filename_count and
    # --logging_filename_maxsize) set up logging to a file on the
    # filesystem with automatic rotation when it gets too big.
    if config.config["logging_filename"]:
        max_bytes = config.config["logging_filename_maxsize"]
        assert max_bytes and isinstance(max_bytes, int)
        backup_count = config.config["logging_filename_count"]
        assert backup_count and isinstance(backup_count, int)
        handler = RotatingFileHandler(
            config.config["logging_filename"],
            maxBytes=max_bytes,
            backupCount=backup_count,
        )
        handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=fmt,
                datefmt=config.config["logging_date_format"],
            )
        )
        handlers.append(handler)

    # --logging_console is, ahem, logging to the console.
    if config.config["logging_console"]:
        handler = logging.StreamHandler(sys.stderr)
        handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=fmt,
                datefmt=config.config["logging_date_format"],
            )
        )
        handlers.append(handler)

    if len(handlers) == 0:
        handlers.append(logging.NullHandler())
    for handler in handlers:
        logger.addHandler(handler)

    # --logging_info_is_print echoes any message to logger.info(x) as
    # a print statement on stdout.
    if config.config["logging_info_is_print"]:
        handler = logging.StreamHandler(sys.stdout)
        handler.addFilter(OnlyInfoFilter())
        logger.addHandler(handler)

    # --logging_squelch_repeats allows code to request repeat logging
    # messages (identical log site and message contents) to be
    # silenced.  Logging code must request this explicitly, it isn't
    # automatic.  This option just allows the silencing to happen.
    if config.config["logging_squelch_repeats"]:
        for handler in handlers:
            handler.addFilter(SquelchRepeatedMessagesFilter())

    # --logging_probabilistically allows code to request
    # non-deterministic logging where messages have some probability
    # of being produced.  Logging code must request this explicitly.
    # This option just allows the non-deterministic behavior to
    # happen.  Disabling it will cause every log message to be
    # produced.
    if config.config["logging_probabilistically"]:
        for handler in handlers:
            handler.addFilter(ProbabilisticFilter())

    # --lmodule is a way to have a special logging level for just on
    # module or one set of modules that is different than the one set
    # globally via --logging_level.
    for handler in handlers:
        handler.addFilter(
            DynamicPerScopeLoggingLevelFilter(
                default_logging_level,
                config.config["lmodule"],
            )
        )
    # Level filtering happens in the per-handler filters above, so the
    # logger itself passes everything through.
    logger.setLevel(0)
    logger.propagate = False

    # --logging_captures_prints, if set, will capture and log.info
    # anything printed on stdout.
    if config.config["logging_captures_prints"]:
        import builtins

        def print_and_also_log(*arg, **kwarg):
            f = kwarg.get("file", None)
            if f == sys.stderr:
                logger.warning(*arg)
            else:
                logger.info(*arg)
            BUILT_IN_PRINT(*arg, **kwarg)

        builtins.print = print_and_also_log

    # At this point the logger is ready, handlers are set up,
    # etc... so log about the logging configuration.
    _log_about_logging(
        logger,
        default_logging_level,
        preexisting_handlers_count,
        fmt,
    )
    return logger
def get_logger(name: str = ""):
    """Get the global logger, ensuring logging has been initialized."""
    return initialize_logging(logging.getLogger(name))
def tprint(*args, **kwargs) -> None:
    """Legacy function for printing a message augmented with thread id
    still needed by some code.  Please use --logging_debug_threads in
    new code instead.
    """
    # NOTE(review): output is produced only when --logging_debug_threads
    # is set -- confirm this matches legacy callers' expectations.
    if config.config["logging_debug_threads"]:
        from pyutils.parallelize.thread_utils import current_thread_id

        # Prefix the message with the current thread's identifier.
        print(f"{current_thread_id()}", end="")
        print(*args, **kwargs)
class OutputMultiplexer(object):
    """A class that broadcasts printed messages to several sinks
    (including various logging levels, different files, different file
    handles, the house log, etc...).  See also
    :class:`OutputMultiplexerContext` for an easy usage pattern.
    """

    class Destination(enum.IntEnum):
        """Bits in the destination_bitv bitvector.  Used to indicate the
        output destination."""

        LOG_DEBUG = 0x01     # ⎫
        LOG_INFO = 0x02      # ⎪
        LOG_WARNING = 0x04   # ⎬ Must provide logger to the c'tor.
        LOG_ERROR = 0x08     # ⎪
        LOG_CRITICAL = 0x10  # ⎭
        FILENAMES = 0x20     # Must provide a filename to the c'tor.
        FILEHANDLES = 0x40   # Must provide a handle to the c'tor.
        HLOG = 0x80          # The house log (see hlog, below).
        ALL_LOG_DESTINATIONS = (
            LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        )
        # NOTE(review): 0x8F omits LOG_CRITICAL/FILENAMES/FILEHANDLES;
        # value preserved from the original source -- confirm intent.
        ALL_OUTPUT_DESTINATIONS = 0x8F

    def __init__(
        self,
        destination_bitv: int,
        *,
        logger=None,
        filenames: Optional[Iterable[str]] = None,
        handles: Optional[Iterable[io.TextIOWrapper]] = None,
    ):
        """
        Constructs the OutputMultiplexer instance.

        Args:
            destination_bitv: a bitvector where each bit represents an
                output destination.  Multiple bits may be set.
            logger: if LOG_* bits are set, you must pass a logger here.
            filenames: if FILENAMES bit is set, this should be a list of
                files you'd like to output into.  This code handles opening
                and closing said files.
            handles: if FILEHANDLES bit is set, this should be a list of
                already opened filehandles you'd like to output into.  The
                handles will remain open after the scope of the multiplexer.

        Raises:
            ValueError: if a FILENAMES/FILEHANDLES bit is set without
                the corresponding argument.
        """
        if logger is None:
            logger = logging.getLogger(None)
        self.logger = logger

        # Files we opened ourselves (closed by close()).
        self.f: Optional[List[Any]] = None
        if filenames is not None:
            self.f = [open(filename, "wb", buffering=0) for filename in filenames]
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
                raise ValueError("Filenames argument is required if bitv & FILENAMES")

        # Caller-owned handles (left open by close()).
        self.h: Optional[List[Any]] = None
        if handles is not None:
            self.h = list(handles)
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
                raise ValueError("Handle argument is required if bitv & FILEHANDLES")

        self.set_destination_bitv(destination_bitv)

    def get_destination_bitv(self):
        """Where are we outputting?"""
        return self.destination_bitv

    def set_destination_bitv(self, destination_bitv: int):
        """Change the output destination_bitv to the one provided."""
        if destination_bitv & self.Destination.FILENAMES and self.f is None:
            raise ValueError("Filename argument is required if bitv & FILENAMES")
        if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
        self.destination_bitv = destination_bitv

    def print(self, *args, **kwargs):
        """Produce some output to all sinks.  Accepts the same sep=/end=
        keyword arguments as the print builtin."""
        from pyutils.string_utils import _sprintf, strip_escape_sequences

        end = kwargs.pop("end", None)
        if end is not None:
            if not isinstance(end, str):
                raise TypeError("end must be None or a string")
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        buf = _sprintf(*args, end="", sep=sep)
        if sep is None:
            sep = " "
        if end is None:
            end = "\n"
        if end == "\n":
            buf += "\n"

        # File sinks receive the raw text (possibly containing ANSI
        # escape sequences), encoded as UTF-8.
        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
            for file_obj in self.f:
                file_obj.write(buf.encode("utf-8"))
                file_obj.flush()

        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
            for handle in self.h:
                handle.write(buf)
                handle.flush()

        # Logging and house-log sinks get the message with ANSI escape
        # sequences stripped.
        buf = strip_escape_sequences(buf)
        if self.logger is not None:
            if self.destination_bitv & self.Destination.LOG_DEBUG:
                self.logger.debug(buf)
            if self.destination_bitv & self.Destination.LOG_INFO:
                self.logger.info(buf)
            if self.destination_bitv & self.Destination.LOG_WARNING:
                self.logger.warning(buf)
            if self.destination_bitv & self.Destination.LOG_ERROR:
                self.logger.error(buf)
            if self.destination_bitv & self.Destination.LOG_CRITICAL:
                self.logger.critical(buf)
        if self.destination_bitv & self.Destination.HLOG:
            hlog(buf)

    def close(self):
        """Close all open files."""
        if self.f is not None:
            for file_obj in self.f:
                file_obj.close()
class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
    """
    A context that uses an :class:`OutputMultiplexer`. e.g.::

        with OutputMultiplexerContext(
            OutputMultiplexer.LOG_INFO |
            OutputMultiplexer.LOG_DEBUG |
            OutputMultiplexer.FILENAMES |
            OutputMultiplexer.FILEHANDLES,
            filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
            handles = [ f, g ]
        ) as mplex:
            mplex.print("This is a log message!")
    """

    def __init__(
        self,
        destination_bitv: OutputMultiplexer.Destination,
        *,
        logger=None,
        filenames=None,
        handles=None,
    ):
        """
        Args:
            destination_bitv: a bitvector that indicates where we should
                send output.  See :class:`OutputMultiplexer` for options.
            logger: optional logger to use for log destination messages.
            filenames: optional filenames to write for filename destination
                messages.
            handles: optional open filehandles to write for filehandle
                destination messages.
        """
        super().__init__(
            destination_bitv,
            logger=logger,
            filenames=filenames,
            handles=handles,
        )

    def __enter__(self):
        return self

    def __exit__(self, etype, value, traceback) -> bool:
        # Always close our opened files, then re-raise any in-flight
        # exception.
        super().close()
        if etype is not None:
            # NOTE(review): re-raising as etype(value) was reconstructed
            # from the visible fragment -- confirm against history.
            raise etype(value)
        return False
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`.  This is pretty hacky but used
    by a bunch of (my) code.  Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    TODO: this needs to move.
    """
    # Escape embedded single quotes using the shell's '"'"' idiom so the
    # message survives being wrapped in single quotes below.
    escaped = message.replace("'", "'\"'\"'")
    os.system(f"/usr/bin/logger -p local7.info -- '{escaped}'")
1127 if __name__ == "__main__":