2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level for individual modules/functions based on their names
15 (see :code:`--lmodule`),
16 * define the logging message format (see :code:`--logging_format` and
17 :code:`--logging_date_format`) including easily adding a PID/TID
18 marker on all messages to help with multithreaded debugging
19 (:code:`--logging_debug_threads`) and force module names of code
20 that emits log messages to be included in the format
21 (:code:`--logging_debug_modules`),
22 * control the destination of logged messages:
24 - log to the console/stderr (:code:`--logging_console`) and/or
25 - log to a rotated file (:code:`--logging_filename`,
26 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
28 - log to the UNIX syslog (:code:`--logging_syslog` and
29 :code:`--logging_syslog_facility`)
31 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
32 * optionally log probabilistically (:code:`--logging_probabilistically`),
33 * capture printed messages into the info log (:code:`--logging_captures_prints`),
34 * and optionally clear unwanted logging handlers added by other imports
35 before this one (:code:`--logging_clear_preexisting_handlers`).
37 To use this functionality, call :meth:`initialize_logging` early
38 in your program entry point. If you use the
39 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
40 point, it will call this for you automatically.
51 from logging.config import fileConfig
52 from logging.handlers import RotatingFileHandler, SysLogHandler
53 from typing import Any, Callable, Dict, Iterable, List, Optional
56 from overrides import overrides
58 # This module is commonly used by others in here and should avoid
59 # taking any unnecessary dependencies back on them.
60 from pyutils import argparse_utils, config, misc_utils
# Register this module's commandline flags with the shared pyutils
# config system; each "--logging_*" flag below is read later by
# initialize_logging().  NOTE(review): this listing has gaps (missing
# lines), so several add_argument(...) calls are only partially visible.
62 cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
64 "--logging_config_file",
65 type=argparse_utils.valid_filename,
68 help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
74 choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
76 help="The global default level below which to squelch log messages; see also --lmodule",
82 help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
85 "--logging_date_format",
87 default="%Y/%m/%dT%H:%M:%S.%f%z",
89 help="The format of any dates in --logging_format.",
93 action=argparse_utils.ActionNoYes,
95 help="Should we log to the console (stderr)",
# File-based logging: filename plus rotation controls (max size / count).
102 help="The filename of the logfile to write.",
105 "--logging_filename_maxsize",
107 default=(1024 * 1024),
109 help="The maximum size (in bytes) to write to the logging_filename.",
112 "--logging_filename_count",
116 help="The number of logging_filename copies to keep before deleting.",
120 action=argparse_utils.ActionNoYes,
122 help="Should we log to localhost's syslog.",
125 "--logging_syslog_facility",
151 metavar="SYSLOG_FACILITY_LIST",
152 help="The default syslog message facility identifier",
155 "--logging_debug_threads",
156 action=argparse_utils.ActionNoYes,
158 help="Should we prepend pid/tid data to all log messages?",
161 "--logging_debug_modules",
162 action=argparse_utils.ActionNoYes,
164 help="Should we prepend module/function data to all log messages?",
167 "--logging_info_is_print",
168 action=argparse_utils.ActionNoYes,
170 help="logging.info also prints to stdout.",
173 "--logging_squelch_repeats",
174 action=argparse_utils.ActionNoYes,
176 help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
179 "--logging_probabilistically",
180 action=argparse_utils.ActionNoYes,
182 help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
184 # See also: OutputMultiplexer
186 "--logging_captures_prints",
187 action=argparse_utils.ActionNoYes,
189 help="When calling print, also log.info automatically.",
194 metavar="<SCOPE>=<LEVEL>[,<SCOPE>=<LEVEL>...]",
196 "Allows per-scope logging levels which override the global level set with --logging-level."
197 + "Pass a space separated list of <scope>=<level> where <scope> is one of: module, "
198 + "module:function, or :function and <level> is a logging level (e.g. INFO, DEBUG...)"
202 "--logging_clear_preexisting_handlers",
203 action=argparse_utils.ActionNoYes,
206 "Should logging code clear preexisting global logging handlers and thus insist that is "
207 + "alone can add handlers. Use this to work around annoying modules that insert global "
208 + "handlers with formats and logging levels you might now want. Caveat emptor, this may "
209 + "cause you to miss logging messages."
# Saved reference to the builtin print so that --logging_captures_prints
# can wrap print() and still call through to the original.
213 BUILT_IN_PRINT = print
# Guard so initialize_logging() only does its work once per process.
214 LOGGING_INITIALIZED = False
217 # A map from logging_callsite_id -> count of logged messages.
218 squelched_logging_counts: Dict[str, int] = {}
221 def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
223     A decorator that marks a function as interested in having the logging
224     messages that it produces be squelched (ignored) after it logs the
225     same message more than N times.
229         This decorator affects *ALL* logging messages produced
230         within the decorated function. That said, messages must be
231         identical in order to be squelched. For example, if the same line
232         of code produces different messages (because of, e.g., a format
233         string), the messages are considered to be different.
235     An example of this from the pyutils code itself can be found in
236     :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg` methods::
238         @logging_utils.squelch_repeated_log_messages(1)
240             name: Optional[str] = "",
241             red: Optional[int] = None,
242             green: Optional[int] = None,
243             blue: Optional[int] = None,
245             force_16color: bool = False,
246             force_216color: bool = False,
250     These methods log stuff like "Using 24-bit color strategy" which
251     gets old really fast and fills up the logs. By decorating the methods
252     with :code:`@logging_utils.squelch_repeated_log_messages(1)` the code
253     is requesting that its logged messages be dropped silently after the
254     first one is produced (note the argument 1).
256     Users can insist that all logged messages always be reflected in the
257     logs using the :code:`--no_logging_squelch_repeats` flag but the default
258     behavior is to allow code to request it be squelched.
260     :code:`--logging_squelch_repeats` only affects code with this decorator
261     on it; it ignores all other code.
264         squelch_after_n_repeats: the number of repeated messages allowed to
265             log before subsequent messages are silently dropped.
# The decorator simply registers the threshold in the module-global
# squelched_logging_counts map (keyed by function identifier); the
# actual dropping is done by SquelchRepeatedMessagesFilter below.
# NOTE(review): the wrapper's return of `f` falls in a gap of this
# listing — confirm against the full source.
268     def squelch_logging_wrapper(f: Callable):
269         from pyutils import function_utils
271         identifier = function_utils.function_identifier(f)
272         squelched_logging_counts[identifier] = squelch_after_n_repeats
275     return squelch_logging_wrapper
278 class SquelchRepeatedMessagesFilter(logging.Filter):
279     """A filter that only logs messages from a given site with the same
280     (exact) message at the same logging level N times and ignores
281     subsequent attempts to log.
283     This filter only affects logging messages that repeat more than a
284     threshold number of times from functions that are tagged with the
285     :code:`@logging_utils.squelch_repeated_log_messages` decorator (see above);
286     all others are ignored.
288     This functionality is enabled by default but can be disabled via
289     the :code:`--no_logging_squelch_repeats` commandline flag.
292     def __init__(self) -> None:
# Per-logsite counters of how many times each exact message was seen.
294         self.counters: collections.Counter = collections.Counter()
297     def filter(self, record: logging.LogRecord) -> bool:
298         """Should we drop this log message?"""
# Only functions registered via the decorator are subject to squelching;
# everything else passes through (the early-return for unregistered
# callsites falls in a gap of this listing — confirm).
299         id1 = f"{record.module}:{record.funcName}"
300         if id1 not in squelched_logging_counts:
302         threshold = squelched_logging_counts[id1]
# A "logsite" is the exact file+line+level+message tuple; identical
# repeats share a counter, different messages do not.
303         logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
304         count = self.counters[logsite]
305         self.counters[logsite] += 1
306         return count < threshold
309 class DynamicPerScopeLoggingLevelFilter(logging.Filter):
310     """This filter only allows logging messages from an allow list of
311     module names or `module:function` names. Blocks all others. This
312     filter is used to implement the :code:`--lmodule` commandline option.
316         You probably don't need to use this directly, just use
317         :code:`--lmodule`. For example, to set logging level to INFO
318         everywhere except "module:function" where it should be DEBUG::
320             # myprogram.py --logging_level=INFO --lmodule=module:function=DEBUG
325     def level_name_to_level(name: str) -> int:
326         """Given a level name, return its numeric value."""
327         numeric_level = getattr(logging, name, None)
328         if not isinstance(numeric_level, int):
329             raise ValueError(f"Invalid level: {name}")
# Constructor: parses the --lmodule override string into a
# scope -> numeric-level map; malformed chunks are reported and skipped.
334         default_logging_level: int,
335         per_scope_logging_levels: Optional[str],
337         """Construct the Filter.
340             default_logging_level: the logging level of the whole program
341             per_scope_logging_levels: optional, comma separated overrides of
342                 logging level per scope of the format scope=level where
343                 scope is of the form "module:function" or ":function" and
344                 level is one of NOTSET, DEBUG, INFO, WARNING, ERROR or
348         self.valid_levels = set(
349             ["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
351         self.default_logging_level = default_logging_level
352         self.level_by_scope = {}
353         if per_scope_logging_levels is not None:
354             for chunk in per_scope_logging_levels.split(","):
357                     f'Malformed lmodule directive: "{chunk}", missing "=". Ignored.',
362                 (scope, level) = chunk.split("=")
365                     f'Malformed lmodule directive: "{chunk}". Ignored.',
369                 scope = scope.strip()
370                 level = level.strip().upper()
371                 if level not in self.valid_levels:
373                     f'Malformed lmodule directive: "{chunk}", bad level. Ignored.',
379                 ] = DynamicPerScopeLoggingLevelFilter.level_name_to_level(level)
382     def filter(self, record: logging.LogRecord) -> bool:
383         """Decides whether or not to log based on an allow list."""
385         # First try to find a logging level by scope (--lmodule)
386         if len(self.level_by_scope) > 0:
# Candidate scopes checked, most-specific first: "module:function",
# then ":function" (the "module"-only candidate falls in a listing gap).
390                 f"{record.module}:{record.funcName}",
391                 f":{record.funcName}",
393                 level = self.level_by_scope.get(scope, None)
394                 if level is not None:
# Keep the lowest (most permissive) matching override.
395                     if min_level is None or level < min_level:
398             # If we found one, use it instead of the global default level.
399             if min_level is not None:
400                 return record.levelno >= min_level
402         # Otherwise, use the global logging level (--logging_level)
403         return record.levelno >= self.default_logging_level
406 # A map from function_identifier -> probability of logging (0.0%..100.0%)
407 probabilistic_logging_levels: Dict[str, float] = {}
410 def logging_is_probabilistic(probability_of_logging: float) -> Callable:
411     """A decorator that indicates that all logging statements within the
412     scope of a particular (marked via decorator) function are not
413     deterministic (i.e. they do not always unconditionally log) but rather
414     are probabilistic (i.e. they log N% of the time, randomly) when the
415     user passes the :code:`--logging_probabilistically` commandline flag
416     (which is enabled by default).
420         This affects *ALL* logging statements within the marked function.
421         If you want it to only affect a subset of logging statements,
422         log those statements in a separate function that you invoke
423         from within the "too large" scope and mark that separate function
424         with the :code:`logging_is_probabilistic` decorator instead.
426         Note that this functionality can be disabled (forcing all logged
427         messages to produce output) via the
428         :code:`--no_logging_probabilistically` cmdline argument.
# Registers the probability in the module-global map; the actual random
# selection happens in ProbabilisticFilter below.
431     def probabilistic_logging_wrapper(f: Callable):
432         from pyutils import function_utils
434         identifier = function_utils.function_identifier(f)
435         probabilistic_logging_levels[identifier] = probability_of_logging
438     return probabilistic_logging_wrapper
441 class ProbabilisticFilter(logging.Filter):
443     A filter that logs messages probabilistically (i.e. randomly at some
444     percent chance). This filter is used with a decorator (see
445     :meth:`logging_is_probabilistic`) to implement the
446     :code:`--logging_probabilistically` commandline flag.
448     This filter only affects logging messages from functions that have
449     been tagged with the `@logging_utils.logging_is_probabilistic` decorator.
453     def filter(self, record: logging.LogRecord) -> bool:
454         """Should the message be logged?"""
455         identifier = f"{record.module}:{record.funcName}"
# Unregistered callsites default to 100.0 (always log).
456         threshold = probabilistic_logging_levels.get(identifier, 100.0)
457         return misc_utils.execute_probabilistically(threshold)
460 class OnlyInfoFilter(logging.Filter):
461     """A filter that only logs messages produced at the INFO logging
462     level. This is used by the :code:`--logging_info_is_print`
463     commandline option to select a subset of the logging stream to
464     send to a stdout handler.
# Pass only records at exactly INFO level (not DEBUG, not WARNING+).
468     def filter(self, record: logging.LogRecord) -> bool:
469         return record.levelno == logging.INFO
472 def prepend_all_logger_messages(
473     prefix: str, logger: logging.Logger
474 ) -> logging.LoggerAdapter:
475     """Helper method around the creation of a LogAdapter that prepends
476     a given string to every log message produced.
479         prefix: the message to prepend to every log message.
480         logger: the logger whose messages to modify.
483         A new logger wrapping the old one with the given behavior.
484         The old logger will continue to behave as usual; simply drop
485         the reference to this wrapper when it's no longer needed.
# Thin convenience wrapper around PrependingLogAdapter.wrap_logger.
487     return PrependingLogAdapter.wrap_logger(prefix, logger)
490 class PrependingLogAdapter(logging.LoggerAdapter):
    """LoggerAdapter that prepends a fixed prefix (from self.extra["prefix"])
    to every message it processes."""
491     def process(self, msg, kwargs):
492         return f'{self.extra.get("prefix", "")}{msg}', kwargs
# NOTE(review): wrap_logger takes no self; the full source presumably
# decorates it @staticmethod (that line falls in a listing gap) — confirm.
495     def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
496         """Helper method around the creation of a LogAdapter that prepends
497         a given string to every log message produced.
500             prefix: the message to prepend to every log message.
501             logger: the logger whose messages to modify.
504             A new logger wrapping the old one with the given behavior.
505             The old logger will continue to behave as usual; simply drop
506             the reference to this wrapper when it's no longer needed.
509         return PrependingLogAdapter(logger, {"prefix": prefix})
512 def append_all_logger_messages(
513     suffix: str, logger: logging.Logger
514 ) -> logging.LoggerAdapter:
515     """Helper method around the creation of a LogAdapter that appends
516     a given string to every log message produced.
519         suffix: the message to append to every log message.
520         logger: the logger whose messages to modify.
523         A new logger wrapping the old one with the given behavior.
524         The old logger will continue to behave as usual; simply drop
525         the reference to this wrapper when it's no longer needed.
# Thin convenience wrapper around AppendingLogAdapter.wrap_logger.
527     return AppendingLogAdapter.wrap_logger(suffix, logger)
530 class AppendingLogAdapter(logging.LoggerAdapter):
    """LoggerAdapter that appends a fixed suffix (from self.extra["suffix"])
    to every message it processes."""
531     def process(self, msg, kwargs):
532         return f'{msg}{self.extra.get("suffix", "")}', kwargs
# NOTE(review): wrap_logger takes no self; presumably @staticmethod in
# the full source (decorator line falls in a listing gap) — confirm.
535     def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
536         """Helper method around the creation of a LogAdapter that appends
537         a given string to every log message produced.
540             suffix: the message to append to every log message.
541             logger: the logger whose messages to modify.
544             A new logger wrapping the old one with the given behavior.
545             The old logger will continue to behave as usual; simply drop
546             the reference to this wrapper when it's no longer needed.
549         return AppendingLogAdapter(logger, {"suffix": suffix})
552 class MillisecondAwareFormatter(logging.Formatter):
553     """A formatter for adding milliseconds to log messages which, for
554     whatever reason, the default Python logger doesn't do.
558         You probably don't need to use this directly but it is
559         wired in under :meth:`initialize_logging` so that the
560         timestamps in log messages have millisecond level
# Class-level converter used by formatTime below.
565     converter = datetime.datetime.fromtimestamp # type: ignore
568     def formatTime(self, record, datefmt=None):
# NOTE(review): timezone is hard-coded to US/Pacific rather than local
# time or UTC — confirm this is intentional for all deployments.
569         ct = MillisecondAwareFormatter.converter(
570             record.created, pytz.timezone("US/Pacific")
573         s = ct.strftime(datefmt)
575         t = ct.strftime("%Y-%m-%d %H:%M:%S")
# NOTE(review): "%03d" is not a valid f-string format spec for the float
# record.msecs; likely intended f"{t},{int(record.msecs):03d}" — confirm
# against the full source before relying on this fallback path.
576         s = f"{t},{record.msecs:%03d}"
580 def _log_about_logging(
582     default_logging_level,
583     preexisting_handlers_count,
587     """This is invoked automatically after logging is initialized such
588     that the first messages in the log are about how logging itself
# Translate the numeric level back to its name for human-readable output.
591     level_name = logging._levelToName.get(
592         default_logging_level, str(default_logging_level)
594     logger.debug("Initialized global logging; logging level is %s.", level_name)
# Each branch below echoes one effective "--logging_*" option so the
# head of every log records the configuration that produced it.
596         config.config["logging_clear_preexisting_handlers"]
597         and preexisting_handlers_count > 0
600         "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
601         preexisting_handlers_count,
603     logger.debug('Logging format specification is "%s"', fmt)
604     if config.config["logging_debug_threads"]:
606         "...Logging format spec captures tid/pid. (--logging_debug_threads)"
608     if config.config["logging_debug_modules"]:
610         "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
612     if config.config["logging_syslog"]:
614         "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
617     if config.config["logging_filename"]:
619         'Logging to file "%s". (--logging_filename)',
620         config.config["logging_filename"],
623         "...with %d bytes max file size. (--logging_filename_maxsize)",
624         config.config["logging_filename_maxsize"],
627         "...and %d rotating backup file count. (--logging_filename_count)",
628         config.config["logging_filename_count"],
630     if config.config["logging_console"]:
631         logger.debug("Logging to the console (stderr). (--logging_console)")
632     if config.config["logging_info_is_print"]:
634         "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
636     if config.config["logging_squelch_repeats"]:
638         "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
642         "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
644     if config.config["logging_probabilistically"]:
646         "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
650         "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
652     if config.config["lmodule"]:
654         f'Logging dynamic per-module logging enabled. (--lmodule={config.config["lmodule"]})'
656     if config.config["logging_captures_prints"]:
658         "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
662 def initialize_logging(logger=None) -> logging.Logger:
663     """Initialize logging for the program. See module level comments
664     for information about what functionality this provides and how to
665     enable or disable functionality via the commandline.
668     :meth:`bootstrap.initialize` decorator on your program's entry point,
669     it will call this for you. See :meth:`pyutils.bootstrap.initialize`
# Idempotence guard: only the first call does the setup work; later
# calls just hand back the root logger.
672     global LOGGING_INITIALIZED
673     if LOGGING_INITIALIZED:
674         return logging.getLogger()
675     LOGGING_INITIALIZED = True
678     logger = logging.getLogger()
680     # --logging_clear_preexisting_handlers removes logging handlers
681     # that were registered by global statements during imported module
683     preexisting_handlers_count = 0
684     assert config.has_been_parsed()
685     if config.config["logging_clear_preexisting_handlers"]:
686         while logger.hasHandlers():
687             logger.removeHandler(logger.handlers[0])
688             preexisting_handlers_count += 1
690     # --logging_config_file pulls logging settings from a config file
691     # skipping the rest of this setup.
692     if config.config["logging_config_file"] is not None:
693         fileConfig(config.config["logging_config_file"])
# NOTE(review): the early return after fileConfig falls in a gap of
# this listing — confirm against the full source.
696     handlers: List[logging.Handler] = []
697     handler: Optional[logging.Handler] = None
699     # Global default logging level (--logging_level); messages below
700     # this level will be silenced.
701     logging_level = config.config["logging_level"]
703     logging_level = logging_level.upper()
704     default_logging_level = getattr(logging, logging_level, None)
705     if not isinstance(default_logging_level, int):
706         raise ValueError(f'Invalid level: {config.config["logging_level"]}')
708     # Custom or default --logging_format?
709     if config.config["logging_format"]:
710         fmt = config.config["logging_format"]
711     elif config.config["logging_syslog"]:
712         fmt = "%(levelname).1s:%(filename)s[%(process)d]: %(message)s"
714         fmt = "%(levelname).1s:%(asctime)s: %(message)s"
716     # --logging_debug_threads and --logging_debug_modules both affect
717     # the format by prepending information about the pid/tid or
719     if config.config["logging_debug_threads"]:
720         fmt = f"%(process)d.%(thread)d|{fmt}"
721     if config.config["logging_debug_modules"]:
722         fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
724     # --logging_syslog (optionally with --logging_syslog_facility)
725     # sets up for logging to use the standard system syslogd as a
728     if config.config["logging_syslog"]:
# Syslog handler is unavailable on Windows platforms, so skip there.
729         if sys.platform not in ("win32", "cygwin"):
730             if config.config["logging_syslog_facility"]:
731                 facility_name = "LOG_" + config.config["logging_syslog_facility"]
# Unknown facility names fall back to LOG_USER.
732             facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER) # type: ignore
733             assert facility is not None
734             handler = SysLogHandler(facility=facility, address="/dev/log")
735             handler.setFormatter(
736                 MillisecondAwareFormatter(
738                     datefmt=config.config["logging_date_format"],
741             handlers.append(handler)
743     # --logging_filename (with friends --logging_filename_count and
744     # --logging_filename_maxsize) set up logging to a file on the
745     # filesystem with automatic rotation when it gets too big.
746     if config.config["logging_filename"]:
747         max_bytes = config.config["logging_filename_maxsize"]
748         assert max_bytes and isinstance(max_bytes, int)
749         backup_count = config.config["logging_filename_count"]
750         assert backup_count and isinstance(backup_count, int)
751         handler = RotatingFileHandler(
752             config.config["logging_filename"],
754             backupCount=backup_count,
756         handler.setFormatter(
757             MillisecondAwareFormatter(
759                 datefmt=config.config["logging_date_format"],
762         handlers.append(handler)
764     # --logging_console is, ahem, logging to the console.
765     if config.config["logging_console"]:
766         handler = logging.StreamHandler(sys.stderr)
767         handler.setFormatter(
768             MillisecondAwareFormatter(
770                 datefmt=config.config["logging_date_format"],
773         handlers.append(handler)
# With no destinations configured, install a NullHandler so the logging
# package doesn't emit "no handlers could be found" complaints.
775     if len(handlers) == 0:
776         handlers.append(logging.NullHandler())
777     for handler in handlers:
778         logger.addHandler(handler)
780     # --logging_info_is_print echoes any message to logger.info(x) as
781     # a print statement on stdout.
782     if config.config["logging_info_is_print"]:
783         handler = logging.StreamHandler(sys.stdout)
784         handler.addFilter(OnlyInfoFilter())
785         logger.addHandler(handler)
787     # --logging_squelch_repeats allows code to request repeat logging
788     # messages (identical log site and message contents) to be
789     # silenced. Logging code must request this explicitly, it isn't
790     # automatic. This option just allows the silencing to happen.
791     if config.config["logging_squelch_repeats"]:
792         for handler in handlers:
793             handler.addFilter(SquelchRepeatedMessagesFilter())
795     # --logging_probabilistically allows code to request
796     # non-deterministic logging where messages have some probability
797     # of being produced. Logging code must request this explicitly.
798     # This option just allows the non-deterministic behavior to
799     # happen. Disabling it will cause every log message to be
801     if config.config["logging_probabilistically"]:
802         for handler in handlers:
803             handler.addFilter(ProbabilisticFilter())
805     # --lmodule is a way to have a special logging level for just on
806     # module or one set of modules that is different than the one set
807     # globally via --logging_level.
808     for handler in handlers:
810         DynamicPerScopeLoggingLevelFilter(
811             default_logging_level,
812             config.config["lmodule"],
816     logger.propagate = False
818     # --logging_captures_prints, if set, will capture and log.info
819     # anything printed on stdout.
820     if config.config["logging_captures_prints"]:
# Replacement print(): logs the message and then delegates to the real
# builtin print saved in BUILT_IN_PRINT at import time.
823         def print_and_also_log(*arg, **kwarg):
824             f = kwarg.get("file", None)
829             BUILT_IN_PRINT(*arg, **kwarg)
831         builtins.print = print_and_also_log
833     # At this point the logger is ready, handlers are set up,
834     # etc... so log about the logging configuration.
837         default_logging_level,
838         preexisting_handlers_count,
def get_logger(name: str = ""):
    """Return the logger for *name*, ensuring global logging is initialized.

    Args:
        name: the logger name to fetch; "" yields the root logger.

    Returns:
        The (initialized) logger instance.
    """
    return initialize_logging(logging.getLogger(name))
851 def tprint(*args, **kwargs) -> None:
852     """Legacy function for printing a message augmented with thread id
853     still needed by some code. Please use --logging_debug_threads in
# Only prefixes the thread id when --logging_debug_threads is enabled;
# otherwise behaves exactly like print().
856     if config.config["logging_debug_threads"]:
857         from pyutils.parallelize.thread_utils import current_thread_id
859         print(f"{current_thread_id()}", end="")
860     print(*args, **kwargs)
865 class OutputMultiplexer(object):
866     """A class that broadcasts printed messages to several sinks
867     (including various logging levels, different files, different file
868     handles, the house log, etc...). See also
869     :class:`OutputMultiplexerContext` for an easy usage pattern.
872     class Destination(enum.IntEnum):
873         """Bits in the destination_bitv bitvector. Used to indicate the
874         output destination."""
879         LOG_WARNING = 0x04 # ⎬ Must provide logger to the c'tor.
881         LOG_CRITICAL = 0x10 # ⎭
882         FILENAMES = 0x20 # Must provide a filename to the c'tor.
883         FILEHANDLES = 0x40 # Must provide a handle to the c'tor.
# Convenience masks covering all LOG_* bits / every destination bit.
885         ALL_LOG_DESTINATIONS = (
886             LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
888         ALL_OUTPUT_DESTINATIONS = 0x8F
893         destination_bitv: int,
896         filenames: Optional[Iterable[str]] = None,
897         handles: Optional[Iterable[io.TextIOWrapper]] = None,
900         Constructs the OutputMultiplexer instance.
903             destination_bitv: a bitvector where each bit represents an
904                 output destination. Multiple bits may be set.
905             logger: if LOG_* bits are set, you must pass a logger here.
906             filenames: if FILENAMES bit is set, this should be a list of
907                 files you'd like to output into. This code handles opening
908                 and closing said files.
909             handles: if FILEHANDLES bit is set, this should be a list of
910                 already opened filehandles you'd like to output into. The
911                 handles will remain open after the scope of the multiplexer.
# Default to the root logger when none is provided.
914             logger = logging.getLogger(None)
# Files named by the caller are opened here (unbuffered, binary) and
# owned by this instance; see close().
917         self.f: Optional[List[Any]] = None
918         if filenames is not None:
919             self.f = [open(filename, "wb", buffering=0) for filename in filenames]
921             if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
922                 raise ValueError("Filenames argument is required if bitv & FILENAMES")
# Caller-provided handles are borrowed, not owned: they stay open.
925         self.h: Optional[List[Any]] = None
926         if handles is not None:
927             self.h = list(handles)
929             if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
930                 raise ValueError("Handle argument is required if bitv & FILEHANDLES")
933         self.set_destination_bitv(destination_bitv)
935     def get_destination_bitv(self):
936         """Where are we outputting?"""
937         return self.destination_bitv
939     def set_destination_bitv(self, destination_bitv: int):
940         """Change the output destination_bitv to the one provided."""
941         if destination_bitv & self.Destination.FILENAMES and self.f is None:
942             raise ValueError("Filename argument is required if bitv & FILENAMES")
943         if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
944             raise ValueError("Handle argument is required if bitv & FILEHANDLES")
945         self.destination_bitv = destination_bitv
947     def print(self, *args, **kwargs):
948         """Produce some output to all sinks."""
949         from pyutils.string_utils import _sprintf, strip_escape_sequences
# Mirror builtin print()'s keyword contract: only end= and sep= are
# accepted, and both must be strings if given.
951         end = kwargs.pop("end", None)
953             if not isinstance(end, str):
954                 raise TypeError("end must be None or a string")
955         sep = kwargs.pop("sep", None)
957             if not isinstance(sep, str):
958                 raise TypeError("sep must be None or a string")
960             raise TypeError("invalid keyword arguments to print()")
961         buf = _sprintf(*args, end="", sep=sep)
968         if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
970                 _.write(buf.encode("utf-8"))
973         if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
# ANSI escape sequences are stripped before the text goes to loggers.
978         buf = strip_escape_sequences(buf)
979         if self.logger is not None:
980             if self.destination_bitv & self.Destination.LOG_DEBUG:
981                 self.logger.debug(buf)
982             if self.destination_bitv & self.Destination.LOG_INFO:
983                 self.logger.info(buf)
984             if self.destination_bitv & self.Destination.LOG_WARNING:
985                 self.logger.warning(buf)
986             if self.destination_bitv & self.Destination.LOG_ERROR:
987                 self.logger.error(buf)
988             if self.destination_bitv & self.Destination.LOG_CRITICAL:
989                 self.logger.critical(buf)
990         if self.destination_bitv & self.Destination.HLOG:
994         """Close all open files."""
995         if self.f is not None:
1000 class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
1002     A context that uses an :class:`OutputMultiplexer`. e.g.::
1004         with OutputMultiplexerContext(
1005             OutputMultiplexer.LOG_INFO |
1006             OutputMultiplexer.LOG_DEBUG |
1007             OutputMultiplexer.FILENAMES |
1008             OutputMultiplexer.FILEHANDLES,
1009             filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
1012             mplex.print("This is a log message!")
1017         destination_bitv: OutputMultiplexer.Destination,
1025             destination_bitv: a bitvector that indicates where we should
1026                 send output. See :class:`OutputMultiplexer` for options.
1027             logger: optional logger to use for log destination messages.
1028             filenames: optional filenames to write for filename destination
1030             handles: optional open filehandles to write for filehandle
1031                 destination messages.
# Constructor delegates to OutputMultiplexer.__init__ with the same args.
1036             filenames=filenames,
1040     def __enter__(self):
# __exit__ closes owned files; re-raises by returning False on error.
1043     def __exit__(self, etype, value, traceback) -> bool:
1045         if etype is not None:
def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7 priority
    info) by calling `/usr/bin/logger`. This is pretty hacky but used
    by a bunch of (my) code. Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    Args:
        message: the message to write to the house log.

    TODO: this needs to move.
    """
    import subprocess

    # Invoke logger(1) with an argv list and no shell so that message
    # content can never be interpreted as shell syntax; the previous
    # os.system() call required fragile manual single-quote escaping.
    # "--" ends option parsing so messages starting with "-" still work.
    subprocess.run(
        ["/usr/bin/logger", "-p", "local7.info", "--", message],
        check=False,
    )
1063 if __name__ == "__main__":