#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# © Copyright 2021-2023, Scott Gasch

"""
This is a module that offers an opinionated take on how whole program
logging should be initialized and controlled.  It uses the standard
Python :mod:`logging` module but gives you control, via commandline
config, over:

    * the logging default level (debug, info, warning, error, critical)
      of the whole program (see: :code:`--logging_level`)... and the
      ability to override the logging level for individual modules and
      functions based on their names (see :code:`--lmodule`),
    * the logging message format (see :code:`--logging_format` and
      :code:`--logging_date_format`) including easily adding a PID/TID
      marker on all messages to help with multithreaded debugging
      (:code:`--logging_debug_threads`) and forcing the module names of
      code that emits log messages to be included in the format
      (:code:`--logging_debug_modules`),
    * the destination of logged messages:

        - log to the console/stderr (:code:`--logging_console`) and/or
        - log to a rotated file (:code:`--logging_filename`,
          :code:`--logging_filename_maxsize` and
          :code:`--logging_filename_count`) and/or
        - log to the UNIX syslog (:code:`--logging_syslog` and
          :code:`--logging_syslog_facility`),

    * optionally squelching repeated messages (:code:`--logging_squelch_repeats`),
    * optionally logging probabilistically (:code:`--logging_probabilistically`),
    * capturing printed messages into the info log (:code:`--logging_captures_prints`),
    * and optionally clearing unwanted logging handlers added by other imports
      before this one (:code:`--logging_clear_preexisting_handlers`).

To use this functionality, call :meth:`initialize_logging` early
in your program's entry point.  If you use the
:meth:`pyutils.bootstrap.initialize` decorator on your program's entry
point, it will call this for you automatically.
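
A minimal usage sketch (assuming you use the :mod:`pyutils.bootstrap`
decorator, which parses the commandline and then invokes
:meth:`initialize_logging` on your behalf)::

    import logging

    from pyutils import bootstrap

    logger = logging.getLogger(__name__)

    @bootstrap.initialize
    def main() -> None:
        logger.info("Hello!")  # Obeys the --logging_* flags described above.

    if __name__ == "__main__":
        main()
"""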

import builtins
import collections
import contextlib
import datetime
import enum
import io
import logging
import os
import sys
from logging.config import fileConfig
from logging.handlers import RotatingFileHandler, SysLogHandler
from typing import Any, Callable, Dict, Iterable, List, Optional

import pytz
from overrides import overrides

# This module is commonly used by others in here and should avoid
# taking any unnecessary dependencies back on them.
from pyutils import argparse_utils, config, misc_utils

cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
cfg.add_argument(
    "--logging_config_file",
    type=argparse_utils.valid_filename,
    default=None,
    metavar="FILENAME",
    help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
)
cfg.add_argument(
    "--logging_level",
    type=str,
    default="INFO",
    choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    metavar="LEVEL",
    help="The global default level below which to squelch log messages; see also --lmodule",
)
cfg.add_argument(
    "--logging_format",
    type=str,
    default=None,
    help="The format for lines logged via the logger module.  See: https://docs.python.org/3/library/logging.html#formatter-objects",
)
cfg.add_argument(
    "--logging_date_format",
    type=str,
    default="%Y/%m/%dT%H:%M:%S.%f%z",
    help="The format of any dates in --logging_format.",
)
cfg.add_argument(
    "--logging_console",
    action=argparse_utils.ActionNoYes,
    default=True,
    help="Should we log to the console (stderr)",
)
cfg.add_argument(
    "--logging_filename",
    type=str,
    default=None,
    metavar="FILENAME",
    help="The filename of the logfile to write.",
)
cfg.add_argument(
    "--logging_filename_maxsize",
    type=int,
    default=(1024 * 1024),
    metavar="#BYTES",
    help="The maximum size (in bytes) to write to the logging_filename.",
)
cfg.add_argument(
    "--logging_filename_count",
    type=int,
    default=7,
    metavar="COUNT",
    help="The number of logging_filename copies to keep before deleting.",
)
cfg.add_argument(
    "--logging_syslog",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="Should we log to localhost's syslog.",
)
cfg.add_argument(
    "--logging_syslog_facility",
    type=str,
    default="USER",
    metavar="SYSLOG_FACILITY_LIST",
    help="The default syslog message facility identifier",
)
cfg.add_argument(
    "--logging_debug_threads",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="Should we prepend pid/tid data to all log messages?",
)
cfg.add_argument(
    "--logging_debug_modules",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="Should we prepend module/function data to all log messages?",
)
cfg.add_argument(
    "--logging_info_is_print",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="logging.info also prints to stdout.",
)
cfg.add_argument(
    "--logging_squelch_repeats",
    action=argparse_utils.ActionNoYes,
    default=True,
    help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
)
cfg.add_argument(
    "--logging_probabilistically",
    action=argparse_utils.ActionNoYes,
    default=True,
    help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
)
# See also: OutputMultiplexer
cfg.add_argument(
    "--logging_captures_prints",
    action=argparse_utils.ActionNoYes,
    default=False,
    help="When calling print, also log.info automatically.",
)
cfg.add_argument(
    "--lmodule",
    type=str,
    metavar="<SCOPE>=<LEVEL>[,<SCOPE>=<LEVEL>...]",
    help=(
        "Allows per-scope logging levels which override the global level set with --logging_level. "
        "Pass a comma separated list of <scope>=<level> where <scope> is one of: module, "
        "module:function, or :function and <level> is a logging level (e.g. INFO, DEBUG...)"
    ),
)
cfg.add_argument(
    "--logging_clear_preexisting_handlers",
    action=argparse_utils.ActionNoYes,
    default=True,
    help=(
        "Should logging code clear preexisting global logging handlers and thus insist that it "
        "alone can add handlers.  Use this to work around annoying modules that insert global "
        "handlers with formats and logging levels you might not want.  Caveat emptor, this may "
        "cause you to miss logging messages."
    ),
)

BUILT_IN_PRINT = print
LOGGING_INITIALIZED = False


# A map from a function identifier ("module:function") to the number of
# times its identical messages may be logged before being squelched.
squelched_logging_counts: Dict[str, int] = {}


def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
    """
    A decorator that marks a function as interested in having the logging
    messages that it produces be squelched (ignored) after it logs the
    same message more than N times.

    .. note::

        This decorator affects *ALL* logging messages produced
        within the decorated function.  That said, messages must be
        identical in order to be squelched.  For example, if the same line
        of code produces different messages (because of, e.g., a format
        string), the messages are considered to be different.

    An example of this from the pyutils code itself can be found in
    :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg`::

        @logging_utils.squelch_repeated_log_messages(1)
        def fg(
            name: Optional[str] = "",
            red: Optional[int] = None,
            green: Optional[int] = None,
            blue: Optional[int] = None,
            *,
            force_16color: bool = False,
            force_216color: bool = False,
        ) -> str:
            ...

    These methods log stuff like "Using 24-bit color strategy" which
    gets old really fast and fills up the logs.  By decorating the methods
    with :code:`@logging_utils.squelch_repeated_log_messages(1)` the code
    is requesting that its logged messages be dropped silently after the
    first one is produced (note the argument 1).

    Users can insist that all logged messages always be reflected in the
    logs using the :code:`--no_logging_squelch_repeats` flag but the default
    behavior is to allow code to request that they be squelched.

    :code:`--logging_squelch_repeats` only affects code with this decorator
    on it; it ignores all other code.

    Args:
        squelch_after_n_repeats: the number of repeated messages allowed to
            log before subsequent messages are silently dropped.
    """

    def squelch_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        squelched_logging_counts[identifier] = squelch_after_n_repeats
        return f

    return squelch_logging_wrapper


class SquelchRepeatedMessagesFilter(logging.Filter):
    """A filter that only logs messages from a given site with the same
    (exact) message at the same logging level N times and ignores
    subsequent attempts to log.

    This filter only affects logging messages that repeat more than a
    threshold number of times from functions that are tagged with the
    :code:`@logging_utils.squelch_repeated_log_messages` decorator (see
    above); all others are ignored.

    This functionality is enabled by default but can be disabled via
    the :code:`--no_logging_squelch_repeats` commandline flag.
    """

    def __init__(self) -> None:
        super().__init__()
        self.counters: collections.Counter = collections.Counter()

    @overrides
    def filter(self, record: logging.LogRecord) -> bool:
        """Should we drop this log message?"""
        id1 = f"{record.module}:{record.funcName}"
        if id1 not in squelched_logging_counts:
            return True
        threshold = squelched_logging_counts[id1]
        logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
        count = self.counters[logsite]
        self.counters[logsite] += 1
        return count < threshold


class DynamicPerScopeLoggingLevelFilter(logging.Filter):
    """A filter that applies different logging levels to different scopes
    (a module or a `module:function`); scopes without an explicit override
    fall back to the global default level.  This filter is used to
    implement the :code:`--lmodule` commandline option.

    .. note::

        You probably don't need to use this directly, just use
        :code:`--lmodule`.  For example, to set the logging level to INFO
        everywhere except "module:function" where it should be DEBUG::

            # myprogram.py --logging_level=INFO --lmodule=module:function=DEBUG
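
        Multiple overrides may be passed as a comma separated list; e.g.
        to also quiet a chatty module down to ERROR (the module name here
        is hypothetical)::

            # myprogram.py --logging_level=INFO --lmodule=module:function=DEBUG,chatty_module=ERROR

    """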

    @staticmethod
    def level_name_to_level(name: str) -> int:
        """Given a level name, return its numeric value; e.g. "DEBUG"
        returns :code:`logging.DEBUG` (10)."""
        numeric_level = getattr(logging, name, None)
        if not isinstance(numeric_level, int):
            raise ValueError(f"Invalid level: {name}")
        return numeric_level

    def __init__(
        self,
        default_logging_level: int,
        per_scope_logging_levels: Optional[str],
    ) -> None:
        """Construct the Filter.

        Args:
            default_logging_level: the logging level of the whole program
            per_scope_logging_levels: optional, comma separated overrides of
                logging level per scope of the format scope=level where
                scope is of the form "module:function" or ":function" and
                level is one of NOTSET, DEBUG, INFO, WARNING, ERROR or
                CRITICAL.
        """
        super().__init__()
        self.valid_levels = set(
            ["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
        )
        self.default_logging_level = default_logging_level
        self.level_by_scope: Dict[str, int] = {}
        if per_scope_logging_levels is not None:
            for chunk in per_scope_logging_levels.split(","):
                if "=" not in chunk:
                    print(
                        f'Malformed lmodule directive: "{chunk}", missing "=".  Ignored.',
                        file=sys.stderr,
                    )
                    continue
                try:
                    (scope, level) = chunk.split("=")
                except ValueError:
                    print(
                        f'Malformed lmodule directive: "{chunk}".  Ignored.',
                        file=sys.stderr,
                    )
                    continue
                scope = scope.strip()
                level = level.strip().upper()
                if level not in self.valid_levels:
                    print(
                        f'Malformed lmodule directive: "{chunk}", bad level.  Ignored.',
                        file=sys.stderr,
                    )
                    continue
                self.level_by_scope[
                    scope
                ] = DynamicPerScopeLoggingLevelFilter.level_name_to_level(level)

    @overrides
    def filter(self, record: logging.LogRecord) -> bool:
        """Decides whether or not to log based on the allow list."""

        # First try to find a logging level by scope (--lmodule).  If a
        # record matches several scopes, prefer the most permissive
        # (numerically lowest) level among them.
        if len(self.level_by_scope) > 0:
            min_level = None
            for scope in (
                record.module,
                f"{record.module}:{record.funcName}",
                f":{record.funcName}",
            ):
                level = self.level_by_scope.get(scope, None)
                if level is not None:
                    if min_level is None or level < min_level:
                        min_level = level

            # If we found one, use it instead of the global default level.
            if min_level is not None:
                return record.levelno >= min_level

        # Otherwise, use the global logging level (--logging_level).
        return record.levelno >= self.default_logging_level


# A map from function_identifier -> probability of logging (0.0%..100.0%)
probabilistic_logging_levels: Dict[str, float] = {}


def logging_is_probabilistic(probability_of_logging: float) -> Callable:
    """A decorator that indicates that all logging statements within the
    scope of a particular (marked via decorator) function are not
    deterministic (i.e. they do not always unconditionally log) but rather
    are probabilistic (i.e. they log N% of the time, randomly) when the
    user passes the :code:`--logging_probabilistically` commandline flag
    (which is enabled by default).

    .. note::

        This affects *ALL* logging statements within the marked function.
        If you want it to only affect a subset of logging statements,
        log those statements in a separate function that you invoke
        from within the "too large" scope and mark that separate function
        with the :code:`logging_is_probabilistic` decorator instead.

    Note that this functionality can be disabled (forcing all logged
    messages to produce output) via the
    :code:`--no_logging_probabilistically` cmdline argument.
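
    A small usage sketch (the decorated function is hypothetical)::

        @logging_utils.logging_is_probabilistic(50.0)
        def chatty_heartbeat():
            logger.info("This message is logged only ~50% of the time.")

    Args:
        probability_of_logging: the probability (0.0 .. 100.0) that any
            given logging statement within the decorated function will
            actually log.
    """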

    def probabilistic_logging_wrapper(f: Callable):
        from pyutils import function_utils

        identifier = function_utils.function_identifier(f)
        probabilistic_logging_levels[identifier] = probability_of_logging
        return f

    return probabilistic_logging_wrapper


class ProbabilisticFilter(logging.Filter):
    """
    A filter that logs messages probabilistically (i.e. randomly at some
    percent chance).  This filter is used with a decorator (see
    :meth:`logging_is_probabilistic`) to implement the
    :code:`--logging_probabilistically` commandline flag.

    This filter only affects logging messages from functions that have
    been tagged with the :code:`@logging_utils.logging_is_probabilistic`
    decorator; all others are logged unconditionally.
    """

    @overrides
    def filter(self, record: logging.LogRecord) -> bool:
        """Should the message be logged?"""
        identifier = f"{record.module}:{record.funcName}"
        threshold = probabilistic_logging_levels.get(identifier, 100.0)
        return misc_utils.execute_probabilistically(threshold)


class OnlyInfoFilter(logging.Filter):
    """A filter that only passes messages logged at the INFO logging
    level.  This is used by the :code:`--logging_info_is_print`
    commandline option to select a subset of the logging stream to
    send to a stdout handler.
    """

    @overrides
    def filter(self, record: logging.LogRecord):
        return record.levelno == logging.INFO


class MillisecondAwareFormatter(logging.Formatter):
    """
    A formatter that adds milliseconds to log message timestamps which,
    for whatever reason, the default Python logger doesn't do.

    .. note::

        You probably don't need to use this directly; it is
        wired in under :meth:`initialize_logging` so that the
        timestamps in log messages have millisecond level
        precision.
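
    A sketch of wiring it up by hand (this mirrors what
    :meth:`initialize_logging` does for each handler it creates)::

        handler = logging.StreamHandler(sys.stderr)
        handler.setFormatter(
            MillisecondAwareFormatter(
                fmt="%(levelname).1s:%(asctime)s: %(message)s",
                datefmt=None,
            )
        )
    """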

    converter = datetime.datetime.fromtimestamp  # type: ignore

    @overrides
    def formatTime(self, record, datefmt=None):
        ct = MillisecondAwareFormatter.converter(
            record.created, pytz.timezone("US/Pacific")
        )
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y-%m-%d %H:%M:%S")
            s = f"{t},{int(record.msecs):03d}"
        return s


def _log_about_logging(
    logger,
    default_logging_level,
    preexisting_handlers_count,
    fmt,
):
    """This is invoked automatically after logging is initialized such
    that the first messages in the log are about how logging itself
    was configured.
    """
    level_name = logging._levelToName.get(
        default_logging_level, str(default_logging_level)
    )
    logger.debug("Initialized global logging; logging level is %s.", level_name)
    if (
        config.config["logging_clear_preexisting_handlers"]
        and preexisting_handlers_count > 0
    ):
        logger.debug(
            "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
            preexisting_handlers_count,
        )
    logger.debug('Logging format specification is "%s"', fmt)
    if config.config["logging_debug_threads"]:
        logger.debug(
            "...Logging format spec captures tid/pid. (--logging_debug_threads)"
        )
    if config.config["logging_debug_modules"]:
        logger.debug(
            "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
        )
    if config.config["logging_syslog"]:
        logger.debug(
            "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
            config.config["logging_syslog_facility"],
        )
    if config.config["logging_filename"]:
        logger.debug(
            'Logging to file "%s". (--logging_filename)',
            config.config["logging_filename"],
        )
        logger.debug(
            "...with %d bytes max file size. (--logging_filename_maxsize)",
            config.config["logging_filename_maxsize"],
        )
        logger.debug(
            "...and %d rotating backup file count. (--logging_filename_count)",
            config.config["logging_filename_count"],
        )
    if config.config["logging_console"]:
        logger.debug("Logging to the console (stderr). (--logging_console)")
    if config.config["logging_info_is_print"]:
        logger.debug(
            "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
        )
    if config.config["logging_squelch_repeats"]:
        logger.debug(
            "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
        )
    else:
        logger.debug(
            "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
        )
    if config.config["logging_probabilistically"]:
        logger.debug(
            "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
        )
    else:
        logger.debug(
            "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
        )
    if config.config["lmodule"]:
        logger.debug(
            f'Logging dynamic per-module logging enabled. (--lmodule={config.config["lmodule"]})'
        )
    if config.config["logging_captures_prints"]:
        logger.debug(
            "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
        )


def initialize_logging(logger=None) -> logging.Logger:
    """Initialize logging for the program.  See module level comments
    for information about what functionality this provides and how to
    enable or disable it via the commandline.

    If you use the :meth:`bootstrap.initialize` decorator on your
    program's entry point, it will call this for you.  See
    :meth:`pyutils.bootstrap.initialize` for more details.
    """
    global LOGGING_INITIALIZED
    if LOGGING_INITIALIZED:
        return logging.getLogger()
    LOGGING_INITIALIZED = True

    if logger is None:
        logger = logging.getLogger()

    # --logging_clear_preexisting_handlers removes logging handlers
    # that were registered by global statements during imported module
    # setup.
    preexisting_handlers_count = 0
    assert config.has_been_parsed()
    if config.config["logging_clear_preexisting_handlers"]:
        while logger.hasHandlers():
            logger.removeHandler(logger.handlers[0])
            preexisting_handlers_count += 1

    # --logging_config_file pulls logging settings from a config file,
    # skipping the rest of this setup.
    if config.config["logging_config_file"] is not None:
        fileConfig(config.config["logging_config_file"])
        return logger

    handlers: List[logging.Handler] = []
    handler: Optional[logging.Handler] = None

    # Global default logging level (--logging_level); messages below
    # this level will be silenced.
    logging_level = config.config["logging_level"]
    assert logging_level is not None
    logging_level = logging_level.upper()
    default_logging_level = getattr(logging, logging_level, None)
    if not isinstance(default_logging_level, int):
        raise ValueError(f'Invalid level: {config.config["logging_level"]}')

    # Custom or default --logging_format?
    if config.config["logging_format"]:
        fmt = config.config["logging_format"]
    elif config.config["logging_syslog"]:
        fmt = "%(levelname).1s:%(filename)s[%(process)d]: %(message)s"
    else:
        fmt = "%(levelname).1s:%(asctime)s: %(message)s"

    # --logging_debug_threads and --logging_debug_modules both affect
    # the format by prepending information about the pid/tid or the
    # file/function/line that emitted the message.
    if config.config["logging_debug_threads"]:
        fmt = f"%(process)d.%(thread)d|{fmt}"
    if config.config["logging_debug_modules"]:
        fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
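
    # For example, with both debug flags on and the default (console) base
    # format, fmt is now:
    #   "%(filename)s:%(funcName)s:%(lineno)s|%(process)d.%(thread)d|%(levelname).1s:%(asctime)s: %(message)s"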

    # --logging_syslog (optionally with --logging_syslog_facility)
    # sets up for logging to use the standard system syslogd as a
    # sink.
    if config.config["logging_syslog"]:
        if sys.platform not in ("win32", "cygwin"):
            facility_name = "LOG_USER"
            if config.config["logging_syslog_facility"]:
                facility_name = "LOG_" + config.config["logging_syslog_facility"]
            facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)  # type: ignore
            assert facility is not None
            handler = SysLogHandler(facility=facility, address="/dev/log")
            handler.setFormatter(
                MillisecondAwareFormatter(
                    fmt=fmt,
                    datefmt=config.config["logging_date_format"],
                )
            )
            handlers.append(handler)

    # --logging_filename (with friends --logging_filename_count and
    # --logging_filename_maxsize) sets up logging to a file on the
    # filesystem with automatic rotation when it gets too big.
    if config.config["logging_filename"]:
        max_bytes = config.config["logging_filename_maxsize"]
        assert max_bytes and isinstance(max_bytes, int)
        backup_count = config.config["logging_filename_count"]
        assert backup_count and isinstance(backup_count, int)
        handler = RotatingFileHandler(
            config.config["logging_filename"],
            maxBytes=max_bytes,
            backupCount=backup_count,
        )
        handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=fmt,
                datefmt=config.config["logging_date_format"],
            )
        )
        handlers.append(handler)

    # --logging_console is, ahem, logging to the console.
    if config.config["logging_console"]:
        handler = logging.StreamHandler(sys.stderr)
        handler.setFormatter(
            MillisecondAwareFormatter(
                fmt=fmt,
                datefmt=config.config["logging_date_format"],
            )
        )
        handlers.append(handler)

    if len(handlers) == 0:
        handlers.append(logging.NullHandler())
    for handler in handlers:
        logger.addHandler(handler)

    # --logging_info_is_print echoes any message to logger.info(x) as
    # a print statement on stdout.
    if config.config["logging_info_is_print"]:
        handler = logging.StreamHandler(sys.stdout)
        handler.addFilter(OnlyInfoFilter())
        logger.addHandler(handler)

    # --logging_squelch_repeats allows code to request repeat logging
    # messages (identical log site and message contents) to be
    # silenced.  Logging code must request this explicitly, it isn't
    # automatic.  This option just allows the silencing to happen.
    if config.config["logging_squelch_repeats"]:
        for handler in handlers:
            handler.addFilter(SquelchRepeatedMessagesFilter())

    # --logging_probabilistically allows code to request
    # non-deterministic logging where messages have some probability
    # of being produced.  Logging code must request this explicitly.
    # This option just allows the non-deterministic behavior to
    # happen.  Disabling it will cause every log message to be
    # produced.
    if config.config["logging_probabilistically"]:
        for handler in handlers:
            handler.addFilter(ProbabilisticFilter())

    # --lmodule is a way to have a special logging level for just one
    # module or one set of modules that is different than the one set
    # globally via --logging_level.
    for handler in handlers:
        handler.addFilter(
            DynamicPerScopeLoggingLevelFilter(
                default_logging_level,
                config.config["lmodule"],
            )
        )

    # Level gating happens in the DynamicPerScopeLoggingLevelFilter above,
    # so accept everything at the logger itself.
    logger.setLevel(0)
    logger.propagate = False

    # --logging_captures_prints, if set, will capture and log.info
    # anything printed on stdout.
    if config.config["logging_captures_prints"]:

        def print_and_also_log(*arg, **kwarg):
            f = kwarg.get("file", None)
            if f == sys.stderr:
                logger.warning(*arg)
            else:
                logger.info(*arg)
            BUILT_IN_PRINT(*arg, **kwarg)

        builtins.print = print_and_also_log

    # At this point the logger is ready and handlers are set up;
    # log about the logging configuration itself.
    _log_about_logging(
        logger,
        default_logging_level,
        preexisting_handlers_count,
        fmt,
    )
    return logger


def get_logger(name: str = ""):
    """Get the global logger, initializing logging if needed."""
    logger = logging.getLogger(name)
    return initialize_logging(logger)


def tprint(*args, **kwargs) -> None:
    """Legacy function for printing a message augmented with a thread id;
    still needed by some code.  Please use :code:`--logging_debug_threads`
    in new code.
    """
    if config.config["logging_debug_threads"]:
        from pyutils.parallelize.thread_utils import current_thread_id

        print(f"{current_thread_id()}", end="")
        print(*args, **kwargs)


class OutputMultiplexer(object):
    """A class that broadcasts printed messages to several sinks
    (including various logging levels, different files, different file
    handles, the house log, etc...).  See also
    :class:`OutputMultiplexerContext` for an easy usage pattern.
    """

    class Destination(enum.IntEnum):
        """Bits in the destination_bitv bitvector.  Used to indicate the
        output destination."""

        LOG_DEBUG = 0x01     # ⎫
        LOG_INFO = 0x02      # ⎪
        LOG_WARNING = 0x04   # ⎬ Must provide logger to the c'tor.
        LOG_ERROR = 0x08     # ⎪
        LOG_CRITICAL = 0x10  # ⎭
        FILENAMES = 0x20     # Must provide a filename to the c'tor.
        FILEHANDLES = 0x40   # Must provide a handle to the c'tor.
        HLOG = 0x80          # Emit via hlog() to the house log.
        ALL_LOG_DESTINATIONS = (
            LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        )
        ALL_OUTPUT_DESTINATIONS = 0xFF  # Every destination bit above.

    def __init__(
        self,
        destination_bitv: int,
        *,
        logger=None,
        filenames: Optional[Iterable[str]] = None,
        handles: Optional[Iterable[io.TextIOWrapper]] = None,
    ):
        """
        Constructs the OutputMultiplexer instance.

        Args:
            destination_bitv: a bitvector where each bit represents an
                output destination.  Multiple bits may be set.
            logger: if LOG_* bits are set, you must pass a logger here.
            filenames: if the FILENAMES bit is set, this should be a list of
                files you'd like to output into.  This code handles opening
                and closing said files.
            handles: if the FILEHANDLES bit is set, this should be a list of
                already opened filehandles you'd like to output into.  The
                handles will remain open after the scope of the multiplexer.
        """
        if logger is None:
            logger = logging.getLogger(None)
        self.logger = logger

        self.f: Optional[List[Any]] = None
        if filenames is not None:
            self.f = [open(filename, "wb", buffering=0) for filename in filenames]
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
                raise ValueError("Filenames argument is required if bitv & FILENAMES")

        self.h: Optional[List[Any]] = None
        if handles is not None:
            self.h = list(handles)
        else:
            if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
                raise ValueError("Handle argument is required if bitv & FILEHANDLES")

        self.set_destination_bitv(destination_bitv)

    def get_destination_bitv(self):
        """Where are we outputting?"""
        return self.destination_bitv

    def set_destination_bitv(self, destination_bitv: int):
        """Change the output destination_bitv to the one provided."""
        if destination_bitv & self.Destination.FILENAMES and self.f is None:
            raise ValueError("Filename argument is required if bitv & FILENAMES")
        if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
        self.destination_bitv = destination_bitv

    def print(self, *args, **kwargs):
        """Produce some output to all sinks."""
        from pyutils.string_utils import _sprintf, strip_escape_sequences

        end = kwargs.pop("end", None)
        if end is not None:
            if not isinstance(end, str):
                raise TypeError("end must be None or a string")
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        buf = _sprintf(*args, end="", sep=sep)
        if end is None:
            end = "\n"
        if end == "\n":
            buf += "\n"

        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
            for file in self.f:
                file.write(buf.encode("utf-8"))
                file.flush()

        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
            for handle in self.h:
                handle.write(buf)
                handle.flush()

        buf = strip_escape_sequences(buf)
        if self.logger is not None:
            if self.destination_bitv & self.Destination.LOG_DEBUG:
                self.logger.debug(buf)
            if self.destination_bitv & self.Destination.LOG_INFO:
                self.logger.info(buf)
            if self.destination_bitv & self.Destination.LOG_WARNING:
                self.logger.warning(buf)
            if self.destination_bitv & self.Destination.LOG_ERROR:
                self.logger.error(buf)
            if self.destination_bitv & self.Destination.LOG_CRITICAL:
                self.logger.critical(buf)
        if self.destination_bitv & self.Destination.HLOG:
            hlog(buf)

    def close(self):
        """Close all open files."""
        if self.f is not None:
            for file in self.f:
                file.close()


class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
    """
    A context manager that uses an :class:`OutputMultiplexer`.  e.g.::

        with OutputMultiplexerContext(
            OutputMultiplexer.Destination.LOG_INFO |
            OutputMultiplexer.Destination.LOG_DEBUG |
            OutputMultiplexer.Destination.FILENAMES |
            OutputMultiplexer.Destination.FILEHANDLES,
            filenames=["/tmp/foo.log", "/var/log/bar.log"],
            handles=[sys.stdout],
        ) as mplex:
            mplex.print("This is a log message!")
    """

    def __init__(
        self,
        destination_bitv: OutputMultiplexer.Destination,
        *,
        logger=None,
        filenames=None,
        handles=None,
    ):
        """
        Args:
            destination_bitv: a bitvector that indicates where we should
                send output.  See :class:`OutputMultiplexer` for options.
            logger: optional logger to use for log destination messages.
            filenames: optional filenames to write for filename destination
                messages.
            handles: optional open filehandles to write for filehandle
                destination messages.
        """
        super().__init__(
            destination_bitv,
            logger=logger,
            filenames=filenames,
            handles=handles,
        )

    def __enter__(self):
        return self

    def __exit__(self, etype, value, traceback) -> bool:
        super().close()
        if etype is not None:
            return False
        return True


def hlog(message: str) -> None:
    """Write a message to the house log (syslog facility local7, priority
    info) by invoking :code:`/usr/bin/logger`.  This is pretty hacky but used
    by a bunch of (my) code.  Another way to do this would be to use
    :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
    I can't actually say that's easier.

    TODO: this needs to move.
    """
    message = message.replace("'", "'\"'\"'")
    os.system(f"/usr/bin/logger -p local7.info -- '{message}'")


if __name__ == "__main__":
    # Assumed entry point: run this module's doctests, following the
    # pattern used elsewhere in pyutils.
    import doctest

    doctest.testmod()