2 # -*- coding: utf-8 -*-
4 # © Copyright 2021-2023, Scott Gasch
7 This is a module that offers an opinionated take on how whole program
8 logging should be initialized and controlled. It uses the standard
9 Python :mod:`logging` but gives you control, via commandline config,
12 * Set the logging default level (debug, info, warning, error, critical)
13 of the whole program (see: :code:`--logging_level`)... and to override
14 the logging level with :class:`LoggingContext`.
15 * Prepend or append a message to every log record also with
16 :class:`LoggingContext`.
17 * define the logging message format (see :code:`--logging_format` and
18 :code:`--logging_date_format`) including easily adding a PID/TID
19 marker on all messages to help with multithreaded debugging
20 (:code:`--logging_debug_threads`) and force module names of code
21 that emits log messages to be included in the format
22 (:code:`--logging_debug_modules`),
23 * control the destination of logged messages:
25 - log to the console/stderr (:code:`--logging_console`) and/or
26 - log to a rotated file (:code:`--logging_filename`,
27 :code:`--logging_filename_maxsize` and :code:`--logging_filename_count`)
29 - log to the UNIX syslog (:code:`--logging_syslog` and
30 :code:`--logging_syslog_facility`)
32 * optionally squelch repeated messages (:code:`--logging_squelch_repeats`),
33 * optionally log probabilistically (:code:`--logging_probabilistically`),
34 * capture printed messages into the info log (:code:`--logging_captures_prints`),
35 * and optionally clear unwanted logging handlers added by other imports
36 before this one (:code:`--logging_clear_preexisting_handlers`).
38 To use this functionality, call :meth:`initialize_logging` early
39 in your program entry point. If you use the
40 :meth:`pyutils.bootstrap.initialize` decorator on your program's entry
41 point, it will call this for you automatically.
52 from logging.config import fileConfig
53 from logging.handlers import RotatingFileHandler, SysLogHandler
54 from typing import Any, Callable, Dict, Iterable, List, Optional, Union
57 from overrides import overrides
59 # This module is commonly used by others in here and should avoid
60 # taking any unnecessary dependencies back on them.
61 from pyutils import argparse_utils, config, misc_utils
63 cfg = config.add_commandline_args(f"Logging ({__file__})", "Args related to logging")
65 "--logging_config_file",
66 type=argparse_utils.valid_filename,
69 help="Config file containing the logging setup, see: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial",
75 choices=["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
77 help="The global default level below which to squelch log messages",
83 help="The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects",
86 "--logging_date_format",
88 default="%Y/%m/%dT%H:%M:%S.%f%z",
90 help="The format of any dates in --logging_format.",
94 action=argparse_utils.ActionNoYes,
96 help="Should we log to the console (stderr)",
103 help="The filename of the logfile to write.",
106 "--logging_filename_maxsize",
108 default=(1024 * 1024),
110 help="The maximum size (in bytes) to write to the logging_filename.",
113 "--logging_filename_count",
117 help="The number of logging_filename copies to keep before deleting.",
121 action=argparse_utils.ActionNoYes,
123 help="Should we log to localhost's syslog.",
126 "--logging_syslog_facility",
152 metavar="SYSLOG_FACILITY_LIST",
153 help="The default syslog message facility identifier",
156 "--logging_debug_threads",
157 action=argparse_utils.ActionNoYes,
159 help="Should we prepend pid/tid data to all log messages?",
162 "--logging_debug_modules",
163 action=argparse_utils.ActionNoYes,
165 help="Should we prepend module/function data to all log messages?",
168 "--logging_info_is_print",
169 action=argparse_utils.ActionNoYes,
171 help="logging.info also prints to stdout.",
174 "--logging_squelch_repeats",
175 action=argparse_utils.ActionNoYes,
177 help="Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?",
180 "--logging_probabilistically",
181 action=argparse_utils.ActionNoYes,
183 help="Do we allow probabilistic logging (for code that wants it) or should we always log?",
185 # See also: OutputMultiplexer
187 "--logging_captures_prints",
188 action=argparse_utils.ActionNoYes,
190 help="When calling print, also log.info automatically.",
193 "--logging_clear_preexisting_handlers",
194 action=argparse_utils.ActionNoYes,
197 "Should logging code clear preexisting global logging handlers and thus insist that is "
198 + "alone can add handlers. Use this to work around annoying modules that insert global "
199 + "handlers with formats and logging levels you might now want. Caveat emptor, this may "
200 + "cause you to miss logging messages."
204 BUILT_IN_PRINT = print
205 LOGGING_INITIALIZED = False
208 # A map from logging_callsite_id -> count of logged messages.
209 squelched_logging_counts: Dict[str, int] = {}
212 def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
214 A decorator that marks a function as interested in having the logging
215 messages that it produces be squelched (ignored) after it logs the
216 same message more than N times.
220 This decorator affects *ALL* logging messages produced
221 within the decorated function. That said, messages must be
222 identical in order to be squelched. For example, if the same line
223 of code produces different messages (because of, e.g., a format
224 string), the messages are considered to be different.
226 An example of this from the pyutils code itself can be found in
227 :meth:`pyutils.ansi.fg` and :meth:`pyutils.ansi.bg` methods::
229 @logging_utils.squelch_repeated_log_messages(1)
231 name: Optional[str] = "",
232 red: Optional[int] = None,
233 green: Optional[int] = None,
234 blue: Optional[int] = None,
236 force_16color: bool = False,
237 force_216color: bool = False,
241 These methods log stuff like "Using 24-bit color strategy" which
242 gets old really fast and fills up the logs. By decorating the methods
243 with :code:`@logging_utils.squelch_repeated_log_messages(1)` the code
244 is requesting that its logged messages be dropped silently after the
245 first one is produced (note the argument 1).
247 Users can insist that all logged messages always be reflected in the
248 logs using the :code:`--no_logging_squelch_repeats` flag but the default
249 behavior is to allow code to request it be squelched.
251 :code:`--logging_squelch_repeats` only affects code with this decorator
252 on it; it ignores all other code.
255 squelch_after_n_repeats: the number of repeated messages allowed to
256 log before subsequent messages are silently dropped.
259 def squelch_logging_wrapper(f: Callable):
260 from pyutils import function_utils
262 identifier = function_utils.function_identifier(f)
263 squelched_logging_counts[identifier] = squelch_after_n_repeats
266 return squelch_logging_wrapper
269 class SquelchRepeatedMessagesFilter(logging.Filter):
270 """A filter that only logs messages from a given site with the same
271 (exact) message at the same logging level N times and ignores
272 subsequent attempts to log.
274 This filter only affects logging messages that repeat more than a
275 threshold number of times from functions that are tagged with the
276 :code:`@logging_utils.squelched_logging_ok` decorator (see above);
277 all others are ignored.
279 This functionality is enabled by default but can be disabled via
280 the :code:`--no_logging_squelch_repeats` commandline flag.
283 def __init__(self) -> None:
285 self.counters: collections.Counter = collections.Counter()
288 def filter(self, record: logging.LogRecord) -> bool:
289 """Should we drop this log message?"""
290 id1 = f"{record.module}:{record.funcName}"
291 if id1 not in squelched_logging_counts:
293 threshold = squelched_logging_counts[id1]
294 logsite = f"{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}"
295 count = self.counters[logsite]
296 self.counters[logsite] += 1
297 return count < threshold
300 # A map from function_identifier -> probability of logging (0.0%..100.0%)
301 probabilistic_logging_levels: Dict[str, float] = {}
304 def logging_is_probabilistic(probability_of_logging: float) -> Callable:
305 """A decorator that indicates that all logging statements within the
306 scope of a particular (marked via decorator) function are not
307 deterministic (i.e. they do not always unconditionally log) but rather
308 are probabilistic (i.e. they log N% of the time, randomly) when the
309 user passes the :code:`--logging_probabilistically` commandline flag
310 (which is enabled by default).
314 This affects *ALL* logging statements within the marked function.
315 If you want it to only affect a subset of logging statements,
316 log those statements in a separate function that you invoke
317 from within the "too large" scope and mark that separate function
318 with the :code:`logging_is_probabilistic` decorator instead.
320 That this functionality can be disabled (forcing all logged
321 messages to produce output) via the
322 :code:`--no_logging_probabilistically` cmdline argument.
325 def probabilistic_logging_wrapper(f: Callable):
326 from pyutils import function_utils
328 identifier = function_utils.function_identifier(f)
329 probabilistic_logging_levels[identifier] = probability_of_logging
332 return probabilistic_logging_wrapper
335 class ProbabilisticFilter(logging.Filter):
337 A filter that logs messages probabilistically (i.e. randomly at some
338 percent chance). This filter is used with a decorator (see
339 :meth:`logging_is_probabilistic`) to implement the
340 :code:`--logging_probabilistically` commandline flag.
342 This filter only affects logging messages from functions that have
343 been tagged with the `@logging_utils.probabilistic_logging` decorator.
347 def filter(self, record: logging.LogRecord) -> bool:
348 """Should the message be logged?"""
349 identifier = f"{record.module}:{record.funcName}"
350 threshold = probabilistic_logging_levels.get(identifier, 100.0)
351 return misc_utils.execute_probabilistically(threshold)
354 class OnlyInfoFilter(logging.Filter):
355 """A filter that only logs messages produced at the INFO logging
356 level. This is used by the :code:`--logging_info_is_print`
357 commandline option to select a subset of the logging stream to
358 send to a stdout handler.
362 def filter(self, record: logging.LogRecord):
363 return record.levelno == logging.INFO
366 class PrefixAddingFilter(logging.Filter):
367 def __init__(self, prefix: str, klobber: bool = False):
373 self.klobber = klobber
375 def filter(self, record: logging.LogRecord):
377 record.prefix = self.prefix
378 elif "prefix" not in record.__dict__:
379 record.prefix = self.prefix
383 class SuffixAddingFilter(logging.Filter):
384 def __init__(self, suffix: str, klobber: bool = False):
390 self.klobber = klobber
392 def filter(self, record: logging.LogRecord):
394 record.suffix = self.suffix
395 elif "suffix" not in record.__dict__:
396 record.suffix = self.suffix
400 class PrependingLogAdapter(logging.LoggerAdapter):
401 def process(self, msg, kwargs):
402 return f'{self.extra.get("prefix", "")}{msg}', kwargs
405 def wrap_logger(prefix: str, logger: logging.Logger) -> logging.LoggerAdapter:
406 """Helper method around the creation of a LogAdapter that prepends
407 a given string to every log message produced.
410 prefix: the message to prepend to every log message.
411 logger: the logger whose messages to modify.
414 A new logger wrapping the old one with the given behavior.
415 The old logger will continue to behave as usual; simply drop
416 the reference to this wrapper when it's no longer needed.
419 return PrependingLogAdapter(logger, {"prefix": prefix})
422 class AppendingLogAdapter(logging.LoggerAdapter):
423 def process(self, msg, kwargs):
424 return f'{msg}{self.extra.get("suffix", "")}', kwargs
427 def wrap_logger(suffix: str, logger: logging.Logger) -> logging.LoggerAdapter:
428 """Helper method around the creation of a LoggerAdapter that appends
429 a given string to every log message produced.
432 suffix: the message to prepend to every log message.
433 logger: the logger whose messages to modify.
436 A new logger wrapping the old one with the given behavior.
437 The old logger will continue to behave as usual; simply drop
438 the reference to this wrapper when it's no longer needed.
441 return AppendingLogAdapter(logger, {"suffix": suffix})
444 class LoggingContext(contextlib.ContextDecorator):
447 logger: logging.Logger,
449 level: Optional[int] = None,
450 handlers: Optional[List[logging.Handler]] = None,
451 prefix: Optional[str] = None,
452 suffix: Optional[str] = None,
454 """This is a logging context that can be used to temporarily change the
455 way we are logging within its scope. Logging changes may include:
457 - Changing the logging level (e.g. from INFO to DEBUG)
458 - Adding a prefix or suffix to every log message produced
459 - Adding temporary Handlers to direct the logging output elsewhere
461 Setup for doctest / examples. This will normally be taken care of
462 by code in :meth:`initialize_logging` so you don't have to worry
465 >>> logging_format = "%(prefix)s%(message)s%(suffix)s"
466 >>> logger = logging.getLogger(__name__)
467 >>> logger.setLevel(logging.INFO)
468 >>> handler = logging.StreamHandler(sys.stdout)
469 >>> handler.setFormatter(
470 ... MillisecondAwareFormatter(
471 ... fmt=logging_format,
475 >>> logger.addHandler(handler)
476 >>> logger.addFilter(PrefixAddingFilter(None))
477 >>> logger.addFilter(SuffixAddingFilter(None))
479 First, this logger should be currently be configured to send
480 INFO+ messages to sys.stdout. Let's see it in action:
482 >>> logger.info("Hello world!")
484 >>> logger.debug("You should not see this")
486 The first example is to simply change the level of the logger.
487 Here we temporarily change it to DEBUG within the body of the
488 :class:`LoggingContext`:
490 >>> with LoggingContext(logger, level=logging.DEBUG):
491 ... logger.debug("You should see this now")
492 ... logger.info("Of course you should still see this too")
493 You should see this now
494 Of course you should still see this too
496 >>> logger.debug("Outside of the context we are at INFO again")
497 >>> logger.debug("(which is why you don't see these)")
498 >>> logger.info("But you should see this at INFO level")
499 But you should see this at INFO level
501 The prefix and suffix argument prepend or append a message to
502 all log output. To do this, you need %(prefix)s and
503 %(suffix)s placeholders in your logger format string
504 indicating where to insert the data. This is useful, for
505 example, to add an active request identifier to the set of log
506 messages produced while processing it.
508 >>> logger.info("About to work on a new request")
509 About to work on a new request
511 >>> with LoggingContext(logger, prefix='10.0.0.13> '):
512 ... logger.info("Working on it now")
513 10.0.0.13> Working on it now
515 logger.info("Done with that request")
516 Done with that request
518 LoggingContext can also be used to add temporary handler(s).
519 This code temporarily uses two stdout handlers to double the
520 output for testing purporses but you could also temporarily,
521 e.g., add a RotatingFileHandler or SysLogHandler etc...
523 >>> with LoggingContext(logger, handlers=[logging.StreamHandler(sys.stdout)]):
524 ... logger.info("TEST")
528 Once leaving the context, logger's behavior is restored. In
529 this case, the extra handler is removed so output will not
532 >>> logger.info("OUTSIDE")
535 LoggingContext can also be used as a decorator if that is more
538 >>> @LoggingContext(logger, level=logging.DEBUG)
539 ... def log_stuff(logger):
540 ... logger.debug("But inside, the decorator has changed us to DEBUG")
542 >>> logger.debug("Outside, we're at INFO level and you don't see this")
543 >>> log_stuff(logger)
544 But inside, the decorator has changed us to DEBUG
545 >>> logger.debug("And, of course, out here we're still at INFO afterwards")
550 self.old_level: Optional[int] = None
551 self.handlers = handlers
553 self.prefix: Optional[logging.Filter] = PrefixAddingFilter(prefix, True)
557 self.suffix: Optional[logging.Filter] = SuffixAddingFilter(suffix, True)
561 def __enter__(self) -> Union[logging.Logger, logging.LoggerAdapter]:
564 self.old_level = self.logger.level
565 self.logger.setLevel(self.level)
568 for handler in self.handlers:
569 self.logger.addHandler(handler)
572 self.logger.addFilter(self.prefix)
575 self.logger.addFilter(self.suffix)
578 def __exit__(self, et, ev, tb) -> None:
580 self.logger.removeFilter(self.suffix)
583 self.logger.removeFilter(self.prefix)
586 for handler in self.handlers:
587 self.logger.removeHandler(handler)
591 self.logger.setLevel(self.old_level)
592 return None # propagate exceptions out
595 class MillisecondAwareFormatter(logging.Formatter):
596 """A formatter for adding milliseconds to log messages which, for
597 whatever reason, the default Python logger doesn't do.
601 You probably don't need to use this directly but it is
602 wired in under :meth:`initialize_logging` so that the
603 timestamps in log messages have millisecond level
608 converter = datetime.datetime.fromtimestamp # type: ignore
611 def formatTime(self, record, datefmt=None):
612 ct = MillisecondAwareFormatter.converter(
613 record.created, pytz.timezone("US/Pacific")
616 s = ct.strftime(datefmt)
618 t = ct.strftime("%Y-%m-%d %H:%M:%S")
619 s = f"{t},{record.msecs:%03d}"
623 def _log_about_logging(
625 default_logging_level,
626 preexisting_handlers_count,
630 """This is invoked automatically after logging is initialized such
631 that the first messages in the log are about how logging itself
634 level_name = logging._levelToName.get(
635 default_logging_level, str(default_logging_level)
637 logger.debug("Initialized global logging; logging level is %s.", level_name)
639 config.config["logging_clear_preexisting_handlers"]
640 and preexisting_handlers_count > 0
643 "Logging cleared %d global handlers (--logging_clear_preexisting_handlers)",
644 preexisting_handlers_count,
646 logger.debug('Logging format specification is "%s"', fmt)
647 if config.config["logging_debug_threads"]:
649 "...Logging format spec captures tid/pid. (--logging_debug_threads)"
651 if config.config["logging_debug_modules"]:
653 "...Logging format spec captures files/functions/lineno. (--logging_debug_modules)"
655 if config.config["logging_syslog"]:
657 "Logging to syslog as %s with priority mapping based on level. (--logging_syslog)",
660 if config.config["logging_filename"]:
662 'Logging to file "%s". (--logging_filename)',
663 config.config["logging_filename"],
666 "...with %d bytes max file size. (--logging_filename_maxsize)",
667 config.config["logging_filename_maxsize"],
670 "...and %d rotating backup file count. (--logging_filename_count)",
671 config.config["logging_filename_count"],
673 if config.config["logging_console"]:
674 logger.debug("Logging to the console (stderr). (--logging_console)")
675 if config.config["logging_info_is_print"]:
677 "Logging logger.info messages will be repeated on stdout. (--logging_info_is_print)"
679 if config.config["logging_squelch_repeats"]:
681 "Logging code allowed to request repeated messages be squelched. (--logging_squelch_repeats)"
685 "Logging code forbidden to request messages be squelched; all messages logged. (--no_logging_squelch_repeats)"
687 if config.config["logging_probabilistically"]:
689 "Logging code is allowed to request probabilistic logging. (--logging_probabilistically)"
693 "Logging code is forbidden to request probabilistic logging; messages always logged. (--no_logging_probabilistically)"
695 if config.config["logging_captures_prints"]:
697 "Logging will capture printed data as logger.info messages. (--logging_captures_prints)"
701 def _construct_logging_format() -> str:
702 # Custom or default --logging_format?
703 if config.config["logging_format"]:
704 fmt = config.config["logging_format"]
705 elif config.config["logging_syslog"]:
706 fmt = "%(levelname).1s:%(filename)s[%(process)d]: "
708 fmt = "%(levelname).1s:%(asctime)s: "
710 fmt += "%(prefix)s%(message)s%(suffix)s"
712 # --logging_debug_threads and --logging_debug_modules both affect
713 # the format by prepending information about the pid/tid or
715 if config.config["logging_debug_threads"]:
716 fmt = f"%(process)d.%(thread)d|{fmt}"
717 if config.config["logging_debug_modules"]:
718 fmt = f"%(filename)s:%(funcName)s:%(lineno)s|{fmt}"
722 def _construct_handlers_list(
723 logging_format: str, facility_name: Optional[str]
724 ) -> List[logging.Handler]:
725 retval: List[logging.Handler] = []
727 # --logging_syslog (optionally with --logging_syslog_facility)
728 # sets up for logging to use the standard system syslogd as a
730 if config.config["logging_syslog"]:
731 if sys.platform not in ("win32", "cygwin"):
733 facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
735 syslog_handler = SysLogHandler(facility=facility, address="/dev/log")
736 syslog_handler.setFormatter(
737 MillisecondAwareFormatter(
739 datefmt=config.config["logging_date_format"],
742 retval.append(syslog_handler)
744 # --logging_filename (with friends --logging_filename_count and
745 # --logging_filename_maxsize) set up logging to a file on the
746 # filesystem with automatic rotation when it gets too big.
747 if config.config["logging_filename"]:
748 max_bytes = config.config["logging_filename_maxsize"]
749 assert max_bytes and isinstance(max_bytes, int)
750 backup_count = config.config["logging_filename_count"]
751 assert backup_count and isinstance(backup_count, int)
752 file_handler = RotatingFileHandler(
753 config.config["logging_filename"],
755 backupCount=backup_count,
757 file_handler.setFormatter(
758 MillisecondAwareFormatter(
760 datefmt=config.config["logging_date_format"],
763 retval.append(file_handler)
765 # --logging_console is, ahem, logging to the console.
766 if config.config["logging_console"]:
767 console_handler = logging.StreamHandler(sys.stderr)
768 console_handler.setFormatter(
769 MillisecondAwareFormatter(
771 datefmt=config.config["logging_date_format"],
774 retval.append(console_handler)
777 retval.append(logging.NullHandler())
781 def _add_logging_handlers_and_filters(
782 logger: logging.Logger, handlers: List[logging.Handler]
785 for handler in handlers:
786 # These two are always present and set %(prefix) and %(suffix)
787 # if appropriate (see :class:`LoggingContext`) or make them
789 handler.addFilter(PrefixAddingFilter(""))
790 handler.addFilter(SuffixAddingFilter(""))
792 # --logging_squelch_repeats allows code to request repeat logging
793 # messages (identical log site and message contents) to be
794 # silenced. Logging code must request this explicitly, it isn't
795 # automatic. This option just allows the silencing to happen.
796 if config.config["logging_squelch_repeats"]:
797 handler.addFilter(SquelchRepeatedMessagesFilter())
799 # --logging_probabilistically allows code to request
800 # non-deterministic logging where messages have some probability
801 # of being produced. Logging code must request this explicitly.
802 # This option just allows the non-deterministic behavior to
803 # happen. Disabling it will cause every log message to be
805 if config.config["logging_probabilistically"]:
806 handler.addFilter(ProbabilisticFilter())
807 logger.addHandler(handler)
809 # --logging_info_is_print echoes any message to logger.info(x) as
810 # a print statement on stdout.
811 if config.config["logging_info_is_print"]:
812 handler = logging.StreamHandler(sys.stdout)
813 handler.addFilter(OnlyInfoFilter())
814 logger.addHandler(handler)
817 def initialize_logging(logger=None) -> logging.Logger:
818 """Initialize logging for the program. See module level comments
819 for information about what functionality this provides and how to
820 enable or disable functionality via the commandline.
823 :meth:`bootstrap.initialize` decorator on your program's entry point,
824 it will call this for you. See :meth:`pyutils.bootstrap.initialize`
827 global LOGGING_INITIALIZED
828 if LOGGING_INITIALIZED:
829 return logging.getLogger()
830 LOGGING_INITIALIZED = True
832 preexisting_handlers_count = 0
834 # --logging_clear_preexisting_handlers removes logging handlers
835 # that were registered by global statements during imported module
837 if config.config["logging_clear_preexisting_handlers"]:
838 logging.basicConfig(force=True)
839 logger = logging.getLogger()
840 while logger.hasHandlers():
841 logger.removeHandler(logger.handlers[0])
842 preexisting_handlers_count += 1
844 logging.basicConfig()
845 logger = logging.getLogger()
847 # --logging_config_file pulls logging settings from a config file
848 # skipping the rest of this setup.
849 if config.config["logging_config_file"] is not None:
850 fileConfig(config.config["logging_config_file"])
853 # Global default logging level (--logging_level); messages below
854 # this level will be silenced.
855 logging_level = config.config["logging_level"]
857 logging_level = logging_level.upper()
858 default_logging_level = getattr(logging, logging_level, None)
859 if not isinstance(default_logging_level, int):
860 raise ValueError(f'Invalid level: {config.config["logging_level"]}')
861 logger.setLevel(default_logging_level)
863 logging_format = _construct_logging_format()
865 if config.config["logging_syslog"]:
866 if sys.platform not in ("win32", "cygwin"):
867 if config.config["logging_syslog_facility"]:
868 facility_name = "LOG_" + config.config["logging_syslog_facility"]
869 handlers: List[logging.Handler] = _construct_handlers_list(
870 logging_format, facility_name
872 _add_logging_handlers_and_filters(logger, handlers)
874 # --logging_captures_prints, if set, will capture and log.info
875 # anything printed on stdout.
876 if config.config["logging_captures_prints"]:
879 def print_and_also_log(*arg, **kwarg):
880 f = kwarg.get("file", None)
885 BUILT_IN_PRINT(*arg, **kwarg)
887 builtins.print = print_and_also_log
889 # At this point the logger is ready, handlers are set up,
890 # etc... so log about the logging configuration.
893 default_logging_level,
894 preexisting_handlers_count,
901 def get_logger(name: str = ""):
902 """Get the global logger"""
903 logger = logging.getLogger(name)
904 return initialize_logging(logger)
907 def tprint(*args, **kwargs) -> None:
908 """Legacy function for printing a message augmented with thread id
909 still needed by some code. Please use --logging_debug_threads in
912 if config.config["logging_debug_threads"]:
913 from pyutils.parallelize.thread_utils import current_thread_id
915 print(f"{current_thread_id()}", end="")
916 print(*args, **kwargs)
921 class OutputMultiplexer(object):
922 """A class that broadcasts printed messages to several sinks
923 (including various logging levels, different files, different file
924 handles, the house log, etc...). See also
925 :class:`OutputMultiplexerContext` for an easy usage pattern.
928 class Destination(enum.IntEnum):
929 """Bits in the destination_bitv bitvector. Used to indicate the
930 output destination."""
935 LOG_WARNING = 0x04 # ⎬ Must provide logger to the c'tor.
937 LOG_CRITICAL = 0x10 # ⎭
938 FILENAMES = 0x20 # Must provide a filename to the c'tor.
939 FILEHANDLES = 0x40 # Must provide a handle to the c'tor.
941 ALL_LOG_DESTINATIONS = (
942 LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
944 ALL_OUTPUT_DESTINATIONS = 0x8F
949 destination_bitv: int,
952 filenames: Optional[Iterable[str]] = None,
953 handles: Optional[Iterable[io.TextIOWrapper]] = None,
956 Constructs the OutputMultiplexer instance.
959 destination_bitv: a bitvector where each bit represents an
960 output destination. Multiple bits may be set.
961 logger: if LOG_* bits are set, you must pass a logger here.
962 filenames: if FILENAMES bit is set, this should be a list of
963 files you'd like to output into. This code handles opening
964 and closing said files.
965 handles: if FILEHANDLES bit is set, this should be a list of
966 already opened filehandles you'd like to output into. The
967 handles will remain open after the scope of the multiplexer.
970 logger = logging.getLogger(None)
973 self.f: Optional[List[Any]] = None
974 if filenames is not None:
975 self.f = [open(filename, "wb", buffering=0) for filename in filenames]
977 if destination_bitv & OutputMultiplexer.Destination.FILENAMES:
978 raise ValueError("Filenames argument is required if bitv & FILENAMES")
981 self.h: Optional[List[Any]] = None
982 if handles is not None:
983 self.h = list(handles)
985 if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
986 raise ValueError("Handle argument is required if bitv & FILEHANDLES")
989 self.set_destination_bitv(destination_bitv)
991 def get_destination_bitv(self):
992 """Where are we outputting?"""
993 return self.destination_bitv
995 def set_destination_bitv(self, destination_bitv: int):
996 """Change the output destination_bitv to the one provided."""
997 if destination_bitv & self.Destination.FILENAMES and self.f is None:
998 raise ValueError("Filename argument is required if bitv & FILENAMES")
999 if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
1000 raise ValueError("Handle argument is required if bitv & FILEHANDLES")
1001 self.destination_bitv = destination_bitv
1003 def print(self, *args, **kwargs):
1004 """Produce some output to all sinks."""
1005 from pyutils.string_utils import _sprintf, strip_escape_sequences
1007 end = kwargs.pop("end", None)
1009 if not isinstance(end, str):
1010 raise TypeError("end must be None or a string")
1011 sep = kwargs.pop("sep", None)
1013 if not isinstance(sep, str):
1014 raise TypeError("sep must be None or a string")
1016 raise TypeError("invalid keyword arguments to print()")
1017 buf = _sprintf(*args, end="", sep=sep)
1024 if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
1026 _.write(buf.encode("utf-8"))
1029 if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
1034 buf = strip_escape_sequences(buf)
1035 if self.logger is not None:
1036 if self.destination_bitv & self.Destination.LOG_DEBUG:
1037 self.logger.debug(buf)
1038 if self.destination_bitv & self.Destination.LOG_INFO:
1039 self.logger.info(buf)
1040 if self.destination_bitv & self.Destination.LOG_WARNING:
1041 self.logger.warning(buf)
1042 if self.destination_bitv & self.Destination.LOG_ERROR:
1043 self.logger.error(buf)
1044 if self.destination_bitv & self.Destination.LOG_CRITICAL:
1045 self.logger.critical(buf)
1046 if self.destination_bitv & self.Destination.HLOG:
1050 """Close all open files."""
1051 if self.f is not None:
1056 class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
1058 A context that uses an :class:`OutputMultiplexer`. e.g.::
1060 with OutputMultiplexerContext(
1061 OutputMultiplexer.LOG_INFO |
1062 OutputMultiplexer.LOG_DEBUG |
1063 OutputMultiplexer.FILENAMES |
1064 OutputMultiplexer.FILEHANDLES,
1065 filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
1068 mplex.print("This is a log message!")
1073 destination_bitv: OutputMultiplexer.Destination,
1081 destination_bitv: a bitvector that indicates where we should
1082 send output. See :class:`OutputMultiplexer` for options.
1083 logger: optional logger to use for log destination messages.
1084 filenames: optional filenames to write for filename destination
1086 handles: optional open filehandles to write for filehandle
1087 destination messages.
1092 filenames=filenames,
1096 def __enter__(self):
1099 def __exit__(self, etype, value, traceback) -> bool:
1101 if etype is not None:
1106 def hlog(message: str) -> None:
1107 """Write a message to the house log (syslog facility local7 priority
1108 info) by calling `/usr/bin/logger`. This is pretty hacky but used
1109 by a bunch of (my) code. Another way to do this would be to use
1110 :code:`--logging_syslog` and :code:`--logging_syslog_facility` but
1111 I can't actually say that's easier.
1113 TODO: this needs to move.
1115 message = message.replace("'", "'\"'\"'")
1116 os.system(f"/usr/bin/logger -p local7.info -- '{message}'")
1119 if __name__ == "__main__":