X-Git-Url: https://wannabe.guru.org/gitweb/?a=blobdiff_plain;f=logging_utils.py;h=bf8d8b062b911507ccbd7f68f5346530c7bd0d79;hb=36fea7f15ed17150691b5b3ead75450e575229ef;hp=034f90c0ee3ab932ffbf92740017fed369884b55;hpb=4c315e387f18010ba0b5661744ad3c792f21d2d1;p=python_utils.git

diff --git a/logging_utils.py b/logging_utils.py
index 034f90c..bf8d8b0 100644
--- a/logging_utils.py
+++ b/logging_utils.py
@@ -10,9 +10,12 @@ import io
 import logging
+import logging.config
 from logging.handlers import RotatingFileHandler, SysLogHandler
 import os
-import pytz
+import random
 import sys
-from typing import Iterable, Optional
+from typing import Callable, Dict, Iterable, Optional
+
+from overrides import overrides
+import pytz
 
 # This module is commonly used by others in here and should avoid
 # taking any unnecessary dependencies back on them.
@@ -20,8 +23,8 @@ import argparse_utils
 import config
 
 cfg = config.add_commandline_args(
-    f'Logging ({__file__})',
-    'Args related to logging')
+    f'Logging ({__file__})', 'Args related to logging'
+)
 cfg.add_argument(
     '--logging_config_file',
     type=argparse_utils.valid_filename,
@@ -35,20 +38,20 @@ cfg.add_argument(
     default='INFO',
     choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
     metavar='LEVEL',
-    help='The level below which to squelch log messages.',
+    help='The global default level below which to squelch log messages; see also --lmodule',
 )
 cfg.add_argument(
     '--logging_format',
     type=str,
-    default='%(levelname).1s:%(asctime)s: %(message)s',
-    help='The format for lines logged via the logger module.'
+    default=None,
+    help='The format for lines logged via the logger module. See: https://docs.python.org/3/library/logging.html#formatter-objects',
 )
 cfg.add_argument(
     '--logging_date_format',
     type=str,
     default='%Y/%m/%dT%H:%M:%S.%f%z',
     metavar='DATEFMT',
-    help='The format of any dates in --logging_format.'
+    help='The format of any dates in --logging_format.',
 )
 cfg.add_argument(
     '--logging_console',
@@ -61,92 +64,330 @@ cfg.add_argument(
     type=str,
     default=None,
     metavar='FILENAME',
-    help='The filename of the logfile to write.'
+    help='The filename of the logfile to write.',
 )
 cfg.add_argument(
     '--logging_filename_maxsize',
     type=int,
-    default=(1024*1024),
+    default=(1024 * 1024),
     metavar='#BYTES',
-    help='The maximum size (in bytes) to write to the logging_filename.'
+    help='The maximum size (in bytes) to write to the logging_filename.',
 )
 cfg.add_argument(
     '--logging_filename_count',
     type=int,
-    default=2,
+    default=7,
     metavar='COUNT',
-    help='The number of logging_filename copies to keep before deleting.'
+    help='The number of logging_filename copies to keep before deleting.',
 )
 cfg.add_argument(
     '--logging_syslog',
     action=argparse_utils.ActionNoYes,
     default=False,
-    help='Should we log to localhost\'s syslog.'
+    help='Should we log to localhost\'s syslog.',
+)
+cfg.add_argument(
+    '--logging_syslog_facility',
+    type=str,
+    default='USER',
+    choices=[
+        'NOTSET',
+        'AUTH',
+        'AUTH_PRIV',
+        'CRON',
+        'DAEMON',
+        'FTP',
+        'KERN',
+        'LPR',
+        'MAIL',
+        'NEWS',
+        'SYSLOG',
+        'USER',
+        'UUCP',
+        'LOCAL0',
+        'LOCAL1',
+        'LOCAL2',
+        'LOCAL3',
+        'LOCAL4',
+        'LOCAL5',
+        'LOCAL6',
+        'LOCAL7',
+    ],
+    metavar='SYSLOG_FACILITY',
+    help='The default syslog message facility identifier.',
 )
 cfg.add_argument(
     '--logging_debug_threads',
     action=argparse_utils.ActionNoYes,
     default=False,
-    help='Should we prepend pid/tid data to all log messages?'
+    help='Should we prepend pid/tid data to all log messages?',
+)
+cfg.add_argument(
+    '--logging_debug_modules',
+    action=argparse_utils.ActionNoYes,
+    default=False,
+    help='Should we prepend module/function data to all log messages?',
 )
 cfg.add_argument(
     '--logging_info_is_print',
     action=argparse_utils.ActionNoYes,
     default=False,
-    help='logging.info also prints to stdout.'
+    help='logging.info also prints to stdout.',
 )
 cfg.add_argument(
-    '--logging_max_n_times_per_message',
-    type=int,
-    default=0,
-    help='When set, ignore logged messages from the same site after N.'
+    '--logging_squelch_repeats',
+    action=argparse_utils.ActionNoYes,
+    default=True,
+    help='Do we allow code to indicate that it wants to squelch repeated logging messages or should we always log?',
+)
+cfg.add_argument(
+    '--logging_probabilistically',
+    action=argparse_utils.ActionNoYes,
+    default=True,
+    help='Do we allow probabilistic logging (for code that wants it) or should we always log?',
 )
-
 # See also: OutputMultiplexer
 cfg.add_argument(
     '--logging_captures_prints',
     action=argparse_utils.ActionNoYes,
     default=False,
-    help='When calling print also log.info too'
+    help='When calling print, also log.info automatically.',
+)
+cfg.add_argument(
+    '--lmodule',
+    type=str,
+    metavar='<SCOPE>=<LEVEL>[,<SCOPE>=<LEVEL>...]',
+    help=(
+        'Allows per-scope logging levels which override the global level set with --logging_level. '
+        + 'Pass a comma separated list of <scope>=<level> where <scope> is one of: module, '
+        + 'module:function, or :function and <level> is a logging level (e.g. INFO, DEBUG...)'
+    ),
+)
+cfg.add_argument(
+    '--logging_clear_preexisting_handlers',
+    action=argparse_utils.ActionNoYes,
+    default=True,
+    help=(
+        'Should logging code clear preexisting global logging handlers and thus insist that it '
+        + 'alone can add handlers. Use this to work around annoying modules that insert global '
+        + 'handlers with formats and logging levels you might not want. Caveat emptor, this may '
+        + 'cause you to miss logging messages.'
+    ),
 )
 
 built_in_print = print
+logging_initialized = False
 
 
-class OnlyInfoFilter(logging.Filter):
+# A map from logging_callsite_id -> count of logged messages.
+squelched_logging_counts: Dict[str, int] = {}
+
+
+def squelch_repeated_log_messages(squelch_after_n_repeats: int) -> Callable:
     """
-    A filter that only logs messages produced at the INFO logging level.
+    A decorator that marks a function as interested in having the logging
+    messages that it produces be squelched (ignored) after it logs the
+    same message more than N times.
+
+    Note: this decorator affects *ALL* logging messages produced
+    within the decorated function. That said, messages must be
+    identical in order to be squelched. For example, if the same line
+    of code produces different messages (because of, e.g., a format
+    string), the messages are considered to be different.
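+
+    Example (a sketch; the decorated function and the module-level
+    logger it uses are hypothetical):
+
+        @squelch_repeated_log_messages(squelch_after_n_repeats=5)
+        def poll_device() -> None:
+            # Identical message from the same callsite; dropped after
+            # it has been logged 5 times.
+            logger.info('Polling device...')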
+
     """
-    def filter(self, record):
-        return record.levelno == logging.INFO
+
+    def squelch_logging_wrapper(f: Callable):
+        import function_utils
+
+        identifier = function_utils.function_identifier(f)
+        squelched_logging_counts[identifier] = squelch_after_n_repeats
+        return f
+
+    return squelch_logging_wrapper
 
 
-class OnlyNTimesFilter(logging.Filter):
+class SquelchRepeatedMessagesFilter(logging.Filter):
     """
     A filter that only logs messages from a given site with the same
-    message at the same logging level N times and ignores subsequent
-    attempts to log.
+    (exact) message at the same logging level N times and ignores
+    subsequent attempts to log.
+
+    This filter only affects logging messages that repeat more than
+    a threshold number of times from functions that are tagged with
+    the @logging_utils.squelch_repeated_log_messages decorator;
+    others are ignored.
+
+    This functionality is enabled by default but can be disabled via
+    the --no_logging_squelch_repeats commandline flag.
     """
-    def __init__(self, maximum: int) -> None:
-        self.maximum = maximum
+
+    def __init__(self) -> None:
         self.counters = collections.Counter()
         super().__init__()
 
+    @overrides
+    def filter(self, record: logging.LogRecord) -> bool:
+        id1 = f'{record.module}:{record.funcName}'
+        if id1 not in squelched_logging_counts:
+            return True
+        threshold = squelched_logging_counts[id1]
+        logsite = (
+            f'{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}'
+        )
+        count = self.counters[logsite]
+        self.counters[logsite] += 1
+        return count < threshold
+
+
+class DynamicPerScopeLoggingLevelFilter(logging.Filter):
+    """This filter applies per-scope logging levels given via --lmodule
+    (e.g. --lmodule=mymodule=DEBUG,mymodule:myfunc=INFO), overriding
+    the global default level for matching scopes; messages from scopes
+    without an override fall back to the global --logging_level.
+
+    """
+
+    @staticmethod
+    def level_name_to_level(name: str) -> int:
+        numeric_level = getattr(logging, name, None)
+        if not isinstance(numeric_level, int):
+            raise ValueError(f'Invalid level: {name}')
+        return numeric_level
+
+    def __init__(
+        self,
+        default_logging_level: int,
+        per_scope_logging_levels: str,
+    ) -> None:
+        super().__init__()
+        self.valid_levels = set(
+            ['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
+        )
+        self.default_logging_level = default_logging_level
+        self.level_by_scope = {}
+        if per_scope_logging_levels is not None:
+            for chunk in per_scope_logging_levels.split(','):
+                if '=' not in chunk:
+                    print(
+                        f'Malformed lmodule directive: "{chunk}", missing "=". Ignored.',
+                        file=sys.stderr,
+                    )
+                    continue
+                try:
+                    (scope, level) = chunk.split('=')
+                except ValueError:
+                    print(
+                        f'Malformed lmodule directive: "{chunk}". Ignored.',
+                        file=sys.stderr,
+                    )
+                    continue
+                scope = scope.strip()
+                level = level.strip().upper()
+                if level not in self.valid_levels:
+                    print(
+                        f'Malformed lmodule directive: "{chunk}", bad level. Ignored.',
+                        file=sys.stderr,
+                    )
+                    continue
+                self.level_by_scope[
+                    scope
+                ] = DynamicPerScopeLoggingLevelFilter.level_name_to_level(level)
+
+    @overrides
     def filter(self, record: logging.LogRecord) -> bool:
-        source = f'{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}'
-        count = self.counters[source]
-        self.counters[source] += 1
-        return count < self.maximum
+        # First try to find a logging level by scope (--lmodule)
+        if len(self.level_by_scope) > 0:
+            min_level = None
+            for scope in (
+                record.module,
+                f'{record.module}:{record.funcName}',
+                f':{record.funcName}',
+            ):
+                level = self.level_by_scope.get(scope, None)
+                if level is not None:
+                    if min_level is None or level < min_level:
+                        min_level = level
+
+            # If we found one, use it instead of the global default level.
+            if min_level is not None:
+                return record.levelno >= min_level
+
+        # Otherwise, use the global logging level (--logging_level)
+        return record.levelno >= self.default_logging_level
+
+
+# A map from function_identifier -> probability of logging (0.0%..100.0%)
+probabilistic_logging_levels: Dict[str, float] = {}
+
+
+def logging_is_probabilistic(probability_of_logging: float) -> Callable:
+    """
+    A decorator that indicates that all logging statements within the
+    scope of a particular (marked) function are not deterministic
+    (i.e. they do not always unconditionally log) but rather are
+    probabilistic (i.e. they log N% of the time randomly).
+
+    Note that this functionality can be disabled (forcing all logged
+    messages to produce output) via the --no_logging_probabilistically
+    cmdline argument.
+
+    This affects *ALL* logging statements within the marked function.
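+
+    Example (a sketch; the decorated function and the module-level
+    logger it uses are hypothetical):
+
+        @logging_is_probabilistic(33.3)
+        def chatty_inner_loop() -> None:
+            # Each message here is actually emitted only ~33.3% of the time.
+            logger.debug('Here we go again...')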
+
+    """
+
+    def probabilistic_logging_wrapper(f: Callable):
+        import function_utils
+
+        identifier = function_utils.function_identifier(f)
+        probabilistic_logging_levels[identifier] = probability_of_logging
+        return f
+
+    return probabilistic_logging_wrapper
+
+
+class ProbabilisticFilter(logging.Filter):
+    """
+    A filter that logs messages probabilistically (i.e. randomly at some
+    percent chance).
+
+    This filter only affects logging messages from functions that have
+    been tagged with the @logging_utils.logging_is_probabilistic
+    decorator.
+
+    """
+
+    @overrides
+    def filter(self, record: logging.LogRecord) -> bool:
+        id1 = f'{record.module}:{record.funcName}'
+        if id1 not in probabilistic_logging_levels:
+            return True
+        threshold = probabilistic_logging_levels[id1]
+        return (random.random() * 100.0) <= threshold
+
+
+class OnlyInfoFilter(logging.Filter):
+    """
+    A filter that only logs messages produced at the INFO logging
+    level. This is used by the logging_info_is_print commandline
+    option to select a subset of the logging stream to send to a
+    stdout handler.
+
+    """
+
+    @overrides
+    def filter(self, record: logging.LogRecord):
+        return record.levelno == logging.INFO
 
 
 class MillisecondAwareFormatter(logging.Formatter):
     """
-    A formatter for adding milliseconds to log messages.
+    A formatter for adding milliseconds to log messages which, for
+    whatever reason, the default python logger doesn't do.
     """
+
     converter = datetime.datetime.fromtimestamp
 
+    @overrides
     def formatTime(self, record, datefmt=None):
         ct = MillisecondAwareFormatter.converter(
             record.created, pytz.timezone("US/Pacific")
@@ -160,48 +401,70 @@ class MillisecondAwareFormatter(logging.Formatter):
 
 
 def initialize_logging(logger=None) -> logging.Logger:
-    assert config.has_been_parsed()
+    global logging_initialized
+    if logging_initialized:
+        return logger or logging.getLogger()
+    logging_initialized = True
+
     if logger is None:
-        logger = logging.getLogger()  # Root logger
+        logger = logging.getLogger()
+
+    preexisting_handlers_count = 0
+    assert config.has_been_parsed()
+    if config.config['logging_clear_preexisting_handlers']:
+        while logger.hasHandlers():
+            logger.removeHandler(logger.handlers[0])
+            preexisting_handlers_count += 1
 
     if config.config['logging_config_file'] is not None:
         logging.config.fileConfig('logging.conf')
         return logger
 
     handlers = []
-    numeric_level = getattr(
-        logging,
-        config.config['logging_level'].upper(),
-        None
+
+    # Global default logging level (--logging_level)
+    default_logging_level = getattr(
+        logging, config.config['logging_level'].upper(), None
     )
-    if not isinstance(numeric_level, int):
+    if not isinstance(default_logging_level, int):
         raise ValueError('Invalid level: %s' % config.config['logging_level'])
 
-    fmt = config.config['logging_format']
+    if config.config['logging_format']:
+        fmt = config.config['logging_format']
+    else:
+        if config.config['logging_syslog']:
+            fmt = '%(levelname).1s:%(filename)s[%(process)d]: %(message)s'
+        else:
+            fmt = '%(levelname).1s:%(asctime)s: %(message)s'
     if config.config['logging_debug_threads']:
         fmt = f'%(process)d.%(thread)d|{fmt}'
+    if config.config['logging_debug_modules']:
+        fmt = f'%(filename)s:%(funcName)s:%(lineno)s|{fmt}'
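+    # For illustration: with both --logging_debug_threads and
+    # --logging_debug_modules enabled and the default console format,
+    # fmt ends up as:
+    # %(filename)s:%(funcName)s:%(lineno)s|%(process)d.%(thread)d|%(levelname).1s:%(asctime)s: %(message)s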
     if config.config['logging_syslog']:
         if sys.platform not in ('win32', 'cygwin'):
-            handler = SysLogHandler()
-#            for k, v in encoded_priorities.items():
-#                handler.encodePriority(k, v)
+            if config.config['logging_syslog_facility']:
+                facility_name = (
+                    'LOG_' + config.config['logging_syslog_facility']
+                )
+            facility = SysLogHandler.__dict__.get(
+                facility_name, SysLogHandler.LOG_USER
+            )
+            handler = SysLogHandler(facility=facility, address='/dev/log')
             handler.setFormatter(
                 MillisecondAwareFormatter(
                     fmt=fmt,
                     datefmt=config.config['logging_date_format'],
                 )
             )
-            handler.setLevel(numeric_level)
             handlers.append(handler)
 
     if config.config['logging_filename']:
         handler = RotatingFileHandler(
             config.config['logging_filename'],
-            maxBytes = config.config['logging_filename_maxsize'],
-            backupCount = config.config['logging_filename_count'],
+            maxBytes=config.config['logging_filename_maxsize'],
+            backupCount=config.config['logging_filename_count'],
         )
-        handler.setLevel(numeric_level)
         handler.setFormatter(
             MillisecondAwareFormatter(
                 fmt=fmt,
@@ -212,7 +475,6 @@ def initialize_logging(logger=None) -> logging.Logger:
 
     if config.config['logging_console']:
         handler = logging.StreamHandler(sys.stderr)
-        handler.setLevel(numeric_level)
         handler.setFormatter(
             MillisecondAwareFormatter(
                 fmt=fmt,
@@ -232,16 +494,27 @@ def initialize_logging(logger=None) -> logging.Logger:
             handler.addFilter(OnlyInfoFilter())
             logger.addHandler(handler)
 
-    maximum = config.config['logging_max_n_times_per_message']
-    if maximum > 0:
+    if config.config['logging_squelch_repeats']:
         for handler in handlers:
-            handler.addFilter(OnlyNTimesFilter(maximum))
+            handler.addFilter(SquelchRepeatedMessagesFilter())
 
-    logger.setLevel(numeric_level)
+    if config.config['logging_probabilistically']:
+        for handler in handlers:
+            handler.addFilter(ProbabilisticFilter())
+
+    for handler in handlers:
+        handler.addFilter(
+            DynamicPerScopeLoggingLevelFilter(
+                default_logging_level,
+                config.config['lmodule'],
+            )
+        )
+    logger.setLevel(0)
     logger.propagate = False
 
     if config.config['logging_captures_prints']:
         import builtins
+
         global built_in_print
 
         def print_and_also_log(*arg, **kwarg):
@@ -251,8 +524,75 @@ def initialize_logging(logger=None) -> logging.Logger:
         else:
             logger.info(*arg)
             built_in_print(*arg, **kwarg)
+
         builtins.print = print_and_also_log
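+        # E.g. with --logging_captures_prints, print('hello') now both
+        # writes 'hello' to stdout and emits logger.info('hello').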
+
+    # At this point the logger is ready, handlers are set up,
+    # etc... so log about the logging configuration.
+
+    level_name = logging._levelToName.get(
+        default_logging_level, str(default_logging_level)
+    )
+    logger.debug(
+        f'Initialized global logging; default logging level is {level_name}.'
+    )
+    if (
+        config.config['logging_clear_preexisting_handlers']
+        and preexisting_handlers_count > 0
+    ):
+        msg = f'Logging cleared {preexisting_handlers_count} global handlers (--logging_clear_preexisting_handlers)'
+        logger.warning(msg)
+    logger.debug(f'Logging format specification is "{fmt}"')
+    if config.config['logging_debug_threads']:
+        logger.debug(
+            '...Logging format spec captures tid/pid (--logging_debug_threads)'
+        )
+    if config.config['logging_debug_modules']:
+        logger.debug(
+            '...Logging format spec captures files/functions/lineno (--logging_debug_modules)'
+        )
+    if config.config['logging_syslog']:
+        logger.debug(
+            f'Logging to syslog as {facility_name} with priority mapping based on level'
+        )
+    if config.config['logging_filename']:
+        logger.debug(f'Logging to filename {config.config["logging_filename"]}')
+        logger.debug(
+            f'...with {config.config["logging_filename_maxsize"]} bytes max file size.'
+        )
+        logger.debug(
+            f'...and {config.config["logging_filename_count"]} rotating backup file count.'
+        )
+    if config.config['logging_console']:
+        logger.debug('Logging to the console (stderr).')
+    if config.config['logging_info_is_print']:
+        logger.debug(
+            'Logging logger.info messages will be repeated on stdout (--logging_info_is_print)'
+        )
+    if config.config['logging_squelch_repeats']:
+        logger.debug(
+            'Logging code allowed to request repeated messages be squelched (--logging_squelch_repeats)'
+        )
+    else:
+        logger.debug(
+            'Logging code forbidden to request messages be squelched; all messages logged (--no_logging_squelch_repeats)'
+        )
+    if config.config['logging_probabilistically']:
+        logger.debug(
+            'Logging code is allowed to request probabilistic logging (--logging_probabilistically)'
+        )
+    else:
+        logger.debug(
+            'Logging code is forbidden to request probabilistic logging; messages always logged (--no_logging_probabilistically)'
+        )
+    if config.config['lmodule']:
+        logger.debug(
+            f'Dynamic per-scope logging enabled (--lmodule={config.config["lmodule"]})'
+        )
+    if config.config['logging_captures_prints']:
+        logger.debug(
+            'Logging will capture printed data as logger.info messages (--logging_captures_prints)'
+        )
     return logger
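+
+
+# Example usage (a sketch; assumes this project's config module exposes
+# a parse() entrypoint that parses the commandline flags defined above):
+#
+#     import config
+#     import logging_utils
+#
+#     def main() -> None:
+#         config.parse()
+#         logger = logging_utils.initialize_logging()
+#         logger.info('Hello!')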
@@ -262,8 +602,14 @@ def get_logger(name: str = ""):
 
 
 def tprint(*args, **kwargs) -> None:
+    """Legacy function for printing a message augmented with thread id,
+    still needed by some code. Please use --logging_debug_threads in
+    new code.
+
+    """
     if config.config['logging_debug_threads']:
         from thread_utils import current_thread_id
+
         print(f'{current_thread_id()}', end="")
         print(*args, **kwargs)
     else:
@@ -271,33 +617,48 @@ def tprint(*args, **kwargs) -> None:
 
 
 def dprint(*args, **kwargs) -> None:
+    """Legacy function used to print to stderr, still needed by some code.
+    Please just use normal logging with --logging_console in new code;
+    it accomplishes the same thing.
+
+    """
     print(*args, file=sys.stderr, **kwargs)
 
 
 class OutputMultiplexer(object):
+    """
+    A class that broadcasts printed messages to several sinks (including
+    various logging levels, different files, different file handles,
+    the house log, etc...). See also OutputMultiplexerContext for an
+    easy usage pattern.
+
+    """
 
     class Destination(enum.IntEnum):
         """Bits in the destination_bitv bitvector. Used to indicate the
         output destination."""
-        LOG_DEBUG = 0x01          # -\
-        LOG_INFO = 0x02           #  |
-        LOG_WARNING = 0x04        #   > Should provide logger to the c'tor.
-        LOG_ERROR = 0x08          #  |
-        LOG_CRITICAL = 0x10       # _/
-        FILENAMES = 0x20          # Must provide a filename to the c'tor.
-        FILEHANDLES = 0x40        # Must provide a handle to the c'tor.
+
+        LOG_DEBUG = 0x01  # ⎫
+        LOG_INFO = 0x02  # ⎪
+        LOG_WARNING = 0x04  # ⎬ Must provide logger to the c'tor.
+        LOG_ERROR = 0x08  # ⎪
+        LOG_CRITICAL = 0x10  # ⎭
+        FILENAMES = 0x20  # Must provide a filename to the c'tor.
+        FILEHANDLES = 0x40  # Must provide a handle to the c'tor.
         HLOG = 0x80
         ALL_LOG_DESTINATIONS = (
             LOG_DEBUG | LOG_INFO | LOG_WARNING | LOG_ERROR | LOG_CRITICAL
        )
         ALL_OUTPUT_DESTINATIONS = 0x8F
 
-    def __init__(self,
-                 destination_bitv: int,
-                 *,
-                 logger=None,
-                 filenames: Optional[Iterable[str]] = None,
-                 handles: Optional[Iterable[io.TextIOWrapper]] = None):
+    def __init__(
+        self,
+        destination_bitv: int,
+        *,
+        logger=None,
+        filenames: Optional[Iterable[str]] = None,
+        handles: Optional[Iterable[io.TextIOWrapper]] = None,
+    ):
         if logger is None:
             logger = logging.getLogger(None)
         self.logger = logger
@@ -334,12 +695,13 @@ class OutputMultiplexer(object):
             )
         if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
             raise ValueError(
-                 "Handle argument is required if bitv & FILEHANDLES"
-             )
+                "Handle argument is required if bitv & FILEHANDLES"
+            )
         self.destination_bitv = destination_bitv
 
     def print(self, *args, **kwargs):
         from string_utils import sprintf, strip_escape_sequences
+
         end = kwargs.pop("end", None)
         if end is not None:
             if not isinstance(end, str):
@@ -358,16 +720,16 @@ class OutputMultiplexer(object):
         if end == '\n':
             buf += '\n'
         if (
-            self.destination_bitv & self.Destination.FILENAMES and
-            self.f is not None
+            self.destination_bitv & self.Destination.FILENAMES
+            and self.f is not None
         ):
             for _ in self.f:
                 _.write(buf.encode('utf-8'))
                 _.flush()
 
         if (
-            self.destination_bitv & self.Destination.FILEHANDLES and
-            self.h is not None
+            self.destination_bitv & self.Destination.FILEHANDLES
+            and self.h is not None
         ):
             for _ in self.h:
                 _.write(buf)
@@ -395,17 +757,35 @@ class OutputMultiplexer(object):
 
 
 class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
-    def __init__(self,
-                 destination_bitv: OutputMultiplexer.Destination,
-                 *,
-                 logger = None,
-                 filenames = None,
-                 handles = None):
+    """
+    A context that uses an OutputMultiplexer. e.g.
+
+        with OutputMultiplexerContext(
+                OutputMultiplexer.Destination.LOG_INFO |
+                OutputMultiplexer.Destination.LOG_DEBUG |
+                OutputMultiplexer.Destination.FILENAMES |
+                OutputMultiplexer.Destination.FILEHANDLES,
+                filenames = [ '/tmp/foo.log', '/var/log/bar.log' ],
+                handles = [ f, g ]
+            ) as mplex:
+            mplex.print("This is a log message!")
+
+    """
+
+    def __init__(
+        self,
+        destination_bitv: OutputMultiplexer.Destination,
+        *,
+        logger=None,
+        filenames=None,
+        handles=None,
+    ):
         super().__init__(
             destination_bitv,
             logger=logger,
            filenames=filenames,
-            handles=handles)
+            handles=handles,
+        )
 
     def __enter__(self):
         return self
@@ -418,5 +798,18 @@ class OutputMultiplexerContext(OutputMultiplexer, contextlib.ContextDecorator):
 
 
 def hlog(message: str) -> None:
+    """Write a message to the house log (syslog facility local7 priority
+    info) by calling /usr/bin/logger. This is pretty hacky but used
+    by a bunch of code. Another way to do this would be to use
+    --logging_syslog and --logging_syslog_facility but I can't
+    actually say that's easier.
+
+    """
     message = message.replace("'", "'\"'\"'")
     os.system(f"/usr/bin/logger -p local7.info -- '{message}'")
+
+
+if __name__ == '__main__':
+    import doctest
+
+    doctest.testmod()