logger = logging.getLogger(__name__)
cfg = config.add_commandline_args(
- f'Logging ({__file__})',
- 'Args related to function decorators')
+ f'Logging ({__file__})', 'Args related to function decorators'
+)
cfg.add_argument(
'--unittests_ignore_perf',
action='store_true',
'--unittests_num_perf_samples',
type=int,
default=50,
- help='The count of perf timing samples we need to see before blocking slow runs on perf grounds'
+ help='The count of perf timing samples we need to see before blocking slow runs on perf grounds',
)
cfg.add_argument(
'--unittests_drop_perf_traces',
type=str,
nargs=1,
default=None,
- help='The identifier (i.e. file!test_fixture) for which we should drop all perf data'
+ help='The identifier (i.e. file!test_fixture) for which we should drop all perf data',
)
cfg.add_argument(
'--unittests_persistance_strategy',
choices=['FILE', 'DATABASE'],
default='DATABASE',
- help='Should we persist perf data in a file or db?'
+ help='Should we persist perf data in a file or db?',
)
cfg.add_argument(
'--unittests_perfdb_filename',
type=str,
metavar='FILENAME',
default=f'{os.environ["HOME"]}/.python_unittest_performance_db',
- help='File in which to store perf data (iff --unittests_persistance_strategy is FILE)'
+ help='File in which to store perf data (iff --unittests_persistance_strategy is FILE)',
)
cfg.add_argument(
'--unittests_perfdb_spec',
type=str,
metavar='DBSPEC',
default='mariadb+pymysql://python_unittest:<PASSWORD>@db.house:3306/python_unittest_performance',
- help='Db connection spec for perf data (iff --unittest_persistance_strategy is DATABASE)'
+    help='Db connection spec for perf data (iff --unittests_persistance_strategy is DATABASE)',
)
# >>> This is the hacky business, FYI. <<<
message if it has become too slow.
"""
+
@functools.wraps(func)
def wrapper_perf_monitor(*args, **kwargs):
if config.config['unittests_persistance_strategy'] == 'FILE':
helper = FileBasedPerfRegressionDataPersister(filename)
elif config.config['unittests_persistance_strategy'] == 'DATABASE':
dbspec = config.config['unittests_perfdb_spec']
- dbspec = dbspec.replace('<PASSWORD>', scott_secrets.MARIADB_UNITTEST_PERF_PASSWORD)
+ dbspec = dbspec.replace(
+ '<PASSWORD>', scott_secrets.MARIADB_UNITTEST_PERF_PASSWORD
+ )
helper = DatabasePerfRegressionDataPersister(dbspec)
else:
- raise Exception(
- 'Unknown/unexpected --unittests_persistance_strategy value'
- )
+ raise Exception('Unknown/unexpected --unittests_persistance_strategy value')
func_id = function_utils.function_identifier(func)
func_name = func.__name__
hist = perfdb.get(func_id, [])
if len(hist) < config.config['unittests_num_perf_samples']:
hist.append(run_time)
- logger.debug(
- f'Still establishing a perf baseline for {func_name}'
- )
+ logger.debug(f'Still establishing a perf baseline for {func_name}')
else:
stdev = statistics.stdev(hist)
logger.debug(f'For {func_name}, performance stdev={stdev}')
slowest = hist[-1]
logger.debug(f'For {func_name}, slowest perf on record is {slowest:f}s')
limit = slowest + stdev * 4
- logger.debug(
- f'For {func_name}, max acceptable runtime is {limit:f}s'
- )
- logger.debug(
- f'For {func_name}, actual observed runtime was {run_time:f}s'
- )
- if (
- run_time > limit and
- not config.config['unittests_ignore_perf']
- ):
+ logger.debug(f'For {func_name}, max acceptable runtime is {limit:f}s')
+ logger.debug(f'For {func_name}, actual observed runtime was {run_time:f}s')
+ if run_time > limit and not config.config['unittests_ignore_perf']:
msg = f'''{func_id} performance has regressed unacceptably.
-{hist[-1]:f}s is the slowest record in {len(hist)} db perf samples.
-It just ran in {run_time:f}s which is >5 stdevs slower than the slowest sample.
+{slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
+It just ran in {run_time:f}s which is 4+ stdevs slower than the slowest.
Here is the current, full db perf timing distribution:
'''
for x in hist:
msg += f'{x:f}\n'
logger.error(msg)
- slf = args[0]
- slf.fail(msg)
+ slf = args[0] # Peek at the wrapped function's self ref.
+ slf.fail(msg) # ...to fail the testcase.
else:
hist.append(run_time)
+ # Don't spam the database with samples; just pick a random
+ # sample from what we have and store that back.
n = min(config.config['unittests_num_perf_samples'], len(hist))
hist = random.sample(hist, n)
hist.sort()
perfdb[func_id] = hist
helper.save_performance_data(func_id, perfdb)
return value
+
return wrapper_perf_monitor
def check_all_methods_for_perf_regressions(prefix='test_'):
+    """Decorate a unittest.TestCase subclass with this to track the
+    performance of its test methods and flag perf regressions, e.g.
+
+ import unittest_utils as uu
+
+ @uu.check_all_methods_for_perf_regressions()
+ class TestMyClass(unittest.TestCase):
+
+ def test_some_part_of_my_class(self):
+ ...
+
+ """
+
def decorate_the_testcase(cls):
if issubclass(cls, unittest.TestCase):
for name, m in inspect.getmembers(cls, inspect.isfunction):
setattr(cls, name, check_method_for_perf_regressions(m))
logger.debug(f'Wrapping {cls.__name__}:{name}.')
return cls
+
return decorate_the_testcase
def breakpoint():
"""Hard code a breakpoint somewhere; drop into pdb."""
import pdb
+
pdb.set_trace()
... print("This is a test!")
>>> print({record().readline()})
{'This is a test!\\n'}
+ >>> record().close()
"""
def __init__(self) -> None:
... print("This is a test!", file=sys.stderr)
>>> print({record().readline()})
{'This is a test!\\n'}
+ >>> record().close()
"""
def __init__(self) -> None:
if __name__ == '__main__':
import doctest
+
doctest.testmod()