import bootstrap
import config
+import function_utils
import scott_secrets
import sqlalchemy as sa
self.conn.execute(sql)
-def function_identifier(f: Callable) -> str:
- """
- Given a callable function, return a string that identifies it.
- Usually that string is just __module__:__name__ but there's a
- corner case: when __module__ is __main__ (i.e. the callable is
- defined in the same module as __main__). In this case,
- f.__module__ returns "__main__" instead of the file that it is
- defined in. Work around this using pathlib.Path (see below).
-
- >>> function_identifier(function_identifier)
- 'unittest_utils:function_identifier'
-
- """
- if f.__module__ == '__main__':
- from pathlib import Path
- import __main__
- module = __main__.__file__
- module = Path(module).stem
- return f'{module}:{f.__name__}'
- else:
- return f'{f.__module__}:{f.__name__}'
-
-
def check_method_for_perf_regressions(func: Callable) -> Callable:
"""
This is meant to be used on a method in a class that subclasses
'Unknown/unexpected --unittests_persistance_strategy value'
)
- logger.debug(f'Watching {func.__name__}\'s performance...')
- func_id = function_identifier(func)
+ func_id = function_utils.function_identifier(func)
+ func_name = func.__name__
+ logger.debug(f'Watching {func_name}\'s performance...')
logger.debug(f'Canonical function identifier = {func_id}')
try:
if len(hist) < config.config['unittests_num_perf_samples']:
hist.append(run_time)
logger.debug(
- f'Still establishing a perf baseline for {func.__name__}'
+ f'Still establishing a perf baseline for {func_name}'
)
else:
stdev = statistics.stdev(hist)
- logger.debug(f'For {func.__name__}, performance stdev={stdev}')
+ logger.debug(f'For {func_name}, performance stdev={stdev}')
slowest = hist[-1]
- logger.debug(f'For {func.__name__}, slowest perf on record is {slowest:f}s')
+ logger.debug(f'For {func_name}, slowest perf on record is {slowest:f}s')
limit = slowest + stdev * 4
logger.debug(
- f'For {func.__name__}, max acceptable runtime is {limit:f}s'
+ f'For {func_name}, max acceptable runtime is {limit:f}s'
)
logger.debug(
- f'For {func.__name__}, actual observed runtime was {run_time:f}s'
+ f'For {func_name}, actual observed runtime was {run_time:f}s'
)
if (
run_time > limit and
not config.config['unittests_ignore_perf']
):
msg = f'''{func_id} performance has regressed unacceptably.
-{hist[-1]:f}s is the slowest record in {len(hist)} db perf samples.
-It just ran in {run_time:f}s which is >5 stdevs slower than the slowest sample.
+{slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
+It just ran in {run_time:f}s which is 4+ stdevs slower than the slowest.
Here is the current, full db perf timing distribution:
'''
for x in hist:
msg += f'{x:f}\n'
logger.error(msg)
- slf = args[0]
- slf.fail(msg)
+ slf = args[0] # Peek at the wrapped function's self ref.
+ slf.fail(msg) # ...to fail the testcase.
else:
hist.append(run_time)
+ # Don't spam the database with samples; just keep a random
+ # subsample of what we have and store that back.
n = min(config.config['unittests_num_perf_samples'], len(hist))
hist = random.sample(hist, n)
hist.sort()
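
To make the threshold arithmetic in the hunk above concrete: hist is kept sorted, so hist[-1] is the slowest runtime on record, and a run is only flagged when it exceeds that slowest sample by more than four standard deviations of the history. A minimal sketch with made-up numbers (nothing below is from the real module):

import statistics

hist = [0.101, 0.103, 0.104, 0.109, 0.118]  # hypothetical prior runtimes (seconds), sorted
stdev = statistics.stdev(hist)              # spread of the recorded samples
limit = hist[-1] + stdev * 4                # slowest-on-record + 4 stdevs, ~0.145s here
run_time = 0.162                            # hypothetical new observation
print(run_time > limit)                     # True -> would be reported as a regression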
def check_all_methods_for_perf_regressions(prefix='test_'):
+ """Decorate unittests with this to pay attention to the perf of the
+ testcode and flag perf regressions. e.g.
+
+ import unittest_utils as uu
+
+ @uu.check_all_methods_for_perf_regressions()
+ class TestMyClass(unittest.TestCase):
+
+ def test_some_part_of_my_class(self):
+ ...
+
+ """
def decorate_the_testcase(cls):
if issubclass(cls, unittest.TestCase):
for name, m in inspect.getmembers(cls, inspect.isfunction):
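
For orientation, the hunk above follows the class-decorator-factory pattern: check_all_methods_for_perf_regressions returns decorate_the_testcase, which inspects the TestCase subclass and rebinds matching methods. A generic, self-contained sketch of that pattern, assuming the loop filters on the prefix and rebinds with setattr (wrap_with_timer is a hypothetical stand-in, not the real per-method decorator):

import functools
import inspect
import time
import unittest

def wrap_with_timer(func):
    # Hypothetical stand-in for check_method_for_perf_regressions: just time
    # the wrapped test method and print its runtime.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            print(f'{func.__name__} took {time.perf_counter() - start:f}s')
    return wrapper

def wrap_all_test_methods(prefix='test_'):
    # Decorator factory with the same shape as the code above: rebind every
    # method whose name starts with `prefix` on unittest.TestCase subclasses.
    def decorate_the_testcase(cls):
        if issubclass(cls, unittest.TestCase):
            for name, method in inspect.getmembers(cls, inspect.isfunction):
                if name.startswith(prefix):
                    setattr(cls, name, wrap_with_timer(method))
        return cls
    return decorate_the_testcase

Applied as @wrap_all_test_methods() on a TestCase subclass, this times each test_* method as it runs.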