3 """Helpers for unittests. Note that when you import this we
4 automatically wrap unittest.main() with a call to bootstrap.initialize
5 so that we getLogger config, commandline args, logging control,
6 etc... this works fine but it's a little hacky so caveat emptor.

import functools
import inspect
import logging
import pickle
import random
import statistics
import time
import unittest
from typing import Callable

import bootstrap
import config

logger = logging.getLogger(__name__)
cfg = config.add_commandline_args(
    f'Unit Test Helpers ({__file__})',
    'Args related to unit test helpers')
cfg.add_argument(
    '--unittests_ignore_perf',
    action='store_true',
    default=False,
    help='Ignore unittest perf regression in @check_method_for_perf_regressions',
)
cfg.add_argument(
    '--unittests_num_perf_samples',
    type=int,
    default=50,
    help='The count of perf timing samples we need to see before blocking slow runs on perf grounds',
)
cfg.add_argument(
    '--unittests_drop_perf_traces',
    type=str,
    default=None,
    help='The identifier (i.e. file!test_fixture) for which we should drop all perf data',
)
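
# Because unittest.main() is wrapped with bootstrap.initialize (below), the
# flags registered above are parsed from the test's commandline. A minimal
# sketch, assuming a hypothetical test file named my_test.py:
#
#   python my_test.py --unittests_num_perf_samples 100
#   python my_test.py --unittests_ignore_perf
#   python my_test.py --unittests_drop_perf_traces 'my_test.py!test_foo'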

# >>> This is the hacky business, FYI. <<<
unittest.main = bootstrap.initialize(unittest.main)
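
# The perf db is a pickled dict mapping a test's canonical identifier
# (i.e. "filepath!test_name") to a list of recent runtime samples in seconds.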
_db = '/home/scott/.python_unittest_performance_db'


def check_method_for_perf_regressions(func: Callable) -> Callable:
    """This is meant to be used on a method in a class that subclasses
    unittest.TestCase. When thus decorated it will time the execution
    of the code in the method, compare it with a database of
    historical performance, and fail the test with a perf-related
    message if it has become too slow. See the usage sketch in the
    comments after this function.
    """

    def load_known_test_performance_characteristics():
        with open(_db, 'rb') as f:
            return pickle.load(f)

    def save_known_test_performance_characteristics(perfdb):
        with open(_db, 'wb') as f:
            pickle.dump(perfdb, f, pickle.HIGHEST_PROTOCOL)

    @functools.wraps(func)
    def wrapper_perf_monitor(*args, **kwargs):
        try:
            perfdb = load_known_test_performance_characteristics()
        except Exception as e:
            logger.exception(e)
            logger.warning(f'Unable to load perfdb from {_db}')
            perfdb = {}

        # This is a unique identifier for a test: filepath!function
        logger.debug(f'Watching {func.__name__}\'s performance...')
        func_id = f'{func.__globals__["__file__"]}!{func.__name__}'
        logger.debug(f'Canonical function identifier = {func_id}')

        # cmdline arg to forget perf traces for a function
        drop_id = config.config['unittests_drop_perf_traces']
        if drop_id is not None:
            if drop_id in perfdb:
                perfdb[drop_id] = []

        # Run the wrapped test paying attention to latency.
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        end_time = time.perf_counter()
        run_time = end_time - start_time
        logger.debug(f'{func.__name__} executed in {run_time:f}s.')

        # Check the db; see if it was unexpectedly slow.
        hist = perfdb.get(func_id, [])
        if len(hist) < config.config['unittests_num_perf_samples']:
            hist.append(run_time)
            logger.debug(
                f'Still establishing a perf baseline for {func.__name__}'
            )
        else:
            stdev = statistics.stdev(hist)
            limit = hist[-1] + stdev * 3
            logger.debug(
                f'Max acceptable performance for {func.__name__} is {limit:f}s'
            )
            if (
                run_time > limit and
                not config.config['unittests_ignore_perf']
            ):
                msg = f'''{func_id} performance has regressed unacceptably.
{hist[-1]:f}s is the slowest record in {len(hist)} db perf samples.
It just ran in {run_time:f}s which is >3 stdevs slower than the slowest sample.
Here is the current, full db perf timing distribution:

'''
                for x in hist:
                    msg += f'{x:f}\n'
                logger.error(msg)
                slf = args[0]  # the TestCase instance ("self" of the test method)
                slf.fail(msg)
            else:
                hist.append(run_time)

        n = min(config.config['unittests_num_perf_samples'], len(hist))
        hist = random.sample(hist, n)
        hist.sort()
        perfdb[func_id] = hist
        save_known_test_performance_characteristics(perfdb)
        return value

    return wrapper_perf_monitor
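
# A minimal usage sketch for the method decorator, assuming this module is
# importable as unittest_utils (hypothetical test class / method names):
#
#   class MyTest(unittest.TestCase):
#       @unittest_utils.check_method_for_perf_regressions
#       def test_something_slow(self):
#           self.assertTrue(run_the_slow_thing())
#
# Once enough samples are banked, a run more than 3 stdevs slower than the
# slowest recorded sample fails the test (unless --unittests_ignore_perf).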


def check_all_methods_for_perf_regressions(prefix='test_'):
    """Decorate a unittest.TestCase subclass so that every method whose name
    starts with prefix is wrapped with check_method_for_perf_regressions."""

    def decorate_the_testcase(cls):
        if issubclass(cls, unittest.TestCase):
            for name, m in inspect.getmembers(cls, inspect.isfunction):
                if name.startswith(prefix):
                    setattr(cls, name, check_method_for_perf_regressions(m))
                    logger.debug(f'Wrapping {cls.__name__}:{name}.')
        return cls

    return decorate_the_testcase
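
# A minimal usage sketch for the class decorator, assuming this module is
# importable as unittest_utils (hypothetical test module):
#
#   @unittest_utils.check_all_methods_for_perf_regressions()
#   class MyTests(unittest.TestCase):
#       def test_fast_thing(self):
#           self.assertEqual(4, 2 + 2)
#
#   if __name__ == '__main__':
#       unittest.main()  # already wrapped by bootstrap.initialize above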