3 """Helpers for unittests. Note that when you import this we
4 automatically wrap unittest.main() with a call to
5 bootstrap.initialize so that we getLogger config, commandline args,
6 logging control, etc... this works fine but it's a little hacky so

import contextlib
import functools
import inspect
import logging
import pickle
import random
import statistics
import tempfile
import time
import unittest
from typing import Callable

import bootstrap
import config

logger = logging.getLogger(__name__)
cfg = config.add_commandline_args(
    f'Logging ({__file__})',
    'Args related to function decorators')
cfg.add_argument(
    '--unittests_ignore_perf',
    action='store_true',
    default=False,
    help='Ignore unittest perf regression in @check_method_for_perf_regressions',
)
cfg.add_argument(
    '--unittests_num_perf_samples',
    type=int,
    default=50,  # assumed default; the original value was elided
    help='The count of perf timing samples we need to see before blocking slow runs on perf grounds',
)
cfg.add_argument(
    '--unittests_drop_perf_traces',
    type=str,
    default=None,
    help='The identifier (i.e. file!test_fixture) for which we should drop all perf data',
)
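
# A usage sketch for the flags registered above; the test file and
# fixture names here are hypothetical:
#
#   python some_test.py --unittests_num_perf_samples 10
#   python some_test.py --unittests_drop_perf_traces some_test.py!test_foo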

# >>> This is the hacky business, FYI. <<<
unittest.main = bootstrap.initialize(unittest.main)


_db = '/home/scott/.python_unittest_performance_db'
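
# Note: the "db" is just a pickled dict mapping a test's canonical
# identifier (filepath!function; see func_id below) to a list of float
# runtimes, in seconds, observed on previous runs.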


def check_method_for_perf_regressions(func: Callable) -> Callable:
    """
    This is meant to be used on a method in a class that subclasses
    unittest.TestCase.  When thus decorated it will time the execution
    of the code in the method, compare it with a database of
    historical performance, and fail the test with a perf-related
    message if it has become too slow.
    """
    def load_known_test_performance_characteristics():
        with open(_db, 'rb') as f:
            return pickle.load(f)

    def save_known_test_performance_characteristics(perfdb):
        with open(_db, 'wb') as f:
            pickle.dump(perfdb, f, pickle.HIGHEST_PROTOCOL)

    @functools.wraps(func)
    def wrapper_perf_monitor(*args, **kwargs):
        try:
            perfdb = load_known_test_performance_characteristics()
        except Exception as e:
            logger.exception(e)
            msg = f'Unable to load perfdb from {_db}'
            logger.warning(msg)
            # Fall back to an empty database if we can't read the old one.
            perfdb = {}

        # This is a unique identifier for a test: filepath!function
        logger.debug(f'Watching {func.__name__}\'s performance...')
        func_id = f'{func.__globals__["__file__"]}!{func.__name__}'
        logger.debug(f'Canonical function identifier = {func_id}')

        # cmdline arg to forget perf traces for a function
        drop_id = config.config['unittests_drop_perf_traces']
        if drop_id is not None:
            # Forget any historical samples recorded under this id.
            perfdb.pop(drop_id, None)

        # Run the wrapped test paying attention to latency.
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        end_time = time.perf_counter()
        run_time = end_time - start_time
        logger.debug(f'{func.__name__} executed in {run_time:f}s.')

        # Check the db; see if it was unexpectedly slow.
        hist = perfdb.get(func_id, [])
        if len(hist) < config.config['unittests_num_perf_samples']:
            hist.append(run_time)
            logger.debug(
                f'Still establishing a perf baseline for {func.__name__}'
            )
        else:
            stdev = statistics.stdev(hist)
            limit = hist[-1] + stdev * 5
            logger.debug(
                f'Max acceptable performance for {func.__name__} is {limit:f}s'
            )
            if (
                run_time > limit
                and not config.config['unittests_ignore_perf']
            ):
                msg = f'''{func_id} performance has regressed unacceptably.
{hist[-1]:f}s is the slowest record in {len(hist)} db perf samples.
It just ran in {run_time:f}s which is >5 stdevs slower than the slowest sample.
Here is the current, full db perf timing distribution:

'''
                for x in hist:
                    msg += f'{x:f}\n'
                logger.error(msg)
                args[0].fail(msg)  # args[0] is the TestCase instance (self)
            else:
                hist.append(run_time)

        # Keep a bounded random sample of timings, sorted so that
        # hist[-1] is always the slowest recorded sample.
        n = min(config.config['unittests_num_perf_samples'], len(hist))
        hist = random.sample(hist, n)
        hist.sort()
        perfdb[func_id] = hist
        save_known_test_performance_characteristics(perfdb)
        return value
    return wrapper_perf_monitor
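
# A minimal usage sketch for the decorator above (the test class, method,
# and helper names are hypothetical):
#
#   class MyTests(unittest.TestCase):
#       @check_method_for_perf_regressions
#       def test_hot_path(self):
#           self.assertEqual(42, compute_the_answer())
#
# The first --unittests_num_perf_samples runs merely record timings to
# establish a baseline; after that, a run more than 5 stdevs slower than
# the slowest recorded sample fails the test (unless --unittests_ignore_perf
# is passed).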


def check_all_methods_for_perf_regressions(prefix='test_'):
    """Decorate every method whose name starts with prefix in a
    unittest.TestCase subclass with @check_method_for_perf_regressions."""
    def decorate_the_testcase(cls):
        if issubclass(cls, unittest.TestCase):
            for name, m in inspect.getmembers(cls, inspect.isfunction):
                if name.startswith(prefix):
                    setattr(cls, name, check_method_for_perf_regressions(m))
                    logger.debug(f'Wrapping {cls.__name__}:{name}.')
        return cls
    return decorate_the_testcase
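
# The class-level variant wraps every prefix-matching method in one shot;
# again, a hypothetical sketch:
#
#   @check_all_methods_for_perf_regressions()
#   class MyTests(unittest.TestCase):
#       def test_one(self):
#           ...
#       def test_two(self):
#           ...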
155 """Hard code a breakpoint somewhere; drop into pdb."""


class RecordStdout(object):
    """
    Record what is emitted to stdout.

    >>> with RecordStdout() as record:
    ...     print("This is a test!")
    >>> print({record().readline()})
    {'This is a test!\\n'}
    """

    def __init__(self) -> None:
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        self.recorder = contextlib.redirect_stdout(self.destination)
        self.recorder.__enter__()
        return lambda: self.destination

    def __exit__(self, *args) -> bool:
        self.recorder.__exit__(*args)
        self.destination.seek(0)
        return False


class RecordStderr(object):
    """
    Record what is emitted to stderr.

    >>> import sys
    >>> with RecordStderr() as record:
    ...     print("This is a test!", file=sys.stderr)
    >>> print({record().readline()})
    {'This is a test!\\n'}
    """

    def __init__(self) -> None:
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        self.recorder = contextlib.redirect_stderr(self.destination)
        self.recorder.__enter__()
        return lambda: self.destination

    def __exit__(self, *args) -> bool:
        self.recorder.__exit__(*args)
        self.destination.seek(0)
        return False


class RecordMultipleStreams(object):
    """
    Record the output to more than one stream.
    """

    def __init__(self, *files) -> None:
        self.files = [*files]
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')
        self.saved_writes = []

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        for f in self.files:
            self.saved_writes.append(f.write)
            f.write = self.destination.write
        return lambda: self.destination

    def __exit__(self, *args) -> bool:
        # Restore in reverse order; each pop() yields the most recently
        # saved write method.
        for f in reversed(self.files):
            f.write = self.saved_writes.pop()
        self.destination.seek(0)
        return False
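
# RecordMultipleStreams has no doctest above, so here is a usage sketch
# (it assumes the caller has imported sys):
#
#   with RecordMultipleStreams(sys.stdout, sys.stderr) as record:
#       print('this goes to stdout')
#       print('this goes to stderr', file=sys.stderr)
#   captured = record().read()  # both streams' output, interleaved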


if __name__ == '__main__':
    import doctest
    doctest.testmod()