3 """Helpers for unittests. Note that when you import this we
4 automatically wrap unittest.main() with a call to
5 bootstrap.initialize so that we getLogger config, commandline args,
6 logging control, etc... this works fine but it's a little hacky so
import contextlib
import functools
import inspect
import logging
import pickle
import random
import statistics
import sys
import tempfile
import time
import unittest
from typing import Callable

import bootstrap
import config

logger = logging.getLogger(__name__)
cfg = config.add_commandline_args(
    f'Unit Tests ({__file__})',
    'Args related to unittests')
cfg.add_argument(
    '--unittests_ignore_perf',
    action='store_true',
    default=False,
    help='Ignore unittest perf regression in @check_method_for_perf_regressions',
)
cfg.add_argument(
    '--unittests_num_perf_samples',
    type=int,
    default=50,
    help='The count of perf timing samples we need to see before blocking slow runs on perf grounds',
)
cfg.add_argument(
    '--unittests_drop_perf_traces',
    type=str,
    default=None,
    help='The identifier (i.e. file!test_fixture) for which we should drop all perf data',
)

# >>> This is the hacky business, FYI. <<<
unittest.main = bootstrap.initialize(unittest.main)

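# A minimal sketch of the intended usage (module and file names here are
# hypothetical, not part of this module): a test file merely imports this
# module; the import replaces unittest.main with a bootstrap.initialize
# wrapped version, so running the tests also parses the project's
# commandline args and sets up logging.
#
#     # my_thing_test.py (hypothetical)
#     import unittest
#     import unittest_utils  # side effect: wraps unittest.main()
#
#     if __name__ == '__main__':
#         unittest.main()  # now initialized via bootstrap.initialize
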
_db = '/home/scott/.python_unittest_performance_db'


def check_method_for_perf_regressions(func: Callable) -> Callable:
    """
    This is meant to be used on a method in a class that subclasses
    unittest.TestCase.  When thus decorated it will time the execution
    of the code in the method, compare it with a database of
    historical performance, and fail the test with a perf-related
    message if it has become too slow.
    """

    def load_known_test_performance_characteristics():
        with open(_db, 'rb') as f:
            return pickle.load(f)

    def save_known_test_performance_characteristics(perfdb):
        with open(_db, 'wb') as f:
            pickle.dump(perfdb, f, pickle.HIGHEST_PROTOCOL)

    @functools.wraps(func)
    def wrapper_perf_monitor(*args, **kwargs):
        try:
            perfdb = load_known_test_performance_characteristics()
        except Exception as e:
            logger.exception(e)
            msg = f'Unable to load perfdb from {_db}'
            logger.warning(msg)
            perfdb = {}

        # This is a unique identifier for a test: filepath!function
        logger.debug(f'Watching {func.__name__}\'s performance...')
        func_id = f'{func.__globals__["__file__"]}!{func.__name__}'
        logger.debug(f'Canonical function identifier = {func_id}')

        # cmdline arg to forget perf traces for function
        drop_id = config.config['unittests_drop_perf_traces']
        if drop_id is not None:
            if drop_id in perfdb:
                del perfdb[drop_id]

        # Run the wrapped test paying attention to latency.
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        end_time = time.perf_counter()
        run_time = end_time - start_time
        logger.debug(f'{func.__name__} executed in {run_time:f}s.')

        # Check the db; see if it was unexpectedly slow.
        hist = perfdb.get(func_id, [])
        if len(hist) < config.config['unittests_num_perf_samples']:
            hist.append(run_time)
            logger.debug(
                f'Still establishing a perf baseline for {func.__name__}'
            )
        else:
            stdev = statistics.stdev(hist)
            slowest = max(hist)
            limit = slowest + stdev * 5
            logger.debug(
                f'Max acceptable performance for {func.__name__} is {limit:f}s'
            )
            if (
                run_time > limit
                and not config.config['unittests_ignore_perf']
            ):
                msg = f'''{func_id} performance has regressed unacceptably.
{slowest:f}s is the slowest record in {len(hist)} db perf samples.
It just ran in {run_time:f}s which is >5 stdevs slower than the slowest sample.
Here is the current, full db perf timing distribution:

{hist}'''
                logger.error(msg)
                # args[0] is the TestCase instance the decorated method ran on.
                args[0].fail(msg)
            else:
                hist.append(run_time)

        # Don't let the sample history grow without bound; keep a random
        # subsample of at most unittests_num_perf_samples timings.
        n = min(config.config['unittests_num_perf_samples'], len(hist))
        hist = random.sample(hist, n)
        perfdb[func_id] = hist
        save_known_test_performance_characteristics(perfdb)
        return value

    return wrapper_perf_monitor


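# A hedged usage sketch (the test class and method names are hypothetical,
# not part of this module): decorate an individual test method so its
# runtime is recorded and compared against the pickled history in _db.
#
#     class MyTest(unittest.TestCase):
#         @check_method_for_perf_regressions
#         def test_hot_path(self):
#             self.assertEqual(4, 2 + 2)

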
def check_all_methods_for_perf_regressions(prefix='test_'):
    """Decorate a unittest.TestCase class such that every method whose
    name starts with prefix is wrapped with
    check_method_for_perf_regressions."""
    def decorate_the_testcase(cls):
        if issubclass(cls, unittest.TestCase):
            for name, m in inspect.getmembers(cls, inspect.isfunction):
                if name.startswith(prefix):
                    setattr(cls, name, check_method_for_perf_regressions(m))
                    logger.debug(f'Wrapping {cls.__name__}:{name}.')
        return cls
    return decorate_the_testcase


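# A hedged sketch (class name hypothetical): decorating the TestCase
# itself wraps every method whose name starts with the given prefix,
# which is equivalent to applying check_method_for_perf_regressions to
# each test method by hand.
#
#     @check_all_methods_for_perf_regressions()
#     class MyWholeSuiteTest(unittest.TestCase):
#         def test_one(self):
#             self.assertTrue(True)
#
#         def test_two(self):
#             self.assertFalse(False)

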
157 """Hard code a breakpoint somewhere; drop into pdb."""
class RecordStdout(object):
    """
    Record what is emitted to stdout.

    >>> with RecordStdout() as record:
    ...     print("This is a test!")
    >>> print({record().readline()})
    {'This is a test!\\n'}
    """

    def __init__(self) -> None:
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')
        self.recorder = None

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        self.recorder = contextlib.redirect_stdout(self.destination)
        self.recorder.__enter__()
        return lambda: self.destination

    def __exit__(self, *args) -> bool:
        self.recorder.__exit__(*args)
        self.destination.seek(0)
        return None


class RecordStderr(object):
    """
    Record what is emitted to stderr.

    >>> with RecordStderr() as record:
    ...     print("This is a test!", file=sys.stderr)
    >>> print({record().readline()})
    {'This is a test!\\n'}
    """

    def __init__(self) -> None:
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')
        self.recorder = None

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        self.recorder = contextlib.redirect_stderr(self.destination)
        self.recorder.__enter__()
        return lambda: self.destination

    def __exit__(self, *args) -> bool:
        self.recorder.__exit__(*args)
        self.destination.seek(0)
        return None


class RecordMultipleStreams(object):
    """
    Record the output to more than one stream.
    """

    def __init__(self, *files) -> None:
        self.files = [*files]
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')
        self.saved_writes = []

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        for f in self.files:
            self.saved_writes.append(f.write)
            f.write = self.destination.write
        return lambda: self.destination

    def __exit__(self, *args) -> bool:
        for f in self.files:
            # Restore each stream's write in the same order it was saved.
            f.write = self.saved_writes.pop(0)
        self.destination.seek(0)
        return None


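# A hedged usage sketch: capture stdout and stderr into one buffer by
# temporarily swapping each stream's write method for the spooled temp
# file's write.
#
#     with RecordMultipleStreams(sys.stdout, sys.stderr) as record:
#         print("this goes to stdout")
#         print("this goes to stderr", file=sys.stderr)
#     captured = record().read()

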
if __name__ == '__main__':
    unittest.main()