3 """Helpers for unittests. Note that when you import this we
4 automatically wrap unittest.main() with a call to
5 bootstrap.initialize so that we getLogger config, commandline args,
6 logging control, etc... this works fine but it's a little hacky so
import contextlib
import functools
import inspect
import logging
import os
import pickle
import random
import statistics
import sys
import tempfile
import time
import unittest
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional

import sqlalchemy as sa

import bootstrap
import config
import function_utils
import scott_secrets

logger = logging.getLogger(__name__)
cfg = config.add_commandline_args(
    f'Logging ({__file__})', 'Args related to function decorators'
)
cfg.add_argument(
    '--unittests_ignore_perf',
    action='store_true',
    default=False,
    help='Ignore unittest perf regression in @check_method_for_perf_regressions',
)
cfg.add_argument(
    '--unittests_num_perf_samples',
    type=int,
    default=50,
    help='The count of perf timing samples we need to see before blocking slow runs on perf grounds',
)
cfg.add_argument(
    '--unittests_drop_perf_traces',
    type=str,
    default=None,
    help='The identifier (i.e. file!test_fixture) for which we should drop all perf data',
)
cfg.add_argument(
    '--unittests_persistance_strategy',
    choices=['FILE', 'DATABASE'],
    help='Should we persist perf data in a file or db?',
)
cfg.add_argument(
    '--unittests_perfdb_filename',
    type=str,
    default=f'{os.environ["HOME"]}/.python_unittest_performance_db',
    help='File in which to store perf data (iff --unittests_persistance_strategy is FILE)',
)
cfg.add_argument(
    '--unittests_perfdb_spec',
    type=str,
    default='mariadb+pymysql://python_unittest:<PASSWORD>@db.house:3306/python_unittest_performance',
    help='Db connection spec for perf data (iff --unittests_persistance_strategy is DATABASE)',
)

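# A rough sketch of how these flags might be passed on the command line when
# running a test module that imports these helpers (the test file name below
# is made up; the flags themselves are the ones defined above):
#
#     python my_test.py --unittests_num_perf_samples 20 \
#         --unittests_persistance_strategy FILE \
#         --unittests_perfdb_filename /tmp/my_perf_db.pickle
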
# >>> This is the hacky business, FYI. <<<
unittest.main = bootstrap.initialize(unittest.main)

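# A minimal sketch of what a test file that uses these helpers is assumed to
# look like; the module, class, and method names below are illustrative only:
#
#     import unittest
#     import unittest_utils  # importing is enough; unittest.main is now wrapped
#
#     class MyTest(unittest.TestCase):
#         def test_something(self):
#             self.assertEqual(2 + 2, 4)
#
#     if __name__ == '__main__':
#         unittest.main()  # also runs bootstrap.initialize / config parsing first
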
class PerfRegressionDataPersister(ABC):
    """Loads, saves, and deletes historical perf data keyed by test id."""

    @abstractmethod
    def load_performance_data(self, method_id: str) -> Dict[str, List[float]]:
        ...

    @abstractmethod
    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
        ...

    @abstractmethod
    def delete_performance_data(self, method_id: str):
        ...


class FileBasedPerfRegressionDataPersister(PerfRegressionDataPersister):
    """Persists perf data in a local pickle file."""

    def __init__(self, filename: str):
        self.filename = filename
        self.traces_to_delete: List[str] = []

    def load_performance_data(self, method_id: str) -> Dict[str, List[float]]:
        with open(self.filename, 'rb') as f:
            return pickle.load(f)

    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
        # Apply any pending deletions before persisting.
        for trace in self.traces_to_delete:
            if trace in data:
                del data[trace]
        with open(self.filename, 'wb') as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)

    def delete_performance_data(self, method_id: str):
        # Deletions are deferred until the next save.
        self.traces_to_delete.append(method_id)


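# A rough usage sketch of the persister round trip (the filename and the
# method id below are illustrative only):
#
#     helper = FileBasedPerfRegressionDataPersister('/tmp/perfdb.pickle')
#     perfdb = helper.load_performance_data('my_test.py!test_foo')
#     perfdb.setdefault('my_test.py!test_foo', []).append(0.123)
#     helper.save_performance_data('my_test.py!test_foo', perfdb)
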
class DatabasePerfRegressionDataPersister(PerfRegressionDataPersister):
    """Persists perf data in a database via sqlalchemy."""

    def __init__(self, dbspec: str):
        self.dbspec = dbspec
        self.engine = sa.create_engine(self.dbspec)
        self.conn = self.engine.connect()

    def load_performance_data(self, method_id: str) -> Dict[str, List[float]]:
        results = self.conn.execute(
            f'SELECT * FROM runtimes_by_function WHERE function = "{method_id}";'
        )
        ret: Dict[str, List[float]] = {method_id: []}
        for result in results.all():
            ret[method_id].append(result['runtime'])
        results.close()
        return ret

    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
        self.delete_performance_data(method_id)
        for (method_id, perf_data) in data.items():
            sql = 'INSERT INTO runtimes_by_function (function, runtime) VALUES '
            for perf in perf_data:
                self.conn.execute(sql + f'("{method_id}", {perf});')

    def delete_performance_data(self, method_id: str):
        sql = f'DELETE FROM runtimes_by_function WHERE function = "{method_id}"'
        self.conn.execute(sql)


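# The queries above assume a table roughly like the following; only the
# 'function' and 'runtime' column names are implied by the SQL in this class,
# and the column types here are guesses:
#
#     CREATE TABLE runtimes_by_function (
#         function VARCHAR(1024) NOT NULL,
#         runtime DOUBLE NOT NULL
#     );
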
def check_method_for_perf_regressions(func: Callable) -> Callable:
    """This is meant to be used on a method in a class that subclasses
    unittest.TestCase.  When thus decorated it will time the execution
    of the code in the method, compare it with a database of
    historical performance, and fail the test with a perf-related
    message if it has become too slow.
    """

    @functools.wraps(func)
    def wrapper_perf_monitor(*args, **kwargs):
        if config.config['unittests_ignore_perf']:
            return func(*args, **kwargs)

        if config.config['unittests_persistance_strategy'] == 'FILE':
            filename = config.config['unittests_perfdb_filename']
            helper = FileBasedPerfRegressionDataPersister(filename)
        elif config.config['unittests_persistance_strategy'] == 'DATABASE':
            dbspec = config.config['unittests_perfdb_spec']
            dbspec = dbspec.replace(
                '<PASSWORD>', scott_secrets.MARIADB_UNITTEST_PERF_PASSWORD
            )
            helper = DatabasePerfRegressionDataPersister(dbspec)
        else:
            raise Exception('Unknown/unexpected --unittests_persistance_strategy value')

        func_id = function_utils.function_identifier(func)
        func_name = func.__name__
        logger.debug(f'Watching {func_name}\'s performance...')
        logger.debug(f'Canonical function identifier = {func_id}')

        try:
            perfdb = helper.load_performance_data(func_id)
        except Exception as e:
            logger.exception(e)
            msg = 'Unable to load perfdb; skipping it...'
            logger.warning(msg)
            perfdb = {}

        # cmdline arg to forget perf traces for function
        drop_id = config.config['unittests_drop_perf_traces']
        if drop_id is not None:
            helper.delete_performance_data(drop_id)

        # Run the wrapped test paying attention to latency.
        start_time = time.perf_counter()
        value = func(*args, **kwargs)
        end_time = time.perf_counter()
        run_time = end_time - start_time

        # See if it was unexpectedly slow.
        hist = perfdb.get(func_id, [])
        if len(hist) < config.config['unittests_num_perf_samples']:
            hist.append(run_time)
            logger.debug(f'Still establishing a perf baseline for {func_name}')
        else:
            stdev = statistics.stdev(hist)
            logger.debug(f'For {func_name}, performance stdev={stdev}')
            slowest = max(hist)
            logger.debug(f'For {func_name}, slowest perf on record is {slowest:f}s')
            limit = slowest + stdev * 4
            logger.debug(f'For {func_name}, max acceptable runtime is {limit:f}s')
            logger.debug(f'For {func_name}, actual observed runtime was {run_time:f}s')
            if run_time > limit:
                msg = f'''{func_id} performance has regressed unacceptably.
{slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
It just ran in {run_time:f}s which is 4+ stdevs slower than the slowest.
Here is the current, full db perf timing distribution:

{hist}'''
                slf = args[0]  # Peek at the wrapped function's self ref.
                slf.fail(msg)  # ...to fail the testcase.
            else:
                hist.append(run_time)

        # Don't spam the database with samples; just pick a random
        # sample from what we have and store that back.
        n = min(config.config['unittests_num_perf_samples'], len(hist))
        hist = random.sample(hist, n)
        perfdb[func_id] = hist
        helper.save_performance_data(func_id, perfdb)
        return value

    return wrapper_perf_monitor


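# A small sketch of decorating a single test method directly; the class,
# method, and helper names below are illustrative, not from this module:
#
#     class TestSlowThing(unittest.TestCase):
#         @check_method_for_perf_regressions
#         def test_hot_path(self):
#             do_something_expensive()
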
def check_all_methods_for_perf_regressions(prefix='test_'):
    """Decorate unittests with this to pay attention to the perf of the
    testcode and flag perf regressions.  e.g.

    import unittest_utils as uu

    @uu.check_all_methods_for_perf_regressions()
    class TestMyClass(unittest.TestCase):

        def test_some_part_of_my_class(self):
            ...

    """

    def decorate_the_testcase(cls):
        if issubclass(cls, unittest.TestCase):
            for name, m in inspect.getmembers(cls, inspect.isfunction):
                if name.startswith(prefix):
                    setattr(cls, name, check_method_for_perf_regressions(m))
                    logger.debug(f'Wrapping {cls.__name__}:{name}.')
        return cls

    return decorate_the_testcase


266 """Hard code a breakpoint somewhere; drop into pdb."""
class RecordStdout(object):
    """
    Record what is emitted to stdout.

    >>> with RecordStdout() as record:
    ...     print("This is a test!")
    >>> print({record().readline()})
    {'This is a test!\\n'}
    """

    def __init__(self) -> None:
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')
        self.recorder: Optional[contextlib.redirect_stdout] = None

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        self.recorder = contextlib.redirect_stdout(self.destination)
        assert self.recorder is not None
        self.recorder.__enter__()
        return lambda: self.destination

    def __exit__(self, *args) -> Optional[bool]:
        assert self.recorder is not None
        self.recorder.__exit__(*args)
        self.destination.seek(0)


class RecordStderr(object):
    """
    Record what is emitted to stderr.

    >>> with RecordStderr() as record:
    ...     print("This is a test!", file=sys.stderr)
    >>> print({record().readline()})
    {'This is a test!\\n'}
    """

    def __init__(self) -> None:
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')
        self.recorder: Optional[contextlib.redirect_stdout[Any]] = None

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        self.recorder = contextlib.redirect_stderr(self.destination)  # type: ignore
        assert self.recorder is not None
        self.recorder.__enter__()
        return lambda: self.destination

    def __exit__(self, *args) -> Optional[bool]:
        assert self.recorder is not None
        self.recorder.__exit__(*args)
        self.destination.seek(0)


class RecordMultipleStreams(object):
    """
    Record the output to more than one stream.
    """

    def __init__(self, *files) -> None:
        self.files = [*files]
        self.destination = tempfile.SpooledTemporaryFile(mode='r+')
        self.saved_writes: List[Callable[..., Any]] = []

    def __enter__(self) -> Callable[[], tempfile.SpooledTemporaryFile]:
        for f in self.files:
            self.saved_writes.append(f.write)
            f.write = self.destination.write
        return lambda: self.destination

    def __exit__(self, *args) -> Optional[bool]:
        # Restore each stream's original write method.  The saved methods
        # were appended in file order, so restore them in the same order
        # (popping from the end would pair them up backwards).
        for f, w in zip(self.files, self.saved_writes):
            f.write = w
        self.saved_writes.clear()
        self.destination.seek(0)

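# A rough usage sketch; it assumes the streams passed in allow their .write
# attribute to be reassigned (which depends on the stream objects themselves):
#
#     with RecordMultipleStreams(sys.stdout, sys.stderr) as record:
#         print("to stdout")
#         print("to stderr", file=sys.stderr)
#     combined = record().read()

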
if __name__ == '__main__':
    unittest.main()