layer at all. When the db was down, some of the tests failed even
though they didn't care about perf. Let's not do that.
@functools.wraps(func)
def wrapper_perf_monitor(*args, **kwargs):
+ if config.config['unittests_ignore_perf']:
+ return func(*args, **kwargs)
+
if config.config['unittests_persistance_strategy'] == 'FILE':
filename = config.config['unittests_perfdb_filename']
helper = FileBasedPerfRegressionDataPersister(filename)
limit = slowest + stdev * 4
logger.debug(f'For {func_name}, max acceptable runtime is {limit:f}s')
logger.debug(f'For {func_name}, actual observed runtime was {run_time:f}s')
- if run_time > limit and not config.config['unittests_ignore_perf']:
msg = f'''{func_id} performance has regressed unacceptably.
{slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
It just ran in {run_time:f}s which is 4+ stdevs slower than the slowest.
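
For context, here is a minimal, self-contained sketch of the wrapper flow after this change. The decorator name check_perf, the plain dict config, and the in-memory persister standing in for FileBasedPerfRegressionDataPersister are illustrative assumptions, not the real module's API; the real code reads flags through config.config and may fail the test outright rather than just log a warning.

    import functools
    import logging
    import statistics
    import time

    logger = logging.getLogger(__name__)

    # Illustrative stand-in for the real config.config flags.
    config = {
        'unittests_ignore_perf': False,
        'unittests_persistance_strategy': 'FILE',
        'unittests_perfdb_filename': '/tmp/perfdb.bin',
    }


    class InMemoryPerfPersister:
        """Illustrative stand-in for FileBasedPerfRegressionDataPersister."""

        def __init__(self) -> None:
            self.traces: dict[str, list[float]] = {}

        def load_performance_data(self, func_id: str) -> list[float]:
            return self.traces.get(func_id, [])

        def save_performance_data(self, func_id: str, hist: list[float]) -> None:
            self.traces[func_id] = hist


    _persister = InMemoryPerfPersister()


    def check_perf(func):
        """Flag a test as unacceptably slow versus its own history (sketch)."""

        @functools.wraps(func)
        def wrapper_perf_monitor(*args, **kwargs):
            # The point of the change: bail out before touching any
            # persistence layer, so a dead database can't fail tests
            # that don't care about perf.
            if config['unittests_ignore_perf']:
                return func(*args, **kwargs)

            func_id = f'{func.__module__}.{func.__qualname__}'
            func_name = func.__name__
            hist = _persister.load_performance_data(func_id)

            start = time.perf_counter()
            value = func(*args, **kwargs)
            run_time = time.perf_counter() - start

            # Need at least two samples before stdev is meaningful.
            if len(hist) >= 2:
                slowest = max(hist)
                stdev = statistics.stdev(hist)
                limit = slowest + stdev * 4
                logger.debug(f'For {func_name}, max acceptable runtime is {limit:f}s')
                logger.debug(f'For {func_name}, actual observed runtime was {run_time:f}s')
                if run_time > limit:
                    logger.warning(
                        f'{func_id} performance has regressed unacceptably.\n'
                        f'{slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.\n'
                        f'It just ran in {run_time:f}s which is 4+ stdevs slower than the slowest.'
                    )

            hist.append(run_time)
            _persister.save_performance_data(func_id, hist)
            return value

        return wrapper_perf_monitor

With unittests_ignore_perf set, the early return means no persister is ever constructed, so a missing or unreachable perf database can no longer affect tests that only exercise functionality.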