--unittests_ignore_perf shouldn't mess with the database / file
author Scott <[email protected]>
Mon, 31 Jan 2022 06:57:29 +0000 (22:57 -0800)
committer Scott <[email protected]>
Mon, 31 Jan 2022 06:57:29 +0000 (22:57 -0800)
layer at all.  When the db was down, some of the tests failed even
though they didn't care about perf.  Let's not do that.

unittest_utils.py

index e84b4eb929cfb8ac37daf31811b675c9d9d7825e..b9746a80307ad512cee993aca29449e365e998b6 100644 (file)
@@ -158,6 +158,9 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
 
     @functools.wraps(func)
     def wrapper_perf_monitor(*args, **kwargs):
+        if config.config['unittests_ignore_perf']:
+            return func(*args, **kwargs)
+
         if config.config['unittests_persistance_strategy'] == 'FILE':
             filename = config.config['unittests_perfdb_filename']
             helper = FileBasedPerfRegressionDataPersister(filename)
@@ -208,7 +211,7 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
             limit = slowest + stdev * 4
             logger.debug(f'For {func_name}, max acceptable runtime is {limit:f}s')
             logger.debug(f'For {func_name}, actual observed runtime was {run_time:f}s')
-            if run_time > limit and not config.config['unittests_ignore_perf']:
+            if run_time > limit:
                 msg = f'''{func_id} performance has regressed unacceptably.
 {slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
 It just ran in {run_time:f}s which is 4+ stdevs slower than the slowest.
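For context, below is a minimal standalone sketch (not the project's actual code) of the early-return pattern this diff adds: when the ignore-perf flag is set, the wrapped test runs immediately and the file/db-backed persistence layer is never touched. The CONFIG dict and record_runtime helper are hypothetical stand-ins for the real config module and the FILE/DATABASE persister classes.

import functools
import time
from typing import Callable

CONFIG = {'unittests_ignore_perf': True}   # hypothetical stand-in for config.config


def record_runtime(func_name: str, run_time: float) -> None:
    # Hypothetical stand-in for the file/db-backed perf persisters.
    print(f'{func_name} ran in {run_time:f}s')


def check_perf(func: Callable) -> Callable:
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # With the flag set, bail out before any persistence work happens,
        # so a down database can't fail a test that doesn't care about perf.
        if CONFIG['unittests_ignore_perf']:
            return func(*args, **kwargs)
        start = time.perf_counter()
        result = func(*args, **kwargs)
        record_runtime(func.__name__, time.perf_counter() - start)
        return result
    return wrapper


@check_perf
def test_addition():
    assert 1 + 1 == 2


if __name__ == '__main__':
    test_addition()   # prints nothing: perf bookkeeping is skipped entirely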