X-Git-Url: https://wannabe.guru.org/gitweb/?a=blobdiff_plain;f=unittest_utils.py;fp=unittest_utils.py;h=f4fed35f09fdf29970820bef8566652825327634;hb=e6f32fdd9b373dfcd100c7accb41f57d83c2f0a1;hp=4a9669d3a21f66e35004e1968cc85b65d711fd5c;hpb=36fea7f15ed17150691b5b3ead75450e575229ef;p=python_utils.git

diff --git a/unittest_utils.py b/unittest_utils.py
index 4a9669d..f4fed35 100644
--- a/unittest_utils.py
+++ b/unittest_utils.py
@@ -87,9 +87,7 @@ class PerfRegressionDataPersister(ABC):
         pass
 
     @abstractmethod
-    def save_performance_data(
-        self, method_id: str, data: Dict[str, List[float]]
-    ):
+    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
         pass
 
     @abstractmethod
@@ -106,9 +104,7 @@ class FileBasedPerfRegressionDataPersister(PerfRegressionDataPersister):
         with open(self.filename, 'rb') as f:
             return pickle.load(f)
 
-    def save_performance_data(
-        self, method_id: str, data: Dict[str, List[float]]
-    ):
+    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
         for trace in self.traces_to_delete:
             if trace in data:
                 data[trace] = []
@@ -138,9 +134,7 @@ class DatabasePerfRegressionDataPersister(PerfRegressionDataPersister):
         results.close()
         return ret
 
-    def save_performance_data(
-        self, method_id: str, data: Dict[str, List[float]]
-    ):
+    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
         self.delete_performance_data(method_id)
         for (method_id, perf_data) in data.items():
             sql = 'INSERT INTO runtimes_by_function (function, runtime) VALUES '
@@ -174,9 +168,7 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
             )
             helper = DatabasePerfRegressionDataPersister(dbspec)
         else:
-            raise Exception(
-                'Unknown/unexpected --unittests_persistance_strategy value'
-            )
+            raise Exception('Unknown/unexpected --unittests_persistance_strategy value')
 
         func_id = function_utils.function_identifier(func)
         func_name = func.__name__
@@ -212,16 +204,10 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
             stdev = statistics.stdev(hist)
             logger.debug(f'For {func_name}, performance stdev={stdev}')
             slowest = hist[-1]
-            logger.debug(
-                f'For {func_name}, slowest perf on record is {slowest:f}s'
-            )
+            logger.debug(f'For {func_name}, slowest perf on record is {slowest:f}s')
             limit = slowest + stdev * 4
-            logger.debug(
-                f'For {func_name}, max acceptable runtime is {limit:f}s'
-            )
-            logger.debug(
-                f'For {func_name}, actual observed runtime was {run_time:f}s'
-            )
+            logger.debug(f'For {func_name}, max acceptable runtime is {limit:f}s')
+            logger.debug(f'For {func_name}, actual observed runtime was {run_time:f}s')
             if run_time > limit and not config.config['unittests_ignore_perf']:
                 msg = f'''{func_id} performance has regressed unacceptably.
 {slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
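
Usage sketch (not part of the diff above): check_method_for_perf_regressions is
a decorator for a method of a unittest.TestCase subclass. The sketch below is a
minimal, hypothetical example; the test class and method names are invented for
illustration, and it assumes the python_utils config machinery has been
initialized so the decorator can read its flags.

    import unittest

    import unittest_utils  # the module patched above


    class ParserPerfTest(unittest.TestCase):  # hypothetical test class
        @unittest_utils.check_method_for_perf_regressions
        def test_parse_speed(self):  # hypothetical test method
            # The wrapper times this body, compares the elapsed time with
            # the persisted runtime history for this method, and treats
            # anything slower than the slowest sample on record plus four
            # standard deviations as a regression (unless the
            # --unittests_ignore_perf flag is set).
            self.assertEqual(sum(range(10_000)), 49_995_000)


    if __name__ == '__main__':
        unittest.main()

Whether the history comes from a pickle file or a database is chosen at runtime
by --unittests_persistance_strategy, which selects between the file-based and
database-backed PerfRegressionDataPersister subclasses shown in the diff.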