Ran the black code formatter on everything.
diff --git a/unittest_utils.py b/unittest_utils.py
index d63f2b566b51b33bc794340b0d425d29a7e89142..4a9669d3a21f66e35004e1968cc85b65d711fd5c 100644
--- a/unittest_utils.py
+++ b/unittest_utils.py
@@ -32,8 +32,8 @@ import sqlalchemy as sa
 
 logger = logging.getLogger(__name__)
 cfg = config.add_commandline_args(
-    f'Logging ({__file__})',
-    'Args related to function decorators')
+    f'Logging ({__file__})', 'Args related to function decorators'
+)
 cfg.add_argument(
     '--unittests_ignore_perf',
     action='store_true',
@@ -44,34 +44,34 @@ cfg.add_argument(
     '--unittests_num_perf_samples',
     type=int,
     default=50,
-    help='The count of perf timing samples we need to see before blocking slow runs on perf grounds'
+    help='The count of perf timing samples we need to see before blocking slow runs on perf grounds',
 )
 cfg.add_argument(
     '--unittests_drop_perf_traces',
     type=str,
     nargs=1,
     default=None,
-    help='The identifier (i.e. file!test_fixture) for which we should drop all perf data'
+    help='The identifier (i.e. file!test_fixture) for which we should drop all perf data',
 )
 cfg.add_argument(
     '--unittests_persistance_strategy',
     choices=['FILE', 'DATABASE'],
     default='DATABASE',
-    help='Should we persist perf data in a file or db?'
+    help='Should we persist perf data in a file or db?',
 )
 cfg.add_argument(
     '--unittests_perfdb_filename',
     type=str,
     metavar='FILENAME',
     default=f'{os.environ["HOME"]}/.python_unittest_performance_db',
-    help='File in which to store perf data (iff --unittests_persistance_strategy is FILE)'
+    help='File in which to store perf data (iff --unittests_persistance_strategy is FILE)',
 )
 cfg.add_argument(
     '--unittests_perfdb_spec',
     type=str,
     metavar='DBSPEC',
     default='mariadb+pymysql://python_unittest:<PASSWORD>@db.house:3306/python_unittest_performance',
-    help='Db connection spec for perf data (iff --unittest_persistance_strategy is DATABASE)'
+    help='Db connection spec for perf data (iff --unittests_persistance_strategy is DATABASE)',
 )
 
 # >>> This is the hacky business, FYI. <<<
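Nearly every hunk in this change is one of two black behaviors: a call that fits within black's default 88-column limit gets joined onto a single line, while a call that does not fit gets exploded one argument per line with a "magic" trailing comma appended, which pins that layout in future runs. A minimal sketch of the two cases (the function and argument names are invented):

    # Fits in 88 columns: black joins the arguments onto one line.
    frobnicate(alpha, beta)

    # Too long for one line: black puts each argument on its own line
    # and adds a trailing comma so later runs keep this layout.
    frobnicate(
        a_rather_long_argument_expression,
        another_rather_long_argument_expression,
    )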
@@ -87,7 +87,9 @@ class PerfRegressionDataPersister(ABC):
         pass
 
     @abstractmethod
-    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
+    def save_performance_data(
+        self, method_id: str, data: Dict[str, List[float]]
+    ):
         pass
 
     @abstractmethod
@@ -104,7 +106,9 @@ class FileBasedPerfRegressionDataPersister(PerfRegressionDataPersister):
         with open(self.filename, 'rb') as f:
             return pickle.load(f)
 
-    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
+    def save_performance_data(
+        self, method_id: str, data: Dict[str, List[float]]
+    ):
         for trace in self.traces_to_delete:
             if trace in data:
                 data[trace] = []
@@ -134,7 +138,9 @@ class DatabasePerfRegressionDataPersister(PerfRegressionDataPersister):
         results.close()
         return ret
 
-    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
+    def save_performance_data(
+        self, method_id: str, data: Dict[str, List[float]]
+    ):
         self.delete_performance_data(method_id)
         for (method_id, perf_data) in data.items():
             sql = 'INSERT INTO runtimes_by_function (function, runtime) VALUES '
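Judging from the hunks above, the persister ABC appears to define three hooks: load_performance_data, save_performance_data, and delete_performance_data (the exact load signature is not visible in this diff). Under that assumption, a minimal in-memory implementation, hypothetical and useful mainly for exercising the decorator without touching a file or database, might look like:

    class InMemoryPerfRegressionDataPersister(PerfRegressionDataPersister):
        """Hypothetical persister keeping perf data in a plain dict.

        Assumes the module's typing imports (Dict, List) are in scope.
        """

        def __init__(self):
            self.db: Dict[str, List[float]] = {}

        def load_performance_data(self, method_id: str) -> Dict[str, List[float]]:
            return dict(self.db)

        def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
            self.db = dict(data)

        def delete_performance_data(self, method_id: str):
            self.db.pop(method_id, None)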
@@ -155,6 +161,7 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
     message if it has become too slow.
 
     """
+
     @functools.wraps(func)
     def wrapper_perf_monitor(*args, **kwargs):
         if config.config['unittests_persistance_strategy'] == 'FILE':
@@ -162,7 +169,9 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
             helper = FileBasedPerfRegressionDataPersister(filename)
         elif config.config['unittests_persistance_strategy'] == 'DATABASE':
             dbspec = config.config['unittests_perfdb_spec']
-            dbspec = dbspec.replace('<PASSWORD>', scott_secrets.MARIADB_UNITTEST_PERF_PASSWORD)
+            dbspec = dbspec.replace(
+                '<PASSWORD>', scott_secrets.MARIADB_UNITTEST_PERF_PASSWORD
+            )
             helper = DatabasePerfRegressionDataPersister(dbspec)
         else:
             raise Exception(
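A design note on the substitution above: keeping the literal '<PASSWORD>' placeholder in the argparse default means the real credential never appears in --help output or shell history; it is resolved from scott_secrets only at the moment the connection spec is actually used.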
@@ -198,14 +207,14 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
         hist = perfdb.get(func_id, [])
         if len(hist) < config.config['unittests_num_perf_samples']:
             hist.append(run_time)
-            logger.debug(
-                f'Still establishing a perf baseline for {func_name}'
-            )
+            logger.debug(f'Still establishing a perf baseline for {func_name}')
         else:
             stdev = statistics.stdev(hist)
             logger.debug(f'For {func_name}, performance stdev={stdev}')
             slowest = hist[-1]
-            logger.debug(f'For {func_name}, slowest perf on record is {slowest:f}s')
+            logger.debug(
+                f'For {func_name}, slowest perf on record is {slowest:f}s'
+            )
             limit = slowest + stdev * 4
             logger.debug(
                 f'For {func_name}, max acceptable runtime is {limit:f}s'
@@ -213,10 +222,7 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
             logger.debug(
                 f'For {func_name}, actual observed runtime was {run_time:f}s'
             )
-            if (
-                run_time > limit and
-                not config.config['unittests_ignore_perf']
-            ):
+            if run_time > limit and not config.config['unittests_ignore_perf']:
                 msg = f'''{func_id} performance has regressed unacceptably.
 {slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
 It just ran in {run_time:f}s which is 4+ stdevs slower than the slowest.
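To make the threshold arithmetic concrete: with limit = slowest + 4 * stdev, a history whose slowest sample is 0.90s with a standard deviation of 0.05s gives a limit of 0.90 + 4 * 0.05 = 1.10s, so a 1.2s run fails the test while a 1.05s run is merely appended to the history.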
@@ -226,8 +232,8 @@ Here is the current, full db perf timing distribution:
                 for x in hist:
                     msg += f'{x:f}\n'
                 logger.error(msg)
-                slf = args[0]        # Peek at the wrapped function's self ref.
-                slf.fail(msg)        # ...to fail the testcase.
+                slf = args[0]  # Peek at the wrapped function's self ref.
+                slf.fail(msg)  # ...to fail the testcase.
             else:
                 hist.append(run_time)
 
@@ -239,6 +245,7 @@ Here is the current, full db perf timing distribution:
         perfdb[func_id] = hist
         helper.save_performance_data(func_id, perfdb)
         return value
+
     return wrapper_perf_monitor
 
 
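Pulling the decorator together, it is applied per test method; a hedged usage sketch (the class, method, and helper names here are invented):

    class MyPerfSensitiveTests(unittest.TestCase):
        @check_method_for_perf_regressions
        def test_hot_path(self):
            result = run_the_hot_path()  # hypothetical code under test
            self.assertTrue(result)

For the first config.config['unittests_num_perf_samples'] runs (50 by default) the wrapper only records timings to establish a baseline; after that, a run more than four standard deviations slower than the slowest sample on record fails the test via self.fail().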
@@ -255,6 +262,7 @@ def check_all_methods_for_perf_regressions(prefix='test_'):
             ...
 
     """
+
     def decorate_the_testcase(cls):
         if issubclass(cls, unittest.TestCase):
             for name, m in inspect.getmembers(cls, inspect.isfunction):
@@ -262,12 +270,14 @@ def check_all_methods_for_perf_regressions(prefix='test_'):
                     setattr(cls, name, check_method_for_perf_regressions(m))
                     logger.debug(f'Wrapping {cls.__name__}:{name}.')
         return cls
+
     return decorate_the_testcase
 
 
 def breakpoint():
     """Hard code a breakpoint somewhere; drop into pdb."""
     import pdb
+
     pdb.set_trace()
 
 
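One caveat on the breakpoint() helper above: since Python 3.7 there has been a builtin of the same name, which this module-level function shadows. The builtin additionally honors the PYTHONBREAKPOINT environment variable:

    # Builtin since Python 3.7 (PEP 553); PYTHONBREAKPOINT=0 disables it,
    # PYTHONBREAKPOINT=pudb.set_trace swaps in a different debugger.
    breakpoint()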
@@ -346,4 +356,5 @@ class RecordMultipleStreams(object):
 
 if __name__ == '__main__':
     import doctest
+
     doctest.testmod()
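A usage note: doctest.testmod() defaults its verbose flag from sys.argv, so running this module directly with -v (python unittest_utils.py -v) prints each doctest example as it executes.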