Do not let test names in results collide when we run one to capture
coverage and another instance of the same test w/o coverage to check
perf.

author     Scott Gasch <[email protected]>
           Tue, 18 Oct 2022 03:00:26 +0000 (20:00 -0700)
committer  Scott Gasch <[email protected]>
           Tue, 18 Oct 2022 03:00:26 +0000 (20:00 -0700)
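
For context, the collision happens because results are keyed by test name:
a perf-sensitive test runs once under coverage and once without, and before
this commit both runs reported under the same name. A minimal sketch of the
failure mode and the fix, using a plain dict and a made-up test name rather
than the runner's actual results type:

    # Keying results by bare test name means the second run of the same
    # test overwrites the first.
    results = {}

    def record(name: str, outcome: str) -> None:
        results[name] = outcome

    record('string_utils_test', 'pass (coverage run)')
    record('string_utils_test', 'pass (perf run)')  # clobbers the coverage result
    assert len(results) == 1

    # Suffixing the perf run's name, as this commit does, keeps both entries:
    results.clear()
    record('string_utils_test', 'pass (coverage run)')
    record('string_utils_test_no_coverage', 'pass (perf run)')
    assert len(results) == 2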

tests/run_tests.py

index b398e1826a79d259ad72ea0a3c693e0252187700..301f89436059040ae549a76c3904e19124279648 100755 (executable)
@@ -291,6 +291,9 @@ class TemplatedTestRunner(TestRunner, ABC):
         for future in smart_future.wait_any(running, log_exceptions=False):
             result = future._resolve()
             logger.debug('Test %s finished.', result.name)
+
+            # We sometimes run the same test more than once.  Do not allow
+            # one run's results to clobber the other's.
             self.test_results += result
             if self.check_for_abort():
                 logger.debug(
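
The self.test_results += result line above implies the aggregate results
object supports in-place merging. A sketch of what such an accumulator could
look like; the field names and __iadd__ implementation here are assumptions,
not the repo's actual TestResults class:

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class TestResults:
        # Hypothetical accumulator; field names are illustrative only.
        tests_succeeded: List[str] = field(default_factory=list)
        tests_failed: List[str] = field(default_factory=list)

        def __iadd__(self, other: 'TestResults') -> 'TestResults':
            # Merge another run's results into this aggregate.  Because the
            # perf run now reports under '<name>_no_coverage', two runs of
            # one test no longer land on the same entry.
            self.tests_succeeded.extend(other.tests_succeeded)
            self.tests_failed.extend(other.tests_failed)
            return self

    agg = TestResults()
    agg += TestResults(tests_succeeded=['foo_test'])
    agg += TestResults(tests_succeeded=['foo_test_no_coverage'])
    assert agg.tests_succeeded == ['foo_test', 'foo_test_no_coverage']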
@@ -327,7 +330,7 @@ class UnittestTestRunner(TemplatedTestRunner):
                 if basename in PERF_SENSATIVE_TESTS:
                     ret.append(
                         TestToRun(
-                            name=basename,
+                            name=f'{basename}_no_coverage',
                             kind='unittest w/o coverage to record perf',
                             cmdline=f'{test} 2>&1',
                         )
@@ -374,7 +377,7 @@ class DoctestTestRunner(TemplatedTestRunner):
                     if basename in PERF_SENSATIVE_TESTS:
                         ret.append(
                             TestToRun(
-                                name=basename,
+                                name=f'{basename}_no_coverage',
                                 kind='doctest w/o coverage to record perf',
                                 cmdline=f'python3 {test} 2>&1',
                             )
@@ -419,7 +422,7 @@ class IntegrationTestRunner(TemplatedTestRunner):
                 if basename in PERF_SENSATIVE_TESTS:
                     ret.append(
                         TestToRun(
-                            name=basename,
+                            name=f'{basename}_no_coverage',
                             kind='integration test w/o coverage to capture perf',
                             cmdline=f'{test} 2>&1',
                         )
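
All three hunks above make the same one-line change: UnittestTestRunner,
DoctestTestRunner, and IntegrationTestRunner each schedule a second,
coverage-free run for perf-sensitive tests, and that second run now gets a
distinct name. A sketch of the shared pattern, with TestToRun's fields taken
from the diff (the real class may carry more):

    from dataclasses import dataclass

    @dataclass
    class TestToRun:
        # Fields visible in the diff; the real definition may differ.
        name: str
        kind: str
        cmdline: str

    def perf_variant(test: str, basename: str) -> TestToRun:
        # The no-coverage rerun of a perf-sensitive test.  Before this
        # commit its name was just basename, which collided with the
        # coverage run's result; the suffix keeps the result keys apart.
        return TestToRun(
            name=f'{basename}_no_coverage',
            kind='unittest w/o coverage to record perf',
            cmdline=f'{test} 2>&1',
        )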
@@ -570,16 +573,16 @@ def main() -> Optional[int]:
                         halt_event.set()
                         results[tid] = None
 
-        if started > 0:
-            percent_done = done / started
-        else:
-            percent_done = 0.0
-
         if failed == 0:
             color = ansi.fg('green')
         else:
             color = ansi.fg('red')
 
+        if started > 0:
+            percent_done = done / started * 100.0
+        else:
+            percent_done = 0.0
+
         if percent_done < 100.0:
             print(
                 text_utils.bar_graph_string(
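
Besides moving the computation below the color selection, this hunk fixes a
units bug: done / started is a fraction in [0, 1], so the percent_done < 100.0
guard that follows fired even when every test had finished. A quick check of
the corrected arithmetic:

    # Units sanity check for the corrected computation above.
    done, started = 7, 10
    percent_done = done / started * 100.0 if started > 0 else 0.0
    assert percent_done == 70.0   # fraction scaled to a percentage

    done = started
    percent_done = done / started * 100.0
    assert percent_done == 100.0  # completion is now detected, so the
                                  # progress bar stops printing at 100%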