Adds an elapsed-time timer next to the progress bar in run_tests.py.
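
For context on the display change in the last hunk: the bar graph is now printed with end='' and the elapsed time is appended with end='\r', so the carriage return that rewinds the cursor comes after the combined bar-plus-timer line and the whole status line is redrawn in place on the next pass. A minimal, self-contained sketch of that pattern (plain Python; render_bar is a hypothetical stand-in for text_utils.bar_graph_string, and the loop is simulated rather than driven by real test completions):

    import time

    def render_bar(done: int, started: int, width: int = 72) -> str:
        # Hypothetical stand-in for text_utils.bar_graph_string.
        frac = done / started if started else 0.0
        filled = int(frac * width)
        return '[' + '#' * filled + '-' * (width - filled) + f'] {done}/{started}'

    start_time = time.time()
    started, done = 10, 0
    while done < started:
        done += 1                      # pretend another test just finished
        now = time.time()
        # Bar first with end='' so the timer lands on the same line ...
        print(render_bar(done, started), end='', flush=True)
        # ... then the elapsed seconds with end='\r' so the next pass
        # overwrites the whole status line in place.
        print(f'  {now - start_time:.1f}s', end='\r')
        time.sleep(0.1)
    print()                            # step past the status line when finished
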
[pyutils.git] / tests / run_tests.py
index b398e1826a79d259ad72ea0a3c693e0252187700..3b74ccb8cd6a3aa9d008e262b849b21938e72965 100755 (executable)
@@ -121,7 +121,7 @@ class TestResults:
             out += '\n'
 
         if len(self.tests_timed_out) > 0:
-            out += f'  ..{ansi.fg("yellow")}'
+            out += f'  ..{ansi.fg("lightning yellow")}'
             out += f'{len(self.tests_timed_out)} tests timed out'
             out += f'{ansi.reset()}:\n'
             for test in self.tests_failed:
@@ -288,9 +288,11 @@ class TemplatedTestRunner(TestRunner, ABC):
             )
             self.test_results.tests_executed[test_to_run.name] = time.time()
 
-        for future in smart_future.wait_any(running, log_exceptions=False):
-            result = future._resolve()
+        for result in smart_future.wait_any(running, log_exceptions=False):
             logger.debug('Test %s finished.', result.name)
+
+            # We sometimes run the same test more than once.  Do not allow
+            # one run's results to clobber the other's.
             self.test_results += result
             if self.check_for_abort():
                 logger.debug(
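
The comment added in this hunk implies that TestResults implements a merging '+=' so that a repeated run of the same test adds to, rather than overwrites, the earlier run's results. That implementation is not part of this diff; purely as an illustration of the idea (hypothetical class and field names, not the real TestResults):

    class ResultsAccumulator:
        # Hypothetical sketch: union-style merge so a second run of the same
        # test augments, rather than clobbers, what the first run recorded.
        def __init__(self) -> None:
            self.tests_succeeded: set[str] = set()
            self.tests_failed: set[str] = set()
            self.tests_timed_out: set[str] = set()

        def __iadd__(self, other: 'ResultsAccumulator') -> 'ResultsAccumulator':
            self.tests_succeeded |= other.tests_succeeded
            self.tests_failed |= other.tests_failed
            self.tests_timed_out |= other.tests_timed_out
            return self
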
@@ -327,7 +329,7 @@ class UnittestTestRunner(TemplatedTestRunner):
                 if basename in PERF_SENSATIVE_TESTS:
                     ret.append(
                         TestToRun(
-                            name=basename,
+                            name=f'{basename}_no_coverage',
                             kind='unittest w/o coverage to record perf',
                             cmdline=f'{test} 2>&1',
                         )
@@ -374,7 +376,7 @@ class DoctestTestRunner(TemplatedTestRunner):
                     if basename in PERF_SENSATIVE_TESTS:
                         ret.append(
                             TestToRun(
-                                name=basename,
+                                name=f'{basename}_no_coverage',
                                 kind='doctest w/o coverage to record perf',
                                 cmdline=f'python3 {test} 2>&1',
                             )
@@ -419,7 +421,7 @@ class IntegrationTestRunner(TemplatedTestRunner):
                 if basename in PERF_SENSATIVE_TESTS:
                     ret.append(
                         TestToRun(
-                            name=basename,
+                            name=f'{basename}_no_coverage',
                             kind='integration test w/o coverage to capture perf',
                             cmdline=f'{test} 2>&1',
                         )
@@ -570,34 +572,39 @@ def main() -> Optional[int]:
                         halt_event.set()
                         results[tid] = None
 
-        if started > 0:
-            percent_done = done / started
-        else:
-            percent_done = 0.0
-
         if failed == 0:
             color = ansi.fg('green')
         else:
             color = ansi.fg('red')
 
+        if started > 0:
+            percent_done = done / started * 100.0
+        else:
+            percent_done = 0.0
+
         if percent_done < 100.0:
             print(
                 text_utils.bar_graph_string(
                     done,
                     started,
                     text=text_utils.BarGraphText.FRACTION,
-                    width=80,
+                    width=72,
                     fgcolor=color,
                 ),
-                end='\r',
+                end='',
                 flush=True,
             )
-        time.sleep(0.5)
+            print(f'  {color}{now - start_time:.1f}s{ansi.reset()}', end='\r')
+        time.sleep(0.1)
 
     print(f'{ansi.clear_line()}Final Report:')
     if config.config['coverage']:
         code_coverage_report()
     total_problems = test_results_report(results)
+    if total_problems > 0:
+        logging.error(
+            'Exiting with non-zero return code %d due to problems.', total_problems
+        )
     return total_problems
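
One behavioral detail in the reshuffled percent_done block: the redraw guard (if percent_done < 100.0) expects a percentage, but the old expression produced a 0-to-1 fraction, so the guard stayed true and the bar kept being redrawn even after every started test had finished. Multiplying by 100.0 lets the guard actually stop the redraw at completion. A quick illustration with made-up counts:

    done, started = 10, 10
    old_value = done / started          # 1.0   -> 1.0 < 100.0, keeps redrawing
    new_value = done / started * 100.0  # 100.0 -> guard is False, redraw stops
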