Final touches on the new test runner.
author    Scott Gasch <[email protected]>
          Thu, 2 Jun 2022 18:09:16 +0000 (11:09 -0700)
committer Scott Gasch <[email protected]>
          Thu, 2 Jun 2022 18:09:16 +0000 (11:09 -0700)
tests/run_tests.py

index 7e7bad593da9f8b453626ef018393b9436548833..ce9d63e93c349b4a9f6c08f213cd6d1294fc0955 100755 (executable)
@@ -16,6 +16,7 @@ from typing import Any, Dict, List, Optional
 
 from overrides import overrides
 
+import ansi
 import bootstrap
 import config
 import exec_utils
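
The new ansi import is what colorizes the pass/fail summary further down.
For reference, here is a minimal sketch of what such a module might look
like, assuming it emits standard SGR terminal escape sequences; the
project's actual ansi module is richer than this stand-in.

    # Hypothetical stand-in for the project's ansi module.
    _SGR_FG = {'red': 31, 'green': 32, 'yellow': 33}

    def fg(name: str) -> str:
        """Switch the terminal foreground color."""
        return f'\033[{_SGR_FG[name]}m'

    def reset() -> str:
        """Restore default terminal attributes."""
        return '\033[0m'

    print(f'{fg("green")}12/12 passed{reset()}.')
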
@@ -56,7 +57,7 @@ class TestRunner(ABC, thread_utils.ThreadWithReturnValue):
         super().__init__(self, target=self.begin, args=[params])
         self.params = params
         self.test_results = TestResults(
-            name=f"All {self.get_name()} tests",
+            name=self.get_name(),
             tests_executed=[],
             tests_succeeded=[],
             tests_failed=[],
@@ -182,7 +183,7 @@ class TemplatedTestRunner(TestRunner, ABC):
             for fut in newly_finished:
                 running.remove(fut)
                 done.append(fut)
-            time.sleep(0.25)
+            time.sleep(1.0)
 
         logger.debug('Thread %s finished.', self.get_name())
         return self.test_results
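
The hunk above coarsens the runner's polling interval from 0.25s to 1.0s:
the loop only harvests finished test threads, so waking four times a second
buys nothing on suites that run for minutes. A minimal sketch of the
pattern, assuming plain threading.Thread workers rather than the project's
ThreadWithReturnValue:

    import threading
    import time
    from typing import List

    def drain(running: List[threading.Thread], done: List[threading.Thread]) -> None:
        # Move newly finished workers from `running` to `done`, sleeping
        # between passes so the poll loop stays cheap.
        while running:
            for t in [t for t in running if not t.is_alive()]:
                running.remove(t)
                done.append(t)
            time.sleep(1.0)
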
@@ -248,28 +249,35 @@ class IntegrationTestRunner(TemplatedTestRunner):
         return self.execute_commandline(test, cmdline)
 
 
-def test_results_report(results: Dict[str, TestResults]):
+def test_results_report(results: Dict[str, TestResults]) -> int:
+    total_problems = 0
     for type, result in results.items():
-        print(text_utils.header(f'{result.name}'))
-        print(f'  Ran {len(result.tests_executed)} tests.')
-        print(f'  ..{len(result.tests_succeeded)} tests succeeded.')
+        print(f'{result.name}: ', end='')
+        print(
+            f'{ansi.fg("green")}{len(result.tests_succeeded)}/{len(result.tests_executed)} passed{ansi.reset()}.'
+        )
         if len(result.tests_failed) > 0:
-            print(f'  ..{len(result.tests_failed)} tests failed:')
+            print(f'  ..{ansi.fg("red")}{len(result.tests_failed)} tests failed{ansi.reset()}:')
             for test in result.tests_failed:
                 print(f'    {test}')
+            total_problems += len(result.tests_failed)
 
         if len(result.tests_timed_out) > 0:
-            print(f'  ..{len(result.tests_timed_out)} tests timed out:')
+            print(
+                f'  ..{ansi.fg("yellow")}{len(result.tests_timed_out)} tests timed out{ansi.reset()}:'
+            )
-            for test in result.tests_failed:
+            for test in result.tests_timed_out:
                 print(f'    {test}')
+            total_problems += len(result.tests_timed_out)
 
-        if len(result.tests_failed) + len(result.tests_timed_out):
-            print('Reminder: look in ./test_output to view test output logs')
+    if total_problems > 0:
+        print('Reminder: look in ./test_output to view test output logs')
+    return total_problems
 
 
 def code_coverage_report():
     text_utils.header('Code Coverage')
-    out = exec_utils.cmd('coverage combine .coverage*')
+    exec_utils.cmd('coverage combine .coverage*')
     out = exec_utils.cmd('coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover')
     print(out)
     print(
@@ -282,7 +290,6 @@ To recall this report w/o re-running the tests:
run_tests.py with --coverage will clobber previous results.  See:
 
     https://coverage.readthedocs.io/en/6.2/
-
 """
     )
 
@@ -337,10 +344,10 @@ def main() -> Optional[int]:
                             halt_event.set()
         time.sleep(1.0)
 
-    test_results_report(results)
     if config.config['coverage']:
         code_coverage_report()
-    return 0
+    total_problems = test_results_report(results)
+    return total_problems
 
 
 if __name__ == '__main__':
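
With these changes the process exit status now reflects the run: main()
returns the total number of failed or timed-out tests instead of a
hard-coded 0, so CI can gate on the runner directly. A sketch of the
convention, with the results dict shape invented for illustration and
noting that the actual script may return through its bootstrap wrapper
rather than calling sys.exit itself:

    import sys

    def test_results_report(results: dict) -> int:
        # Stand-in for the real report: count problems across all suites.
        return sum(len(r['failed']) + len(r['timed_out']) for r in results.values())

    def main() -> int:
        results = {'unit': {'failed': [], 'timed_out': []}}  # hypothetical data
        return test_results_report(results)  # 0 means every test passed

    if __name__ == '__main__':
        sys.exit(main())  # nonzero exit status signals failure to the shell
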