"""A smart, fast test runner. Used in a git pre-commit hook."""

import logging
import os
import re
import subprocess
import threading
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple

from overrides import overrides

# Local modules from this library, used throughout this file.
import ansi
import bootstrap
import config
import exec_utils
import file_utils
import parallelize as par
import smart_future
import text_utils
import thread_utils

logger = logging.getLogger(__name__)
args = config.add_commandline_args(f'({__file__})', 'Args related to __file__')
args.add_argument('--unittests', '-u', action='store_true', help='Run unittests.')
args.add_argument('--doctests', '-d', action='store_true', help='Run doctests.')
args.add_argument('--integration', '-i', action='store_true', help='Run integration tests.')
args.add_argument(
    '--all',
    '-a',
    action='store_true',
    help='Run unittests, doctests and integration tests. Equivalent to -u -d -i.',
)
args.add_argument(
    '--coverage', '-c', action='store_true', help='Run tests and capture code coverage data.'
)

HOME = os.environ['HOME']

# These tests will be run twice in --coverage mode: once to get code
# coverage and then again with coverage disabled. This is because
# they pay attention to code performance, which is adversely affected
# by coverage instrumentation.
PERF_SENSITIVE_TESTS = set(['/home/scott/lib/python_modules/tests/string_utils_test.py'])


@dataclass
class TestingParameters:
    halt_on_error: bool
    """Should we stop as soon as one error has occurred?"""

    halt_event: threading.Event
    """An event that, when set, indicates to stop ASAP."""
65 """The name of the test"""
68 """The kind of the test"""
71 """The command line to execute"""
77 """The name of this test / set of tests."""
79 tests_executed: List[str]
80 """Tests that were executed."""
82 tests_succeeded: List[str]
83 """Tests that succeeded."""
85 tests_failed: List[str]
86 """Tests that failed."""
88 tests_timed_out: List[str]
89 """Tests that timed out."""

    def __add__(self, other):
        self.tests_executed.extend(other.tests_executed)
        self.tests_succeeded.extend(other.tests_succeeded)
        self.tests_failed.extend(other.tests_failed)
        self.tests_timed_out.extend(other.tests_timed_out)
        return self

    def __repr__(self) -> str:
        out = f'{self.name}: '
        out += f'{ansi.fg("green")}'
        out += f'{len(self.tests_succeeded)}/{len(self.tests_executed)} passed'
        out += f'{ansi.reset()}.\n'

        if len(self.tests_failed) > 0:
            out += f' ..{ansi.fg("red")}'
            out += f'{len(self.tests_failed)} tests failed'
            out += f'{ansi.reset()}:\n'
            for test in self.tests_failed:
                out += f'    {test}\n'

        if len(self.tests_timed_out) > 0:
            out += f' ..{ansi.fg("yellow")}'
            out += f'{len(self.tests_timed_out)} tests timed out'
            out += f'{ansi.reset()}:\n'
            for test in self.tests_timed_out:
                out += f'    {test}\n'
        return out
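
# A sketch of how TestResults aggregate via __add__ (the values here are
# hypothetical, for illustration only):
#
#     a = TestResults('unittests', ['foo_test'], ['foo_test'], [], [])
#     b = TestResults('unittests', ['bar_test'], [], ['bar_test'], [])
#     a += b   # a now reports 1/2 passed and lists bar_test as failed.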


class TestRunner(ABC, thread_utils.ThreadWithReturnValue):
    """A base class for something that runs a test."""

    def __init__(self, params: TestingParameters):
        """Create a TestRunner.

        Args:
            params: Test running parameters.
        """
        super().__init__(target=self.begin, args=[params])
        self.params = params
        self.test_results = TestResults(
            name=self.get_name(),
            tests_executed=[],
            tests_succeeded=[],
            tests_failed=[],
            tests_timed_out=[],
        )
        self.tests_started = 0

    @abstractmethod
    def get_name(self) -> str:
        """The name of this test collection."""
        pass

    def get_status(self) -> Tuple[int, TestResults]:
        """Ask the TestRunner for its status."""
        return (self.tests_started, self.test_results)

    @abstractmethod
    def begin(self, params: TestingParameters) -> TestResults:
        """Start execution."""
        pass


class TemplatedTestRunner(TestRunner, ABC):
    """A TestRunner that has a recipe for executing its tests."""

    @abstractmethod
    def identify_tests(self) -> List[TestToRun]:
        """Return a list of TestToRun objects that should be executed."""
        pass

    @abstractmethod
    def run_test(self, test: TestToRun) -> TestResults:
        """Run a single test and return its TestResults."""
        pass

    def check_for_abort(self):
        """Periodically called to check to see if we need to stop."""
        if self.params.halt_event.is_set():
            logger.debug('Thread %s saw halt event; exiting.', self.get_name())
            raise Exception("Kill myself!")
        if self.params.halt_on_error:
            if len(self.test_results.tests_failed) > 0:
                logger.error('Thread %s saw abnormal results; exiting.', self.get_name())
                raise Exception("Kill myself!")

    def persist_output(self, test: TestToRun, message: str, output: str) -> None:
        """Called to save the output of a test run."""
        dest = f'{test.name}-output.txt'
        with open(f'./test_output/{dest}', 'w') as wf:
            print(message, file=wf)
            print('-' * len(message), file=wf)
            print(output, file=wf)

    def execute_commandline(
        self,
        test: TestToRun,
        timeout: float = 120.0,
    ) -> TestResults:
        """Execute a particular commandline to run a test."""
        try:
            output = exec_utils.cmd(
                test.cmdline,
                timeout_seconds=timeout,
            )
            self.persist_output(test, f'{test.name} ({test.cmdline}) succeeded.', output)
            logger.debug('%s: %s (%s) succeeded', self.get_name(), test.name, test.cmdline)
            return TestResults(test.name, [test.name], [test.name], [], [])
        except subprocess.TimeoutExpired as e:
            msg = f'{self.get_name()}: {test.name} ({test.cmdline}) timed out after {e.timeout:.1f} seconds.'
            logger.error(msg)
            logger.debug(
                '%s: %s output when it timed out: %s', self.get_name(), test.name, e.output
            )
            self.persist_output(test, msg, e.output.decode('utf-8'))
            return TestResults(test.name, [test.name], [], [], [test.name])
        except subprocess.CalledProcessError as e:
            msg = f'{self.get_name()}: {test.name} ({test.cmdline}) failed; exit code {e.returncode}'
            logger.error(msg)
            logger.debug('%s: %s output when it failed: %s', self.get_name(), test.name, e.output)
            self.persist_output(test, msg, e.output.decode('utf-8'))
            return TestResults(test.name, [test.name], [], [test.name], [])

    @overrides
    def begin(self, params: TestingParameters) -> TestResults:
        logger.debug('Thread %s started.', self.get_name())
        interesting_tests = self.identify_tests()
        logger.debug('%s: Identified %d tests to be run.', self.get_name(), len(interesting_tests))

        # Note: because of @par.parallelize on run_test, it actually
        # returns a SmartFuture with a TestResults inside of it.
        # That's the reason for this Any business.
        running: List[Any] = []
        for test_to_run in interesting_tests:
            running.append(self.run_test(test_to_run))
            logger.debug(
                '%s: Test %s started in the background.', self.get_name(), test_to_run.name
            )
            self.tests_started += 1

        for future in smart_future.wait_any(running):
            self.check_for_abort()
            result = future._resolve()
            logger.debug('Test %s finished.', result.name)
            self.test_results += result

        logger.debug('Thread %s finished.', self.get_name())
        return self.test_results
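
# To add a new category of tests, subclass TemplatedTestRunner and fill in
# the recipe's two blanks plus a name. A minimal sketch (FakeTestRunner and
# its cmdline are hypothetical, not part of this runner):
#
#     class FakeTestRunner(TemplatedTestRunner):
#         @overrides
#         def get_name(self) -> str:
#             return "Fake Tests"
#
#         @overrides
#         def identify_tests(self) -> List[TestToRun]:
#             return [TestToRun(name='fake', kind='fake test', cmdline='true 2>&1')]
#
#         @par.parallelize
#         def run_test(self, test: TestToRun) -> TestResults:
#             return self.execute_commandline(test)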


class UnittestTestRunner(TemplatedTestRunner):
    """Run all known Unittests."""

    @overrides
    def get_name(self) -> str:
        return "Unittests"

    @overrides
    def identify_tests(self) -> List[TestToRun]:
        ret = []
        for test in file_utils.expand_globs('*_test.py'):
            basename = file_utils.without_path(test)
            if config.config['coverage']:
                ret.append(
                    TestToRun(
                        name=basename,
                        kind='unittest capturing coverage',
                        cmdline=f'coverage run --source {HOME}/lib {test} --unittests_ignore_perf 2>&1',
                    )
                )
                if test in PERF_SENSITIVE_TESTS:
                    ret.append(
                        TestToRun(
                            name=basename,
                            kind='unittest w/o coverage to record perf',
                            cmdline=f'{test} 2>&1',
                        )
                    )
            else:
                ret.append(
                    TestToRun(
                        name=basename,
                        kind='unittest',
                        cmdline=f'{test} 2>&1',
                    )
                )
        return ret

    @par.parallelize
    def run_test(self, test: TestToRun) -> TestResults:
        return self.execute_commandline(test)


class DoctestTestRunner(TemplatedTestRunner):
    """Run all known Doctests."""

    @overrides
    def get_name(self) -> str:
        return "Doctests"

    @overrides
    def identify_tests(self) -> List[TestToRun]:
        ret = []
        out = exec_utils.cmd('grep -lR "^ *import doctest" /home/scott/lib/python_modules/*')
        for test in out.split('\n'):
            if re.match(r'.*\.py$', test):
                if 'run_tests.py' not in test:
                    basename = file_utils.without_path(test)
                    if config.config['coverage']:
                        ret.append(
                            TestToRun(
                                name=basename,
                                kind='doctest capturing coverage',
                                cmdline=f'coverage run --source {HOME}/lib {test} 2>&1',
                            )
                        )
                        if test in PERF_SENSITIVE_TESTS:
                            ret.append(
                                TestToRun(
                                    name=basename,
                                    kind='doctest w/o coverage to record perf',
                                    cmdline=f'python3 {test} 2>&1',
                                )
                            )
                    else:
                        ret.append(
                            TestToRun(name=basename, kind='doctest', cmdline=f'python3 {test} 2>&1')
                        )
        return ret

    @par.parallelize
    def run_test(self, test: TestToRun) -> TestResults:
        return self.execute_commandline(test)


class IntegrationTestRunner(TemplatedTestRunner):
    """Run all known Integration tests."""

    @overrides
    def get_name(self) -> str:
        return "Integration Tests"

    @overrides
    def identify_tests(self) -> List[TestToRun]:
        ret = []
        for test in file_utils.expand_globs('*_itest.py'):
            basename = file_utils.without_path(test)
            if config.config['coverage']:
                ret.append(
                    TestToRun(
                        name=basename,
                        kind='integration test capturing coverage',
                        cmdline=f'coverage run --source {HOME}/lib {test} 2>&1',
                    )
                )
                if test in PERF_SENSITIVE_TESTS:
                    ret.append(
                        TestToRun(
                            name=basename,
                            kind='integration test w/o coverage to capture perf',
                            cmdline=f'{test} 2>&1',
                        )
                    )
            else:
                ret.append(
                    TestToRun(name=basename, kind='integration test', cmdline=f'{test} 2>&1')
                )
        return ret

    @par.parallelize
    def run_test(self, test: TestToRun) -> TestResults:
        return self.execute_commandline(test)


def test_results_report(results: Dict[str, TestResults]) -> int:
    """Give a final report about the tests that were run."""
    total_problems = 0
    for result in results.values():
        print(result, end='')
        total_problems += len(result.tests_failed)
        total_problems += len(result.tests_timed_out)

    if total_problems > 0:
        print('Reminder: look in ./test_output to view test output logs.')
    return total_problems
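
# For reference, the report printed here looks something like this
# (illustrative output only; ANSI colors omitted):
#
#     Unittests: 10/12 passed.
#      ..2 tests failed:
#         foo_test
#         bar_test
#     Reminder: look in ./test_output to view test output logs.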


def code_coverage_report():
    """Give a final code coverage report."""
    text_utils.header('Code Coverage')
    exec_utils.cmd('coverage combine .coverage*')
    out = exec_utils.cmd('coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover')
    print(out)
    print(
        """To recall this report w/o re-running the tests:

    $ coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover

...from the 'tests' directory. Note that subsequent calls to
run_tests.py with --coverage will clobber previous results. See:

    https://coverage.readthedocs.io/en/6.2/
"""
    )


@bootstrap.initialize
def main() -> Optional[int]:
    halt_event = threading.Event()
    threads: List[TestRunner] = []

    params = TestingParameters(
        halt_on_error=True,
        halt_event=halt_event,
    )

    if config.config['coverage']:
        logger.debug('Clearing existing coverage data via "coverage erase".')
        exec_utils.cmd('coverage erase')

    if config.config['unittests'] or config.config['all']:
        threads.append(UnittestTestRunner(params))
    if config.config['doctests'] or config.config['all']:
        threads.append(DoctestTestRunner(params))
    if config.config['integration'] or config.config['all']:
        threads.append(IntegrationTestRunner(params))
    if not threads:
        print('ERROR: one of --unittests, --doctests or --integration is required.')
        return 1

    for thread in threads:
        thread.start()

    results: Dict[str, TestResults] = {}
    while len(results) != len(threads):
        started = 0
        done = 0
        failed = 0

        for thread in threads:
            (s, tr) = thread.get_status()
            started += s
            failed += len(tr.tests_failed) + len(tr.tests_timed_out)
            done += len(tr.tests_failed) + len(tr.tests_timed_out) + len(tr.tests_succeeded)
            if not thread.is_alive():
                tid = thread.name
                if tid not in results:
                    result = thread.join()
                    if result:
                        results[tid] = result
                        if len(result.tests_failed) > 0:
                            logger.error(
                                'Thread %s returned abnormal results; killing the others.', tid
                            )
                            halt_event.set()
                    else:
                        logger.error('Thread %s died without returning results.', tid)
                        results[tid] = TestResults(tid, [], [], [], [])

        if started > 0:
            percent_done = 100.0 * done / started
        else:
            percent_done = 0.0
        if failed == 0:
            color = ansi.fg('green')
        else:
            color = ansi.fg('red')

        if percent_done < 100.0:
            print(
                color
                + text_utils.bar_graph_string(
                    done,
                    started,
                    text=text_utils.BarGraphText.FRACTION,
                )
                + ansi.reset(),
                end='\r',
                flush=True,
            )
        time.sleep(0.1)

    print(f'{ansi.clear_line()}Final Report:')
    if config.config['coverage']:
        code_coverage_report()
    total_problems = test_results_report(results)
    return total_problems
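
# Typical invocations, e.g. from a git pre-commit hook (the flags are
# defined at the top of this file):
#
#     ./run_tests.py --all --coverage    # run everything, capturing coverage
#     ./run_tests.py -u -d               # just unittests and doctests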


if __name__ == '__main__':
    main()