4 A smart, fast test runner. Used in a git pre-commit hook.
import logging
import os
import re
import subprocess
import threading
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple

from overrides import overrides

import parallelize as par
logger = logging.getLogger(__name__)

# Register this module's commandline flags with the project config system.
args = config.add_commandline_args(f'({__file__})', 'Args related to __file__')
args.add_argument('--unittests', '-u', action='store_true', help='Run unittests.')
args.add_argument('--doctests', '-d', action='store_true', help='Run doctests.')
args.add_argument('--integration', '-i', action='store_true', help='Run integration tests.')
args.add_argument(
    '--coverage', '-c', action='store_true', help='Run tests and capture code coverage data'
)

# Used to locate the library source tree for coverage runs.
HOME = os.environ['HOME']
@dataclass
class TestingParameters:
    """Parameters shared by every test runner thread."""

    halt_on_error: bool
    """Should we stop as soon as one error has occurred?"""

    halt_event: threading.Event
    """An event that, when set, indicates to stop ASAP."""
53 """The name of this test / set of tests."""
55 tests_executed: List[str]
56 """Tests that were executed."""
58 tests_succeeded: List[str]
59 """Tests that succeeded."""
61 tests_failed: List[str]
62 """Tests that failed."""
64 tests_timed_out: List[str]
65 """Tests that timed out."""
67 def __add__(self, other):
68 self.tests_executed.extend(other.tests_executed)
69 self.tests_succeeded.extend(other.tests_succeeded)
70 self.tests_failed.extend(other.tests_failed)
71 self.tests_timed_out.extend(other.tests_timed_out)
76 def __repr__(self) -> str:
77 out = f'{self.name}: '
78 out += f'{ansi.fg("green")}'
79 out += f'{len(self.tests_succeeded)}/{len(self.tests_executed)} passed'
80 out += f'{ansi.reset()}.\n'
82 if len(self.tests_failed) > 0:
83 out += f' ..{ansi.fg("red")}'
84 out += f'{len(self.tests_failed)} tests failed'
85 out += f'{ansi.reset()}:\n'
86 for test in self.tests_failed:
90 if len(self.tests_timed_out) > 0:
91 out += f' ..{ansi.fg("yellow")}'
92 out += f'{len(self.tests_timed_out)} tests timed out'
93 out += f'{ansi.reset()}:\n'
94 for test in self.tests_failed:
class TestRunner(ABC, thread_utils.ThreadWithReturnValue):
    """A Base class for something that runs a test."""

    def __init__(self, params: TestingParameters):
        """Create a TestRunner.

        Args:
            params: Test running parameters.
        """
        # BUGFIX: do not pass self to super().__init__; threading.Thread's
        # first positional parameter is `group`, which must be None.
        super().__init__(target=self.begin, args=[params])
        self.params = params
        self.test_results = TestResults(
            name=self.get_name(),
            tests_executed=[],
            tests_succeeded=[],
            tests_failed=[],
            tests_timed_out=[],
        )
        self.tests_started = 0

    @abstractmethod
    def get_name(self) -> str:
        """The name of this test collection."""
        pass

    def get_status(self) -> Tuple[int, TestResults]:
        """Ask the TestRunner for its status: (tests started, results so far)."""
        return (self.tests_started, self.test_results)

    @abstractmethod
    def begin(self, params: TestingParameters) -> TestResults:
        """Start execution."""
        pass
class TemplatedTestRunner(TestRunner, ABC):
    """A TestRunner that has a recipe for executing the tests."""

    @abstractmethod
    def identify_tests(self) -> List[str]:
        """Return a list of tests that should be executed."""
        pass

    @abstractmethod
    def run_test(self, test: Any) -> TestResults:
        """Run a single test and return its TestResults."""
        pass

    def check_for_abort(self):
        """Periodically called to check to see if we need to stop.

        Raises if the shared halt event is set or (when halt_on_error is
        enabled) if we have already seen a failure.
        """
        if self.params.halt_event.is_set():
            logger.debug('Thread %s saw halt event; exiting.', self.get_name())
            raise Exception("Kill myself!")
        if self.params.halt_on_error:
            if len(self.test_results.tests_failed) > 0:
                logger.error('Thread %s saw abnormal results; exiting.', self.get_name())
                raise Exception("Kill myself!")

    def persist_output(self, test_name: str, message: str, output: str) -> None:
        """Called to save the output of a test run."""

        basename = file_utils.without_path(test_name)
        dest = f'{basename}-output.txt'
        with open(f'./test_output/{dest}', 'w') as wf:
            print(message, file=wf)
            print('-' * len(message), file=wf)
            # BUGFIX: actually persist the test's output; the parameter was
            # previously accepted but never written.
            print(output, file=wf)

    def execute_commandline(
        self,
        test_name: str,
        cmdline: str,
        timeout: float = 120.0,
    ) -> TestResults:
        """Execute a particular commandline to run a test.

        Args:
            test_name: the name of the test being run.
            cmdline: the commandline to execute.
            timeout: seconds to allow before declaring a timeout.

        Returns:
            A TestResults recording this one test as succeeded, failed or
            timed out.
        """
        try:
            logger.debug('%s: Running %s (%s)', self.get_name(), test_name, cmdline)
            output = exec_utils.cmd(
                cmdline,
                timeout_seconds=timeout,
            )
            self.persist_output(test_name, f'{test_name} ({cmdline}) succeeded.', output)
            logger.debug('%s (%s) succeeded', test_name, cmdline)
            return TestResults(test_name, [test_name], [test_name], [], [])
        except subprocess.TimeoutExpired as e:
            msg = f'{self.get_name()}: {test_name} ({cmdline}) timed out after {e.timeout:.1f} seconds.'
            logger.error(msg)
            logger.debug(
                '%s: %s output when it timed out: %s', self.get_name(), test_name, e.output
            )
            self.persist_output(test_name, msg, e.output.decode('utf-8'))
            return TestResults(
                test_name,
                [test_name],
                [],
                [],
                [test_name],
            )
        except subprocess.CalledProcessError as e:
            msg = f'{self.get_name()}: {test_name} ({cmdline}) failed; exit code {e.returncode}'
            logger.error(msg)
            logger.debug('%s: %s output when it failed: %s', self.get_name(), test_name, e.output)
            self.persist_output(test_name, msg, e.output.decode('utf-8'))
            return TestResults(
                test_name,
                [test_name],
                [],
                [test_name],
                [],
            )

    @overrides
    def begin(self, params: TestingParameters) -> TestResults:
        """Run all identified tests and aggregate their results."""
        logger.debug('Thread %s started.', self.get_name())
        interesting_tests = self.identify_tests()

        # run_test implementations are expected to return future-like objects
        # (resolved below via smart_future) so the tests run in parallel.
        running: List[Any] = []
        for test in interesting_tests:
            running.append(self.run_test(test))
        self.tests_started = len(running)

        for future in smart_future.wait_any(running):
            self.check_for_abort()
            result = future._resolve()
            logger.debug('Test %s finished.', result.name)
            self.test_results += result

        logger.debug('Thread %s finished.', self.get_name())
        return self.test_results
class UnittestTestRunner(TemplatedTestRunner):
    """Run all known Unittests."""

    @overrides
    def get_name(self) -> str:
        return "Unittests"

    @overrides
    def identify_tests(self) -> List[str]:
        # Unittests are the files matching *_test.py.
        return list(file_utils.expand_globs('*_test.py'))

    @par.parallelize
    def run_test(self, test: Any) -> TestResults:
        if config.config['coverage']:
            cmdline = f'coverage run --source {HOME}/lib {test} --unittests_ignore_perf'
        else:
            # NOTE(review): mirrors DoctestTestRunner's python3 invocation;
            # confirm the exact non-coverage commandline.
            cmdline = f'python3 {test} --unittests_ignore_perf'
        return self.execute_commandline(test, cmdline)
class DoctestTestRunner(TemplatedTestRunner):
    """Run all known Doctests."""

    @overrides
    def get_name(self) -> str:
        return "Doctests"

    @overrides
    def identify_tests(self) -> List[str]:
        # Any module that imports doctest (other than this runner) is assumed
        # to contain doctests.  Generalized to use HOME rather than a
        # hard-coded /home/scott path, consistent with the coverage cmdlines.
        ret = []
        out = exec_utils.cmd(f'grep -lR "^ *import doctest" {HOME}/lib/python_modules/*')
        for line in out.split('\n'):
            if re.match(r'.*\.py$', line):
                if 'run_tests.py' not in line:
                    ret.append(line)
        return ret

    @par.parallelize
    def run_test(self, test: Any) -> TestResults:
        if config.config['coverage']:
            cmdline = f'coverage run --source {HOME}/lib {test} 2>&1'
        else:
            cmdline = f'python3 {test}'
        return self.execute_commandline(test, cmdline)
class IntegrationTestRunner(TemplatedTestRunner):
    """Run all known Integration tests."""

    @overrides
    def get_name(self) -> str:
        return "Integration Tests"

    @overrides
    def identify_tests(self) -> List[str]:
        # Integration tests are the files matching *_itest.py.
        return list(file_utils.expand_globs('*_itest.py'))

    @par.parallelize
    def run_test(self, test: Any) -> TestResults:
        if config.config['coverage']:
            cmdline = f'coverage run --source {HOME}/lib {test}'
        else:
            cmdline = f'python3 {test}'
        return self.execute_commandline(test, cmdline)
301 def test_results_report(results: Dict[str, TestResults]) -> int:
302 """Give a final report about the tests that were run."""
304 for result in results.values():
305 print(result, end='')
306 total_problems += len(result.tests_failed)
307 total_problems += len(result.tests_timed_out)
309 if total_problems > 0:
310 print('Reminder: look in ./test_output to view test output logs')
311 return total_problems
def code_coverage_report():
    """Give a final code coverage report."""
    text_utils.header('Code Coverage')
    # Merge the per-process .coverage* data files, then print the report.
    exec_utils.cmd('coverage combine .coverage*')
    out = exec_utils.cmd('coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover')
    print(out)
    print(
        """To recall this report w/o re-running the tests:

    $ coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover

...from the 'tests' directory.  Note that subsequent calls to
run_tests.py with --coverage will klobber previous results.  See:

    https://coverage.readthedocs.io/en/6.2/
"""
    )
@bootstrap.initialize
def main() -> Optional[int]:
    """Run the requested test suites in parallel threads and report.

    Returns:
        The total number of problems seen (0 means success), suitable as a
        process exit code for a git pre-commit hook.
    """
    halt_event = threading.Event()
    threads: List[TestRunner] = []

    params = TestingParameters(
        halt_on_error=True,
        halt_event=halt_event,
    )

    if config.config['coverage']:
        logger.debug('Clearing existing coverage data via "coverage erase".')
        exec_utils.cmd('coverage erase')

    if config.config['unittests']:
        threads.append(UnittestTestRunner(params))
    if config.config['doctests']:
        threads.append(DoctestTestRunner(params))
    if config.config['integration']:
        threads.append(IntegrationTestRunner(params))

    if not threads:
        print('ERROR: one of --unittests, --doctests or --integration is required.')
        return 1

    for thread in threads:
        thread.start()

    # Poll the runner threads, displaying progress until all have finished.
    results: Dict[str, TestResults] = {}
    while len(results) != len(threads):
        started = 0
        done = 0
        failed = 0

        for thread in threads:
            if not thread.is_alive():
                tid = thread.name
                if tid not in results:
                    result = thread.join()
                    if result:
                        results[tid] = result
                        if len(result.tests_failed) > 0:
                            logger.error(
                                'Thread %s returned abnormal results; killing the others.', tid
                            )
                            halt_event.set()

            (s, tr) = thread.get_status()
            started += s
            # BUGFIX: accumulate only this thread's problems into done;
            # previously the running `failed` total was re-added per thread,
            # double counting failures in the progress estimate.
            problems = len(tr.tests_failed) + len(tr.tests_timed_out)
            failed += problems
            done += problems + len(tr.tests_succeeded)

        # Guard against division by zero before any tests have started.
        if started > 0:
            percent_done = done / started
        else:
            percent_done = 0.0

        if failed == 0:
            color = ansi.fg('green')
        else:
            color = ansi.fg('red')

        if percent_done < 100.0:
            # NOTE(review): bar_graph's exact signature isn't visible here;
            # confirm the argument list against text_utils.
            print(
                text_utils.bar_graph(
                    percent_done,
                    fgcolor=color,
                ),
                end='\r',
                flush=True,
            )
        time.sleep(0.5)

    if config.config['coverage']:
        code_coverage_report()
    total_problems = test_results_report(results)
    return total_problems
if __name__ == '__main__':
    main()