4 A smart, fast test runner. Used in a git pre-commit hook.
13 from abc import ABC, abstractmethod
14 from dataclasses import dataclass
15 from typing import Any, Dict, List, Optional, Tuple
17 from overrides import overrides
24 import parallelize as par
logger = logging.getLogger(__name__)

# Commandline flags controlling which test categories to run.
# NOTE(review): the openers of the --all/--coverage add_argument calls are not
# visible in this view and were reconstructed from the visible fragments.
args = config.add_commandline_args(f'({__file__})', 'Args related to __file__')
args.add_argument('--unittests', '-u', action='store_true', help='Run unittests.')
args.add_argument('--doctests', '-d', action='store_true', help='Run doctests.')
args.add_argument('--integration', '-i', action='store_true', help='Run integration tests.')
args.add_argument(
    '--all',
    '-a',
    action='store_true',
    # Fixed typo: "Equivalient" -> "Equivalent".
    help='Run unittests, doctests and integration tests. Equivalent to -u -d -i',
)
args.add_argument(
    '--coverage', '-c', action='store_true', help='Run tests and capture code coverage data'
)

# The invoking user's home directory; used to locate the library source tree.
HOME = os.environ['HOME']
@dataclass
class TestingParameters:
    """Parameters shared by every test-runner thread."""

    # Should we stop as soon as one error has occurred?
    # NOTE(review): this field's declaration was missing in the reviewed view
    # though its docstring and readers (params.halt_on_error) were present.
    halt_on_error: bool

    # An event that, when set, indicates to stop ASAP.
    halt_event: threading.Event
59 """The name of this test / set of tests."""
61 tests_executed: List[str]
62 """Tests that were executed."""
64 tests_succeeded: List[str]
65 """Tests that succeeded."""
67 tests_failed: List[str]
68 """Tests that failed."""
70 tests_timed_out: List[str]
71 """Tests that timed out."""
73 def __add__(self, other):
74 self.tests_executed.extend(other.tests_executed)
75 self.tests_succeeded.extend(other.tests_succeeded)
76 self.tests_failed.extend(other.tests_failed)
77 self.tests_timed_out.extend(other.tests_timed_out)
82 def __repr__(self) -> str:
83 out = f'{self.name}: '
84 out += f'{ansi.fg("green")}'
85 out += f'{len(self.tests_succeeded)}/{len(self.tests_executed)} passed'
86 out += f'{ansi.reset()}.\n'
88 if len(self.tests_failed) > 0:
89 out += f' ..{ansi.fg("red")}'
90 out += f'{len(self.tests_failed)} tests failed'
91 out += f'{ansi.reset()}:\n'
92 for test in self.tests_failed:
96 if len(self.tests_timed_out) > 0:
97 out += f' ..{ansi.fg("yellow")}'
98 out += f'{len(self.tests_timed_out)} tests timed out'
99 out += f'{ansi.reset()}:\n'
100 for test in self.tests_failed:
class TestRunner(ABC, thread_utils.ThreadWithReturnValue):
    """A Base class for something that runs a test."""

    def __init__(self, params: TestingParameters):
        """Create a TestRunner.

        Args:
            params: Test running parameters.
        """
        # NOTE(review): passing self explicitly to super().__init__ looks odd
        # but depends on thread_utils.ThreadWithReturnValue's signature, which
        # is not visible here -- confirm before changing.
        super().__init__(self, target=self.begin, args=[params])
        # BUGFIX: params must be stored; subclasses read self.params (e.g. in
        # check_for_abort) but no assignment was present in the reviewed code.
        self.params = params
        self.test_results = TestResults(
            name=self.get_name(),
            tests_executed=[],
            tests_succeeded=[],
            tests_failed=[],
            tests_timed_out=[],
        )
        # Number of tests this runner has kicked off so far.
        self.tests_started = 0

    @abstractmethod
    def get_name(self) -> str:
        """The name of this test collection."""
        pass

    def get_status(self) -> Tuple[int, TestResults]:
        """Ask the TestRunner for its status."""
        return (self.tests_started, self.test_results)

    @abstractmethod
    def begin(self, params: TestingParameters) -> TestResults:
        """Start execution."""
        pass
class TemplatedTestRunner(TestRunner, ABC):
    """A TestRunner that has a recipe for executing the tests."""

    @abstractmethod
    def identify_tests(self) -> List[str]:
        """Return a list of tests that should be executed."""
        pass

    @abstractmethod
    def run_test(self, test: Any) -> TestResults:
        """Run a single test and return its TestResults."""
        pass

    def check_for_abort(self) -> None:
        """Periodically called to check to see if we need to stop.

        Raises:
            Exception: if the global halt event is set, or if
                --halt_on_error is set and a failure was already recorded.
        """
        if self.params.halt_event.is_set():
            logger.debug('Thread %s saw halt event; exiting.', self.get_name())
            raise Exception("Kill myself!")
        if self.params.halt_on_error:
            if len(self.test_results.tests_failed) > 0:
                logger.error('Thread %s saw abnormal results; exiting.', self.get_name())
                raise Exception("Kill myself!")

    def persist_output(self, test_name: str, message: str, output: str) -> None:
        """Called to save the output of a test run."""
        # One file per test under ./test_output, named after the test file.
        basename = file_utils.without_path(test_name)
        dest = f'{basename}-output.txt'
        with open(f'./test_output/{dest}', 'w') as wf:
            print(message, file=wf)
            print('-' * len(message), file=wf)
            # NOTE(review): writing the captured output was not visible in the
            # reviewed code but is implied by the parameter and docstring.
            print(output, file=wf)

    def execute_commandline(
        self,
        test_name: str,
        cmdline: str,
        timeout: float = 120.0,
    ) -> TestResults:
        """Execute a particular commandline to run a test.

        Args:
            test_name: the name to report results under.
            cmdline: the shell command to run.
            timeout: max seconds to allow the command to run.

        Returns:
            A TestResults recording this one test as succeeded, failed, or
            timed out.  NOTE(review): the exception-path returns were missing
            in the reviewed code (handlers fell through returning None).
        """
        try:
            logger.debug('%s: Running %s (%s)', self.get_name(), test_name, cmdline)
            output = exec_utils.cmd(
                cmdline,
                timeout_seconds=timeout,
            )
            self.persist_output(test_name, f'{test_name} ({cmdline}) succeeded.', output)
            logger.debug('%s (%s) succeeded', test_name, cmdline)
            return TestResults(test_name, [test_name], [test_name], [], [])
        except subprocess.TimeoutExpired as e:
            msg = f'{self.get_name()}: {test_name} ({cmdline}) timed out after {e.timeout:.1f} seconds.'
            logger.error(msg)
            logger.debug(
                '%s: %s output when it timed out: %s', self.get_name(), test_name, e.output
            )
            self.persist_output(test_name, msg, e.output.decode('utf-8'))
            return TestResults(test_name, [test_name], [], [], [test_name])
        except subprocess.CalledProcessError as e:
            msg = f'{self.get_name()}: {test_name} ({cmdline}) failed; exit code {e.returncode}'
            logger.error(msg)
            logger.debug('%s: %s output when it failed: %s', self.get_name(), test_name, e.output)
            self.persist_output(test_name, msg, e.output.decode('utf-8'))
            return TestResults(test_name, [test_name], [], [test_name], [])

    @overrides
    def begin(self, params: TestingParameters) -> TestResults:
        """Start execution: discover tests, run them, aggregate results."""
        logger.debug('Thread %s started.', self.get_name())
        interesting_tests = self.identify_tests()

        # Kick off every test; run_test returns something future-like that
        # smart_future.wait_any can iterate as each completes.
        running: List[Any] = []
        for test in interesting_tests:
            running.append(self.run_test(test))
        self.tests_started = len(running)

        for future in smart_future.wait_any(running):
            self.check_for_abort()
            result = future._resolve()
            logger.debug('Test %s finished.', result.name)
            self.test_results += result

        logger.debug('Thread %s finished.', self.get_name())
        return self.test_results
class UnittestTestRunner(TemplatedTestRunner):
    """Run all known Unittests."""

    @overrides
    def get_name(self) -> str:
        # NOTE(review): the return value was not visible in the reviewed code;
        # "Unittests" is inferred from the class docstring -- confirm.
        return "Unittests"

    @overrides
    def identify_tests(self) -> List[str]:
        # Unittests are files matching *_test.py.
        return list(file_utils.expand_globs('*_test.py'))

    @overrides
    def run_test(self, test: Any) -> TestResults:
        if config.config['coverage']:
            # Run under coverage so results can be merged for the final report.
            cmdline = f'coverage run --source {HOME}/lib {test} --unittests_ignore_perf'
        else:
            # BUGFIX: without this else branch cmdline was unbound when
            # --coverage was off.  NOTE(review): exact commandline
            # reconstructed from the sibling runners -- confirm.
            cmdline = f'python3 {test} --unittests_ignore_perf'
        return self.execute_commandline(test, cmdline)
class DoctestTestRunner(TemplatedTestRunner):
    """Run all known Doctests."""

    @overrides
    def get_name(self) -> str:
        # NOTE(review): the return value was not visible in the reviewed code;
        # "Doctests" is inferred from the class docstring -- confirm.
        return "Doctests"

    @overrides
    def identify_tests(self) -> List[str]:
        # Any module that imports doctest is assumed to contain doctests.
        # Use the HOME constant rather than a hardcoded /home/scott path so
        # the runner works for other users (consistent with run_test below).
        ret: List[str] = []
        out = exec_utils.cmd(f'grep -lR "^ *import doctest" {HOME}/lib/python_modules/*')
        for line in out.split('\n'):
            if re.match(r'.*\.py$', line):
                # Skip this test runner itself.
                if 'run_tests.py' not in line:
                    ret.append(line)
        return ret

    @overrides
    def run_test(self, test: Any) -> TestResults:
        if config.config['coverage']:
            cmdline = f'coverage run --source {HOME}/lib {test} 2>&1'
        else:
            cmdline = f'python3 {test}'
        return self.execute_commandline(test, cmdline)
class IntegrationTestRunner(TemplatedTestRunner):
    """Run all known Integration tests."""

    @overrides
    def get_name(self) -> str:
        return "Integration Tests"

    @overrides
    def identify_tests(self) -> List[str]:
        # Integration tests are files matching *_itest.py.
        return list(file_utils.expand_globs('*_itest.py'))

    @overrides
    def run_test(self, test: Any) -> TestResults:
        if config.config['coverage']:
            cmdline = f'coverage run --source {HOME}/lib {test}'
        else:
            # BUGFIX: without this else branch cmdline was unbound when
            # --coverage was off.  NOTE(review): exact commandline
            # reconstructed from the sibling runners -- confirm.
            cmdline = f'python3 {test}'
        return self.execute_commandline(test, cmdline)
307 def test_results_report(results: Dict[str, TestResults]) -> int:
308 """Give a final report about the tests that were run."""
310 for result in results.values():
311 print(result, end='')
312 total_problems += len(result.tests_failed)
313 total_problems += len(result.tests_timed_out)
315 if total_problems > 0:
316 print('Reminder: look in ./test_output to view test output logs')
317 return total_problems
def code_coverage_report():
    """Give a final code coverage report."""
    text_utils.header('Code Coverage')
    # Merge the per-process .coverage* files before reporting.
    exec_utils.cmd('coverage combine .coverage*')
    out = exec_utils.cmd('coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover')
    # NOTE(review): in the reviewed code `out` was computed but never shown
    # and the help text below was an orphaned fragment; the prints were
    # reconstructed.  Fixed user-facing typo "klobber" -> "clobber".
    print(out)
    print(
        """To recall this report w/o re-running the tests:

    $ coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover

...from the 'tests' directory.  Note that subsequent calls to
run_tests.py with --coverage will clobber previous results.  See:

    https://coverage.readthedocs.io/en/6.2/
"""
    )
@bootstrap.initialize
def main() -> Optional[int]:
    # Event used to signal every runner thread to halt early.
    halt_event = threading.Event()
    threads: List[TestRunner] = []

    params = TestingParameters(
        halt_event=halt_event,
    )

    # With --coverage, clear stale coverage data before running anything.
    if config.config['coverage']:
        logger.debug('Clearing existing coverage data via "coverage erase".')
        exec_utils.cmd('coverage erase')

    # Spawn one runner thread per selected test category.
    if config.config['unittests'] or config.config['all']:
        threads.append(UnittestTestRunner(params))
    if config.config['doctests'] or config.config['all']:
        threads.append(DoctestTestRunner(params))
    if config.config['integration'] or config.config['all']:
        threads.append(IntegrationTestRunner(params))

    print('ERROR: one of --unittests, --doctests or --integration is required.')

    # NOTE(review): the loop body is not visible here; presumably it starts
    # each thread -- confirm.
    for thread in threads:

    # Poll the runner threads, tally progress, and collect each thread's
    # TestResults as it finishes, until all threads are accounted for.
    results: Dict[str, TestResults] = {}
    while len(results) != len(threads):
        for thread in threads:
            (s, tr) = thread.get_status()
            failed += len(tr.tests_failed) + len(tr.tests_timed_out)
            done += failed + len(tr.tests_succeeded)
            if not thread.is_alive():
                if tid not in results:
                    # join() returns the thread's TestResults payload.
                    result = thread.join()
                    results[tid] = result
                    if len(result.tests_failed) > 0:
                        'Thread %s returned abnormal results; killing the others.', tid

        percent_done = done / started

        # Progress bar color: green on success so far, red otherwise.
        color = ansi.fg('green')
        color = ansi.fg('red')

        if percent_done < 100.0:
            text_utils.bar_graph_string(
                text=text_utils.BarGraphText.FRACTION,

    print(f'{ansi.clear_line()}Final Report:')
    if config.config['coverage']:
        code_coverage_report()
    # Exit status is the total number of failures/timeouts (0 == success).
    total_problems = test_results_report(results)
    return total_problems
if __name__ == '__main__':
    # NOTE(review): the guard body was not visible in the reviewed code; the
    # conventional call to main() was restored.
    main()