4 A smart, fast test runner.
import logging
import os
import re
import subprocess
import threading
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

from overrides import overrides

import ansi
import bootstrap
import config
import exec_utils
import file_utils
import parallelize as par
import text_utils
import thread_utils
28 logger = logging.getLogger(__name__)
29 args = config.add_commandline_args(f'({__file__})', 'Args related to __file__')
30 args.add_argument('--unittests', '-u', action='store_true', help='Run unittests.')
31 args.add_argument('--doctests', '-d', action='store_true', help='Run doctests.')
32 args.add_argument('--integration', '-i', action='store_true', help='Run integration tests.')
34 '--coverage', '-c', action='store_true', help='Run tests and capture code coverage data'
37 HOME = os.environ['HOME']
41 class TestingParameters:
43 halt_event: threading.Event
49 tests_executed: List[str]
50 tests_succeeded: List[str]
51 tests_failed: List[str]
52 tests_timed_out: List[str]
55 class TestRunner(ABC, thread_utils.ThreadWithReturnValue):
56 def __init__(self, params: TestingParameters):
57 super().__init__(self, target=self.begin, args=[params])
59 self.test_results = TestResults(
67 def aggregate_test_results(self, result: TestResults):
68 self.test_results.tests_executed.extend(result.tests_executed)
69 self.test_results.tests_succeeded.extend(result.tests_succeeded)
70 self.test_results.tests_failed.extend(result.tests_failed)
71 self.test_results.tests_timed_out.extend(result.tests_timed_out)
74 def get_name(self) -> str:
78 def begin(self, params: TestingParameters) -> TestResults:
82 class TemplatedTestRunner(TestRunner, ABC):
84 def identify_tests(self) -> List[Any]:
88 def run_test(self, test: Any) -> TestResults:
91 def check_for_abort(self):
92 if self.params.halt_event.is_set():
93 logger.debug('Thread %s saw halt event; exiting.', self.get_name())
94 raise Exception("Kill myself!")
95 if self.params.halt_on_error:
96 if len(self.test_results.tests_failed) > 0:
97 logger.error('Thread %s saw abnormal results; exiting.', self.get_name())
98 raise Exception("Kill myself!")
100 def status_report(self, running: List[Any], done: List[Any]):
101 total = len(running) + len(done)
103 '%s: %d/%d in flight; %d/%d completed.',
111 def persist_output(self, test_name: str, message: str, output: str) -> None:
112 basename = file_utils.without_path(test_name)
113 dest = f'{basename}-output.txt'
114 with open(f'./test_output/{dest}', 'w') as wf:
115 print(message, file=wf)
116 print('-' * len(message), file=wf)
119 def execute_commandline(
124 timeout: float = 120.0,
128 logger.debug('%s: Running %s (%s)', self.get_name(), test_name, cmdline)
129 output = exec_utils.cmd(
131 timeout_seconds=timeout,
133 self.persist_output(test_name, f'{test_name} ({cmdline}) succeeded.', output)
134 logger.debug('%s (%s) succeeded', test_name, cmdline)
135 return TestResults(test_name, [test_name], [test_name], [], [])
136 except subprocess.TimeoutExpired as e:
137 msg = f'{self.get_name()}: {test_name} ({cmdline}) timed out after {e.timeout:.1f} seconds.'
140 '%s: %s output when it timed out: %s', self.get_name(), test_name, e.output
142 self.persist_output(test_name, msg, e.output)
150 except subprocess.CalledProcessError as e:
151 msg = f'{self.get_name()}: {test_name} ({cmdline}) failed; exit code {e.returncode}'
153 logger.debug('%s: %s output when it failed: %s', self.get_name(), test_name, e.output)
154 self.persist_output(test_name, msg, e.output)
164 def begin(self, params: TestingParameters) -> TestResults:
165 logger.debug('Thread %s started.', self.get_name())
166 interesting_tests = self.identify_tests()
167 running: List[Any] = []
169 for test in interesting_tests:
170 running.append(self.run_test(test))
172 while len(running) > 0:
173 self.status_report(running, done)
174 self.check_for_abort()
178 newly_finished.append(fut)
179 result = fut._resolve()
180 logger.debug('Test %s finished.', result.name)
181 self.aggregate_test_results(result)
183 for fut in newly_finished:
188 logger.debug('Thread %s finished.', self.get_name())
189 return self.test_results
192 class UnittestTestRunner(TemplatedTestRunner):
194 def get_name(self) -> str:
195 return "UnittestTestRunner"
198 def identify_tests(self) -> List[Any]:
199 return list(file_utils.expand_globs('*_test.py'))
202 def run_test(self, test: Any) -> TestResults:
203 if config.config['coverage']:
204 cmdline = f'coverage run --source {HOME}/lib {test} --unittests_ignore_perf'
207 return self.execute_commandline(test, cmdline)
210 class DoctestTestRunner(TemplatedTestRunner):
212 def get_name(self) -> str:
213 return "DoctestTestRunner"
216 def identify_tests(self) -> List[Any]:
218 out = exec_utils.cmd('grep -lR "^ *import doctest" /home/scott/lib/python_modules/*')
219 for line in out.split('\n'):
220 if re.match(r'.*\.py$', line):
221 if 'run_tests.py' not in line:
226 def run_test(self, test: Any) -> TestResults:
227 if config.config['coverage']:
228 cmdline = f'coverage run --source {HOME}/lib {test} 2>&1'
230 cmdline = f'python3 {test}'
231 return self.execute_commandline(test, cmdline)
234 class IntegrationTestRunner(TemplatedTestRunner):
236 def get_name(self) -> str:
237 return "IntegrationTestRunner"
240 def identify_tests(self) -> List[Any]:
241 return list(file_utils.expand_globs('*_itest.py'))
244 def run_test(self, test: Any) -> TestResults:
245 if config.config['coverage']:
246 cmdline = f'coverage run --source {HOME}/lib {test}'
249 return self.execute_commandline(test, cmdline)
252 def test_results_report(results: Dict[str, TestResults]) -> int:
254 for type, result in results.items():
255 print(f'{result.name}: ', end='')
257 f'{ansi.fg("green")}{len(result.tests_succeeded)}/{len(result.tests_executed)} passed{ansi.reset()}.'
259 if len(result.tests_failed) > 0:
260 print(f' ..{ansi.fg("red")}{len(result.tests_failed)} tests failed{ansi.reset()}:')
261 for test in result.tests_failed:
263 total_problems += len(result.tests_failed)
265 if len(result.tests_timed_out) > 0:
267 f' ..{ansi.fg("yellow")}{len(result.tests_timed_out)} tests timed out{ansi.reset()}:'
269 for test in result.tests_failed:
271 total_problems += len(result.tests_timed_out)
273 if total_problems > 0:
274 print('Reminder: look in ./test_output to view test output logs')
275 return total_problems
278 def code_coverage_report():
279 text_utils.header('Code Coverage')
280 exec_utils.cmd('coverage combine .coverage*')
281 out = exec_utils.cmd('coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover')
285 To recall this report w/o re-running the tests:
287 $ coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover
289 ...from the 'tests' directory. Note that subsequent calls to
290 run_tests.py with --coverage will klobber previous results. See:
292 https://coverage.readthedocs.io/en/6.2/
297 @bootstrap.initialize
298 def main() -> Optional[int]:
300 halt_event = threading.Event()
301 threads: List[TestRunner] = []
304 params = TestingParameters(
306 halt_event=halt_event,
309 if config.config['coverage']:
310 logger.debug('Clearing existing coverage data via "coverage erase".')
311 exec_utils.cmd('coverage erase')
313 if config.config['unittests']:
315 threads.append(UnittestTestRunner(params))
316 if config.config['doctests']:
318 threads.append(DoctestTestRunner(params))
319 if config.config['integration']:
321 threads.append(IntegrationTestRunner(params))
325 print('ERROR: one of --unittests, --doctests or --integration is required.')
328 for thread in threads:
331 results: Dict[str, TestResults] = {}
332 while len(results) != len(threads):
333 for thread in threads:
334 if not thread.is_alive():
336 if tid not in results:
337 result = thread.join()
339 results[tid] = result
340 if len(result.tests_failed) > 0:
342 'Thread %s returned abnormal results; killing the others.', tid
347 if config.config['coverage']:
348 code_coverage_report()
349 total_problems = test_results_report(results)
350 return total_problems
353 if __name__ == '__main__':