HOME = os.environ['HOME']
+# These tests will be run twice in --coverage mode: once to gather code
+# coverage and then again with coverage disabled, because they pay
+# attention to code performance, which is adversely affected by coverage.
+PERF_SENSITIVE_TESTS = set(['/home/scott/lib/python_modules/tests/string_utils_test.py'])
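+# The test runners below check each discovered test's path against this set
+# when deciding whether to schedule the extra non-coverage run.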
+
@dataclass
class TestingParameters:
"""An event that, when set, indicates to stop ASAP."""
+@dataclass
+class TestToRun:
+ name: str
+ """The name of the test"""
+
+ kind: str
+ """The kind of the test"""
+
+ cmdline: str
+ """The command line to execute"""
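+
+    # A hypothetical instance, for illustration:
+    #   TestToRun(name='foo_test.py', kind='unittest', cmdline='foo_test.py 2>&1')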
+
+
@dataclass
class TestResults:
name: str
"""A TestRunner that has a recipe for executing the tests."""
@abstractmethod
- def identify_tests(self) -> List[str]:
- """Return a list of tests that should be executed."""
+ def identify_tests(self) -> List[TestToRun]:
+        """Return a list of TestToRun objects that should be executed."""
pass
@abstractmethod
- def run_test(self, test: Any) -> TestResults:
+ def run_test(self, test: TestToRun) -> TestResults:
"""Run a single test and return its TestResults."""
pass
logger.error('Thread %s saw abnormal results; exiting.', self.get_name())
raise Exception("Kill myself!")
- def persist_output(self, test_name: str, message: str, output: str) -> None:
+ def persist_output(self, test: TestToRun, message: str, output: str) -> None:
"""Called to save the output of a test run."""
- basename = file_utils.without_path(test_name)
- dest = f'{basename}-output.txt'
+ dest = f'{test.name}-output.txt'
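+        # Each test's output is saved under ./test_output/<test name>-output.txt.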
with open(f'./test_output/{dest}', 'w') as wf:
print(message, file=wf)
print('-' * len(message), file=wf)
def execute_commandline(
self,
- test_name: str,
- cmdline: str,
+ test: TestToRun,
*,
timeout: float = 120.0,
) -> TestResults:
"""Execute a particular commandline to run a test."""
try:
- logger.debug('%s: Running %s (%s)', self.get_name(), test_name, cmdline)
output = exec_utils.cmd(
- cmdline,
+ test.cmdline,
timeout_seconds=timeout,
)
- self.persist_output(test_name, f'{test_name} ({cmdline}) succeeded.', output)
- logger.debug('%s (%s) succeeded', test_name, cmdline)
- return TestResults(test_name, [test_name], [test_name], [], [])
+ self.persist_output(test, f'{test.name} ({test.cmdline}) succeeded.', output)
+ logger.debug('%s: %s (%s) succeeded', self.get_name(), test.name, test.cmdline)
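+            # Success: record this test as executed and succeeded.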
+ return TestResults(test.name, [test.name], [test.name], [], [])
except subprocess.TimeoutExpired as e:
- msg = f'{self.get_name()}: {test_name} ({cmdline}) timed out after {e.timeout:.1f} seconds.'
+ msg = f'{self.get_name()}: {test.name} ({test.cmdline}) timed out after {e.timeout:.1f} seconds.'
logger.error(msg)
logger.debug(
- '%s: %s output when it timed out: %s', self.get_name(), test_name, e.output
+ '%s: %s output when it timed out: %s', self.get_name(), test.name, e.output
)
- self.persist_output(test_name, msg, e.output.decode('utf-8'))
+ self.persist_output(test, msg, e.output.decode('utf-8'))
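+            # Timeout: record this test as executed but timed out.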
return TestResults(
- test_name,
- [test_name],
+ test.name,
+ [test.name],
[],
[],
- [test_name],
+ [test.name],
)
except subprocess.CalledProcessError as e:
- msg = f'{self.get_name()}: {test_name} ({cmdline}) failed; exit code {e.returncode}'
+ msg = (
+ f'{self.get_name()}: {test.name} ({test.cmdline}) failed; exit code {e.returncode}'
+ )
logger.error(msg)
- logger.debug('%s: %s output when it failed: %s', self.get_name(), test_name, e.output)
- self.persist_output(test_name, msg, e.output.decode('utf-8'))
+ logger.debug('%s: %s output when it failed: %s', self.get_name(), test.name, e.output)
+ self.persist_output(test, msg, e.output.decode('utf-8'))
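+            # Failure: record this test as executed but failed.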
return TestResults(
- test_name,
- [test_name],
+ test.name,
+ [test.name],
[],
- [test_name],
+ [test.name],
[],
)
def begin(self, params: TestingParameters) -> TestResults:
logger.debug('Thread %s started.', self.get_name())
interesting_tests = self.identify_tests()
+ logger.debug('%s: Identified %d tests to be run.', self.get_name(), len(interesting_tests))
+        # Note: because run_test is decorated with @parallelize, each call
+        # actually returns a SmartFuture with a TestResults inside of it.
+        # That's the reason for the List[Any] below.
running: List[Any] = []
- for test in interesting_tests:
- running.append(self.run_test(test))
- self.tests_started = len(running)
+ for test_to_run in interesting_tests:
+ running.append(self.run_test(test_to_run))
+ logger.debug(
+ '%s: Test %s started in the background.', self.get_name(), test_to_run.name
+ )
+ self.tests_started += 1
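+        # Wait for the background tests' SmartFutures and check their results.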
for future in smart_future.wait_any(running):
self.check_for_abort()
return "Unittests"
@overrides
- def identify_tests(self) -> List[str]:
- return list(file_utils.expand_globs('*_test.py'))
+ def identify_tests(self) -> List[TestToRun]:
+ ret = []
+ for test in file_utils.expand_globs('*_test.py'):
+ basename = file_utils.without_path(test)
+ if config.config['coverage']:
+ ret.append(
+ TestToRun(
+ name=basename,
+ kind='unittest capturing coverage',
+ cmdline=f'coverage run --source {HOME}/lib {test} --unittests_ignore_perf 2>&1',
+ )
+ )
+                if test in PERF_SENSITIVE_TESTS:
+ ret.append(
+ TestToRun(
+ name=basename,
+ kind='unittest w/o coverage to record perf',
+ cmdline=f'{test} 2>&1',
+ )
+ )
+ else:
+ ret.append(
+ TestToRun(
+ name=basename,
+ kind='unittest',
+ cmdline=f'{test} 2>&1',
+ )
+ )
+ return ret
@par.parallelize
- def run_test(self, test: Any) -> TestResults:
- if config.config['coverage']:
- cmdline = f'coverage run --source {HOME}/lib {test} --unittests_ignore_perf'
- else:
- cmdline = test
- return self.execute_commandline(test, cmdline)
+ def run_test(self, test: TestToRun) -> TestResults:
+ return self.execute_commandline(test)
class DoctestTestRunner(TemplatedTestRunner):
return "Doctests"
@overrides
- def identify_tests(self) -> List[str]:
+ def identify_tests(self) -> List[TestToRun]:
ret = []
out = exec_utils.cmd('grep -lR "^ *import doctest" /home/scott/lib/python_modules/*')
- for line in out.split('\n'):
- if re.match(r'.*\.py$', line):
- if 'run_tests.py' not in line:
- ret.append(line)
+ for test in out.split('\n'):
+ if re.match(r'.*\.py$', test):
+ if 'run_tests.py' not in test:
+ basename = file_utils.without_path(test)
+ if config.config['coverage']:
+ ret.append(
+ TestToRun(
+ name=basename,
+ kind='doctest capturing coverage',
+ cmdline=f'coverage run --source {HOME}/lib {test} 2>&1',
+ )
+ )
+                        if test in PERF_SENSITIVE_TESTS:
+ ret.append(
+ TestToRun(
+ name=basename,
+ kind='doctest w/o coverage to record perf',
+ cmdline=f'python3 {test} 2>&1',
+ )
+ )
+ else:
+ ret.append(
+ TestToRun(name=basename, kind='doctest', cmdline=f'python3 {test} 2>&1')
+ )
return ret
@par.parallelize
- def run_test(self, test: Any) -> TestResults:
- if config.config['coverage']:
- cmdline = f'coverage run --source {HOME}/lib {test} 2>&1'
- else:
- cmdline = f'python3 {test}'
- return self.execute_commandline(test, cmdline)
+ def run_test(self, test: TestToRun) -> TestResults:
+ return self.execute_commandline(test)
class IntegrationTestRunner(TemplatedTestRunner):
return "Integration Tests"
@overrides
- def identify_tests(self) -> List[str]:
- return list(file_utils.expand_globs('*_itest.py'))
+ def identify_tests(self) -> List[TestToRun]:
+ ret = []
+ for test in file_utils.expand_globs('*_itest.py'):
+ basename = file_utils.without_path(test)
+ if config.config['coverage']:
+ ret.append(
+ TestToRun(
+ name=basename,
+ kind='integration test capturing coverage',
+ cmdline=f'coverage run --source {HOME}/lib {test} 2>&1',
+ )
+ )
+                if test in PERF_SENSITIVE_TESTS:
+ ret.append(
+ TestToRun(
+ name=basename,
+                        kind='integration test w/o coverage to record perf',
+ cmdline=f'{test} 2>&1',
+ )
+ )
+ else:
+ ret.append(
+ TestToRun(name=basename, kind='integration test', cmdline=f'{test} 2>&1')
+ )
+ return ret
@par.parallelize
- def run_test(self, test: Any) -> TestResults:
- if config.config['coverage']:
- cmdline = f'coverage run --source {HOME}/lib {test}'
- else:
- cmdline = test
- return self.execute_commandline(test, cmdline)
+ def run_test(self, test: TestToRun) -> TestResults:
+ return self.execute_commandline(test)
def test_results_report(results: Dict[str, TestResults]) -> int: