Fix wrong TimeoutError in catch.
diff --git a/tests/run_tests.py b/tests/run_tests.py
index b398e1826a79d259ad72ea0a3c693e0252187700..3d587ef6f5ba8d79279cf8e08ddd1e620894474e 100755
--- a/tests/run_tests.py
+++ b/tests/run_tests.py
@@ -4,6 +4,8 @@
 A smart, fast test runner. Used in a git pre-commit hook.
 """
+from __future__ import annotations
+
 import logging
 import os
 import re
@@ -106,6 +108,40 @@ class TestResults:
     __radd__ = __add__
 
+    @staticmethod
+    def empty_test_results(suite_name: str) -> TestResults:
+        return TestResults(
+            name=suite_name,
+            tests_executed={},
+            tests_succeeded=[],
+            tests_failed=[],
+            tests_timed_out=[],
+        )
+
+    @staticmethod
+    def single_test_succeeded(name: str) -> TestResults:
+        return TestResults(name, {}, [name], [], [])
+
+    @staticmethod
+    def single_test_failed(name: str) -> TestResults:
+        return TestResults(
+            name,
+            {},
+            [],
+            [name],
+            [],
+        )
+
+    @staticmethod
+    def single_test_timed_out(name: str) -> TestResults:
+        return TestResults(
+            name,
+            {},
+            [],
+            [],
+            [name],
+        )
+
     def __repr__(self) -> str:
         out = f'{self.name}: '
         out += f'{ansi.fg("green")}'
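For orientation, here is a minimal, hypothetical stand-in for the factory helpers added above (field names and their order are taken from the keyword-argument call in empty_test_results; the real TestResults also defines __add__, __radd__, and __repr__, which are omitted here):

    from __future__ import annotations

    from dataclasses import dataclass
    from typing import Dict, List

    @dataclass
    class TestResults:
        name: str
        tests_executed: Dict[str, float]  # test name -> start time, per the tests_executed[...] = time.time() hunk below
        tests_succeeded: List[str]
        tests_failed: List[str]
        tests_timed_out: List[str]

        @staticmethod
        def empty_test_results(suite_name: str) -> TestResults:
            # One empty result bucket per runner/suite.
            return TestResults(suite_name, {}, [], [], [])

        @staticmethod
        def single_test_failed(name: str) -> TestResults:
            # A result recording exactly one failed test.
            return TestResults(name, {}, [], [name], [])

    # Usage mirroring the later hunks: a runner starts with an empty bucket and
    # returns one-test results from its execute path.
    suite = TestResults.empty_test_results('doctests')
    print(TestResults.single_test_failed('some_test'))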
@@ -121,7 +157,7 @@ class TestResults:
                 out += '\n'
         if len(self.tests_timed_out) > 0:
-            out += f' ..{ansi.fg("yellow")}'
+            out += f' ..{ansi.fg("lightning yellow")}'
             out += f'{len(self.tests_timed_out)} tests timed out'
             out += f'{ansi.reset()}:\n'
             for test in self.tests_failed:
@@ -142,13 +178,7 @@ class TestRunner(ABC, thread_utils.ThreadWithReturnValue):
         """
         super().__init__(self, target=self.begin, args=[params])
         self.params = params
-        self.test_results = TestResults(
-            name=self.get_name(),
-            tests_executed={},
-            tests_succeeded=[],
-            tests_failed=[],
-            tests_timed_out=[],
-        )
+        self.test_results = TestResults.empty_test_results(self.get_name())
         self.lock = threading.Lock()
 
     @abstractmethod
@@ -181,7 +211,7 @@ class TemplatedTestRunner(TestRunner, ABC):
         pass
 
     def check_for_abort(self) -> bool:
-        """Periodically caled to check to see if we need to stop."""
+        """Periodically called to check to see if we need to stop."""
         if self.params.halt_event.is_set():
             logger.debug('Thread %s saw halt event; exiting.', self.get_name())
@@ -209,32 +239,25 @@ class TemplatedTestRunner(TestRunner, ABC):
     ) -> TestResults:
         """Execute a particular commandline to run a test."""
+        msg = f'{self.get_name()}: {test.name} ({test.cmdline}) '
         try:
             output = exec_utils.cmd(
                 test.cmdline,
                 timeout_seconds=timeout,
             )
             if "***Test Failed***" in output:
-                msg = f'{self.get_name()}: {test.name} ({test.cmdline}) failed; doctest failure message detected'
+                msg += 'failed; doctest failure message detected.'
                 logger.error(msg)
                 self.persist_output(test, msg, output)
-                return TestResults(
-                    test.name,
-                    {},
-                    [],
-                    [test.name],
-                    [],
-                )
+                return TestResults.single_test_failed(test.name)
+
+            msg += 'succeeded.'
+            self.persist_output(test, msg, output)
+            logger.debug(msg)
+            return TestResults.single_test_succeeded(test.name)
-            self.persist_output(
-                test, f'{test.name} ({test.cmdline}) succeeded.', output
-            )
-            logger.debug(
-                '%s: %s (%s) succeeded', self.get_name(), test.name, test.cmdline
-            )
-            return TestResults(test.name, {}, [test.name], [], [])
         except subprocess.TimeoutExpired as e:
-            msg = f'{self.get_name()}: {test.name} ({test.cmdline}) timed out after {e.timeout:.1f} seconds.'
+            msg += f'timed out after {e.timeout:.1f} seconds.'
             logger.error(msg)
             logger.debug(
                 '%s: %s output when it timed out: %s',
@@ -243,27 +266,16 @@ class TemplatedTestRunner(TestRunner, ABC):
                 e.output,
             )
             self.persist_output(test, msg, e.output.decode('utf-8'))
-            return TestResults(
-                test.name,
-                {},
-                [],
-                [],
-                [test.name],
-            )
+            return TestResults.single_test_timed_out(test.name)
+
         except subprocess.CalledProcessError as e:
-            msg = f'{self.get_name()}: {test.name} ({test.cmdline}) failed; exit code {e.returncode}'
+            msg += f'failed with exit code {e.returncode}.'
             logger.error(msg)
             logger.debug(
                 '%s: %s output when it failed: %s', self.get_name(), test.name, e.output
             )
             self.persist_output(test, msg, e.output.decode('utf-8'))
-            return TestResults(
-                test.name,
-                {},
-                [],
-                [test.name],
-                [],
-            )
+            return TestResults.single_test_failed(test.name)
 
     @overrides
     def begin(self, params: TestingParameters) -> TestResults:
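The commit title mentions a wrong TimeoutError in a catch clause. As a standalone, hypothetical sketch (not code from this repository) of why the handler above must name subprocess.TimeoutExpired: subprocess timeouts raise subprocess.TimeoutExpired, which derives from SubprocessError rather than from the builtin TimeoutError, so a handler written as "except TimeoutError:" would never fire for them.

    import subprocess

    def run_with_timeout(cmdline: str, timeout: float) -> str:
        try:
            proc = subprocess.run(
                cmdline,
                shell=True,
                capture_output=True,
                text=True,
                timeout=timeout,
                check=True,
            )
            return proc.stdout
        except subprocess.TimeoutExpired as e:
            # e.timeout carries the limit that was exceeded, just as the
            # handler above formats it.
            return f'timed out after {e.timeout:.1f} seconds.'

    if __name__ == '__main__':
        print(run_with_timeout('sleep 5', timeout=1.0))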
@@ -288,10 +300,10 @@ class TemplatedTestRunner(TestRunner, ABC):
                 )
                 self.test_results.tests_executed[test_to_run.name] = time.time()
 
-            for future in smart_future.wait_any(running, log_exceptions=False):
-                result = future._resolve()
+            for result in smart_future.wait_any(running, log_exceptions=False):
                 logger.debug('Test %s finished.', result.name)
                 self.test_results += result
+
                 if self.check_for_abort():
                     logger.debug(
                         '%s: check_for_abort told us to exit early.', self.get_name()
@@ -327,7 +339,7 @@ class UnittestTestRunner(TemplatedTestRunner):
             if basename in PERF_SENSATIVE_TESTS:
                 ret.append(
                     TestToRun(
-                        name=basename,
+                        name=f'{basename}_no_coverage',
                         kind='unittest w/o coverage to record perf',
                         cmdline=f'{test} 2>&1',
                     )
@@ -374,7 +386,7 @@ class DoctestTestRunner(TemplatedTestRunner):
             if basename in PERF_SENSATIVE_TESTS:
                 ret.append(
                     TestToRun(
-                        name=basename,
+                        name=f'{basename}_no_coverage',
                         kind='doctest w/o coverage to record perf',
                         cmdline=f'python3 {test} 2>&1',
                     )
@@ -419,7 +431,7 @@ class IntegrationTestRunner(TemplatedTestRunner):
             if basename in PERF_SENSATIVE_TESTS:
                 ret.append(
                     TestToRun(
-                        name=basename,
+                        name=f'{basename}_no_coverage',
                         kind='integration test w/o coverage to capture perf',
                         cmdline=f'{test} 2>&1',
                     )
@@ -463,9 +475,9 @@ def code_coverage_report():
     )
     print(out)
     print(
-        """To recall this report w/o re-running the tests:
+        f"""To recall this report w/o re-running the tests:
 
-    $ coverage report --omit=config-3.*.py,*_test.py,*_itest.py --sort=-cover
+    $ {ansi.bold()}coverage report --omit=config-3.*.py,*_test.py,*_itest.py --sort=-cover{ansi.reset()}
 
 ...from the 'tests' directory. Note that subsequent calls to
 run_tests.py with --coverage will klobber previous results. See:
@@ -478,9 +490,9 @@ run_tests.py with --coverage will klobber previous results. See:
 @bootstrap.initialize
 def main() -> Optional[int]:
     saw_flag = False
-    halt_event = threading.Event()
     threads: List[TestRunner] = []
+    halt_event = threading.Event()
     halt_event.clear()
     params = TestingParameters(
         halt_on_error=True,
@@ -490,7 +502,6 @@ def main() -> Optional[int]:
     if config.config['coverage']:
         logger.debug('Clearing existing coverage data via "coverage erase".')
         exec_utils.cmd('coverage erase')
-
     if config.config['unittests'] or config.config['all']:
         saw_flag = True
         threads.append(UnittestTestRunner(params))
@@ -503,15 +514,14 @@ def main() -> Optional[int]:
     if not saw_flag:
         config.print_usage()
-        print('ERROR: one of --unittests, --doctests or --integration is required.')
-        return 1
+        config.error('One of --unittests, --doctests or --integration is required.', 1)
 
     for thread in threads:
         thread.start()
 
-    results: Dict[str, Optional[TestResults]] = {}
     start_time = time.time()
     last_update = start_time
+    results: Dict[str, Optional[TestResults]] = {}
     still_running = {}
     while len(results) != len(threads):
@@ -534,6 +544,7 @@ def main() -> Optional[int]:
                 }
                 still_running[tid] = running_with_start_time
 
+        # Maybe print tests that are still running.
         now = time.time()
         if now - start_time > 5.0:
             if now - last_update > 3.0:
@@ -551,12 +562,13 @@ def main() -> Optional[int]:
                 else:
                     print(f'Still running: {len(update)} tests.')
 
+        # Maybe signal the other threads to stop too.
         if not thread.is_alive():
             if tid not in results:
                 result = thread.join()
                 if result:
                     results[tid] = result
-                    if len(result.tests_failed) > 0:
+                    if (len(result.tests_failed) + len(result.tests_timed_out)) > 0:
                         logger.error(
                             'Thread %s returned abnormal results; killing the others.',
                             tid,
@@ -570,34 +582,39 @@ def main() -> Optional[int]:
                     halt_event.set()
             results[tid] = None
 
+        color = ansi.fg('green')
+        if failed > 0:
+            color = ansi.fg('red')
+
         if started > 0:
-            percent_done = done / started
+            percent_done = done / started * 100.0
         else:
             percent_done = 0.0
-        if failed == 0:
-            color = ansi.fg('green')
-        else:
-            color = ansi.fg('red')
-
         if percent_done < 100.0:
             print(
                 text_utils.bar_graph_string(
                     done,
                     started,
                     text=text_utils.BarGraphText.FRACTION,
-                    width=80,
+                    width=72,
                     fgcolor=color,
                 ),
-                end='\r',
+                end='',
                 flush=True,
             )
-        time.sleep(0.5)
+            print(f' {color}{now - start_time:.1f}s{ansi.reset()}', end='\r')
+        time.sleep(0.1)
 
-    print(f'{ansi.clear_line()}Final Report:')
+    print(f'{ansi.clear_line()}\n{ansi.underline()}Final Report:{ansi.reset()}')
     if config.config['coverage']:
         code_coverage_report()
+    print(f'Test suite runtime: {time.time() - start_time:.1f}s')
     total_problems = test_results_report(results)
+    if total_problems > 0:
+        logging.error(
+            'Exiting with non-zero return code %d due to problems.', total_problems
+        )
     return total_problems