args.add_argument('--unittests', '-u', action='store_true', help='Run unittests.')
args.add_argument('--doctests', '-d', action='store_true', help='Run doctests.')
args.add_argument('--integration', '-i', action='store_true', help='Run integration tests.')
+args.add_argument(
+ '--all',
+ '-a',
+ action='store_true',
+ help='Run unittests, doctests and integration tests. Equivalent to -u -d -i',
+)
args.add_argument(
'--coverage', '-c', action='store_true', help='Run tests and capture code coverage data'
)
logger.debug('Clearing existing coverage data via "coverage erase".')
exec_utils.cmd('coverage erase')
- if config.config['unittests']:
+ if config.config['unittests'] or config.config['all']:
saw_flag = True
threads.append(UnittestTestRunner(params))
- if config.config['doctests']:
+ if config.config['doctests'] or config.config['all']:
saw_flag = True
threads.append(DoctestTestRunner(params))
- if config.config['integration']:
+ if config.config['integration'] or config.config['all']:
saw_flag = True
threads.append(IntegrationTestRunner(params))
)
time.sleep(0.5)
- print('\e[2K\rFinal Report:')
+ print(f'{ansi.clear_line()}Final Report:')
if config.config['coverage']:
code_coverage_report()
total_problems = test_results_report(results)