# Global counters / configuration for the test runner.
UNITTEST=0                              # run *_test.py unit tests (-u / -a)
INTEGRATION=0                           # run *_itest.py integration tests (-i / -a)
FAILURES=0                              # number of failed test files
TESTS_RUN=0                             # total number of test files executed
COVERAGE=0                              # collect coverage.py data (--coverage)
# Tests whose timing results matter: under --coverage they are re-run without
# instrumentation so perf numbers are recorded uninflated.
PERF_TESTS=("string_utils_test.py")
# NOTE(review): 'dup' appears truncated in this view — the 'if' has no body
# and no closing 'fi', so as written this does not parse as valid shell.
# Presumably it was meant to validate an exact argument count (likely erroring
# or printing usage when $# != 2) — confirm the intended body before use.
dup() {
if [ $# -ne 2 ]; then
}
function usage() {
- echo "Usage: $0 [-a]|[-i][-u][-d]"
+ echo "Usage: $0 [-a]|[-i][-u][-d] [--coverage]"
echo
echo "Runs tests under ${ROOT}. Options control which test types:"
echo
-i|--integration)
INTEGRATION=1
;;
+ --coverage)
+ COVERAGE=1
+ ;;
*) # unknown option
echo "Argument $key was not recognized."
echo
exit 2
fi
# Wipe previously-collected coverage data so the '--append' runs below
# accumulate results for this invocation only. ':-0' keeps the numeric test
# valid even if COVERAGE was never initialized (e.g. under 'set -u').
if [ "${COVERAGE:-0}" -eq 1 ]; then
  coverage erase
fi

# Comma-separated list of failing test names, built up by the runners below.
FAILED_TESTS=""
# Run one doctest file (${doctest} comes from the enclosing loop), under
# coverage instrumentation when requested, and record pass/fail.
if [ "${DOCTEST:-0}" -eq 1 ]; then
  BASE=$(basename "${doctest}")
  BASE="${BASE} (doctest)"
  make_header "${BASE}" "${CYAN}"
  if [ "${COVERAGE:-0}" -eq 1 ]; then
    OUT=$( coverage run --source "${HOME}/lib" --append "${doctest}" 2>&1 )
  else
    OUT=$( python3 "${doctest}" 2>&1 )
  fi
  TESTS_RUN=$((TESTS_RUN+1))
  # Count doctest failure banners; 'grep -c' replaces 'grep | wc -l'.
  FAILED=$( echo "${OUT}" | grep -c '\*\*\*Test Failed\*\*\*' )
  # Numeric comparison uses -eq; '==' inside '[' is a non-POSIX string test.
  if [ "${FAILED}" -eq 0 ]; then
    echo "OK"
  else
    echo -e "${FAILED}"
    FAILURES=$((FAILURES+1))
    FAILED_TESTS="${FAILED_TESTS},${BASE} (python3 ${doctest})"
  fi
fi
done
# Run every *_test.py under ${ROOT} as a unit test.
#
# BUG FIX: the previous version tested $? immediately after the inner
# 'if [[ ... PERF_TESTS ... ]]; fi'. When a test was NOT in PERF_TESTS that
# inner if-statement itself exits with status 0, silently masking a failing
# 'coverage run'. The exit status is now captured explicitly at each run site.
if [ "${UNITTEST:-0}" -eq 1 ]; then
  # NOTE(review): word-splitting the find output breaks on paths containing
  # whitespace; acceptable here assuming test paths never contain spaces.
  for test in $(find "${ROOT}" -name "*_test.py" -print); do
    BASE=$(basename "${test}")
    HDR="${BASE} (unittest)"
    make_header "${HDR}" "${GREEN}"
    rv=0
    if [ "${COVERAGE:-0}" -eq 1 ]; then
      coverage run --source "${HOME}/lib" --append "${test}" --unittests_ignore_perf || rv=$?
      # Perf-sensitive tests are re-run uninstrumented so timing numbers are
      # recorded without coverage overhead (quoted =~ is a literal substring
      # match against the space-delimited PERF_TESTS list — intentional).
      if [[ " ${PERF_TESTS[*]} " =~ " ${BASE} " ]]; then
        echo "(re-running w/o coverage to record perf results)."
        "${test}" || rv=$?
      fi
    else
      "${test}" || rv=$?
    fi
    if [ "${rv}" -ne 0 ]; then
      FAILURES=$((FAILURES+1))
      FAILED_TESTS="${FAILED_TESTS},${BASE} (python3 ${test})"
    fi
    TESTS_RUN=$((TESTS_RUN+1))
  done
fi
# Run every *_itest.py under ${ROOT} as an integration test.
# Exit status is captured explicitly at each run site (rather than reading $?
# after the compound if) for robustness and consistency with the unit-test loop.
if [ "${INTEGRATION:-0}" -eq 1 ]; then
  # NOTE(review): word-splitting the find output breaks on paths containing
  # whitespace; acceptable here assuming test paths never contain spaces.
  for test in $(find "${ROOT}" -name "*_itest.py" -print); do
    BASE=$(basename "${test}")
    HDR="${BASE} (integration test)"
    make_header "${HDR}" "${ORANGE}"
    rv=0
    if [ "${COVERAGE:-0}" -eq 1 ]; then
      coverage run --source "${HOME}/lib" --append "${test}" || rv=$?
    else
      "${test}" || rv=$?
    fi
    if [ "${rv}" -ne 0 ]; then
      FAILURES=$((FAILURES+1))
      FAILED_TESTS="${FAILED_TESTS},${BASE} (python3 ${test})"
    fi
    TESTS_RUN=$((TESTS_RUN+1))
  done
fi
# Print the accumulated coverage report and tell the user how to recall it.
# Fixes user-facing typos ("klobber" -> "clobber", "run-running" -> "re-running")
# and quotes the --omit argument so its '*' globs can never expand in the shell.
if [ "${COVERAGE:-0}" -eq 1 ]; then
  make_header "Code Coverage Report" "${GREEN}"
  coverage report --omit="config-3.8.py,*_test.py,*_itest.py" --sort=-cover
  echo
  echo "To recall this report w/o re-running the tests:"
  echo
  echo "    $ coverage report --omit=config-3.8.py,*_test.py,*_itest.py --sort=-cover"
  echo
  echo "...from the 'tests' directory. Note that subsequent calls to"
  echo "run_tests.sh with --coverage will clobber previous results. See:"
  echo
  echo "    https://coverage.readthedocs.io/en/6.2/"
  echo
fi
+
# Final summary: list failing tests (one per line, '__'-indented) and exit
# with the failure count, or report success and exit 0.
if [ "${FAILURES:-0}" -ne 0 ]; then
  # Reformat the ",name (cmd),name (cmd)" accumulator into one entry per
  # line. Bash pattern substitution replaces the previous sed pipeline: the
  # '\n' replacement escape there is GNU-sed-only (BSD sed emits a literal
  # 'n'), and the unquoted 'echo ${FAILED_TESTS}' collapsed whitespace.
  FAILED_TESTS="__${FAILED_TESTS#,}"
  FAILED_TESTS="${FAILED_TESTS//,/$'\n'__}"
  if [ "${FAILURES}" -eq 1 ]; then
    echo -e "${RED}There was ${FAILURES}/${TESTS_RUN} failure:"
  else
    echo -e "${RED}There were ${FAILURES}/${TESTS_RUN} failures:"
  fi
  echo "${FAILED_TESTS}"
  echo -e "${NC}"
  # Propagate the failure count as the exit status (shell truncates to 8 bits,
  # so counts >255 wrap — still non-zero for any realistic count).
  exit "${FAILURES}"
else
  echo -e "${BLACK}${ON_GREEN}All (${TESTS_RUN}) test(s) passed.${NC}"
  exit 0
fi