Add option for re-running failed tests.

This adds the option `--rerun` to the `test-backend` infrastructure.
It runs the tests that failed during the last `test-backend` run.  It
works by storing failed test info at var/last_test_failure.json.

Cleaned up by Umair Khan and Tim Abbott.
Author: Adarsh S, 2016-12-23 23:12:45 +05:30
Committed by: Tim Abbott
Parent: 79ad174ad3
Commit: d54dea819d
3 changed files with 49 additions and 4 deletions


@@ -46,7 +46,9 @@ tests.
Another thing to note is that our tests generally "fail fast," i.e. they
stop at the first sign of trouble. This is generally a good thing for
iterative development, but you can override this behavior with the
`--nonfatal-errors` option. A useful option to combine with that is
the `--rerun` option, which will rerun just the tests that failed in
the last test run.
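
For example, a typical iteration loop looks like this (illustrative;
it assumes the standard `tools/test-backend` entry point):

```
./tools/test-backend --nonfatal-errors   # run everything, recording failures
./tools/test-backend --rerun             # re-run only what failed
```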
## How to write tests.


@@ -2,11 +2,13 @@
from __future__ import print_function
from __future__ import absolute_import
from typing import List
import glob
import optparse
import os
import sys
import subprocess
import ujson
# check for the venv
from lib import sanity_check
@@ -90,6 +92,23 @@ not_yet_fully_covered = {
enforce_fully_covered = sorted(target_fully_covered - not_yet_fully_covered)
FAILED_TEST_PATH = 'var/last_test_failure.json'

def get_failed_tests():
    # type: () -> List[str]
    try:
        with open(FAILED_TEST_PATH, 'r') as f:
            return ujson.load(f)
    except IOError:
        print("var/last_test_failure.json doesn't exist; running all tests.")
        return []

def write_failed_tests(failed_tests):
    # type: (List[str]) -> None
    if failed_tests:
        with open(FAILED_TEST_PATH, 'w') as f:
            ujson.dump(failed_tests, f)
if __name__ == "__main__":
    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(os.path.dirname(TOOLS_DIR))
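
As an aside (not part of the commit), the two helpers above round-trip a
plain JSON list of dotted test labels; a minimal sketch, using a
hypothetical label and assuming it runs from the project root so that
`var/` exists:

```python
# Sketch: what var/last_test_failure.json holds between runs.
failed = ['zerver.tests.test_signup.SignupTest.test_bad_email']  # hypothetical
write_failed_tests(failed)           # serializes the list with ujson.dump
assert get_failed_tests() == failed  # a later --rerun reads it back
```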
@@ -165,11 +184,27 @@ if __name__ == "__main__":
                      action="store_true",
                      default=False,
                      help="Run tests in reverse order.")
    parser.add_option('--rerun', dest="rerun",
                      action="store_true",
                      default=False,
                      help=("Run the tests which failed the last time "
                            "test-backend was run. Implies --nonfatal-errors."))
    (options, args) = parser.parse_args()

    zerver_test_dir = 'zerver/tests/'
    # While running --rerun, we read var/last_test_failure.json to get
    # the list of tests that failed on the last run, and then pretend
    # those tests were passed explicitly.  --rerun implies
    # --nonfatal-errors, so that we don't end up removing tests from
    # the list that weren't run.
    if options.rerun:
        options.fatal_errors = False
        failed_tests = get_failed_tests()
        if failed_tests:
            args = failed_tests

    # Transform forward slashes '/' in the arguments into dots '.'.
    for i, suite in enumerate(args):
        args[i] = suite.rstrip('/').replace("/", ".")
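
The loop above simply normalizes path-style arguments into Django's
dotted test labels; for example (hypothetical argument):

```python
suite = 'zerver/tests/test_signup/'
print(suite.rstrip('/').replace('/', '.'))  # -> zerver.tests.test_signup
```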
@@ -267,7 +302,8 @@ if __name__ == "__main__":
    test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
                             parallel=parallel, reverse=options.reverse,
                             keepdb=True)
    failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite)
    write_failed_tests(failed_tests)

    templates_not_rendered = test_runner.get_shallow_tested_templates()
    # We only check the templates if all the tests ran and passed


@@ -167,6 +167,11 @@ class TextTestResult(runner.TextTestResult):
    This class has unpythonic function names because base class follows
    this style.
    """

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(TextTestResult, self).__init__(*args, **kwargs)
        self.failed_tests = []  # type: List[str]

    def addInfo(self, test, msg):
        # type: (TestCase, Text) -> None
        self.stream.write(msg)
@@ -193,6 +198,8 @@ class TextTestResult(runner.TextTestResult):
    def addFailure(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        TestResult.addFailure(self, *args, **kwargs)
        test_name = full_test_name(args[0])
        self.failed_tests.append(test_name)

    def addSkip(self, test, reason):
        # type: (TestCase, Text) -> None
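
`addFailure` records the dotted name via `full_test_name`, which is
defined elsewhere in this file; a minimal sketch of what such a helper
does (an assumption for illustration, not the commit's code):

```python
def full_test_name(test):
    # type: (TestCase) -> str
    # Sketch: builds a dotted label like
    # 'zerver.tests.test_signup.SignupTest.test_bad_email' from a
    # unittest TestCase instance (the real helper may differ).
    test_module = test.__module__
    test_class = test.__class__.__name__
    test_method = test._testMethodName
    return '%s.%s.%s' % (test_module, test_class, test_method)
```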
@@ -364,7 +371,7 @@ class Runner(DiscoverRunner):
    def run_tests(self, test_labels, extra_tests=None,
                  full_suite=False, **kwargs):
        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
@@ -387,7 +394,7 @@ class Runner(DiscoverRunner):
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite)
        return failed, result.failed_tests

def get_test_names(suite):
    # type: (TestSuite) -> List[str]