From d54dea819d1290592d076ceb1b615764bfa9ff21 Mon Sep 17 00:00:00 2001
From: Adarsh S <4d4r5h.5@gmail.com>
Date: Fri, 23 Dec 2016 23:12:45 +0530
Subject: [PATCH] Add option for re-running failed tests.

This adds the option '--rerun' to the `test-backend` infrastructure.
It runs the tests that failed during the last `test-backend` run.  It
works by storing the failed test info in var/last_test_failure.json.

Cleaned up by Umair Khan and Tim Abbott.
---
 docs/testing-with-django.md |  4 +++-
 tools/test-backend          | 38 ++++++++++++++++++++++++++++++++++++-
 zerver/lib/test_runner.py   | 11 +++++++++--
 3 files changed, 49 insertions(+), 4 deletions(-)

diff --git a/docs/testing-with-django.md b/docs/testing-with-django.md
index 08322b9a24..1736ad1f21 100644
--- a/docs/testing-with-django.md
+++ b/docs/testing-with-django.md
@@ -46,7 +46,9 @@ tests.
 Another thing to note is that our tests generally "fail fast," i.e. they
 stop at the first sign of trouble. This is generally a good thing for
 iterative development, but you can override this behavior with the
-`--nonfatal-errors` option.
+`--nonfatal-errors` option. A useful option to combine with that is
+the `--rerun` option, which will rerun just the tests that failed in
+the last test run.
 
 ## How to write tests.
 
diff --git a/tools/test-backend b/tools/test-backend
index 39075c7273..2ec49022ea 100755
--- a/tools/test-backend
+++ b/tools/test-backend
@@ -2,11 +2,13 @@
 from __future__ import print_function
 from __future__ import absolute_import
+from typing import List
 
 import glob
 import optparse
 import os
 import sys
 import subprocess
+import ujson
 
 # check for the venv
 from lib import sanity_check
@@ -90,6 +92,23 @@ not_yet_fully_covered = {
 
 enforce_fully_covered = sorted(target_fully_covered - not_yet_fully_covered)
 
+FAILED_TEST_PATH = 'var/last_test_failure.json'
+
+def get_failed_tests():
+    # type: () -> List[str]
+    try:
+        with open(FAILED_TEST_PATH, 'r') as f:
+            return ujson.load(f)
+    except IOError:
+        print("var/last_test_failure.json doesn't exist; running all tests.")
+        return []
+
+def write_failed_tests(failed_tests):
+    # type: (List[str]) -> None
+    if failed_tests:
+        with open(FAILED_TEST_PATH, 'w') as f:
+            ujson.dump(failed_tests, f)
+
 if __name__ == "__main__":
     TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
     os.chdir(os.path.dirname(TOOLS_DIR))
@@ -165,11 +184,27 @@ if __name__ == "__main__":
     parser.add_option('--reverse', dest="reverse",
                       action="store_true",
                       default=False, help="Run tests in reverse order.")
+    parser.add_option('--rerun', dest="rerun",
+                      action="store_true",
+                      default=False,
+                      help=("Run the tests which failed the last time "
+                            "test-backend was run. Implies --nonfatal-errors."))
 
     (options, args) = parser.parse_args()
 
     zerver_test_dir = 'zerver/tests/'
 
+    # When running with --rerun, we read var/last_test_failure.json to
+    # get the list of tests that failed on the last run, and then
+    # pretend those tests were passed explicitly on the command line.
+    # --rerun implies --nonfatal-errors, so that we don't end up
+    # removing tests from the list that weren't run.
+    if options.rerun:
+        options.fatal_errors = False
+        failed_tests = get_failed_tests()
+        if failed_tests:
+            args = failed_tests
+
     # to transform forward slashes '/' present in the argument into dots '.'
     for suite in args:
         args[args.index(suite)] = suite.rstrip('/').replace("/", ".")
@@ -267,7 +302,8 @@ if __name__ == "__main__":
     test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
                              parallel=parallel, reverse=options.reverse,
                              keepdb=True)
-    failures = test_runner.run_tests(suites, full_suite=full_suite)
+    failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite)
+    write_failed_tests(failed_tests)
     templates_not_rendered = test_runner.get_shallow_tested_templates()
 
     # We only check the templates if all the tests ran and passed
diff --git a/zerver/lib/test_runner.py b/zerver/lib/test_runner.py
index e9fec26229..76cf698ecc 100644
--- a/zerver/lib/test_runner.py
+++ b/zerver/lib/test_runner.py
@@ -167,6 +167,11 @@ class TextTestResult(runner.TextTestResult):
     This class has unpythonic function names because base class follows
     this style.
     """
+    def __init__(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        super(TextTestResult, self).__init__(*args, **kwargs)
+        self.failed_tests = []  # type: List[str]
+
     def addInfo(self, test, msg):
         # type: (TestCase, Text) -> None
         self.stream.write(msg)
@@ -193,6 +198,8 @@ class TextTestResult(runner.TextTestResult):
     def addFailure(self, *args, **kwargs):
         # type: (*Any, **Any) -> None
         TestResult.addFailure(self, *args, **kwargs)
+        test_name = full_test_name(args[0])
+        self.failed_tests.append(test_name)
 
     def addSkip(self, test, reason):
         # type: (TestCase, Text) -> None
@@ -364,7 +371,7 @@ class Runner(DiscoverRunner):
 
     def run_tests(self, test_labels, extra_tests=None,
                   full_suite=False, **kwargs):
-        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
+        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
         self.setup_test_environment()
         try:
             suite = self.build_suite(test_labels, extra_tests)
@@ -387,7 +394,7 @@ class Runner(DiscoverRunner):
         failed = self.suite_result(suite, result)
         if not failed:
             write_instrumentation_reports(full_suite=full_suite)
-        return failed
+        return failed, result.failed_tests
 
 def get_test_names(suite):
     # type: (TestSuite) -> List[str]
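
The rerun mechanism in this patch boils down to persisting the failed test
names as a JSON list and, on the next run, feeding them back in as if they
had been passed on the command line. Below is a minimal standalone sketch of
that round trip; it uses the stdlib json module rather than ujson, and the
file path and test name here are illustrative, not taken from a real run.

from __future__ import print_function

import json
import os

FAILED_TEST_PATH = 'var/last_test_failure.json'

def write_failed_tests(failed_tests):
    # Only write the file when there is something to rerun.
    if failed_tests:
        with open(FAILED_TEST_PATH, 'w') as f:
            json.dump(failed_tests, f)

def get_failed_tests():
    # A missing file means nothing was recorded; fall back to running all tests.
    try:
        with open(FAILED_TEST_PATH, 'r') as f:
            return json.load(f)
    except IOError:
        print("%s doesn't exist; running all tests." % (FAILED_TEST_PATH,))
        return []

def to_dotted(suite):
    # test-backend normalizes path-style arguments ('zerver/tests/') into
    # dotted module paths ('zerver.tests'); dotted names pass through unchanged.
    return suite.rstrip('/').replace('/', '.')

if __name__ == '__main__':
    if not os.path.isdir('var'):
        os.makedirs('var')
    # Pretend the previous run recorded one (hypothetical) failing test ...
    write_failed_tests(['zerver.tests.test_signup.SignupTest.test_bad_email'])
    # ... so a later --rerun invocation feeds it back in as an explicit argument.
    args = get_failed_tests() or ['zerver/tests/']
    print([to_dotted(suite) for suite in args])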