diff --git a/tools/test-backend b/tools/test-backend
index bce7d606c8..2e5da61e50 100755
--- a/tools/test-backend
+++ b/tools/test-backend
@@ -50,9 +50,6 @@ if __name__ == "__main__":
     parser.add_option('--coverage', dest='coverage',
                       action="store_true",
                       default=False, help='Compute test coverage.')
-    parser.add_option('--url-coverage', dest='url_coverage',
-                      action="store_true",
-                      default=False, help='Write url coverage data.')
     parser.add_option('--no-verbose-coverage', dest='verbose_coverage',
                       action="store_false",
                       default=True, help='Disable verbose print of coverage report.')
@@ -127,6 +124,8 @@ if __name__ == "__main__":
     for suite in args:
         args[args.index(suite)] = suite.replace("/", ".")
 
+    full_suite = len(args) == 0
+
     if len(args) == 0:
         suites = ["zerver.tests",
                   "analytics.tests"]
@@ -150,11 +149,11 @@ if __name__ == "__main__":
         import cProfile
         prof = cProfile.Profile()
         prof.enable()
-    if options.url_coverage:
-        # This is kind of hacky, but it's the most reliable way
-        # to make sure instrumentation decorators know the
-        # setting when they run.
-        os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'
+
+    # This is kind of hacky, but it's the most reliable way
+    # to make sure instrumentation decorators know the
+    # setting when they run.
+    os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'
 
     # setup() needs to be called after coverage is started to get proper coverage reports of model
     # files, since part of setup is importing the models for all applications in INSTALLED_APPS.
@@ -169,7 +168,8 @@ if __name__ == "__main__":
 
     TestRunner = get_runner(settings)
     test_runner = TestRunner()
-    failures = test_runner.run_tests(suites, fatal_errors=options.fatal_errors)
+    failures = test_runner.run_tests(suites, fatal_errors=options.fatal_errors,
+                                     full_suite=full_suite)
 
     templates_not_rendered = test_runner.get_shallow_tested_templates()
     if templates_not_rendered:
diff --git a/zerver/lib/test_helpers.py b/zerver/lib/test_helpers.py
index e625fb6d42..c1f828eb50 100644
--- a/zerver/lib/test_helpers.py
+++ b/zerver/lib/test_helpers.py
@@ -47,6 +47,7 @@ import base64
 import mock
 import os
 import re
+import sys
 import time
 import ujson
 import unittest
@@ -249,8 +250,8 @@ def instrument_url(f):
             return result
         return wrapper
 
-def write_instrumentation_reports():
-    # type: () -> None
+def write_instrumentation_reports(full_suite):
+    # type: (bool) -> None
     if INSTRUMENTING:
         calls = INSTRUMENTED_CALLS
         var_dir = 'var' # TODO make sure path is robust here
@@ -270,8 +271,9 @@ def write_instrumentation_reports():
                     ''')
                 print(call)
 
-        print('URL coverage report is in %s' % (fn,))
-        print('Try running: ./tools/analyze-url-coverage')
+        if full_suite:
+            print('URL coverage report is in %s' % (fn,))
+            print('Try running: ./tools/analyze-url-coverage')
 
         # Find our untested urls.
         from zproject.urls import urlpatterns
@@ -286,12 +288,12 @@ def write_instrumentation_reports():
             else:
                 untested_patterns.append(pattern.regex.pattern)
 
-        fn = os.path.join(var_dir, 'untested_url_report.txt')
-        with open(fn, 'w') as f:
-            f.write('untested urls\n')
+
+        if full_suite and len(untested_patterns):
+            print("\nERROR: Some URLs are untested!  Here's the list of untested URLs:")
             for untested_pattern in sorted(untested_patterns):
-                f.write('  %s\n' % (untested_pattern,))
-            print('Untested-url report is in %s' % (fn,))
+                print("  %s" % (untested_pattern,))
+            sys.exit(1)
 
 def get_all_templates():
     # type: () -> List[str]
diff --git a/zerver/lib/test_runner.py b/zerver/lib/test_runner.py
index 5cf5e8d5a5..3d1ea67200 100644
--- a/zerver/lib/test_runner.py
+++ b/zerver/lib/test_runner.py
@@ -186,8 +186,9 @@ class Runner(DiscoverRunner):
             return failed
         return failed
 
-    def run_tests(self, test_labels, extra_tests=None, **kwargs):
-        # type: (List[str], Optional[List[TestCase]], **Any) -> bool
+    def run_tests(self, test_labels, extra_tests=None,
+                  full_suite=False, **kwargs):
+        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
         self.setup_test_environment()
         try:
             suite = self.build_suite(test_labels, extra_tests)
@@ -208,5 +209,5 @@ class Runner(DiscoverRunner):
         failed = self.run_suite(suite, fatal_errors=kwargs.get('fatal_errors'))
         self.teardown_test_environment()
         if not failed:
-            write_instrumentation_reports()
+            write_instrumentation_reports(full_suite=full_suite)
         return failed
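
Reviewer note: below is a minimal, runnable sketch of the control flow this diff wires together. The full_suite flag is derived from the CLI arguments in tools/test-backend, threaded through Runner.run_tests, and consumed by write_instrumentation_reports, which now fails the run (exit 1) only when a full-suite run leaves URLs untested. The names write_reports and run_tests here are hypothetical stand-ins for the patched functions, not Zulip code.

    import sys

    def write_reports(full_suite, untested_patterns):
        # type: (bool, list) -> None
        # Enforcement is gated on full_suite: a partial run is expected
        # to leave most URLs unexercised, so it never fails the build.
        if full_suite and untested_patterns:
            print("\nERROR: Some URLs are untested!  Here's the list of untested URLs:")
            for pattern in sorted(untested_patterns):
                print("  %s" % (pattern,))
            sys.exit(1)

    def run_tests(suites, full_suite=False):
        # type: (list, bool) -> bool
        failed = False  # stand-in for actually running `suites`
        if not failed:
            write_reports(full_suite, untested_patterns=[])
        return failed

    if __name__ == "__main__":
        args = sys.argv[1:]
        # A run with explicit suite names cannot prove full URL coverage,
        # so the flag is set only when no suites were named on the CLI.
        full_suite = len(args) == 0
        run_tests(args or ["zerver.tests", "analytics.tests"], full_suite=full_suite)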