tests: Enforce 100% URL coverage.

We now instrument URL coverage whenever you run the back end tests,
and if you run the full suite and fail to test all endpoints, we
exit with a non-zero exit code and report failures to you.

If you are running just a subset of the test suite, you'll still
be able to see var/url_coverage.txt, which contains useful information
about which endpoints were exercised.

With some tweaks to the output from tabbott.

Fixes #1441.
This commit is contained in:
Steve Howell 2016-11-18 16:28:28 -08:00 committed by Tim Abbott
parent d8dee522b6
commit 5f5e6b6d83
3 changed files with 24 additions and 21 deletions

View File

@@ -50,9 +50,6 @@ if __name__ == "__main__":
parser.add_option('--coverage', dest='coverage',
action="store_true",
default=False, help='Compute test coverage.')
parser.add_option('--url-coverage', dest='url_coverage',
action="store_true",
default=False, help='Write url coverage data.')
parser.add_option('--no-verbose-coverage', dest='verbose_coverage',
action="store_false",
default=True, help='Disable verbose print of coverage report.')
@@ -127,6 +124,8 @@ if __name__ == "__main__":
for suite in args:
args[args.index(suite)] = suite.replace("/", ".")
full_suite = len(args) == 0
if len(args) == 0:
suites = ["zerver.tests",
"analytics.tests"]
@@ -150,11 +149,11 @@ if __name__ == "__main__":
import cProfile
prof = cProfile.Profile()
prof.enable()
if options.url_coverage:
# This is kind of hacky, but it's the most reliable way
# to make sure instrumentation decorators know the
# setting when they run.
os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'
# This is kind of hacky, but it's the most reliable way
# to make sure instrumentation decorators know the
# setting when they run.
os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'
# setup() needs to be called after coverage is started to get proper coverage reports of model
# files, since part of setup is importing the models for all applications in INSTALLED_APPS.
@@ -169,7 +168,8 @@ if __name__ == "__main__":
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(suites, fatal_errors=options.fatal_errors)
failures = test_runner.run_tests(suites, fatal_errors=options.fatal_errors,
full_suite=full_suite)
templates_not_rendered = test_runner.get_shallow_tested_templates()
if templates_not_rendered:

View File

@@ -47,6 +47,7 @@ import base64
import mock
import os
import re
import sys
import time
import ujson
import unittest
@@ -249,8 +250,8 @@ def instrument_url(f):
return result
return wrapper
def write_instrumentation_reports():
# type: () -> None
def write_instrumentation_reports(full_suite):
# type: (bool) -> None
if INSTRUMENTING:
calls = INSTRUMENTED_CALLS
var_dir = 'var' # TODO make sure path is robust here
@@ -270,8 +271,9 @@ def write_instrumentation_reports():
''')
print(call)
print('URL coverage report is in %s' % (fn,))
print('Try running: ./tools/analyze-url-coverage')
if full_suite:
print('URL coverage report is in %s' % (fn,))
print('Try running: ./tools/analyze-url-coverage')
# Find our untested urls.
from zproject.urls import urlpatterns
@@ -286,12 +288,12 @@ def write_instrumentation_reports():
else:
untested_patterns.append(pattern.regex.pattern)
fn = os.path.join(var_dir, 'untested_url_report.txt')
with open(fn, 'w') as f:
f.write('untested urls\n')
if full_suite and len(untested_patterns):
print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
for untested_pattern in sorted(untested_patterns):
f.write(' %s\n' % (untested_pattern,))
print('Untested-url report is in %s' % (fn,))
print(" %s" % (untested_pattern,))
sys.exit(1)
def get_all_templates():
# type: () -> List[str]

View File

@@ -186,8 +186,9 @@ class Runner(DiscoverRunner):
return failed
return failed
def run_tests(self, test_labels, extra_tests=None, **kwargs):
# type: (List[str], Optional[List[TestCase]], **Any) -> bool
def run_tests(self, test_labels, extra_tests=None,
full_suite=False, **kwargs):
# type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
self.setup_test_environment()
try:
suite = self.build_suite(test_labels, extra_tests)
@@ -208,5 +209,5 @@ class Runner(DiscoverRunner):
failed = self.run_suite(suite, fatal_errors=kwargs.get('fatal_errors'))
self.teardown_test_environment()
if not failed:
write_instrumentation_reports()
write_instrumentation_reports(full_suite=full_suite)
return failed