#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from typing import List, Any
import glob
import optparse
import os
import sys
import subprocess
import ujson
import httplib2
import requests
# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)
import django
from django.conf import settings
from django.test.utils import get_runner
target_fully_covered = {path for target in [
    'analytics/lib/*.py',
    'analytics/models.py',
    'analytics/tests/*.py',
    'analytics/views.py',
    'zerver/context_processors.py',
    'zerver/lib/alert_words.py',
    'zerver/lib/attachments.py',
    'zerver/lib/avatar.py',
    'zerver/lib/avatar_hash.py',
    'zerver/lib/context_managers.py',
    'zerver/lib/domains.py',
    'zerver/lib/emoji.py',
    'zerver/lib/i18n.py',
    'zerver/lib/mention.py',
    'zerver/lib/message.py',
    'zerver/lib/name_restrictions.py',
    'zerver/lib/realm_icon.py',
    'zerver/lib/retention.py',
    'zerver/lib/streams.py',
    'zerver/lib/users.py',
    'zerver/lib/webhooks/*.py',
    'zerver/logging_handlers.py',
    # As a project, we require 100% test coverage in the views files.
    'zerver/views/*.py',
    # Test files should have 100% coverage; test code that isn't run
    # is likely a bug in the test.
    'zerver/tests/*.py',
    # Once we have a nice negative tests system, we can add these:
    # 'zerver/webhooks/*/*.py',
    # 'zerver/webhooks/*/*/*.py',
    'zproject/backends.py',
    # Currently uncovered and in the exclude list, but we'd like to
    # include these soon.
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/events.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/integrations.py',
    'zerver/lib/narrow.py',
    'zerver/lib/notifications.py',
    'zerver/lib/request.py',
    'zerver/lib/response.py',
    'zerver/lib/sessions.py',
    'zerver/lib/test_helpers.py',
    'zerver/lib/test_classes.py',
    'zerver/lib/upload.py',
    'zerver/lib/validator.py',
    'zerver/models.py',
] for path in glob.glob(target)}
not_yet_fully_covered = {
    # The analytics fixtures library is used to generate test fixtures; it
    # isn't properly accounted for in test coverage analysis, since it
    # runs before the tests do.
    'analytics/lib/fixtures.py',
    # We have 100% coverage on the new stuff; need to refactor old stuff.
    'analytics/views.py',
    # Major lib files should have 100% coverage
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/i18n.py',
    'zerver/lib/message.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/upload.py',
    'zerver/models.py',
}
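
# Note (illustrative): enforce_fully_covered below is the sorted list of
# concrete paths that must keep 100% backend test coverage: every file
# matching the glob patterns above, minus the not_yet_fully_covered set.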
enforce_fully_covered = sorted(target_fully_covered - not_yet_fully_covered)
FAILED_TEST_PATH = 'var/last_test_failure.json'

def get_failed_tests():
    # type: () -> List[str]
    try:
        with open(FAILED_TEST_PATH, 'r') as f:
            return ujson.load(f)
    except IOError:
        print("var/last_test_failure.json doesn't exist; running all tests.")
        return []

def write_failed_tests(failed_tests):
    # type: (List[str]) -> None
    if failed_tests:
        with open(FAILED_TEST_PATH, 'w') as f:
            ujson.dump(failed_tests, f)
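
# Illustrative example of the failure-file format: write_failed_tests()
# dumps the failed test IDs as a flat JSON list of dotted names, e.g.
#     ["zerver.tests.test_bugdown.BugdownTest.test_inline_youtube"]
# and get_failed_tests() reads that same list back for --rerun.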

def block_internet():
    # type: () -> None
    # We currently block outgoing internet access by monkey-patching
    # httplib2 and requests, which nearly any test would use to make
    # network requests.
    def internet_guard(*args, **kwargs):
        # type: (*Any, **Any) -> None
        raise Exception("Outgoing network requests are not allowed in the Zulip tests. "
                        "More details and advice are available here: "
                        "https://zulip.readthedocs.io/en/latest/testing.html#internet-access-inside-test-suits")
    httplib2.Http.request = internet_guard
    requests.request = internet_guard  # type: ignore # mypy bug; see our #6017
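
# Illustration (comment only, not executed): once block_internet() has run,
# test code that attempts something like
#     requests.request('GET', 'https://example.com')
# or any httplib2.Http().request(...) call raises the Exception above
# instead of reaching the network.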

if __name__ == "__main__":
    block_internet()

    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(os.path.dirname(TOOLS_DIR))
    sys.path.insert(0, os.path.dirname(TOOLS_DIR))

    # Remove proxy settings for running backend tests
    os.environ["http_proxy"] = ""
    os.environ["https_proxy"] = ""

    from zerver.lib.test_fixtures import is_template_database_current
    from tools.lib.test_script import (
        get_provisioning_status,
    )

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
    # "-u" uses unbuffered IO, which is important when wrapping it in subprocess
    os.environ['PYTHONUNBUFFERED'] = 'y'
    usage = """%prog [options]
    test-backend # Runs all backend tests
    test-backend zerver.tests.test_bugdown # run all tests in a test module
    test-backend zerver/tests/test_bugdown.py # run all tests in a test module
    test-backend test_bugdown # run all tests in a test module
    test-backend zerver.tests.test_bugdown.BugdownTest # run all tests in a test class
    test-backend BugdownTest # run all tests in a test class
    test-backend zerver.tests.test_bugdown.BugdownTest.test_inline_youtube # run a single test
    test-backend BugdownTest.test_inline_youtube # run a single test"""

    parser = optparse.OptionParser(usage)

    parser.add_option('--nonfatal-errors', action="store_false", default=True,
                      dest="fatal_errors",
                      help="Continue past test failures to run all tests")
    parser.add_option('--coverage', dest='coverage',
                      action="store_true",
                      default=False,
                      help='Compute test coverage.')
    parser.add_option('--verbose-coverage', dest='verbose_coverage',
                      action="store_true",
                      default=False,
                      help='Enable verbose print of coverage report.')

    def allow_positive_int(option, opt_str, value, parser):
        # type: (optparse.Option, str, int, optparse.OptionParser) -> None
        if value < 1:
            raise optparse.OptionValueError(
                "option {}: Only positive integers are allowed.".format(opt_str))
        setattr(parser.values, option.dest, value)

    parser.add_option('--processes', dest='processes',
                      type="int",
                      callback=allow_positive_int,
                      action='callback',
                      default=None,
                      help='Specify the number of processes to run the '
                           'tests in. Default is 4.')
    parser.add_option('--profile', dest='profile',
                      action="store_true",
                      default=False,
                      help='Profile test runtime.')
    parser.add_option('--force', dest='force',
                      action="store_true",
                      default=False,
                      help='Run tests despite possible problems.')
    parser.add_option('--no-shallow', dest='no_shallow',
                      action="store_true",
                      default=False,
                      help="Don't allow shallow testing of templates (deprecated)")
    parser.add_option('--verbose', dest='verbose',
                      action="store_true",
                      default=False,
                      help="Show detailed output")
    parser.add_option('--generate-fixtures', action="store_true", default=False,
                      dest="generate_fixtures",
                      help="Force a call to generate-fixtures.")
    parser.add_option('--report-slow-tests', dest='report_slow_tests',
                      action="store_true",
                      default=False,
                      help="Show which tests are slowest.")
    parser.add_option('--reverse', dest='reverse',
                      action="store_true",
                      default=False,
                      help="Run tests in reverse order.")
    parser.add_option('--rerun', dest="rerun",
                      action="store_true",
                      default=False,
                      help="Run the tests which failed the last time "
                           "test-backend was run.  Implies --nonfatal-errors.")

    (options, args) = parser.parse_args()

    zerver_test_dir = 'zerver/tests/'

    # With --rerun, we read var/last_test_failure.json to get the list
    # of tests that failed on the last run, and then pretend those
    # tests were passed explicitly on the command line.  --rerun
    # implies --nonfatal-errors, so that we don't end up removing
    # tests from the list that weren't run.
    if options.rerun:
        options.processes = 1
        options.fatal_errors = False
        failed_tests = get_failed_tests()
        if failed_tests:
            args = failed_tests
    if len(args) > 0:
        # If we passed a specific set of tests, run in serial mode.
        options.processes = 1

    # Transform forward slashes '/' in the arguments into dots '.'.
    for suite in args:
        args[args.index(suite)] = suite.rstrip('/').replace("/", ".")
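    # For example, 'zerver/tests/test_bugdown.py' becomes
    # 'zerver.tests.test_bugdown.py' here; the trailing '.py' is
    # stripped by a later pass below.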

    def rewrite_arguments(search_key):
        # type: (str) -> None
        for root, dirs, files_names in os.walk(zerver_test_dir, topdown=False):
            for file_name in files_names:
                # Only consider files starting with an alphanumeric
                # character and ending with '.py'; this skips backup files.
                if not file_name[0].isalnum() or not file_name.endswith(".py"):
                    continue
                filepath = os.path.join(root, file_name)
                for line in open(filepath):
                    if search_key not in line:
                        continue
                    new_suite = filepath.replace(".py", ".") + suite
                    args[args.index(suite)] = new_suite
                    return

    for suite in args:
        if suite[0].isupper() and "test_" in suite:
            classname = suite.rsplit('.', 1)[0]
            rewrite_arguments(classname)
        elif suite[0].isupper():
            rewrite_arguments('class %s(' % (suite,))
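    # Illustration: for `test-backend BugdownTest`, rewrite_arguments() scans
    # zerver/tests/ for a file containing 'class BugdownTest(' and rewrites
    # the argument to 'zerver/tests/test_bugdown.BugdownTest', which the
    # passes below normalize to 'zerver.tests.test_bugdown.BugdownTest'.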

    for suite in args:
        if suite.startswith('test'):
            for root, dirs, files_names in os.walk(zerver_test_dir):
                for file_name in files_names:
                    if file_name == suite or file_name == suite + ".py":
                        new_suite = os.path.join(root, file_name)
                        args[args.index(suite)] = new_suite
                        break

    for suite in args:
        args[args.index(suite)] = suite.replace(".py", "")

    # Transform the forward slashes '/' introduced by zerver_test_dir
    # into dots '.', taking care of any remaining slashes.
    for suite in args:
        args[args.index(suite)] = suite.replace("/", ".")

    full_suite = len(args) == 0

    if len(args) == 0:
        suites = ["zerver.tests",
                  "zerver.webhooks",
                  "analytics.tests"]
    else:
        suites = args

    if not options.force:
        ok, msg = get_provisioning_status()
        if not ok:
            print(msg)
            print('If you really know what you are doing, use --force to run anyway.')
            sys.exit(1)

    if options.coverage:
        import coverage
        cov = coverage.Coverage(config_file="tools/coveragerc", concurrency='multiprocessing')
        cov.start()
    if options.profile:
        import cProfile
        prof = cProfile.Profile()
        prof.enable()

    # This is kind of hacky, but it's the most reliable way
    # to make sure instrumentation decorators know the
    # setting when they run.
    os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'

    # setup() needs to be called after coverage is started to get proper
    # coverage reports of model files, since part of setup is importing
    # the models for all applications in INSTALLED_APPS.
    django.setup()

    if options.generate_fixtures or not is_template_database_current():
        generate_fixtures_command = [os.path.join(TOOLS_DIR, 'setup', 'generate-fixtures')]
        generate_fixtures_command.append('--force')
        subprocess.call(generate_fixtures_command)

    subprocess.check_call(['tools/webpack', '--test'])

    if options.processes is None:
        options.processes = 4

    TestRunner = get_runner(settings)

    parallel = options.processes
    if parallel > 1:
        print("-- Running tests in parallel mode with {} "
              "processes.".format(parallel))
    else:
        print("-- Running tests in serial mode.")

    test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
                             parallel=parallel, reverse=options.reverse,
                             keepdb=True)
    failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite)
    write_failed_tests(failed_tests)
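    # On the next invocation, `test-backend --rerun` will read the failure
    # list we just wrote and re-run exactly those tests.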

    templates_not_rendered = test_runner.get_shallow_tested_templates()
    # We only check the templates if all the tests ran and passed.
    if not failures and full_suite and templates_not_rendered:
        missed_count = len(templates_not_rendered)
        print("\nError: %s templates have no tests!" % (missed_count,))
        for template in templates_not_rendered:
            print('  {}'.format(template))
        print("See zerver/tests/test_templates.py for the exclude list.")
        failures = True

    if options.coverage:
        cov.stop()
        cov.save()
        cov.combine()
        cov.data_suffix = False  # Disable the suffix so that the filename is .coverage
        cov.save()
        if options.verbose_coverage:
            print("Printing coverage data")
            cov.report(show_missing=False)
        cov.html_report(directory='var/coverage')
        print("HTML report saved; visit at http://127.0.0.1:9991/coverage/index.html")
    if full_suite and not failures and options.coverage:
        # Assert that various files have full coverage.
        for path in enforce_fully_covered:
            missing_lines = cov.analysis2(path)[3]
            if len(missing_lines) > 0:
                print("ERROR: %s no longer has complete backend test coverage" % (path,))
                print("  Lines missing coverage: %s" % (missing_lines,))
                print()
                failures = True
        if failures:
            print("It looks like your changes lost 100% test coverage in one or more files.")
            print("Usually, the right fix for this is to add some tests.")
            print("But also check out the include/exclude lists in tools/test-backend.")
            print("If a line is intentionally not tested, you can use a `# nocoverage` comment.")
            print("To run this check locally, use `test-backend --coverage`.")

    if options.profile:
        prof.disable()
        prof.dump_stats("/tmp/profile.data")
        print("Profile data saved to /tmp/profile.data")
        print("You can visualize it using e.g. `runsnake /tmp/profile.data`")

    if options.report_slow_tests:
        from zerver.lib.test_runner import report_slow_tests
        # We do this even with failures, since slowness can be
        # an important clue as to why tests fail.
        report_slow_tests()

    # We'll have printed whether tests passed or failed above
    sys.exit(bool(failures))