#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import glob
import optparse
import os
import sys
import subprocess
# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)
import django
from django.conf import settings
from django.test.utils import get_runner

target_fully_covered = {path for target in [
    'analytics/lib/*.py',
    'analytics/models.py',
    'analytics/tests/*.py',
    'analytics/views.py',
    'zerver/context_processors.py',
    'zerver/lib/alert_words.py',
    'zerver/lib/attachments.py',
    'zerver/lib/avatar_hash.py',
    'zerver/lib/context_managers.py',
    'zerver/lib/domains.py',
    'zerver/lib/emoji.py',
    'zerver/lib/i18n.py',
    'zerver/lib/mention.py',
    'zerver/lib/message.py',
    'zerver/lib/name_restrictions.py',
    'zerver/lib/realm_icon.py',
    'zerver/lib/retention.py',
    'zerver/lib/streams.py',
    'zerver/lib/users.py',
    'zerver/lib/webhooks/*.py',
    'zerver/views/*.py',
    'zerver/tests/*.py',
    # Once we have a nice negative tests system, we can add these:
    # 'zerver/webhooks/*/*.py',
    # 'zerver/webhooks/*/*/*.py',
    'zproject/backends.py',
    # Currently uncovered and in the exclude list below, but we'd like
    # to include these soon.
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/events.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/integrations.py',
    'zerver/lib/narrow.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/request.py',
    'zerver/lib/response.py',
    'zerver/lib/test_helpers.py',
    'zerver/lib/test_classes.py',
    'zerver/lib/upload.py',
    'zerver/lib/validator.py',
    'zerver/models.py',
] for path in glob.glob(target)}
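
# Note: the globs above are expanded right here, at definition time, so the
# resulting set reflects whatever files exist when this script is loaded.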

not_yet_fully_covered = {
    # The analytics fixtures library is used to generate test fixtures;
    # it isn't properly accounted for in test coverage analysis, since
    # it runs before the tests do.
    'analytics/lib/fixtures.py',
    # We have 100% coverage on the new stuff; the old stuff still needs
    # to be refactored.
    'analytics/views.py',
    # Major lib files should have 100% coverage
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/events.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/i18n.py',
    'zerver/lib/message.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/upload.py',
    'zerver/models.py',
    # Test files should have full coverage; it's a bug in the test if
    # they don't! There are open issues for all of these.
    'zerver/tests/test_tornado.py',
    # Getting the views files to 100% coverage is a major project goal.
    'zerver/views/auth.py',
    'zerver/views/home.py',
    # Getting this to 100% is a major project goal.
    'zproject/backends.py',
}
enforce_fully_covered = sorted(target_fully_covered - not_yet_fully_covered)
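
# For example, 'zerver/lib/actions.py' appears in both sets above, so it is
# not enforced until its coverage actually reaches 100%.
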
if __name__ == "__main__":
    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(os.path.dirname(TOOLS_DIR))
    sys.path.insert(0, os.path.dirname(TOOLS_DIR))
    from zerver.lib.test_fixtures import is_template_database_current
    from tools.lib.test_script import (
        get_provisioning_status,
    )

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
    # PYTHONUNBUFFERED is the environment analog of "python -u": it forces
    # unbuffered IO, which is important when this script is wrapped in a
    # subprocess.
    os.environ['PYTHONUNBUFFERED'] = 'y'

    usage = """%prog [options]
    test-backend                                 # Run all backend tests
    test-backend zerver.tests.test_bugdown      # Run all tests in a test module
    test-backend zerver/tests/test_bugdown.py   # Run all tests in a test module
    test-backend test_bugdown                   # Run all tests in a test module
    test-backend zerver.tests.test_bugdown.BugdownTest  # Run all tests in a test class
    test-backend BugdownTest                    # Run all tests in a test class
    test-backend zerver.tests.test_bugdown.BugdownTest.test_inline_youtube  # Run a single test
    test-backend BugdownTest.test_inline_youtube         # Run a single test"""

    parser = optparse.OptionParser(usage)
    parser.add_option('--nonfatal-errors', action="store_false", default=True,
                      dest="fatal_errors",
                      help="Continue past test failures to run all tests")
    parser.add_option('--coverage', dest='coverage',
                      action="store_true",
                      default=False, help='Compute test coverage.')
    parser.add_option('--verbose-coverage', dest='verbose_coverage',
                      action="store_true",
                      default=False, help='Enable verbose print of coverage report.')
    parser.add_option('--profile', dest='profile',
                      action="store_true",
                      default=False, help='Profile test runtime.')
    parser.add_option('--force', dest='force',
                      action="store_true",
                      default=False, help='Run tests despite possible problems.')
    parser.add_option('--no-shallow', dest='no_shallow',
                      action="store_true",
                      default=False,
                      help="Don't allow shallow testing of templates (deprecated)")
    parser.add_option('--verbose', dest='verbose',
                      action="store_true",
                      default=False,
                      help="Show detailed output")
    parser.add_option('--no-generate-fixtures', action="store_false", default=True,
                      dest="generate_fixtures",
                      help=("Reduce running time by not calling generate-fixtures. "
                            "This may cause spurious failures for some tests."))
    parser.add_option('--report-slow-tests', dest='report_slow_tests',
                      action="store_true",
                      default=False,
                      help="Show which tests are slowest.")
    (options, args) = parser.parse_args()

    zerver_test_dir = 'zerver/tests/'

    # Transform any forward slashes '/' in the arguments into dots '.'
    for suite in args:
        args[args.index(suite)] = suite.rstrip('/').replace("/", ".")
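    # e.g. 'zerver/tests/test_bugdown.py' becomes 'zerver.tests.test_bugdown.py'
    # here; the stray '.py' suffix is stripped further below.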

    def rewrite_arguments(search_key):
        # type: (str) -> None
        # Search the test files for search_key (e.g. a test class name) and
        # prefix the enclosing loop's `suite` argument with the path of the
        # first file that mentions it.
        for root, dirs, files_names in os.walk(zerver_test_dir, topdown=False):
            for file_name in files_names:
                # Check for files starting with alphanumeric characters
                # and ending with '.py'; ignore any backup files.
                if not file_name[0].isalnum() or not file_name.endswith(".py"):
                    continue
                filepath = os.path.join(root, file_name)
                for line in open(filepath):
                    if search_key not in line:
                        continue
                    new_suite = filepath.replace(".py", ".") + suite
                    args[args.index(suite)] = new_suite
                    return

    for suite in args:
        if suite[0].isupper() and "test_" in suite:
            classname = suite.rsplit('.', 1)[0]
            rewrite_arguments(classname)
        elif suite[0].isupper():
            rewrite_arguments(suite)
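    # e.g. 'BugdownTest.test_inline_youtube' becomes
    # 'zerver/tests/test_bugdown.BugdownTest.test_inline_youtube' here,
    # assuming 'BugdownTest' appears in that file.
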
    for suite in args:
        if suite.startswith('test'):
            for root, dirs, files_names in os.walk(zerver_test_dir):
                for file_name in files_names:
                    if file_name == suite or file_name == suite + ".py":
                        new_suite = os.path.join(root, file_name)
                        args[args.index(suite)] = new_suite
                        break
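
    # At this point a bare name like 'test_bugdown' has become
    # 'zerver/tests/test_bugdown.py'; the '.py' suffix and the slashes
    # are normalized away just below.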
    for suite in args:
        args[args.index(suite)] = suite.replace(".py", "")

    # Transform the forward slashes '/' introduced by zerver_test_dir (and
    # any still present in the arguments) into dots '.'
    for suite in args:
        args[args.index(suite)] = suite.replace("/", ".")

    full_suite = len(args) == 0

    if len(args) == 0:
        suites = ["zerver.tests",
                  "zerver.webhooks",
                  "analytics.tests"]
    else:
        suites = args

    if not options.force:
        ok, msg = get_provisioning_status()
        if not ok:
            print(msg)
            print('If you really know what you are doing, use --force to run anyway.')
            sys.exit(1)

    if options.coverage:
        import coverage
        cov = coverage.Coverage(config_file="tools/coveragerc")
        cov.start()
    if options.profile:
        import cProfile
        prof = cProfile.Profile()
        prof.enable()

    # This is kind of hacky, but it's the most reliable way to make sure
    # the instrumentation decorators know the setting when they run.
    os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'

    # setup() needs to be called after coverage is started, to get proper
    # coverage reports of model files, since part of setup is importing
    # the models for all applications in INSTALLED_APPS.
    django.setup()

    if options.generate_fixtures:
        generate_fixtures_command = [os.path.join(TOOLS_DIR, 'setup', 'generate-fixtures')]
        if not is_template_database_current():
            generate_fixtures_command.append('--force')
        subprocess.call(generate_fixtures_command)
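    # Note: '--force' makes generate-fixtures do a full rebuild; per the
    # check above, we request that only when the template database looks
    # out of date.
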
    TestRunner = get_runner(settings)

    """
    - To run tests in serial, set parallel=1. This is currently the
      default, because parallel mode is not yet stable.
    - To change the number of parallel processes, you can either:
      * use the 'DJANGO_TEST_PROCESSES' environment variable, or
      * set parallel > 1.
    - For automatic determination of the number of processes, set
      parallel=django.test.runner.default_test_processes().
    """
    test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
                             parallel=1, keepdb=True)
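    # A hypothetical parallel construction (per the note above; untested
    # here, since parallel mode is still unstable):
    #     test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
    #                              parallel=django.test.runner.default_test_processes(),
    #                              keepdb=True)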
    failures = test_runner.run_tests(suites, full_suite=full_suite)

    templates_not_rendered = test_runner.get_shallow_tested_templates()
    # We only check the templates if all the tests ran and passed
    if not failures and full_suite and templates_not_rendered:
        missed_count = len(templates_not_rendered)
        print("\nError: %s templates have no tests!" % (missed_count,))
        for template in templates_not_rendered:
            print(' {}'.format(template))
        print("See zerver/tests/test_templates.py for the exclude list.")
        failures = True

    if options.coverage:
        cov.stop()
        cov.save()
        if options.verbose_coverage:
            print("Printing coverage data")
            cov.report(show_missing=False)
        cov.html_report(directory='var/coverage')
        print("HTML report saved to var/coverage")

    if full_suite and not failures and options.coverage:
        # Assert that various files have full coverage
        for path in enforce_fully_covered:
            missing_lines = cov.analysis2(path)[3]
            if len(missing_lines) > 0:
                print("ERROR: %s no longer has complete backend test coverage" % (path,))
                print(" Lines missing coverage: %s" % (missing_lines,))
                print()
                failures = True
        if failures:
            print("It looks like your changes lost 100% test coverage in one or more files.")
            print("Usually, the right fix for this is to add some tests.")
            print("But also check out the include/exclude lists in tools/test-backend.")
            print("If a line is intentionally untested, you can use a # nocoverage comment.")
            print("To run this check locally, use `test-backend --coverage`.")

    if options.profile:
        prof.disable()
        prof.dump_stats("/tmp/profile.data")
        print("Profile data saved to /tmp/profile.data")
        print("You can visualize it using e.g. `runsnake /tmp/profile.data`")

    if options.report_slow_tests:
        from zerver.lib.test_runner import report_slow_tests
        # We do this even with failures, since slowness can be an
        # important clue as to why tests fail.
        report_slow_tests()

    # We'll have printed above whether the tests passed or failed.
    sys.exit(bool(failures))