py3: Switch almost all shebang lines to use `python3`.
This causes `upgrade-zulip-from-git`, as well as a no-option run of
`tools/build-release-tarball`, to produce a Zulip install running
Python 3, rather than Python 2. In particular this means that the
virtualenv we create, in which all application code runs, is Python 3.
One shebang line, on `zulip-ec2-configure-interfaces`, explicitly
keeps Python 2, and at least one external ops script, `wal-e`, also
still runs on Python 2. See discussion on the respective previous
commits that made those explicit. There may also be some other
third-party scripts we use, outside of this source tree and running
outside our virtualenv, that still run on Python 2.
2017-08-02 23:15:16 +02:00
|
|
|
#!/usr/bin/env python3
|
2016-01-23 23:16:14 +01:00
|
|
|
|
2016-03-10 17:15:34 +01:00
|
|
|
from __future__ import print_function
|
2017-02-05 21:24:28 +01:00
|
|
|
from __future__ import absolute_import
|
2017-07-04 13:12:40 +02:00
|
|
|
from typing import List, Any
|
2017-02-19 01:26:52 +01:00
|
|
|
import glob
|
2016-01-23 23:16:14 +01:00
|
|
|
import optparse
|
|
|
|
import os
|
|
|
|
import sys
|
2016-03-12 05:58:35 +01:00
|
|
|
import subprocess
|
2016-12-23 18:42:45 +01:00
|
|
|
import ujson
|
2017-07-04 13:12:40 +02:00
|
|
|
import httplib2
|
2017-07-04 15:12:16 +02:00
|
|
|
import requests
|
2016-01-23 23:16:14 +01:00
|
|
|
|
2017-02-05 21:24:28 +01:00
|
|
|
# check for the venv
|
|
|
|
from lib import sanity_check
|
|
|
|
sanity_check.check_venv(__file__)
|
|
|
|
|
|
|
|
import django
|
|
|
|
from django.conf import settings
|
|
|
|
from django.test.utils import get_runner
|
2016-01-23 23:16:14 +01:00
|
|
|
|
2017-02-19 01:26:52 +01:00
|
|
|
# Globs of files for which we intend to enforce 100% backend test
# coverage (files also listed in not_yet_fully_covered below are
# excluded from enforcement until their coverage is complete).
# Note: duplicate entries ('zerver/lib/events.py', 'zerver/lib/message.py')
# were removed; since this feeds a set comprehension they had no effect.
target_fully_covered = {path for target in [
    'analytics/lib/*.py',
    'analytics/models.py',
    'analytics/tests/*.py',
    'analytics/views.py',
    'zerver/context_processors.py',
    'zerver/lib/alert_words.py',
    'zerver/lib/attachments.py',
    'zerver/lib/avatar.py',
    'zerver/lib/avatar_hash.py',
    'zerver/lib/context_managers.py',
    'zerver/lib/domains.py',
    'zerver/lib/emoji.py',
    'zerver/lib/i18n.py',
    'zerver/lib/mention.py',
    'zerver/lib/message.py',
    'zerver/lib/name_restrictions.py',
    'zerver/lib/realm_icon.py',
    'zerver/lib/retention.py',
    'zerver/lib/streams.py',
    'zerver/lib/users.py',
    'zerver/lib/webhooks/*.py',
    'zerver/logging_handlers.py',
    # As a project, we require 100% test coverage in the views files.
    'zerver/views/*.py',
    # Test files should have 100% coverage; test code that isn't run
    # is likely a bug in the test.
    'zerver/tests/*.py',
    # Once we have a nice negative tests system, we can add these:
    # 'zerver/webhooks/*/*.py',
    # 'zerver/webhooks/*/*/*.py',
    'zproject/backends.py',
    # Uncovered but in exclude list and we'd like to have included soon
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/events.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/integrations.py',
    'zerver/lib/narrow.py',
    'zerver/lib/notifications.py',
    'zerver/lib/request.py',
    'zerver/lib/response.py',
    'zerver/lib/sessions.py',
    'zerver/lib/test_helpers.py',
    'zerver/lib/test_classes.py',
    'zerver/lib/upload.py',
    'zerver/lib/validator.py',
    'zerver/models.py',
    'zerver/webhooks/*/*.py',
] for path in glob.glob(target)}
|
|
|
|
|
|
|
|
# Files matched by target_fully_covered above that we know still lack
# full coverage; they are subtracted out when building
# enforce_fully_covered, so enforcement only starts once a file is
# actually fully covered (and removed from this set).
not_yet_fully_covered = {
    # Analytics fixtures library is used to generate test fixtures;
    # isn't properly accounted for in test coverage analysis since it
    # runs before tests.
    'analytics/lib/fixtures.py',
    # We have 100% coverage on the new stuff; need to refactor old stuff.
    'analytics/views.py',
    # Major lib files should have 100% coverage
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/i18n.py',
    'zerver/lib/message.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/upload.py',
    'zerver/models.py',
    # Webhook integrations with incomplete coverage
    'zerver/webhooks/basecamp/view.py',
    'zerver/webhooks/beanstalk/view.py',
    'zerver/webhooks/bitbucket2/view.py',
    'zerver/webhooks/freshdesk/view.py',
    'zerver/webhooks/github/view.py',
    'zerver/webhooks/github_webhook/view.py',
    'zerver/webhooks/gitlab/view.py',
    'zerver/webhooks/gogs/view.py',
    'zerver/webhooks/greenhouse/view.py',
    'zerver/webhooks/hellosign/view.py',
    'zerver/webhooks/ifttt/view.py',
    'zerver/webhooks/jira/view.py',
    'zerver/webhooks/librato/view.py',
    'zerver/webhooks/newrelic/view.py',
    'zerver/webhooks/pingdom/view.py',
    'zerver/webhooks/pivotal/view.py',
    'zerver/webhooks/semaphore/view.py',
    'zerver/webhooks/solano/view.py',
    'zerver/webhooks/stripe/view.py',
    'zerver/webhooks/taiga/view.py',
    'zerver/webhooks/teamcity/view.py',
    'zerver/webhooks/transifex/view.py',
    'zerver/webhooks/travis/view.py',
    'zerver/webhooks/updown/view.py',
    'zerver/webhooks/zapier/view.py',
}
|
|
|
|
|
|
|
|
# Final enforcement list: targeted files minus known exceptions,
# sorted so iteration/reporting order is deterministic.
enforce_fully_covered = sorted(target_fully_covered - not_yet_fully_covered)

# Where the list of tests that failed in the previous run is persisted
# so that --rerun can replay exactly those tests.
FAILED_TEST_PATH = 'var/last_test_failure.json'
|
|
|
|
|
|
|
|
def get_failed_tests():
    # type: () -> List[str]
    """Return the list of tests recorded as failing in the previous run.

    If no failure record exists yet, print a notice and return an
    empty list so that all tests get run.
    """
    try:
        with open(FAILED_TEST_PATH, 'r') as failure_log:
            return ujson.load(failure_log)
    except IOError:
        print("var/last_test_failure.json doesn't exist; running all tests.")
        return []
|
|
|
|
|
|
|
|
def write_failed_tests(failed_tests):
    # type: (List[str]) -> None
    """Persist the failed-test list for a later --rerun.

    Does nothing when no tests failed, so a clean run leaves any
    previous record untouched.
    """
    if not failed_tests:
        return
    with open(FAILED_TEST_PATH, 'w') as failure_log:
        ujson.dump(failed_tests, failure_log)
|
|
|
|
|
2017-07-04 13:12:40 +02:00
|
|
|
def block_internet():
    # type: () -> None
    """Monkey-patch the common HTTP entry points so that any test which
    tries to reach the outside network fails loudly instead of silently
    depending on internet access.

    We assume most tests would reach the internet via httplib2 or
    requests, so we replace their request entry points with a guard
    that raises.
    """
    def internet_guard(*args, **kwargs):
        # type: (*Any, **Any) -> None
        # Fixed missing space after "here:" so the URL in the error
        # message is readable/clickable.
        raise Exception("Outgoing network requests are not allowed in the Zulip tests. "
                        "More details and advice are available here: "
                        "https://zulip.readthedocs.io/en/latest/testing.html#internet-access-inside-test-suits")

    httplib2.Http.request = internet_guard
    requests.request = internet_guard
    # requests.get/post/etc. call through Session.request rather than the
    # module-level requests.request patched above, so patch that entry
    # point as well to actually block those helpers.
    requests.sessions.Session.request = internet_guard
|
2017-07-04 13:12:40 +02:00
|
|
|
|
2016-01-23 23:16:14 +01:00
|
|
|
if __name__ == "__main__":
    # Install the network guard before anything else can make a request.
    block_internet()
    # Run from the repository root, and make the repo root importable.
    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(os.path.dirname(TOOLS_DIR))
    sys.path.insert(0, os.path.dirname(TOOLS_DIR))

    # Remove proxy settings for running backend tests
    os.environ["http_proxy"] = ""
    os.environ["https_proxy"] = ""

    from zerver.lib.test_fixtures import is_template_database_current

    from tools.lib.test_script import (
        get_provisioning_status,
    )

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
    # "-u" uses unbuffered IO, which is important when wrapping it in subprocess
    os.environ['PYTHONUNBUFFERED'] = 'y'

    usage = """%prog [options]
    test-backend # Runs all backend tests
    test-backend zerver.tests.test_bugdown # run all tests in a test module
    test-backend zerver/tests/test_bugdown.py # run all tests in a test module
    test-backend test_bugdown # run all tests in a test module
    test-backend zerver.tests.test_bugdown.BugdownTest # run all tests in a test class
    test-backend BugdownTest # run all tests in a test class
    test-backend zerver.tests.test_bugdown.BugdownTest.test_inline_youtube # run a single test
    test-backend BugdownTest.test_inline_youtube # run a single test"""

    parser = optparse.OptionParser(usage)

    parser.add_option('--nonfatal-errors', action="store_false", default=True,
                      dest="fatal_errors", help="Continue past test failures to run all tests")
    parser.add_option('--coverage', dest='coverage',
                      action="store_true",
                      default=False,
                      help='Compute test coverage.')
    parser.add_option('--verbose-coverage', dest='verbose_coverage',
                      action="store_true",
                      default=False, help='Enable verbose print of coverage report.')
|
|
def allow_positive_int(option, opt_str, value, parser):
    # type: (optparse.Option, str, int, optparse.OptionParser) -> None
    """optparse callback that accepts only integer values >= 1.

    Stores the value on the parser when valid; raises
    OptionValueError otherwise.
    """
    if value >= 1:
        setattr(parser.values, option.dest, value)
    else:
        raise optparse.OptionValueError(
            "option {}: Only positive integers are allowed.".format(opt_str))
|
|
|
|
|
|
|
|
# Remaining command-line options; see `usage` above for examples.
parser.add_option('--processes', dest='processes',
                  type="int",
                  callback=allow_positive_int,
                  action='callback',
                  default=None,
                  help='Specify the number of processes to run the '
                       'tests in. Default is 4.')
parser.add_option('--profile', dest='profile',
                  action="store_true",
                  default=False, help='Profile test runtime.')
parser.add_option('--force', dest='force',
                  action="store_true",
                  default=False, help='Run tests despite possible problems.')
parser.add_option('--no-shallow', dest='no_shallow',
                  action="store_true",
                  default=False,
                  help="Don't allow shallow testing of templates (deprecated)")
parser.add_option('--verbose', dest='verbose',
                  action="store_true",
                  default=False,
                  help="Show detailed output")
parser.add_option('--generate-fixtures', action="store_true", default=False,
                  dest="generate_fixtures",
                  help=("Force a call to generate-fixtures."))
parser.add_option('--report-slow-tests', dest='report_slow_tests',
                  action="store_true",
                  default=False,
                  help="Show which tests are slowest.")
parser.add_option('--reverse', dest='reverse',
                  action="store_true",
                  default=False,
                  help="Run tests in reverse order.")
# PEP 8: no spaces around '=' in keyword arguments (was `action = "store_true"`).
parser.add_option('--rerun', dest="rerun",
                  action="store_true",
                  default=False,
                  help=("Run the tests which failed the last time "
                        "test-backend was run. Implies --nonfatal-errors."))

(options, args) = parser.parse_args()
|
2016-09-28 10:37:58 +02:00
|
|
|
# All backend test modules live here; used below to resolve shorthand
# test-name arguments into full module paths.
zerver_test_dir = 'zerver/tests/'

# While running --rerun, we read var/last_test_failure.json to get
# the list of tests that failed on the last run, and then pretend
# those tests were passed explicitly.  --rerun implies
# --nonfatal-errors, so that we don't end up removing tests from
# the list that weren't run.
if options.rerun:
    # Serial mode keeps the failed-test bookkeeping accurate.
    options.processes = 1
    options.fatal_errors = False
    failed_tests = get_failed_tests()
    if failed_tests:
        args = failed_tests
if len(args) > 0:
    # If we passed a specific set of tests, run in serial mode.
    options.processes = 1
|
2016-12-23 18:42:45 +01:00
|
|
|
|
2016-09-28 10:37:58 +02:00
|
|
|
# Transform forward slashes '/' present in the argument into dots '.',
# the module-path separator the test runner expects.  enumerate is used
# instead of args.index() so that duplicate arguments are each rewritten
# in place (index() always returns the first match).
for suite_index, suite in enumerate(args):
    args[suite_index] = suite.rstrip('/').replace("/", ".")
|
2016-09-28 10:37:58 +02:00
|
|
|
|
2016-09-28 20:51:41 +02:00
|
|
|
def rewrite_arguments(search_key):
    # type: (str) -> None
    """Replace the current `suite` entry in `args` with a full module path.

    Walks the backend test directory looking for the test file whose
    contents contain search_key (a class name or declaration line),
    then rewrites the matching entry of `args` in place and returns.
    Relies on `suite`, `args` and `zerver_test_dir` from the enclosing
    scope.
    """
    for root, dirs, files_names in os.walk(zerver_test_dir, topdown=False):
        for file_name in files_names:
            # Check for files starting with alphanumeric characters and ending with '.py'
            # Ignore backup files if any
            if not file_name[0].isalnum() or not file_name.endswith(".py"):
                continue
            filepath = os.path.join(root, file_name)
            # Use a context manager so the file is closed promptly even
            # when we return from inside the loop; the original
            # `for line in open(filepath)` left the handle for the GC.
            with open(filepath) as test_file:
                for line in test_file:
                    if search_key not in line:
                        continue
                    new_suite = filepath.replace(".py", ".") + suite
                    args[args.index(suite)] = new_suite
                    return
|
2016-09-28 20:51:41 +02:00
|
|
|
|
2016-09-28 10:37:58 +02:00
|
|
|
# Expand bare class names (e.g. BugdownTest or BugdownTest.test_foo)
# into full module paths by searching the test files for the class.
for suite in args:
    if suite[0].isupper() and "test_" in suite:
        # ClassName.test_method: search for the class-name portion.
        classname = suite.rsplit('.', 1)[0]
        rewrite_arguments(classname)
    elif suite[0].isupper():
        # Bare ClassName: search for its class declaration line.
        rewrite_arguments('class %s(' % (suite,))
|
2016-09-28 10:37:58 +02:00
|
|
|
|
|
|
|
# Expand bare test file names (e.g. test_bugdown) into their paths
# within the backend test directory.  enumerate is used instead of
# args.index() so that rewrites are applied to the right slot even
# with duplicate arguments; the original args.index(suite) could also
# raise ValueError if a second file matched after the entry had
# already been rewritten.
for suite_index, suite in enumerate(args):
    if suite.startswith('test'):
        for root, dirs, files_names in os.walk(zerver_test_dir):
            for file_name in files_names:
                if file_name == suite or file_name == suite + ".py":
                    args[suite_index] = os.path.join(root, file_name)
                    break

# Strip .py extensions left over from path-style arguments.
for suite_index, suite in enumerate(args):
    args[suite_index] = suite.replace(".py", "")

# to transform forward slashes '/' introduced by the zerver_test_dir into dots '.'
# taking care of any forward slashes that might be present
for suite_index, suite in enumerate(args):
    args[suite_index] = suite.replace("/", ".")

# Remember whether the whole default suite is being run; several
# checks below (templates, coverage enforcement) only apply then.
full_suite = len(args) == 0
|
|
|
|
|
2016-01-23 23:16:14 +01:00
|
|
|
# With no explicit arguments, run the full default suite set.
if len(args) == 0:
    suites = ["zerver.tests",
              "zerver.webhooks",
              "analytics.tests"]
else:
    suites = args

if not options.force:
    # Refuse to run against an out-of-date provisioning state unless forced.
    ok, msg = get_provisioning_status()
    if not ok:
        print(msg)
        print('If you really know what you are doing, use --force to run anyway.')
        sys.exit(1)

if options.coverage:
    import coverage
    # 'multiprocessing' concurrency makes each worker write its own
    # data file; they are combined after the run below.
    cov = coverage.Coverage(config_file="tools/coveragerc", concurrency='multiprocessing')
    cov.start()
if options.profile:
    import cProfile
    prof = cProfile.Profile()
    prof.enable()

# This is kind of hacky, but it's the most reliable way
# to make sure instrumentation decorators know the
# setting when they run.
os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'

# setup() needs to be called after coverage is started to get proper coverage reports of model
# files, since part of setup is importing the models for all applications in INSTALLED_APPS.
django.setup()

if options.generate_fixtures or not is_template_database_current():
    generate_fixtures_command = [os.path.join(TOOLS_DIR, 'setup', 'generate-fixtures')]
    generate_fixtures_command.append('--force')
    subprocess.call(generate_fixtures_command)

# Build the webpack bundle the tests expect to exist.
subprocess.check_call(['tools/webpack', '--test'])

# Default process count when neither --processes nor a serial-mode
# override above has set it.
if options.processes is None:
    options.processes = 4
|
|
|
|
|
2016-01-23 23:16:14 +01:00
|
|
|
TestRunner = get_runner(settings)
parallel = options.processes

if parallel > 1:
    print("-- Running tests in parallel mode with {} "
          "processes.".format(parallel))
else:
    print("-- Running tests in serial mode.")

# failfast honors --nonfatal-errors; keepdb reuses the test database
# between runs.
test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
                         parallel=parallel, reverse=options.reverse,
                         keepdb=True)
failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite)
# Record failures so a later --rerun can replay exactly these tests.
write_failed_tests(failed_tests)

templates_not_rendered = test_runner.get_shallow_tested_templates()
# We only check the templates if all the tests ran and passed
if not failures and full_suite and templates_not_rendered:
    missed_count = len(templates_not_rendered)
    print("\nError: %s templates have no tests!" % (missed_count,))
    for template in templates_not_rendered:
        print(' {}'.format(template))
    print("See zerver/tests/test_templates.py for the exclude list.")
    # Treat missing template coverage as a failure of the run.
    failures = True
|
2016-05-20 14:53:47 +02:00
|
|
|
|
2016-01-23 23:16:14 +01:00
|
|
|
if options.coverage:
    cov.stop()
    cov.save()
    # Merge the per-process .coverage.* data files into one.
    cov.combine()
    cov.data_suffix = False  # Disable suffix so that filename is .coverage
    cov.save()
    if options.verbose_coverage:
        print("Printing coverage data")
        cov.report(show_missing=False)
    cov.html_report(directory='var/coverage')
    print("HTML report saved; visit at http://127.0.0.1:9991/coverage/index.html")
if full_suite and not failures and options.coverage:
    # Assert that various files have full coverage
    for path in enforce_fully_covered:
        # analysis2 returns (filename, statements, excluded, missing, ...);
        # index 3 is the list of uncovered line numbers.
        missing_lines = cov.analysis2(path)[3]
        if len(missing_lines) > 0:
            print("ERROR: %s no longer has complete backend test coverage" % (path,))
            print(" Lines missing coverage: %s" % (missing_lines,))
            print()
            failures = True
    if failures:
        print("It looks like your changes lost 100% test coverage in one or more files")
        print("Usually, the right fix for this is to add some tests.")
        print("But also check out the include/exclude lists in tools/test-backend.")
        print("If this line intentionally is not tested, you can use a # nocoverage comment.")
        print("To run this check locally, use `test-backend --coverage`.")
if options.profile:
    prof.disable()
    prof.dump_stats("/tmp/profile.data")
    print("Profile data saved to /tmp/profile.data")
    print("You can visualize it using e.g. `snakeviz /tmp/profile.data`")

if options.report_slow_tests:
    from zerver.lib.test_runner import report_slow_tests
    # We do this even with failures, since slowness can be
    # an important clue as to why tests fail.
    report_slow_tests()

# We'll have printed whether tests passed or failed above
sys.exit(bool(failures))
|