py3: Switch almost all shebang lines to use `python3`.
This causes `upgrade-zulip-from-git`, as well as a no-option run of
`tools/build-release-tarball`, to produce a Zulip install running
Python 3, rather than Python 2. In particular this means that the
virtualenv we create, in which all application code runs, is Python 3.
One shebang line, on `zulip-ec2-configure-interfaces`, explicitly
keeps Python 2, and at least one external ops script, `wal-e`, also
still runs on Python 2. See discussion on the respective previous
commits that made those explicit. There may also be some other
third-party scripts we use, outside of this source tree and running
outside our virtualenv, that still run on Python 2.
2017-08-02 23:15:16 +02:00
|
|
|
#!/usr/bin/env python3
|
2016-01-23 23:16:14 +01:00
|
|
|
|
2017-11-09 08:41:47 +01:00
|
|
|
import argparse
|
2020-01-22 17:41:49 +01:00
|
|
|
import contextlib
|
2020-06-11 00:54:34 +02:00
|
|
|
import glob
|
2016-01-23 23:16:14 +01:00
|
|
|
import os
|
2019-01-15 02:58:03 +01:00
|
|
|
import shlex
|
2016-03-12 05:58:35 +01:00
|
|
|
import subprocess
|
2020-06-11 00:54:34 +02:00
|
|
|
import sys
|
2019-01-15 02:58:03 +01:00
|
|
|
import tempfile
|
2020-06-11 00:54:34 +02:00
|
|
|
from typing import Iterator, List
|
|
|
|
from unittest import mock
|
2016-01-23 23:16:14 +01:00
|
|
|
|
2021-07-03 08:22:44 +02:00
|
|
|
# Resolve the tools/ directory and run everything from the repo root, so
# relative paths like "tools/webpack" and the coverage globs below work.
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(os.path.dirname(TOOLS_DIR))
# Make the repo root importable (tools.lib, zerver, zproject, ...).
sys.path.insert(0, os.path.dirname(TOOLS_DIR))

# check for the venv
from tools.lib import sanity_check

# Abort early with a helpful message if we're not running inside the
# provisioned virtualenv.
sanity_check.check_venv(__file__)
|
|
|
|
|
2017-02-05 21:24:28 +01:00
|
|
|
import django
|
2020-08-07 01:09:47 +02:00
|
|
|
import orjson
|
2020-06-11 00:54:34 +02:00
|
|
|
import responses
|
2017-02-05 21:24:28 +01:00
|
|
|
from django.conf import settings
|
|
|
|
from django.test.utils import get_runner
|
2016-01-23 23:16:14 +01:00
|
|
|
|
2020-09-02 02:50:08 +02:00
|
|
|
# Glob patterns for files where we enforce 100% backend test coverage.
# Files also matched by `not_yet_fully_covered` below are excused when
# `enforce_fully_covered` is computed.
target_fully_covered = [
    "analytics/lib/*.py",
    "analytics/models.py",
    "analytics/tests/*.py",
    "analytics/views/*.py",
    # zerver/ and zerver/lib/ are important core files
    "zerver/*.py",
    "zerver/lib/*.py",
    "zerver/lib/*/*.py",
    "zerver/lib/*/*/*.py",
    "zerver/data_import/*.py",
    "zerver/templatetags/*.py",
    "zerver/tornado/*.py",
    # Billing files require 100% test coverage
    "corporate/lib/stripe.py",
    "corporate/views.py",
    # Test files should have 100% coverage; test code that isn't run
    # is likely a bug in the test.
    "zerver/tests/*.py",
    "corporate/tests/*.py",
    # As a project, we require 100% test coverage in the views files.
    "zerver/views/*.py",
    "zproject/backends.py",
    "confirmation/*.py",
    "zerver/webhooks/*/*.py",
    # Once we have a nice negative tests system, we can add these:
    # 'zerver/webhooks/*/*.py',
    # 'zerver/webhooks/*/*/*.py',
    "zerver/worker/*.py",
]
|
2017-02-19 01:26:52 +01:00
|
|
|
|
2020-09-02 02:50:08 +02:00
|
|
|
# Files that do not yet have 100% coverage and are therefore excused from
# the enforcement in `enforce_fully_covered`.  The goal is to shrink this
# list over time; fully covered entries must be removed (main() errors on
# entries that have become fully covered).
not_yet_fully_covered = [
    # Analytics fixtures library is used to generate test fixtures;
    # isn't properly accounted for in test coverage analysis since it
    # runs before tests.
    "analytics/lib/fixtures.py",
    # We have 100% coverage on the new stuff; need to refactor old stuff.
    "analytics/views/activity_common.py",
    "analytics/views/realm_activity.py",
    "analytics/views/installation_activity.py",
    "analytics/views/stats.py",
    "analytics/views/support.py",
    # Major lib files should have 100% coverage
    "zerver/lib/addressee.py",
    "zerver/lib/markdown/__init__.py",
    "zerver/lib/cache.py",
    "zerver/lib/cache_helpers.py",
    "zerver/lib/i18n.py",
    "zerver/lib/email_notifications.py",
    "zerver/lib/send_email.py",
    "zerver/lib/url_preview/preview.py",
    "zerver/worker/queue_processors.py",
    # Markdown sub-libs should have full coverage too; a lot are really close
    "zerver/lib/markdown/api_arguments_table_generator.py",
    "zerver/lib/markdown/fenced_code.py",
    "zerver/lib/markdown/help_relative_links.py",
    "zerver/lib/markdown/nested_code_blocks.py",
    # Other lib files that ideally would coverage, but aren't sorted
    "zerver/filters.py",
    "zerver/middleware.py",
    "zerver/lib/bot_lib.py",
    "zerver/lib/camo.py",
    "zerver/lib/debug.py",
    "zerver/lib/error_notify.py",
    "zerver/lib/export.py",
    "zerver/lib/fix_unreads.py",
    "zerver/lib/import_realm.py",
    "zerver/lib/logging_util.py",
    "zerver/lib/migrate.py",
    "zerver/lib/profile.py",
    "zerver/lib/queue.py",
    "zerver/lib/sqlalchemy_utils.py",
    "zerver/lib/storage.py",
    "zerver/lib/unminify.py",
    "zerver/lib/utils.py",
    "zerver/lib/zephyr.py",
    "zerver/lib/templates.py",
    "zerver/templatetags/minified_js.py",
    # Low priority for coverage
    "zerver/lib/ccache.py",
    "zerver/lib/generate_test_data.py",
    "zerver/lib/server_initialization.py",
    "zerver/lib/test_fixtures.py",
    "zerver/lib/test_runner.py",
    "zerver/lib/test_console_output.py",
    "zerver/openapi/python_examples.py",
    # Tornado should ideally have full coverage, but we're not there.
    "zerver/tornado/descriptors.py",
    "zerver/tornado/django_api.py",
    "zerver/tornado/event_queue.py",
    "zerver/tornado/exceptions.py",
    "zerver/tornado/handlers.py",
    "zerver/tornado/ioloop_logging.py",
    "zerver/tornado/sharding.py",
    "zerver/tornado/views.py",
    # Data import files; relatively low priority
    "zerver/data_import/sequencer.py",
    "zerver/data_import/slack.py",
    "zerver/data_import/gitter.py",
    "zerver/data_import/import_util.py",
    # Webhook integrations with incomplete coverage
    "zerver/webhooks/greenhouse/view.py",
    "zerver/webhooks/jira/view.py",
    "zerver/webhooks/librato/view.py",
    "zerver/webhooks/pivotal/view.py",
    "zerver/webhooks/solano/view.py",
    "zerver/webhooks/teamcity/view.py",
    "zerver/webhooks/travis/view.py",
    "zerver/webhooks/zapier/view.py",
    # Cannot have coverage, as tests run in a transaction
    "zerver/lib/safe_session_cached_db.py",
    "zerver/lib/singleton_bmemcached.py",
]
|
2017-02-19 01:26:52 +01:00
|
|
|
|
2020-09-02 02:50:08 +02:00
|
|
|
# The enforced-coverage set: every file matched by `target_fully_covered`
# that is not excused by `not_yet_fully_covered`.  Globs resolve relative
# to the repo root (we chdir there above at import time).
_covered_paths = {path for target in target_fully_covered for path in glob.glob(target)}
_excused_paths = {path for target in not_yet_fully_covered for path in glob.glob(target)}
enforce_fully_covered = sorted(_covered_paths - _excused_paths)
|
2017-02-19 01:26:52 +01:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
# Where write_failed_tests() records failures for `test-backend --rerun`.
FAILED_TEST_PATH = "var/last_test_failure.json"
|
2016-12-23 18:42:45 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2019-03-15 15:37:42 +01:00
|
|
|
def get_failed_tests() -> List[str]:
    """Return the list of tests recorded as failing on the previous run.

    Reads FAILED_TEST_PATH (written by write_failed_tests); returns an
    empty list when the file doesn't exist, so `--rerun` falls back to
    running the full suite.
    """
    try:
        with open(FAILED_TEST_PATH, "rb") as f:
            return orjson.loads(f.read())
    except OSError:
        # Use the constant in the message, rather than duplicating the
        # literal path, so the two can't drift apart.
        print(f"{FAILED_TEST_PATH} doesn't exist; running all tests.")
        return []
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2019-03-15 15:37:42 +01:00
|
|
|
def write_failed_tests(failed_tests: List[str]) -> None:
    """Record failing tests so a later `test-backend --rerun` can replay them."""
    if not failed_tests:
        return
    with open(FAILED_TEST_PATH, "wb") as failure_file:
        failure_file.write(orjson.dumps(failed_tests))
|
2016-12-23 18:42:45 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-01-22 17:41:49 +01:00
|
|
|
@contextlib.contextmanager
def block_internet() -> Iterator[None]:
    """Context manager that turns any outgoing network request into an error.

    The responses library raises requests.ConnectionError when an
    unregistered URL is accessed; we monkey-patch that exception class to
    ZulipInternetBlockedError so our explanatory message propagates all
    the way up to the test output.
    """
    swap_exception = mock.patch.object(responses, "ConnectionError", new=ZulipInternetBlockedError)
    # Running the tests inside RequestsMock causes any attempt to reach
    # the internet to raise the (patched) exception above.
    with swap_exception, responses.RequestsMock():
        yield
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-01-22 17:41:49 +01:00
|
|
|
class ZulipInternetBlockedError(Exception):
    """Raised when test code attempts an outgoing network request.

    block_internet() substitutes this class for the responses library's
    ConnectionError, so this explanation reaches the test output.
    """

    def __init__(self, original_msg: str) -> None:
        # BUG FIX: the two literals below used to concatenate with no
        # separator, rendering "here:https://..." in the error message;
        # add the missing space so the URL is readable/clickable.
        zulip_msg = (
            "Outgoing network requests are not allowed in the Zulip tests. "
            "More details and advice are available here: "
            "https://zulip.readthedocs.io/en/latest/testing/testing.html#internet-access-inside-test-suites"
        )
        msg = f"{zulip_msg}\nResponses library error message: {original_msg}"
        super().__init__(msg)
|
2017-07-04 13:12:40 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2019-03-14 17:01:01 +01:00
|
|
|
def main() -> None:
    """Entry point for test-backend.

    Parses options, rewrites user-supplied test arguments into Django
    test labels, runs the backend suite with internet access blocked,
    and optionally produces coverage and profiling reports.  Exits with
    a nonzero status on any failure.
    """
    default_parallel = os.cpu_count()

    # Remove proxy settings for running backend tests
    os.environ.pop("http_proxy", "")
    os.environ.pop("https_proxy", "")

    # Imported inside main() so they run only after the module-level venv
    # sanity check and sys.path setup have completed.
    from tools.lib.test_script import (
        add_provision_check_override_param,
        assert_provisioning_status_ok,
    )
    from zerver.lib.test_fixtures import (
        remove_test_run_directories,
        update_test_databases_if_required,
    )

    os.environ["DJANGO_SETTINGS_MODULE"] = "zproject.test_settings"
    # "-u" uses unbuffered IO, which is important when wrapping it in subprocess
    os.environ["PYTHONUNBUFFERED"] = "y"

    usage = """test-backend [options]
    test-backend # Runs all backend tests
    test-backend zerver.tests.test_markdown # run all tests in a test module
    test-backend zerver/tests/test_markdown.py # run all tests in a test module
    test-backend test_markdown # run all tests in a test module
    test-backend zerver.tests.test_markdown.MarkdownTest # run all tests in a test class
    test-backend MarkdownTest # run all tests in a test class
    test-backend zerver.tests.test_markdown.MarkdownTest.test_inline_youtube # run a single test
    test-backend MarkdownTest.test_inline_youtube # run a single test"""

    parser = argparse.ArgumentParser(
        description=usage, formatter_class=argparse.RawTextHelpFormatter
    )

    parser.add_argument(
        "-x",
        "--stop",
        action="store_true",
        dest="fatal_errors",
        help="Stop running tests after the first failure.",
    )
    parser.add_argument("--coverage", action="store_true", help="Compute test coverage.")
    parser.add_argument(
        "--verbose-coverage", action="store_true", help="Enable verbose print of coverage report."
    )
    parser.add_argument(
        "--xml-report", action="store_true", help="Enable (slow) XML coverage report."
    )
    parser.add_argument(
        "--no-html-report", action="store_true", help="Disable (slow) HTML coverage report."
    )
    parser.add_argument(
        "--no-cov-cleanup", action="store_true", help="Do not clean generated coverage files."
    )

    parser.add_argument(
        "--parallel",
        dest="processes",
        type=int,
        default=None,
        help="Specify the number of processes to run the "
        "tests in. Default is the number of logical CPUs",
    )
    parser.add_argument("--profile", action="store_true", help="Profile test runtime.")
    add_provision_check_override_param(parser)
    parser.add_argument(
        "--no-shallow",
        action="store_true",
        help="Don't allow shallow testing of templates (deprecated)",
    )
    parser.add_argument("--verbose", action="store_true", help="Show detailed output")
    parser.add_argument("--reverse", action="store_true", help="Run tests in reverse order.")
    parser.add_argument(
        "--rerun",
        action="store_true",
        help=(
            "Run the tests which failed the last time " "test-backend was run. Implies not --stop."
        ),
    )
    parser.add_argument(
        "--include-webhooks",
        action="store_true",
        help=("Include webhook tests. By default, they are skipped for performance."),
    )
    parser.add_argument(
        "--generate-stripe-fixtures",
        action="store_true",
        help=("Generate Stripe test fixtures by making requests to Stripe test network"),
    )
    parser.add_argument("args", nargs="*")
    parser.add_argument(
        "--ban-console-output",
        action="store_true",
        help="Require stdout and stderr to be clean of unexpected output.",
    )

    options = parser.parse_args()
    if options.ban_console_output:
        os.environ["BAN_CONSOLE_OUTPUT"] = "1"

    args = options.args
    # Coverage enforcement only makes sense over the full suite, so
    # --coverage implies including the webhook tests.
    include_webhooks = options.coverage or options.include_webhooks

    if options.processes is not None and options.processes < 1:
        raise argparse.ArgumentTypeError("option processes: Only positive integers are allowed.")

    zerver_test_dir = "zerver/tests/"

    # While running --rerun, we read var/last_test_failure.json to get
    # the list of tests that failed on the last run, and then pretend
    # those tests were passed explicitly.  --rerun implies
    # !fatal_errors, so that we don't end up removing tests from
    # the list that weren't run.
    if options.rerun:
        default_parallel = 1
        options.fatal_errors = False
        failed_tests = get_failed_tests()
        if failed_tests:
            args = failed_tests
    if len(args) > 0:
        # If we passed a specific set of tests, run in serial mode.
        default_parallel = 1

    # to transform forward slashes '/' present in the argument into dots '.'
    for i, suite in enumerate(args):
        args[i] = suite.rstrip("/").replace("/", ".")

    def rewrite_arguments(search_key: str) -> None:
        # Scan zerver/tests/ for the file containing `search_key` and
        # rewrite the current argument into a dotted module path.
        # NOTE(review): this closure reads `suite` and writes `args[i]`
        # from the enclosing loops' variables rather than taking them as
        # parameters; when called from the `for suite in args:` loop
        # below, `i` is whatever the previous enumerate loop left behind
        # -- confirm this index is the intended one.
        for root, dirs, files_names in os.walk(zerver_test_dir, topdown=False):
            for file_name in files_names:
                # Check for files starting with alphanumeric characters and ending with '.py'
                # Ignore backup files if any
                if not file_name[0].isalnum() or not file_name.endswith(".py"):
                    continue
                filepath = os.path.join(root, file_name)
                # NOTE(review): file handle is never closed explicitly;
                # relies on CPython refcounting.
                for line in open(filepath):
                    if search_key not in line:
                        continue
                    new_suite = filepath.replace(".py", ".") + suite
                    args[i] = new_suite
                    return

    # Arguments that look like bare class names (optionally with a
    # test method) are resolved to their defining module.
    for suite in args:
        if suite[0].isupper() and "test_" in suite:
            classname = suite.rsplit(".", 1)[0]
            rewrite_arguments(classname)
        elif suite[0].isupper():
            rewrite_arguments(f"class {suite}(")

    # Bare test-module names (e.g. "test_markdown") are resolved to
    # their path under zerver/tests/.
    for i, suite in enumerate(args):
        if suite.startswith("test"):
            for root, dirs, files_names in os.walk(zerver_test_dir):
                for file_name in files_names:
                    if file_name == suite or file_name == suite + ".py":
                        new_suite = os.path.join(root, file_name)
                        args[i] = new_suite
                        break

    for i, suite in enumerate(args):
        args[i] = suite.replace(".py", "")

    # to transform forward slashes '/' introduced by the zerver_test_dir into dots '.'
    # taking care of any forward slashes that might be present
    for i, suite in enumerate(args):
        args[i] = suite.replace("/", ".")

    full_suite = len(args) == 0

    if full_suite:
        suites = [
            "zerver.tests",
            "analytics.tests",
            "corporate.tests",
        ]
    else:
        suites = args

    if full_suite and include_webhooks:
        suites.append("zerver.webhooks")

    if options.generate_stripe_fixtures:
        if full_suite:
            suites = [
                "corporate.tests.test_stripe",
            ]
            full_suite = False
        os.environ["GENERATE_STRIPE_FIXTURES"] = "1"

    assert_provisioning_status_ok(options.skip_provision_check)

    if options.coverage:
        import coverage

        # multiprocessing concurrency matches the parallel test runner.
        cov = coverage.Coverage(
            data_suffix="", config_file="tools/coveragerc", concurrency="multiprocessing"
        )
        # Do not clean .coverage file in continuous integration job so that coverage data can be uploaded.
        if not options.no_cov_cleanup:
            import atexit

            atexit.register(lambda: cov.erase()) # Ensure the data file gets cleaned up at the end.
        cov.start()
    if options.profile:
        import cProfile

        prof = cProfile.Profile()
        prof.enable()

    # This is kind of hacky, but it's the most reliable way
    # to make sure instrumentation decorators know the
    # setting when they run.
    os.environ["TEST_INSTRUMENT_URL_COVERAGE"] = "TRUE"

    # setup() needs to be called after coverage is started to get proper coverage reports of model
    # files, since part of setup is importing the models for all applications in INSTALLED_APPS.
    django.setup()

    update_test_databases_if_required()

    subprocess.check_call(["tools/webpack", "--test"])

    TestRunner = get_runner(settings)

    parallel = default_parallel if options.processes is None else options.processes
    if parallel > 1:
        print(f"-- Running tests in parallel mode with {parallel} processes.", flush=True)
    else:
        print("-- Running tests in serial mode.", flush=True)

    # All tests run with outgoing network access blocked; see
    # block_internet() above.
    with block_internet():
        test_runner = TestRunner(
            failfast=options.fatal_errors,
            verbosity=2,
            parallel=parallel,
            reverse=options.reverse,
            keepdb=True,
        )
        failures, failed_tests = test_runner.run_tests(
            suites, full_suite=full_suite, include_webhooks=include_webhooks
        )
        write_failed_tests(failed_tests)

        templates_not_rendered = test_runner.get_shallow_tested_templates()
        # We only check the templates if all the tests ran and passed
        if not failures and full_suite and templates_not_rendered:
            missed_count = len(templates_not_rendered)
            print(f"\nError: {missed_count} templates have no tests!")
            for template in templates_not_rendered:
                print(f"  {template}")
            print("See zerver/tests/test_templates.py for the exclude list.")
            failures = True

    if options.coverage:
        cov.stop()
        cov.save()
        # Merge the per-process data files produced by parallel mode.
        cov.combine()
        cov.save()
        if options.verbose_coverage:
            print("Printing coverage data")
            cov.report(show_missing=False)
        if options.xml_report:
            print("Writing XML report")
            cov.xml_report(outfile="var/coverage.xml")
            print("XML report saved; see var/coverage.xml")
        if not options.no_html_report:
            print("Writing HTML report")
            cov.html_report(directory="var/coverage", show_contexts=True)
            print("HTML report saved; visit at http://127.0.0.1:9991/coverage/index.html")
    if full_suite and not failures and options.coverage:
        # Assert that various files have full coverage
        for path in enforce_fully_covered:
            missing_lines = cov.analysis2(path)[3]
            if len(missing_lines) > 0:
                print(f"ERROR: {path} no longer has complete backend test coverage")
                print(f"  Lines missing coverage: {missing_lines}")
                print()
                failures = True
        if failures:
            print("It looks like your changes lost 100% test coverage in one or more files")
            print("Usually, the right fix for this is to add some tests.")
            print("But also check out the include/exclude lists in tools/test-backend.")
            print("If this line intentionally is not tested, you can use a # nocoverage comment.")
            print("To run this check locally, use `test-backend --coverage`.")
        # Conversely, error on files that have become fully covered but
        # are still excused, so the exclude list keeps shrinking.
        ok = True
        for path in not_yet_fully_covered:
            try:
                missing_lines = cov.analysis2(path)[3]
                if len(missing_lines) == 0 and path != "zerver/lib/migrate.py":
                    print(
                        f"ERROR: {path} has complete backend test coverage but is still in not_yet_fully_covered."
                    )
                    ok = False
            except coverage.misc.NoSource:
                continue
        if not ok:
            print()
            print(
                "There are one or more fully covered files that are still in not_yet_fully_covered."
            )
            print("Remove the file(s) from not_yet_fully_covered in `tools/test-backend`.")
            failures = True
    if options.profile:
        prof.disable()
        # delete=False: the stats file must outlive this process so the
        # user can open it with snakeviz.
        with tempfile.NamedTemporaryFile(prefix="profile.data.", delete=False) as stats_file:
            prof.dump_stats(stats_file.name)
            print(f"Profile data saved to {stats_file.name}")
            print(f"You can visualize it using e.g. `snakeviz {shlex.quote(stats_file.name)}`")
            print("Note: If you are using vagrant for development environment you will need to do:")
            print("1.) `vagrant ssh -- -L 8080:127.0.0.1:8080`")
            print(f"2.) `snakeviz -s {shlex.quote(stats_file.name)}`")

    # Ideally, we'd check for any leaked test databases here;
    # but that needs some hackery with database names.
    #
    # destroy_leaked_test_databases()

    removed = remove_test_run_directories()
    if removed:
        print(f"Removed {removed} stale test run directories!")

    # We'll have printed whether tests passed or failed above
    sys.exit(bool(failures))
|
2019-03-14 17:01:01 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2019-03-14 17:01:01 +01:00
|
|
|
# Script entry point; guarded so importing this module has no side effects
# beyond the top-level setup above.
if __name__ == "__main__":
    main()
|