import multiprocessing
import os
import random
import shutil
import unittest
from collections.abc import Callable, Iterable
from typing import Any, TypeAlias
from unittest import TestSuite, runner
from unittest.result import TestResult

import orjson
from django.conf import settings
from django.db import ProgrammingError, connections
from django.test import runner as django_runner
from django.test.runner import DiscoverRunner
from django.test.signals import template_rendered
from typing_extensions import override

from scripts.lib.zulip_tools import (
    TEMPLATE_DATABASE_DIR,
    get_dev_uuid_var_path,
    get_or_create_dev_uuid_var_path,
)
from zerver.lib import test_helpers
from zerver.lib.partial import partial
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_fixtures import BACKEND_DATABASE_TEMPLATE
from zerver.lib.test_helpers import append_instrumentation_data, write_instrumentation_reports

# We need to pick an ID for this test-backend invocation, and store it
# in this global so it can be used in init_worker; this is used to
# ensure the database IDs we select are unique for each `test-backend`
# run. This probably should use a locking mechanism rather than the
# below hack, which fails 1/10000000 of the time.
random_id_range_start = str(random.randint(1, 10000000))


def get_database_id(worker_id: int | None = None) -> str:
    if worker_id:
        return f"{random_id_range_start}_{worker_id}"
    return random_id_range_start
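

# For illustration only (hypothetical values; the prefix is random per run):
# if this run picked random_id_range_start == "4823912", then
#     get_database_id()  -> "4823912"    (parent process / serial mode)
#     get_database_id(3) -> "4823912_3"  (worker 3 in parallel mode)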


# The root directory for this run of the test suite.
TEST_RUN_DIR = get_or_create_dev_uuid_var_path(
    os.path.join("test-backend", f"run_{get_database_id()}")
)

_worker_id = 0  # Used to identify the worker process.


class TextTestResult(runner.TextTestResult):
    """
    This class has unpythonic function names because the base class
    follows this style.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.failed_tests: list[str] = []

    def addInstrumentation(self, test: unittest.TestCase, data: dict[str, Any]) -> None:
        append_instrumentation_data(data)

    @override
    def startTest(self, test: unittest.TestCase) -> None:
        TestResult.startTest(self, test)
        self.stream.write(f"Running {test.id()}\n")
        self.stream.flush()

    @override
    def addSuccess(self, *args: Any, **kwargs: Any) -> None:
        TestResult.addSuccess(self, *args, **kwargs)

    @override
    def addError(self, *args: Any, **kwargs: Any) -> None:
        TestResult.addError(self, *args, **kwargs)
        test_name = args[0].id()
        self.failed_tests.append(test_name)

    @override
    def addFailure(self, *args: Any, **kwargs: Any) -> None:
        TestResult.addFailure(self, *args, **kwargs)
        test_name = args[0].id()
        self.failed_tests.append(test_name)

    @override
    def addSkip(self, test: unittest.TestCase, reason: str) -> None:
        TestResult.addSkip(self, test, reason)
        self.stream.write(f"** Skipping {test.id()}: {reason}\n")
        self.stream.flush()


class RemoteTestResult(django_runner.RemoteTestResult):
    """
    This class follows the unpythonic style of function names used by
    the base class.
    """

    def addInstrumentation(self, test: unittest.TestCase, data: dict[str, Any]) -> None:
        # Some elements of data['info'] cannot be serialized.
        if "info" in data:
            del data["info"]

        self.events.append(("addInstrumentation", self.test_index, data))


def process_instrumented_calls(func: Callable[[dict[str, Any]], None]) -> None:
    for call in test_helpers.INSTRUMENTED_CALLS:
        func(call)


SerializedSubsuite: TypeAlias = tuple[type[TestSuite], list[str]]
SubsuiteArgs: TypeAlias = tuple[type["RemoteTestRunner"], int, SerializedSubsuite, bool, bool]
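

# For illustration only (hypothetical test name): a SubsuiteArgs tuple has
# the shape
#     (RemoteTestRunner, 0,
#      (TestSuite, ["zerver.tests.test_example.ExampleTest.test_foo"]),
#      False, False)
# i.e. the runner class, the subsuite index, the serialized subsuite, and
# the failfast and buffer flags, matching the unpacking in run_subsuite.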


def run_subsuite(args: SubsuiteArgs) -> tuple[int, Any]:
    # Reset the accumulated INSTRUMENTED_CALLS before running this subsuite.
    test_helpers.INSTRUMENTED_CALLS = []
    # The first argument is the test runner class, but we don't need it,
    # because we run our own version of the runner class.
    _, subsuite_index, subsuite, failfast, buffer = args
    runner = RemoteTestRunner(failfast=failfast, buffer=buffer)
    result = runner.run(subsuite)
    # Now we send instrumentation-related events. This data will be
    # appended to the data structure in the main thread. For mypy, the
    # type of partial is different from Callable. All the methods of
    # TestResult are passed a TestCase as the first argument, but
    # addInstrumentation does not need it.
    process_instrumented_calls(partial(result.addInstrumentation, None))
    return subsuite_index, result.events


def destroy_test_databases(worker_id: int | None = None) -> None:
    for alias in connections:
        connection = connections[alias]

        try:
            # In parallel mode, the test databases are created by the
            # N=self.parallel child processes, and in the parent
            # process (which calls `destroy_test_databases`),
            # `settings_dict` remains unchanged, with the original
            # template database name (zulip_test_template). So to
            # delete the database zulip_test_template_<number>, we
            # need to pass `number` to `destroy_test_db`.
            #
            # When we run in serial mode (self.parallel=1), we don't
            # fork, and thus both creation and destruction occur in
            # the same process, which means `settings_dict` has been
            # updated to have `zulip_test_template_<number>` as its
            # database name by the creation code. As a result, to
            # delete that database, we need to not pass a number
            # argument to destroy_test_db.
            if worker_id is not None:
                # Modified from the Django original to pass the suffix
                # for this worker's database clone.
                database_id = get_database_id(worker_id)
                connection.creation.destroy_test_db(suffix=database_id)
            else:
                connection.creation.destroy_test_db()
        except ProgrammingError:
            # The database doesn't exist; no need to do anything.
            pass


def create_test_databases(worker_id: int) -> None:
    database_id = get_database_id(worker_id)
    for alias in connections:
        connection = connections[alias]
        connection.creation.clone_test_db(
            suffix=database_id,
            keepdb=True,
        )

        settings_dict = connection.creation.get_test_db_clone_settings(database_id)
        # connection.settings_dict must be updated in place for changes to be
        # reflected in django.db.connections. If the following line assigned
        # connection.settings_dict = settings_dict, new threads would connect
        # to the default database instead of the appropriate clone.
        connection.settings_dict.update(settings_dict)
        connection.close()
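

# For illustration only (hypothetical values): with
# random_id_range_start == "4823912" and self.parallel == 2, the clones
# created above would be named zulip_test_template_4823912_1 and
# zulip_test_template_4823912_2.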


def init_worker(
    counter: "multiprocessing.sharedctypes.Synchronized[int]",
    initial_settings: dict[str, Any] | None = None,
    serialized_contents: dict[str, str] | None = None,
    process_setup: Callable[..., None] | None = None,
    process_setup_args: tuple[Any, ...] | None = None,
    debug_mode: bool | None = None,
    used_aliases: set[str] | None = None,
) -> None:
    """
    This function runs only in parallel mode. It initializes the
    individual processes, which are also called workers.
    """
    global _worker_id

    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

    # _worker_id can now be used.

    # Clear the cache.
    from zerver.lib.cache import get_cache_backend

    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections.
    connections.close_all()

    destroy_test_databases(_worker_id)
    create_test_databases(_worker_id)
    initialize_worker_path(_worker_id)
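

# Note, for illustration: with self.parallel == 4, the shared counter in
# init_worker hands out _worker_id values 1 through 4, one per worker
# process, so each worker gets its own database clone and scratch directory.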


class ParallelTestSuite(django_runner.ParallelTestSuite):
    run_subsuite = run_subsuite
    init_worker = init_worker
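

# A rough sketch of how Django drives these hooks (not the actual Django
# code): the parallel runner spawns a multiprocessing pool initialized with
# init_worker and dispatches each serialized subsuite to run_subsuite,
# collecting the (subsuite_index, events) results in the parent process.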


def check_import_error(test_name: str) -> None:
    try:
        # Directly using __import__ is not recommended, but here it gives
        # clearer traceback as compared to importlib.import_module.
        __import__(test_name)
    except ImportError as exc:
        raise exc from exc  # Disable exception chaining in Python 3.


def initialize_worker_path(worker_id: int) -> None:
    # Allow each test worker process to write to a unique directory
    # within `TEST_RUN_DIR`.
    worker_path = os.path.join(TEST_RUN_DIR, f"worker_{worker_id}")
    os.makedirs(worker_path, exist_ok=True)
    settings.TEST_WORKER_DIR = worker_path

    # Every process should upload to a separate directory so that
    # race conditions can be avoided.
    settings.LOCAL_UPLOADS_DIR = get_or_create_dev_uuid_var_path(
        os.path.join(
            "test-backend",
            os.path.basename(TEST_RUN_DIR),
            os.path.basename(worker_path),
            "test_uploads",
        )
    )
    settings.LOCAL_AVATARS_DIR = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
    settings.LOCAL_FILES_DIR = os.path.join(settings.LOCAL_UPLOADS_DIR, "files")


class Runner(DiscoverRunner):
    parallel_test_suite = ParallelTestSuite

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        DiscoverRunner.__init__(self, *args, **kwargs)

        # `templates_rendered` holds templates which were rendered
        # in proper logical tests.
        self.templates_rendered: set[str] = set()
        # `shallow_tested_templates` holds templates which were rendered
        # in `zerver.tests.test_templates`.
        self.shallow_tested_templates: set[str] = set()
        template_rendered.connect(self.on_template_rendered)

    @override
    def get_resultclass(self) -> type[TextTestResult] | None:
        return TextTestResult

    def on_template_rendered(self, sender: Any, context: dict[str, Any], **kwargs: Any) -> None:
        if hasattr(sender, "template"):
            template_name = sender.template.name
            if template_name not in self.templates_rendered:
                if context.get("shallow_tested"):
                    self.shallow_tested_templates.add(template_name)
                else:
                    self.templates_rendered.add(template_name)
                    self.shallow_tested_templates.discard(template_name)

    def get_shallow_tested_templates(self) -> set[str]:
        return self.shallow_tested_templates

    @override
    def setup_test_environment(self, *args: Any, **kwargs: Any) -> Any:
        settings.DATABASES["default"]["NAME"] = BACKEND_DATABASE_TEMPLATE
        # We create/destroy the test databases in run_tests to avoid
        # duplicate work when running in parallel mode.

        # Write the template database ids to a file that we can
        # reference for cleaning them up if they leak.
        filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, "w") as f:
            if self.parallel > 1:
                for index in range(self.parallel):
                    f.write(get_database_id(index + 1) + "\n")
            else:
                f.write(get_database_id() + "\n")

        # Check if we are in serial mode to avoid unnecessarily making a directory.
        # We add "worker_0" in the path for consistency with parallel mode.
        if self.parallel == 1:
            initialize_worker_path(0)

        return super().setup_test_environment(*args, **kwargs)

    @override
    def teardown_test_environment(self, *args: Any, **kwargs: Any) -> Any:
        # The test environment setup clones the zulip_test_template
        # database, creating databases with names:
        # 'zulip_test_template_N_<worker_id>',
        # where N is `random_id_range_start` and `worker_id` is a
        # value in the range [1, self.parallel].
        #
        # We need to delete those databases to avoid leaking disk space
        # (Django is smart and calls this on SIGINT too).
        if self.parallel > 1:
            for index in range(self.parallel):
                destroy_test_databases(index + 1)
        else:
            destroy_test_databases()

        # Clean up our record of which databases this process created.
        filepath = os.path.join(get_dev_uuid_var_path(), TEMPLATE_DATABASE_DIR, get_database_id())
        os.remove(filepath)

        # Clean up our test runs root directory.
        try:
            shutil.rmtree(TEST_RUN_DIR)
        except OSError:
            print("Unable to clean up the test run's directory.")
        return super().teardown_test_environment(*args, **kwargs)

    def test_imports(self, test_labels: list[str], suite: TestSuite | ParallelTestSuite) -> None:
        prefix = "unittest.loader._FailedTest."
        for test_name in get_test_names(suite):
            if test_name.startswith(prefix):
                test_name = test_name[len(prefix) :]
                for label in test_labels:
                    # This code block is for when a test label is
                    # directly provided, for example:
                    # ./tools/test-backend zerver.tests.test_alert_words.py
                    #
                    # In this case, the test name is of this form:
                    # 'unittest.loader._FailedTest.test_alert_words'
                    #
                    # Whereas check_import_error requires test names of
                    # this form:
                    # 'unittest.loader._FailedTest.zerver.tests.test_alert_words'.
                    if test_name in label:
                        test_name = label
                        break
                check_import_error(test_name)

    @override
    def run_tests(
        self,
        test_labels: list[str],
        failed_tests_path: str | None = None,
        full_suite: bool = False,
        include_webhooks: bool = False,
        **kwargs: Any,
    ) -> int:
        self.setup_test_environment()
        suite = self.build_suite(test_labels)
        self.test_imports(test_labels, suite)
        if self.parallel == 1:
            # We are running in serial mode, so create the databases here.
            # For parallel mode, the databases are created in init_worker.
            # We don't want to create and destroy the DB in
            # setup_test_environment, because it is called for both serial
            # and parallel modes. However, at this point we know which mode
            # we are running in, since that decision has already been made
            # in build_suite().
            #
            # We pass _worker_id, which in this code path is always 0.
            destroy_test_databases(_worker_id)
            create_test_databases(_worker_id)

        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        with get_sqlalchemy_connection():
            result = self.run_suite(suite)
        assert isinstance(result, TextTestResult)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
        if failed_tests_path and result.failed_tests:
            with open(failed_tests_path, "wb") as f:
                f.write(orjson.dumps(result.failed_tests))
        return failed


def get_test_names(suite: TestSuite | ParallelTestSuite) -> list[str]:
    if isinstance(suite, ParallelTestSuite):
        return [name for subsuite in suite.subsuites for name in get_test_names(subsuite)]
    else:
        return [t.id() for t in get_tests_from_suite(suite)]


def get_tests_from_suite(suite: TestSuite) -> Iterable[unittest.TestCase]:
    for test in suite:
        if isinstance(test, TestSuite):
            yield from get_tests_from_suite(test)
        else:
            yield test
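

# For illustration only (hypothetical tests): given a nested suite like
#     TestSuite([TestSuite([test_a, test_b]), test_c])
# get_tests_from_suite yields test_a, test_b, test_c, flattening the
# nesting recursively.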


class RemoteTestRunner(django_runner.RemoteTestRunner):
    resultclass = RemoteTestResult