2015-11-01 17:11:06 +01:00
|
|
|
from __future__ import print_function
|
2016-05-20 14:53:47 +02:00
|
|
|
|
2017-02-10 05:49:28 +01:00
|
|
|
from functools import partial
|
2017-04-11 08:05:43 +02:00
|
|
|
import random
|
|
|
|
|
2017-03-03 19:01:52 +01:00
|
|
|
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, \
|
2017-06-06 10:04:20 +02:00
|
|
|
Text, Type
|
2017-02-10 05:48:15 +01:00
|
|
|
from unittest import loader, runner # type: ignore # Mypy cannot pick these up.
|
|
|
|
from unittest.result import TestResult
|
2016-05-20 14:53:47 +02:00
|
|
|
|
2017-04-13 11:24:44 +02:00
|
|
|
from django.conf import settings
|
2017-04-14 18:16:47 +02:00
|
|
|
from django.db import connections, ProgrammingError
|
2017-04-25 09:50:13 +02:00
|
|
|
from django.urls.resolvers import RegexURLPattern
|
2016-06-05 08:05:39 +02:00
|
|
|
from django.test import TestCase
|
2017-02-10 05:49:28 +01:00
|
|
|
from django.test import runner as django_runner
|
|
|
|
from django.test.runner import DiscoverRunner
|
2016-05-20 14:53:47 +02:00
|
|
|
from django.test.signals import template_rendered
|
2014-01-29 00:47:48 +01:00
|
|
|
|
2017-02-10 05:49:28 +01:00
|
|
|
from zerver.lib import test_classes, test_helpers
|
2014-01-29 00:47:48 +01:00
|
|
|
from zerver.lib.cache import bounce_key_prefix_for_testing
|
2017-05-05 12:07:10 +02:00
|
|
|
from zerver.lib.rate_limiter import bounce_redis_key_prefix_for_testing
|
2017-03-22 11:45:39 +01:00
|
|
|
from zerver.lib.test_classes import flush_caches_for_testing
|
2016-07-19 08:12:35 +02:00
|
|
|
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
|
2016-07-28 01:40:28 +02:00
|
|
|
from zerver.lib.test_helpers import (
|
2016-07-28 02:40:04 +02:00
|
|
|
get_all_templates, write_instrumentation_reports,
|
2017-02-10 05:48:15 +01:00
|
|
|
append_instrumentation_data
|
2017-01-24 06:34:26 +01:00
|
|
|
)
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
import os
|
2016-05-03 20:53:59 +02:00
|
|
|
import subprocess
|
2016-05-03 21:02:20 +02:00
|
|
|
import sys
|
2014-01-29 00:47:48 +01:00
|
|
|
import time
|
2016-01-23 23:18:26 +01:00
|
|
|
import traceback
|
2015-06-10 20:28:32 +02:00
|
|
|
import unittest
|
2014-01-29 00:47:48 +01:00
|
|
|
|
2017-02-10 12:40:14 +01:00
|
|
|
# `if False:` is the Python 2-compatible equivalent of a
# `typing.TYPE_CHECKING` guard: the import below is never executed at
# runtime and exists only so mypy can resolve the `Synchronized`
# annotation used by init_worker().
if False:
    # Only needed by mypy.
    from multiprocessing.sharedctypes import Synchronized

_worker_id = 0  # Used to identify the worker process.
|
2017-02-10 12:40:14 +01:00
|
|
|
|
2016-07-29 21:06:22 +02:00
|
|
|
def slow(slowness_reason):
    # type: (str) -> Callable[[Callable], Callable]
    '''
    This is a decorator that annotates a test as being "known
    to be slow."  The decorator sets the `slowness_reason`
    attribute on the decorated function.  Other code can use this
    annotation as needed, e.g. to exclude these tests in "fast" mode.
    '''
    def decorator(f):
        # type: (Any) -> Any
        # Attach the reason so is_known_slow_test() and the slowness
        # report can detect and explain this test.
        f.slowness_reason = slowness_reason
        return f

    return decorator
|
|
|
|
|
|
|
|
def is_known_slow_test(test_method):
    # type: (Any) -> bool
    """Return True if test_method was annotated with the @slow decorator."""
    try:
        test_method.slowness_reason
    except AttributeError:
        return False
    return True
|
|
|
|
|
|
|
|
def full_test_name(test):
    # type: (TestCase) -> str
    """Return the dotted `module.Class.method` path identifying `test`."""
    parts = (test.__module__,
             test.__class__.__name__,
             test._testMethodName)
    return '%s.%s.%s' % parts
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
def get_test_method(test):
|
2016-06-05 08:05:39 +02:00
|
|
|
# type: (TestCase) -> Callable[[], None]
|
2014-01-29 00:47:48 +01:00
|
|
|
return getattr(test, test._testMethodName)
|
|
|
|
|
2016-07-29 19:48:43 +02:00
|
|
|
# Accumulated per-test wall-clock timings, appended to by run_test() and
# consumed by report_slow_tests().
# Each tuple is delay, test_name, slowness_reason
TEST_TIMINGS = []  # type: List[Tuple[float, str, str]]
|
2016-07-29 19:48:43 +02:00
|
|
|
|
|
|
|
|
|
|
|
def report_slow_tests():
    # type: () -> None
    """Print the 15 slowest recorded tests, then flag @slow-decorated
    tests that actually ran fast (their decorator may be stale)."""
    timings = sorted(TEST_TIMINGS, reverse=True)
    print('SLOWNESS REPORT')
    print(' delay test')
    print(' ---- ----')
    for delay, test_name, slowness_reason in timings[:15]:
        reason = slowness_reason or 'UNKNOWN WHY SLOW, please investigate'
        print(' %0.3f %s\n %s\n' % (delay, test_name, reason))

    print('...')
    # Tests ranked past 100 are fast; if one still carries a
    # slowness_reason, its @slow decorator is probably outdated.
    for delay, test_name, slowness_reason in timings[100:]:
        if not slowness_reason:
            continue
        print(' %.3f %s is not that slow' % (delay, test_name))
        print(' consider removing @slow decorator')
        print(' This may no longer be true: %s' % (slowness_reason,))
|
|
|
|
|
2017-02-10 05:48:15 +01:00
|
|
|
def enforce_timely_test_completion(test_method, test_name, delay, result):
    # type: (Any, str, float, TestResult) -> None
    """Report (via result.addInfo) when a test exceeds its time budget.

    Tests annotated with @slow get a larger budget than ordinary tests."""
    is_annotated_slow = hasattr(test_method, 'slowness_reason')
    max_delay = 1.1 if is_annotated_slow else 0.4  # seconds

    if delay > max_delay:
        result.addInfo(test_method,
                       '** Test is TOO slow: %s (%.3f s)\n' % (test_name, delay))
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
def fast_tests_only():
    # type: () -> bool
    """True when the FAST_TESTS_ONLY environment variable is set."""
    return os.environ.get("FAST_TESTS_ONLY") is not None
|
2014-01-29 00:47:48 +01:00
|
|
|
|
2017-02-10 05:48:15 +01:00
|
|
|
def run_test(test, result):
    # type: (TestCase, TestResult) -> bool
    """Run a single TestCase, with Zulip-specific setup/teardown.

    Returns True only when the test could not be run at all (a module
    import failure, or a TestCase missing Django's _pre_setup); ordinary
    failures/errors are recorded in `result` and False is returned.
    """
    failed = False
    test_method = get_test_method(test)

    # In FAST_TESTS_ONLY mode, skip anything annotated with @slow.
    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    # Give each test its own memcached/redis key namespace and reset
    # in-process caches so tests cannot observe each other's state.
    bounce_key_prefix_for_testing(test_name)
    bounce_redis_key_prefix_for_testing(test_name)

    flush_caches_for_testing()

    if not hasattr(test, "_pre_setup"):
        # A TestCase without _pre_setup usually means unittest replaced a
        # module that failed to import with a synthetic ModuleImportFailure
        # test; detect that and produce a useful traceback.
        # test_name is likely of the form unittest.loader.ModuleImportFailure.zerver.tests.test_upload
        import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
        if test_name.startswith(import_failure_prefix):
            actual_test_name = test_name[len(import_failure_prefix):]
            error_msg = ("\nActual test to be run is %s, but import failed.\n"
                         "Importing test module directly to generate clearer "
                         "traceback:\n") % (actual_test_name,)
            result.addInfo(test, error_msg)

            try:
                # Re-import the module in a subprocess so the real import
                # error surfaces with a clean traceback.
                command = [sys.executable, "-c", "import %s" % (actual_test_name,)]
                msg = "Import test command: `%s`" % (' '.join(command),)
                result.addInfo(test, msg)
                subprocess.check_call(command)
            except subprocess.CalledProcessError:
                msg = ("If that traceback is confusing, try doing the "
                       "import inside `./manage.py shell`")
                result.addInfo(test, msg)
                result.addError(test, sys.exc_info())
                return True

            # The subprocess import worked even though unittest's did not;
            # this typically indicates an import cycle.
            msg = ("Import unexpectedly succeeded! Something is wrong. Try "
                   "running `import %s` inside `./manage.py shell`.\n"
                   "If that works, you may have introduced an import "
                   "cycle.") % (actual_test_name,)
            import_error = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, import_error)
            return True
        else:
            msg = "Test doesn't have _pre_setup; something is wrong."
            error_pre_setup = (Exception, Exception(msg), None)  # type: Tuple[Any, Any, Any]
            result.addError(test, error_pre_setup)
            return True
    test._pre_setup()

    start_time = time.time()

    test(result)  # unittest will handle skipping, error, failure and success.

    # Record the timing so enforce_timely_test_completion() can warn now
    # and report_slow_tests() can summarize at the end of the run.
    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay, result)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed
|
2014-01-29 00:47:48 +01:00
|
|
|
|
2017-02-10 05:48:15 +01:00
|
|
|
class TextTestResult(runner.TextTestResult):
    """
    This class has unpythonic function names because base class follows
    this style.

    Several methods below deliberately delegate to unittest's plain
    TestResult instead of calling super(): this bypasses
    runner.TextTestResult's own per-test console output, because this
    class prints its own "Running ..." line in startTest.
    """
    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(TextTestResult, self).__init__(*args, **kwargs)
        # Dotted names of tests that errored or failed in this run.
        self.failed_tests = []  # type: List[str]

    def addInfo(self, test, msg):
        # type: (TestCase, Text) -> None
        """Write an informational message straight to the output stream."""
        self.stream.write(msg)
        self.stream.flush()

    def addInstrumentation(self, test, data):
        # type: (TestCase, Dict[str, Any]) -> None
        """Record URL-coverage instrumentation data for later reporting."""
        append_instrumentation_data(data)

    def startTest(self, test):
        # type: (TestCase) -> None
        TestResult.startTest(self, test)
        self.stream.writeln("Running {}".format(full_test_name(test)))
        self.stream.flush()

    def addSuccess(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        # Skip the base class's "ok" output; just record the result.
        TestResult.addSuccess(self, *args, **kwargs)

    def addError(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        TestResult.addError(self, *args, **kwargs)
        # args[0] is the TestCase, per the TestResult.addError signature.
        test_name = full_test_name(args[0])
        self.failed_tests.append(test_name)

    def addFailure(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        TestResult.addFailure(self, *args, **kwargs)
        test_name = full_test_name(args[0])
        self.failed_tests.append(test_name)

    def addSkip(self, test, reason):
        # type: (TestCase, Text) -> None
        TestResult.addSkip(self, test, reason)
        self.stream.writeln("** Skipping {}: {}".format(full_test_name(test),
                                                        reason))
        self.stream.flush()
|
|
|
|
|
2017-02-10 05:49:28 +01:00
|
|
|
class RemoteTestResult(django_runner.RemoteTestResult):
    """
    Result class used inside parallel worker processes; it follows the
    unpythonic naming style of its base class.  Instead of acting on
    events directly, it queues them in self.events for replay by the
    corresponding result object in the main process.
    """
    def addInfo(self, test, msg):
        # type: (TestCase, Text) -> None
        """Queue an informational message for the main process."""
        self.events.append(('addInfo', self.test_index, msg))

    def addInstrumentation(self, test, data):
        # type: (TestCase, Dict[str, Any]) -> None
        """Queue instrumentation data, stripping unpicklable parts."""
        # Some elements of data['info'] cannot be serialized.
        data.pop('info', None)

        self.events.append(('addInstrumentation', self.test_index, data))
|
|
|
|
|
|
|
|
def process_instrumented_calls(func):
    # type: (Callable) -> None
    """Apply func to every instrumentation record accumulated so far."""
    for instrumented_call in test_helpers.INSTRUMENTED_CALLS:
        func(instrumented_call)
|
|
|
|
|
2017-06-06 10:04:20 +02:00
|
|
|
def run_subsuite(args):
    # type: (Tuple[int, Tuple[Type[Iterable[TestCase]], List[str]], bool]) -> Tuple[int, Any]
    """Run one serialized subsuite inside a parallel worker process.

    `args` is (subsuite_index, serialized_suite, failfast); returns the
    index along with the worker result's queued events for the main
    process to replay.
    """
    # Reset the accumulated INSTRUMENTED_CALLS before running this subsuite.
    test_helpers.INSTRUMENTED_CALLS = []
    subsuite_index, subsuite, failfast = args
    runner = RemoteTestRunner(failfast=failfast)
    result = runner.run(deserialize_suite(subsuite))
    # Now we send instrumentation related events. This data will be
    # appended to the data structure in the main thread. For Mypy,
    # type of Partial is different from Callable. All the methods of
    # TestResult are passed TestCase as the first argument but
    # addInstrumentation does not need it.
    process_instrumented_calls(partial(result.addInstrumentation, None))  # type: ignore
    return subsuite_index, result.events
|
|
|
|
|
2017-04-14 18:16:47 +02:00
|
|
|
# Monkey-patch database creation to fix unnecessary sleep(1)
from django.db.backends.postgresql.creation import DatabaseCreation
def _replacement_destroy_test_db(self, test_database_name, verbosity):
    # type: (Any, str, Any) -> None
    """Replacement for Django's _destroy_test_db that removes the
    unnecessary sleep(1)."""
    # quote_name() escapes the database identifier; DDL statements cannot
    # use query parameters, hence the string interpolation here.
    with self.connection._nodb_connection.cursor() as cursor:
        cursor.execute("DROP DATABASE %s"
                       % self.connection.ops.quote_name(test_database_name))
# NOTE(review): patching a private Django method; verify against the
# installed Django version on upgrades.
DatabaseCreation._destroy_test_db = _replacement_destroy_test_db
|
|
|
|
|
2017-04-11 07:32:49 +02:00
|
|
|
def destroy_test_databases(database_id=None):
    # type: (Optional[int]) -> None
    """
    Drop the test database clone for every configured connection.

    When database_id is None, the name of the databases is picked up
    by the database settings.
    """
    for alias in connections:
        try:
            connections[alias].creation.destroy_test_db(number=database_id)
        except ProgrammingError:
            # DB doesn't exist. No need to do anything.
            pass
|
|
|
|
|
2017-04-11 07:32:49 +02:00
|
|
|
def create_test_databases(database_id):
    # type: (int) -> None
    """Clone the template test database (numbered database_id) for every
    configured connection and point the connection at the clone."""
    for alias in connections:
        connection = connections[alias]
        connection.creation.clone_test_db(number=database_id, keepdb=True)

        settings_dict = connection.creation.get_test_db_clone_settings(database_id)
        # connection.settings_dict must be updated in place for changes to be
        # reflected in django.db.connections. If the following line assigned
        # connection.settings_dict = settings_dict, new threads would connect
        # to the default database instead of the appropriate clone.
        connection.settings_dict.update(settings_dict)
        connection.close()
|
|
|
|
|
2017-04-11 07:32:49 +02:00
|
|
|
def init_worker(counter):
    # type: (Synchronized) -> None
    """
    This function runs only under parallel mode. It initializes the
    individual processes which are also called workers.
    """
    global _worker_id

    # Atomically claim a unique worker number from the shared counter.
    with counter.get_lock():
        counter.value += 1
        _worker_id = counter.value

    # NOTE: this bare string is a no-op statement, kept as an inline marker.
    """
    You can now use _worker_id.
    """

    # Reset cached API keys so this worker fetches its own.
    test_classes.API_KEYS = {}

    # Clear the cache
    from zerver.lib.cache import get_cache_backend
    cache = get_cache_backend(None)
    cache.clear()

    # Close all connections
    connections.close_all()

    # Give this worker its own numbered clone of the test database.
    destroy_test_databases(_worker_id)
    create_test_databases(_worker_id)

    # Every process should upload to a separate directory so that
    # race conditions can be avoided.
    settings.LOCAL_UPLOADS_DIR = '{}_{}'.format(settings.LOCAL_UPLOADS_DIR,
                                                _worker_id)

    def is_upload_avatar_url(url):
        # type: (RegexURLPattern) -> bool
        # Matches the user_avatars serving pattern from zproject.dev_urls.
        if url.regex.pattern == r'^user_avatars/(?P<path>.*)$':
            return True
        return False

    # We manually update the upload directory path in the url regex.
    from zproject import dev_urls
    found = False
    for url in dev_urls.urls:
        if is_upload_avatar_url(url):
            found = True
            new_root = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars")
            url.default_args['document_root'] = new_root

    if not found:
        print("*** Upload directory not found.")
|
|
|
|
|
2017-02-10 12:40:14 +01:00
|
|
|
class TestSuite(unittest.TestSuite):
    def run(self, result, debug=False):
        # type: (TestResult, Optional[bool]) -> TestResult
        """
        This function mostly contains the code from
        unittest.TestSuite.run. The need to override this function
        occurred because we use run_test to run the testcase.
        """
        # Only the outermost suite performs final module/class teardown.
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True

        for test in self:  # type: ignore  # Mypy cannot recognize this
            # but this is correct. Taken from unittest.
            if result.shouldStop:
                break

            if isinstance(test, TestSuite):
                # Recurse into nested suites.
                test.run(result, debug=debug)
            else:
                # Replicate unittest's class/module fixture handling,
                # then dispatch through our run_test() wrapper.
                self._tearDownPreviousClass(test, result)  # type: ignore
                self._handleModuleFixture(test, result)  # type: ignore
                self._handleClassSetUp(test, result)  # type: ignore
                result._previousTestClass = test.__class__
                if (getattr(test.__class__, '_classSetupFailed', False) or
                        getattr(result, '_moduleSetUpFailed', False)):
                    continue

                failed = run_test(test, result)
                # run_test returning True means the test could not run at
                # all; abort the whole run.
                if failed or result.shouldStop:
                    result.shouldStop = True
                    break

        if topLevel:
            self._tearDownPreviousClass(None, result)  # type: ignore
            self._handleModuleTearDown(result)  # type: ignore
            result._testRunEntered = False
        return result
|
|
|
|
|
|
|
|
class TestLoader(loader.TestLoader):
    # Make the loader build our TestSuite subclass (whose run() uses
    # run_test) instead of the stock unittest.TestSuite.
    suiteClass = TestSuite
|
|
|
|
|
2017-02-10 05:49:28 +01:00
|
|
|
class ParallelTestSuite(django_runner.ParallelTestSuite):
    # Plug our worker entry points into Django's parallel machinery.
    run_subsuite = run_subsuite
    init_worker = init_worker

    def __init__(self, suite, processes, failfast):
        # type: (TestSuite, int, bool) -> None
        super(ParallelTestSuite, self).__init__(suite, processes, failfast)
        # Wrap the subsuites so they are pickled/unpickled lazily
        # (see SubSuiteList) when shipped to worker processes.
        self.subsuites = SubSuiteList(self.subsuites)  # type: SubSuiteList
|
|
|
|
|
2014-01-29 17:28:55 +01:00
|
|
|
class Runner(DiscoverRunner):
    """Zulip's Django test runner: custom suite/loader/result classes,
    per-run cloned test databases, and template-coverage tracking."""
    test_suite = TestSuite
    test_loader = TestLoader()
    parallel_test_suite = ParallelTestSuite

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        DiscoverRunner.__init__(self, *args, **kwargs)

        # `templates_rendered` holds templates which were rendered
        # in proper logical tests.
        self.templates_rendered = set()  # type: Set[str]
        # `shallow_tested_templates` holds templates which were rendered
        # in `zerver.tests.test_templates`.
        self.shallow_tested_templates = set()  # type: Set[str]
        template_rendered.connect(self.on_template_rendered)
        # Random id used to name this run's cloned test databases.
        self.database_id = random.randint(1, 10000)

    def get_resultclass(self):
        # type: () -> Type[TestResult]
        return TextTestResult

    def on_template_rendered(self, sender, context, **kwargs):
        # type: (Any, Dict[str, Any], **Any) -> None
        """Signal handler recording which templates each test rendered."""
        if hasattr(sender, 'template'):
            template_name = sender.template.name
            if template_name not in self.templates_rendered:
                # NOTE(review): the `template_name not in self.templates_rendered`
                # clause below is redundant (we are already inside that check).
                if context.get('shallow_tested') and template_name not in self.templates_rendered:
                    self.shallow_tested_templates.add(template_name)
                else:
                    self.templates_rendered.add(template_name)
                    self.shallow_tested_templates.discard(template_name)

    def get_shallow_tested_templates(self):
        # type: () -> Set[str]
        return self.shallow_tested_templates

    def setup_test_environment(self, *args, **kwargs):
        # type: (*Any, **Any) -> Any
        settings.DATABASES['default']['NAME'] = settings.BACKEND_DATABASE_TEMPLATE
        # We create/destroy the test databases in run_tests to avoid
        # duplicate work when running in parallel mode.
        return super(Runner, self).setup_test_environment(*args, **kwargs)

    def teardown_test_environment(self, *args, **kwargs):
        # type: (*Any, **Any) -> Any
        # No need to pass the database id now. It will be picked up
        # automatically through settings.
        if self.parallel == 1:
            # In parallel mode (parallel > 1), destroy_test_databases will
            # destroy settings.BACKEND_DATABASE_TEMPLATE; we don't want that.
            # So run this only in serial mode.
            destroy_test_databases()
        return super(Runner, self).teardown_test_environment(*args, **kwargs)

    def run_tests(self, test_labels, extra_tests=None,
                  full_suite=False, **kwargs):
        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
        """Build and run the suite, returning (failed, failed_test_names)."""
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            # build_suite raises AttributeError for nonexistent test
            # labels; print debugging hints rather than a bare traceback.
            traceback.print_exc()
            print()
            print(" This is often caused by a test module/class/function that doesn't exist or ")
            print(" import properly. You can usually debug in a `manage.py shell` via e.g. ")
            print(" import zerver.tests.test_messages")
            print(" from zerver.tests.test_messages import StreamMessagesTest")
            print(" StreamMessagesTest.test_message_to_stream")
            print()
            sys.exit(1)

        if self.parallel == 1:
            # We are running in serial mode so create the databases here.
            # For parallel mode, the databases are created in init_worker.
            # We don't want to create and destroy DB in setup_test_environment
            # because it will be called for both serial and parallel modes.
            # However, at this point we know in which mode we would be running
            # since that decision has already been made in build_suite().
            destroy_test_databases(self.database_id)
            create_test_databases(self.database_id)

        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite)
        return failed, result.failed_tests
|
2017-02-10 05:49:28 +01:00
|
|
|
|
|
|
|
def get_test_names(suite):
    # type: (TestSuite) -> List[str]
    """Flatten `suite` into the dotted names of its test cases."""
    return list(map(full_test_name, get_tests_from_suite(suite)))
|
|
|
|
|
|
|
|
def get_tests_from_suite(suite):
    # type: (TestSuite) -> TestCase
    """Yield every TestCase in `suite`, recursing into nested suites."""
    for item in suite:  # type: ignore
        if not isinstance(item, TestSuite):
            yield item
        else:
            for nested_test in get_tests_from_suite(item):
                yield nested_test
|
|
|
|
|
|
|
|
def serialize_suite(suite):
    # type: (TestSuite) -> Tuple[Type[TestSuite], List[str]]
    """Reduce a suite to a picklable (suite class, test-name list) pair."""
    suite_class = type(suite)
    return suite_class, get_test_names(suite)
|
|
|
|
|
|
|
|
def deserialize_suite(args):
    # type: (Tuple[Type[Iterable[TestCase]], List[str]]) -> Iterable[TestCase]
    """Rebuild a suite from the pair produced by serialize_suite."""
    suite_class, test_names = args
    suite = suite_class()  # type: ignore # Gives abstract type error.
    loaded = TestLoader().loadTestsFromNames(test_names)
    for case in get_tests_from_suite(loaded):
        suite.addTest(case)
    return suite
|
|
|
|
|
|
|
|
class RemoteTestRunner(django_runner.RemoteTestRunner):
    # Use our event-queueing result class inside worker processes.
    resultclass = RemoteTestResult
|
|
|
|
|
|
|
|
class SubSuiteList(list):
    """
    This class allows us to avoid changing the main logic of
    ParallelTestSuite and still make it serializable.
    """
    def __init__(self, suites):
        # type: (List[TestSuite]) -> None
        # Store each subsuite in its serialized (picklable) form.
        super(SubSuiteList, self).__init__(serialize_suite(s) for s in suites)

    def __getitem__(self, index):
        # type: (Any) -> Any
        # Deserialize on access so consumers receive real suites.
        serialized = super(SubSuiteList, self).__getitem__(index)
        return deserialize_suite(serialized)
|