from __future__ import print_function

from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple

from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.test.signals import template_rendered

from unittest import loader  # type: ignore  # Mypy cannot pick this up.

from zerver.lib.cache import bounce_key_prefix_for_testing
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.test_helpers import (
    get_all_templates, write_instrumentation_reports,
)

import os
import subprocess
import sys
import time
import traceback
import unittest

if False:
    # Imported only so mypy can see the type; never executed at runtime.
    from unittest.result import TextTestResult

def slow(slowness_reason):
    # type: (str) -> Callable[[Callable], Callable]
    '''
    This is a decorator that annotates a test as being "known
    to be slow."  The decorator sets slowness_reason as an
    attribute of the function.  Other code can use this annotation
    as needed, e.g. to exclude these tests in "fast" mode.
    '''
    def decorator(f):
        # type: (Any) -> Any
        f.slowness_reason = slowness_reason
        return f

    return decorator

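# Example usage (hypothetical test method, shown for illustration only):
#
#     @slow("constructs and sends many messages")
#     def test_bulk_sends(self):
#         ...
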
def is_known_slow_test(test_method):
    # type: (Any) -> bool
    return hasattr(test_method, 'slowness_reason')

def full_test_name(test):
    # type: (TestCase) -> str
    test_module = test.__module__
    test_class = test.__class__.__name__
    test_method = test._testMethodName
    return '%s.%s.%s' % (test_module, test_class, test_method)

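# For example, for a method test_foo on class BarTest in
# zerver/tests/test_bar.py (hypothetical names), full_test_name()
# returns 'zerver.tests.test_bar.BarTest.test_foo'.
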
def get_test_method(test):
    # type: (TestCase) -> Callable[[], None]
    return getattr(test, test._testMethodName)

# Each tuple is (delay, test_name, slowness_reason).
TEST_TIMINGS = []  # type: List[Tuple[float, str, str]]

def report_slow_tests():
    # type: () -> None
    timings = sorted(TEST_TIMINGS, reverse=True)
    print('SLOWNESS REPORT')
    print(' delay test')
    print(' ----  ----')
    for delay, test_name, slowness_reason in timings[:15]:
        if not slowness_reason:
            slowness_reason = 'UNKNOWN WHY SLOW, please investigate'
        print(' %0.3f %s\n       %s\n' % (delay, test_name, slowness_reason))

    print('...')
    for delay, test_name, slowness_reason in timings[100:]:
        if slowness_reason:
            print(' %.3f %s is not that slow' % (delay, test_name))
            print('      consider removing @slow decorator')
            print('      This may no longer be true: %s' % (slowness_reason,))

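# Illustrative output (timings and test names invented for the example):
#
#     SLOWNESS REPORT
#      delay test
#      ----  ----
#      2.310 zerver.tests.test_example.ExampleTest.test_something_slow
#            UNKNOWN WHY SLOW, please investigate
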
def enforce_timely_test_completion(test_method, test_name, delay):
    # type: (Any, str, float) -> None
    if hasattr(test_method, 'slowness_reason'):
        max_delay = 1.1  # seconds
    else:
        max_delay = 0.4  # seconds

    if delay > max_delay:
        # This only prints a warning; it does not fail the test.
        print(' ** Test is TOO slow: %s (%.3f s)' % (test_name, delay))

def fast_tests_only():
    # type: () -> bool
    return "FAST_TESTS_ONLY" in os.environ

def run_test(test):
    # type: (TestCase) -> bool
    failed = False
    test_method = get_test_method(test)

    if fast_tests_only() and is_known_slow_test(test_method):
        return failed

    test_name = full_test_name(test)

    bounce_key_prefix_for_testing(test_name)

    print('Running', test_name)
    if not hasattr(test, "_pre_setup"):
        # test_name is likely of the form
        # unittest.loader.ModuleImportFailure.zerver.tests.test_upload
        import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
        if test_name.startswith(import_failure_prefix):
            actual_test_name = test_name[len(import_failure_prefix):]
            print()
            print("Actual test to be run is %s, but import failed." % (actual_test_name,))
            print("Importing test module directly to generate clearer traceback:")
            try:
                command = [sys.executable, "-c", "import %s" % (actual_test_name,)]
                print("Import test command: `%s`" % (' '.join(command),))
                subprocess.check_call(command)
            except subprocess.CalledProcessError:
                print("If that traceback is confusing, try doing the import inside `./manage.py shell`")
                print()
                return True

            print("Import unexpectedly succeeded! Something is wrong.")
            print("Try running `import %s` inside `./manage.py shell`" % (actual_test_name,))
            print("If that works, you may have introduced an import cycle.")
            return True
        else:
            print("Test doesn't have _pre_setup; something is wrong.")
            print("Here's a debugger. Good luck!")
            import pdb
            pdb.set_trace()
    test._pre_setup()

    start_time = time.time()

    test.setUp()
    try:
        test_method()
    except unittest.SkipTest as e:
        print('Skipped:', e)
    except Exception:
        failed = True
        traceback.print_exc()

    test.tearDown()

    delay = time.time() - start_time
    enforce_timely_test_completion(test_method, test_name, delay)
    slowness_reason = getattr(test_method, 'slowness_reason', '')
    TEST_TIMINGS.append((delay, test_name, slowness_reason))

    test._post_teardown()
    return failed

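# run_test() is called directly by TestSuite.run() below; it drives the
# full TestCase lifecycle (_pre_setup, setUp, the test method, tearDown,
# _post_teardown) itself so that per-test timings land in TEST_TIMINGS.
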
class TestSuite(unittest.TestSuite):
    def run(self, result, debug=False):
        # type: (TextTestResult, Optional[bool]) -> TextTestResult
        for test in self:  # type: ignore  # Mypy cannot recognize this, but it is correct. Taken from unittest.
            result.startTest(test)
            # The attributes __unittest_skip__ and __unittest_skip_why__ are undocumented.
            if hasattr(test, '__unittest_skip__') and test.__unittest_skip__:  # type: ignore
                print('Skipping', full_test_name(test), "(%s)" % (test.__unittest_skip_why__,))  # type: ignore
            failed = run_test(test)
            # Hack: This should be sent back another way.
            result.failed = failed
            if failed and result.failfast:
                break

        return result

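# Note: unittest sets `failfast` on the result object when the runner is
# constructed with failfast=True (e.g. via Django's `--failfast` option),
# which the loop above honors by stopping after the first failure.
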
class TestLoader(loader.TestLoader):
    # Collect discovered tests into our TestSuite above rather than the
    # default unittest.TestSuite.
    suiteClass = TestSuite

class Runner(DiscoverRunner):
    test_suite = TestSuite
    test_loader = TestLoader()

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        DiscoverRunner.__init__(self, *args, **kwargs)

        # `templates_rendered` holds templates which were rendered
        # in proper logical tests.
        self.templates_rendered = set()  # type: Set[str]
        # `shallow_tested_templates` holds templates which were rendered
        # in `zerver.tests.test_templates`.
        self.shallow_tested_templates = set()  # type: Set[str]
        template_rendered.connect(self.on_template_rendered)

    def on_template_rendered(self, sender, context, **kwargs):
        # type: (Any, Dict[str, Any], **Any) -> None
        if hasattr(sender, 'template'):
            template_name = sender.template.name
            if template_name not in self.templates_rendered:
                # The outer check already guarantees this template has not
                # been seen in a proper logical test.
                if context.get('shallow_tested'):
                    self.shallow_tested_templates.add(template_name)
                else:
                    self.templates_rendered.add(template_name)
                    self.shallow_tested_templates.discard(template_name)

    def get_shallow_tested_templates(self):
        # type: () -> Set[str]
        return self.shallow_tested_templates

    def run_tests(self, test_labels, extra_tests=None,
                  full_suite=False, **kwargs):
        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            traceback.print_exc()
            print()
            print("  This is often caused by a test module/class/function that doesn't")
            print("  exist or doesn't import properly. You can usually debug in a")
            print("  `manage.py shell` via e.g.")
            print("    import zerver.tests.test_messages")
            print("    from zerver.tests.test_messages import StreamMessagesTest")
            print("    StreamMessagesTest.test_message_to_stream")
            print()
            sys.exit(1)
        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = result.failed
        if not failed:
            write_instrumentation_reports(full_suite=full_suite)
        return failed

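# A minimal sketch of wiring this runner into Django settings (the module
# path is an assumption; adjust it to wherever this file lives):
#
#     TEST_RUNNER = 'zerver.lib.test_runner.Runner'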