2015-11-01 17:11:06 +01:00
|
|
|
from __future__ import print_function
|
2014-01-29 17:28:55 +01:00
|
|
|
from django.test.runner import DiscoverRunner
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
from zerver.lib.cache import bounce_key_prefix_for_testing
|
2014-02-26 17:27:19 +01:00
|
|
|
from zerver.views.messages import get_sqlalchemy_connection
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
import os
|
|
|
|
import time
|
2016-01-23 23:18:26 +01:00
|
|
|
import traceback
|
2015-06-10 20:28:32 +02:00
|
|
|
import unittest
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
def slow(expected_run_time, slowness_reason):
    '''
    This is a decorator that annotates a test as being "known
    to be slow."  The decorator will set expected_run_time and
    slowness_reason as attributes of the function.  Other code
    can use this annotation as needed, e.g. to exclude these
    tests in "fast" mode.
    '''
    def decorator(f):
        # Tag the test function in place; we deliberately return the
        # original function (not a wrapper) so unittest still sees it.
        f.expected_run_time = expected_run_time
        f.slowness_reason = slowness_reason
        return f

    return decorator
|
|
|
|
|
|
|
|
def is_known_slow_test(test_method):
    """Return True when test_method was tagged by the @slow decorator."""
    tagged = hasattr(test_method, 'slowness_reason')
    return tagged
|
|
|
|
|
|
|
|
def full_test_name(test):
    """Return the dotted 'module.Class.method' name of a TestCase instance."""
    parts = (test.__module__,
             test.__class__.__name__,
             test._testMethodName)
    return '.'.join(parts)
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
def get_test_method(test):
|
|
|
|
return getattr(test, test._testMethodName)
|
|
|
|
|
|
|
|
def enforce_timely_test_completion(test_method, test_name, delay):
    """Print a warning when a test ran slower than its allowed budget.

    Tests tagged by @slow get 1.5x their declared expected_run_time;
    untagged tests get a flat 0.180s budget.  Either budget is then
    tripled to allow for slow laptops.
    """
    if hasattr(test_method, 'expected_run_time'):
        # Allow for tests to run 50% slower than normal due
        # to random variations.
        budget = 1.5 * test_method.expected_run_time
    else:
        budget = 0.180  # seconds

    # Further adjustments for slow laptops:
    budget *= 3

    if delay > budget:
        print('Test is TOO slow: %s (%.3f s)' % (test_name, delay))
|
2014-01-29 00:47:48 +01:00
|
|
|
|
|
|
|
def fast_tests_only():
    """Return a truthy value when the FAST_TESTS_ONLY env var is set.

    Note this returns the raw environment string (or False), not a bool;
    callers only rely on its truthiness.
    """
    environment = os.environ
    return environment.get('FAST_TESTS_ONLY', False)
|
|
|
|
|
|
|
|
def run_test(test):
    """Run a single TestCase through its full Django test lifecycle.

    Skips tests tagged @slow when FAST_TESTS_ONLY is set (returns None).
    Otherwise returns True when the test body raised an unexpected
    exception, False when it passed or was skipped via SkipTest.
    """
    did_fail = False
    method = get_test_method(test)

    # Honor the FAST_TESTS_ONLY environment switch for @slow tests.
    if fast_tests_only() and is_known_slow_test(method):
        return

    name = full_test_name(test)

    # Give each test its own cache key prefix so cached state cannot
    # leak between tests.
    bounce_key_prefix_for_testing(name)

    print('Running', name)
    if not hasattr(test, "_pre_setup"):
        print("somehow the test doesn't have _pre_setup; it may be an import fail.")
        print("Here's a debugger. Good luck!")
        import pdb
        pdb.set_trace()
    test._pre_setup()

    started = time.time()

    test.setUp()
    try:
        method()
    except unittest.SkipTest:
        # Skipped tests are not failures.
        pass
    except Exception:
        did_fail = True
        traceback.print_exc()

    test.tearDown()

    elapsed = time.time() - started
    enforce_timely_test_completion(method, name, elapsed)

    test._post_teardown()

    return did_fail
|
2014-01-29 00:47:48 +01:00
|
|
|
|
2014-01-29 17:28:55 +01:00
|
|
|
class Runner(DiscoverRunner):
    """Django test runner that drives each test through run_test()."""

    def __init__(self, *args, **kwargs):
        super(Runner, self).__init__(*args, **kwargs)

    def run_suite(self, suite, fatal_errors=None):
        """Run every test in suite; return True when any test failed.

        When fatal_errors is truthy, stop at the first failing test.
        """
        failed = False
        for test in suite:
            if run_test(test):
                failed = True
                if fatal_errors:
                    return failed
        return failed

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """Set up the environment, build and run the suite, then tear down.

        Returns the suite's failure flag.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        failed = self.run_suite(suite, fatal_errors=kwargs.get('fatal_errors'))
        self.teardown_test_environment()
        return failed
|
2015-11-01 17:11:06 +01:00
|
|
|
# NOTE(review): stray module-level print; presumably emits a trailing blank
# line after the test run — confirm its intended placement, as this chunk's
# line ordering looks mangled.
print()
|