mirror of https://github.com/zulip/zulip.git
testing: Use TestResult in run_test.
Internally, Django uses TestResult to gather results from test cases. This commit upgrades the run_test function to use TestResult to compile results.
This commit is contained in: parent f28158d301, commit 128beb910b
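
For context on the mechanism this commit adopts (not part of the diff): Django's test runner is built on unittest's TestResult protocol. The suite reports each test's outcome into a result object through startTest/addSuccess/addError/addFailure/addSkip, and the result aggregates outcomes instead of printing them ad hoc. A minimal stdlib-only sketch (SampleTest is an invented example):

    import unittest

    class SampleTest(unittest.TestCase):
        def test_passes(self):
            self.assertEqual(1 + 1, 2)

        def test_fails(self):
            self.fail("intentional failure")

    # The suite feeds every outcome into the result object; callers
    # inspect the aggregate afterwards instead of parsing printed text.
    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTest)
    result = unittest.TestResult()
    suite.run(result)
    print(result.testsRun)         # 2
    print(len(result.failures))    # 1
    print(result.wasSuccessful())  # False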

tools/test-backend
@@ -262,7 +262,7 @@ if __name__ == "__main__":
     subprocess.call(generate_fixtures_command)
 
     TestRunner = get_runner(settings)
-    test_runner = TestRunner(failfast=options.fatal_errors)
+    test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2)
     failures = test_runner.run_tests(suites, full_suite=full_suite)
 
     templates_not_rendered = test_runner.get_shallow_tested_templates()
@@ -309,8 +309,5 @@ if __name__ == "__main__":
     # an important clue as to why tests fail.
     report_slow_tests()
 
-    if failures:
-        print('FAILED!')
-    else:
-        print('DONE!')
+    # We'll have printed whether tests passed or failed above
     sys.exit(bool(failures))
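
A note on the tools/test-backend hunks above: at verbosity=2, unittest's text runner prints a line per test plus a final OK/FAILED summary, which is what makes the hand-rolled 'FAILED!'/'DONE!' prints redundant; the exit status still comes from bool(failures). A hedged stdlib-only illustration (DemoTest is invented):

    import unittest

    class DemoTest(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)

    suite = unittest.TestLoader().loadTestsFromTestCase(DemoTest)
    # verbosity=2 prints "test_ok ... ok" and a summary line, so no extra
    # DONE!/FAILED! print is needed; the result still carries the counts.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    failures = len(result.failures) + len(result.errors)
    # tools/test-backend exits nonzero iff anything failed:
    # sys.exit(bool(failures))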

zerver/lib/test_runner.py
@@ -1,16 +1,19 @@
 from __future__ import print_function
 
-from typing import Any, Callable, Iterable, List, Optional, Set, Tuple
+from typing import Any, Callable, Iterable, List, Optional, Set, Tuple, \
+    Text, Type
+from unittest import loader, runner # type: ignore # Mypy cannot pick these up.
+from unittest.result import TestResult
 
 from django.test import TestCase
-from django.test.runner import DiscoverRunner
+from django.test.runner import DiscoverRunner, RemoteTestResult
 from django.test.signals import template_rendered
-from unittest import loader # type: ignore # Mypy cannot pick this up.
 
 from zerver.lib.cache import bounce_key_prefix_for_testing
 from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
 from zerver.lib.test_helpers import (
     get_all_templates, write_instrumentation_reports,
+    append_instrumentation_data
 )
 
 import os
@@ -21,7 +24,7 @@ import traceback
 import unittest
 
 if False:
-    from unittest.result import TextTestResult
+    from unittest.result import TestResult
 
 def slow(slowness_reason):
     # type: (str) -> Callable[[Callable], Callable]
@@ -75,22 +78,23 @@ def report_slow_tests():
            print(' consider removing @slow decorator')
            print(' This may no longer be true: %s' % (slowness_reason,))
 
-def enforce_timely_test_completion(test_method, test_name, delay):
-    # type: (Any, str, float) -> None
+def enforce_timely_test_completion(test_method, test_name, delay, result):
+    # type: (Any, str, float, TestResult) -> None
     if hasattr(test_method, 'slowness_reason'):
         max_delay = 1.1 # seconds
     else:
         max_delay = 0.4 # seconds
 
     if delay > max_delay:
-        print(' ** Test is TOO slow: %s (%.3f s)' % (test_name, delay))
+        msg = '** Test is TOO slow: %s (%.3f s)\n' % (test_name, delay)
+        result.addInfo(test_method, msg)
 
 def fast_tests_only():
     # type: () -> bool
     return "FAST_TESTS_ONLY" in os.environ
 
-def run_test(test):
-    # type: (TestCase) -> bool
+def run_test(test, result):
+    # type: (TestCase, TestResult) -> bool
     failed = False
     test_method = get_test_method(test)
 
@@ -101,67 +105,99 @@ def run_test(test):
 
     bounce_key_prefix_for_testing(test_name)
 
-    print('Running', test_name)
     if not hasattr(test, "_pre_setup"):
         # test_name is likely of the form unittest.loader.ModuleImportFailure.zerver.tests.test_upload
         import_failure_prefix = 'unittest.loader.ModuleImportFailure.'
         if test_name.startswith(import_failure_prefix):
             actual_test_name = test_name[len(import_failure_prefix):]
-            print()
-            print("Actual test to be run is %s, but import failed." % (actual_test_name,))
-            print("Importing test module directly to generate clearer traceback:")
+            error_msg = ("\nActual test to be run is %s, but import failed.\n"
+                         "Importing test module directly to generate clearer "
+                         "traceback:\n") % (actual_test_name,)
+            result.addInfo(test, error_msg)
 
             try:
                 command = [sys.executable, "-c", "import %s" % (actual_test_name,)]
-                print("Import test command: `%s`" % (' '.join(command),))
+                msg = "Import test command: `%s`" % (' '.join(command),)
+                result.addInfo(test, msg)
                 subprocess.check_call(command)
             except subprocess.CalledProcessError:
-                print("If that traceback is confusing, try doing the import inside `./manage.py shell`")
-                print()
+                msg = ("If that traceback is confusing, try doing the "
+                       "import inside `./manage.py shell`")
+                result.addInfo(test, msg)
+                result.addError(test, sys.exc_info())
                 return True
-            print("Import unexpectedly succeeded! Something is wrong.")
-            print("Try running `import %s` inside `./manage.py shell`" % (actual_test_name,))
-            print("If that works, you may have introduced an import cycle.")
+            msg = ("Import unexpectedly succeeded! Something is wrong. Try "
+                   "running `import %s` inside `./manage.py shell`.\n"
+                   "If that works, you may have introduced an import "
+                   "cycle.") % (actual_test_name,)
+            import_error = (Exception, Exception(msg), None) # type: Tuple[Any, Any, Any]
+            result.addError(test, import_error)
             return True
         else:
-            print("Test doesn't have _pre_setup; something is wrong.")
-            print("Here's a debugger. Good luck!")
-            import pdb
-            pdb.set_trace()
+            msg = "Test doesn't have _pre_setup; something is wrong."
+            error_pre_setup = (Exception, Exception(msg), None) # type: Tuple[Any, Any, Any]
+            result.addError(test, error_pre_setup)
+            return True
     test._pre_setup()
 
     start_time = time.time()
 
-    test.setUp()
-    try:
-        test_method()
-    except unittest.SkipTest as e:
-        print('Skipped:', e)
-    except Exception:
-        failed = True
-        traceback.print_exc()
-
-    test.tearDown()
+    test(result) # unittest will handle skipping, error, failure and success.
 
     delay = time.time() - start_time
-    enforce_timely_test_completion(test_method, test_name, delay)
+    enforce_timely_test_completion(test_method, test_name, delay, result)
     slowness_reason = getattr(test_method, 'slowness_reason', '')
     TEST_TIMINGS.append((delay, test_name, slowness_reason))
 
     test._post_teardown()
     return failed
 
+class TextTestResult(runner.TextTestResult):
+    """
+    This class has unpythonic function names because base class follows
+    this style.
+    """
+    def addInfo(self, test, msg):
+        # type: (TestCase, Text) -> None
+        self.stream.write(msg)
+        self.stream.flush()
+
+    def addInstrumentation(self, test, data):
+        # type: (TestCase, Dict[str, Any]) -> None
+        append_instrumentation_data(data)
+
+    def startTest(self, test):
+        # type: (TestCase) -> None
+        TestResult.startTest(self, test)
+        self.stream.writeln("Running {}".format(full_test_name(test)))
+        self.stream.flush()
+
+    def addSuccess(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        TestResult.addSuccess(self, *args, **kwargs)
+
+    def addError(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        TestResult.addError(self, *args, **kwargs)
+
+    def addFailure(self, *args, **kwargs):
+        # type: (*Any, **Any) -> None
+        TestResult.addFailure(self, *args, **kwargs)
+
+    def addSkip(self, test, reason):
+        # type: (TestCase, Text) -> None
+        TestResult.addSkip(self, test, reason)
+        self.stream.writeln("** Skipping {}: {}".format(full_test_name(test),
+                                                        reason))
+        self.stream.flush()
+
 class TestSuite(unittest.TestSuite):
     def run(self, result, debug=False):
-        # type: (TextTestResult, Optional[bool]) -> TextTestResult
+        # type: (TestResult, Optional[bool]) -> TestResult
         for test in self: # type: ignore # Mypy cannot recognize this but this is correct. Taken from unittest.
-            result.startTest(test)
-            # The attributes __unittest_skip__ and __unittest_skip_why__ are undocumented
-            if hasattr(test, '__unittest_skip__') and test.__unittest_skip__: # type: ignore
-                print('Skipping', full_test_name(test), "(%s)" % (test.__unittest_skip_why__,)) # type: ignore
-            failed = run_test(test)
-            # Hack: This should be sent back another way
-            result.failed = failed
-            if failed and result.failfast:
+            failed = run_test(test, result)
+            if failed or result.shouldStop:
                 break
 
         return result
@@ -185,6 +221,10 @@ class Runner(DiscoverRunner):
         self.shallow_tested_templates = set() # type: Set[str]
         template_rendered.connect(self.on_template_rendered)
 
+    def get_resultclass(self):
+        # type: () -> Type[TestResult]
+        return TextTestResult
+
     def on_template_rendered(self, sender, context, **kwargs):
         # type: (Any, Dict[str, Any], **Any) -> None
         if hasattr(sender, 'template'):
@@ -222,7 +262,7 @@ class Runner(DiscoverRunner):
         get_sqlalchemy_connection()
         result = self.run_suite(suite)
         self.teardown_test_environment()
-        failed = result.failed
+        failed = self.suite_result(suite, result)
         if not failed:
             write_instrumentation_reports(full_suite=full_suite)
         return failed
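
To see how the zerver/lib/test_runner.py pieces now fit together, here is a hedged stdlib-only sketch (simplified stand-ins, not the commit's code): the runner obtains the result class via get_resultclass(), the suite pushes every test's outcome into one result object via run_test(test, result), and the accumulated failures/errors drive the exit status:

    import sys
    import unittest

    class InfoTestResult(unittest.TextTestResult):
        # Like the commit's TextTestResult: an extra channel for plain
        # informational messages beside the standard outcome methods.
        def addInfo(self, test, msg):
            self.stream.write(msg)
            self.stream.flush()

    def run_backend_tests(suite):
        # Roughly what DiscoverRunner does once get_resultclass() is
        # overridden: drive the suite with the custom result class.
        runner = unittest.TextTestRunner(resultclass=InfoTestResult,
                                         verbosity=2)
        result = runner.run(suite)
        # Django's suite_result() similarly reduces the result to a
        # failure count for the caller.
        return len(result.failures) + len(result.errors)

    if __name__ == '__main__':
        tests = unittest.defaultTestLoader.discover('.')  # path is illustrative
        sys.exit(bool(run_backend_tests(tests)))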