#!/usr/bin/env python
"""Run the Zulip backend test suite.

Wrapper around Django's test runner that adds coverage, profiling,
shallow-template checks, and slow-test reporting.  Must be run inside a
Zulip development environment.
"""
from __future__ import print_function

import optparse
import os
import sys
import subprocess
# Import guard: fail with a friendly message (rather than a bare traceback)
# when run outside a provisioned Zulip development environment.
try:
    import django
    from django.conf import settings
    from django.test.utils import get_runner

    # We don't actually need typing, but it's a good guard for being
    # outside a Zulip virtualenv.
    import typing
except ImportError as e:
    print("ImportError: {}".format(e))
    print("You need to run the Zulip tests inside a Zulip dev environment.")
    print("If you are using Vagrant, you can `vagrant ssh` to enter the Vagrant guest.")
    sys.exit(1)
if __name__ == "__main__":
    # Run from the repository root so relative paths (var/coverage,
    # tools/setup/...) resolve correctly regardless of the caller's cwd.
    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(os.path.dirname(TOOLS_DIR))
    sys.path.insert(0, os.path.dirname(TOOLS_DIR))
    os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
    # "-u" uses unbuffered IO, which is important when wrapping it in subprocess
    os.environ['PYTHONUNBUFFERED'] = 'y'

    usage = """%prog [options]
test-backend # Runs all backend tests
test-backend zerver.tests.test_bugdown # run all tests in a test module
test-backend zerver.tests.test_bugdown.BugdownTest # run all tests in a test class
test-backend zerver.tests.test_bugdown.BugdownTest.test_inline_youtube # run a single test"""

    parser = optparse.OptionParser(usage)
    parser.add_option('--nonfatal-errors', action="store_false", default=True,
                      dest="fatal_errors", help="Continue past test failures to run all tests")
    parser.add_option('--coverage', dest='coverage',
                      action="store_true",
                      default=False, help='Compute test coverage.')
    parser.add_option('--url-coverage', dest='url_coverage',
                      action="store_true",
                      default=False, help='Write url coverage data.')
    parser.add_option('--no-verbose-coverage', dest='verbose_coverage',
                      action="store_false",
                      default=True, help='Disable verbose print of coverage report.')
    parser.add_option('--profile', dest='profile',
                      action="store_true",
                      default=False, help='Profile test runtime.')
    parser.add_option('--no-shallow', dest='no_shallow',
                      action="store_true",
                      default=False,
                      help="Don't allow shallow testing of templates")
    parser.add_option('--verbose', dest='verbose',
                      action="store_true",
                      default=False,
                      help="Show detailed output")
    parser.add_option('--no-generate-fixtures', action="store_false", default=True,
                      dest="generate_fixtures",
                      help=("Reduce running time by not calling generate-fixtures. "
                            "This may cause spurious failures for some tests."))
    parser.add_option('--report-slow-tests', dest='report_slow_tests',
                      action="store_true",
                      default=False,
                      help="Show which tests are slowest.")

    (options, args) = parser.parse_args()
    # Positional args name specific test modules/classes/methods;
    # default to the full zerver test suite.
    if len(args) == 0:
        suites = ["zerver.tests"]
    else:
        suites = args

    if options.coverage:
        import coverage
        cov = coverage.Coverage(omit="*/zulip-venv-cache/*")
        cov.start()
    if options.profile:
        import cProfile
        prof = cProfile.Profile()
        prof.enable()
    if options.url_coverage:
        # This is kind of hacky, but it's the most reliable way
        # to make sure instrumentation decorators know the
        # setting when they run.
        os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'

    # setup() needs to be called after coverage is started to get proper coverage reports of model
    # files, since part of setup is importing the models for all applications in INSTALLED_APPS.
    django.setup()

    if options.generate_fixtures:
        subprocess.call(os.path.join(TOOLS_DIR, 'setup', 'generate-fixtures'))

    TestRunner = get_runner(settings)
    test_runner = TestRunner()
    failures = test_runner.run_tests(suites, fatal_errors=options.fatal_errors)

    # Report templates that tests rendered only "shallowly"; with
    # --no-shallow, any such template is treated as a failure.
    templates_not_rendered = test_runner.get_shallow_tested_templates()
    if templates_not_rendered:
        missed_count = len(templates_not_rendered)
        if options.no_shallow or options.verbose:
            print("*** Shallow tested templates: {}".format(missed_count))

        if options.verbose:
            for template in templates_not_rendered:
                print('--- {}'.format(template))

        if options.no_shallow:
            failures = True

    if options.coverage:
        cov.stop()
        cov.save()
        if options.verbose_coverage:
            print("Printing coverage data")
            cov.report(show_missing=False)
        cov.html_report(directory='var/coverage')
        print("HTML report saved to var/coverage")
    if options.profile:
        prof.disable()
        prof.dump_stats("/tmp/profile.data")
        print("Profile data saved to /tmp/profile.data")
        print("You can visualize it using e.g. `runsnake /tmp/profile.data`")

    if options.report_slow_tests:
        from zerver.lib.test_runner import report_slow_tests
        # We do this even with failures, since slowness can be
        # an important clue as to why tests fail.
        report_slow_tests()

    if failures:
        print('FAILED!')
    else:
        print('DONE!')
    # bool is an int subclass, so this exits 1 on failure and 0 on success.
    sys.exit(bool(failures))