#!/usr/bin/env python

from __future__ import print_function
from __future__ import absolute_import
from typing import List
import glob
import optparse
import os
import sys
import subprocess
import ujson

# check for the venv
from lib import sanity_check
sanity_check.check_venv(__file__)

import django
from django.conf import settings
from django.test.utils import get_runner

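# The lists below define which files are expected to have 100% backend
# test coverage.  Globs are expanded with glob.glob() into concrete
# paths, and anything listed in not_yet_fully_covered is subtracted out.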
target_fully_covered = {path for target in [
    'analytics/lib/*.py',
    'analytics/models.py',
    'analytics/tests/*.py',
    'analytics/views.py',
    'zerver/context_processors.py',
    'zerver/lib/alert_words.py',
    'zerver/lib/attachments.py',
    'zerver/lib/avatar_hash.py',
    'zerver/lib/context_managers.py',
    'zerver/lib/domains.py',
    'zerver/lib/emoji.py',
    'zerver/lib/i18n.py',
    'zerver/lib/mention.py',
    'zerver/lib/message.py',
    'zerver/lib/name_restrictions.py',
    'zerver/lib/realm_icon.py',
    'zerver/lib/retention.py',
    'zerver/lib/streams.py',
    'zerver/lib/users.py',
    'zerver/lib/webhooks/*.py',
    'zerver/logging_handlers.py',
    # As a project, we require 100% test coverage in the views files.
    'zerver/views/*.py',
    # Test files should have 100% coverage; test code that isn't run
    # is likely a bug in the test.
    'zerver/tests/*.py',
    # Once we have a nice negative tests system, we can add these:
    # 'zerver/webhooks/*/*.py',
    # 'zerver/webhooks/*/*/*.py',
    'zproject/backends.py',
    # Uncovered but in the exclude list; we'd like to include these soon.
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/events.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/integrations.py',
    'zerver/lib/message.py',
    'zerver/lib/narrow.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/request.py',
    'zerver/lib/response.py',
    'zerver/lib/sessions.py',
    'zerver/lib/test_helpers.py',
    'zerver/lib/test_classes.py',
    'zerver/lib/upload.py',
    'zerver/lib/validator.py',
    'zerver/models.py',
] for path in glob.glob(target)}

not_yet_fully_covered = {
    # The analytics fixtures library is used to generate test fixtures;
    # it isn't properly accounted for in test coverage analysis since
    # it runs before the tests.
    'analytics/lib/fixtures.py',
    # We have 100% coverage on the new stuff; need to refactor old stuff.
    'analytics/views.py',
    # Major lib files should have 100% coverage
    'confirmation/models.py',
    'zerver/decorator.py',
    'zerver/lib/actions.py',
    'zerver/lib/bugdown/__init__.py',
    'zerver/lib/i18n.py',
    'zerver/lib/message.py',
    'zerver/lib/notifications.py',
    'zerver/lib/push_notifications.py',
    'zerver/lib/upload.py',
    'zerver/models.py',
}

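# The set we actually enforce: every targeted file that is not excluded
# above.  A full `--coverage` run fails if any of these files has
# uncovered lines.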
enforce_fully_covered = sorted(target_fully_covered - not_yet_fully_covered)

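# Cache of the test IDs that failed on the last run; consumed by --rerun.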
FAILED_TEST_PATH = 'var/last_test_failure.json'

def get_failed_tests():
    # type: () -> List[str]
    try:
        with open(FAILED_TEST_PATH, 'r') as f:
            return ujson.load(f)
    except IOError:
        print("%s doesn't exist; running all tests." % (FAILED_TEST_PATH,))
        return []

def write_failed_tests(failed_tests):
    # type: (List[str]) -> None
    if failed_tests:
        with open(FAILED_TEST_PATH, 'w') as f:
            ujson.dump(failed_tests, f)

if __name__ == "__main__":
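    # Run from the repository root, and make it importable so the
    # zerver/tools imports below resolve.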
    TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
    os.chdir(os.path.dirname(TOOLS_DIR))
    sys.path.insert(0, os.path.dirname(TOOLS_DIR))

    # Remove proxy settings for running backend tests
    os.environ["http_proxy"] = ""
    os.environ["https_proxy"] = ""

    from zerver.lib.test_fixtures import is_template_database_current

    from tools.lib.test_script import (
        get_provisioning_status,
    )

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.test_settings'
    # "-u" uses unbuffered IO, which is important when wrapping it in subprocess
    os.environ['PYTHONUNBUFFERED'] = 'y'

    usage = """%prog [options]
    test-backend # Runs all backend tests
    test-backend zerver.tests.test_bugdown # run all tests in a test module
    test-backend zerver/tests/test_bugdown.py # run all tests in a test module
    test-backend test_bugdown # run all tests in a test module
    test-backend zerver.tests.test_bugdown.BugdownTest # run all tests in a test class
    test-backend BugdownTest # run all tests in a test class
    test-backend zerver.tests.test_bugdown.BugdownTest.test_inline_youtube # run a single test
    test-backend BugdownTest.test_inline_youtube # run a single test"""

    parser = optparse.OptionParser(usage)

    parser.add_option('--nonfatal-errors', action="store_false", default=True,
                      dest="fatal_errors", help="Continue past test failures to run all tests")
    parser.add_option('--coverage', dest='coverage',
                      action="store_true",
                      default=False,
                      help='Compute test coverage. Enforces processes=1.')
    parser.add_option('--verbose-coverage', dest='verbose_coverage',
                      action="store_true",
                      default=False, help='Enable verbose print of coverage report.')

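    # optparse callback for --processes that rejects values below 1.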
    def allow_positive_int(option, opt_str, value, parser):
        # type: (optparse.Option, str, int, optparse.OptionParser) -> None
        if value < 1:
            raise optparse.OptionValueError(
                "option {}: Only positive integers are allowed.".format(opt_str))
        setattr(parser.values, option.dest, value)

    parser.add_option('--processes', dest='processes',
                      type="int",
                      callback=allow_positive_int,
                      action='callback',
                      default=4,
                      help='Specify the number of processes to run the '
                           'tests in. Default is 4.')
    parser.add_option('--profile', dest='profile',
                      action="store_true",
                      default=False, help='Profile test runtime.')
    parser.add_option('--force', dest='force',
                      action="store_true",
                      default=False, help='Run tests despite possible problems.')
    parser.add_option('--no-shallow', dest='no_shallow',
                      action="store_true",
                      default=False,
                      help="Don't allow shallow testing of templates (deprecated)")
    parser.add_option('--verbose', dest='verbose',
                      action="store_true",
                      default=False,
                      help="Show detailed output")
    parser.add_option('--generate-fixtures', action="store_true", default=False,
                      dest="generate_fixtures",
                      help="Force a call to generate-fixtures.")
    parser.add_option('--report-slow-tests', dest='report_slow_tests',
                      action="store_true",
                      default=False,
                      help="Show which tests are slowest.")
    parser.add_option('--reverse', dest='reverse',
                      action="store_true",
                      default=False,
                      help="Run tests in reverse order.")
    parser.add_option('--rerun', dest="rerun",
                      action="store_true",
                      default=False,
                      help=("Run the tests which failed the last time "
                            "test-backend was run. Implies --nonfatal-errors."))

    (options, args) = parser.parse_args()
    if options.coverage:
        # Currently coverage doesn't work with parallel mode, so when
        # the coverage option is supplied we enforce serial mode.
        print("Disabling parallel mode because coverage isn't supported.")
        options.processes = 1

    zerver_test_dir = 'zerver/tests/'

    # While running --rerun, we read var/last_test_failure.json to get
    # the list of tests that failed on the last run, and then pretend
    # those tests were passed explicitly. --rerun implies
    # --nonfatal-errors, so that we don't end up removing tests from
    # the list that weren't run.
    if options.rerun:
        options.fatal_errors = False
        failed_tests = get_failed_tests()
        if failed_tests:
            args = failed_tests

    # Transform forward slashes '/' present in the arguments into dots '.'.
    for suite in args:
        args[args.index(suite)] = suite.rstrip('/').replace("/", ".")

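    # Rewrite a bare class name argument (e.g. 'BugdownTest') into a full
    # path by searching the files under zerver/tests/ for that name.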
    def rewrite_arguments(search_key):
        # type: (str) -> None
        for root, dirs, files_names in os.walk(zerver_test_dir, topdown=False):
            for file_name in files_names:
                # Check for files starting with alphanumeric characters and ending with '.py'
                # Ignore backup files if any
                if not file_name[0].isalnum() or not file_name.endswith(".py"):
                    continue
                filepath = os.path.join(root, file_name)
                for line in open(filepath):
                    if search_key not in line:
                        continue
                    new_suite = filepath.replace(".py", ".") + suite
                    args[args.index(suite)] = new_suite
                    return

    for suite in args:
        if suite[0].isupper() and "test_" in suite:
            classname = suite.rsplit('.', 1)[0]
            rewrite_arguments(classname)
        elif suite[0].isupper():
            rewrite_arguments(suite)

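    # Map bare test file names (e.g. 'test_bugdown') to their path under
    # zerver/tests/.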
    for suite in args:
        if suite.startswith('test'):
            for root, dirs, files_names in os.walk(zerver_test_dir):
                for file_name in files_names:
                    if file_name == suite or file_name == suite + ".py":
                        new_suite = os.path.join(root, file_name)
                        args[args.index(suite)] = new_suite
                        break

    for suite in args:
        args[args.index(suite)] = suite.replace(".py", "")

    # Transform any forward slashes '/' introduced by zerver_test_dir above
    # into dots '.', so every argument is a dotted module path.
    for suite in args:
        args[args.index(suite)] = suite.replace("/", ".")

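    # An empty argument list means we are running the full suite; the
    # template and coverage checks below only apply in that case.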
    full_suite = len(args) == 0

    if len(args) == 0:
        suites = ["zerver.tests",
                  "zerver.webhooks",
                  "analytics.tests"]
    else:
        suites = args

    if not options.force:
        ok, msg = get_provisioning_status()
        if not ok:
            print(msg)
            print('If you really know what you are doing, use --force to run anyway.')
            sys.exit(1)

    if options.coverage:
        import coverage
        cov = coverage.Coverage(config_file="tools/coveragerc")
        cov.start()
    if options.profile:
        import cProfile
        prof = cProfile.Profile()
        prof.enable()

    # This is kind of hacky, but it's the most reliable way
    # to make sure instrumentation decorators know the
    # setting when they run.
    os.environ['TEST_INSTRUMENT_URL_COVERAGE'] = 'TRUE'

    # setup() needs to be called after coverage is started to get proper coverage reports of model
    # files, since part of setup is importing the models for all applications in INSTALLED_APPS.
    django.setup()

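    # Rebuild the test database fixtures if forced or if the template
    # database is out of date.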
    if options.generate_fixtures or not is_template_database_current():
        generate_fixtures_command = [os.path.join(TOOLS_DIR, 'setup', 'generate-fixtures')]
        generate_fixtures_command.append('--force')
        subprocess.call(generate_fixtures_command)

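    # get_runner() returns the test runner class configured in
    # settings.TEST_RUNNER; Zulip uses a custom runner that provides the
    # full_suite and template-coverage features used below.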
    TestRunner = get_runner(settings)
    parallel = options.processes

    if parallel > 1:
        print("-- Running tests in parallel mode with {} "
              "processes.".format(parallel))
    else:
        print("-- Running tests in serial mode.")

    test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
                             parallel=parallel, reverse=options.reverse,
                             keepdb=True)
    failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite)
    write_failed_tests(failed_tests)

    templates_not_rendered = test_runner.get_shallow_tested_templates()
    # We only check the templates if all the tests ran and passed
    if not failures and full_suite and templates_not_rendered:
        missed_count = len(templates_not_rendered)
        print("\nError: %s templates have no tests!" % (missed_count,))
        for template in templates_not_rendered:
            print(' {}'.format(template))
        print("See zerver/tests/test_templates.py for the exclude list.")
        failures = True

    if options.coverage:
        cov.stop()
        cov.save()
        if options.verbose_coverage:
            print("Printing coverage data")
            cov.report(show_missing=False)
        cov.html_report(directory='var/coverage')
        print("HTML report saved; visit it at http://127.0.0.1:9991/coverage/index.html")
    if full_suite and not failures and options.coverage:
        # Assert that various files have full coverage
        for path in enforce_fully_covered:
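            # cov.analysis2() returns a tuple whose element at index 3 is
            # the list of line numbers that lack coverage.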
            missing_lines = cov.analysis2(path)[3]
            if len(missing_lines) > 0:
                print("ERROR: %s no longer has complete backend test coverage" % (path,))
                print(" Lines missing coverage: %s" % (missing_lines,))
                print()
                failures = True
        if failures:
            print("It looks like your changes lost 100% test coverage in one or more files.")
            print("Usually, the right fix for this is to add some tests.")
            print("But also check out the include/exclude lists in tools/test-backend.")
            print("If a line is intentionally not tested, you can use a # nocoverage comment.")
            print("To run this check locally, use `test-backend --coverage`.")
    if options.profile:
        prof.disable()
        prof.dump_stats("/tmp/profile.data")
        print("Profile data saved to /tmp/profile.data")
        print("You can visualize it using e.g. `runsnake /tmp/profile.data`")

    if options.report_slow_tests:
        from zerver.lib.test_runner import report_slow_tests
        # We do this even with failures, since slowness can be
        # an important clue as to why tests fail.
        report_slow_tests()

    # We'll have printed whether tests passed or failed above
    sys.exit(bool(failures))