tests: Optimize test-backend performance.

This optimizes test-backend by skipping webhook
tests when run in default mode.

Tweaked by tabbott to extend the documentation and update the CI
commands.
Raymond Akornor, 2019-01-11 00:26:11 +00:00 (committed by Tim Abbott)
parent c78c3f423c
commit 4dc7f5354d
6 changed files with 27 additions and 8 deletions

@@ -51,6 +51,14 @@ iterative development, but you can override this behavior with the
 the `--rerun` option, which will rerun just the tests that failed in
 the last test run.
 
+**Webhook integrations**. For performance, `test-backend` with no
+arguments will not run webhook integration tests (`zerver/webhooks/`),
+which would otherwise account for about 25% of the total runtime.
+When working on webhooks, we recommend instead running `test-backend
+zerver/webhooks` manually (or better, the directory for the specific
+webhooks you're working on). And of course our CI is configured to
+always use `test-backend --include-webhooks` and run all of the tests.
+
 ## Writing tests
 
 Before you write your first tests of Zulip, it is worthwhile to read

@@ -8,7 +8,7 @@ set -x
 ./tools/lint --backend --no-gitlint --no-mypy # gitlint disabled because flaky
 ./tools/test-tools
-./tools/test-backend --coverage
+./tools/test-backend --coverage --include-webhooks
 # We run mypy after the backend tests so we get output from the
 # backend tests, which tend to uncover more serious problems, first.

@@ -41,7 +41,7 @@ run ./tools/clean-repo
 # ci/backend
 run ./tools/lint --backend $FORCEARG
 run ./tools/test-tools
-run ./tools/test-backend $FORCEARG
+run ./tools/test-backend --include-webhooks $FORCEARG
 run ./tools/test-migrations
 # Not running SVG optimizing since it's low-churn
 # run ./tools/setup/optimize-svg

@@ -264,6 +264,10 @@ if __name__ == "__main__":
                         default=False,
                         help=("Run the tests which failed the last time "
                               "test-backend was run. Implies --nonfatal-errors."))
+    parser.add_argument('--include-webhooks', dest="include_webhooks",
+                        action="store_true",
+                        default=False,
+                        help=("Include webhook tests. By default, they are skipped for performance."))
 
     parser.add_argument('args', nargs='*')
     options = parser.parse_args()
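
For readers unfamiliar with the pattern, the new option uses argparse's `store_true` action: the value defaults to False and flips to True only when the flag is passed on the command line. A minimal standalone sketch (not the real script) of that behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--include-webhooks', dest='include_webhooks',
                    action='store_true', default=False,
                    help='Include webhook tests; skipped by default for performance.')

# With no arguments the flag stays False; passing it flips it to True.
print(parser.parse_args([]).include_webhooks)                      # False
print(parser.parse_args(['--include-webhooks']).include_webhooks)  # True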
@@ -336,16 +340,20 @@ if __name__ == "__main__":
     full_suite = len(args) == 0
 
-    if len(args) == 0:
+    if full_suite:
         suites = [
             "zerver.tests",
-            "zerver.webhooks",
             "analytics.tests",
             "corporate.tests",
         ]
     else:
         suites = args
 
+    include_webhooks = options.coverage or options.include_webhooks
+    if full_suite and include_webhooks:
+        suites.append("zerver.webhooks")
+
     if not options.force:
         ok, msg = get_provisioning_status()
         if not ok:
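
The selection logic in this hunk reads naturally as a small pure function: webhook suites join the run only when the full default suite was requested and webhooks were opted in (explicitly, or implicitly via `--coverage`). A minimal sketch under those assumptions; `select_suites` is a hypothetical helper, not part of the actual script:

from typing import List

def select_suites(args: List[str], coverage: bool, include_webhooks_flag: bool) -> List[str]:
    # No explicit test labels means "run the full default suite".
    full_suite = len(args) == 0
    if full_suite:
        suites = ["zerver.tests", "analytics.tests", "corporate.tests"]
    else:
        suites = list(args)
    # Webhook tests are opt-in, but coverage runs must include them so
    # the coverage numbers stay meaningful.
    include_webhooks = coverage or include_webhooks_flag
    if full_suite and include_webhooks:
        suites.append("zerver.webhooks")
    return suites

print(select_suites([], coverage=False, include_webhooks_flag=False))
# ['zerver.tests', 'analytics.tests', 'corporate.tests']
print(select_suites([], coverage=True, include_webhooks_flag=False))
# ['zerver.tests', 'analytics.tests', 'corporate.tests', 'zerver.webhooks']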
@@ -387,7 +395,8 @@ if __name__ == "__main__":
     test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2,
                              parallel=parallel, reverse=options.reverse,
                              keepdb=True)
-    failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite)
+    failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite,
+                                                   include_webhooks=options.include_webhooks)
     write_failed_tests(failed_tests)
 
     templates_not_rendered = test_runner.get_shallow_tested_templates()

@@ -23,6 +23,7 @@ from zerver.lib import cache
 from zerver.tornado import event_queue
 from zerver.tornado.handlers import allocate_handler_id
 from zerver.worker import queue_processors
+from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
 
 from zerver.lib.actions import (
     get_stream_recipient,
@@ -325,7 +326,7 @@ def instrument_url(f: UrlFuncT) -> UrlFuncT:
         return result
     return wrapper
 
-def write_instrumentation_reports(full_suite: bool) -> None:
+def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> None:
     if INSTRUMENTING:
         calls = INSTRUMENTED_CALLS
@@ -395,7 +396,7 @@ def write_instrumentation_reports(full_suite: bool) -> None:
             'node-coverage/(?P<path>.*)',
             'docs/(?P<path>.*)',
             'casper/(?P<path>.*)',
-        ])
+        ] + [webhook.url for webhook in WEBHOOK_INTEGRATIONS if not include_webhooks])
 
         untested_patterns -= exempt_patterns
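
The list comprehension above feeds webhook URLs into the exempt set exactly when webhook tests were skipped, so a default run does not flag every webhook route as untested. A standalone sketch of that set arithmetic, using made-up URL patterns rather than the real `WEBHOOK_INTEGRATIONS` data:

# Hypothetical stand-ins for the webhook URL list and the set of URLs
# the instrumentation found untested.
WEBHOOK_URLS = ['api/v1/external/foo', 'api/v1/external/bar']

def untested_after_exemptions(untested: set, include_webhooks: bool) -> set:
    exempt_patterns = {'docs/(?P<path>.*)'}
    # When webhook tests were skipped, their URLs are expected to be
    # untested, so they are exempted rather than reported.
    exempt_patterns.update(url for url in WEBHOOK_URLS if not include_webhooks)
    return untested - exempt_patterns

untested = {'api/v1/external/foo', 'some/other/endpoint'}
print(sorted(untested_after_exemptions(untested, include_webhooks=False)))
# ['some/other/endpoint']  (the skipped webhook URL is exempted)
print(sorted(untested_after_exemptions(untested, include_webhooks=True)))
# ['api/v1/external/foo', 'some/other/endpoint']  (webhooks ran, so it is reported)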

@@ -434,6 +434,7 @@ class Runner(DiscoverRunner):
     def run_tests(self, test_labels: List[str],
                   extra_tests: Optional[List[TestCase]]=None,
                   full_suite: bool=False,
+                  include_webhooks: bool=False,
                   **kwargs: Any) -> Tuple[bool, List[str]]:
         self.setup_test_environment()
         try:
@@ -472,7 +473,7 @@ class Runner(DiscoverRunner):
             self.teardown_test_environment()
         failed = self.suite_result(suite, result)
         if not failed:
-            write_instrumentation_reports(full_suite=full_suite)
+            write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
         return failed, result.failed_tests
 
 def get_test_names(suite: unittest.TestSuite) -> List[str]:
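
Taken together, the flag travels from the command line through `run_tests` into the instrumentation report. A compressed sketch of that plumbing, with the runner and report writer reduced to stand-ins:

from typing import List

def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> None:
    # Stand-in for the real report writer shown in the diff above.
    print("reporting: full_suite=%s, include_webhooks=%s" % (full_suite, include_webhooks))

class Runner:
    def run_tests(self, suites: List[str], full_suite: bool=False,
                  include_webhooks: bool=False) -> bool:
        failed = False  # stand-in for actually running the suites
        if not failed:
            write_instrumentation_reports(full_suite=full_suite,
                                          include_webhooks=include_webhooks)
        return failed

Runner().run_tests(["zerver.tests"], full_suite=True, include_webhooks=True)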