From 4dc7f5354ddfbde3b732b31d46ec2981b47c427c Mon Sep 17 00:00:00 2001 From: Raymond Akornor Date: Fri, 11 Jan 2019 00:26:11 +0000 Subject: [PATCH] tests: Optimize test-backend performance. This optimizes test-backend by skipping webhook tests when run in default mode. Tweaked by tabbott to extend the documentation and update the CI commands. --- docs/testing/testing-with-django.md | 8 ++++++++ tools/ci/backend | 2 +- tools/test-all | 2 +- tools/test-backend | 15 ++++++++++++--- zerver/lib/test_helpers.py | 5 +++-- zerver/lib/test_runner.py | 3 ++- 6 files changed, 27 insertions(+), 8 deletions(-) diff --git a/docs/testing/testing-with-django.md b/docs/testing/testing-with-django.md index b4c1edb022..3eb40bde3a 100644 --- a/docs/testing/testing-with-django.md +++ b/docs/testing/testing-with-django.md @@ -51,6 +51,14 @@ iterative development, but you can override this behavior with the the `--rerun` option, which will rerun just the tests that failed in the last test run. +**Webhook integrations**. For performance, `test-backend` with no +arguments will not run webhook integration tests (`zerver/webhooks/`), +which would otherwise account for about 25% of the total runtime. +When working on webhooks, we recommend instead running `test-backend +zerver/webhooks` manually (or better, the directory for the specific +webhooks you're working on). And of course our CI is configured to +always use `test-backend --include-webhooks` and run all of the tests. 
+ ## Writing tests Before you write your first tests of Zulip, it is worthwhile to read diff --git a/tools/ci/backend b/tools/ci/backend index 95ad1d8bf8..96e4208d38 100755 --- a/tools/ci/backend +++ b/tools/ci/backend @@ -8,7 +8,7 @@ set -x ./tools/lint --backend --no-gitlint --no-mypy # gitlint disabled because flaky ./tools/test-tools -./tools/test-backend --coverage +./tools/test-backend --coverage --include-webhooks # We run mypy after the backend tests so we get output from the # backend tests, which tend to uncover more serious problems, first. diff --git a/tools/test-all b/tools/test-all index ad7d8f1507..d91111f4b8 100755 --- a/tools/test-all +++ b/tools/test-all @@ -41,7 +41,7 @@ run ./tools/clean-repo # ci/backend run ./tools/lint --backend $FORCEARG run ./tools/test-tools -run ./tools/test-backend $FORCEARG +run ./tools/test-backend --include-webhooks $FORCEARG run ./tools/test-migrations # Not running SVG optimizing since it's low-churn # run ./tools/setup/optimize-svg diff --git a/tools/test-backend b/tools/test-backend index ef448fd2c0..f51196dfb8 100755 --- a/tools/test-backend +++ b/tools/test-backend @@ -264,6 +264,10 @@ if __name__ == "__main__": default=False, help=("Run the tests which failed the last time " "test-backend was run. Implies --nonfatal-errors.")) + parser.add_argument('--include-webhooks', dest="include_webhooks", + action="store_true", + default=False, + help=("Include webhook tests. 
By default, they are skipped for performance.")) parser.add_argument('args', nargs='*') options = parser.parse_args() @@ -336,16 +340,20 @@ if __name__ == "__main__": full_suite = len(args) == 0 - if len(args) == 0: + if full_suite: suites = [ "zerver.tests", - "zerver.webhooks", "analytics.tests", "corporate.tests", ] else: suites = args + include_webhooks = options.coverage or options.include_webhooks + + if full_suite and include_webhooks: + suites.append("zerver.webhooks") + if not options.force: ok, msg = get_provisioning_status() if not ok: @@ -387,7 +395,8 @@ if __name__ == "__main__": test_runner = TestRunner(failfast=options.fatal_errors, verbosity=2, parallel=parallel, reverse=options.reverse, keepdb=True) - failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite) + failures, failed_tests = test_runner.run_tests(suites, full_suite=full_suite, + include_webhooks=options.include_webhooks) write_failed_tests(failed_tests) templates_not_rendered = test_runner.get_shallow_tested_templates() diff --git a/zerver/lib/test_helpers.py b/zerver/lib/test_helpers.py index 6e98b6a0e9..2cb18a5eeb 100644 --- a/zerver/lib/test_helpers.py +++ b/zerver/lib/test_helpers.py @@ -23,6 +23,7 @@ from zerver.lib import cache from zerver.tornado import event_queue from zerver.tornado.handlers import allocate_handler_id from zerver.worker import queue_processors +from zerver.lib.integrations import WEBHOOK_INTEGRATIONS from zerver.lib.actions import ( get_stream_recipient, @@ -325,7 +326,7 @@ def instrument_url(f: UrlFuncT) -> UrlFuncT: return result return wrapper -def write_instrumentation_reports(full_suite: bool) -> None: +def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> None: if INSTRUMENTING: calls = INSTRUMENTED_CALLS @@ -395,7 +396,7 @@ def write_instrumentation_reports(full_suite: bool) -> None: 'node-coverage/(?P.*)', 'docs/(?P.*)', 'casper/(?P.*)', - ]) + ] + [webhook.url for webhook in WEBHOOK_INTEGRATIONS if not 
include_webhooks]) untested_patterns -= exempt_patterns diff --git a/zerver/lib/test_runner.py b/zerver/lib/test_runner.py index d7a2fda8dc..5edb016dd4 100644 --- a/zerver/lib/test_runner.py +++ b/zerver/lib/test_runner.py @@ -434,6 +434,7 @@ class Runner(DiscoverRunner): def run_tests(self, test_labels: List[str], extra_tests: Optional[List[TestCase]]=None, full_suite: bool=False, + include_webhooks: bool=False, **kwargs: Any) -> Tuple[bool, List[str]]: self.setup_test_environment() try: @@ -472,7 +473,7 @@ class Runner(DiscoverRunner): self.teardown_test_environment() failed = self.suite_result(suite, result) if not failed: - write_instrumentation_reports(full_suite=full_suite) + write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks) return failed, result.failed_tests def get_test_names(suite: unittest.TestSuite) -> List[str]: