py3: Switch almost all shebang lines to use `python3`.
This causes `upgrade-zulip-from-git`, as well as a no-option run of
`tools/build-release-tarball`, to produce a Zulip install running
Python 3, rather than Python 2. In particular this means that the
virtualenv we create, in which all application code runs, is Python 3.
One shebang line, on `zulip-ec2-configure-interfaces`, explicitly
keeps Python 2, and at least one external ops script, `wal-e`, also
still runs on Python 2. See discussion on the respective previous
commits that made those explicit. There may also be some other
third-party scripts we use, outside of this source tree and running
outside our virtualenv, that still run on Python 2.
2017-08-02 23:15:16 +02:00
|
|
|
#!/usr/bin/env python3
|
2017-09-30 08:44:16 +02:00
|
|
|
import argparse
|
2013-03-05 22:50:06 +01:00
|
|
|
import glob
|
2020-06-11 00:54:34 +02:00
|
|
|
import os
|
2018-07-18 23:26:43 +02:00
|
|
|
import shlex
|
2020-06-11 00:54:34 +02:00
|
|
|
import subprocess
|
|
|
|
import sys
|
2012-11-09 21:03:57 +01:00
|
|
|
|
2013-05-21 23:09:08 +02:00
|
|
|
#
|
|
|
|
# In order to use remote casperjs debugging, pass the --remote-debug flag
|
|
|
|
# This will start a remote debugging session listening on port 7777
|
|
|
|
#
|
2019-06-10 15:34:07 +02:00
|
|
|
# See https://zulip.readthedocs.io/en/latest/testing/testing-with-casper.html
|
|
|
|
# for more information on how to use remote debugging
|
2013-05-21 23:09:08 +02:00
|
|
|
#
|
|
|
|
|
2017-08-28 01:03:45 +02:00
|
|
|
# Root of the Zulip checkout (this script lives one directory below it).
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Mark this process as a Casper test run (presumably read elsewhere to
# enable test-only behavior — semantics live outside this script), and
# point casperjs at the phantomjs binary installed under node_modules.
os.environ["CASPER_TESTS"] = "1"
os.environ["PHANTOMJS_EXECUTABLE"] = os.path.join(ZULIP_PATH, "node_modules/.bin/phantomjs")

# Drop any HTTP(S) proxy settings so requests go straight to the local
# test server.
for proxy_var in ("http_proxy", "https_proxy"):
    os.environ.pop(proxy_var, "")
|
2013-01-10 19:05:53 +01:00
|
|
|
|
2020-03-25 16:08:22 +01:00
|
|
|
# Usage text shown by argparse; includes a typo fix ("capser" -> "casper"
# in the --loop example) so the printed command actually exists.
usage = """
test-js-with-casper # Run all test files
test-js-with-casper 09-navigation.js # Run a single test file
test-js-with-casper 09 # Run a single test file 09-navigation.js
test-js-with-casper 01-login.js 03-narrow.js # Run a few test files
test-js-with-casper 01 03 # Run a few test files, 01-login.js and 03-narrow.js here

Using loops:

test-js-with-casper --loop 5 07 # run 5 loops of test 07
---
"""
|
|
|
|
# Command-line interface for the Casper test runner.  All boolean flags
# use store_true; "tests" swallows the remaining positional arguments.
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('--skip-flaky-tests', dest='skip_flaky', default=False,
                    action="store_true", help='Skip flaky tests')
parser.add_argument('--loop', nargs=1, type=int, action="store", default=None,
                    help='Run tests in a loop.')
parser.add_argument('--force', default=False, action="store_true",
                    help='Run tests despite possible problems.')
parser.add_argument('--verbose', default=False, action="store_true",
                    help='Whether or not to enable verbose mode')
parser.add_argument('--remote-debug', default=False, action="store_true",
                    help='Whether or not to enable remote debugging on port 7777')
parser.add_argument('--xunit-export', default=False, action="store_true",
                    help='Export the results of the test suite to an XUnit XML file,')
parser.add_argument('tests', nargs=argparse.REMAINDER,
                    help='Specific tests to run; by default, runs all tests')
options = parser.parse_args()
|
2013-05-21 23:09:08 +02:00
|
|
|
|
2017-08-28 01:03:45 +02:00
|
|
|
# Make the Zulip tree importable (tools.lib below, zerver later on).
sys.path.insert(0, ZULIP_PATH)

# check for the venv
from tools.lib import sanity_check

sanity_check.check_venv(__file__)

from typing import Iterable, List

from tools.lib.test_script import assert_provisioning_status_ok, find_js_test_files
from tools.lib.test_server import test_server_running

# Abort if the development environment looks unprovisioned; options.force
# is passed through, presumably to relax the check — semantics live in
# tools.lib.test_script.
assert_provisioning_status_ok(options.force)

# All relative paths below (node_modules/..., var/casper, tools/setup/...)
# assume the repository root is the working directory.
os.chdir(ZULIP_PATH)

# Run the phantomjs-prebuilt install step so the binary referenced by
# PHANTOMJS_EXECUTABLE above actually exists.
subprocess.check_call(['node', 'node_modules/phantomjs-prebuilt/install.js'])

# Ensure the output directory exists and clear casper-failure*.png images
# (likely failure screenshots) left over from previous runs.
os.makedirs('var/casper', exist_ok=True)
for f in glob.glob('var/casper/casper-failure*.png'):
    os.remove(f)
|
2016-09-27 10:12:40 +02:00
|
|
|
|
2020-03-12 13:51:54 +01:00
|
|
|
def reset_database() -> None:
    """Reset email addresses in the test realm via zerver's helper.

    The zerver import is deliberately deferred: it pulls in Django, and
    this must only run once Django is pointing at the test database
    (this function is called inside the test_server_running block).
    """
    from zerver.lib.test_helpers import reset_emails_in_zulip_realm
    reset_emails_in_zulip_realm()
|
|
|
|
|
2017-10-27 11:13:03 +02:00
|
|
|
def run_tests(files: Iterable[str], external_host: str) -> None:
    """Run the Casper suite against a test server on external_host.

    files: test-file names/prefixes selecting which casper_tests to run
    (empty means all).  Never returns normally: exits the process with
    the suite's status code via sys.exit.
    """
    test_dir = os.path.join(ZULIP_PATH, 'frontend_tests/casper_tests')
    test_files = find_js_test_files(test_dir, files)

    # 10-admin.js is too flaky!
    if options.skip_flaky:
        test_files = [fn for fn in test_files if '10-admin' not in fn]

    if options.loop:
        # --loop is declared with nargs=1, so options.loop is a one-element list.
        loop_cnt = options.loop[0]
        print('\n\nWe will use loop mode for these tests:\n')
        for test_file in test_files:
            print(' ' + os.path.basename(test_file))
        print(f'\nnumber of loops: {loop_cnt}\n')
        print()
    else:
        loop_cnt = None

    # Extra casperjs command-line flags assembled from the options.
    remote_debug: List[str] = []
    if options.remote_debug:
        remote_debug = ["--remote-debugger-port=7777", "--remote-debugger-autorun=yes"]

    verbose: List[str] = []
    if options.verbose:
        verbose = ["--verbose", "--log-level=debug"]

    xunit_export: List[str] = []
    if options.xunit_export:
        xunit_export = ["--xunit=var/xunit-test-results/casper/result.xml"]

    # Renamed from the original inner "run_tests", which shadowed this
    # enclosing function's name.
    def run_tests_once() -> int:
        """Run each test file in its own casperjs process; stop at the
        first failure and return its exit code (0 if all pass)."""
        ret = 1
        for test_file in test_files:
            test_name = os.path.basename(test_file)
            cmd = ["node_modules/.bin/casperjs"] + remote_debug + verbose + xunit_export + ["test", test_file]
            print("\n\n===================== {}\nRunning {}\n\n".format(test_name, " ".join(map(shlex.quote, cmd))), flush=True)
            ret = subprocess.call(cmd)
            if ret != 0:
                return ret
        return 0

    def run_loops(loop_cnt: int) -> None:
        """Interactive loop mode: run the suite loop_cnt times (stopping
        early on failure), then prompt for another count or 'q' to quit."""
        while True:
            for trial in range(1, loop_cnt + 1):
                print(f'\n\n\nSTARTING TRIAL {trial} / {loop_cnt}\n')
                ret = run_tests_once()
                if ret == 0:
                    # Fixed: the original printed a stray backtick before
                    # the SUCCESS banner.
                    print(f'\n\nSUCCESS! trial #{trial}\n\n')
                else:
                    print(f'\n\nFAIL! trial #{trial}\n')
                    break

            while True:
                response = input('Press "q" to quit or enter number of loops: ')
                if response == 'q':
                    return
                try:
                    loop_cnt = int(response)
                    break
                except ValueError:
                    continue

    with test_server_running(options.force, external_host):
        # Important: do next things inside the `with` block, when Django
        # will be pointing at the test database.
        reset_database()
        # Argv-list form (the original passed a bare string, which relies
        # on subprocess treating it as the executable path).
        subprocess.check_call(['tools/setup/generate-test-credentials'])

        # RUN THE TESTS!!!
        if loop_cnt:
            run_loops(loop_cnt)
            ret = 0
        else:
            ret = run_tests_once()

    if ret != 0:
        print("""
The Casper frontend tests failed! For help debugging, read:
https://zulip.readthedocs.io/en/latest/testing/testing-with-casper.html""", file=sys.stderr)
        if os.environ.get("CIRCLECI"):
            print("", file=sys.stderr)
            print("In CircleCI, the Artifacts tab contains screenshots of the failure.", file=sys.stderr)
            print("", file=sys.stderr)

    sys.exit(ret)
|
|
|
|
|
2017-01-13 01:09:52 +01:00
|
|
|
# Host:port the test server runs on (passed to test_server_running via
# run_tests).
external_host = "zulipdev.com:9981"
run_tests(options.tests, external_host)
# Unreachable: run_tests ends unconditionally with sys.exit(ret).
sys.exit(0)
|