2020-03-16 13:44:15 +01:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
import argparse
|
|
|
|
import os
|
|
|
|
import shlex
|
2020-06-11 00:54:34 +02:00
|
|
|
import subprocess
|
|
|
|
import sys
|
2020-03-16 13:44:15 +01:00
|
|
|
|
2020-11-28 19:27:44 +01:00
|
|
|
# Make the repository root importable so that `scripts.lib.*` resolves
# when this script is run directly out of the tools/ directory.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from scripts.lib.zulip_tools import ENDC, FAIL, OKGREEN
|
|
|
|
|
2020-03-16 13:44:15 +01:00
|
|
|
# Absolute path to the repository root (the parent of this script's directory).
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Request the special webpack setup for frontend integration tests,
# where webpack assets are compiled up front rather than running in
# watch mode.
os.environ["PUPPETEER_TESTS"] = "1"

# Point Puppeteer at the Chromium binary installed under node_modules,
# rather than any system-wide browser.
os.environ["CHROMIUM_EXECUTABLE"] = os.path.join(ZULIP_PATH, "node_modules/.bin/chromium")

# Drop any inherited proxy settings; the tests talk to a local test
# server, and a proxy would interfere. (The second argument makes
# `pop` a no-op when the variable is unset.)
os.environ.pop("http_proxy", "")
os.environ.pop("https_proxy", "")
|
|
|
|
|
|
|
|
usage = """test-js-with-puppeteer [options]
test-js-with-puppeteer # Run all test files
test-js-with-puppeteer 09-navigation.js # Run a single test file
test-js-with-puppeteer 09 # Run a single test file 09-navigation.js
test-js-with-puppeteer 01-login.js 03-narrow.js # Run a few test files
test-js-with-puppeteer 01 03 # Run a few test files, 01-login.js and 03-narrow.js here"""

# Command-line interface for the Puppeteer test runner.
#
# Note: the usage text must be passed via the `usage=` keyword; the
# first positional parameter of ArgumentParser is `prog`, not `usage`,
# so passing it positionally would make argparse treat the whole help
# text as the program name and mangle the generated help output.
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument('--interactive',
                    action="store_true",
                    help='Run tests interactively')
parser.add_argument('--force',
                    action="store_true",
                    help='Run tests despite possible problems.')
parser.add_argument('tests', nargs=argparse.REMAINDER,
                    help='Specific tests to run; by default, runs all tests')
|
|
|
|
|
|
|
|
# Parse the real command line into the module-level `options` namespace
# used throughout the rest of the script.
options = parser.parse_args()
|
|
|
|
|
|
|
|
# Put the repository root first on sys.path so `tools.lib.*` resolves.
sys.path.insert(0, ZULIP_PATH)

# check for the venv
from tools.lib import sanity_check

# Abort early with instructions if the development virtualenv is not
# set up correctly.
sanity_check.check_venv(__file__)
|
|
|
|
|
2020-06-17 20:36:00 +02:00
|
|
|
from typing import Iterable, Tuple
|
2020-06-11 00:54:34 +02:00
|
|
|
|
|
|
|
from tools.lib.test_script import (
|
|
|
|
assert_provisioning_status_ok,
|
|
|
|
find_js_test_files,
|
|
|
|
prepare_puppeteer_run,
|
|
|
|
)
|
2020-03-16 13:44:15 +01:00
|
|
|
from tools.lib.test_server import test_server_running
|
|
|
|
|
|
|
|
|
|
|
|
def run_tests(files: Iterable[str], external_host: str) -> None:
    """Run the Puppeteer test suite against a fresh test server.

    files: test-file names/prefixes to run (empty means all tests).
    external_host: host:port the test server should be reachable at.

    On failure, prints debugging hints and exits the process with the
    failing test's return code; on success, simply returns.
    """
    test_dir = os.path.join(ZULIP_PATH, 'frontend_tests/puppeteer_tests')
    test_files = find_js_test_files(test_dir, files)

    # Renamed from `run_tests` to avoid shadowing the enclosing
    # function's name, which made the `run_tests(...)` calls below
    # ambiguous to read.
    def run_tests_from(test_number: int = 0) -> Tuple[int, int]:
        """Run test_files[test_number:] in order, stopping at the first failure.

        Returns (0, -1) if every test passed; otherwise (ret, index),
        where `ret` is the failing test's exit status and `index` its
        position in test_files, so an interactive re-run can resume there.
        """
        ret = 1
        current_test_num = test_number
        for test_file in test_files[test_number:]:
            test_name = os.path.basename(test_file)
            cmd = ["node", test_file]
            print("\n\n===================== {}\nRunning {}\n\n".format(test_name, " ".join(map(shlex.quote, cmd))), flush=True)
            ret = subprocess.call(cmd)
            if ret != 0:
                return ret, current_test_num
            current_test_num += 1
        return 0, -1

    with test_server_running(False, external_host):
        # Important: do this next call inside the `with` block, when Django
        # will be pointing at the test database.
        subprocess.check_call('tools/setup/generate-test-credentials')
        if options.interactive:
            response = input('Press Enter to run tests, "q" to quit: ')
            ret = 1
            failed_test_num = 0
            # Keep re-running from the test that failed until the whole
            # suite passes (failed_test_num == -1) or the user quits.
            while response != 'q' and failed_test_num != -1:
                ret, failed_test_num = run_tests_from(failed_test_num)
                if ret != 0:
                    response = input('Tests failed. Press Enter to re-run tests, "q" to quit: ')
        else:
            # (Removed a dead `ret = 1` assignment that was immediately
            # overwritten here.)
            ret = run_tests_from()[0]

    if ret != 0:
        print(f"""
{FAIL}The Puppeteer frontend tests failed!{ENDC}
For help debugging, read:
https://zulip.readthedocs.io/en/latest/testing/testing-with-puppeteer.html
or report and ask for help in chat.zulip.org""", file=sys.stderr)
        if os.environ.get("CIRCLECI"):
            print("", file=sys.stderr)
            print("In CircleCI, the Artifacts tab contains screenshots of the failure.", file=sys.stderr)
            print("", file=sys.stderr)
        else:
            print("It's also worthy to see screenshots generated on failure stored under var/puppeteer/*.png")
        # Exit only on failure, so the caller's success message below
        # the run_tests() call remains reachable.
        sys.exit(ret)
|
|
|
|
|
|
|
|
# Host:port where the test server will listen.
external_host = "zulipdev.com:9981"

# Fail fast (unless --force) if provisioning is out of date.
assert_provisioning_status_ok(options.force)
prepare_puppeteer_run()
run_tests(options.tests, external_host)
# NOTE(review): this appears to rely on run_tests() exiting the process
# on failure, so reaching this point means every test passed — confirm
# against run_tests's failure path.
print(f"{OKGREEN}All tests passed!{ENDC}")
sys.exit(0)
|