diff --git a/frontend_tests/run-casper b/frontend_tests/run-casper index d5a18dfd06..7544eb3ece 100755 --- a/frontend_tests/run-casper +++ b/frontend_tests/run-casper @@ -21,20 +21,27 @@ os.environ["PHANTOMJS_EXECUTABLE"] = os.path.join(ZULIP_PATH, "node_modules/.bin os.environ.pop("http_proxy", "") os.environ.pop("https_proxy", "") -usage = """test-js-with-casper [options] +usage = """ + test-js-with-casper # Run all test files test-js-with-casper 09-navigation.js # Run a single test file test-js-with-casper 09 # Run a single test file 09-navigation.js test-js-with-casper 01-login.js 03-narrow.js # Run a few test files - test-js-with-casper 01 03 # Run a few test files, 01-login.js and 03-narrow.js here""" -parser = argparse.ArgumentParser(usage) + test-js-with-casper 01 03 # Run a few test files, 01-login.js and 03-narrow.js here + + Using loops: + + test-js-with-casper --loop 5 07 # run 5 loops of test 07 +--- +""" +parser = argparse.ArgumentParser(usage=usage) parser.add_argument('--skip-flaky-tests', dest='skip_flaky', action="store_true", default=False, help='Skip flaky tests') -parser.add_argument('--interactive', dest='interactive', - action="store_true", - default=False, help='Run tests interactively') +parser.add_argument('--loop', dest='loop', nargs=1, + action="store", type=int, + default=None, help='Run tests in a loop.') parser.add_argument('--force', dest='force', action="store_true", default=False, help='Run tests despite possible problems.') @@ -98,6 +105,16 @@ def run_tests(files: Iterable[str], external_host: str) -> None: if options.skip_flaky: test_files = [fn for fn in test_files if '10-admin' not in fn] + if options.loop: + loop_cnt = options.loop[0] + print('\n\nWe will use loop mode for these tests:\n') + for test_file in test_files: + print(' ' + os.path.basename(test_file)) + print('\nnumber of loops: {}\n'.format(loop_cnt)) + print() + else: + loop_cnt = None + remote_debug = [] # type: List[str] if options.remote_debug: remote_debug = 
["--remote-debugger-port=7777", "--remote-debugger-autorun=yes"] @@ -121,6 +138,27 @@ def run_tests(files: Iterable[str], external_host: str) -> None: return ret return 0 + def run_loops(loop_cnt: int) -> None: + while True: + for trial in range(1, loop_cnt + 1): + print('\n\n\nSTARTING TRIAL {} / {}\n'.format(trial, loop_cnt)) + ret = run_tests() + if ret == 0: + print('\n\nSUCCESS! trial #{}\n\n'.format(trial)) + else: + print('\n\nFAIL! trial #{}\n'.format(trial)) + break + + while True: + response = input('Press "q" to quit or enter number of loops: ') + if response == 'q': + return + try: + loop_cnt = int(response) + break + except ValueError: + continue + with test_server_running(options.force, external_host): # Important: do next things inside the `with` block, when Django # will be pointing at the test database. @@ -128,15 +166,10 @@ def run_tests(files: Iterable[str], external_host: str) -> None: subprocess.check_call('tools/setup/generate-test-credentials') # RUN THE TESTS!!! - if options.interactive: - response = input('Press Enter to run tests, "q" to quit: ') - ret = 1 - while response != 'q': - ret = run_tests() - if ret != 0: - response = input('Tests failed. Press Enter to re-run tests, "q" to quit: ') + if loop_cnt: + run_loops(loop_cnt) + ret = 0 else: - ret = 1 ret = run_tests() if ret != 0: