diff --git a/docs/testing/linters.md b/docs/testing/linters.md
index bbb81d12c2..7f6f81fe66 100644
--- a/docs/testing/linters.md
+++ b/docs/testing/linters.md
@@ -126,9 +126,7 @@ eslint, and other home grown tools. You can find the source code
 [here](https://github.com/zulip/zulip/blob/master/tools/lint).
 
 In order for our entire lint suite to run in a timely fashion, the `lint`
-script performs several lint checks in parallel by forking out subprocesses. This mechanism
-is still evolving, but you can look at the method `run_parallel` to get the
-gist of how it works.
+script performs several lint checks in parallel by forking out subprocesses.
 
 Note that our project does custom regex-based checks on the code, and we
 also customize how we call pyflakes and pycodestyle (pep8). The code for these
diff --git a/tools/coveragerc b/tools/coveragerc
index f98cc411bb..c5bb8e5054 100644
--- a/tools/coveragerc
+++ b/tools/coveragerc
@@ -32,7 +32,6 @@ omit =
     zerver/lib/test_runner.py
     # Has its own independent test suite
     zerver/openapi/python_examples.py
-    zerver/lib/parallel.py
     # Debugging tools that don't lend themselves well to unit tests
     zerver/lib/debug.py
     # Part of provisioning/populate_db
diff --git a/tools/test-backend b/tools/test-backend
index d9a44a6f6c..ead1de4263 100755
--- a/tools/test-backend
+++ b/tools/test-backend
@@ -91,7 +91,6 @@ not_yet_fully_covered = [
     'zerver/lib/logging_util.py',
     'zerver/lib/migrate.py',
     'zerver/lib/outgoing_webhook.py',
-    'zerver/lib/parallel.py',
     'zerver/lib/profile.py',
     'zerver/lib/queue.py',
     'zerver/lib/sqlalchemy_utils.py',
diff --git a/zerver/lib/parallel.py b/zerver/lib/parallel.py
deleted file mode 100644
index b182b73aae..0000000000
--- a/zerver/lib/parallel.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import errno
-import os
-import pty
-import sys
-from typing import Callable, Dict, Iterable, Iterator, Tuple, TypeVar
-
-JobData = TypeVar('JobData')
-
-def run_parallel(job: Callable[[JobData], int],
-                 data: Iterable[JobData],
-                 threads: int=6) -> Iterator[Tuple[int, JobData]]:
-    pids: Dict[int, JobData] = {}
-
-    def wait_for_one() -> Tuple[int, JobData]:
-        while True:
-            try:
-                (pid, status) = os.wait()
-                return status, pids.pop(pid)
-            except KeyError:
-                pass
-
-    for item in data:
-        pid = os.fork()
-        if pid == 0:
-            sys.stdin.close()
-            try:
-                os.close(pty.STDIN_FILENO)
-            except OSError as e:
-                if e.errno != errno.EBADF:
-                    raise
-            sys.stdin = open("/dev/null")
-            os._exit(job(item))
-
-        pids[pid] = item
-        threads = threads - 1
-
-        if threads == 0:
-            (status, item) = wait_for_one()
-            threads += 1
-            yield (status, item)
-            if status != 0:
-                # Stop if any error occurred
-                break
-
-    while True:
-        try:
-            (status, item) = wait_for_one()
-            yield (status, item)
-        except OSError as e:
-            if e.errno == errno.ECHILD:
-                break
-            else:
-                raise
-
-if __name__ == "__main__":
-    # run some unit tests
-    import time
-    jobs = [10, 19, 18, 6, 14, 12, 8, 2, 1, 13, 3, 17, 9, 11, 5, 16, 7, 15, 4]
-    expected_output = [6, 10, 12, 2, 1, 14, 8, 3, 18, 19, 5, 9, 13, 11, 4, 7, 17, 16, 15]
-
-    def wait_and_print(x: int) -> int:
-        time.sleep(x * 0.1)
-        return 0
-
-    output = []
-    for (status, job) in run_parallel(wait_and_print, jobs):
-        output.append(job)
-    if output == expected_output:
-        print("Successfully passed test!")
-    else:
-        print("Failed test!")
-        print(jobs)
-        print(expected_output)
-        print(output)
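
Note: the `run_parallel` helper deleted above was a generator that forked one child process per job, ran `job(item)` in the child, and yielded `(status, item)` pairs as children exited, stopping early on the first nonzero status. For reference, a minimal caller would have looked roughly like the sketch below; `check_file` and the example paths are hypothetical, and only the `run_parallel` signature comes from the deleted module.

    from zerver.lib.parallel import run_parallel  # module removed by this patch

    def check_file(path: str) -> int:
        # A job returns a process exit code: 0 for success, nonzero for failure.
        return 0 if path.endswith(".py") else 1

    paths = ["zerver/lib/debug.py", "zerver/lib/queue.py", "README.md"]
    for status, path in run_parallel(check_file, paths, threads=4):
        # status is the raw os.wait() status; nonzero means the child failed.
        if status != 0:
            print("check failed for %s" % (path,))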