#!/usr/bin/env python3
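
# Restart (or, when invoked under a name other than "restart-server", start)
# the services that make up this Zulip deployment.
#
# A minimal usage sketch -- the deployment path below is only an example, and
# the script must be run as the "zulip" user:
#
#     /home/zulip/deployments/current/scripts/restart-server
#     /home/zulip/deployments/current/scripts/restart-server --fill-cache
#     /home/zulip/deployments/current/scripts/restart-server --less-graceful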
import argparse
import logging
import os
import pwd
import shlex
import subprocess
import sys
import time
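
# Make the deployment root (the parent of this scripts/ directory) importable,
# so that scripts.lib.zulip_tools can be found.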
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from scripts.lib.zulip_tools import (
    DEPLOYMENTS_DIR,
    ENDC,
    OKGREEN,
    WARNING,
    get_config_file,
    get_tornado_ports,
    has_application_server,
    has_process_fts_updates,
    overwrite_symlink,
)
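
# This script doubles as start-server: if it is invoked under any name other
# than "restart-server", it only starts the services instead of restarting them.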
action = "restart"
if not sys.argv[0].endswith("restart-server"):
    action = "start"
verbing = action.title() + "ing"
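
# Log timestamps in UTC and tag every line with the action being performed.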
logging.Formatter.converter = time.gmtime
logging.basicConfig(format=f"%(asctime)s {action}-server: %(message)s", level=logging.INFO)

parser = argparse.ArgumentParser()
parser.add_argument("--fill-cache", action="store_true", help="Fill the memcached caches")
if action == "restart":
    parser.add_argument(
        "--less-graceful",
        action="store_true",
        help="Restart with more concern for expediency than minimizing availability interruption",
    )
args = parser.parse_args()
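
# Resolve the root of this deployment (the parent of scripts/) and run from
# there, so relative invocations like ./manage.py work regardless of where the
# script was called from.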
deploy_path = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
os.chdir(deploy_path)

if pwd.getpwuid(os.getuid()).pw_name != "zulip":
    logging.error("Must be run as user 'zulip'.")
    sys.exit(1)

# Send a statsd event on restarting the server
subprocess.check_call(
    ["./manage.py", "send_stats", "incr", "events.server_restart", str(int(time.time()))]
)

if args.fill_cache:
    logging.info("Filling memcached caches")
    subprocess.check_call(["./manage.py", "fill_memcached_caches"])
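
# Point the DEPLOYMENTS_DIR/current symlink at this deployment, remembering the
# previously active deployment as DEPLOYMENTS_DIR/last.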
current_symlink = os.path.join(DEPLOYMENTS_DIR, "current")
last_symlink = os.path.join(DEPLOYMENTS_DIR, "last")
change_symlink = os.readlink(current_symlink) != deploy_path
if change_symlink:
    overwrite_symlink(os.readlink(current_symlink), last_symlink)
    overwrite_symlink(deploy_path, current_symlink)
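
# Read the server's configuration to learn which Tornado ports are in use on
# this host.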
config_file = get_config_file()
tornado_ports = get_tornado_ports(config_file)
workers = []

if has_application_server():
    # Start by restarting the workers and similar processes, one at a
    # time. Workers can always support processing events with old event
    # contents, but cannot necessarily understand events enqueued by a
    # newer Django process. Restarting them one at a time, rather than
    # all-at-once, minimizes the downtime of each, and reduces startup
    # contention.
    #
    # For "start" or less-graceful circumstances, we don't need to
    # iterate; we'll stop all of them at once, and start them all later.
    # In those cases, using the glob form is faster -- but if we do need
    # to iterate, we need to expand the glob.
    if action == "start" or args.less_graceful:
        workers.append("zulip-workers:*")
    else:
        worker_status = subprocess.run(
            ["supervisorctl", "status", "zulip-workers:*"],
            universal_newlines=True,
            stdout=subprocess.PIPE,
        )
        # `supervisorctl status` returns 3 if any are stopped, which is fine here.
        if worker_status.returncode not in (0, 3):
            worker_status.check_returncode()
        workers.extend(status_line.split()[0] for status_line in worker_status.stdout.splitlines())
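
# The scheduled email and message delivery workers live outside the
# zulip-workers supervisord group; judging by has_application_server(once=True),
# they are meant to run on only one host of a multi-host deployment.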
if has_application_server(once=True):
    workers.extend(
        [
            "zulip_deliver_scheduled_emails",
            "zulip_deliver_scheduled_messages",
        ]
    )
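
# process-fts-updates, the PostgreSQL full-text search index updater, is
# included if it runs on this host.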
if has_process_fts_updates():
    workers.append("process-fts-updates")
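
# On a restart, cycle the workers now; on a plain start (and on a less graceful
# restart) they are all brought up together at the end of this script.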
if action == "restart" and len(workers) > 0:
    if args.less_graceful:
        # The less graceful form stops every worker now; we start them
        # back up at the end.
        logging.info("Stopping workers")
        subprocess.check_call(["supervisorctl", "stop", *workers])
    else:
        # We cannot pass all of these to one `supervisorctl restart`
        # because that takes them all down at once, waits until they are
        # all down, and then brings them back up; doing them sequentially
        # requires multiple `supervisorctl restart` calls.
        for worker in workers:
            logging.info("Restarting %s", worker)
            subprocess.check_call(["supervisorctl", "restart", worker])

if has_application_server():
    # Next, we restart the Tornado processes sequentially, in order to
    # minimize downtime of the tornado service caused by too many Python
    # processes restarting at the same time, resulting in each receiving
    # insufficient priority. This is important, because Tornado is the
    # main source of user-visible downtime when we restart a Zulip server.
    # We do this before restarting Django, in case there are new event
    # types which it will need to know how to deal with.
    if len(tornado_ports) > 1:
        for p in tornado_ports:
            # Restart Tornado processes individually for a better rate of
            # restarts. This also avoids the behavior, seen when restarting a
            # whole supervisord group, where if any individual process is slow
            # to stop, the whole bundle stays stopped for an extended time.
            logging.info("%s Tornado process on port %s", verbing, p)
            subprocess.check_call(
                ["supervisorctl", action, f"zulip-tornado:zulip-tornado-port-{p}"]
            )
    else:
        logging.info("%s Tornado process", verbing)
        subprocess.check_call(["supervisorctl", action, "zulip-tornado", "zulip-tornado:*"])

    # Finally, restart the Django uWSGI processes.
    logging.info("%s django server", verbing)
    subprocess.check_call(["supervisorctl", action, "zulip-django"])
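
    # If Apache is providing SSO authentication, its zulip-owned WSGI processes
    # also run application code; kill them so that Apache brings them back up
    # on the new version.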
    using_sso = subprocess.check_output(["./scripts/get-django-setting", "USING_APACHE_SSO"])
    if using_sso.strip() == b"True":
        logging.info("Restarting Apache WSGI process...")
        subprocess.check_call(["pkill", "-x", "apache2", "-u", "zulip"])

# If we were doing this non-gracefully, or starting as opposed to
# restarting, we need to turn the workers (back) on. There's no
# advantage to doing this not-all-at-once.
if (action == "start" or args.less_graceful) and len(workers) > 0:
    logging.info("Starting workers")
    subprocess.check_call(["supervisorctl", "start", *workers])

logging.info("Done!")
print(OKGREEN + f"Zulip {action}ed successfully!" + ENDC)
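
# If the invoking shell's working directory was reached through one of the
# symlinks we just rewrote, warn the user to `cd` again so the shell sees the
# new deployment.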
if change_symlink and "PWD" in os.environ:
    for symlink in [last_symlink, current_symlink]:
        if os.path.commonprefix([os.environ["PWD"], symlink]) == symlink:
            print(
                """
{}Your shell entered its current directory through a symlink:
  {}
which has now changed. Your shell will not see this change until you run:
  cd {}
to traverse the symlink again.{}
""".format(
                    WARNING, symlink, shlex.quote(os.environ["PWD"]), ENDC
                ),
                file=sys.stderr,
            )