zulip/scripts/restart-server

#!/usr/bin/env python3
import logging
import os
import pwd
import shlex
import subprocess
import sys
import time

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from scripts.lib.setup_path import setup_path
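
# setup_path() puts the deployment's virtualenv on sys.path, so it must run
# before importing any module that needs third-party packages.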
setup_path()

from scripts.lib.supervisor import list_supervisor_processes
from scripts.lib.zulip_tools import (
    DEPLOYMENTS_DIR,
    ENDC,
    OKGREEN,
    WARNING,
    get_config,
    get_config_file,
    get_tornado_ports,
    has_application_server,
    has_process_fts_updates,
    overwrite_symlink,
    start_arg_parser,
    su_to_zulip,
)
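
# This script serves under two names: invoked as `start-server` it only
# starts services; invoked as `restart-server` it restarts them.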
action = "restart"
if not sys.argv[0].endswith("restart-server"):
action = "start"
verbing = action.title() + "ing"
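
# Emit log timestamps in UTC.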
logging.Formatter.converter = time.gmtime
logging.basicConfig(format=f"%(asctime)s {action}-server: %(message)s", level=logging.INFO)
parser = start_arg_parser(action=action, add_help=True)
args = parser.parse_args()
deploy_path = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
os.chdir(deploy_path)
username = pwd.getpwuid(os.getuid()).pw_name
if username == "root":
    su_to_zulip()
elif username != "zulip":
logging.error("Must be run as user 'zulip'.")
sys.exit(1)

if not args.skip_checks:
    logging.info("Running syntax and database checks")
    subprocess.check_call(["./manage.py", "check", "--database", "default"])

if args.fill_cache:
    logging.info("Filling memcached caches")
    subprocess.check_call(["./manage.py", "fill_memcached_caches", "--skip-checks"])
current_symlink = os.path.join(DEPLOYMENTS_DIR, "current")
last_symlink = os.path.join(DEPLOYMENTS_DIR, "last")
change_symlink = os.readlink(current_symlink) != deploy_path
if change_symlink:
    overwrite_symlink(os.readlink(current_symlink), last_symlink)
    overwrite_symlink(deploy_path, current_symlink)

config_file = get_config_file()
tornado_ports = get_tornado_ports(config_file)
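
# tornado_ports lists the ports of the sharded Tornado processes configured in
# zulip.conf (typically just one).
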
workers = []
if has_application_server():
    # Start by restarting the workers and similar processes, one at a
    # time. Workers can always support processing events with old event
    # contents, but cannot necessarily understand events enqueued by a
    # newer Django process. Restarting them one at a time, rather than
    # all-at-once, minimizes the downtime of each, and reduces startup
    # contention.
    #
    # For "start" or less-graceful circumstances, we don't need to
    # iterate; we'll stop all of them at once, and start them all later.
    # In those cases, using the glob form is faster -- but if we do need
    # to iterate, we need to expand the glob.
    if action == "start" or args.less_graceful:
        workers.append("zulip-workers:*")
    else:
        workers.extend(list_supervisor_processes(["zulip-workers:*"]))

if has_application_server(once=True):
    # These used to be included in "zulip-workers:*"; since we may
    # be restarting an older version of Zulip, which has not
    # applied puppet to reload the new list of processes, only
    # stop them if they currently exist according to
    # `supervisorctl`.
    workers.extend(
        list_supervisor_processes(
            [
                "zulip_deliver_scheduled_emails",
                "zulip_deliver_scheduled_messages",
            ]
        )
    )

if has_process_fts_updates():
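    # process-fts-updates propagates full-text-search index updates to
    # PostgreSQL.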
workers.append("process-fts-updates")
# Before we start (re)starting main services, make sure to start any
# optional auxiliary services that we don't stop, but do expect to be
# running, and aren't currently.
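# (go-camo proxies external images; smokescreen proxies outgoing HTTP requests.)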
aux_services = list_supervisor_processes(["go-camo", "smokescreen"], only_running=False)
if aux_services:
subprocess.check_call(["supervisorctl", "start", *aux_services])
# If neither the workers nor the application servers are running, this
# is actually a "start," not a restart, which means we will defer
# starting the workers until later.
if (
action == "restart"
and len(
list_supervisor_processes([*workers, "zulip-django", "zulip-tornado:*"], only_running=True)
)
== 0
):
action = "start"
verbing = "Starting"
elif action == "start":
    existing_services = list_supervisor_processes([*workers, "zulip-django", "zulip-tornado:*"])
    running_services = list_supervisor_processes(
        [*workers, "zulip-django", "zulip-tornado:*"], only_running=True
    )
    if existing_services == running_services:
        logging.info("Zulip is already started; nothing to do!")
        sys.exit(0)


def restart_or_start(service: str) -> None:
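    """Start or restart a single supervisord service, degrading gracefully:
    a "restart" of a stopped service becomes a "start", and a "start" of an
    already-running service is a no-op.
    """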
    our_verb = action
    existing_services = list_supervisor_processes([service])
    running_services = list_supervisor_processes([service], only_running=True)
    if our_verb == "restart" and len(running_services) == 0:
        our_verb = "start"
elif our_verb == "start" and existing_services == running_services:
logging.info("%s already started!", service)
return
subprocess.check_call(["supervisorctl", our_verb, service])
if action == "restart" and len(workers) > 0:
    if args.less_graceful:
        # The less graceful form stops every worker now; we start them
        # back up at the end.
        logging.info("Stopping workers")
        subprocess.check_call(["supervisorctl", "stop", *workers])
    else:
        # We cannot pass all of these to one `supervisorctl restart`
        # because that takes them all down at once, waits until they are
        # all down, and then brings them back up; doing them sequentially
        # requires multiple `supervisorctl restart` calls.
        for worker in workers:
            logging.info("Restarting %s", worker)
            restart_or_start(worker)

if has_application_server():
    # Next, we restart the Tornado processes sequentially, in order to
    # minimize downtime of the tornado service caused by too many
    # Python processes restarting at the same time, resulting in each
    # receiving insufficient priority. This is important, because
    # Tornado being unavailable for too long is the main source of
    # user-visible downtime when we restart a Zulip server. We do
    # this before restarting Django, in case there are new event types
    # which it will need to know how to deal with.
    if len(tornado_ports) > 1:
        for p in tornado_ports:
            # Restart Tornado processes individually for a better rate of
            # restarts. This also avoids behavior with restarting a whole
            # supervisord group where if any individual process is slow to
            # stop, the whole bundle stays stopped for an extended time.
            logging.info("%s Tornado process on port %s", verbing, p)
            restart_or_start(f"zulip-tornado:zulip-tornado-port-{p}")
    else:
        logging.info("%s Tornado process", verbing)
        restart_or_start("zulip-tornado:*")

    # Finally, restart the Django uWSGI processes.
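    # A rolling (chain) reload through the uWSGI master FIFO is only used
    # for graceful restarts, when `rolling_restart` is enabled in the
    # [application_server] section of zulip.conf and the control FIFO exists.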
    if (
        action == "restart"
        and not args.less_graceful
        and get_config(config_file, "application_server", "rolling_restart", False)
        and os.path.exists("/home/zulip/deployments/uwsgi-control")
    ):
        # See if it's currently running
        uwsgi_status = subprocess.run(
            ["supervisorctl", "status", "zulip-django"],
            stdout=subprocess.DEVNULL,
            check=False,
        )
        if uwsgi_status.returncode == 0:
            logging.info("Starting rolling restart of django server")
            with open("/home/zulip/deployments/uwsgi-control", "w") as control_socket:
                # "c" is chain-reloading:
                # https://uwsgi-docs.readthedocs.io/en/latest/MasterFIFO.html#available-commands
                control_socket.write("c")
        else:
            logging.info("Starting django server")
            subprocess.check_call(["supervisorctl", "start", "zulip-django"])
    else:
        logging.info("%s django server", verbing)
        restart_or_start("zulip-django")

    using_sso = subprocess.check_output(["./scripts/get-django-setting", "USING_APACHE_SSO"])
    if using_sso.strip() == b"True":
        logging.info("Restarting Apache WSGI process...")
        subprocess.check_call(["pkill", "-x", "apache2", "-u", "zulip"])

# If we were doing this non-gracefully, or starting as opposed to
# restarting, we need to turn the workers (back) on. There's no
# advantage to doing this not-all-at-once.
if action == "start" or args.less_graceful:
    workers = list_supervisor_processes(workers, only_running=False)
    if workers:
        logging.info("Starting workers")
        subprocess.check_call(["supervisorctl", "start", *workers])

if has_application_server() and not args.skip_client_reloads:
    # All of the servers have been (re)started; now enqueue events in
    # the Tornado servers to tell clients to reload.
    subprocess.check_call(["./scripts/reload-clients"])

logging.info("Done!")
print(OKGREEN + f"Zulip {action}ed successfully!" + ENDC)
if change_symlink and "PWD" in os.environ:
    for symlink in [last_symlink, current_symlink]:
        if os.path.commonprefix([os.environ["PWD"], symlink]) == symlink:
            print(
                """
{}Your shell entered its current directory through a symlink:
  {}
which has now changed. Your shell will not see this change until you run:
  cd {}
to traverse the symlink again.{}
""".format(WARNING, symlink, shlex.quote(os.environ["PWD"]), ENDC),
                file=sys.stderr,
            )
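
# Usage sketch (not exhaustive): the flags below correspond to the `args`
# attributes this script checks (skip_checks, fill_cache, less_graceful,
# skip_client_reloads); their exact spellings are defined by start_arg_parser
# in scripts.lib.zulip_tools.
#
#   ./scripts/restart-server                  # graceful rolling restart
#   ./scripts/restart-server --less-graceful  # faster, at the cost of downtime
#   ./scripts/start-server --fill-cache       # start, pre-filling memcached caches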