py3: Switch almost all shebang lines to use `python3`.
This causes `upgrade-zulip-from-git`, as well as a no-option run of
`tools/build-release-tarball`, to produce a Zulip install running
Python 3, rather than Python 2. In particular this means that the
virtualenv we create, in which all application code runs, is Python 3.
One shebang line, on `zulip-ec2-configure-interfaces`, explicitly
keeps Python 2, and at least one external ops script, `wal-e`, also
still runs on Python 2. See discussion on the respective previous
commits that made those explicit. There may also be some other
third-party scripts we use, outside of this source tree and running
outside our virtualenv, that still run on Python 2.
2017-08-02 23:15:16 +02:00
|
|
|
#!/usr/bin/env python3
|
2024-08-28 03:57:32 +02:00
|
|
|
import contextlib
|
2020-06-11 00:54:34 +02:00
|
|
|
import logging
|
2013-01-31 16:49:09 +01:00
|
|
|
import os
|
2013-06-19 21:16:39 +02:00
|
|
|
import pwd
|
2020-06-11 00:54:34 +02:00
|
|
|
import shlex
|
2013-01-31 16:49:09 +01:00
|
|
|
import subprocess
|
2020-06-11 00:54:34 +02:00
|
|
|
import sys
|
2013-04-18 22:58:32 +02:00
|
|
|
import time
|
2013-10-25 23:20:40 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
|
2023-10-12 20:40:54 +02:00
|
|
|
from scripts.lib.setup_path import setup_path
|
|
|
|
|
|
|
|
setup_path()
|
|
|
|
|
2022-01-20 01:01:13 +01:00
|
|
|
from scripts.lib.supervisor import list_supervisor_processes
|
2020-09-15 02:01:33 +02:00
|
|
|
from scripts.lib.zulip_tools import (
|
|
|
|
DEPLOYMENTS_DIR,
|
|
|
|
ENDC,
|
|
|
|
OKGREEN,
|
|
|
|
WARNING,
|
2024-03-05 17:16:31 +01:00
|
|
|
get_config,
|
2020-09-15 02:01:33 +02:00
|
|
|
get_config_file,
|
|
|
|
get_tornado_ports,
|
2021-04-27 20:48:19 +02:00
|
|
|
has_application_server,
|
2021-05-14 03:08:38 +02:00
|
|
|
has_process_fts_updates,
|
2020-09-15 02:01:33 +02:00
|
|
|
overwrite_symlink,
|
2021-12-13 20:39:56 +01:00
|
|
|
start_arg_parser,
|
2024-02-07 20:57:28 +01:00
|
|
|
su_to_zulip,
|
2020-09-15 02:01:33 +02:00
|
|
|
)
|
2013-03-13 19:26:51 +01:00
|
|
|
|
2021-04-16 21:02:32 +02:00
|
|
|
# This script is installed as both `restart-server` and `start-server`
# (via symlink); infer which action was requested from the name we
# were invoked under.
action = "start" if not sys.argv[0].endswith("restart-server") else "restart"
verbing = action.title() + "ing"

# Log timestamps in UTC, and tag every line with the action performed.
logging.Formatter.converter = time.gmtime
logging.basicConfig(format=f"%(asctime)s {action}-server: %(message)s", level=logging.INFO)

# Shared argument parser for the start/restart family of scripts.
parser = start_arg_parser(action=action, add_help=True)
args = parser.parse_args()
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
# Run from the deployment root (the parent of this scripts/ directory).
deploy_path = os.path.realpath(os.path.join(os.path.dirname(__file__), ".."))
os.chdir(deploy_path)

# All server management must happen as the "zulip" user: re-exec via
# su if we were invoked as root, and refuse any other user.
whoami = pwd.getpwuid(os.getuid()).pw_name
if whoami == "root":
    su_to_zulip()
elif whoami != "zulip":
    logging.error("Must be run as user 'zulip'.")
    sys.exit(1)
|
2013-06-19 17:25:42 +02:00
|
|
|
|
2022-10-14 00:35:30 +02:00
|
|
|
|
|
|
|
# Fail fast on code or database misconfiguration before touching any
# running services.
if not args.skip_checks:
    logging.info("Running syntax and database checks")
    subprocess.check_call(["./manage.py", "check", "--database", "default"])

# Optionally pre-warm memcached, so the freshly (re)started server does
# not pay cold-cache costs on its first requests.
if args.fill_cache:
    logging.info("Filling memcached caches")
    subprocess.check_call(["./manage.py", "fill_memcached_caches", "--automated", "--skip-checks"])
|
2013-05-30 21:05:34 +02:00
|
|
|
|
2018-08-11 01:28:06 +02:00
|
|
|
# Repoint the "current" deployment symlink at this deploy, saving the
# previous target as "last" so rolling back is easy.
current_symlink = os.path.join(DEPLOYMENTS_DIR, "current")
last_symlink = os.path.join(DEPLOYMENTS_DIR, "last")
previous_target = os.readlink(current_symlink)
change_symlink = previous_target != deploy_path
if change_symlink:
    overwrite_symlink(previous_target, last_symlink)
    overwrite_symlink(deploy_path, current_symlink)
|
2018-08-11 01:28:06 +02:00
|
|
|
|
2020-09-15 02:01:33 +02:00
|
|
|
# Read this deploy's configuration: which Tornado ports exist, and
# which worker processes we will manage below.
config_file = get_config_file()
tornado_ports = get_tornado_ports(config_file)
workers = []

if has_application_server():
    # Start by restarting the workers and similar processes, one at a
    # time. Workers can always support processing events with old event
    # contents, but cannot necessarily understand events enqueued by a
    # newer Django process. Restarting them one at a time, rather than
    # all-at-once, minimizes the downtime of each, and reduces startup
    # contention.
    #
    # For "start" or less-graceful circumstances, we don't need to
    # iterate; we'll stop all of them at once, and start them all later.
    # In those cases, using the glob form is faster -- but if we do need
    # to iterate, we need to expand the glob.
    if action == "start" or args.less_graceful:
        workers.append("zulip-workers:*")
    else:
        workers.extend(list_supervisor_processes(["zulip-workers:*"]))
|
2021-04-20 23:11:52 +02:00
|
|
|
|
2021-06-11 22:58:09 +02:00
|
|
|
# Additional per-host processes that only exist on the "once" host of a
# multi-host deployment.
if has_application_server(once=True):
    # These used to be included in "zulip-workers:*"; since we may
    # be restarting an older version of Zulip, which has not
    # applied puppet to reload the new list of processes, only
    # stop them if they currently exist according to
    # `supervisorctl`.
    workers.extend(
        list_supervisor_processes(
            [
                "zulip_deliver_scheduled_emails",
                "zulip_deliver_scheduled_messages",
            ]
        )
    )

    # This is an optional service, so may or may not exist
    workers.extend(list_supervisor_processes(["zulip-katex"]))

    # This does have some Python code, which reads from settings.py,
    # so we need to restart it on every deploy. A short outage during
    # the restart is fine, as clients will transparently retry.
    workers.append("zulip-tus")

# The full-text-search updater runs on database hosts; manage it like
# any other worker when present.
if has_process_fts_updates():
    workers.append("process-fts-updates")
|
|
|
|
|
2022-01-18 23:44:40 +01:00
|
|
|
# Before we start (re)starting main services, make sure to start any
# optional auxiliary services that we don't stop, but do expect to be
# running, and aren't currently. These get new versions by getting
# updated supervisor files, and puppet restarts them -- so we never
# restart them in here, only start them.
aux_services = list_supervisor_processes(["go-camo", "smokescreen"], only_running=False)
if aux_services:
    subprocess.check_call(["supervisorctl", "start", *aux_services])
|
|
|
|
|
2024-10-08 17:21:34 +02:00
|
|
|
# With --only-django, leave every worker untouched and consider only
# the Django process when classifying this run below.
if args.only_django:
    workers = []
    check_services = ["zulip-django"]
else:
    check_services = [*workers, "zulip-django", "zulip-tornado:*"]

# If none of the workers nor the application servers are running, this
# is actually a "start," not a restart, which means we will defer
# workers to later.
running_services = list_supervisor_processes(check_services, only_running=True)
if action == "restart" and len(running_services) == 0:
    action = "start"
    verbing = "Starting"
elif action == "start":
    # Conversely, if everything supervisord knows about is already
    # running, a "start" is a clean no-op; say so and exit, rather
    # than producing a stream of "ERROR (already started)" lines.
    existing_services = list_supervisor_processes(check_services)
    if existing_services == running_services:
        logging.info("Zulip is already started; nothing to do!")
        sys.exit(0)
|
2022-03-26 01:30:08 +01:00
|
|
|
|
2022-03-09 21:24:21 +01:00
|
|
|
|
|
|
|
def restart_or_start(service: str) -> None:
    """(Re)start `service` via supervisorctl, quietly adjusting the verb.

    A "restart" of a fully-stopped service becomes a "start", and a
    "start" of an already-running service becomes a logged no-op, so
    we never emit supervisorctl's noisy "ERROR (already started)".
    """
    verb = action
    known = list_supervisor_processes([service])
    running = list_supervisor_processes([service], only_running=True)
    if verb == "restart" and not running:
        # Nothing is up, so there is nothing to "restart"; start it.
        verb = "start"
    elif verb == "start" and known == running:
        # Every matching process is already running; nothing to do.
        logging.info("%s already started!", service)
        return
    subprocess.check_call(["supervisorctl", verb, service])
|
|
|
|
|
|
|
|
|
2021-04-27 20:48:19 +02:00
|
|
|
# For a graceful restart, cycle workers one at a time; otherwise stop
# them all now and start them back up at the very end.
if action == "restart" and len(workers) > 0:
    if args.less_graceful:
        # The less graceful form stops every worker now; we start them
        # back up at the end.
        logging.info("Stopping workers")
        subprocess.check_call(["supervisorctl", "stop", *workers])
    else:
        # We cannot pass all of these to one `supervisorctl restart`
        # because that takes them all down at once, waits until they are
        # all down, and then brings them back up; doing them sequentially
        # requires multiple `supervisorctl restart` calls.
        for worker in workers:
            logging.info("Restarting %s", worker)
            restart_or_start(worker)
|
2021-04-20 23:11:52 +02:00
|
|
|
|
2021-04-27 20:48:19 +02:00
|
|
|
if has_application_server():
    if not args.only_django:
        # Next, we restart the Tornado processes sequentially, in order to
        # minimize downtime of the tornado service caused by too many
        # Python processes restarting at the same time, resulting in each
        # receiving insufficient priority. This is important, because
        # Tornado being unavailable for too long is the main source of
        # user-visible downtime when we restart a Zulip server. We do
        # this before restarting Django, in case there are new event types
        # which it will need to know how to deal with.
        if len(tornado_ports) > 1:
            for p in tornado_ports:
                # Restart Tornado processes individually for a better rate of
                # restarts. This also avoids behavior with restarting a whole
                # supervisord group where if any individual process is slow to
                # stop, the whole bundle stays stopped for an extended time.
                logging.info("%s Tornado process on port %s", verbing, p)
                restart_or_start(f"zulip-tornado:zulip-tornado-port-{p}")
        else:
            logging.info("%s Tornado process", verbing)
            restart_or_start("zulip-tornado:*")

    # Finally, restart the Django uWSGI processes.  When rolling
    # restarts are enabled in zulip.conf and the uWSGI control FIFO
    # exists, use uWSGI chain-reloading so only one worker is down at
    # a time; otherwise fall back to a plain supervisorctl (re)start.
    if (
        action == "restart"
        and not args.less_graceful
        and get_config(config_file, "application_server", "rolling_restart", False)
        and os.path.exists("/home/zulip/deployments/uwsgi-control")
    ):
        # See if it's currently running
        uwsgi_status = subprocess.run(
            ["supervisorctl", "status", "zulip-django"],
            stdout=subprocess.DEVNULL,
            check=False,
        )
        if uwsgi_status.returncode == 0:
            logging.info("Starting rolling restart of django server")
            # Remove the readiness marker; the workers recreate it once
            # the chain reload has completed.
            with contextlib.suppress(FileNotFoundError):
                os.unlink("/var/lib/zulip/django-workers.ready")
            with open("/home/zulip/deployments/uwsgi-control", "w") as control_socket:
                # "c" is chain-reloading:
                # https://uwsgi-docs.readthedocs.io/en/latest/MasterFIFO.html#available-commands
                control_socket.write("c")
            # Poll for the readiness marker, logging a heartbeat every
            # five seconds so the operator can see progress.
            n = 0
            while not os.path.exists("/var/lib/zulip/django-workers.ready"):
                time.sleep(1)
                n += 1
                if n % 5 == 0:
                    logging.info("...")
            logging.info("Chain reloading complete")
        else:
            # Not currently running, so a rolling reload is impossible;
            # just start it.
            logging.info("Starting django server")
            subprocess.check_call(["supervisorctl", "start", "zulip-django"])
    else:
        logging.info("%s django server", verbing)
        restart_or_start("zulip-django")

    # When Apache-based SSO is in use, Apache's WSGI processes also run
    # our application code; kill them (as the zulip user) so they
    # respawn with the new version.
    using_sso = subprocess.check_output(["./scripts/get-django-setting", "USING_APACHE_SSO"])
    if using_sso.strip() == b"True":
        logging.info("Restarting Apache WSGI process...")
        subprocess.check_call(["pkill", "-x", "apache2", "-u", "zulip"])
|
2016-08-05 01:58:57 +02:00
|
|
|
|
2021-04-20 23:11:52 +02:00
|
|
|
# If we were doing this non-gracefully, or starting as opposed to
# restarting, we need to turn the workers (back) on. There's no
# advantage to doing this not-all-at-once.
if (action == "start" or args.less_graceful) and not args.only_django:
    # Expand the glob forms into the concrete process names that
    # supervisord actually knows about, whether running or not.
    workers = list_supervisor_processes(workers, only_running=False)
    if workers:
        logging.info("Starting workers")
        subprocess.check_call(["supervisorctl", "start", *workers])
|
2013-01-31 16:49:09 +01:00
|
|
|
|
2024-10-08 17:21:34 +02:00
|
|
|
if has_application_server() and not args.skip_client_reloads and not args.only_django:
    # All of the servers have been (re)started; now enqueue events in
    # the Tornado servers to tell clients to reload.
    subprocess.check_call(["./scripts/reload-clients"])

logging.info("Done!")
print(OKGREEN + f"Zulip {action}ed successfully!" + ENDC)
|
2019-09-20 02:23:23 +02:00
|
|
|
|
|
|
|
# If we repointed the deploy symlinks and the invoking shell's working
# directory was reached through one of them, the shell is now sitting
# in a stale directory; warn the user how to refresh it.
if change_symlink and "PWD" in os.environ:
    for symlink in [last_symlink, current_symlink]:
        if os.path.commonprefix([os.environ["PWD"], symlink]) == symlink:
            print(
                """
{}Your shell entered its current directory through a symlink:
  {}
which has now changed. Your shell will not see this change until you run:
  cd {}
to traverse the symlink again.{}
""".format(WARNING, symlink, shlex.quote(os.environ["PWD"]), ENDC),
                file=sys.stderr,
            )
|