#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import pwd
import subprocess
import logging
import time
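# Make the deployment root importable so scripts.lib.zulip_tools resolves.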
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from scripts.lib.zulip_tools import ENDC, OKGREEN, DEPLOYMENTS_DIR
logging.basicConfig(format="%(asctime)s restart-server: %(message)s",
                    level=logging.INFO)
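# Run from the root of the deployment that contains this script.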
deploy_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
os.chdir(deploy_path)
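# Deployments are managed by the 'zulip' user; refuse to run as anyone else.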
if pwd.getpwuid(os.getuid()).pw_name != "zulip":
    logging.error("Must be run as user 'zulip'.")
    sys.exit(1)
# Send a statsd event on restarting the server
subprocess.check_call(["python", "./manage.py", "send_stats", "incr", "events.server_restart", str(int(time.time()))])
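# Pre-fill the memcached caches so the first requests after the
# restart don't pay the cache-miss penalty.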
logging.info("Filling memcached caches")
subprocess.check_call(["python", "./manage.py", "fill_memcached_caches"])
# Restart the FastCGI and related processes via supervisorctl.
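#
# Stop the less critical worker processes first, then the core
# user-facing daemons, and bring them back up in the reverse order.
# supervisor is slow at restarting many daemons at once, so this
# ordering keeps the core daemons' downtime to a few seconds instead
# of the ~13s a restart of everything in one group used to take.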
logging.info("Stopping workers")
subprocess.check_call(["supervisorctl", "stop", "zulip-workers:*"])
logging.info("Stopping server core")
subprocess.check_call(["supervisorctl", "stop", "zulip-senders:* zulip-django zulip-tornado"])
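# While the server is down, advance the deployment symlinks: "last"
# takes over whatever "current" pointed to, and "current" is
# repointed at this deploy.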
current_symlink = os.path.join(DEPLOYMENTS_DIR, "current")
last_symlink = os.path.join(DEPLOYMENTS_DIR, "last")
if os.readlink(current_symlink) != deploy_path:
    subprocess.check_call(["ln", '-nsf', os.readlink(current_symlink), last_symlink])
    subprocess.check_call(["ln", '-nsf', deploy_path, current_symlink])
logging.info("Starting server core")
subprocess.check_call(["supervisorctl", "start", "zulip-tornado zulip-django zulip-senders:*"])
logging.info("Starting workers")
subprocess.check_call(["supervisorctl", "start", "zulip-workers:*"])
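# When Apache handles SSO authentication, its WSGI processes run
# Zulip code as well, so they need to be restarted too.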
using_sso = subprocess.check_output(['./scripts/get-django-setting', 'USING_APACHE_SSO'])
if using_sso.strip() == b'True':
    logging.info("Restarting Apache WSGI process...")
    subprocess.check_call(["pkill", "-f", "apache2", "-u", "zulip"])
logging.info("Done!")
print(OKGREEN + "Application restarted successfully!" + ENDC)