shfmt: Reformat shell scripts with shfmt.

https://github.com/mvdan/sh

Signed-off-by: Anders Kaseorg <anders@zulip.com>
This commit is contained in:
Anders Kaseorg 2020-10-14 19:55:57 -07:00 committed by Tim Abbott
parent caa939d2d5
commit dfaea9df65
55 changed files with 609 additions and 473 deletions

View File

@@ -15,12 +15,10 @@ fi
cd /home/zulip/deployments/current
BACKLOG="$(./manage.py print_email_delivery_backlog)"
if [ "$BACKLOG" -gt 0 ] && [ "$BACKLOG" -lt 10 ]
then
if [ "$BACKLOG" -gt 0 ] && [ "$BACKLOG" -lt 10 ]; then
echo "backlog of $BACKLOG"
exit 1
elif [ "$BACKLOG" -ge 10 ]
then
elif [ "$BACKLOG" -ge 10 ]; then
echo "backlog of $BACKLOG"
exit 2
else

View File

@@ -8,13 +8,12 @@
SUPERVISOR_STATUS=$(supervisorctl status zulip-workers:zulip_deliver_enqueued_emails 2>&1)
STATUS=$(echo "$SUPERVISOR_STATUS" | awk '{ print $2 }')
case "$STATUS" in
RUNNING)
echo "Running"
exit 0
;;
STOPPED|STARTING|BACKOFF|STOPPING|EXITED|FATAL|UNKNOWN)
STOPPED | STARTING | BACKOFF | STOPPING | EXITED | FATAL | UNKNOWN)
# not "RUNNING", but a recognized supervisor status
echo "$STATUS"
exit 1

View File

@@ -9,16 +9,16 @@ if [ -z "$processes" ]; then
echo "No workers running"
exit 0
fi
mapfile -t processes <<< "$processes"
ps -o vsize,size,pid,user,command --sort -vsize "${processes[@]}" > "$datafile"
mapfile -t processes <<<"$processes"
ps -o vsize,size,pid,user,command --sort -vsize "${processes[@]}" >"$datafile"
cat "$datafile"
top_worker=$(head -n2 "$datafile" | tail -n1)
top_worker_memory_usage=$(echo "$top_worker" | cut -f1 -d" ")
rm -f "$datafile"
if [ "$top_worker_memory_usage" -gt 800000 ]; then
exit 2
exit 2
elif [ "$top_worker_memory_usage" -gt 600000 ]; then
exit 1
exit 1
else
exit 0
exit 0
fi

View File

@@ -20,8 +20,8 @@
# always be included.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
echo yes
exit 0
fi
HOME=/tmp/
@@ -30,33 +30,33 @@ HOME=/tmp/
# graphs should look.
if [ "$1" = "config" ]; then
CONN_WARN=${queue_warn:-500}
CONN_CRIT=${queue_crit:-1000}
CONN_WARN=${queue_warn:-500}
CONN_CRIT=${queue_crit:-1000}
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The title of the graph
echo 'graph_title RabbitMQ connections'
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel connections'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
# The title of the graph
echo 'graph_title RabbitMQ connections'
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel connections'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
echo "connections.label Connections"
echo "connections.warning $CONN_WARN"
echo "connections.critical $CONN_CRIT"
echo "connections.info Number of active connections"
echo "connections.label Connections"
echo "connections.warning $CONN_WARN"
echo "connections.critical $CONN_CRIT"
echo "connections.info Number of active connections"
echo 'graph_info Shows the number of connections to RabbitMQ'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
echo 'graph_info Shows the number of connections to RabbitMQ'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
fi
# If not run with any parameters at all (or only unknown ones), do the

View File

@@ -20,54 +20,54 @@
# always be included.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
echo yes
exit 0
fi
# If run with the "config"-parameter, give out information on how the
# graphs should look.
HOME=/tmp/
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
grep -v '^Listing' | \
grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
| grep -v '^Listing' \
| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')
if [ "$1" = "config" ]; then
QUEUE_WARN=${queue_warn:-100}
QUEUE_CRIT=${queue_crit:-500}
QUEUE_WARN=${queue_warn:-100}
QUEUE_CRIT=${queue_crit:-500}
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The title of the graph
echo "graph_title RabbitMQ consumers"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel consumers'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
# The title of the graph
echo "graph_title RabbitMQ consumers"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel consumers'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Active consumers for $queue"
done
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Active consumers for $queue"
done
echo 'graph_info Lists active consumers for a queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
echo 'graph_info Lists active consumers for a queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
fi
# If not run with any parameters at all (or only unknown ones), do the
# real work - i.e. display the data. Almost always this will be
# "value" subfield for every data field.
HOME=$HOME rabbitmqctl list_queues name consumers| \
grep -v "^Listing" | grep -v "done.$" | \
perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
HOME=$HOME rabbitmqctl list_queues name consumers \
| grep -v "^Listing" | grep -v "done.$" \
| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'

View File

@@ -20,54 +20,54 @@
# always be included.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
echo yes
exit 0
fi
# If run with the "config"-parameter, give out information on how the
# graphs should look.
HOME=/tmp/
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
grep -v '^Listing' | \
grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
| grep -v '^Listing' \
| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')
if [ "$1" = "config" ]; then
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The title of the graph
echo "graph_title RabbitMQ list_queues"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel queue_size'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
# The title of the graph
echo "graph_title RabbitMQ list_queues"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel queue_size'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Queue size for $queue"
done
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Queue size for $queue"
done
echo 'graph_info Lists how many messages are in each queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
echo 'graph_info Lists how many messages are in each queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
fi
# If not run with any parameters at all (or only unknown ones), do the
# real work - i.e. display the data. Almost always this will be
# "value" subfield for every data field.
HOME=$HOME rabbitmqctl list_queues | \
grep -v "^Listing" | grep -v "done.$" | \
perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
HOME=$HOME rabbitmqctl list_queues \
| grep -v "^Listing" | grep -v "done.$" \
| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'

View File

@@ -20,54 +20,54 @@
# always be included.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
echo yes
exit 0
fi
# If run with the "config"-parameter, give out information on how the
# graphs should look.
HOME=/tmp/
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
grep -v '^Listing' | \
grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
| grep -v '^Listing' \
| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')
if [ "$1" = "config" ]; then
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The title of the graph
echo "graph_title RabbitMQ Unacknowledged Messages"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel unacknowledged'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
# The title of the graph
echo "graph_title RabbitMQ Unacknowledged Messages"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel unacknowledged'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Unacknowledged messages for $queue"
done
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Unacknowledged messages for $queue"
done
echo 'graph_info Lists how many messages are in each queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
echo 'graph_info Lists how many messages are in each queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
fi
# If not run with any parameters at all (or only unknown ones), do the
# real work - i.e. display the data. Almost always this will be
# "value" subfield for every data field.
HOME=$HOME rabbitmqctl list_queues name messages_unacknowledged | \
grep -v "^Listing" | grep -v "done.$" | \
perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
HOME=$HOME rabbitmqctl list_queues name messages_unacknowledged \
| grep -v "^Listing" | grep -v "done.$" \
| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'

View File

@@ -20,54 +20,54 @@
# always be included.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
echo yes
exit 0
fi
# If run with the "config"-parameter, give out information on how the
# graphs should look.
HOME=/tmp/
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
grep -v '^Listing' | \
grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
| grep -v '^Listing' \
| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')
if [ "$1" = "config" ]; then
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The title of the graph
echo "graph_title RabbitMQ Uncommitted Messages"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel uncommitted'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
# The title of the graph
echo "graph_title RabbitMQ Uncommitted Messages"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel uncommitted'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Uncommitted messages for $queue"
done
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Uncommitted messages for $queue"
done
echo 'graph_info Lists how many messages are in each queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
echo 'graph_info Lists how many messages are in each queue.'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
fi
# If not run with any parameters at all (or only unknown ones), do the
# real work - i.e. display the data. Almost always this will be
# "value" subfield for every data field.
HOME=$HOME rabbitmqctl list_channels name messages_uncommitted | \
grep -v "^Listing" | grep -v "done.$" | \
perl -nle'($q, $s) = /^(.*)\s+(\d+)$/; $q =~ s/[.=-]/_/g; print("$q.value $s")'
HOME=$HOME rabbitmqctl list_channels name messages_uncommitted \
| grep -v "^Listing" | grep -v "done.$" \
| perl -nle'($q, $s) = /^(.*)\s+(\d+)$/; $q =~ s/[.=-]/_/g; print("$q.value $s")'

View File

@@ -20,54 +20,54 @@
# always be included.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
echo yes
exit 0
fi
# If run with the "config"-parameter, give out information on how the
# graphs should look.
HOME=/tmp/
QUEUES=$(rabbitmqctl list_queues name | \
grep -v '^Listing' | \
grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
QUEUES=$(rabbitmqctl list_queues name \
| grep -v '^Listing' \
| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')
if [ "$1" = "config" ]; then
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
QUEUE_WARN=${queue_warn:-10000}
QUEUE_CRIT=${queue_crit:-20000}
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The title of the graph
echo "graph_title RabbitMQ Memory used by queue"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1024 --vertical-label Bytes -l 0'
# The Y-axis label
echo 'graph_vlabel memory'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
# The title of the graph
echo "graph_title RabbitMQ Memory used by queue"
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1024 --vertical-label Bytes -l 0'
# The Y-axis label
echo 'graph_vlabel memory'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category RabbitMQ'
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Memory used by $queue"
done
for queue in $QUEUES; do
echo "$queue.label $queue"
echo "$queue.warning $QUEUE_WARN"
echo "$queue.critical $QUEUE_CRIT"
echo "$queue.info Memory used by $queue"
done
echo 'graph_info Show memory usage by queue'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
echo 'graph_info Show memory usage by queue'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
fi
# If not run with any parameters at all (or only unknown ones), do the
# real work - i.e. display the data. Almost always this will be
# "value" subfield for every data field.
HOME=$HOME rabbitmqctl list_queues name memory | \
grep -v "^Listing" | grep -v "done.$" | \
perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
HOME=$HOME rabbitmqctl list_queues name memory \
| grep -v "^Listing" | grep -v "done.$" \
| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'

View File

@@ -18,8 +18,8 @@
# always be included.
if [ "$1" = "autoconf" ]; then
echo yes
exit 0
echo yes
exit 0
fi
HOME=/tmp/
@@ -28,30 +28,30 @@ HOME=/tmp/
# graphs should look.
if [ "$1" = "config" ]; then
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The host name this plugin is for. (Can be overridden to have
# one machine answer for several)
# The title of the graph
echo 'graph_title Event queues'
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel Number'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category Tornado'
# The title of the graph
echo 'graph_title Event queues'
# Arguments to "rrdtool graph". In this case, tell it that the
# lower limit of the graph is '0', and that 1k=1000 (not 1024)
echo 'graph_args --base 1000 -l 0'
# The Y-axis label
echo 'graph_vlabel Number'
# We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
# 420 milliload)
#echo 'graph_scale no'
echo 'graph_category Tornado'
echo "active_queues.label Total active event queues"
echo "active_queues.info Total number of active event queues"
echo "active_users.label Users with active event queues"
echo "active_users.info Number of users with active event queues"
echo "active_queues.label Total active event queues"
echo "active_queues.info Total number of active event queues"
echo "active_users.label Users with active event queues"
echo "active_users.info Number of users with active event queues"
echo 'graph_info Shows the number of active event queues'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
echo 'graph_info Shows the number of active event queues'
# Last, if run with the "config"-parameter, quit here (don't
# display any data)
exit 0
fi
# If not run with any parameters at all (or only unknown ones), do the

View File

@@ -5,7 +5,7 @@ set -e
LOCALDISK=/dev/nvme0n1
if ! grep -q $LOCALDISK /etc/fstab; then
echo "$LOCALDISK /srv xfs nofail,noatime 1 1" >> /etc/fstab
echo "$LOCALDISK /srv xfs nofail,noatime 1 1" >>/etc/fstab
fi
if ! mountpoint -q /srv; then

View File

@@ -5,9 +5,9 @@ set -e
# Only run from ifup.
if [ "$MODE" != start ]; then
exit 0
exit 0
fi
if [ "$IFACE" = eth0 ]; then
/usr/local/sbin/zulip-ec2-configure-interfaces
/usr/local/sbin/zulip-ec2-configure-interfaces
fi

View File

@@ -6,7 +6,7 @@ zulip_conf_get_boolean() {
# Treat absent and invalid values as false.
value=$(crudini --get /etc/zulip/zulip.conf "$1" "$2" 2>/dev/null)
case "$(echo "$value" | tr '[:upper:]' '[:lower:]')" in
1|yes|true|on) return 0 ;;
1 | yes | true | on) return 0 ;;
*) return 1 ;;
esac
}
@@ -18,5 +18,5 @@ fi
deploy_hook="${ZULIP_CERTBOT_DEPLOY_HOOK:-service nginx reload}"
certbot renew --quiet \
--webroot --webroot-path=/var/lib/zulip/certbot-webroot/ \
--deploy-hook "$deploy_hook"
--webroot --webroot-path=/var/lib/zulip/certbot-webroot/ \
--deploy-hook "$deploy_hook"

View File

@@ -43,7 +43,7 @@ Options:
Skip the initial `apt-get dist-upgrade`.
EOF
};
}
# Shell option parsing. Over time, we'll want to move some of the
# environment variables below into this self-documenting system.
@@ -51,22 +51,62 @@ args="$(getopt -o '' --long help,hostname:,email:,certbot,self-signed-cert,cacer
eval "set -- $args"
while true; do
case "$1" in
--help) usage; exit 0;;
--help)
usage
exit 0
;;
--hostname) EXTERNAL_HOST="$2"; shift; shift;;
--email) ZULIP_ADMINISTRATOR="$2"; shift; shift;;
--hostname)
EXTERNAL_HOST="$2"
shift
shift
;;
--email)
ZULIP_ADMINISTRATOR="$2"
shift
shift
;;
--certbot) USE_CERTBOT=1; shift;;
--cacert) export CUSTOM_CA_CERTIFICATES="$2"; shift; shift;;
--self-signed-cert) SELF_SIGNED_CERT=1; shift;;
--certbot)
USE_CERTBOT=1
shift
;;
--cacert)
export CUSTOM_CA_CERTIFICATES="$2"
shift
shift
;;
--self-signed-cert)
SELF_SIGNED_CERT=1
shift
;;
--postgres-version) POSTGRES_VERSION="$2"; shift; shift;;
--postgres-missing-dictionaries) POSTGRES_MISSING_DICTIONARIES=1; shift;;
--no-init-db) NO_INIT_DB=1; shift;;
--postgres-version)
POSTGRES_VERSION="$2"
shift
shift
;;
--postgres-missing-dictionaries)
POSTGRES_MISSING_DICTIONARIES=1
shift
;;
--no-init-db)
NO_INIT_DB=1
shift
;;
--no-overwrite-settings) NO_OVERWRITE_SETTINGS=1; shift;;
--no-dist-upgrade) NO_DIST_UPGRADE=1; shift;;
--) shift; break;;
--no-overwrite-settings)
NO_OVERWRITE_SETTINGS=1
shift
;;
--no-dist-upgrade)
NO_DIST_UPGRADE=1
shift
;;
--)
shift
break
;;
esac
done
@@ -78,9 +118,9 @@ fi
## Options from environment variables.
#
# Specify options for apt.
read -r -a APT_OPTIONS <<< "${APT_OPTIONS:-}"
read -r -a APT_OPTIONS <<<"${APT_OPTIONS:-}"
# Install additional packages.
read -r -a ADDITIONAL_PACKAGES <<< "${ADDITIONAL_PACKAGES:-}"
read -r -a ADDITIONAL_PACKAGES <<<"${ADDITIONAL_PACKAGES:-}"
# Comma-separated list of puppet manifests to install. default is
# zulip::voyager for an all-in-one system or zulip::dockervoyager for
# Docker. Use e.g. zulip::app_frontend for a Zulip frontend server.
@@ -111,8 +151,8 @@ if [ -z "$EXTERNAL_HOST" ] || [ -z "$ZULIP_ADMINISTRATOR" ]; then
fi
fi
if [ "$EXTERNAL_HOST" = zulip.example.com ] ||
[ "$ZULIP_ADMINISTRATOR" = zulip-admin@example.com ]; then
if [ "$EXTERNAL_HOST" = zulip.example.com ] \
|| [ "$ZULIP_ADMINISTRATOR" = zulip-admin@example.com ]; then
# These example values are specifically checked for and would fail
# later; see check_config in zerver/lib/management.py.
echo 'error: The example hostname and email must be replaced with real values.' >&2
@@ -134,8 +174,16 @@ export LANGUAGE="en_US.UTF-8"
# Check for a supported OS release.
if [ -f /etc/os-release ]; then
os_info="$(. /etc/os-release; printf '%s\n' "$ID" "$ID_LIKE" "$VERSION_ID" "$VERSION_CODENAME")"
{ read -r os_id; read -r os_id_like; read -r os_version_id; read -r os_version_codename || true; } <<< "$os_info"
os_info="$(
. /etc/os-release
printf '%s\n' "$ID" "$ID_LIKE" "$VERSION_ID" "$VERSION_CODENAME"
)"
{
read -r os_id
read -r os_id_like
read -r os_version_id
read -r os_version_codename || true
} <<<"$os_info"
case " $os_id $os_id_like " in
*' debian '*)
package_system="apt"
@@ -147,7 +195,7 @@ if [ -f /etc/os-release ]; then
fi
case "$os_id$os_version_id" in
debian10|ubuntu18.04|ubuntu20.04) ;;
debian10 | ubuntu18.04 | ubuntu20.04) ;;
*)
set +x
cat <<EOF
@@ -163,10 +211,11 @@ For more information, see:
https://zulip.readthedocs.io/en/latest/production/requirements.html
EOF
exit 1
;;
esac
if [ "$os_id" = ubuntu ] && ! apt-cache policy |
grep -q "^ release v=$os_version_id,o=Ubuntu,a=$os_version_codename,n=$os_version_codename,l=Ubuntu,c=universe"; then
if [ "$os_id" = ubuntu ] && ! apt-cache policy \
| grep -q "^ release v=$os_version_id,o=Ubuntu,a=$os_version_codename,n=$os_version_codename,l=Ubuntu,c=universe"; then
set +x
cat <<'EOF'
@@ -187,10 +236,10 @@ case ",$PUPPET_CLASSES," in
if [ "$package_system" = apt ]; then
# We're going to install Postgres from the postgres apt
# repository; this may conflict with the existing postgres.
OTHER_PG="$(dpkg --get-selections |
grep -E '^postgresql-[0-9]+\s+install$' |
grep -v "^postgresql-$POSTGRES_VERSION\b" |
cut -f 1)" || true
OTHER_PG="$(dpkg --get-selections \
| grep -E '^postgresql-[0-9]+\s+install$' \
| grep -v "^postgresql-$POSTGRES_VERSION\b" \
| cut -f 1)" || true
if [ -n "$OTHER_PG" ]; then
INDENTED="${OTHER_PG//$'\n'/$'\n' }"
SPACED="${OTHER_PG//$'\n'/ }"
@@ -274,9 +323,9 @@ fi
if [ "$package_system" = apt ]; then
if ! apt-get install -y \
puppet git curl wget jq \
python3 crudini \
"${ADDITIONAL_PACKAGES[@]}"; then
puppet git curl wget jq \
python3 crudini \
"${ADDITIONAL_PACKAGES[@]}"; then
set +x
echo -e '\033[0;31m' >&2
echo "Installing packages failed; is network working and (on Ubuntu) the universe repository enabled?" >&2
@@ -286,9 +335,9 @@ if [ "$package_system" = apt ]; then
fi
elif [ "$package_system" = yum ]; then
if ! yum install -y \
puppet git curl wget jq \
python3 crudini \
"${ADDITIONAL_PACKAGES[@]}"; then
puppet git curl wget jq \
python3 crudini \
"${ADDITIONAL_PACKAGES[@]}"; then
set +x
echo -e '\033[0;31m' >&2
echo "Installing packages failed; is network working?" >&2
@@ -328,13 +377,13 @@ has_class() {
id -u zulip &>/dev/null || useradd -m zulip --home-dir /home/zulip
if [ -n "$NO_OVERWRITE_SETTINGS" ] && [ -e "/etc/zulip/zulip.conf" ]; then
"$ZULIP_PATH"/scripts/zulip-puppet-apply --noop \
--write-catalog-summary \
--classfile=/var/lib/puppet/classes.txt \
>/dev/null
--write-catalog-summary \
--classfile=/var/lib/puppet/classes.txt \
>/dev/null
else
# Write out more than we need, and remove sections that are not
# applicable to the classes that are actually necessary.
cat <<EOF > /etc/zulip/zulip.conf
cat <<EOF >/etc/zulip/zulip.conf
[machine]
puppet_classes = $PUPPET_CLASSES
deploy_type = production
@@ -352,9 +401,9 @@ EOF
fi
"$ZULIP_PATH"/scripts/zulip-puppet-apply --noop \
--write-catalog-summary \
--classfile=/var/lib/puppet/classes.txt \
>/dev/null
--write-catalog-summary \
--classfile=/var/lib/puppet/classes.txt \
>/dev/null
# We only need the postgres version setting on database hosts; but
# we don't know if this is a database host until we have the catalog summary.

View File

@@ -31,7 +31,10 @@ fi
if [ "$current_node_version" != "v$node_version" ] || ! [ -L "$node_wrapper_path" ]; then
export NVM_DIR=/usr/local/nvm
# shellcheck source=/dev/null
if ! [ -e "$NVM_DIR/nvm.sh" ] || { . "$NVM_DIR/nvm.sh"; [ "$(nvm --version)" != "$nvm_version" ]; }; then
if ! [ -e "$NVM_DIR/nvm.sh" ] || {
. "$NVM_DIR/nvm.sh"
[ "$(nvm --version)" != "$nvm_version" ]
}; then
mkdir -p "$NVM_DIR"
wget_opts=(-nv)
if [ -n "${CUSTOM_CA_CERTIFICATES:-}" ]; then

View File

@@ -36,7 +36,7 @@ apt-get -y install "${pre_setup_deps[@]}"
SCRIPTS_PATH="$(dirname "$(dirname "$0")")"
release=$(lsb_release -sc)
if [[ "$release" =~ ^(bionic|cosmic|disco|eoan|focal)$ ]] ; then
if [[ "$release" =~ ^(bionic|cosmic|disco|eoan|focal)$ ]]; then
apt-key add "$SCRIPTS_PATH"/setup/pgdg.asc
apt-key add "$SCRIPTS_PATH"/setup/pgroonga-ppa.asc
cat >$SOURCES_FILE <<EOF
@@ -46,7 +46,7 @@ deb-src http://apt.postgresql.org/pub/repos/apt/ $release-pgdg main
deb http://ppa.launchpad.net/groonga/ppa/ubuntu $release main
deb-src http://ppa.launchpad.net/groonga/ppa/ubuntu $release main
EOF
elif [[ "$release" =~ ^(buster)$ ]] ; then
elif [[ "$release" =~ ^(buster)$ ]]; then
apt-key add "$SCRIPTS_PATH"/setup/pgdg.asc
apt-key add "$SCRIPTS_PATH"/setup/pgroonga-debian.asc
cat >$SOURCES_FILE <<EOF
@@ -71,4 +71,4 @@ else
apt-get update && rm -f "$STAMP_FILE"
fi
echo "$DEPENDENCIES_HASH" > "$DEPENDENCIES_HASH_FILE"
echo "$DEPENDENCIES_HASH" >"$DEPENDENCIES_HASH_FILE"

View File

@@ -54,4 +54,4 @@ else
apt-get update && rm -f "$STAMP_FILE"
fi
echo "$DEPENDENCIES_HASH" > "$DEPENDENCIES_HASH_FILE"
echo "$DEPENDENCIES_HASH" >"$DEPENDENCIES_HASH_FILE"

View File

@@ -7,8 +7,14 @@ args="$(getopt -o '' --long prod -- "$@")"
eval "set -- $args"
while true; do
case "$1" in
--prod) is_prod=true; shift;;
--) shift; break;;
--prod)
is_prod=true
shift
;;
--)
shift
break
;;
esac
done

View File

@@ -10,11 +10,20 @@ args="$(getopt -o '' --long help,force,exists-ok -- "$@")"
eval "set -- $args"
while true; do
case "$1" in
--help) usage;;
--force) FORCE=1; shift;;
--exists-ok) EXISTS_OK=1; shift;;
--) shift; break;;
*) usage;;
--help) usage ;;
--force)
FORCE=1
shift
;;
--exists-ok)
EXISTS_OK=1
shift
;;
--)
shift
break
;;
*) usage ;;
esac
done
EXTERNAL_HOST="$1"
@@ -51,9 +60,9 @@ fi
rm -f "$KEYFILE" "$CERTFILE"
if [[ "$EXTERNAL_HOST" =~ ^(([0-9]+\.){3}[0-9]+)(:[0-9]+)?$ ]]; then
subjectAltName="IP:${BASH_REMATCH[1]}" # IPv4 address
subjectAltName="IP:${BASH_REMATCH[1]}" # IPv4 address
elif [[ "$EXTERNAL_HOST" =~ ^\[([^][]*)\](:[0-9]+)?$ ]]; then
subjectAltName="IP:${BASH_REMATCH[1]}" # IPv6 address
subjectAltName="IP:${BASH_REMATCH[1]}" # IPv6 address
elif [[ "$EXTERNAL_HOST" =~ ^([^:]+)(:[0-9]+)?$ ]]; then
subjectAltName="DNS:${BASH_REMATCH[1]}"
else
@@ -94,8 +103,8 @@ fi
# Based on /usr/sbin/make-ssl-cert from Debian's `ssl-cert` package.
openssl req -new -x509 \
-config "$config" -days 3650 -nodes -sha256 \
-out "$CERTFILE" -keyout "$KEYFILE"
-config "$config" -days 3650 -nodes -sha256 \
-out "$CERTFILE" -keyout "$KEYFILE"
chmod 644 "$CERTFILE"
chmod 640 "$KEYFILE"

View File

@@ -10,10 +10,16 @@ args="$(getopt -o '' --long help,quiet -- "$@")"
eval "set -- $args"
while true; do
case "$1" in
--help) usage;;
--quiet) QUIET=1; shift;;
--) shift; break;;
*) usage;;
--help) usage ;;
--quiet)
QUIET=1
shift
;;
--)
shift
break
;;
*) usage ;;
esac
done

View File

@@ -13,15 +13,14 @@ POSTGRES_USER="${POSTGRES_USER:-postgres}"
# This psql command may fail because the zulip database doesnt exist,
# hence the &&.
if records="$(
cd / # Make sure the current working directory is readable by postgres
cd / # Make sure the current working directory is readable by postgres
su "$POSTGRES_USER" -c "psql -v ON_ERROR_STOP=1 -Atc 'SELECT COUNT(*) FROM zulip.zerver_message;' zulip"
)" && [ "$records" -gt 200 ]; then
set +x
echo "WARNING: This will delete your Zulip database which currently contains $records messages."
read -p "Do you want to proceed? [y/N] " -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
exit 1
fi
set -x
@@ -35,12 +34,12 @@ fi
# Drop any open connections to any old database.
# Send the script via stdin in case the postgres user lacks permission to read it.
su -s /usr/bin/env - -- "$POSTGRES_USER" \
bash -s - zulip zulip_base < "$(dirname "$0")/terminate-psql-sessions"
bash -s - zulip zulip_base <"$(dirname "$0")/terminate-psql-sessions"
(
cd / # Make sure the current working directory is readable by postgres
cd / # Make sure the current working directory is readable by postgres
su "$POSTGRES_USER" -c 'psql -v ON_ERROR_STOP=1 -e'
) < "$(dirname "$0")/create-db.sql"
) <"$(dirname "$0")/create-db.sql"
# Clear memcached to avoid contamination from previous database state
"$(dirname "$0")/flush-memcached"

View File

@ -83,8 +83,14 @@ esac
# Check for a supported OS release.
if [ -f /etc/os-release ]; then
os_info="$(. /etc/os-release; printf '%s\n' "$ID" "$ID_LIKE")"
{ read -r os_id; read -r os_id_like|| true; } <<< "$os_info"
os_info="$(
. /etc/os-release
printf '%s\n' "$ID" "$ID_LIKE"
)"
{
read -r os_id
read -r os_id_like || true
} <<<"$os_info"
fi
set -x
@ -104,10 +110,10 @@ esac
# Passing --force-interactive suppresses a warning, but also brings up
# an annoying prompt we stifle with --no-eff-email.
certbot certonly "${method_args[@]}" \
"${HOSTNAMES[@]}" -m "$EMAIL" \
$agree_tos \
"${deploy_hook[@]}" \
--force-interactive --no-eff-email
"${HOSTNAMES[@]}" -m "$EMAIL" \
$agree_tos \
"${deploy_hook[@]}" \
--force-interactive --no-eff-email
symlink_with_backup() {
if [ -e "$2" ]; then

View File

@ -28,7 +28,7 @@ LOCALFILE="archive.tar.gz"
wget -qO "$LOCALFILE" "$URL"
# Check the hash against what was passed in
echo "$SHA256 $LOCALFILE" > "$LOCALFILE.sha256"
echo "$SHA256 $LOCALFILE" >"$LOCALFILE.sha256"
sha256sum -c "$LOCALFILE.sha256"
tar xzf "$LOCALFILE"

View File

@ -11,8 +11,11 @@ eval "set -- $args"
while true; do
case "$1" in
--) shift; break;;
*) usage;;
--)
shift
break
;;
*) usage ;;
esac
done
@ -47,8 +50,8 @@ git archive -o "$TARBALL" "--prefix=$prefix/" HEAD
cd "$TMPDIR"
tar -xf "$TARBALL"
while read -r i; do
rm -r --interactive=never "${TMPDIR:?}/$prefix/$i";
done < "$TMPDIR/$prefix/tools/release-tarball-exclude.txt"
rm -r --interactive=never "${TMPDIR:?}/$prefix/$i"
done <"$TMPDIR/$prefix/tools/release-tarball-exclude.txt"
tar -cf "$TARBALL" "$prefix"
rm -rf "$prefix"
@ -78,10 +81,10 @@ mkdir -p "var/log"
# TODO: Would be much better to instead run the below tools with some
# sort of environment hack so that we don't need to create this dummy
# secrets file.
cat >> zproject/prod_settings_template.py <<EOF
cat >>zproject/prod_settings_template.py <<EOF
DEBUG = False
EOF
cat >> zproject/dev-secrets.conf <<EOF
cat >>zproject/dev-secrets.conf <<EOF
[secrets]
local_database_password = ''
secret_key = 'not_used_here'
@ -96,8 +99,8 @@ EOF
# We don't need duplicate copies of emoji with hashed paths, and they would break Markdown
find prod-static/serve/generated/emoji/images/emoji/ -regex '.*\.[0-9a-f]+\.png' -delete
echo "$GITID" > build_id
echo "$version" > version
echo "$GITID" >build_id
echo "$version" >version
cd "$TMPDIR"

View File

@ -2,4 +2,4 @@
set -e
cd "$(dirname "$0")/.."
git describe --tags --match='[0-9]*' > zulip-git-version || true
git describe --tags --match='[0-9]*' >zulip-git-version || true

View File

@ -6,7 +6,7 @@ echo "Test suite is running under $(python --version)."
set -e
set -x
./tools/lint --groups=backend --skip=gitlint,mypy # gitlint disabled because flaky
./tools/lint --groups=backend --skip=gitlint,mypy # gitlint disabled because flaky
./tools/test-tools
# We need to pass a parallel level to test-backend because CircleCI's
# docker setup means the auto-detection logic sees the ~36 processes

View File

@ -5,7 +5,7 @@ source tools/ci/activate-venv
set -e
set -x
./tools/lint --groups=frontend --skip=gitlint # gitlint disabled because flaky
./tools/lint --groups=frontend --skip=gitlint # gitlint disabled because flaky
# Run the node tests first, since they're fast and deterministic
./tools/test-js-with-node --coverage

View File

@ -33,11 +33,11 @@ fi
mkdir /tmp/production-build
mv /tmp/tmp.*/zulip-server-test.tar.gz /tmp/production-build
cp -a \
tools/ci/success-http-headers.template.txt \
tools/ci/production-install \
tools/ci/production-verify \
tools/ci/production-upgrade-pg \
tools/ci/production-extract-tarball \
package.json yarn.lock \
\
/tmp/production-build
tools/ci/success-http-headers.template.txt \
tools/ci/production-install \
tools/ci/production-verify \
tools/ci/production-upgrade-pg \
tools/ci/production-extract-tarball \
package.json yarn.lock \
\
/tmp/production-build

View File

@ -14,8 +14,11 @@ APT_OPTIONS=(-o 'Dpkg::Options::=--force-confdef' -o 'Dpkg::Options::=--force-co
apt-get update
if [ -f /etc/os-release ]; then
os_info="$(. /etc/os-release; printf '%s\n' "$VERSION_CODENAME")"
{ read -r os_version_codename || true; } <<< "$os_info"
os_info="$(
. /etc/os-release
printf '%s\n' "$VERSION_CODENAME"
)"
{ read -r os_version_codename || true; } <<<"$os_info"
fi
if ! apt-get dist-upgrade -y "${APT_OPTIONS[@]}"; then

View File

@ -12,7 +12,9 @@ NOREPLY_EMAIL_ADDRESS = 'noreply@circleci.example.com'
ALLOWED_HOSTS = []
EOF
echo; echo "Now testing that the supervisord jobs are running properly"; echo
echo
echo "Now testing that the supervisord jobs are running properly"
echo
sleep 15 # Guaranteed to have a working supervisord process get an extra digit
if supervisorctl status | grep -vq RUNNING || supervisorctl status | sed 's/^.*uptime //' | grep -q 0:00:0; then
set +x
@ -33,16 +35,18 @@ if supervisorctl status | grep -vq RUNNING || supervisorctl status | sed 's/^.*u
fi
# TODO: Ideally this would test actually logging in, but this is a start.
echo; echo "Now testing that the newly installed server's homepage loads"; echo
echo
echo "Now testing that the newly installed server's homepage loads"
echo
wget https://localhost -O /tmp/index.html --no-check-certificate -S 2> /tmp/wget-output || true # || true so we see errors.log if this 500s
grep -vi '\(Vary\|Content-Language\|expires\|issued by\|modified\|saved\|[.][.][.]\|Date\|[-][-]\)' /tmp/wget-output > /tmp/http-headers-processed
wget https://localhost -O /tmp/index.html --no-check-certificate -S 2>/tmp/wget-output || true # || true so we see errors.log if this 500s
grep -vi '\(Vary\|Content-Language\|expires\|issued by\|modified\|saved\|[.][.][.]\|Date\|[-][-]\)' /tmp/wget-output >/tmp/http-headers-processed
nginx_version="$(nginx -v 2>&1 | awk '{print $3, $4}')"
# Simplify the diff by getting replacing 4-5 digit length numbers with <Length>.
sed -i 's|Length: [0-9]\+\( [(][0-9]\+[.][0-9]K[)]\)\?|Length: <Length>|' /tmp/http-headers-processed
sed -i -e 's|Length: [0-9]\+\( [(][0-9]\+[.][0-9]K[)]\)\?|Length: <Length>|' -e "s|{nginx_version_string}|$nginx_version|g" /tmp/success-http-headers.template.txt
sed -i -e 's|Length: [0-9]\+\( [(][0-9]\+[.][0-9]K[)]\)\?|Length: <Length>|' -e "s|{nginx_version_string}|$nginx_version|g" /tmp/success-http-headers.template.txt
if ! diff -ur /tmp/http-headers-processed /tmp/success-http-headers.template.txt; then
set +x
echo
@ -58,12 +62,14 @@ if ! diff -ur /tmp/http-headers-processed /tmp/success-http-headers.template.txt
fi
# Start the RabbitMQ queue worker related section
echo; echo "Now confirming all the RabbitMQ queue processors are correctly registered!"; echo
echo
echo "Now confirming all the RabbitMQ queue processors are correctly registered!"
echo
# These hacky shell scripts just extract the sorted list of queue processors, running and expected
supervisorctl status | cut -f1 -dR | cut -f2- -d: | grep events | cut -f1 -d" " | cut -f3- -d_ | cut -f1 -d- | sort -u > /tmp/running_queue_processors.txt
su zulip -c /home/zulip/deployments/current/scripts/lib/queue_workers.py | sort -u > /tmp/all_queue_processors.txt
su zulip -c "/home/zulip/deployments/current/scripts/lib/queue_workers.py --queue-type test" | sort -u > /tmp/expected_test_queues.txt
grep -v -x -f /tmp/expected_test_queues.txt /tmp/all_queue_processors.txt > /tmp/expected_queue_processors.txt
supervisorctl status | cut -f1 -dR | cut -f2- -d: | grep events | cut -f1 -d" " | cut -f3- -d_ | cut -f1 -d- | sort -u >/tmp/running_queue_processors.txt
su zulip -c /home/zulip/deployments/current/scripts/lib/queue_workers.py | sort -u >/tmp/all_queue_processors.txt
su zulip -c "/home/zulip/deployments/current/scripts/lib/queue_workers.py --queue-type test" | sort -u >/tmp/expected_test_queues.txt
grep -v -x -f /tmp/expected_test_queues.txt /tmp/all_queue_processors.txt >/tmp/expected_queue_processors.txt
if ! diff /tmp/expected_queue_processors.txt /tmp/running_queue_processors.txt >/dev/null; then
set +x
echo "FAILURE: Runnable queue processors declared in zerver/worker/queue_processors.py "
@ -74,7 +80,9 @@ if ! diff /tmp/expected_queue_processors.txt /tmp/running_queue_processors.txt >
exit 1
fi
echo; echo "Now running RabbitMQ consumer Nagios tests"; echo
echo
echo "Now running RabbitMQ consumer Nagios tests"
echo
# First run the check that usually runs in cron and populates the state files
/home/zulip/deployments/current/scripts/nagios/check-rabbitmq-consumers
@ -95,10 +103,12 @@ done
# Some of the Nagios tests have been temporarily disabled to work
# around a Travis CI infrastructure issue.
echo; echo "Now running additional Nagios tests"; echo
if ! /usr/lib/nagios/plugins/zulip_app_frontend/check_queue_worker_errors || \
! su zulip -c /usr/lib/nagios/plugins/zulip_postgres_appdb/check_fts_update_log; then # || \
# ! su zulip -c "/usr/lib/nagios/plugins/zulip_app_frontend/check_send_receive_time --site=https://127.0.0.1/api --nagios --insecure"; then
echo
echo "Now running additional Nagios tests"
echo
if ! /usr/lib/nagios/plugins/zulip_app_frontend/check_queue_worker_errors \
|| ! su zulip -c /usr/lib/nagios/plugins/zulip_postgres_appdb/check_fts_update_log; then # || \
# ! su zulip -c "/usr/lib/nagios/plugins/zulip_app_frontend/check_send_receive_time --site=https://127.0.0.1/api --nagios --insecure"; then
set +x
echo
echo "FAILURE: Nagios checks don't pass:"

View File

@ -17,11 +17,11 @@ if [ $# -ne 0 ] && [ "$1" == "--reviews" ]; then
fi
push_args=()
function is_merged {
function is_merged() {
! git rev-list -n 1 origin/master.."$1" | grep -q .
}
function clean_ref {
function clean_ref() {
ref="$1"
case "$ref" in
*/master | */HEAD)

View File

@ -8,12 +8,12 @@
# Do not invoke gitlint if commit message is empty
if grep -q '^[^#]' "$1"; then
lint_cmd="cd ~/zulip && python -m gitlint.cli"
if \
if [ -z "$VIRTUAL_ENV" ] && command -v vagrant > /dev/null && [ -e .vagrant ]; then
if
if [ -z "$VIRTUAL_ENV" ] && command -v vagrant >/dev/null && [ -e .vagrant ]; then
! vagrant ssh -c "$lint_cmd"
else
! eval "$lint_cmd"
fi < "$1"
fi <"$1"
then
echo "WARNING: Your commit message does not match Zulip's style guide."
fi

View File

@ -1,6 +1,6 @@
#!/usr/bin/env bash
function error_out {
function error_out() {
echo -en '\e[0;31m'
echo "$1"
echo -en '\e[0m'

View File

@ -18,7 +18,7 @@ if [ ${#changed_files} -eq 0 ]; then
exit 0
fi
if [ -z "$VIRTUAL_ENV" ] && command -v vagrant > /dev/null && [ -e .vagrant ]; then
if [ -z "$VIRTUAL_ENV" ] && command -v vagrant >/dev/null && [ -e .vagrant ]; then
vcmd="/srv/zulip/tools/lint --skip=gitlint --force $(printf '%q ' "${changed_files[@]}") || true"
echo "Running lint using vagrant..."
vagrant ssh -c "$vcmd"

View File

@ -36,12 +36,15 @@ WARNING='\033[93m'
ENDC='\033[0m'
# Make the script independent of the location from where it is executed
PARENT_PATH=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
PARENT_PATH=$(
cd "$(dirname "${BASH_SOURCE[0]}")"
pwd -P
)
cd "$PARENT_PATH"
mkdir -p ../var/log
LOG_PATH="../var/log/provision.log"
echo "PROVISIONING STARTING." >> $LOG_PATH
echo "PROVISIONING STARTING." >>$LOG_PATH
# PYTHONUNBUFFERED is important to ensure that tracebacks don't get
# lost far above where they should be in the output.

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -e
usage () {
usage() {
cat >&2 <<EOF
usage: $0 PULL_REQUEST_ID [REMOTE]
@ -69,7 +69,7 @@ fi
pr_url=https://api.github.com/repos/"${repo_fq}"/pulls/"${pr_id}"
pr_details="$(curl -s "$pr_url")"
pr_jq () {
pr_jq() {
echo "$pr_details" | jq "$@"
}

View File

@ -4,17 +4,15 @@ set -x
export DJANGO_SETTINGS_MODULE=zproject.test_settings
create_zulip_test()
{
psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
create_zulip_test() {
psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
DROP DATABASE IF EXISTS zulip_test;
CREATE DATABASE zulip_test TEMPLATE zulip_test_base;
EOF
}
create_zulip_test_template()
{
psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test << EOF
create_zulip_test_template() {
psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
DROP DATABASE IF EXISTS zulip_test_template;
CREATE DATABASE zulip_test_template TEMPLATE zulip_test;
EOF
@ -41,7 +39,7 @@ create_zulip_test
zerver.UserProfile zerver.Stream zerver.Recipient \
zerver.Subscription zerver.Message zerver.Huddle zerver.Realm \
zerver.UserMessage zerver.Client \
zerver.DefaultStream > zerver/tests/fixtures/messages.json
zerver.DefaultStream >zerver/tests/fixtures/messages.json
# create pristine template database, for fast fixture restoration after tests are run.
create_zulip_test_template

View File

@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -e
usage () {
usage() {
cat >&2 <<EOF
usage: $0 PULL_REQUEST_ID [REMOTE]

View File

@ -6,7 +6,6 @@ if ! [ -d ".git/hooks/" ]; then
exit 1
fi
for hook in pre-commit commit-msg
do
for hook in pre-commit commit-msg; do
ln -snf ../../tools/"$hook" .git/hooks/
done

View File

@ -17,7 +17,7 @@ set -e
set -x
# Set the hostname early
echo "$HOSTNAME" > /etc/hostname
echo "$HOSTNAME" >/etc/hostname
hostname "$HOSTNAME"
sed -i "s/localhost$/localhost $HOSTNAME $SERVER/" /etc/hosts
@ -57,15 +57,15 @@ EOF
# function so we do can it again later with the zulip user
function install_keys() {
USERNAME="$1"
SSHDIR="$( getent passwd "$USERNAME" | cut -d: -f6 )/.ssh"
SSHDIR="$(getent passwd "$USERNAME" | cut -d: -f6)/.ssh"
KEYDATA="$($AWS --output text \
secretsmanager get-secret-value \
--secret-id "$SSH_SECRET_ID" \
--query SecretString)"
secretsmanager get-secret-value \
--secret-id "$SSH_SECRET_ID" \
--query SecretString)"
mkdir -p "$SSHDIR"
echo "$KEYDATA" | jq -r .public | base64 -d > "$SSHDIR/id_rsa.pub"
echo "$KEYDATA" | jq -r .private | base64 -d > "$SSHDIR/id_rsa"
chown -R "$USERNAME:$USERNAME" "$SSHDIR"
echo "$KEYDATA" | jq -r .public | base64 -d >"$SSHDIR/id_rsa.pub"
echo "$KEYDATA" | jq -r .private | base64 -d >"$SSHDIR/id_rsa"
chown -R "$USERNAME:$USERNAME" "$SSHDIR"
chmod 600 "$SSHDIR/id_rsa"
}
install_keys root

View File

@ -8,7 +8,7 @@ if [ ! -d "/srv/zulip-aws-tools/v2/$AWS_CLI_VERSION" ]; then
cd /srv/zulip-aws-tools || exit 1
rm -rf awscli.zip awscli.zip.sha256 aws/
wget -q "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-$AWS_CLI_VERSION.zip" -O awscli.zip
echo "$AWS_CLI_SHA awscli.zip" > awscli.zip.sha256
echo "$AWS_CLI_SHA awscli.zip" >awscli.zip.sha256
sha256sum -c awscli.zip.sha256
unzip -q awscli.zip
(

View File

@ -1,11 +1,10 @@
#!/usr/bin/env bash
set -e
run()
{
run() {
PGHOST=localhost PGUSER=zulip \
"$(dirname "$0")/../../scripts/setup/terminate-psql-sessions" zulip_test zulip_test_base zulip_test_template
psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test << EOF
psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
DROP DATABASE IF EXISTS zulip_test;
CREATE DATABASE zulip_test TEMPLATE zulip_test_template;
EOF

View File

@ -8,7 +8,7 @@ email=desdemona@zulip.com
mkdir -p var/puppeteer
password=$(./manage.py print_initial_password "$email" | grep -F "$email" | awk '{ print $2 }')
cat > var/puppeteer/test_credentials.js <<EOF
cat >var/puppeteer/test_credentials.js <<EOF
// Generated by tools/setup/generate-test-credentials
var test_credentials = {default_user: {username: '$email', password: '$password'}};
try { exports.test_credentials = test_credentials; } catch (e) {}

View File

@ -43,19 +43,19 @@ if [ -z "$BRANCH" ]; then
BRANCH=$(crudini --get "$zulip_install_config_file" repo default_branch)
fi
AWS_ZONE_ID=$( crudini --get "$zulip_install_config_file" aws zone_id)
AWS_ZONE_ID=$(crudini --get "$zulip_install_config_file" aws zone_id)
SECURITY_GROUPS=$(crudini --get "$zulip_install_config_file" aws security_groups)
AMI_ID=$( crudini --get "$zulip_install_config_file" aws image_id)
INSTANCE_TYPE=$( crudini --get "$zulip_install_config_file" aws instance_type)
SSH_SECRET_ID=$( crudini --get "$zulip_install_config_file" aws ssh_secret_id)
AMI_ID=$(crudini --get "$zulip_install_config_file" aws image_id)
INSTANCE_TYPE=$(crudini --get "$zulip_install_config_file" aws instance_type)
SSH_SECRET_ID=$(crudini --get "$zulip_install_config_file" aws ssh_secret_id)
# Verify it doesn't exist already
ZONE_NAME=$($AWS route53 get-hosted-zone --id "$AWS_ZONE_ID" | jq -r '.HostedZone.Name' )
HOSTNAME="$SERVER.${ZONE_NAME%?}" # Remove trailing .
ZONE_NAME=$($AWS route53 get-hosted-zone --id "$AWS_ZONE_ID" | jq -r '.HostedZone.Name')
HOSTNAME="$SERVER.${ZONE_NAME%?}" # Remove trailing .
EXISTING_RECORDS=$($AWS route53 list-resource-record-sets \
--hosted-zone-id "$AWS_ZONE_ID" \
--query "ResourceRecordSets[?Name == '$HOSTNAME.']" \
| jq '. | length')
--hosted-zone-id "$AWS_ZONE_ID" \
--query "ResourceRecordSets[?Name == '$HOSTNAME.']" \
| jq '. | length')
if [ "$EXISTING_RECORDS" != "0" ]; then
echo "$HOSTNAME already exists!"
exit 1
@ -72,16 +72,16 @@ BOOTDATA=$(mktemp)
echo "BRANCH=$BRANCH"
echo "SSH_SECRET_ID=$SSH_SECRET_ID"
sed '/^AWS=/ r ./bootstrap-awscli' bootstrap-aws-installer
} >> "$BOOTDATA"
} >>"$BOOTDATA"
TAGS="[{Key=Name,Value=$SERVER},{Key=role,Value=\"$ROLES\"}]"
INSTANCE_DATA=$($AWS ec2 run-instances \
--iam-instance-profile 'Name="EC2ProdInstance"' \
--image-id "$AMI_ID" \
--instance-type "$INSTANCE_TYPE" \
--security-group-ids "$SECURITY_GROUPS" \
--tag-specifications "ResourceType=instance,Tags=$TAGS" \
--user-data "file://$BOOTDATA")
--iam-instance-profile 'Name="EC2ProdInstance"' \
--image-id "$AMI_ID" \
--instance-type "$INSTANCE_TYPE" \
--security-group-ids "$SECURITY_GROUPS" \
--tag-specifications "ResourceType=instance,Tags=$TAGS" \
--user-data "file://$BOOTDATA")
INSTANCEID=$(echo "$INSTANCE_DATA" | jq -r .Instances[0].InstanceId)
# Wait for public IP assignment
@ -89,12 +89,12 @@ PUBLIC_DNS_NAME=""
while [ -z "$PUBLIC_DNS_NAME" ]; do
sleep 1
PUBLIC_DNS_NAME=$($AWS ec2 describe-instances --instance-ids "$INSTANCEID" \
| jq -r .Reservations[0].Instances[0].PublicDnsName )
| jq -r .Reservations[0].Instances[0].PublicDnsName)
done
# Add the hostname to the zone
ROUTE53_CHANGES=$(mktemp)
cat > "$ROUTE53_CHANGES" <<EOF
cat >"$ROUTE53_CHANGES" <<EOF
{
"Comment": "Add the $HOSTNAME CNAME record",
"Changes": [

View File

@ -5,7 +5,7 @@ version=0.7.1
tarball="shellcheck-v$version.linux.x86_64.tar.xz"
sha256=64f17152d96d7ec261ad3086ed42d18232fcb65148b44571b564d688269d36c8
check_version () {
check_version() {
out="$(shellcheck --version 2>/dev/null)" && [[ "$out" = *"
version: $version
"* ]]
@ -16,7 +16,7 @@ if ! check_version; then
trap 'rm -r "$tmpdir"' EXIT
cd "$tmpdir"
wget -nv "https://github.com/koalaman/shellcheck/releases/download/v$version/$tarball"
sha256sum -c <<< "$sha256 $tarball"
sha256sum -c <<<"$sha256 $tarball"
tar -xJf "$tarball" --no-same-owner --strip-components=1 -C /usr/local/bin "shellcheck-v$version/shellcheck"
check_version
fi

View File

@ -1,10 +1,9 @@
#!/usr/bin/env bash
if [ "$(node_modules/.bin/svgo -f static/images/integrations/logos | grep -o '\.[0-9]% = ' | wc -l)" -ge 1 ]
then
echo "ERROR: svgo detected unoptimized SVG files in the \`static/images/integrations/logos\` folder." 1>&2
echo "Please run \`svgo -f static/images/integrations/logos\` and commit the file changes to optimize them."
exit 1
else
echo "SUCCESS: SVG files in static/images/integrations/logos are all optimized!"
if [ "$(node_modules/.bin/svgo -f static/images/integrations/logos | grep -o '\.[0-9]% = ' | wc -l)" -ge 1 ]; then
echo "ERROR: svgo detected unoptimized SVG files in the \`static/images/integrations/logos\` folder." 1>&2
echo "Please run \`svgo -f static/images/integrations/logos\` and commit the file changes to optimize them."
exit 1
else
echo "SUCCESS: SVG files in static/images/integrations/logos are all optimized!"
fi

View File

@ -29,18 +29,18 @@ set -x
POSTGRES_USER="postgres"
if [ "$(uname)" = "OpenBSD" ]; then
POSTGRES_USER="_postgresql"
POSTGRES_USER="_postgresql"
fi
ROOT_POSTGRES=(sudo -i -u "$POSTGRES_USER" psql)
DEFAULT_DB=""
if [ "$(uname)" = "Darwin" ]; then
ROOT_POSTGRES=(psql)
DEFAULT_DB="postgres"
ROOT_POSTGRES=(psql)
DEFAULT_DB="postgres"
fi
if [ "$(uname)" = "OpenBSD" ]; then
DEFAULT_DB="postgres"
DEFAULT_DB="postgres"
fi
VAGRANTUSERNAME=$(whoami)
@ -64,7 +64,7 @@ fi
uuid_var_path=$($(readlink -f "$(dirname "$0")/../../scripts/lib/zulip_tools.py") get_dev_uuid)
rm -f "$uuid_var_path/$STATUS_FILE_NAME"
"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DEFAULT_DB" << EOF
"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DEFAULT_DB" <<EOF
DO \$\$BEGIN
CREATE USER $USERNAME;
EXCEPTION WHEN duplicate_object THEN
@ -87,7 +87,7 @@ umask go-rw
PGPASS_PREFIX="*:*:*:$USERNAME:"
PGPASS_ESCAPED_PREFIX="*:\\*:\\*:$USERNAME:"
if ! grep -q "$PGPASS_ESCAPED_PREFIX" ~/.pgpass; then
echo "$PGPASS_PREFIX$PASSWORD" >> ~/.pgpass
echo "$PGPASS_PREFIX$PASSWORD" >>~/.pgpass
else
sed -i "s/$PGPASS_ESCAPED_PREFIX.*\$/$PGPASS_PREFIX$PASSWORD/" ~/.pgpass
fi
@ -106,7 +106,7 @@ psql -v ON_ERROR_STOP=1 -e -h localhost "$DBNAME_BASE" "$USERNAME" <<EOF
CREATE SCHEMA zulip;
EOF
"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DBNAME_BASE" << EOF
"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DBNAME_BASE" <<EOF
CREATE EXTENSION pgroonga;
GRANT USAGE ON SCHEMA pgroonga TO $USERNAME;
EOF

View File

@ -8,18 +8,20 @@ TEMP=$(getopt -o f --long force -- "$@")
eval set -- "$TEMP"
# extract options.
while true ; do
while true; do
case "$1" in
-f|--force)
FORCEARG="--force";
shift;;
-f | --force)
FORCEARG="--force"
shift
;;
--)
shift;
break;;
shift
break
;;
esac
done
function run {
function run() {
echo '----'
printf 'Running'
printf ' %q' "$@"

View File

@ -1,14 +1,14 @@
#!/usr/bin/env bash
set -e
color_message () {
color_message() {
local color_code="$1" message="$2"
printf '\e[%sm%s\e[0m\n' "$color_code" "$message" >&2
}
loglevel=()
usage () {
usage() {
cat <<EOF
usage:
--help, -h show this help message and exit
@ -18,17 +18,35 @@ usage:
EOF
}
args="$(getopt -o hL: --long help,loglevel:,skip-check-links,skip-external-links -- "$@")" ||
{ usage >&2; exit 1; }
args="$(getopt -o hL: --long help,loglevel:,skip-check-links,skip-external-links -- "$@")" \
|| {
usage >&2
exit 1
}
eval "set -- $args"
while true; do
case "$1" in
-h|--help) usage; exit 0;;
-L|--loglevel) loglevel=("$1" "$2"); shift 2;;
--skip-check-links) skip_check_links=1; shift;;
--skip-external-links) skip_external_links=1; shift;;
--) shift; break;;
*) exit 1;;
-h | --help)
usage
exit 0
;;
-L | --loglevel)
loglevel=("$1" "$2")
shift 2
;;
--skip-check-links)
skip_check_links=1
shift
;;
--skip-external-links)
skip_external_links=1
shift
;;
--)
shift
break
;;
*) exit 1 ;;
esac
done

View File

@ -10,10 +10,16 @@ args="$(getopt -o +f --long help,force -- "$@")"
eval "set -- $args"
while true; do
case "$1" in
--help) usage;;
-f|--force) FORCE=1; shift;;
--) shift; break;;
*) usage;;
--help) usage ;;
-f | --force)
FORCE=1
shift
;;
--)
shift
break
;;
*) usage ;;
esac
done
@ -27,9 +33,8 @@ if [ "$EUID" -ne 0 ]; then
fi
lxc-ls -f \
| perl -lane '$_ = $F[0]; print if (/^zulip-install-/ && !/-base$/)' \
| while read -r c
do
echo "$c"
lxc-destroy -f -n "$c"
done
| perl -lane '$_ = $F[0]; print if (/^zulip-install-/ && !/-base$/)' \
| while read -r c; do
echo "$c"
lxc-destroy -f -n "$c"
done

View File

@ -10,14 +10,23 @@ args="$(getopt -o +r: --long help,release: -- "$@")"
eval "set -- $args"
while true; do
case "$1" in
--help) usage;;
-r|--release) RELEASE="$2"; shift; shift;;
--) shift; break;;
*) usage;;
--help) usage ;;
-r | --release)
RELEASE="$2"
shift
shift
;;
--)
shift
break
;;
*) usage ;;
esac
done
INSTALLER="$1"; shift || usage
INSTALLER_ARGS=("$@"); set --
INSTALLER="$1"
shift || usage
INSTALLER_ARGS=("$@")
set --
if [ -z "$RELEASE" ] || [ -z "$INSTALLER" ]; then
usage
@ -42,7 +51,8 @@ while [ -z "$CONTAINER_NAME" ] || lxc-info -n "$CONTAINER_NAME" >/dev/null 2>&1;
CONTAINER_NAME=zulip-install-"$(basename "$shared_dir")"
done
message="$(cat <<EOF
message="$(
cat <<EOF
Container:
sudo lxc-attach --clear-env -n $CONTAINER_NAME
@ -68,7 +78,7 @@ mount -t overlay overlay \
"$shared_dir"/mnt
lxc-copy --ephemeral --keepdata -n "$BASE_CONTAINER_NAME" -N "$CONTAINER_NAME" \
-m bind="$shared_dir"/mnt:/mnt/src/,bind=/srv/zulip/test-install/pip-cache:/root/.cache/pip
-m bind="$shared_dir"/mnt:/mnt/src/,bind=/srv/zulip/test-install/pip-cache:/root/.cache/pip
"$THIS_DIR"/lxc-wait -n "$CONTAINER_NAME"

View File

@ -10,10 +10,17 @@ args="$(getopt -o +n: --long help,name: -- "$@")"
eval "set -- $args"
while true; do
case "$1" in
--help) usage;;
-n|--name) CONTAINER_NAME="$2"; shift; shift;;
--) shift; break;;
*) usage;;
--help) usage ;;
-n | --name)
CONTAINER_NAME="$2"
shift
shift
;;
--)
shift
break
;;
*) usage ;;
esac
done
@ -31,7 +38,10 @@ poll_runlevel() {
for _ in {1..60}; do
echo "lxc-wait: $CONTAINER_NAME: polling for boot..." >&2
runlevel="$(lxc-attach --clear-env -n "$CONTAINER_NAME" -- runlevel 2>/dev/null)" \
|| { sleep 1; continue; }
|| {
sleep 1
continue
}
if [ "$runlevel" != "${0%[0-9]}" ]; then
echo "lxc-wait: $CONTAINER_NAME: booted!" >&2
poll_network
@ -42,14 +52,16 @@ poll_runlevel() {
exit 1
}
poll_network() {
for _ in {1..60}; do
echo "lxc-wait: $CONTAINER_NAME: polling for network..." >&2
# New hosts don't have `host` or `nslookup`
lxc-attach --clear-env -n "$CONTAINER_NAME" -- \
ping -q -c 1 archive.ubuntu.com 2>/dev/null >/dev/null \
|| { sleep 1; continue; }
ping -q -c 1 archive.ubuntu.com 2>/dev/null >/dev/null \
|| {
sleep 1
continue
}
echo "lxc-wait: $CONTAINER_NAME: network is up!" >&2
exit 0
done
@ -57,6 +69,4 @@ poll_network() {
exit 1
}
poll_runlevel

View File

@ -7,13 +7,15 @@ if [ "$EUID" -ne 0 ]; then
fi
RELEASE="$1"
ARCH=amd64 # TODO: maybe i686 too
ARCH=amd64 # TODO: maybe i686 too
case "$RELEASE" in
bionic) extra_packages=(python-pip)
;;
focal) extra_packages=(python3-pip)
;;
bionic)
extra_packages=(python-pip)
;;
focal)
extra_packages=(python3-pip)
;;
*)
echo "error: unsupported target release: $RELEASE" >&2
exit 1
@ -46,16 +48,16 @@ run apt-get dist-upgrade -y
# As an optimization, we install a bunch of packages the installer
# would install for itself.
run apt-get install -y --no-install-recommends \
xvfb parallel unzip zip jq python3-pip wget curl eatmydata \
git crudini openssl ssl-cert \
build-essential python3-dev \
memcached redis-server \
hunspell-en-us supervisor libssl-dev puppet \
gettext libffi-dev libfreetype6-dev zlib1g-dev libjpeg-dev \
libldap2-dev \
libxml2-dev libxslt1-dev libpq-dev \
virtualenv \
"${extra_packages[@]}"
xvfb parallel unzip zip jq python3-pip wget curl eatmydata \
git crudini openssl ssl-cert \
build-essential python3-dev \
memcached redis-server \
hunspell-en-us supervisor libssl-dev puppet \
gettext libffi-dev libfreetype6-dev zlib1g-dev libjpeg-dev \
libldap2-dev \
libxml2-dev libxslt1-dev libpq-dev \
virtualenv \
"${extra_packages[@]}"
run ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime
run locale-gen en_US.UTF-8 || true

View File

@ -7,13 +7,13 @@ echo 'Testing whether migrations are consistent with models'
new_auto_named_migrations=$(./manage.py showmigrations \
| grep -E ' [0-9]{4}_auto_' \
| grep -Eve ' [0-9]{4}_auto_201[67]' \
-e ' 0052_auto_fix_realmalias_realm_nullable' \
-e ' 0003_auto_20150817_1733' \
-e ' 0002_auto_20150110_0810' \
-e ' 0002_auto_20190420_0723' \
-e ' 0009_auto_20191118_0520' \
-e ' 0052_auto_fix_realmalias_realm_nullable' \
-e ' 0003_auto_20150817_1733' \
-e ' 0002_auto_20150110_0810' \
-e ' 0002_auto_20190420_0723' \
-e ' 0009_auto_20191118_0520' \
| sed 's/\[[x ]\] / /' \
|| true)
|| true)
if [ "$new_auto_named_migrations" != "" ]; then
echo "ERROR: New migrations with unclear automatically generated names."
echo "Please rename these migrations to have readable names:"

View File

@ -6,7 +6,7 @@ if [ ! -d /srv/zulip-py3-venv ] || [ ! -d /srv/zulip-thumbor-venv ]; then
./tools/setup/setup_venvs.py
fi
compile_requirements () {
compile_requirements() {
source="$1"
output="$2"