mirror of https://github.com/zulip/zulip.git

shfmt: Reformat shell scripts with shfmt.

shfmt: https://github.com/mvdan/sh

Signed-off-by: Anders Kaseorg <anders@zulip.com>

This commit is contained in:
parent caa939d2d5
commit dfaea9df65
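
Everything below is the mechanical output of running shfmt over the repository's shell scripts: if/then joined onto one line, pipeline continuations moved to a leading |, redirection operators written without a trailing space, case patterns spaced as "a | b)", and one-line getopt cases expanded into one statement per line. The commit does not record the exact shfmt invocation, so the commands here are only a sketch of how such a pass could be reproduced; the flag choices (-i 4, -bn, -ci) are assumptions inferred from the output style, not taken from the commit.

$ go install mvdan.cc/sh/v3/cmd/shfmt@latest   # one way to install shfmt
$ shfmt -d -i 4 -bn -ci .                      # preview the reformatting as a diff
$ shfmt -w -i 4 -bn -ci .                      # rewrite the shell scripts in place

The reformatted hunks follow.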
@@ -15,12 +15,10 @@ fi
 cd /home/zulip/deployments/current
 BACKLOG="$(./manage.py print_email_delivery_backlog)"

-if [ "$BACKLOG" -gt 0 ] && [ "$BACKLOG" -lt 10 ]
-then
+if [ "$BACKLOG" -gt 0 ] && [ "$BACKLOG" -lt 10 ]; then
 echo "backlog of $BACKLOG"
 exit 1
-elif [ "$BACKLOG" -ge 10 ]
-then
+elif [ "$BACKLOG" -ge 10 ]; then
 echo "backlog of $BACKLOG"
 exit 2
 else
@@ -8,13 +8,12 @@
 SUPERVISOR_STATUS=$(supervisorctl status zulip-workers:zulip_deliver_enqueued_emails 2>&1)
 STATUS=$(echo "$SUPERVISOR_STATUS" | awk '{ print $2 }')

 case "$STATUS" in
 RUNNING)
 echo "Running"
 exit 0
 ;;
-STOPPED|STARTING|BACKOFF|STOPPING|EXITED|FATAL|UNKNOWN)
+STOPPED | STARTING | BACKOFF | STOPPING | EXITED | FATAL | UNKNOWN)
 # not "RUNNING", but a recognized supervisor status
 echo "$STATUS"
 exit 1
@@ -9,16 +9,16 @@ if [ -z "$processes" ]; then
 echo "No workers running"
 exit 0
 fi
-mapfile -t processes <<< "$processes"
-ps -o vsize,size,pid,user,command --sort -vsize "${processes[@]}" > "$datafile"
+mapfile -t processes <<<"$processes"
+ps -o vsize,size,pid,user,command --sort -vsize "${processes[@]}" >"$datafile"
 cat "$datafile"
 top_worker=$(head -n2 "$datafile" | tail -n1)
 top_worker_memory_usage=$(echo "$top_worker" | cut -f1 -d" ")
 rm -f "$datafile"
 if [ "$top_worker_memory_usage" -gt 800000 ]; then
 exit 2
 elif [ "$top_worker_memory_usage" -gt 600000 ]; then
 exit 1
 else
 exit 0
 fi
@@ -20,8 +20,8 @@
 # always be included.

 if [ "$1" = "autoconf" ]; then
 echo yes
 exit 0
 fi

 HOME=/tmp/
@@ -30,33 +30,33 @@ HOME=/tmp/
 # graphs should look.

 if [ "$1" = "config" ]; then
 CONN_WARN=${queue_warn:-500}
 CONN_CRIT=${queue_crit:-1000}

 # The host name this plugin is for. (Can be overridden to have
 # one machine answer for several)

 # The title of the graph
 echo 'graph_title RabbitMQ connections'
 # Arguments to "rrdtool graph". In this case, tell it that the
 # lower limit of the graph is '0', and that 1k=1000 (not 1024)
 echo 'graph_args --base 1000 -l 0'
 # The Y-axis label
 echo 'graph_vlabel connections'
 # We want Cur/Min/Avg/Max unscaled (i.e. 0.42 load instead of
 # 420 milliload)
 #echo 'graph_scale no'
 echo 'graph_category RabbitMQ'

 echo "connections.label Connections"
 echo "connections.warning $CONN_WARN"
 echo "connections.critical $CONN_CRIT"
 echo "connections.info Number of active connections"

 echo 'graph_info Shows the number of connections to RabbitMQ'
 # Last, if run with the "config"-parameter, quit here (don't
 # display any data)
 exit 0
 fi

 # If not run with any parameters at all (or only unknown ones), do the
@@ -20,54 +20,54 @@
 HOME=/tmp/
-QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
-grep -v '^Listing' | \
-grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
+QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
+| grep -v '^Listing' \
+| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')

 if [ "$1" = "config" ]; then
 QUEUE_WARN=${queue_warn:-100}
 QUEUE_CRIT=${queue_crit:-500}
 echo "graph_title RabbitMQ consumers"
 echo 'graph_vlabel consumers'

 for queue in $QUEUES; do
 echo "$queue.label $queue"
 echo "$queue.warning $QUEUE_WARN"
 echo "$queue.critical $QUEUE_CRIT"
 echo "$queue.info Active consumers for $queue"
 done

 echo 'graph_info Lists active consumers for a queue.'
 exit 0
 fi

-HOME=$HOME rabbitmqctl list_queues name consumers| \
-grep -v "^Listing" | grep -v "done.$" | \
-perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
+HOME=$HOME rabbitmqctl list_queues name consumers \
+| grep -v "^Listing" | grep -v "done.$" \
+| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
@@ -20,54 +20,54 @@
 HOME=/tmp/
-QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
-grep -v '^Listing' | \
-grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
+QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
+| grep -v '^Listing' \
+| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')

 if [ "$1" = "config" ]; then
 QUEUE_WARN=${queue_warn:-10000}
 QUEUE_CRIT=${queue_crit:-20000}
 echo "graph_title RabbitMQ list_queues"
 echo 'graph_vlabel queue_size'

 for queue in $QUEUES; do
 echo "$queue.label $queue"
 echo "$queue.warning $QUEUE_WARN"
 echo "$queue.critical $QUEUE_CRIT"
 echo "$queue.info Queue size for $queue"
 done

 echo 'graph_info Lists how many messages are in each queue.'
 exit 0
 fi

-HOME=$HOME rabbitmqctl list_queues | \
-grep -v "^Listing" | grep -v "done.$" | \
-perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
+HOME=$HOME rabbitmqctl list_queues \
+| grep -v "^Listing" | grep -v "done.$" \
+| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
@@ -20,54 +20,54 @@
 HOME=/tmp/
-QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
-grep -v '^Listing' | \
-grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
+QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
+| grep -v '^Listing' \
+| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')

 if [ "$1" = "config" ]; then
 QUEUE_WARN=${queue_warn:-10000}
 QUEUE_CRIT=${queue_crit:-20000}
 echo "graph_title RabbitMQ Unacknowledged Messages"
 echo 'graph_vlabel unacknowledged'

 for queue in $QUEUES; do
 echo "$queue.label $queue"
 echo "$queue.warning $QUEUE_WARN"
 echo "$queue.critical $QUEUE_CRIT"
 echo "$queue.info Unacknowledged messages for $queue"
 done

 echo 'graph_info Lists how many messages are in each queue.'
 exit 0
 fi

-HOME=$HOME rabbitmqctl list_queues name messages_unacknowledged | \
-grep -v "^Listing" | grep -v "done.$" | \
-perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
+HOME=$HOME rabbitmqctl list_queues name messages_unacknowledged \
+| grep -v "^Listing" | grep -v "done.$" \
+| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
@@ -20,54 +20,54 @@
 HOME=/tmp/
-QUEUES=$(HOME=$HOME rabbitmqctl list_queues name | \
-grep -v '^Listing' | \
-grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
+QUEUES=$(HOME=$HOME rabbitmqctl list_queues name \
+| grep -v '^Listing' \
+| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')

 if [ "$1" = "config" ]; then
 QUEUE_WARN=${queue_warn:-10000}
 QUEUE_CRIT=${queue_crit:-20000}
 echo "graph_title RabbitMQ Uncommitted Messages"
 echo 'graph_vlabel uncommitted'

 for queue in $QUEUES; do
 echo "$queue.label $queue"
 echo "$queue.warning $QUEUE_WARN"
 echo "$queue.critical $QUEUE_CRIT"
 echo "$queue.info Uncommitted messages for $queue"
 done

 echo 'graph_info Lists how many messages are in each queue.'
 exit 0
 fi

-HOME=$HOME rabbitmqctl list_channels name messages_uncommitted | \
-grep -v "^Listing" | grep -v "done.$" | \
-perl -nle'($q, $s) = /^(.*)\s+(\d+)$/; $q =~ s/[.=-]/_/g; print("$q.value $s")'
+HOME=$HOME rabbitmqctl list_channels name messages_uncommitted \
+| grep -v "^Listing" | grep -v "done.$" \
+| perl -nle'($q, $s) = /^(.*)\s+(\d+)$/; $q =~ s/[.=-]/_/g; print("$q.value $s")'
@@ -20,54 +20,54 @@
 HOME=/tmp/
-QUEUES=$(rabbitmqctl list_queues name | \
-grep -v '^Listing' | \
-grep -v 'done\.$' | sed -e 's/[.=-]/_/g' )
+QUEUES=$(rabbitmqctl list_queues name \
+| grep -v '^Listing' \
+| grep -v 'done\.$' | sed -e 's/[.=-]/_/g')

 if [ "$1" = "config" ]; then
 QUEUE_WARN=${queue_warn:-10000}
 QUEUE_CRIT=${queue_crit:-20000}
 echo "graph_title RabbitMQ Memory used by queue"
 echo 'graph_args --base 1024 --vertical-label Bytes -l 0'
 echo 'graph_vlabel memory'

 for queue in $QUEUES; do
 echo "$queue.label $queue"
 echo "$queue.warning $QUEUE_WARN"
 echo "$queue.critical $QUEUE_CRIT"
 echo "$queue.info Memory used by $queue"
 done

 echo 'graph_info Show memory usage by queue'
 exit 0
 fi

-HOME=$HOME rabbitmqctl list_queues name memory | \
-grep -v "^Listing" | grep -v "done.$" | \
-perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
+HOME=$HOME rabbitmqctl list_queues name memory \
+| grep -v "^Listing" | grep -v "done.$" \
+| perl -nle'($q, $s) = split; $q =~ s/[.=-]/_/g; print("$q.value $s")'
@@ -18,8 +18,8 @@
 # always be included.

 if [ "$1" = "autoconf" ]; then
 echo yes
 exit 0
 fi

 HOME=/tmp/
@@ -28,30 +28,30 @@ HOME=/tmp/
 # graphs should look.

 if [ "$1" = "config" ]; then
 echo 'graph_title Event queues'
 echo 'graph_args --base 1000 -l 0'
 echo 'graph_vlabel Number'
 echo 'graph_category Tornado'

 echo "active_queues.label Total active event queues"
 echo "active_queues.info Total number of active event queues"
 echo "active_users.label Users with active event queues"
 echo "active_users.info Number of users with active event queues"

 echo 'graph_info Shows the number of active event queues'
 exit 0
 fi

 # If not run with any parameters at all (or only unknown ones), do the
@@ -5,7 +5,7 @@ set -e
 LOCALDISK=/dev/nvme0n1

 if ! grep -q $LOCALDISK /etc/fstab; then
-echo "$LOCALDISK /srv xfs nofail,noatime 1 1" >> /etc/fstab
+echo "$LOCALDISK /srv xfs nofail,noatime 1 1" >>/etc/fstab
 fi

 if ! mountpoint -q /srv; then
@@ -5,9 +5,9 @@ set -e

 # Only run from ifup.
 if [ "$MODE" != start ]; then
 exit 0
 fi

 if [ "$IFACE" = eth0 ]; then
 /usr/local/sbin/zulip-ec2-configure-interfaces
 fi
@@ -6,7 +6,7 @@ zulip_conf_get_boolean() {
 # Treat absent and invalid values as false.
 value=$(crudini --get /etc/zulip/zulip.conf "$1" "$2" 2>/dev/null)
 case "$(echo "$value" | tr '[:upper:]' '[:lower:]')" in
-1|yes|true|on) return 0 ;;
+1 | yes | true | on) return 0 ;;
 *) return 1 ;;
 esac
 }
@@ -18,5 +18,5 @@ fi
 deploy_hook="${ZULIP_CERTBOT_DEPLOY_HOOK:-service nginx reload}"

 certbot renew --quiet \
 --webroot --webroot-path=/var/lib/zulip/certbot-webroot/ \
 --deploy-hook "$deploy_hook"
@@ -43,7 +43,7 @@ Options:
 Skip the initial `apt-get dist-upgrade`.

 EOF
-};
+}

 # Shell option parsing. Over time, we'll want to move some of the
 # environment variables below into this self-documenting system.
@@ -51,22 +51,62 @@ args="$(getopt -o '' --long help,hostname:,email:,certbot,self-signed-cert,cacer
 eval "set -- $args"
 while true; do
 case "$1" in
---help) usage; exit 0;;
+--help)
+usage
+exit 0
+;;

---hostname) EXTERNAL_HOST="$2"; shift; shift;;
---email) ZULIP_ADMINISTRATOR="$2"; shift; shift;;
+--hostname)
+EXTERNAL_HOST="$2"
+shift
+shift
+;;
+--email)
+ZULIP_ADMINISTRATOR="$2"
+shift
+shift
+;;

---certbot) USE_CERTBOT=1; shift;;
---cacert) export CUSTOM_CA_CERTIFICATES="$2"; shift; shift;;
---self-signed-cert) SELF_SIGNED_CERT=1; shift;;
+--certbot)
+USE_CERTBOT=1
+shift
+;;
+--cacert)
+export CUSTOM_CA_CERTIFICATES="$2"
+shift
+shift
+;;
+--self-signed-cert)
+SELF_SIGNED_CERT=1
+shift
+;;

---postgres-version) POSTGRES_VERSION="$2"; shift; shift;;
---postgres-missing-dictionaries) POSTGRES_MISSING_DICTIONARIES=1; shift;;
---no-init-db) NO_INIT_DB=1; shift;;
+--postgres-version)
+POSTGRES_VERSION="$2"
+shift
+shift
+;;
+--postgres-missing-dictionaries)
+POSTGRES_MISSING_DICTIONARIES=1
+shift
+;;
+--no-init-db)
+NO_INIT_DB=1
+shift
+;;

---no-overwrite-settings) NO_OVERWRITE_SETTINGS=1; shift;;
---no-dist-upgrade) NO_DIST_UPGRADE=1; shift;;
---) shift; break;;
+--no-overwrite-settings)
+NO_OVERWRITE_SETTINGS=1
+shift
+;;
+--no-dist-upgrade)
+NO_DIST_UPGRADE=1
+shift
+;;
+--)
+shift
+break
+;;
 esac
 done

@@ -78,9 +118,9 @@ fi
 ## Options from environment variables.
 #
 # Specify options for apt.
-read -r -a APT_OPTIONS <<< "${APT_OPTIONS:-}"
+read -r -a APT_OPTIONS <<<"${APT_OPTIONS:-}"
 # Install additional packages.
-read -r -a ADDITIONAL_PACKAGES <<< "${ADDITIONAL_PACKAGES:-}"
+read -r -a ADDITIONAL_PACKAGES <<<"${ADDITIONAL_PACKAGES:-}"
 # Comma-separated list of puppet manifests to install. default is
 # zulip::voyager for an all-in-one system or zulip::dockervoyager for
 # Docker. Use e.g. zulip::app_frontend for a Zulip frontend server.
@@ -111,8 +151,8 @@ if [ -z "$EXTERNAL_HOST" ] || [ -z "$ZULIP_ADMINISTRATOR" ]; then
 fi
 fi

-if [ "$EXTERNAL_HOST" = zulip.example.com ] ||
-[ "$ZULIP_ADMINISTRATOR" = zulip-admin@example.com ]; then
+if [ "$EXTERNAL_HOST" = zulip.example.com ] \
+|| [ "$ZULIP_ADMINISTRATOR" = zulip-admin@example.com ]; then
 # These example values are specifically checked for and would fail
 # later; see check_config in zerver/lib/management.py.
 echo 'error: The example hostname and email must be replaced with real values.' >&2
@@ -134,8 +174,16 @@ export LANGUAGE="en_US.UTF-8"

 # Check for a supported OS release.
 if [ -f /etc/os-release ]; then
-os_info="$(. /etc/os-release; printf '%s\n' "$ID" "$ID_LIKE" "$VERSION_ID" "$VERSION_CODENAME")"
-{ read -r os_id; read -r os_id_like; read -r os_version_id; read -r os_version_codename || true; } <<< "$os_info"
+os_info="$(
+. /etc/os-release
+printf '%s\n' "$ID" "$ID_LIKE" "$VERSION_ID" "$VERSION_CODENAME"
+)"
+{
+read -r os_id
+read -r os_id_like
+read -r os_version_id
+read -r os_version_codename || true
+} <<<"$os_info"
 case " $os_id $os_id_like " in
 *' debian '*)
 package_system="apt"
@@ -147,7 +195,7 @@ if [ -f /etc/os-release ]; then
 fi

 case "$os_id$os_version_id" in
-debian10|ubuntu18.04|ubuntu20.04) ;;
+debian10 | ubuntu18.04 | ubuntu20.04) ;;
 *)
 set +x
 cat <<EOF
@@ -163,10 +211,11 @@ For more information, see:
 https://zulip.readthedocs.io/en/latest/production/requirements.html
 EOF
 exit 1
+;;
 esac

-if [ "$os_id" = ubuntu ] && ! apt-cache policy |
-grep -q "^ release v=$os_version_id,o=Ubuntu,a=$os_version_codename,n=$os_version_codename,l=Ubuntu,c=universe"; then
+if [ "$os_id" = ubuntu ] && ! apt-cache policy \
+| grep -q "^ release v=$os_version_id,o=Ubuntu,a=$os_version_codename,n=$os_version_codename,l=Ubuntu,c=universe"; then
 set +x
 cat <<'EOF'

@@ -187,10 +236,10 @@ case ",$PUPPET_CLASSES," in
 if [ "$package_system" = apt ]; then
 # We're going to install Postgres from the postgres apt
 # repository; this may conflict with the existing postgres.
-OTHER_PG="$(dpkg --get-selections |
-grep -E '^postgresql-[0-9]+\s+install$' |
-grep -v "^postgresql-$POSTGRES_VERSION\b" |
-cut -f 1)" || true
+OTHER_PG="$(dpkg --get-selections \
+| grep -E '^postgresql-[0-9]+\s+install$' \
+| grep -v "^postgresql-$POSTGRES_VERSION\b" \
+| cut -f 1)" || true
 if [ -n "$OTHER_PG" ]; then
 INDENTED="${OTHER_PG//$'\n'/$'\n' }"
 SPACED="${OTHER_PG//$'\n'/ }"
@@ -274,9 +323,9 @@ fi

 if [ "$package_system" = apt ]; then
 if ! apt-get install -y \
 puppet git curl wget jq \
 python3 crudini \
 "${ADDITIONAL_PACKAGES[@]}"; then
 set +x
 echo -e '\033[0;31m' >&2
 echo "Installing packages failed; is network working and (on Ubuntu) the universe repository enabled?" >&2
@@ -286,9 +335,9 @@ if [ "$package_system" = apt ]; then
 fi
 elif [ "$package_system" = yum ]; then
 if ! yum install -y \
 puppet git curl wget jq \
 python3 crudini \
 "${ADDITIONAL_PACKAGES[@]}"; then
 set +x
 echo -e '\033[0;31m' >&2
 echo "Installing packages failed; is network working?" >&2
@@ -328,13 +377,13 @@ has_class() {
 id -u zulip &>/dev/null || useradd -m zulip --home-dir /home/zulip
 if [ -n "$NO_OVERWRITE_SETTINGS" ] && [ -e "/etc/zulip/zulip.conf" ]; then
 "$ZULIP_PATH"/scripts/zulip-puppet-apply --noop \
 --write-catalog-summary \
 --classfile=/var/lib/puppet/classes.txt \
 >/dev/null
 else
 # Write out more than we need, and remove sections that are not
 # applicable to the classes that are actually necessary.
-cat <<EOF > /etc/zulip/zulip.conf
+cat <<EOF >/etc/zulip/zulip.conf
 [machine]
 puppet_classes = $PUPPET_CLASSES
 deploy_type = production
@@ -352,9 +401,9 @@ EOF
 fi

 "$ZULIP_PATH"/scripts/zulip-puppet-apply --noop \
 --write-catalog-summary \
 --classfile=/var/lib/puppet/classes.txt \
 >/dev/null

 # We only need the postgres version setting on database hosts; but
 # we don't know if this is a database host until we have the catalog summary.
@@ -31,7 +31,10 @@ fi
 if [ "$current_node_version" != "v$node_version" ] || ! [ -L "$node_wrapper_path" ]; then
 export NVM_DIR=/usr/local/nvm
 # shellcheck source=/dev/null
-if ! [ -e "$NVM_DIR/nvm.sh" ] || { . "$NVM_DIR/nvm.sh"; [ "$(nvm --version)" != "$nvm_version" ]; }; then
+if ! [ -e "$NVM_DIR/nvm.sh" ] || {
+. "$NVM_DIR/nvm.sh"
+[ "$(nvm --version)" != "$nvm_version" ]
+}; then
 mkdir -p "$NVM_DIR"
 wget_opts=(-nv)
 if [ -n "${CUSTOM_CA_CERTIFICATES:-}" ]; then
@@ -36,7 +36,7 @@ apt-get -y install "${pre_setup_deps[@]}"
 SCRIPTS_PATH="$(dirname "$(dirname "$0")")"

 release=$(lsb_release -sc)
-if [[ "$release" =~ ^(bionic|cosmic|disco|eoan|focal)$ ]] ; then
+if [[ "$release" =~ ^(bionic|cosmic|disco|eoan|focal)$ ]]; then
 apt-key add "$SCRIPTS_PATH"/setup/pgdg.asc
 apt-key add "$SCRIPTS_PATH"/setup/pgroonga-ppa.asc
 cat >$SOURCES_FILE <<EOF
@@ -46,7 +46,7 @@ deb-src http://apt.postgresql.org/pub/repos/apt/ $release-pgdg main
 deb http://ppa.launchpad.net/groonga/ppa/ubuntu $release main
 deb-src http://ppa.launchpad.net/groonga/ppa/ubuntu $release main
 EOF
-elif [[ "$release" =~ ^(buster)$ ]] ; then
+elif [[ "$release" =~ ^(buster)$ ]]; then
 apt-key add "$SCRIPTS_PATH"/setup/pgdg.asc
 apt-key add "$SCRIPTS_PATH"/setup/pgroonga-debian.asc
 cat >$SOURCES_FILE <<EOF
@@ -71,4 +71,4 @@ else
 apt-get update && rm -f "$STAMP_FILE"
 fi

-echo "$DEPENDENCIES_HASH" > "$DEPENDENCIES_HASH_FILE"
+echo "$DEPENDENCIES_HASH" >"$DEPENDENCIES_HASH_FILE"
@@ -54,4 +54,4 @@ else
 apt-get update && rm -f "$STAMP_FILE"
 fi

-echo "$DEPENDENCIES_HASH" > "$DEPENDENCIES_HASH_FILE"
+echo "$DEPENDENCIES_HASH" >"$DEPENDENCIES_HASH_FILE"
@@ -7,8 +7,14 @@ args="$(getopt -o '' --long prod -- "$@")"
 eval "set -- $args"
 while true; do
 case "$1" in
---prod) is_prod=true; shift;;
---) shift; break;;
+--prod)
+is_prod=true
+shift
+;;
+--)
+shift
+break
+;;
 esac
 done

@@ -10,11 +10,20 @@ args="$(getopt -o '' --long help,force,exists-ok -- "$@")"
 eval "set -- $args"
 while true; do
 case "$1" in
---help) usage;;
+--help) usage ;;
---force) FORCE=1; shift;;
---exists-ok) EXISTS_OK=1; shift;;
---) shift; break;;
-*) usage;;
+--force)
+FORCE=1
+shift
+;;
+--exists-ok)
+EXISTS_OK=1
+shift
+;;
+--)
+shift
+break
+;;
+*) usage ;;
 esac
 done
 EXTERNAL_HOST="$1"
@@ -51,9 +60,9 @@ fi
 rm -f "$KEYFILE" "$CERTFILE"

 if [[ "$EXTERNAL_HOST" =~ ^(([0-9]+\.){3}[0-9]+)(:[0-9]+)?$ ]]; then
 subjectAltName="IP:${BASH_REMATCH[1]}" # IPv4 address
 elif [[ "$EXTERNAL_HOST" =~ ^\[([^][]*)\](:[0-9]+)?$ ]]; then
 subjectAltName="IP:${BASH_REMATCH[1]}" # IPv6 address
 elif [[ "$EXTERNAL_HOST" =~ ^([^:]+)(:[0-9]+)?$ ]]; then
 subjectAltName="DNS:${BASH_REMATCH[1]}"
 else
@@ -94,8 +103,8 @@ fi

 # Based on /usr/sbin/make-ssl-cert from Debian's `ssl-cert` package.
 openssl req -new -x509 \
 -config "$config" -days 3650 -nodes -sha256 \
 -out "$CERTFILE" -keyout "$KEYFILE"

 chmod 644 "$CERTFILE"
 chmod 640 "$KEYFILE"
@@ -10,10 +10,16 @@ args="$(getopt -o '' --long help,quiet -- "$@")"
 eval "set -- $args"
 while true; do
 case "$1" in
---help) usage;;
+--help) usage ;;
---quiet) QUIET=1; shift;;
---) shift; break;;
-*) usage;;
+--quiet)
+QUIET=1
+shift
+;;
+--)
+shift
+break
+;;
+*) usage ;;
 esac
 done

@@ -13,15 +13,14 @@ POSTGRES_USER="${POSTGRES_USER:-postgres}"
 # This psql command may fail because the zulip database doesn’t exist,
 # hence the &&.
 if records="$(
 cd / # Make sure the current working directory is readable by postgres
 su "$POSTGRES_USER" -c "psql -v ON_ERROR_STOP=1 -Atc 'SELECT COUNT(*) FROM zulip.zerver_message;' zulip"
 )" && [ "$records" -gt 200 ]; then
 set +x
 echo "WARNING: This will delete your Zulip database which currently contains $records messages."
 read -p "Do you want to proceed? [y/N] " -r
 echo
-if [[ ! $REPLY =~ ^[Yy]$ ]]
-then
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
 exit 1
 fi
 set -x
@@ -35,12 +34,12 @@ fi
 # Drop any open connections to any old database.
 # Send the script via stdin in case the postgres user lacks permission to read it.
 su -s /usr/bin/env - -- "$POSTGRES_USER" \
-bash -s - zulip zulip_base < "$(dirname "$0")/terminate-psql-sessions"
+bash -s - zulip zulip_base <"$(dirname "$0")/terminate-psql-sessions"

 (
 cd / # Make sure the current working directory is readable by postgres
 su "$POSTGRES_USER" -c 'psql -v ON_ERROR_STOP=1 -e'
-) < "$(dirname "$0")/create-db.sql"
+) <"$(dirname "$0")/create-db.sql"

 # Clear memcached to avoid contamination from previous database state
 "$(dirname "$0")/flush-memcached"
@@ -83,8 +83,14 @@ esac

 # Check for a supported OS release.
 if [ -f /etc/os-release ]; then
-os_info="$(. /etc/os-release; printf '%s\n' "$ID" "$ID_LIKE")"
-{ read -r os_id; read -r os_id_like|| true; } <<< "$os_info"
+os_info="$(
+. /etc/os-release
+printf '%s\n' "$ID" "$ID_LIKE"
+)"
+{
+read -r os_id
+read -r os_id_like || true
+} <<<"$os_info"
 fi

 set -x
@@ -104,10 +110,10 @@ esac
 # Passing --force-interactive suppresses a warning, but also brings up
 # an annoying prompt we stifle with --no-eff-email.
 certbot certonly "${method_args[@]}" \
 "${HOSTNAMES[@]}" -m "$EMAIL" \
 $agree_tos \
 "${deploy_hook[@]}" \
 --force-interactive --no-eff-email

 symlink_with_backup() {
 if [ -e "$2" ]; then
@@ -28,7 +28,7 @@ LOCALFILE="archive.tar.gz"
 wget -qO "$LOCALFILE" "$URL"

 # Check the hash against what was passed in
-echo "$SHA256 $LOCALFILE" > "$LOCALFILE.sha256"
+echo "$SHA256 $LOCALFILE" >"$LOCALFILE.sha256"
 sha256sum -c "$LOCALFILE.sha256"

 tar xzf "$LOCALFILE"
@@ -11,8 +11,11 @@ eval "set -- $args"

 while true; do
 case "$1" in
---) shift; break;;
-*) usage;;
+--)
+shift
+break
+;;
+*) usage ;;
 esac
 done

@@ -47,8 +50,8 @@ git archive -o "$TARBALL" "--prefix=$prefix/" HEAD
 cd "$TMPDIR"
 tar -xf "$TARBALL"
 while read -r i; do
-rm -r --interactive=never "${TMPDIR:?}/$prefix/$i";
-done < "$TMPDIR/$prefix/tools/release-tarball-exclude.txt"
+rm -r --interactive=never "${TMPDIR:?}/$prefix/$i"
+done <"$TMPDIR/$prefix/tools/release-tarball-exclude.txt"
 tar -cf "$TARBALL" "$prefix"
 rm -rf "$prefix"

@@ -78,10 +81,10 @@ mkdir -p "var/log"
 # TODO: Would be much better to instead run the below tools with some
 # sort of environment hack so that we don't need to create this dummy
 # secrets file.
-cat >> zproject/prod_settings_template.py <<EOF
+cat >>zproject/prod_settings_template.py <<EOF
 DEBUG = False
 EOF
-cat >> zproject/dev-secrets.conf <<EOF
+cat >>zproject/dev-secrets.conf <<EOF
 [secrets]
 local_database_password = ''
 secret_key = 'not_used_here'
@@ -96,8 +99,8 @@ EOF
 # We don't need duplicate copies of emoji with hashed paths, and they would break Markdown
 find prod-static/serve/generated/emoji/images/emoji/ -regex '.*\.[0-9a-f]+\.png' -delete

-echo "$GITID" > build_id
-echo "$version" > version
+echo "$GITID" >build_id
+echo "$version" >version

 cd "$TMPDIR"

@@ -2,4 +2,4 @@
 set -e

 cd "$(dirname "$0")/.."
-git describe --tags --match='[0-9]*' > zulip-git-version || true
+git describe --tags --match='[0-9]*' >zulip-git-version || true
@@ -6,7 +6,7 @@ echo "Test suite is running under $(python --version)."
 set -e
 set -x

 ./tools/lint --groups=backend --skip=gitlint,mypy # gitlint disabled because flaky
 ./tools/test-tools
 # We need to pass a parallel level to test-backend because CircleCI's
 # docker setup means the auto-detection logic sees the ~36 processes
@@ -5,7 +5,7 @@ source tools/ci/activate-venv
 set -e
 set -x

 ./tools/lint --groups=frontend --skip=gitlint # gitlint disabled because flaky

 # Run the node tests first, since they're fast and deterministic
 ./tools/test-js-with-node --coverage
@@ -33,11 +33,11 @@ fi
 mkdir /tmp/production-build
 mv /tmp/tmp.*/zulip-server-test.tar.gz /tmp/production-build
 cp -a \
 tools/ci/success-http-headers.template.txt \
 tools/ci/production-install \
 tools/ci/production-verify \
 tools/ci/production-upgrade-pg \
 tools/ci/production-extract-tarball \
 package.json yarn.lock \
 \
 /tmp/production-build
@@ -14,8 +14,11 @@ APT_OPTIONS=(-o 'Dpkg::Options::=--force-confdef' -o 'Dpkg::Options::=--force-co
 apt-get update

 if [ -f /etc/os-release ]; then
-os_info="$(. /etc/os-release; printf '%s\n' "$VERSION_CODENAME")"
-{ read -r os_version_codename || true; } <<< "$os_info"
+os_info="$(
+. /etc/os-release
+printf '%s\n' "$VERSION_CODENAME"
+)"
+{ read -r os_version_codename || true; } <<<"$os_info"
 fi

 if ! apt-get dist-upgrade -y "${APT_OPTIONS[@]}"; then
@ -12,7 +12,9 @@ NOREPLY_EMAIL_ADDRESS = 'noreply@circleci.example.com'
|
||||||
ALLOWED_HOSTS = []
|
ALLOWED_HOSTS = []
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
echo; echo "Now testing that the supervisord jobs are running properly"; echo
|
echo
|
||||||
|
echo "Now testing that the supervisord jobs are running properly"
|
||||||
|
echo
|
||||||
sleep 15 # Guaranteed to have a working supervisord process get an extra digit
|
sleep 15 # Guaranteed to have a working supervisord process get an extra digit
|
||||||
if supervisorctl status | grep -vq RUNNING || supervisorctl status | sed 's/^.*uptime //' | grep -q 0:00:0; then
|
if supervisorctl status | grep -vq RUNNING || supervisorctl status | sed 's/^.*uptime //' | grep -q 0:00:0; then
|
||||||
set +x
|
set +x
|
||||||
|
@@ -33,16 +35,18 @@ if supervisorctl status | grep -vq RUNNING || supervisorctl status | sed 's/^.*u
 fi
 
 # TODO: Ideally this would test actually logging in, but this is a start.
-echo; echo "Now testing that the newly installed server's homepage loads"; echo
+echo
+echo "Now testing that the newly installed server's homepage loads"
+echo
 
-wget https://localhost -O /tmp/index.html --no-check-certificate -S 2> /tmp/wget-output || true # || true so we see errors.log if this 500s
-grep -vi '\(Vary\|Content-Language\|expires\|issued by\|modified\|saved\|[.][.][.]\|Date\|[-][-]\)' /tmp/wget-output > /tmp/http-headers-processed
+wget https://localhost -O /tmp/index.html --no-check-certificate -S 2>/tmp/wget-output || true # || true so we see errors.log if this 500s
+grep -vi '\(Vary\|Content-Language\|expires\|issued by\|modified\|saved\|[.][.][.]\|Date\|[-][-]\)' /tmp/wget-output >/tmp/http-headers-processed
 
 nginx_version="$(nginx -v 2>&1 | awk '{print $3, $4}')"
 
 # Simplify the diff by getting replacing 4-5 digit length numbers with <Length>.
 sed -i 's|Length: [0-9]\+\( [(][0-9]\+[.][0-9]K[)]\)\?|Length: <Length>|' /tmp/http-headers-processed
 sed -i -e 's|Length: [0-9]\+\( [(][0-9]\+[.][0-9]K[)]\)\?|Length: <Length>|' -e "s|{nginx_version_string}|$nginx_version|g" /tmp/success-http-headers.template.txt
 if ! diff -ur /tmp/http-headers-processed /tmp/success-http-headers.template.txt; then
 set +x
 echo
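Several of the lines above change only the spacing of redirections: 2> /tmp/wget-output becomes 2>/tmp/wget-output, and > /tmp/http-headers-processed becomes >/tmp/http-headers-processed, with the target attached directly to the operator. A hypothetical sketch of the same convention (not a file from this commit):

#!/usr/bin/env bash
# Hypothetical illustration: redirection targets follow the operator
# with no intervening space, for stdout, stderr, and stdin alike.
printf 'hello\n' >/tmp/example-output.txt
ls /definitely/not/a/real/path 2>/tmp/example-errors.txt || true
wc -l </tmp/example-output.txt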
@@ -58,12 +62,14 @@ if ! diff -ur /tmp/http-headers-processed /tmp/success-http-headers.template.txt
 fi
 
 # Start the RabbitMQ queue worker related section
-echo; echo "Now confirming all the RabbitMQ queue processors are correctly registered!"; echo
+echo
+echo "Now confirming all the RabbitMQ queue processors are correctly registered!"
+echo
 # These hacky shell scripts just extract the sorted list of queue processors, running and expected
-supervisorctl status | cut -f1 -dR | cut -f2- -d: | grep events | cut -f1 -d" " | cut -f3- -d_ | cut -f1 -d- | sort -u > /tmp/running_queue_processors.txt
-su zulip -c /home/zulip/deployments/current/scripts/lib/queue_workers.py | sort -u > /tmp/all_queue_processors.txt
-su zulip -c "/home/zulip/deployments/current/scripts/lib/queue_workers.py --queue-type test" | sort -u > /tmp/expected_test_queues.txt
-grep -v -x -f /tmp/expected_test_queues.txt /tmp/all_queue_processors.txt > /tmp/expected_queue_processors.txt
+supervisorctl status | cut -f1 -dR | cut -f2- -d: | grep events | cut -f1 -d" " | cut -f3- -d_ | cut -f1 -d- | sort -u >/tmp/running_queue_processors.txt
+su zulip -c /home/zulip/deployments/current/scripts/lib/queue_workers.py | sort -u >/tmp/all_queue_processors.txt
+su zulip -c "/home/zulip/deployments/current/scripts/lib/queue_workers.py --queue-type test" | sort -u >/tmp/expected_test_queues.txt
+grep -v -x -f /tmp/expected_test_queues.txt /tmp/all_queue_processors.txt >/tmp/expected_queue_processors.txt
 if ! diff /tmp/expected_queue_processors.txt /tmp/running_queue_processors.txt >/dev/null; then
 set +x
 echo "FAILURE: Runnable queue processors declared in zerver/worker/queue_processors.py "
@@ -74,7 +80,9 @@ if ! diff /tmp/expected_queue_processors.txt /tmp/running_queue_processors.txt >
 exit 1
 fi
 
-echo; echo "Now running RabbitMQ consumer Nagios tests"; echo
+echo
+echo "Now running RabbitMQ consumer Nagios tests"
+echo
 # First run the check that usually runs in cron and populates the state files
 /home/zulip/deployments/current/scripts/nagios/check-rabbitmq-consumers
 

@@ -95,10 +103,12 @@ done
 
 # Some of the Nagios tests have been temporarily disabled to work
 # around a Travis CI infrastructure issue.
-echo; echo "Now running additional Nagios tests"; echo
-if ! /usr/lib/nagios/plugins/zulip_app_frontend/check_queue_worker_errors || \
-! su zulip -c /usr/lib/nagios/plugins/zulip_postgres_appdb/check_fts_update_log; then # || \
-# ! su zulip -c "/usr/lib/nagios/plugins/zulip_app_frontend/check_send_receive_time --site=https://127.0.0.1/api --nagios --insecure"; then
+echo
+echo "Now running additional Nagios tests"
+echo
+if ! /usr/lib/nagios/plugins/zulip_app_frontend/check_queue_worker_errors \
+|| ! su zulip -c /usr/lib/nagios/plugins/zulip_postgres_appdb/check_fts_update_log; then # || \
+# ! su zulip -c "/usr/lib/nagios/plugins/zulip_app_frontend/check_send_receive_time --site=https://127.0.0.1/api --nagios --insecure"; then
 set +x
 echo
 echo "FAILURE: Nagios checks don't pass:"
@@ -17,11 +17,11 @@ if [ $# -ne 0 ] && [ "$1" == "--reviews" ]; then
 fi
 push_args=()
 
-function is_merged {
+function is_merged() {
 ! git rev-list -n 1 origin/master.."$1" | grep -q .
 }
 
-function clean_ref {
+function clean_ref() {
 ref="$1"
 case "$ref" in
 */master | */HEAD)
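The change above rewrites function definitions so the parentheses are attached to the name and the opening brace stays on the definition line: function is_merged { becomes function is_merged() {. A hypothetical sketch in that style (not a file from this commit):

#!/usr/bin/env bash
# Hypothetical illustration of the function-definition style used after
# the reformat: name() { ... } with the brace on the definition line.
is_even() {
    (($1 % 2 == 0))
}
if is_even 4; then
    echo "4 is even"
fi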
@@ -8,12 +8,12 @@
 # Do not invoke gitlint if commit message is empty
 if grep -q '^[^#]' "$1"; then
 lint_cmd="cd ~/zulip && python -m gitlint.cli"
-if \
-if [ -z "$VIRTUAL_ENV" ] && command -v vagrant > /dev/null && [ -e .vagrant ]; then
+if
+if [ -z "$VIRTUAL_ENV" ] && command -v vagrant >/dev/null && [ -e .vagrant ]; then
 ! vagrant ssh -c "$lint_cmd"
 else
 ! eval "$lint_cmd"
-fi < "$1"
+fi <"$1"
 then
 echo "WARNING: Your commit message does not match Zulip's style guide."
 fi
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-function error_out {
+function error_out() {
 echo -en '\e[0;31m'
 echo "$1"
 echo -en '\e[0m'

@@ -18,7 +18,7 @@ if [ ${#changed_files} -eq 0 ]; then
 exit 0
 fi
 
-if [ -z "$VIRTUAL_ENV" ] && command -v vagrant > /dev/null && [ -e .vagrant ]; then
+if [ -z "$VIRTUAL_ENV" ] && command -v vagrant >/dev/null && [ -e .vagrant ]; then
 vcmd="/srv/zulip/tools/lint --skip=gitlint --force $(printf '%q ' "${changed_files[@]}") || true"
 echo "Running lint using vagrant..."
 vagrant ssh -c "$vcmd"

@@ -36,12 +36,15 @@ WARNING='\033[93m'
 ENDC='\033[0m'
 
 # Make the script independent of the location from where it is executed
-PARENT_PATH=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )
+PARENT_PATH=$(
+cd "$(dirname "${BASH_SOURCE[0]}")"
+pwd -P
+)
 cd "$PARENT_PATH"
 mkdir -p ../var/log
 LOG_PATH="../var/log/provision.log"
 
-echo "PROVISIONING STARTING." >> $LOG_PATH
+echo "PROVISIONING STARTING." >>$LOG_PATH
 
 # PYTHONUNBUFFERED is important to ensure that tracebacks don't get
 # lost far above where they should be in the output.
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -e
 
-usage () {
+usage() {
 cat >&2 <<EOF
 usage: $0 PULL_REQUEST_ID [REMOTE]
 

@@ -69,7 +69,7 @@ fi
 pr_url=https://api.github.com/repos/"${repo_fq}"/pulls/"${pr_id}"
 pr_details="$(curl -s "$pr_url")"
 
-pr_jq () {
+pr_jq() {
 echo "$pr_details" | jq "$@"
 }
 
@@ -4,17 +4,15 @@ set -x
 
 export DJANGO_SETTINGS_MODULE=zproject.test_settings
 
-create_zulip_test()
-{
-psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
+create_zulip_test() {
+psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
 DROP DATABASE IF EXISTS zulip_test;
 CREATE DATABASE zulip_test TEMPLATE zulip_test_base;
 EOF
 }
 
-create_zulip_test_template()
-{
-psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test << EOF
+create_zulip_test_template() {
+psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
 DROP DATABASE IF EXISTS zulip_test_template;
 CREATE DATABASE zulip_test_template TEMPLATE zulip_test;
 EOF
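Two conventions show up in the hunk above: the opening brace of a function moves onto the definition line (create_zulip_test() { ...), and here-doc operators are written without a space before the delimiter, so << EOF becomes <<EOF. A hypothetical sketch combining both (not a file from this commit):

#!/usr/bin/env bash
# Hypothetical illustration: brace on the definition line and an
# unspaced here-doc operator.
show_greeting() {
cat <<EOF
hello from a here-doc
EOF
}
show_greeting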
@@ -41,7 +39,7 @@ create_zulip_test
 zerver.UserProfile zerver.Stream zerver.Recipient \
 zerver.Subscription zerver.Message zerver.Huddle zerver.Realm \
 zerver.UserMessage zerver.Client \
-zerver.DefaultStream > zerver/tests/fixtures/messages.json
+zerver.DefaultStream >zerver/tests/fixtures/messages.json
 
 # create pristine template database, for fast fixture restoration after tests are run.
 create_zulip_test_template
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -e
 
-usage () {
+usage() {
 cat >&2 <<EOF
 usage: $0 PULL_REQUEST_ID [REMOTE]
 

@@ -6,7 +6,6 @@ if ! [ -d ".git/hooks/" ]; then
 exit 1
 fi
 
-for hook in pre-commit commit-msg
-do
+for hook in pre-commit commit-msg; do
 ln -snf ../../tools/"$hook" .git/hooks/
 done
@@ -17,7 +17,7 @@ set -e
 set -x
 
 # Set the hostname early
-echo "$HOSTNAME" > /etc/hostname
+echo "$HOSTNAME" >/etc/hostname
 hostname "$HOSTNAME"
 sed -i "s/localhost$/localhost $HOSTNAME $SERVER/" /etc/hosts
 

@@ -57,15 +57,15 @@ EOF
 # function so we do can it again later with the zulip user
 function install_keys() {
 USERNAME="$1"
-SSHDIR="$( getent passwd "$USERNAME" | cut -d: -f6 )/.ssh"
+SSHDIR="$(getent passwd "$USERNAME" | cut -d: -f6)/.ssh"
 KEYDATA="$($AWS --output text \
 secretsmanager get-secret-value \
 --secret-id "$SSH_SECRET_ID" \
 --query SecretString)"
 mkdir -p "$SSHDIR"
-echo "$KEYDATA" | jq -r .public | base64 -d > "$SSHDIR/id_rsa.pub"
-echo "$KEYDATA" | jq -r .private | base64 -d > "$SSHDIR/id_rsa"
+echo "$KEYDATA" | jq -r .public | base64 -d >"$SSHDIR/id_rsa.pub"
+echo "$KEYDATA" | jq -r .private | base64 -d >"$SSHDIR/id_rsa"
 chown -R "$USERNAME:$USERNAME" "$SSHDIR"
 chmod 600 "$SSHDIR/id_rsa"
 }
 install_keys root

@@ -8,7 +8,7 @@ if [ ! -d "/srv/zulip-aws-tools/v2/$AWS_CLI_VERSION" ]; then
 cd /srv/zulip-aws-tools || exit 1
 rm -rf awscli.zip awscli.zip.sha256 aws/
 wget -q "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-$AWS_CLI_VERSION.zip" -O awscli.zip
-echo "$AWS_CLI_SHA awscli.zip" > awscli.zip.sha256
+echo "$AWS_CLI_SHA awscli.zip" >awscli.zip.sha256
 sha256sum -c awscli.zip.sha256
 unzip -q awscli.zip
 (

@@ -1,11 +1,10 @@
 #!/usr/bin/env bash
 set -e
 
-run()
-{
+run() {
 PGHOST=localhost PGUSER=zulip \
 "$(dirname "$0")/../../scripts/setup/terminate-psql-sessions" zulip_test zulip_test_base zulip_test_template
-psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test << EOF
+psql -v ON_ERROR_STOP=1 -h localhost postgres zulip_test <<EOF
 DROP DATABASE IF EXISTS zulip_test;
 CREATE DATABASE zulip_test TEMPLATE zulip_test_template;
 EOF
@@ -8,7 +8,7 @@ email=desdemona@zulip.com
 mkdir -p var/puppeteer
 
 password=$(./manage.py print_initial_password "$email" | grep -F "$email" | awk '{ print $2 }')
-cat > var/puppeteer/test_credentials.js <<EOF
+cat >var/puppeteer/test_credentials.js <<EOF
 // Generated by tools/setup/generate-test-credentials
 var test_credentials = {default_user: {username: '$email', password: '$password'}};
 try { exports.test_credentials = test_credentials; } catch (e) {}

@@ -43,19 +43,19 @@ if [ -z "$BRANCH" ]; then
 BRANCH=$(crudini --get "$zulip_install_config_file" repo default_branch)
 fi
 
-AWS_ZONE_ID=$( crudini --get "$zulip_install_config_file" aws zone_id)
+AWS_ZONE_ID=$(crudini --get "$zulip_install_config_file" aws zone_id)
 SECURITY_GROUPS=$(crudini --get "$zulip_install_config_file" aws security_groups)
-AMI_ID=$( crudini --get "$zulip_install_config_file" aws image_id)
-INSTANCE_TYPE=$( crudini --get "$zulip_install_config_file" aws instance_type)
-SSH_SECRET_ID=$( crudini --get "$zulip_install_config_file" aws ssh_secret_id)
+AMI_ID=$(crudini --get "$zulip_install_config_file" aws image_id)
+INSTANCE_TYPE=$(crudini --get "$zulip_install_config_file" aws instance_type)
+SSH_SECRET_ID=$(crudini --get "$zulip_install_config_file" aws ssh_secret_id)
 
 # Verify it doesn't exist already
-ZONE_NAME=$($AWS route53 get-hosted-zone --id "$AWS_ZONE_ID" | jq -r '.HostedZone.Name' )
+ZONE_NAME=$($AWS route53 get-hosted-zone --id "$AWS_ZONE_ID" | jq -r '.HostedZone.Name')
 HOSTNAME="$SERVER.${ZONE_NAME%?}" # Remove trailing .
 EXISTING_RECORDS=$($AWS route53 list-resource-record-sets \
 --hosted-zone-id "$AWS_ZONE_ID" \
 --query "ResourceRecordSets[?Name == '$HOSTNAME.']" \
 | jq '. | length')
 if [ "$EXISTING_RECORDS" != "0" ]; then
 echo "$HOSTNAME already exists!"
 exit 1

@@ -72,16 +72,16 @@ BOOTDATA=$(mktemp)
 echo "BRANCH=$BRANCH"
 echo "SSH_SECRET_ID=$SSH_SECRET_ID"
 sed '/^AWS=/ r ./bootstrap-awscli' bootstrap-aws-installer
-} >> "$BOOTDATA"
+} >>"$BOOTDATA"
 
 TAGS="[{Key=Name,Value=$SERVER},{Key=role,Value=\"$ROLES\"}]"
 INSTANCE_DATA=$($AWS ec2 run-instances \
 --iam-instance-profile 'Name="EC2ProdInstance"' \
 --image-id "$AMI_ID" \
 --instance-type "$INSTANCE_TYPE" \
 --security-group-ids "$SECURITY_GROUPS" \
 --tag-specifications "ResourceType=instance,Tags=$TAGS" \
 --user-data "file://$BOOTDATA")
 INSTANCEID=$(echo "$INSTANCE_DATA" | jq -r .Instances[0].InstanceId)
 
 # Wait for public IP assignment

@@ -89,12 +89,12 @@ PUBLIC_DNS_NAME=""
 while [ -z "$PUBLIC_DNS_NAME" ]; do
 sleep 1
 PUBLIC_DNS_NAME=$($AWS ec2 describe-instances --instance-ids "$INSTANCEID" \
-| jq -r .Reservations[0].Instances[0].PublicDnsName )
+| jq -r .Reservations[0].Instances[0].PublicDnsName)
 done
 
 # Add the hostname to the zone
 ROUTE53_CHANGES=$(mktemp)
-cat > "$ROUTE53_CHANGES" <<EOF
+cat >"$ROUTE53_CHANGES" <<EOF
 {
 "Comment": "Add the $HOSTNAME CNAME record",
 "Changes": [
@@ -5,7 +5,7 @@ version=0.7.1
 tarball="shellcheck-v$version.linux.x86_64.tar.xz"
 sha256=64f17152d96d7ec261ad3086ed42d18232fcb65148b44571b564d688269d36c8
 
-check_version () {
+check_version() {
 out="$(shellcheck --version 2>/dev/null)" && [[ "$out" = *"
 version: $version
 "* ]]

@@ -16,7 +16,7 @@ if ! check_version; then
 trap 'rm -r "$tmpdir"' EXIT
 cd "$tmpdir"
 wget -nv "https://github.com/koalaman/shellcheck/releases/download/v$version/$tarball"
-sha256sum -c <<< "$sha256 $tarball"
+sha256sum -c <<<"$sha256 $tarball"
 tar -xJf "$tarball" --no-same-owner --strip-components=1 -C /usr/local/bin "shellcheck-v$version/shellcheck"
 check_version
 fi

@@ -1,10 +1,9 @@
 #!/usr/bin/env bash
 
-if [ "$(node_modules/.bin/svgo -f static/images/integrations/logos | grep -o '\.[0-9]% = ' | wc -l)" -ge 1 ]
-then
-echo "ERROR: svgo detected unoptimized SVG files in the \`static/images/integrations/logos\` folder." 1>&2
-echo "Please run \`svgo -f static/images/integrations/logos\` and commit the file changes to optimize them."
-exit 1
-else
-echo "SUCCESS: SVG files in static/images/integrations/logos are all optimized!"
+if [ "$(node_modules/.bin/svgo -f static/images/integrations/logos | grep -o '\.[0-9]% = ' | wc -l)" -ge 1 ]; then
+echo "ERROR: svgo detected unoptimized SVG files in the \`static/images/integrations/logos\` folder." 1>&2
+echo "Please run \`svgo -f static/images/integrations/logos\` and commit the file changes to optimize them."
+exit 1
+else
+echo "SUCCESS: SVG files in static/images/integrations/logos are all optimized!"
 fi
@@ -29,18 +29,18 @@ set -x
 
 POSTGRES_USER="postgres"
 if [ "$(uname)" = "OpenBSD" ]; then
 POSTGRES_USER="_postgresql"
 fi
 
 ROOT_POSTGRES=(sudo -i -u "$POSTGRES_USER" psql)
 DEFAULT_DB=""
 if [ "$(uname)" = "Darwin" ]; then
 ROOT_POSTGRES=(psql)
 DEFAULT_DB="postgres"
 fi
 
 if [ "$(uname)" = "OpenBSD" ]; then
 DEFAULT_DB="postgres"
 fi
 
 VAGRANTUSERNAME=$(whoami)

@@ -64,7 +64,7 @@ fi
 uuid_var_path=$($(readlink -f "$(dirname "$0")/../../scripts/lib/zulip_tools.py") get_dev_uuid)
 rm -f "$uuid_var_path/$STATUS_FILE_NAME"
 
-"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DEFAULT_DB" << EOF
+"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DEFAULT_DB" <<EOF
 DO \$\$BEGIN
 CREATE USER $USERNAME;
 EXCEPTION WHEN duplicate_object THEN

@@ -87,7 +87,7 @@ umask go-rw
 PGPASS_PREFIX="*:*:*:$USERNAME:"
 PGPASS_ESCAPED_PREFIX="*:\\*:\\*:$USERNAME:"
 if ! grep -q "$PGPASS_ESCAPED_PREFIX" ~/.pgpass; then
-echo "$PGPASS_PREFIX$PASSWORD" >> ~/.pgpass
+echo "$PGPASS_PREFIX$PASSWORD" >>~/.pgpass
 else
 sed -i "s/$PGPASS_ESCAPED_PREFIX.*\$/$PGPASS_PREFIX$PASSWORD/" ~/.pgpass
 fi

@@ -106,7 +106,7 @@ psql -v ON_ERROR_STOP=1 -e -h localhost "$DBNAME_BASE" "$USERNAME" <<EOF
 CREATE SCHEMA zulip;
 EOF
 
-"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DBNAME_BASE" << EOF
+"${ROOT_POSTGRES[@]}" -v ON_ERROR_STOP=1 -e "$DBNAME_BASE" <<EOF
 CREATE EXTENSION pgroonga;
 GRANT USAGE ON SCHEMA pgroonga TO $USERNAME;
 EOF
@@ -8,18 +8,20 @@ TEMP=$(getopt -o f --long force -- "$@")
 eval set -- "$TEMP"
 
 # extract options.
-while true ; do
+while true; do
 case "$1" in
--f|--force)
-FORCEARG="--force";
-shift;;
+-f | --force)
+FORCEARG="--force"
+shift
+;;
 --)
-shift;
-break;;
+shift
+break
+;;
 esac
 done
 
-function run {
+function run() {
 echo '----'
 printf 'Running'
 printf ' %q' "$@"
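The case statement above is reflowed: pattern lists gain spaces around the separator, so -f|--force) becomes -f | --force); each command in an arm moves to its own line; and ;; lands on a line of its own. The stray space in while true ; do is also dropped. A hypothetical option loop in that style (not a file from this commit):

#!/usr/bin/env bash
# Hypothetical illustration of the reformatted case-arm style:
# spaced pattern alternatives, one command per line, ';;' on its own line.
VERBOSE=0
for arg in "$@"; do
    case "$arg" in
        -v | --verbose)
            VERBOSE=1
            ;;
        -h | --help)
            echo "usage: $0 [-v] [-h]"
            exit 0
            ;;
        *)
            echo "ignoring unknown option: $arg" >&2
            ;;
    esac
done
echo "verbose=$VERBOSE"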
@@ -1,14 +1,14 @@
 #!/usr/bin/env bash
 set -e
 
-color_message () {
+color_message() {
 local color_code="$1" message="$2"
 printf '\e[%sm%s\e[0m\n' "$color_code" "$message" >&2
 }
 
 loglevel=()
 
-usage () {
+usage() {
 cat <<EOF
 usage:
 --help, -h show this help message and exit

@@ -18,17 +18,35 @@ usage:
 EOF
 }
 
-args="$(getopt -o hL: --long help,loglevel:,skip-check-links,skip-external-links -- "$@")" ||
-{ usage >&2; exit 1; }
+args="$(getopt -o hL: --long help,loglevel:,skip-check-links,skip-external-links -- "$@")" \
+|| {
+usage >&2
+exit 1
+}
 eval "set -- $args"
 while true; do
 case "$1" in
--h|--help) usage; exit 0;;
--L|--loglevel) loglevel=("$1" "$2"); shift 2;;
---skip-check-links) skip_check_links=1; shift;;
---skip-external-links) skip_external_links=1; shift;;
---) shift; break;;
-*) exit 1;;
+-h | --help)
+usage
+exit 0
+;;
+-L | --loglevel)
+loglevel=("$1" "$2")
+shift 2
+;;
+--skip-check-links)
+skip_check_links=1
+shift
+;;
+--skip-external-links)
+skip_external_links=1
+shift
+;;
+--)
+shift
+break
+;;
+*) exit 1 ;;
 esac
 done
 

@@ -10,10 +10,16 @@ args="$(getopt -o +f --long help,force -- "$@")"
 eval "set -- $args"
 while true; do
 case "$1" in
---help) usage;;
--f|--force) FORCE=1; shift;;
---) shift; break;;
-*) usage;;
+--help) usage ;;
+-f | --force)
+FORCE=1
+shift
+;;
+--)
+shift
+break
+;;
+*) usage ;;
 esac
 done
 
@@ -27,9 +33,8 @@ if [ "$EUID" -ne 0 ]; then
 fi
 
 lxc-ls -f \
 | perl -lane '$_ = $F[0]; print if (/^zulip-install-/ && !/-base$/)' \
-| while read -r c
-do
+| while read -r c; do
 echo "$c"
 lxc-destroy -f -n "$c"
 done
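Here the loop keyword is joined to its header: | while read -r c followed by do on the next line becomes | while read -r c; do, mirroring the if ...; then form used elsewhere in these hunks. A hypothetical sketch of the same shape (not a file from this commit):

#!/usr/bin/env bash
# Hypothetical illustration: 'do' and 'then' joined to their headers
# with '; do' / '; then' rather than placed on separate lines.
printf '%s\n' alpha beta gamma \
    | while read -r name; do
        echo "saw $name"
    done
if [ -d /tmp ]; then
    echo "/tmp exists"
fi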
@@ -10,14 +10,23 @@ args="$(getopt -o +r: --long help,release: -- "$@")"
 eval "set -- $args"
 while true; do
 case "$1" in
---help) usage;;
--r|--release) RELEASE="$2"; shift; shift;;
---) shift; break;;
-*) usage;;
+--help) usage ;;
+-r | --release)
+RELEASE="$2"
+shift
+shift
+;;
+--)
+shift
+break
+;;
+*) usage ;;
 esac
 done
-INSTALLER="$1"; shift || usage
-INSTALLER_ARGS=("$@"); set --
+INSTALLER="$1"
+shift || usage
+INSTALLER_ARGS=("$@")
+set --
 
 if [ -z "$RELEASE" ] || [ -z "$INSTALLER" ]; then
 usage

@@ -42,7 +51,8 @@ while [ -z "$CONTAINER_NAME" ] || lxc-info -n "$CONTAINER_NAME" >/dev/null 2>&1;
 CONTAINER_NAME=zulip-install-"$(basename "$shared_dir")"
 done
 
-message="$(cat <<EOF
+message="$(
+cat <<EOF
 
 Container:
 sudo lxc-attach --clear-env -n $CONTAINER_NAME

@@ -68,7 +78,7 @@ mount -t overlay overlay \
 "$shared_dir"/mnt
 
 lxc-copy --ephemeral --keepdata -n "$BASE_CONTAINER_NAME" -N "$CONTAINER_NAME" \
 -m bind="$shared_dir"/mnt:/mnt/src/,bind=/srv/zulip/test-install/pip-cache:/root/.cache/pip
 
 "$THIS_DIR"/lxc-wait -n "$CONTAINER_NAME"
 

@@ -10,10 +10,17 @@ args="$(getopt -o +n: --long help,name: -- "$@")"
 eval "set -- $args"
 while true; do
 case "$1" in
---help) usage;;
--n|--name) CONTAINER_NAME="$2"; shift; shift;;
---) shift; break;;
-*) usage;;
+--help) usage ;;
+-n | --name)
+CONTAINER_NAME="$2"
+shift
+shift
+;;
+--)
+shift
+break
+;;
+*) usage ;;
 esac
 done
 
@@ -31,7 +38,10 @@ poll_runlevel() {
 for _ in {1..60}; do
 echo "lxc-wait: $CONTAINER_NAME: polling for boot..." >&2
 runlevel="$(lxc-attach --clear-env -n "$CONTAINER_NAME" -- runlevel 2>/dev/null)" \
-|| { sleep 1; continue; }
+|| {
+sleep 1
+continue
+}
 if [ "$runlevel" != "${0%[0-9]}" ]; then
 echo "lxc-wait: $CONTAINER_NAME: booted!" >&2
 poll_network

@@ -42,14 +52,16 @@ poll_runlevel() {
 exit 1
 }
 
 
 poll_network() {
 for _ in {1..60}; do
 echo "lxc-wait: $CONTAINER_NAME: polling for network..." >&2
 # New hosts don't have `host` or `nslookup`
 lxc-attach --clear-env -n "$CONTAINER_NAME" -- \
 ping -q -c 1 archive.ubuntu.com 2>/dev/null >/dev/null \
-|| { sleep 1; continue; }
+|| {
+sleep 1
+continue
+}
 echo "lxc-wait: $CONTAINER_NAME: network is up!" >&2
 exit 0
 done

@@ -57,6 +69,4 @@ poll_network() {
 exit 1
 }
 
-
 poll_runlevel
-
@@ -7,13 +7,15 @@ if [ "$EUID" -ne 0 ]; then
 fi
 
 RELEASE="$1"
 ARCH=amd64 # TODO: maybe i686 too
 
 case "$RELEASE" in
-bionic) extra_packages=(python-pip)
-;;
-focal) extra_packages=(python3-pip)
-;;
+bionic)
+extra_packages=(python-pip)
+;;
+focal)
+extra_packages=(python3-pip)
+;;
 *)
 echo "error: unsupported target release: $RELEASE" >&2
 exit 1

@@ -46,16 +48,16 @@ run apt-get dist-upgrade -y
 # As an optimization, we install a bunch of packages the installer
 # would install for itself.
 run apt-get install -y --no-install-recommends \
 xvfb parallel unzip zip jq python3-pip wget curl eatmydata \
 git crudini openssl ssl-cert \
 build-essential python3-dev \
 memcached redis-server \
 hunspell-en-us supervisor libssl-dev puppet \
 gettext libffi-dev libfreetype6-dev zlib1g-dev libjpeg-dev \
 libldap2-dev \
 libxml2-dev libxslt1-dev libpq-dev \
 virtualenv \
 "${extra_packages[@]}"
 
 run ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime
 run locale-gen en_US.UTF-8 || true
@@ -7,13 +7,13 @@ echo 'Testing whether migrations are consistent with models'
 new_auto_named_migrations=$(./manage.py showmigrations \
 | grep -E ' [0-9]{4}_auto_' \
 | grep -Eve ' [0-9]{4}_auto_201[67]' \
 -e ' 0052_auto_fix_realmalias_realm_nullable' \
 -e ' 0003_auto_20150817_1733' \
 -e ' 0002_auto_20150110_0810' \
 -e ' 0002_auto_20190420_0723' \
 -e ' 0009_auto_20191118_0520' \
 | sed 's/\[[x ]\] / /' \
 || true)
 if [ "$new_auto_named_migrations" != "" ]; then
 echo "ERROR: New migrations with unclear automatically generated names."
 echo "Please rename these migrations to have readable names:"
|
||||||
./tools/setup/setup_venvs.py
|
./tools/setup/setup_venvs.py
|
||||||
fi
|
fi
|
||||||
|
|
||||||
compile_requirements () {
|
compile_requirements() {
|
||||||
source="$1"
|
source="$1"
|
||||||
output="$2"
|
output="$2"
|
||||||
|
|
||||||
|
|