zulip_ops: Delete the long-disused `stats1.zulip.net` config and its dependencies.

This consists of the `zulip_ops::stats` Puppet class, which has apparently
not been used since 2014, and a number of files that I believe were only
used for it, plus a couple of tiny loose ends in other files.
Greg Price 2017-08-10 16:20:12 -07:00 committed by Tim Abbott
parent 0042fc0c19
commit 61666a9262
20 changed files with 1 addition and 1421 deletions

View File

@@ -1,3 +1,2 @@
 static/js/blueslip.js
-puppet/zulip_ops/files/statsd/local.js
 static/webpack-bundles

View File

@@ -1066,14 +1066,11 @@ def get_realm_activity(request, realm_str):
     content = sent_messages_report(realm_str)
     data += [(page_title, content)]
-    realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
-    realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
     title = realm_str
     return render(
         request,
         'analytics/activity.html',
-        context=dict(data=data, realm_link=realm_link, title=title),
+        context=dict(data=data, realm_link=None, title=title),
     )
 @require_server_admin

View File

@@ -1,50 +0,0 @@
WSGISocketPrefix /usr/lib/apache2/modules/
Listen 444
<VirtualHost *:444>
ServerName stats1.zulip.net
SSLEngine on
SSLCertificateFile /etc/ssl/certs/stats1.zulip.net.crt
SSLCertificateKeyFile /etc/ssl/certs/stats1.zulip.net.key
Header add Strict-Transport-Security "max-age=15768000"
<Location "/">
AuthType Digest
AuthName "wiki"
AuthDigestProvider file
AuthUserFile /etc/apache2/users/wiki
Require valid-user
</Location>
# Graphite specific setup
DocumentRoot "/opt/graphite/webapp"
WSGIDaemonProcess graphite processes=5 threads=5 display-name='%{GROUP}' inactivity-timeout=120
WSGIProcessGroup graphite
WSGIApplicationGroup %{GLOBAL}
WSGIImportScript /opt/graphite/conf/graphite.wsgi process-group=graphite application-group=%{GLOBAL}
WSGIScriptAlias / /opt/graphite/conf/graphite.wsgi
Alias /content/ /opt/graphite/webapp/content/
<Location "/content/">
SetHandler None
</Location>
Alias /media/ "/usr/lib/pymodules/python2/django/contrib/admin/media/"
<Location "/media/">
SetHandler None
</Location>
<Directory /opt/graphite/conf/>
Order deny,allow
Allow from all
</Directory>
ErrorLog /var/log/apache2/error.log
LogLevel warn
CustomLog /var/log/apache2/access.log combined
ServerSignature On
</VirtualHost>
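
For reference, entries in the AuthUserFile above use Apache's htdigest format,
user:realm:MD5(user:realm:password). A minimal Python sketch with purely
illustrative credentials (the realm must match AuthName, here "wiki"):

import hashlib

user, realm, password = "graphiti", "wiki", "hunter2"  # illustrative only
ha1 = hashlib.md5(("%s:%s:%s" % (user, realm, password)).encode()).hexdigest()
print("%s:%s:%s" % (user, realm, ha1))  # one line of /etc/apache2/users/wiki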

View File

@@ -1,43 +0,0 @@
<VirtualHost *:80>
ServerName graphiti.zulip.net
Redirect permanent / https://graphiti.zulip.net/
</VirtualHost>
<VirtualHost *:443>
ServerName graphiti.zulip.net
SSLEngine on
SSLCertificateFile /etc/ssl/certs/stats1.zulip.net.crt
SSLCertificateKeyFile /etc/ssl/certs/stats1.zulip.net.key
Header add Strict-Transport-Security "max-age=15768000"
Header add X-Frame-Options DENY
<Location "/">
AuthType Digest
AuthName "wiki"
AuthDigestProvider file
AuthUserFile /etc/apache2/users/wiki
Require valid-user
</Location>
# Graphiti reverse-proxy to unicorn serving at localhost:8088
ProxyRequests Off
ProxyPreserveHost On
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
ProxyPass / http://127.0.0.1:8088/
ProxyPassReverse / http://127.0.0.1:8088/
ErrorLog /var/log/apache2/error.log
LogLevel warn
CustomLog /var/log/apache2/access.log combined
ServerSignature On
</VirtualHost>

View File

@@ -1,38 +0,0 @@
WSGISocketPrefix /usr/lib/apache2/modules/
<VirtualHost *:443>
ServerName stats1.zulip.net
SSLEngine on
SSLCertificateFile /etc/ssl/certs/stats1.zulip.net.crt
SSLCertificateKeyFile /etc/ssl/certs/stats1.zulip.net.key
Header add Strict-Transport-Security "max-age=15768000"
<Location "/">
AuthType Digest
AuthName "wiki"
AuthDigestProvider file
AuthUserFile /etc/apache2/users/wiki
Require valid-user
</Location>
ErrorLog /var/log/apache2/error.log
LogLevel warn
CustomLog /var/log/apache2/access.log combined
ServerSignature On
Header add Strict-Transport-Security "max-age=15768000"
# Graphiti reverse-proxy to unicorn serving at localhost:8088
ProxyRequests Off
ProxyPreserveHost On
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
ProxyPass / http://127.0.0.1:8088/
ProxyPassReverse / http://127.0.0.1:8088/
</VirtualHost>

View File

@@ -1,4 +0,0 @@
MAILTO=root
SHELL=/bin/bash
0 3 * * * zulip /home/zulip/zulip/puppet/zulip_ops/files/graphite/daily_rsync_backup.sh

View File

@@ -1,60 +0,0 @@
# The form of each line in this file should be as follows:
#
# output_template (frequency) = method input_pattern
#
# This will capture any received metrics that match 'input_pattern'
# for calculating an aggregate metric. The calculation will occur
# every 'frequency' seconds and the 'method' can specify 'sum' or
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if your metric naming scheme is:
#
# <env>.applications.<app>.<server>.<metric>
#
# You could configure some aggregations like so:
#
# <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
# <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
#
# As an example, if the following metrics are received:
#
# prod.applications.apache.www01.requests
# prod.applications.apache.www02.requests
#
# They would all go into the same aggregation buffer and after 60 seconds the
# aggregate metric 'prod.applications.apache.all.requests' would be calculated
# by summing their values.
#
# Note that any time this file is modified, it will be re-read automatically.
# NOTE: If you use the `sum` aggregation method, make sure the aggregation period is
# 5 seconds unless you know what you are doing. statsd pushes to carbon
# every 5 seconds (see local.js), so aggregating over a longer period of time
# will inflate the output value
# Aggregate all per-bucket remote cache stats into a generic hit/miss stat
stats.<app>.cache.all.hit (5) = sum stats.<app>.cache.*.hit
stats.<app>.cache.all.miss (5) = sum stats.<app>.cache.*.miss
# Aggregate all per-bucket remote cache stats counts into a generic hit/miss stat
stats_counts.<app>.cache.all.hit (5) = sum stats_counts.<app>.cache.*.hit
stats_counts.<app>.cache.all.miss (5) = sum stats_counts.<app>.cache.*.miss
# Aggregate all per-domain active stats to overall active stats
stats.gauges.<app>.users.active.all.<bucket> (5) = sum stats.gauges.<app>.users.active.*.<bucket>
stats.gauges.<app>.users.reading.all.<bucket> (5) = sum stats.gauges.<app>.users.reading.*.<bucket>
# Aggregate all per-realm end-to-end send stats to overall
stats.timers.<app>.endtoend.send_time.all.<type> (5) = sum stats.timers.<app>.endtoend.send_time.*.<type>
stats.timers.<app>.endtoend.receive_time.all.<type> (5) = sum stats.timers.<app>.endtoend.receive_time.*.<type>
stats.timers.<app>.endtoend.displayed_time.all.<type> (5) = sum stats.timers.<app>.endtoend.displayed_time.*.<type>
# Aggregate all per-realm narrow timing stats
stats.timers.<app>.narrow.initial_core.all.<type> (5) = sum stats.timers.<app>.narrow.initial_core.*.<type>
stats.timers.<app>.narrow.initial_free.all.<type> (5) = sum stats.timers.<app>.narrow.initial_free.*.<type>
stats.timers.<app>.narrow.network.all.<type> (5) = sum stats.timers.<app>.narrow.network.*.<type>
# Do the same for unnarrow times
stats.timers.<app>.unnarrow.initial_core.all.<type> (5) = sum stats.timers.<app>.unnarrow.initial_core.*.<type>
stats.timers.<app>.unnarrow.initial_free.all.<type> (5) = sum stats.timers.<app>.unnarrow.initial_free.*.<type>
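
As a reading aid: a rough Python sketch of how an aggregator rule like the
cache-hit line above maps an incoming metric onto its aggregate name
(illustrative only, not carbon's actual implementation; the sample metric
name is made up):

import re

RULE = "stats.<app>.cache.all.hit (5) = sum stats.<app>.cache.*.hit"

def match_rule(rule, metric):
    # Split "output_template (frequency) = method input_pattern".
    lhs, rhs = rule.split("=")
    output_template, frequency = re.match(r"\s*(\S+)\s+\((\d+)\)", lhs).groups()
    method, input_pattern = rhs.split()
    # <field> captures one dot-free component; * matches one without capturing.
    pattern = input_pattern.replace(".", r"\.").replace("*", "[^.]+")
    pattern = re.sub(r"<(\w+)>", r"(?P<\1>[^.]+)", pattern)
    m = re.fullmatch(pattern, metric)
    if m is None:
        return None
    # Fill the captured fields back into the output template.
    output = re.sub(r"<(\w+)>", lambda f: m.group(f.group(1)), output_template)
    return output, int(frequency), method

print(match_rule(RULE, "stats.zulip.cache.get_realm.hit"))
# ('stats.zulip.cache.all.hit', 5, 'sum')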

View File

@@ -1,280 +0,0 @@
[cache]
# Configure carbon directories.
#
# OS environment variables can be used to tell carbon where graphite is
# installed, where to read configuration from and where to write data.
#
# GRAPHITE_ROOT - Root directory of the graphite installation.
# Defaults to ../
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives).
# Defaults to $GRAPHITE_ROOT/conf/
# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
# Defaults to $GRAPHITE_ROOT/storage/
#
# To change other directory paths, add settings to this file. The following
# configuration variables are available with these default values:
#
# STORAGE_DIR = $GRAPHITE_STORAGE_DIR
# LOCAL_DATA_DIR = STORAGE_DIR/whisper/
# WHITELISTS_DIR = STORAGE_DIR/lists/
# CONF_DIR = STORAGE_DIR/conf/
# LOG_DIR = STORAGE_DIR/log/
# PID_DIR = STORAGE_DIR/
#
# For FHS style directory structures, use:
#
# STORAGE_DIR = /var/lib/carbon/
# CONF_DIR = /etc/carbon/
# LOG_DIR = /var/log/carbon/
# PID_DIR = /var/run/
#
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =
# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf
# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 500
# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like "inf" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = 50
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003
# Set this to True to enable the UDP listener. By default this is off
# because it is very common to run multiple carbon daemons and managing
# another (rarely used) port for every carbon instance is not fun.
ENABLE_UDP_LISTENER = False
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004
# Per security concerns outlined in Bug #817247 the pickle receiver
# will use a more secure and slightly less efficient unpickler.
# Set this to True to revert to the old-fashioned insecure unpickler.
USE_INSECURE_UNPICKLER = False
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
# Set this to False to drop datapoints received after the cache
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
# over which metrics are received will temporarily stop accepting
# data until the cache size falls below 95% MAX_CACHE_SIZE.
USE_FLOW_CONTROL = True
# By default, carbon-cache will log every whisper update. This can be excessive and
# degrade performance if logging on the same volume as the whisper data is stored.
LOG_UPDATES = False
# On some systems it is desirable for whisper to write synchronously.
# Set this option to True if you'd like to try this. Basically it will
# shift the onus of buffering writes from the kernel into carbon's cache.
WHISPER_AUTOFLUSH = False
# By default new Whisper files are created pre-allocated with the data region
# filled with zeros to prevent fragmentation and speed up contiguous reads and
# writes (which are common). Enabling this option will cause Whisper to create
# the file sparsely instead. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE but may have longer term performance implications
# depending on the underlying storage configuration.
# WHISPER_SPARSE_CREATE = False
# Enabling this option will cause Whisper to lock each Whisper file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files
# WHISPER_LOCK_WRITES = False
# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False
# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
# Enable AMQP if you want to receive metrics using an AMQP broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False
# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# AMQP_METRIC_NAME_IN_BODY = False
# The manhole interface allows you to SSH into the carbon daemon
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
# something like time.sleep() in the interpreter, the whole process
# will sleep! This is *extremely* helpful in debugging, assuming
# you are familiar with the code. If you are not, please don't
# mess with this, you are asking for trouble :)
#
# ENABLE_MANHOLE = False
# MANHOLE_INTERFACE = 127.0.0.1
# MANHOLE_PORT = 7222
# MANHOLE_USER = admin
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=
# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #
# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from [carbon] section.
# You can then specify the --instance=b option to manage this instance
[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014
# To use consistent hashing instead of the user defined relay-rules.conf,
# change this to:
# RELAY_METHOD = consistent-hashing
RELAY_METHOD = rules
# If you use consistent-hashing you may want to add redundancy
# of your data by replicating every datapoint to more than
# one machine.
REPLICATION_FACTOR = 1
# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
#
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
# must be defined in this list
DESTINATIONS = 127.0.0.1:2004
# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
MAX_QUEUE_SIZE = 10000
# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True
# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False
# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2023
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024
# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
DESTINATIONS = 127.0.0.1:2004
# If you want to add redundancy to your data by replicating every
# datapoint to more than one machine, increase this.
REPLICATION_FACTOR = 1
# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000
# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True
# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
# This defines how many datapoints the aggregator remembers for
# each metric. Aggregation only happens for datapoints that fall in
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
MAX_AGGREGATION_INTERVALS = 5
# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# USE_WHITELIST = False
# By default, carbon itself will log statistics (such as a count,
# metricsReceived) with the top level prefix of 'carbon' at an interval of 60
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
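
For context on the ports above: statsd flushes into the aggregator's line
receiver (2023), and anything can feed carbon directly over the plaintext
protocol on LINE_RECEIVER_PORT 2003, one "metric value timestamp" line per
datapoint. A minimal sketch, assuming a carbon daemon listening locally:

import socket
import time

# One datapoint per line: "<metric path> <value> <unix timestamp>\n".
with socket.create_connection(("127.0.0.1", 2003)) as sock:
    sock.sendall(b"stats.example.metric 1.0 %d\n" % int(time.time()))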

View File

@@ -1,3 +0,0 @@
#!/usr/bin/env bash
# Mirror the graphite data volume to its backup volume (run daily from cron).
rsync -avz /srv/graphite/ /mnt/graphite-backup

View File

@@ -1,16 +0,0 @@
import os, sys
sys.path.append('/opt/graphite/webapp')
os.environ['DJANGO_SETTINGS_MODULE'] = 'graphite.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
# READ THIS
# Initializing the search index can be very expensive, please include
# the WSGIImportScript directive pointing to this script in your vhost
# config to ensure the index is preloaded before any requests are handed
# to the process.
from graphite.logger import log
log.info("graphite.wsgi - pid %d - reloading search index" % os.getpid())
import graphite.metrics.search

View File

@@ -1,13 +0,0 @@
TIME_ZONE = "America/New_York"
ALLOWED_HOSTS = ['graphite.humbughq.com', 'graphite.zulip.net', 'stats1.zulip.net']
DATABASES = {
'default': {
'NAME': '/opt/graphite/storage/graphite.db',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}

View File

@@ -1,11 +0,0 @@
#!/bin/sh
# One-time setup: format and mount the graphite data and backup volumes.
mkdir -p /srv/graphite
mkfs.ext4 /dev/xvdb
echo "/dev/xvdb /srv/graphite ext4 noatime,defaults,barrier=0 1 1" >> /etc/fstab
mount /srv/graphite
mkdir -p /mnt/graphite-backup
mkfs.ext4 /dev/xvdf1
echo "/dev/xvdf1 /mnt/graphite-backup ext4 noatime,defaults,barrier=0 1 1" >> /etc/fstab
mount /mnt/graphite-backup

View File

@@ -1,31 +0,0 @@
# Example configuration from
# https://gist.github.com/tristanbes/4046457#file-example-sh
[min]
pattern = \.min$
xFilesFactor = 0.1
aggregationMethod = min
[max]
pattern = \.max$
xFilesFactor = 0.1
aggregationMethod = max
[sum]
pattern = \.sum$
xFilesFactor = 0
aggregationMethod = sum
[count]
pattern = \.count$
xFilesFactor = 0
aggregationMethod = sum
[count_legacy]
pattern = ^stats_counts.*
xFilesFactor = 0
aggregationMethod = sum
[default_average]
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average
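
For context: xFilesFactor is the fraction of datapoints in a rollup window
that must be non-null for the rollup to be written at all, and
aggregationMethod picks the function. A simplified Python sketch of one
downsampling step (illustrative, not whisper's actual code):

def rollup(points, method, x_files_factor):
    known = [p for p in points if p is not None]
    if not known or len(known) / len(points) < x_files_factor:
        return None  # too sparse: record the rollup as unknown
    return {"min": min, "max": max, "sum": sum,
            "average": lambda vs: sum(vs) / len(vs)}[method](known)

print(rollup([3.0, None], "sum", 0))                    # 3.0
print(rollup([3.0, None, None, None], "average", 0.3))  # None: only 25% known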

View File

@@ -1,26 +0,0 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds.
#
# [name]
# pattern = regex
# retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
# statsd specific
[stats]
pattern = ^stats.*
#retentions = 10:2160,60:10080,600:262974
# 5s data for 6hr
# 10s data for 12hr
# 1min data for 2 weeks
# 10min data for 5 years
retentions = 5s:6h,10s:12h,1min:14d,10min:5y
# Carbon's internal metrics. This entry should match what is specified in
# CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings
[carbon]
pattern = ^carbon\.
retentions = 60:90d
[default_1min_for_1day]
pattern = .*
retentions = 60s:1d
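
For scale, the [stats] retentions above imply a fixed per-metric file size; a
back-of-the-envelope Python sketch, assuming whisper's roughly 12 bytes per
datapoint and ignoring header overhead:

UNITS = {"s": 1, "min": 60, "h": 3600, "d": 86400, "y": 31536000}

def seconds(token):  # "5s" -> 5, "1min" -> 60, "14d" -> 1209600
    digits = "".join(c for c in token if c.isdigit())
    return int(digits) * UNITS[token[len(digits):] or "s"]

total = 0
for archive in "5s:6h,10s:12h,1min:14d,10min:5y".split(","):
    step, span = archive.split(":")
    total += seconds(span) // seconds(step)

print("%d points, ~%.1f MiB per metric" % (total, total * 12 / 2 ** 20))
# 291600 points, ~3.3 MiB per metric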

View File

@@ -1,61 +0,0 @@
---
graphiti_base_url: https://stats1.zulip.net/graphiti/
graphite_base_url: https://graphiti:xxxxxxxxxxxxxxxxxx@stats1.zulip.net:444/
graphite_userpw: "graphiti:xxxxxxxxxxxxxxxxxx"
graphite_auth: :digest
graphite_cert: "/home/zulip/graphiti/humbughq_cert_internal.pem"
#graphite_base_url: https://user:pass@graphite01.pp.local
redis_url: localhost:6978:1/graphiti
tmp_dir: /tmp
fonts:
- DroidSans
- DejaVuSans
auto_refresh:
enabled: true # checked by default?
interval: 120 # seconds
default_options:
title: "New Graph"
from: -7d
font: DroidSans
fontSize: 10
thickness: 2
bgcolor: "#FFFFFF"
fgcolor: "#333333"
majorGridLineColor: "#ADADAD"
minorGridLineColor: "#E5E5E5"
default_metrics:
- "stats.foobar"
metric_prefix: "stats"
# Configure a service for snapshotting graphs. Current options are
# s3 (amazon s3) and fs (filesystem)
snapshots:
# for s3 you need to provide `bucket`, `access_key_id`, and `secret_access_key`
#
# service: s3
# bucket: mysnapshots
# access_key_id: BLAH
# secret_access_key: BLAHBLAH
# for local filesystem you need to provide a dir to save the images
# and the public route to that dir
#
# service: fs
# dir: public/storage
# public_host: http://graphiti.local/storage
# These are options that are passed to Pony
# https://github.com/benprew/pony
# in `to:` SLUG gets replaced with the slug of the dashboard being sent
reports:
from: "Stampy <stampy@paperlesspost.com>"
to: "graphiti+SLUG@paperlesspost.com"
via: smtp
via_options:
address: 'smtp.gmail.com'
port: 587
authentication: plain
enable_starttls_auto: true
user_name: "stampy@paperlesspost.com"
password: "PASSWORD"
snapshots:
service: none
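
For context: graphite_base_url points graphiti at Graphite's /render API
behind the digest auth configured in the vhosts above. A minimal sketch of
the same kind of request (assumes the requests package; the credentials are
placeholders, and the target echoes the URL deleted from analytics above):

import requests
from requests.auth import HTTPDigestAuth

resp = requests.get(
    "https://stats1.zulip.net:444/render/",
    params={"from": "-7days",
            "target": "stats.gauges.staging.users.active.zulip.0_16hr"},
    auth=HTTPDigestAuth("graphiti", "xxxxxxxxxxxxxxxxxx"),  # placeholder creds
)
resp.raise_for_status()
with open("graph.png", "wb") as f:
    f.write(resp.content)  # /render returns a PNG by default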

View File

@@ -1,96 +0,0 @@
/*
Required Variables:
port: StatsD listening port [default: 8125]
Graphite Required Variables:
(Leave these unset to avoid sending stats to Graphite.
Set debug flag and leave these unset to run in 'dry' debug mode -
useful for testing statsd clients without a Graphite server.)
graphiteHost: hostname or IP of Graphite server
graphitePort: port of Graphite server
Optional Variables:
backends: an array of backends to load. Each backend must exist
by name in the directory backends/. If not specified,
the default graphite backend will be loaded.
debug: debug flag [default: false]
address: address to listen on over UDP [default: 0.0.0.0]
address_ipv6: defines if the address is an IPv4 or IPv6 address [true or false, default: false]
port: port to listen for messages on over UDP [default: 8125]
mgmt_address: address to run the management TCP interface on
[default: 0.0.0.0]
mgmt_port: port to run the management TCP interface on [default: 8126]
dumpMessages: log all incoming messages
flushInterval: interval (in ms) to flush to Graphite
percentThreshold: for time information, calculate the Nth percentile(s)
(can be a single value or list of floating-point values)
negative values mean to use "top" Nth percentile(s) values
[%, default: 90]
keyFlush: log the most frequently sent keys [object, default: undefined]
interval: how often to log frequent keys [ms, default: 0]
percent: percentage of frequent keys to log [%, default: 100]
log: location of log file for frequent keys [default: STDOUT]
deleteIdleStats: don't send values to graphite for inactive counters, sets, gauges, or timers
as opposed to sending 0. For gauges, this unsets the gauge (instead of sending
the previous value). Can be individually overridden. [default: false]
deleteGauges : don't send values to graphite for inactive gauges, as opposed to sending the previous value [default: false]
deleteTimers: don't send values to graphite for inactive timers, as opposed to sending 0 [default: false]
deleteSets: don't send values to graphite for inactive sets, as opposed to sending 0 [default: false]
deleteCounters: don't send values to graphite for inactive counters, as opposed to sending 0 [default: false]
prefixStats: prefix to use for the statsd statistics data for this running instance of statsd [default: statsd]
applies to both legacy and new namespacing
console:
prettyprint: whether to prettyprint the console backend
output [true or false, default: true]
log: log settings [object, default: undefined]
backend: where to log: stdout or syslog [string, default: stdout]
application: name of the application for syslog [string, default: statsd]
level: log level for [node-]syslog [string, default: LOG_INFO]
graphite:
legacyNamespace: use the legacy namespace [default: true]
globalPrefix: global prefix to use for sending stats to graphite [default: "stats"]
prefixCounter: graphite prefix for counter metrics [default: "counters"]
prefixTimer: graphite prefix for timer metrics [default: "timers"]
prefixGauge: graphite prefix for gauge metrics [default: "gauges"]
prefixSet: graphite prefix for set metrics [default: "sets"]
repeater: an array of hashes of the form host: and port:
that details other statsd servers to which the received
packets should be "repeated" (duplicated to).
e.g. [ { host: '10.10.10.10', port: 8125 },
{ host: 'observer', port: 8125 } ]
repeaterProtocol: whether to use udp4 or udp6 for repeaters.
["udp4" or "udp6", default: "udp4"]
histogram: for timers, an array of mappings of strings (to match metrics) and
corresponding ordered non-inclusive upper limits of bins.
For all matching metrics, histograms are maintained over
time by writing the frequencies for all bins.
'inf' means infinity. A lower limit of 0 is assumed.
default: [], meaning no histograms for any timer.
First match wins. examples:
* histogram to only track render durations, with unequal
class intervals and catchall for outliers:
[ { metric: 'render', bins: [ 0.01, 0.1, 1, 10, 'inf'] } ]
* histogram for all timers except 'foo' related,
equal class interval and catchall for outliers:
[ { metric: 'foo', bins: [] },
{ metric: '', bins: [ 50, 100, 150, 200, 'inf'] } ]
*/
{
"graphitePort": 2023
, "graphiteHost": "localhost"
, "port": 8125
, "backends": [ "./backends/graphite" ]
, "flushInterval": 5000
}
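
For context: clients feed this daemon over UDP port 8125 using statsd's
"name:value|type" wire format. A minimal sketch (the metric names are
illustrative, shaped like the ones in aggregation-rules.conf):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"zulip.cache.get_realm.hit:1|c", ("127.0.0.1", 8125))   # counter
sock.sendto(b"zulip.endtoend.send_time:42|ms", ("127.0.0.1", 8125))  # timer, ms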

View File

@@ -1,492 +0,0 @@
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
daemonize yes
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /var/run/redis/redis-server.pid
# Accept connections on the specified port, default is 6379.
# If port 0 is specified Redis will not listen on a TCP socket.
port 6978
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
#
bind 127.0.0.1
# Specify the path for the unix socket that will be used to listen for
# incoming connections. There is no default, so Redis will not listen
# on a unix socket when not specified.
#
# unixsocket /var/run/redis/redis.sock
# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
# Set server verbosity to 'debug'
# it can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (lots of rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /var/log/redis/redis-server.log
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
# syslog-enabled no
# Specify the syslog identity.
# syslog-ident redis
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
# syslog-facility local0
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving entirely by commenting out all the "save" lines.
save 900 1
save 300 10
save 60 10000
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# Also the Append Only File will be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /var/lib/redis
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses the connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out-of-date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all kinds of commands
# except INFO and SLAVEOF.
#
slave-serve-stale-data yes
# Slaves send PINGs to the server at a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
#
# repl-ping-slave-period 10
# The following option sets a timeout for both Bulk transfer I/O timeout and
# master data or ping response timeout. The default value is 60 seconds.
#
# It is important to make sure that this value is greater than the value
# specified for repl-ping-slave-period otherwise a timeout will be detected
# every time there is low traffic between the master and the slave.
#
# repl-timeout 60
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
# Command renaming.
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess, so that it will still be available for internal-use
# tools but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default there
# is no limit, and it's up to the number of file descriptors the Redis process
# is able to open. The special value '0' means no limits.
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 128
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
# according to the eviction policy selected (see maxmemory-policy).
#
# If Redis can't remove keys according to the policy, or if the policy is
# set to 'noeviction', Redis will start to reply with errors to commands
# that would use more memory, like SET, LPUSH, and so on, and will continue
# to reply to read-only commands like GET.
#
# This option is usually useful when using Redis as an LRU cache, or to set
# a hard memory limit for an instance (using the 'noeviction' policy).
#
# WARNING: If you have slaves attached to an instance with maxmemory on,
# the size of the output buffers needed to feed the slaves is subtracted
# from the used memory count, so that network problems / resyncs will
# not trigger a loop where keys are evicted, and in turn the output
# buffer of slaves is full with DELs of keys evicted triggering the deletion
# of more keys, and so forth until the database is completely emptied.
#
# In short... if you have slaves attached it is suggested that you set a lower
# limit for maxmemory so that there is some free RAM on the system for slave
# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached? You can select among the following behaviors:
#
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key according to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
# Note: with all of these policies, Redis will return an error on write
# operations when there are no suitable keys for eviction.
#
# At the time of writing, these commands are: set setnx setex append
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
# getset mset msetnx exec sort
#
# The default is:
#
# maxmemory-policy volatile-lru
# LRU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can also select the sample
# size to check. For instance, by default Redis will check three keys and
# pick the one that was used least recently; you can change the sample size
# using the following configuration directive.
#
# maxmemory-samples 3
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. If you can live
# with the idea that the latest records will be lost if something like a crash
# happens, this is the preferred way to run Redis. If instead you care a lot
# about your data and can't afford for a single record to be lost, you should
# enable the append only mode: when this mode is enabled Redis will append
# every write operation received in the file appendonly.aof. This file will
# be read on startup in order to rebuild the full dataset in memory.
#
# Note that you can have both the async dumps and the append only file if you
# like (you have to comment the "save" statements above to disable the dumps).
# Still if append only mode is enabled Redis will load the data from the
# log file at startup ignoring the dump.rdb file.
#
# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
# log file in background when it gets too big.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
# everysec: fsync only if one second passed since the last fsync. Compromise.
#
# The default is "everysec" that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
# When the AOF fsync policy is set to always or everysec, and a background
# saving process (a background save or AOF log background rewriting) is
# performing a lot of I/O against the disk, in some Linux configurations
# Redis may block too long on the fsync() call. Note that there is no fix for
# this currently, as even performing fsync in a different thread will block
# our synchronous write(2) call.
#
# In order to mitigate this problem it's possible to use the following option
# that will prevent fsync() from being called in the main process while a
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving the durability of Redis is
# the same as "appendfsync none", that in pratical terms means that it is
# possible to lost up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# If you have latency problems turn this to "yes". Otherwise leave it as
# "no" that is the safest pick from the point of view of durability.
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (or if no rewrite happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
################################ VIRTUAL MEMORY ###############################
### WARNING! Virtual Memory is deprecated in Redis 2.4
### The use of Virtual Memory is strongly discouraged.
# Virtual Memory allows Redis to work with datasets bigger than the actual
# amount of RAM needed to hold the whole dataset in memory.
# In order to do so, frequently used keys are kept in memory while the other keys
# are swapped into a swap file, similarly to what operating systems do
# with memory pages.
#
# To enable VM just set 'vm-enabled' to yes, and set the following three
# VM parameters according to your needs.
vm-enabled no
# vm-enabled yes
# This is the path of the Redis swap file. As you can guess, swap files
# can't be shared by different Redis instances, so make sure to use a swap
# file for every redis process you are running. Redis will complain if the
# swap file is already in use.
#
# The best kind of storage for the Redis swap file (that's accessed at random)
# is a Solid State Disk (SSD).
#
# *** WARNING *** if you are using a shared hosting the default of putting
# the swap file under /tmp is not secure. Create a dir with access granted
# only to Redis user and configure Redis to create the swap file there.
vm-swap-file /var/lib/redis/redis.swap
# vm-max-memory configures the VM to use at max the specified amount of
# RAM. Everything that does not fit will be swapped on disk *if* possible, that
# is, if there is still enough contiguous space in the swap file.
#
# With vm-max-memory 0 the system will swap everything it can. Not a good
# default, just specify the max amount of RAM you can in bytes, but it's
# better to leave some margin. For instance specify an amount of RAM
# that's more or less between 60 and 80% of your free RAM.
vm-max-memory 0
# The Redis swap file is split into pages. An object can be saved using multiple
# contiguous pages, but pages can't be shared between different objects.
# So if your page is too big, small objects swapped out on disk will waste
# a lot of space. If your page is too small, there is less space in the swap
# file (assuming you configured the same number of total swap file pages).
#
# If you use a lot of small objects, use a page size of 64 or 32 bytes.
# If you use a lot of big objects, use a bigger page size.
# If unsure, use the default :)
vm-page-size 32
# Number of total memory pages in the swap file.
# Given that the page table (a bitmap of free/used pages) is kept in memory,
# every 8 pages on disk will consume 1 byte of RAM.
#
# The total swap size is vm-page-size * vm-pages
#
# With the default of 32-byte memory pages and 134217728 pages Redis will
# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
#
# It's better to use the smallest acceptable value for your application,
# but the default is large in order to work in most conditions.
vm-pages 134217728
# Max number of VM I/O threads running at the same time.
# These threads are used to read/write data from/to the swap file; since they
# also encode and decode objects from disk to memory or the reverse, a bigger
# number of threads can help with big objects even if they can't help with
# I/O itself, as the physical device may not be able to cope with many
# read/write operations at the same time.
#
# The special value of 0 turns off threaded I/O and enables the blocking
# Virtual Memory implementation.
vm-max-threads 4
############################### ADVANCED CONFIG ###############################
# Hashes are encoded in a special way (much more memory efficient) when they
# have at most a given number of elements, and the biggest element does not
# exceed a given threshold. You can configure these limits with the following
# configuration directives.
hash-max-zipmap-entries 512
hash-max-zipmap-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehash the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
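
For context: the only non-stock choices in this file are the port (6978,
matching graphiti's redis_url) and daemonization. A quick connectivity check,
assuming the redis-py package:

import redis

# db 1 per graphiti's "localhost:6978:1/graphiti" redis_url.
r = redis.Redis(host="127.0.0.1", port=6978, db=1)
print(r.ping())  # True if the server is reachable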

View File

@@ -1,54 +0,0 @@
; Supervisor config file for stats1.zulip.net
; It runs graphite, graphiti, and statsd
[program:zulip-carbon-cache]
command=/opt/graphite/bin/carbon-cache.py --debug start
priority=200 ; the relative start priority (default 999)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; whether/when to restart (default: unexpected)
stopsignal=TERM ; signal used to kill process (default TERM)
stopwaitsecs=30 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/var/log/zulip/carbon-cache.log ; stdout log path, NONE for none; default AUTO
directory=/home/zulip/
[program:zulip-carbon-aggregator]
command=/opt/graphite/bin/carbon-aggregator.py --debug start
priority=200 ; the relative start priority (default 999)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; whether/when to restart (default: unexpected)
stopsignal=TERM ; signal used to kill process (default TERM)
stopwaitsecs=30 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/var/log/zulip/carbon-aggregator.log ; stdout log path, NONE for none; default AUTO
directory=/home/zulip/
[program:zulip-statsd]
command=node stats.js /home/zulip/zulip/puppet/zulip/files/statsd/local.js
priority=200 ; the relative start priority (default 999)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; whether/when to restart (default: unexpected)
stopsignal=TERM ; signal used to kill process (default TERM)
stopwaitsecs=30 ; max num secs to wait b4 SIGKILL (default 10)
user=zulip ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/var/log/zulip/statsd.log ; stdout log path, NONE for none; default AUTO
directory=/home/zulip/statsd
[program:zulip-graphiti]
command=/home/zulip/zulip/tools/run-graphiti
autostart=true ; start at supervisord start (default: true)
autorestart=true ; whether/when to restart (default: unexpected)
stopsignal=TERM ; signal used to kill process (default TERM)
stopwaitsecs=30 ; max num secs to wait b4 SIGKILL (default 10)
user=zulip ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/var/log/zulip/graphiti.log ; stdout log path, NONE for none; default AUTO
directory=/home/zulip/graphiti
[group:zulip-stats]
programs=zulip-carbon-cache,zulip-carbon-aggregator,zulip-statsd,zulip-graphiti

View File

@@ -1,129 +0,0 @@
class zulip_ops::stats {
include zulip_ops::base
include zulip_ops::apache
include zulip::supervisor
$stats_packages = [ "libssl-dev", "zlib1g-dev", "redis-server",
# "python3-twisted", "python3-django", # missing on trusty
# "python3-django-tagging", # missing on trusty and xenial!
"python-twisted", "python-django", "python-django-tagging",
"python-carbon", "python-graphite-web", # can't find these anywhere! did this ever work?
"python3-cairo",
# "python3-whisper", # missing on trusty and xenial!
"python-cairo", "python-whisper"
]
package { $stats_packages: ensure => "installed" }
file { "/root/setup_disks.sh":
ensure => file,
owner => 'root',
group => 'root',
mode => 744,
source => 'puppet:///modules/zulip_ops/graphite/setup_disks.sh',
}
file { "/etc/cron.d/graphite_backup":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/cron.d/graphite_backup",
}
exec { "setup_disks":
command => "/root/setup_disks.sh",
creates => "/srv/graphite"
}
file { "/etc/ssl/certs/stats1.zulip.net.crt":
require => File["/etc/apache2/certs/"],
ensure => file,
owner => "root",
group => "root",
mode => 640,
source => "puppet:///modules/zulip_ops/certs/stats1.zulip.net.crt",
}
file { "/opt/graphite/conf/carbon.conf":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/graphite/carbon.conf",
}
file { "/opt/graphite/conf/aggregation-rules.conf":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/graphite/aggregation-rules.conf",
}
file { "/opt/graphite/conf/storage-aggregation.conf":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/graphite/storage-aggregation.conf",
}
file { "/opt/graphite/conf/storage-schemas.conf":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/graphite/storage-schemas.conf",
}
file { "/opt/graphite/webapp/graphite/local_settings.py":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/graphite/local_settings.py",
}
file { "/opt/graphite/conf/graphite.wsgi":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/graphite/graphite.wsgi",
}
file { "/home/zulip/graphiti/config/settings.yml":
ensure => file,
owner => "zulip",
group => "zulip",
mode => 644,
source => "puppet:///modules/zulip_ops/graphiti/settings.yml",
}
apache2site { 'graphite':
require => [File['/etc/apache2/sites-available/'],
Apache2mod['headers'], Apache2mod['ssl'],
],
ensure => present,
}
apache2site { 'graphiti':
require => [File['/etc/apache2/sites-available/'],
Apache2mod['headers'], Apache2mod['ssl'],
],
ensure => present,
}
file { "/etc/redis/redis.conf":
require => Package[redis-server],
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/statsd/redis.conf",
}
service { 'redis-server':
ensure => running,
subscribe => File['/etc/redis/redis.conf'],
}
file { "/etc/supervisor/conf.d/stats.conf":
ensure => file,
owner => "root",
group => "root",
mode => 644,
source => "puppet:///modules/zulip_ops/supervisor/conf.d/stats.conf",
}
}

View File

@@ -1,9 +0,0 @@
#!/bin/bash
# Run the graphiti Rails app under unicorn on port 8088, using rbenv's ruby.
export HOME=/home/zulip
export PATH="/home/zulip/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
cd /home/zulip/graphiti
exec bundle exec unicorn -p 8088