kandra: Use generic "vector" process, not dedicated "akamai" process.
This makes the Vector configuration extensible, so that it can be used for more than just ingesting Akamai logs.
parent 6e4895b05f
commit 60759ab5fb
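
With the Vector configuration now assembled from concat fragments, other log pipelines can plug into the same Vector process the way the Akamai class in this diff does. Below is a minimal sketch of such a consumer; the nginx names and template are hypothetical, and only kandra::vector, its concat target, and the order-50/order-90 convention come from this commit:

    # Hypothetical additional consumer of the shared Vector config (sketch only).
    class kandra::prometheus::nginx_logs {
      include kandra::vector

      # This pipeline's sources and transforms go in at order '50'.
      concat::fragment { 'vector_nginx':
        target  => $kandra::vector::conf,
        order   => '50',
        content => template('kandra/vector_nginx.toml.template.erb'),  # hypothetical template
      }

      # Append this pipeline's metrics to the prometheus_exporter "inputs"
      # list, which is assembled between order '89' and order '99'.
      concat::fragment { 'vector_nginx_export':
        target  => $kandra::vector::conf,
        content => ',"nginx_logs2metrics*"',
        order   => '90',
      }
    }
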
@@ -5,33 +5,19 @@ class kandra::prometheus::akamai {
   include kandra::vector
-  include zulip::supervisor
 
-  $bin = $kandra::vector::bin
-  $conf = '/etc/vector.toml'
   $pipelines = {
     'static' => zulipsecret('secrets', 'akamai_static_sqs_url', ''),
     'realm'  => zulipsecret('secrets', 'akamai_realm_sqs_url', ''),
   }
 
-  file { $conf:
-    ensure  => file,
-    owner   => 'root',
-    group   => 'root',
-    mode    => '0644',
-    content => template('kandra/vector.toml.template.erb'),
+  concat::fragment { 'vector_akamai':
+    target  => $kandra::vector::conf,
+    order   => '50',
+    content => template('kandra/vector_akamai.toml.template.erb'),
   }
-  file { "${zulip::common::supervisor_conf_dir}/prometheus_akamai_exporter.conf":
-    ensure  => file,
-    require => [
-      User[zulip],
-      Package[supervisor],
-      File['/etc/vector.toml'],
-      File[$bin],
-    ],
-    before  => Exec['Cleanup vector'],
-    owner   => 'root',
-    group   => 'root',
-    mode    => '0644',
-    content => template('kandra/supervisor/conf.d/prometheus_akamai_exporter.conf.template.erb'),
-    notify  => Service[supervisor],
+
+  concat::fragment { 'vector_akamai_export':
+    target  => $kandra::vector::conf,
+    content => ',"akamai_logs2metrics*"',
+    order   => '90',
   }
 }
@@ -4,6 +4,7 @@ class kandra::vector {
   $version = $zulip::common::versions['vector']['version']
   $dir = "/srv/zulip-vector-${version}"
   $bin = "${dir}/bin/vector"
+  $conf = '/etc/vector.toml'
 
   $arch = $facts['os']['architecture'] ? {
     'amd64' => 'x86_64',
@@ -17,4 +18,56 @@
     bin           => [$bin],
     cleanup_after => [Service[supervisor]],
   }
+  file { "${zulip::common::supervisor_conf_dir}/vector.conf":
+    ensure  => file,
+    require => [
+      User[zulip],
+      Package[supervisor],
+      Concat[$conf],
+      File[$bin],
+    ],
+    before  => Exec['Cleanup vector'],
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0644',
+    content => template('kandra/supervisor/conf.d/vector.conf.template.erb'),
+    notify  => Service[supervisor],
+  }
+
+  exec { 'reload vector':
+    command     => 'supervisorctl signal HUP vector',
+    require     => Service[supervisor],
+    refreshonly => true,
+  }
+  concat { $conf:
+    ensure => present,
+    owner  => 'root',
+    group  => 'root',
+    mode   => '0644',
+    notify => Exec['reload vector'],
+  }
+  # All of the pipelines need to be included in the Prometheus
+  # exporter; they insert their strings at order 90, with a leading
+  # comma, in the middle of the "inputs" block
+  $vector_export = @(VECTOR)
+    [sources.vector_metrics]
+    type = "internal_metrics"
+    [sinks.prometheus_exporter]
+    type = "prometheus_exporter"
+    address = "0.0.0.0:9081"
+    flush_period_secs = 120
+    suppress_timestamp = true
+    buckets = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+    inputs = ["vector_metrics"
+    |-VECTOR
+  concat::fragment { 'vector_export_prefix':
+    target  => $conf,
+    content => $vector_export,
+    order   => '89',
+  }
+  concat::fragment { 'vector_export_suffix':
+    target  => $conf,
+    content => "]\n",
+    order   => '99',
+  }
 }
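
The comment in kandra::vector above describes how the exporter's "inputs" list gets built: the prefix fragment (order '89') deliberately ends mid-line with inputs = ["vector_metrics", each pipeline appends its own entry with a leading comma at order '90', and the suffix fragment (order '99') closes the bracket. As a rough sketch, once the Akamai export fragment from this commit is concatenated in, the exporter portion of the rendered /etc/vector.toml would come out approximately as:

    [sources.vector_metrics]
    type = "internal_metrics"
    [sinks.prometheus_exporter]
    type = "prometheus_exporter"
    address = "0.0.0.0:9081"
    flush_period_secs = 120
    suppress_timestamp = true
    buckets = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
    inputs = ["vector_metrics","akamai_logs2metrics*"]
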
@@ -1,8 +0,0 @@
-[program:prometheus_akamai_exporter]
-command=<%= @bin %> --config <%= @conf %>
-priority=10
-autostart=true
-autorestart=true
-user=zulip
-redirect_stderr=true
-stdout_logfile=/var/log/zulip/akamai_exporter.log
@@ -0,0 +1,8 @@
+[program:vector]
+command=<%= @bin %> --config <%= @conf %> --require-healthy true
+priority=10
+autostart=true
+autorestart=true
+user=zulip
+redirect_stderr=true
+stdout_logfile=/var/log/zulip/vector.log
@@ -1,6 +1,3 @@
-[sources.vector_metrics]
-type = "internal_metrics"
-
 # Akamai Datastream2 logs all accesses into AWS S3:
 # https://techdocs.akamai.com/datastream2/docs/stream-amazon-s3
 #
@@ -19,7 +16,7 @@
 sqs.poll_secs = 15
 sqs.queue_url = "<%= sqs_url %>"
 
-[transforms.parse_<%= key %>]
+[transforms.akamai_parse_<%= key %>]
 type = "remap"
 inputs = ["s3_akamai_<%= key %>"]
 source = '''
@@ -27,47 +24,39 @@
 .turnAroundTimeSec = to_int!(.turnAroundTimeMSec) / 1000.0
 '''
 
-[transforms.logs2metrics_<%= key %>]
+[transforms.akamai_logs2metrics_<%= key %>]
 type = "log_to_metric"
-inputs = ["parse_<%= key %>"]
+inputs = ["akamai_parse_<%= key %>"]
 
-[[transforms.logs2metrics_<%= key %>.metrics]]
+[[transforms.akamai_logs2metrics_<%= key %>.metrics]]
 field = "cacheStatus"
 name = "requests_cache_count"
 namespace = "akamai_<%= key %>"
 type = "counter"
-[transforms.logs2metrics_<%= key %>.metrics.tags]
+[transforms.akamai_logs2metrics_<%= key %>.metrics.tags]
 status_code = "{{statusCode}}"
 cached = "{{cacheStatus}}"
 host = "{{reqHost}}"
 
-[[transforms.logs2metrics_<%= key %>.metrics]]
+[[transforms.akamai_logs2metrics_<%= key %>.metrics]]
 field = "bytes"
 name = "requests_bytes"
 namespace = "akamai_<%= key %>"
 type = "counter"
 increment_by_value = true
-[transforms.logs2metrics_<%= key %>.metrics.tags]
+[transforms.akamai_logs2metrics_<%= key %>.metrics.tags]
 status_code = "{{statusCode}}"
 cached = "{{cacheStatus}}"
 host = "{{reqHost}}"
 
-[[transforms.logs2metrics_<%= key %>.metrics]]
+[[transforms.akamai_logs2metrics_<%= key %>.metrics]]
 field = "turnAroundTimeSec"
 name = "turnaround_time_sec"
 namespace = "akamai_<%= key %>"
 type = "histogram"
-[transforms.logs2metrics_<%= key %>.metrics.tags]
+[transforms.akamai_logs2metrics_<%= key %>.metrics.tags]
 status_code = "{{statusCode}}"
 cached = "{{cacheStatus}}"
 host = "{{reqHost}}"
 
 <% end %>
-
-[sinks.prometheus_exporter]
-type = "prometheus_exporter"
-inputs = ["vector_metrics", "logs2metrics*"]
-buckets = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
-address = "0.0.0.0:9081"
-flush_period_secs = 120
-suppress_timestamp = true