kandra: Use generic "vector" process, not dedicated "akamai" process.

This makes the Vector configuration extensible, so that it can be used
for more than just ingesting Akamai logs.
Alex Vandiver 2024-09-25 13:08:25 -04:00 committed by Tim Abbott
parent 6e4895b05f
commit 60759ab5fb
5 changed files with 78 additions and 50 deletions
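
Editor's note: to make the extension point concrete, here is a hedged sketch of how a hypothetical second pipeline could plug into the shared Vector config after this commit. The class name, fragment names, and template below are invented for illustration and are not part of the commit; the two-fragment pattern mirrors the Akamai class in the first file below.

    # Hypothetical sketch: a second log pipeline extending the shared
    # Vector config.  Names and template are illustrative only.
    class kandra::prometheus::nginx {
      include kandra::vector

      # The pipeline's sources/transforms, spliced into /etc/vector.toml.
      concat::fragment { 'vector_nginx':
        target  => $kandra::vector::conf,
        order   => '50',
        content => template('kandra/vector_nginx.toml.template.erb'),
      }

      # Register the pipeline's metrics transforms in the exporter's
      # "inputs" list; the leading comma splices this between the
      # order-89 prefix (which ends mid-list) and the order-99 "]" suffix.
      concat::fragment { 'vector_nginx_export':
        target  => $kandra::vector::conf,
        content => ',"nginx_logs2metrics*"',
        order   => '90',
      }
    }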

File: puppet/kandra/manifests/prometheus/akamai.pp

@@ -5,33 +5,19 @@ class kandra::prometheus::akamai {
   include kandra::vector
   include zulip::supervisor
 
-  $bin = $kandra::vector::bin
-  $conf = '/etc/vector.toml'
   $pipelines = {
     'static' => zulipsecret('secrets', 'akamai_static_sqs_url', ''),
     'realm'  => zulipsecret('secrets', 'akamai_realm_sqs_url', ''),
   }
-  file { $conf:
-    ensure  => file,
-    owner   => 'root',
-    group   => 'root',
-    mode    => '0644',
-    content => template('kandra/vector.toml.template.erb'),
+  concat::fragment { 'vector_akamai':
+    target  => $kandra::vector::conf,
+    order   => '50',
+    content => template('kandra/vector_akamai.toml.template.erb'),
   }
 
-  file { "${zulip::common::supervisor_conf_dir}/prometheus_akamai_exporter.conf":
-    ensure  => file,
-    require => [
-      User[zulip],
-      Package[supervisor],
-      File['/etc/vector.toml'],
-      File[$bin],
-    ],
-    before  => Exec['Cleanup vector'],
-    owner   => 'root',
-    group   => 'root',
-    mode    => '0644',
-    content => template('kandra/supervisor/conf.d/prometheus_akamai_exporter.conf.template.erb'),
-    notify  => Service[supervisor],
+  concat::fragment { 'vector_akamai_export':
+    target  => $kandra::vector::conf,
+    content => ',"akamai_logs2metrics*"',
+    order   => '90',
   }
 }

File: puppet/kandra/manifests/vector.pp

@@ -4,6 +4,7 @@ class kandra::vector {
   $version = $zulip::common::versions['vector']['version']
   $dir = "/srv/zulip-vector-${version}"
   $bin = "${dir}/bin/vector"
+  $conf = '/etc/vector.toml'
 
   $arch = $facts['os']['architecture'] ? {
     'amd64' => 'x86_64',
@@ -17,4 +18,56 @@ class kandra::vector {
     bin           => [$bin],
     cleanup_after => [Service[supervisor]],
   }
+
+  file { "${zulip::common::supervisor_conf_dir}/vector.conf":
+    ensure  => file,
+    require => [
+      User[zulip],
+      Package[supervisor],
+      Concat[$conf],
+      File[$bin],
+    ],
+    before  => Exec['Cleanup vector'],
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0644',
+    content => template('kandra/supervisor/conf.d/vector.conf.template.erb'),
+    notify  => Service[supervisor],
+  }
+
+  exec { 'reload vector':
+    command     => 'supervisorctl signal HUP vector',
+    require     => Service[supervisor],
+    refreshonly => true,
+  }
+  concat { $conf:
+    ensure => present,
+    owner  => 'root',
+    group  => 'root',
+    mode   => '0644',
+    notify => Exec['reload vector'],
+  }
+
+  # All of the pipelines need to be included in the Prometheus
+  # exporter; they insert their strings at order 90, with a leading
+  # comma, in the middle of the "inputs" block.
+  $vector_export = @(VECTOR)
+    [sources.vector_metrics]
+    type = "internal_metrics"
+    [sinks.prometheus_exporter]
+    type = "prometheus_exporter"
+    address = "0.0.0.0:9081"
+    flush_period_secs = 120
+    suppress_timestamp = true
+    buckets = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+    inputs = ["vector_metrics"
+    |- VECTOR
+  concat::fragment { 'vector_export_prefix':
+    target  => $conf,
+    content => $vector_export,
+    order   => '89',
+  }
+  concat::fragment { 'vector_export_suffix':
+    target  => $conf,
+    content => "]\n",
+    order   => '99',
+  }
 }
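
Editor's note: assembled by concat, the prefix fragment (order 89, whose heredoc ends mid-list, with its trailing newline trimmed), the Akamai registration (order 90, with its leading comma), and the suffix (order 99) should produce a tail of /etc/vector.toml like the following. This is a sketch of the expected rendered output, not text from the commit:

    [sources.vector_metrics]
    type = "internal_metrics"
    [sinks.prometheus_exporter]
    type = "prometheus_exporter"
    address = "0.0.0.0:9081"
    flush_period_secs = 120
    suppress_timestamp = true
    buckets = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
    inputs = ["vector_metrics","akamai_logs2metrics*"]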

File (deleted): puppet/kandra/templates/supervisor/conf.d/prometheus_akamai_exporter.conf.template.erb

@@ -1,8 +0,0 @@
-[program:prometheus_akamai_exporter]
-command=<%= @bin %> --config <%= @conf %>
-priority=10
-autostart=true
-autorestart=true
-user=zulip
-redirect_stderr=true
-stdout_logfile=/var/log/zulip/akamai_exporter.log

File (new): puppet/kandra/templates/supervisor/conf.d/vector.conf.template.erb

@@ -0,0 +1,8 @@
+[program:vector]
+command=<%= @bin %> --config <%= @conf %> --require-healthy true
+priority=10
+autostart=true
+autorestart=true
+user=zulip
+redirect_stderr=true
+stdout_logfile=/var/log/zulip/vector.log

File (renamed): puppet/kandra/templates/vector_akamai.toml.template.erb (was vector.toml.template.erb)

@@ -1,6 +1,3 @@
-[sources.vector_metrics]
-type = "internal_metrics"
-
 # Akamai Datastream2 logs all accesses into AWS S3:
 # https://techdocs.akamai.com/datastream2/docs/stream-amazon-s3
 #
@@ -19,7 +16,7 @@
 sqs.poll_secs = 15
 sqs.queue_url = "<%= sqs_url %>"
 
-[transforms.parse_<%= key %>]
+[transforms.akamai_parse_<%= key %>]
 type = "remap"
 inputs = ["s3_akamai_<%= key %>"]
 source = '''
@@ -27,47 +24,39 @@
 .turnAroundTimeSec = to_int!(.turnAroundTimeMSec) / 1000.0
 '''
 
-[transforms.logs2metrics_<%= key %>]
+[transforms.akamai_logs2metrics_<%= key %>]
 type = "log_to_metric"
-inputs = ["parse_<%= key %>"]
+inputs = ["akamai_parse_<%= key %>"]
 
-[[transforms.logs2metrics_<%= key %>.metrics]]
+[[transforms.akamai_logs2metrics_<%= key %>.metrics]]
 field = "cacheStatus"
 name = "requests_cache_count"
 namespace = "akamai_<%= key %>"
 type = "counter"
-[transforms.logs2metrics_<%= key %>.metrics.tags]
+[transforms.akamai_logs2metrics_<%= key %>.metrics.tags]
 status_code = "{{statusCode}}"
 cached = "{{cacheStatus}}"
 host = "{{reqHost}}"
 
-[[transforms.logs2metrics_<%= key %>.metrics]]
+[[transforms.akamai_logs2metrics_<%= key %>.metrics]]
 field = "bytes"
 name = "requests_bytes"
 namespace = "akamai_<%= key %>"
 type = "counter"
 increment_by_value = true
-[transforms.logs2metrics_<%= key %>.metrics.tags]
+[transforms.akamai_logs2metrics_<%= key %>.metrics.tags]
 status_code = "{{statusCode}}"
 cached = "{{cacheStatus}}"
 host = "{{reqHost}}"
 
-[[transforms.logs2metrics_<%= key %>.metrics]]
+[[transforms.akamai_logs2metrics_<%= key %>.metrics]]
 field = "turnAroundTimeSec"
 name = "turnaround_time_sec"
 namespace = "akamai_<%= key %>"
 type = "histogram"
-[transforms.logs2metrics_<%= key %>.metrics.tags]
+[transforms.akamai_logs2metrics_<%= key %>.metrics.tags]
 status_code = "{{statusCode}}"
 cached = "{{cacheStatus}}"
 host = "{{reqHost}}"
 <% end %>
-
-[sinks.prometheus_exporter]
-type = "prometheus_exporter"
-inputs = ["vector_metrics", "logs2metrics*"]
-buckets = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
-address = "0.0.0.0:9081"
-flush_period_secs = 120
-suppress_timestamp = true
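
Editor's note: rendered for one concrete pipeline (key = "static", from the $pipelines hash in kandra::prometheus::akamai), the ERB should expand the renamed transforms roughly as below; the point of the new akamai_ prefix is that these names match the "akamai_logs2metrics*" glob that the order-90 fragment splices into the shared exporter's inputs. Rendered names shown for illustration only:

    [transforms.akamai_parse_static]
    type = "remap"
    inputs = ["s3_akamai_static"]

    [transforms.akamai_logs2metrics_static]
    type = "log_to_metric"
    inputs = ["akamai_parse_static"]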