[sources.vector_metrics]
# Export Vector's own internal telemetry alongside the log-derived metrics.
type = "internal_metrics"

[sources.s3_akamai_static]
# Akamai DataStream 2 delivers all access logs to an AWS S3 bucket:
# https://techdocs.akamai.com/datastream2/docs/stream-amazon-s3
#
# The S3 bucket is configured to send event notifications to the SQS
# queue, which this host is allowed to read from. This consumer
# deletes the messages from the queue, and the S3 bucket is
# configured to purge old logs.
# https://vector.dev/docs/reference/configuration/sources/aws_s3/
type = "aws_s3"
region = "us-east-1"
compression = "gzip"
sqs.delete_message = true
sqs.poll_secs = 15
sqs.queue_url = "<%= @sqs_url %>"

[transforms.parsing]
# Each log line is a JSON-encoded DataStream 2 record; parse it and derive
# a turnaround time in seconds for the histogram defined below.
type = "remap"
inputs = ["s3_akamai_static"]
source = '''
. = parse_json!(string!(.message))
.turnAroundTimeSec = to_int!(.turnAroundTimeMSec) / 1000.0
'''

[transforms.logs2metrics-requests]
# Derive request counters and a latency histogram from the parsed logs.
type = "log_to_metric"
inputs = ["parsing"]

[[transforms.logs2metrics-requests.metrics]]
# Count requests, labeled by status code, cache status, and host.
field = "cacheStatus"
name = "requests_cache_count"
namespace = "akamai_static"
type = "counter"

[transforms.logs2metrics-requests.metrics.tags]
status_code = "{{statusCode}}"
cached = "{{cacheStatus}}"
host = "{{reqHost}}"

[[transforms.logs2metrics-requests.metrics]]
# Sum bytes served rather than counting events.
field = "bytes"
name = "requests_bytes"
namespace = "akamai_static"
type = "counter"
increment_by_value = true

[transforms.logs2metrics-requests.metrics.tags]
status_code = "{{statusCode}}"
cached = "{{cacheStatus}}"
host = "{{reqHost}}"

[[transforms.logs2metrics-requests.metrics]]
# Distribution of turnaround times; bucket boundaries are set on the sink.
field = "turnAroundTimeSec"
name = "turnaround_time_sec"
namespace = "akamai_static"
type = "histogram"

[transforms.logs2metrics-requests.metrics.tags]
status_code = "{{statusCode}}"
cached = "{{cacheStatus}}"
host = "{{reqHost}}"

[sinks.prometheus_exporter]
type = "prometheus_exporter"
inputs = ["vector_metrics", "logs2metrics*"]
# Histogram bucket boundaries, in seconds.
buckets = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0]
address = "0.0.0.0:9081"
flush_period_secs = 120
suppress_timestamp = true
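
# For reference, the exporter on 0.0.0.0:9081 publishes series shaped roughly
# like the following: Vector prefixes each name with its namespace, and the
# histogram expands into _bucket/_sum/_count series. The label and sample
# values here are illustrative only, not taken from a real scrape:
#
#   akamai_static_requests_cache_count{cached="1",host="www.example.com",status_code="200"} 42
#   akamai_static_requests_bytes{cached="1",host="www.example.com",status_code="200"} 1048576
#   akamai_static_turnaround_time_sec_bucket{le="0.1",cached="1",host="www.example.com",status_code="200"} 17
#   akamai_static_turnaround_time_sec_sum{...} 3.2
#   akamai_static_turnaround_time_sec_count{...} 23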
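
# A minimal unit-test sketch for the "parsing" transform, using Vector's
# built-in test harness. It assumes a DataStream 2 record carrying the
# statusCode, cacheStatus, reqHost, bytes, and turnAroundTimeMSec fields used
# above; the sample values are illustrative, not from a real log. Run with:
#   vector test <path to this config>
[[tests]]
name = "parsing converts turnAroundTimeMSec to seconds"

[[tests.inputs]]
insert_at = "parsing"
type = "log"

[tests.inputs.log_fields]
message = '{"statusCode": 200, "cacheStatus": 1, "reqHost": "www.example.com", "bytes": 5124, "turnAroundTimeMSec": 250}'

[[tests.outputs]]
extract_from = "parsing"

[[tests.outputs.conditions]]
type = "vrl"
source = '''
assert_eq!(.turnAroundTimeSec, 0.25)
assert_eq!(.statusCode, 200)
'''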