refactor otlp
This commit is contained in:
parent fce4a39b94
commit 661c350544
3 changed files with 64 additions and 182 deletions
@@ -1,19 +1,40 @@
 { pkgs, config, ... }:
+let
+  telegraf_sink = 8088;
+  prometheus_port = 8090;
+in
 {

-  services.opentelemetry-collector = {
-    enable = true;
-    package = pkgs.unstable.opentelemetry-collector-contrib;
-    settings = {
-      receivers = {
-        # provide a influxdb sink
-        influxdb = {
-          endpoint = "127.0.0.1:8088";
+  imports = [
+    # telemetry sink
+    {
+      services.opentelemetry-collector.settings = {
+        receivers.influxdb.endpoint = "127.0.0.1:${toString telegraf_sink }";
+        service.pipelines.metrics.receivers = [ "influxdb" ];
       };
+      services.telegraf.extraConfig.outputs.influxdb_v2.urls = [ "http://127.0.0.1:${toString telegraf_sink}" ];
+    }

-        # scrape opentelemetry-colectors metrics
-        prometheus.config.scrape_configs = [
+    # prometheus export
+    {
+      services.opentelemetry-collector.settings = {
+        exporters.prometheus.endpoint = "127.0.0.1:${toString prometheus_port}";
+        service.pipelines.metrics.exporters = [ "prometheus" ];
+      };
+      services.prometheus.scrapeConfigs = [
+        {
+          job_name = "opentelemetry";
+          metrics_path = "/metrics";
+          scrape_interval = "10s";
+          static_configs = [{ targets = [ "localhost:${toString prometheus_port}" ]; }];
+        }
+      ];
+    }

+    # todo : move to netdata component
+    # netdata sink
+    {
+      services.opentelemetry-collector.settings.receivers.prometheus.config.scrape_configs = [
         {
           job_name = "netdata";
           scrape_interval = "10s";
@@ -27,6 +48,18 @@
           };
         }];
       }
+      ];
+    }
+  ];
+
+  services.opentelemetry-collector = {
+    enable = true;
+    package = pkgs.unstable.opentelemetry-collector-contrib;
+    settings = {
+      receivers = {
+        # scrape opentelemetry-colectors metrics
+        prometheus.config.scrape_configs = [
+          # todo: this should be collected another way (opentelemetry internal?)
          {
            job_name = "otelcol";
            scrape_interval = "10s";
@@ -45,6 +78,7 @@
           }
         ];
       }
+
       {
         job_name = "node";
         static_configs = [{
@@ -59,25 +93,24 @@
         ];
       };

-      exporters = {
-        # provide prometheus sink under `/metrics` to
-        prometheus = {
-          endpoint = "127.0.0.1:8090";
-        };
-        otlp = {
+      # ship to chungus
+      exporters.otlp = {
+        # todo : move this to orbi and route from orbi to chungus
        endpoint = "10.100.0.2:4317"; # chungus
        tls.insecure = true;
      };
-      };

      service = {
        pipelines.metrics = {
-          receivers = [ "influxdb" "prometheus" ];
-          exporters = [ "prometheus" "otlp" ];
+          receivers = [ "prometheus" ];
+          exporters = [ "otlp" ];
        };

+        # todo : this should be automatically be collected
        # open telemetries own metrics?
        telemetry.metrics.address = "0.0.0.0:8100";
      };

    };
  };
 }
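The shape the new hunks converge on is a list of small inline modules under imports, each one pairing a collector receiver or exporter with the service that feeds or reads it, while the main collector block keeps only its own scraping and the otlp export. A minimal sketch of that wiring, reduced to the telegraf → influxdb-receiver → otlp path (the ports, the chungus address, and the use of the contrib distribution are taken from the hunks above; everything else is trimmed, so treat it as an illustration rather than the exact committed file):

{ pkgs, ... }:
let
  telegraf_sink = 8088; # telegraf writes here, the collector listens here
in
{
  imports = [
    # telemetry sink: telegraf pushes influx line protocol, the collector receives it
    {
      services.opentelemetry-collector.settings = {
        receivers.influxdb.endpoint = "127.0.0.1:${toString telegraf_sink}";
        service.pipelines.metrics.receivers = [ "influxdb" ];
      };
      services.telegraf.extraConfig.outputs.influxdb_v2.urls =
        [ "http://127.0.0.1:${toString telegraf_sink}" ];
    }
  ];

  services.opentelemetry-collector = {
    enable = true;
    # the influxdb receiver ships with the contrib distribution
    package = pkgs.opentelemetry-collector-contrib;
    settings = {
      exporters.otlp = {
        endpoint = "10.100.0.2:4317"; # chungus
        tls.insecure = true;
      };
      service.pipelines.metrics.exporters = [ "otlp" ];
    };
  };
}

Because the collector settings and the pipeline lists are ordinary module option values, each inline module can extend them independently and the NixOS module system merges the pieces into one collector configuration, which is what the receivers/exporters lists in the last hunk rely on.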
@@ -17,46 +17,6 @@
   services.prometheus = {
     checkConfig = "syntax-only";
     enable = true;
-    # keep data for 30 days
-    extraFlags = [ "--storage.tsdb.retention.time=90d" ];
-
-    ruleFiles = [
-      (pkgs.writeText "prometheus-rules.yml" (builtins.toJSON {
-        groups = [
-          {
-            name = "core";
-            rules = [
-              {
-                alert = "InstanceDown";
-                expr = "up == 0";
-                for = "5m";
-                labels.severity = "page";
-                annotations = {
-                  summary = "Instance {{ $labels.instance }} down";
-                  description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.";
-                };
-              }
-            ];
-          }
-          # todo : move this to open telemetry
-          {
-            name = "home-assistant";
-            rules = [
-              {
-                record = "home_open_window_sum";
-                expr = ''sum( homeassistant_binary_sensor_state{entity=~"binary_sensor\\.window_02_contact|binary_sensor\\.window_03_contact|binary_sensor\\.window_04_contact|binary_sensor\\.window_05_contact|binary_sensor\\.window_06_contact|binary_sensor\\.window_07_contact"} )'';
-              }
-            ] ++ (map
-              (number:
-                {
-                  record = "home_at_least_n_windows_open";
-                  expr = ''home_open_window_sum >= bool ${toString number}'';
-                  labels.n = number;
-                }) [ 1 2 3 ]);
-          }
-        ];
-      }))
-    ];
-
     exporters = {
       node = {
@@ -66,52 +26,5 @@
       };
     };
-
-    scrapeConfigs = [
-      {
-        job_name = "opentelemetry";
-        metrics_path = "/metrics";
-        scrape_interval = "10s";
-        static_configs = [{ targets = [ "localhost:8090" ]; }];
-      }
-      #{
-      # job_name = "netdata";
-      # metrics_path = "/api/v1/allmetrics";
-      # params.format = [ "prometheus" ];
-      # scrape_interval = "5s";
-      # static_configs = [
-      #   {
-      #     targets = [ "localhost:19999" ];
-      #     labels = {
-      #       service = "netdata";
-      #       server = config.networking.hostName;
-      #     };
-      #   }
-      # ];
-      #}
-      #{
-      # job_name = "node";
-      # static_configs = [{
-      #   targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
-      #   labels = {
-      #     service = "node-exporter";
-      #     server = config.networking.hostName;
-      #   };
-      # }];
-      #}
-      #{
-      # # see https://www.home-assistant.io/integrations/prometheus/
-      # job_name = "home-assistant";
-      # scrape_interval = "60s";
-      # metrics_path = "/api/prometheus";
-      # bearer_token_file = toString config.sops.secrets.hass_long_term_token.path;
-      # static_configs = [{
-      #   targets = [ "localhost:8123" ];
-      #   labels = {
-      #     service = "hass";
-      #     server = config.networking.hostName;
-      #   };
-      # }];
-      #}
-    ];
   };
 }
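The deletions above lean on the fact that services.prometheus.scrapeConfigs (and ruleFiles) are list-typed NixOS options, so any module can contribute entries and the values are concatenated; the "opentelemetry" scrape job removed here is the one re-added from the first file, now derived from prometheus_port. A minimal sketch of two inline modules feeding the same option (a toy layout for illustration, not the committed file structure):

{ ... }:
{
  imports = [
    # the prometheus module keeps only the server itself
    { services.prometheus.enable = true; }

    # the otlp module owns the scrape job for the collector it configures;
    # both list values are merged into one scrape_configs
    {
      services.prometheus.scrapeConfigs = [{
        job_name = "opentelemetry";
        metrics_path = "/metrics";
        scrape_interval = "10s";
        static_configs = [{ targets = [ "localhost:8090" ]; }];
      }];
    }
  ];
}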
@@ -1,29 +1,11 @@
 { config, pkgs, ... }:
-let
-  urls = [
-    { url = "https://bitwarden.ingolf-wagner.de"; path = ""; }
-    { url = "https://flix.ingolf-wagner.de"; path = "web/index.html"; }
-    { url = "https://git.ingolf-wagner.de"; path = ""; }
-    { url = "https://ingolf-wagner.de"; path = ""; }
-    { url = "https://nextcloud.ingolf-wagner.de"; path = "login"; }
-    { url = "https://tech.ingolf-wagner.de"; path = ""; }
-    { url = "https://matrix.ingolf-wagner.de"; path = ""; }
-  ];
-
-in
 {

   systemd.services.telegraf.path = [ pkgs.inetutils ];

   services.telegraf = {
     enable = true;
     extraConfig = {
-      #outputs.prometheus_client = {
-      # listen = ":9273";
-      # metric_version = 2;
-      #};
-      outputs.influxdb_v2 = {
-        urls = [ "http://127.0.0.1:8088" ];
-      };
-
       global_tags = {
         service = "telegraf";
@@ -34,58 +16,12 @@ in
       inputs = {
         cpu = { };
         diskio = { };
-        smart.attributes = true;
-        x509_cert = [{
-          sources = (map (url: "${url.url}:443") urls);
-          interval = "30m"; # agent.interval = "10s" is default
-        }];
-        http_response =
-          let fullUrls = map ({ url, path }: "${url}/${path}") urls;
-          in [{ urls = fullUrls; }];
         processes = { };
         system = { };
         systemd_units = { };
-        internet_speed.interval = "10m";
-        nginx.urls = [ "http://localhost/nginx_status" ];
         ping = [{ urls = [ "10.100.0.1" ]; }]; # actually important to make pepe visible over wireguard
       };
     };
   };
-
-  # todo : do this prometheus
-  services.prometheus.ruleFiles = [
-    (pkgs.writeText "telegraf.yml" (builtins.toJSON {
-      groups = [
-        {
-          name = "telegraf";
-          rules = [
-            {
-              alert = "HttpResponseNotOk";
-              expr = "0 * (http_response_http_response_code != 200) + 1";
-              for = "5m";
-              labels.severity = "page";
-              annotations = {
-                summary = "{{ $labels.exported_server }} does not return Ok";
-                description = "{{ $labels.exported_server }} does not return Ok for more than 5 minutes";
-              };
-            }
-            {
-              alert = "CertificatExpires";
-              expr = ''x509_cert_expiry{issuer_common_name="R3"} < ${toString (60 * 60 * 24 * 5)}'';
-              for = "1d";
-              labels.severity = "page";
-              annotations = {
-                summary = "{{ $labels.san }} does Expire Soon";
-                description = "{{ $labels.san }} does expire in less than 5 days";
-              };
-            }
-          ];
-        }
-      ];
-    }))
-  ];
-
 }
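After these hunks the telegraf module only declares local inputs: its single output (the influxdb_v2 URL pointing at the collector) is now set from the otlp module via telegraf_sink, and the http_response/x509_cert probes together with their alert rules are dropped. Roughly, the file then reads as below (assembled from the surviving context lines, so an approximation of the result rather than a verbatim copy):

{ config, pkgs, ... }:
{
  # inetutils provides the ping binary used by the ping input
  systemd.services.telegraf.path = [ pkgs.inetutils ];

  services.telegraf = {
    enable = true;
    extraConfig = {
      global_tags = {
        service = "telegraf";
        # (further tags elided in the diff)
      };
      inputs = {
        cpu = { };
        diskio = { };
        processes = { };
        system = { };
        systemd_units = { };
        ping = [{ urls = [ "10.100.0.1" ]; }]; # actually important to make pepe visible over wireguard
      };
    };
  };
}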