{ config, lib, ... }:
with lib;
with types;
let
cfg = config.components.monitor.promtail;
in
{
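# Ship the systemd journal into the monitoring stack: promtail tails the journal
# and pushes each entry to the local OpenTelemetry Collector's Loki receiver.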
options.components.monitor.promtail = {
enable = mkOption {
type = bool;
default = config.components.monitor.enable;
description = "Whether to run promtail and ship the systemd journal to the local OpenTelemetry Collector.";
};
port = mkOption {
type = int;
default = 3500;
description = "Local port on which the OpenTelemetry Collector's Loki receiver listens and to which promtail pushes its logs.";
};
};
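# Example usage (hypothetical host configuration); enable follows the global
# monitor switch by default, and the port only needs changing if 3500 is taken:
#
#   components.monitor.promtail = {
#     enable = true;
#     port = 3500;
#   };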
config = mkMerge [
(mkIf cfg.enable {
services.opentelemetry-collector.settings = {
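# Accept Loki push requests from promtail on localhost only and keep the
# timestamps promtail assigns to each entry.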
receivers.loki = {
protocols.http.endpoint = "127.0.0.1:${toString cfg.port}";
use_incoming_timestamp = true;
};
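# Feed the receiver into the collector's logs pipeline.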
service.pipelines.logs.receivers = [ "loki" ];
};
services.promtail = {
enable = true;
configuration = {
server = {
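# promtail's own HTTP server (used for /metrics and /ready); gRPC is not used by this setup.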
http_listen_port = 28183;
grpc_listen_port = 0;
};
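# Remember how far into the journal we have read across restarts.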
positions.filename = "/var/cache/promtail/positions.yaml";
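# Push every scraped line to the collector's Loki receiver configured above.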
clients = [
{ url = "http://127.0.0.1:${toString cfg.port}/loki/api/v1/push"; }
];
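# Tail the systemd journal; json = true forwards the full journal entry so the
# pipeline below can pick individual fields out of it.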
scrape_configs = [
{
job_name = "journal";
journal = {
json = true;
max_age = "12h";
labels.job = "systemd-journal";
};
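# Stages run in order: extract fields from the JSON entry, derive unit and
# coredump labels, collapse per-session scopes, then output the bare message.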
pipeline_stages = [
{
# Run `journalctl -o json | jq` to see these fields.
json.expressions = {
transport = "_TRANSPORT";
unit = "_SYSTEMD_UNIT";
msg = "MESSAGE";
priority = "PRIORITY";
facility = "SYSLOG_FACILITY";
boot_id = "_BOOT_ID";
instance = "_HOSTNAME";
# coredump
coredump_cgroup = "COREDUMP_CGROUP";
coredump_exe = "COREDUMP_EXE";
coredump_cmdline = "COREDUMP_CMDLINE";
coredump_uid = "COREDUMP_UID";
coredump_gid = "COREDUMP_GID";
coredump_unit = "COREDUMP_UNIT"; # referenced by the coredump_unit label stage below
};
}
{
# Set the unit, falling back to the transport (e.g. audit or kernel) when no unit is present
template = {
source = "unit";
template = "{{if .unit}}{{.unit}}{{else}}{{.transport}}{{end}}";
};
}
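# For coredump entries, expose the crashing unit as its own label.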
{ labels.coredump_unit = "coredump_unit"; }
{
# Normalize session IDs (session-1234.scope -> session.scope) to limit the number of label values
replace = {
source = "unit";
expression = "^(session-\\d+\\.scope)$";
replace = "session.scope";
};
}
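# Promote the (normalized) unit to a label.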
{ labels.unit = "unit"; }
{
# Emit the plain log message instead of the raw JSON journal entry
output.source = "msg";
}
];
}
];
};
};
})
];
}