# NixOS module: ship the systemd journal via promtail into a local
# opentelemetry-collector loki receiver on cfg.port.
{ config, lib, ... }:
with lib;
with types;
let
  cfg = config.components.monitor.promtail;
in
{
  options.components.monitor.promtail = {
    enable = mkOption {
      # bare `bool` from `with types;`, consistent with `int` below
      type = bool;
      default = config.components.monitor.enable;
      description = "whether to enable promtail journal shipping";
    };
    port = mkOption {
      type = int;
      default = 3500;
      description = "port to provide promtail export";
    };
  };

  # single conditional body — no mkMerge needed around one mkIf
  config = mkIf cfg.enable {

    # Collector side: accept promtail's loki-protocol HTTP pushes and
    # route them into the logs pipeline.
    services.opentelemetry-collector.settings = {
      receivers.loki = {
        protocols.http.endpoint = "127.0.0.1:${toString cfg.port}";
        use_incoming_timestamp = true;
      };
      service.pipelines.logs.receivers = [ "loki" ];
    };

    # Promtail side: read the journal, massage labels, push to the
    # collector's loki receiver on 127.0.0.1:${toString cfg.port}.
    services.promtail = {
      enable = true;
      configuration = {
        server = {
          http_listen_port = 28183;
          grpc_listen_port = 0; # gRPC disabled
        };
        positions.filename = "/var/cache/promtail/positions.yaml";
        clients = [
          { url = "http://127.0.0.1:${toString cfg.port}/loki/api/v1/push"; }
        ];
        scrape_configs = [
          {
            job_name = "journal";
            journal = {
              json = true;
              max_age = "12h";
              labels.job = "systemd-journal";
            };
            pipeline_stages = [
              {
                # journalctl -o json | jq and you'll see these
                json.expressions = {
                  transport = "_TRANSPORT";
                  unit = "_SYSTEMD_UNIT";
                  msg = "MESSAGE";
                  priority = "PRIORITY";
                  facility = "SYSLOG_FACILITY";
                  boot_id = "_BOOT_ID";
                  instance = "_HOSTNAME";
                  # coredump
                  coredump_cgroup = "COREDUMP_CGROUP";
                  coredump_exe = "COREDUMP_EXE";
                  coredump_cmdline = "COREDUMP_CMDLINE";
                  coredump_uid = "COREDUMP_UID";
                  coredump_gid = "COREDUMP_GID";
                };
              }
              {
                # Set the unit (defaulting to the transport like audit and kernel)
                template = {
                  source = "unit";
                  template = "{{if .unit}}{{.unit}}{{else}}{{.transport}}{{end}}";
                };
              }
              # NOTE(review): no "coredump_unit" value is extracted by the
              # json stage above, so this label is likely always empty —
              # confirm the intended source field (e.g. COREDUMP_UNIT).
              { labels.coredump_unit = "coredump_unit"; }
              {
                # Normalize session IDs (session-1234.scope -> session.scope)
                # to limit number of label values
                replace = {
                  source = "unit";
                  expression = "^(session-\\d+.scope)$";
                  replace = "session.scope";
                };
              }
              { labels.unit = "unit"; }
              {
                # Write the proper message instead of JSON
                output.source = "msg";
              }
            ];
          }
        ];
      };
    };
  };
}