migrate to nixos-telemetry flake

Ingolf Wagner 2024-10-16 23:18:47 +09:00
parent 180bd7ca44
commit 8deb5b98ed
No known key found for this signature in database
GPG key ID: 76BF5F1928B9618B
18 changed files with 98 additions and 641 deletions
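
The migration removes the in-repo components.monitor modules (deleted below) in favor of the external nixos-telemetry flake. Read from the renames in this diff, the options map roughly as follows (a sketch; the authoritative option set is whatever nixos-telemetry defines):

    components.monitor.enable                          -> telemetry.enable
    components.monitor.metrics.enable                  -> telemetry.metrics.enable
    components.monitor.opentelemetry.enable            -> telemetry.opentelemetry.enable
    components.monitor.opentelemetry.exporter.endpoint -> telemetry.opentelemetry.exporter.endpoint
    components.monitor.opentelemetry.receiver.endpoint -> telemetry.opentelemetry.receiver.endpoint
    components.monitor.exporters.zfs.enable            -> telemetry.prometheus.exporters.zfs.enable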

components/default.nix

@@ -5,7 +5,6 @@
     ./gui
     ./mainUser.nix
     ./media
-    ./monitor
     ./network
     ./nixos
     ./terminal

@@ -8,10 +8,9 @@ with types;
   ];
   config = {
-    components.monitor.enable = mkDefault true;
-    components.monitor.metrics.enable = mkDefault false;
-    components.monitor.opentelemetry.enable = false;
+    telemetry.enable = mkDefault true;
+    telemetry.metrics.enable = mkDefault false;
+    telemetry.opentelemetry.enable = false;
     services.journald.extraConfig = "SystemMaxUse=1G";
   };

monitor/default.nix (deleted)

@@ -1,32 +0,0 @@
{ lib, config, ... }:
with lib;
with types;
{
options.components.monitor = {
enable = mkOption {
type = bool;
default = true;
};
metrics.enable = mkOption {
type = bool;
default = config.components.monitor.enable;
};
logs.enable = mkOption {
type = bool;
default = config.components.monitor.enable;
};
};
imports = [
./logs-promtail.nix
./metrics-export-zfs.nix
./metrics-netdata.nix
./metrics-prometheus.nix
./metrics-telegraf.nix
./opentelemetry.nix
];
config = mkIf config.components.monitor.enable { };
}

monitor/logs-promtail.nix (deleted)

@@ -1,185 +0,0 @@
{ config, lib, ... }:
with lib;
with types;
let
cfg = config.components.monitor.promtail;
in
{
options.components.monitor.promtail = {
enable = mkOption {
type = lib.types.bool;
default = config.components.monitor.logs.enable;
};
port = mkOption {
type = int;
default = 3500;
description = "port to provide promtail export";
};
};
config = mkMerge [
(mkIf config.components.monitor.opentelemetry.enable {
services.opentelemetry-collector.settings = {
receivers.loki = {
protocols.http.endpoint = "127.0.0.1:${toString cfg.port}";
use_incoming_timestamp = true;
};
service.pipelines.logs.receivers = [ "loki" ];
};
})
(mkIf config.components.monitor.promtail.enable {
services.promtail = {
enable = true;
configuration = {
server.disable = true;
positions.filename = "/var/cache/promtail/positions.yaml";
clients = [
{ url = "http://127.0.0.1:${toString cfg.port}/loki/api/v1/push"; }
];
scrape_configs =
let
_replace = index: replacement: ''{{ Replace .Value "${toString index}" "${replacement}" 1 }}'';
_elseif = index: ''{{ else if eq .Value "${toString index}" }}'';
_if = index: ''{{ if eq .Value "${toString index}" }}'';
_end = ''{{ end }}'';
elseblock = index: replacement: "${_elseif index}${_replace index replacement}";
ifblock = index: replacement: "${_if index}${_replace index replacement}";
createTemplateLine =
list:
"${
concatStrings (
imap0 (
index: replacement: if index == 0 then ifblock index replacement else elseblock index replacement
) list
)
}${_end}";
in
[
{
job_name = "journal";
journal = {
json = true;
max_age = "12h";
labels.job = "systemd-journal";
};
pipeline_stages = [
{
# Set of key/value pairs of JMESPath expressions. The key will be
# the key in the extracted data while the expression will be the value,
# evaluated as a JMESPath from the source data.
json.expressions = {
# journalctl -o json | jq and you'll see these
boot_id = "_BOOT_ID";
facility = "SYSLOG_FACILITY";
facility_label = "SYSLOG_FACILITY";
instance = "_HOSTNAME";
msg = "MESSAGE";
priority = "PRIORITY";
priority_label = "PRIORITY";
transport = "_TRANSPORT";
unit = "_SYSTEMD_UNIT";
# coredump
#coredump_cgroup = "COREDUMP_CGROUP";
#coredump_exe = "COREDUMP_EXE";
#coredump_cmdline = "COREDUMP_CMDLINE";
#coredump_uid = "COREDUMP_UID";
#coredump_gid = "COREDUMP_GID";
};
}
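# For illustration, a journal record as printed by journalctl -o json
# (hypothetical values; the field names are the ones mapped above):
#   { "_BOOT_ID": "6a6e...", "SYSLOG_FACILITY": "3", "_HOSTNAME": "probe",
#     "MESSAGE": "Started foo.service", "PRIORITY": "6",
#     "_TRANSPORT": "journal", "_SYSTEMD_UNIT": "foo.service" }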
{
# Set the unit label (falling back to the transport, e.g. audit or kernel)
template = {
source = "unit";
template = "{{if .unit}}{{.unit}}{{else}}{{.transport}}{{end}}";
};
}
{
# Normalize session IDs (session-1234.scope -> session.scope) to limit number of label values
replace = {
source = "unit";
expression = "^(session-\\d+.scope)$";
replace = "session.scope";
};
}
{
# Map priority to human readable
template = {
source = "priority_label";
#template = ''{{ if eq .Value "0" }}{{ Replace .Value "0" "emerg" 1 }}{{ else if eq .Value "1" }}{{ Replace .Value "1" "alert" 1 }}{{ else if eq .Value "2" }}{{ Replace .Value "2" "crit" 1 }}{{ else if eq .Value "3" }}{{ Replace .Value "3" "err" 1 }}{{ else if eq .Value "4" }}{{ Replace .Value "4" "warning" 1 }}{{ else if eq .Value "5" }}{{ Replace .Value "5" "notice" 1 }}{{ else if eq .Value "6" }}{{ Replace .Value "6" "info" 1 }}{{ else if eq .Value "7" }}{{ Replace .Value "7" "debug" 1 }}{{ end }}'';
template = createTemplateLine [
"emergency"
"alert"
"critical"
"error"
"warning"
"notice"
"info"
"debug"
];
};
}
{
# Map facility to human readable
template = {
source = "facility_label";
template = createTemplateLine [
"kern" # Kernel messages
"user" # User-level messages
"mail" # Mail system Archaic POSIX still supported and sometimes used (for more mail(1))
"daemon" # System daemons All daemons, including systemd and its subsystems
"auth" # Security/authorization messages Also watch for different facility 10
"syslog" # Messages generated internally by syslogd For syslogd implementations (not used by systemd, see facility 3)
"lpr" # Line printer subsystem (archaic subsystem)
"news" # Network news subsystem (archaic subsystem)
"uucp" # UUCP subsystem (archaic subsystem)
"clock" # Clock daemon systemd-timesyncd
"authpriv" # Security/authorization messages Also watch for different facility 4
"ftp" # FTP daemon
"-" # NTP subsystem
"-" # Log audit
"-" # Log alert
"cron" # Scheduling daemon
"local0" # Local use 0 (local0)
"local1" # Local use 1 (local1)
"local2" # Local use 2 (local2)
"local3" # Local use 3 (local3)
"local4" # Local use 4 (local4)
"local5" # Local use 5 (local5)
"local6" # Local use 6 (local6)
"local7" # Local use 7 (local7)
];
};
}
{
# Key is REQUIRED and the name for the label that will be created.
# Value is optional and will be the name from extracted data whose value
# will be used for the value of the label. If empty, the value will be
# inferred to be the same as the key.
labels = {
boot_id = "";
facility = "";
facility_label = "";
instance = "";
priority = "";
priority_label = "";
transport = "";
unit = "";
};
}
{
# Write the proper message instead of JSON
output.source = "msg";
}
];
}
];
};
};
})
];
}

monitor/metrics-export-zfs.nix (deleted)

@@ -1,39 +0,0 @@
{
pkgs,
config,
lib,
...
}:
with lib;
with types;
{
options.components.monitor.exporters.zfs.enable = mkOption {
type = lib.types.bool;
default = config.components.monitor.metrics.enable;
};
config = mkMerge [
(mkIf config.components.monitor.exporters.zfs.enable {
services.telegraf.extraConfig.inputs.zfs = { };
services.prometheus.exporters.zfs.enable = true;
services.opentelemetry-collector.settings = {
receivers.prometheus.config.scrape_configs = [
{
job_name = "zfs";
scrape_interval = "10s";
static_configs = [
{
targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.zfs.port}" ];
}
];
}
];
service.pipelines.metrics.receivers = [ "prometheus" ];
};
})
];
}

monitor/metrics-netdata.nix (deleted)

@@ -1,40 +0,0 @@
{
lib,
pkgs,
config,
...
}:
with lib;
with types;
{
options.components.monitor.netdata = {
enable = mkOption {
type = bool;
default = config.components.monitor.metrics.enable;
};
};
config = mkIf config.components.monitor.netdata.enable {
# netdata sink
services.opentelemetry-collector.settings.receivers.prometheus.config.scrape_configs = [
{
job_name = "netdata";
scrape_interval = "10s";
metrics_path = "/api/v1/allmetrics";
params.format = [ "prometheus" ];
static_configs = [ { targets = [ "127.0.0.1:19999" ]; } ];
}
];
# https://docs.netdata.cloud/daemon/config/
services.netdata = {
enable = lib.mkDefault true;
config = {
global = {
"memory mode" = "ram";
};
};
};
};
}

monitor/metrics-prometheus.nix (deleted)

@@ -1,45 +0,0 @@
{ config, lib, ... }:
with lib;
with types;
let
cfg = config.components.monitor.prometheus;
in
{
options.components.monitor.prometheus = {
enable = mkOption {
type = lib.types.bool;
default = config.components.monitor.metrics.enable;
};
port = mkOption {
type = int;
default = 8090;
description = "port to provide Prometheus export";
};
};
config = mkMerge [
(mkIf config.components.monitor.prometheus.enable {
services.prometheus = {
checkConfig = "syntax-only";
enable = true;
};
})
(mkIf config.components.monitor.prometheus.enable {
services.opentelemetry-collector.settings = {
exporters.prometheus.endpoint = "127.0.0.1:${toString cfg.port}";
service.pipelines.metrics.exporters = [ "prometheus" ];
};
services.prometheus.scrapeConfigs = [
{
job_name = "opentelemetry";
metrics_path = "/metrics";
scrape_interval = "10s";
static_configs = [ { targets = [ "localhost:${toString cfg.port}" ]; } ];
}
];
})
];
}

monitor/metrics-telegraf.nix (deleted)

@@ -1,57 +0,0 @@
{
config,
pkgs,
lib,
...
}:
with lib;
with types;
let
cfg = config.components.monitor.telegraf;
in
{
options.components.monitor.telegraf = {
enable = mkOption {
type = lib.types.bool;
default = config.components.monitor.metrics.enable;
};
influxDBPort = mkOption {
type = int;
default = 8088;
description = "Port to listen on influxDB input";
};
};
config = lib.mkMerge [
(mkIf config.components.monitor.telegraf.enable {
# opentelemetry wiring
services.opentelemetry-collector.settings = {
receivers.influxdb.endpoint = "127.0.0.1:${toString cfg.influxDBPort}";
service.pipelines.metrics.receivers = [ "influxdb" ];
};
services.telegraf.extraConfig.outputs.influxdb_v2.urls = [
"http://127.0.0.1:${toString cfg.influxDBPort}"
];
})
(mkIf config.components.monitor.telegraf.enable {
systemd.services.telegraf.path = [ pkgs.inetutils ];
services.telegraf = {
enable = true;
extraConfig = {
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs < all them plugins
inputs = {
cpu = { };
diskio = { };
processes = { };
system = { };
systemd_units = { };
ping = [ { urls = [ "10.100.0.1" ]; } ]; # important to keep the machine visible over wireguard
};
};
};
})
];
}

monitor/opentelemetry.nix (deleted)

@@ -1,218 +0,0 @@
{
pkgs,
config,
lib,
...
}:
with lib;
with types;
let
cfg = config.components.monitor.opentelemetry;
in
{
options.components.monitor.opentelemetry = {
enable = mkOption {
type = bool;
default = config.components.monitor.enable;
description = "weather or not to use opentelemetry";
};
receiver.endpoint = mkOption {
type = nullOr str;
default = null;
description = "endpoint to receive the opentelementry data from other collectors";
};
exporter.endpoint = mkOption {
type = nullOr str;
default = null;
description = "endpoint to ship opentelementry data too";
};
exporter.debug = mkOption {
type = nullOr (enum [
"logs"
"metrics"
]);
default = null;
description = "enable debug exporter.";
};
metrics.endpoint = mkOption {
type = str;
default = "127.0.0.1:8100";
description = "endpoint on where to provide opentelementry metrics";
};
};
config = mkMerge [
(mkIf config.components.monitor.opentelemetry.enable {
services.opentelemetry-collector = {
enable = true;
package = pkgs.opentelemetry-collector-contrib;
};
})
# add default tags to metrics
# todo : make sure we filter out metrics from otlp receivers
(mkIf config.components.monitor.enable {
services.opentelemetry-collector.settings = {
processors = {
# https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
"resourcedetection/system" = {
detectors = [ "system" ];
override = false;
system.hostname_sources = [ "os" ];
};
metricstransform.transforms = [
{
include = ".*";
match_type = "regexp";
action = "update";
operations = [
{
action = "add_label";
new_label = "machine";
new_value = config.networking.hostName;
}
];
}
];
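# Illustration with a hypothetical series: on a host named "probe",
# node_load1{instance="..."} leaves this processor as
# node_load1{instance="...",machine="probe"}.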
};
};
})
(mkIf config.components.monitor.metrics.enable {
services.opentelemetry-collector.settings = {
service.pipelines.metrics.processors = [
"metricstransform"
"resourcedetection/system"
];
};
})
(mkIf config.components.monitor.logs.enable {
services.opentelemetry-collector.settings = {
service.pipelines.logs.processors = [ "resourcedetection/system" ];
};
})
(mkIf (config.components.monitor.opentelemetry.exporter.debug != null) {
services.opentelemetry-collector.settings = {
exporters.debug = {
verbosity = "detailed";
sampling_initial = 5;
sampling_thereafter = 200;
};
service.pipelines.${config.components.monitor.opentelemetry.exporter.debug} = {
exporters = [ "debug" ];
};
};
})
# ship to next instance
(mkIf (config.components.monitor.opentelemetry.exporter.endpoint != null) {
services.opentelemetry-collector.settings = {
exporters.otlp = {
endpoint = cfg.exporter.endpoint;
tls.insecure = true;
};
};
})
(mkIf
(
config.components.monitor.opentelemetry.exporter.endpoint != null
&& config.components.monitor.logs.enable
)
{
services.opentelemetry-collector.settings = {
service.pipelines.logs.exporters = [ "otlp" ];
};
}
)
(mkIf
(
config.components.monitor.opentelemetry.exporter.endpoint != null
&& config.components.monitor.metrics.enable
)
{
services.opentelemetry-collector.settings = {
service.pipelines.metrics.exporters = [ "otlp" ];
};
}
)
# ship from other instance
(mkIf (config.components.monitor.opentelemetry.receiver.endpoint != null) {
services.opentelemetry-collector.settings = {
receivers.otlp.protocols.grpc.endpoint = cfg.receiver.endpoint;
};
})
(mkIf
(
config.components.monitor.opentelemetry.receiver.endpoint != null
&& config.components.monitor.logs.enable
)
{
services.opentelemetry-collector.settings = {
service.pipelines.logs.receivers = [ "otlp" ];
};
}
)
(mkIf
(
config.components.monitor.opentelemetry.receiver.endpoint != null
&& config.components.monitor.metrics.enable
)
{
services.opentelemetry-collector.settings = {
service.pipelines.metrics.receivers = [ "otlp" ];
};
}
)
# scrape the opentelemetry-collector's own metrics
# todo : this should be collected another way (opentelemetry-internal?)
# todo : enable this only when metrics.endpoint is set
(mkIf config.components.monitor.metrics.enable {
services.opentelemetry-collector.settings = {
receivers = {
prometheus.config.scrape_configs = [
{
job_name = "otelcol";
scrape_interval = "10s";
static_configs = [
{
targets = [ cfg.metrics.endpoint ];
}
];
metric_relabel_configs = [
{
source_labels = [ "__name__" ];
regex = ".*grpc_io.*";
action = "drop";
}
];
}
];
};
service = {
pipelines.metrics = {
receivers = [ "prometheus" ];
};
# todo : this should be collected automatically
# (opentelemetry's own metrics?)
telemetry.metrics.address = cfg.metrics.endpoint;
};
};
})
(mkIf (!config.components.monitor.metrics.enable) {
services.opentelemetry-collector.settings = {
service.telemetry.metrics.level = "none";
};
})
];
}

flake.lock

@@ -327,6 +327,24 @@
         "type": "github"
       }
     },
+    "flake-parts_7": {
+      "inputs": {
+        "nixpkgs-lib": "nixpkgs-lib_5"
+      },
+      "locked": {
+        "lastModified": 1727826117,
+        "narHash": "sha256-K5ZLCyfO/Zj9mPFldf3iwS6oZStJcU4tSpiXTMYaaL0=",
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "rev": "3d04084d54bedc3d6b8b736c70ef449225c361b1",
+        "type": "github"
+      },
+      "original": {
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "type": "github"
+      }
+    },
     "flake-utils": {
       "locked": {
         "lastModified": 1644229661,
@@ -843,6 +861,18 @@
         "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz"
       }
     },
+    "nixpkgs-lib_5": {
+      "locked": {
+        "lastModified": 1727825735,
+        "narHash": "sha256-0xHYkMkeLVQAMa7gvkddbPqpxph+hDzdu1XdGPJR+Os=",
+        "type": "tarball",
+        "url": "https://github.com/NixOS/nixpkgs/archive/fb192fec7cc7a4c26d51779e9bab07ce6fa5597a.tar.gz"
+      },
+      "original": {
+        "type": "tarball",
+        "url": "https://github.com/NixOS/nixpkgs/archive/fb192fec7cc7a4c26d51779e9bab07ce6fa5597a.tar.gz"
+      }
+    },
     "nixpkgs-stable": {
       "locked": {
         "lastModified": 1710695816,
@@ -1082,7 +1112,8 @@
       "srvos": "srvos",
       "stylix": "stylix",
       "taskwarrior": "taskwarrior",
-      "treefmt-nix": "treefmt-nix_6"
+      "telemetry": "telemetry",
+      "treefmt-nix": "treefmt-nix_7"
     }
   },
   "sops-nix": {
@@ -1281,6 +1312,28 @@
         "type": "github"
       }
     },
+    "telemetry": {
+      "inputs": {
+        "flake-parts": "flake-parts_7",
+        "nixpkgs": [
+          "nixpkgs"
+        ],
+        "treefmt-nix": "treefmt-nix_6"
+      },
+      "locked": {
+        "lastModified": 1729088108,
+        "narHash": "sha256-5rhe/VW3XdFS+0rw3BFzCo8AX07OnV8L/5IpjX9zNxE=",
+        "owner": "mrvandalo",
+        "repo": "nixos-telemetry",
+        "rev": "8ad47f66e92d5ad3f8ca55776289f83c227caaff",
+        "type": "github"
+      },
+      "original": {
+        "owner": "mrvandalo",
+        "repo": "nixos-telemetry",
+        "type": "github"
+      }
+    },
     "tinted-foot": {
       "flake": false,
       "locked": {
@@ -1436,6 +1489,27 @@
       }
     },
     "treefmt-nix_6": {
+      "inputs": {
+        "nixpkgs": [
+          "telemetry",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1727984844,
+        "narHash": "sha256-xpRqITAoD8rHlXQafYZOLvUXCF6cnZkPfoq67ThN0Hc=",
+        "owner": "numtide",
+        "repo": "treefmt-nix",
+        "rev": "4446c7a6fc0775df028c5a3f6727945ba8400e64",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "treefmt-nix",
+        "type": "github"
+      }
+    },
+    "treefmt-nix_7": {
       "inputs": {
         "nixpkgs": [
           "nixpkgs"

flake.nix

@@ -11,8 +11,8 @@
     flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
     flake-parts.url = "github:hercules-ci/flake-parts";
     healthchecks.inputs.nixpkgs.follows = "nixpkgs";
-    #healthchecks.url = "git+file:///home/palo/dev/nixos/healthcheck";
     healthchecks.url = "github:mrvandalo/nixos-healthchecks";
+    #healthchecks.url = "git+file:///home/palo/dev/nixos/healthcheck";
     home-manager-utils.inputs.home-manager.follows = "home-manager";
     home-manager-utils.url = "github:mrvandalo/home-manager-utils";
     home-manager.inputs.nixpkgs.follows = "nixpkgs";
@@ -41,8 +41,11 @@
     stylix.inputs.nixpkgs.follows = "nixpkgs";
     stylix.url = "github:danth/stylix";
     taskwarrior.inputs.nixpkgs.follows = "nixpkgs";
-    #taskwarrior.url = "git+file:///home/palo/dev/nixos/taskwarrior-flake";
     taskwarrior.url = "github:mrvandalo/taskwarrior-flake";
+    #taskwarrior.url = "git+file:///home/palo/dev/nixos/taskwarrior-flake";
+    telemetry.inputs.nixpkgs.follows = "nixpkgs";
+    telemetry.url = "github:mrvandalo/nixos-telemetry";
+    #telemetry.url = "git+file:///home/palo/dev/nixos/nixos-telemetry";
     treefmt-nix.inputs.nixpkgs.follows = "nixpkgs";
     treefmt-nix.url = "github:numtide/treefmt-nix";
@@ -59,6 +62,7 @@
       clan-core,
       clan-fact-generators,
       flake-parts,
+      healthchecks,
       home-manager,
       home-manager-utils,
       kmonad,
@@ -79,8 +83,8 @@
       srvos,
       stylix,
       taskwarrior,
+      telemetry,
       treefmt-nix,
-      healthchecks,
     }:
     let
@@ -278,7 +282,8 @@
         ./components
         ./features
         #./modules
-        inputs.clan-core.nixosModules.clanCore
+        clan-core.nixosModules.clanCore
+        telemetry.nixosModules.telemetry
         {
           clan.core.clanDir = ./.; # fixes issues with clanCore https://git.clan.lol/clan/clan-core/issues/1979
         }
@@ -301,6 +306,7 @@
         #./system/all # todo : spread this across features and components
         # some modules I always use
+        telemetry.nixosModules.telemetry
         permown.nixosModules.permown
         kmonad.nixosModules.default
         # some default things I always want
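
For reference, a minimal consumer of the new input looks roughly like this (a sketch that assumes only what the diff above shows, namely that nixos-telemetry exposes nixosModules.telemetry; the host name is illustrative):

    {
      inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
      inputs.telemetry.url = "github:mrvandalo/nixos-telemetry";
      inputs.telemetry.inputs.nixpkgs.follows = "nixpkgs";
      outputs =
        { nixpkgs, telemetry, ... }:
        {
          nixosConfigurations.example = nixpkgs.lib.nixosSystem {
            system = "x86_64-linux";
            modules = [
              telemetry.nixosModules.telemetry
              { telemetry.enable = true; }
            ];
          };
        };
    }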


@@ -54,9 +54,8 @@
   components.network.wifi.enable = true;
   components.terminal.enable = true;
-  components.monitor.enable = true;
-  components.monitor.opentelemetry.exporter.endpoint = "10.100.0.1:4317"; # orbi
-  #components.monitor.opentelemetry.exporter.debug = "logs";
+  telemetry.enable = true;
+  telemetry.opentelemetry.exporter.endpoint = "10.100.0.1:4317"; # orbi
   home-manager.users.mainUser.home.sessionPath = [ "$HOME/.timewarrior/scripts" ];
   # todo: move to homemanager


@@ -68,8 +68,8 @@
   features.boot.ssh.kernelModules = [ "e1000e" ];
   features.boot.tor.enable = true;
-  components.monitor.enable = true;
-  components.monitor.opentelemetry.receiver.endpoint = "0.0.0.0:4317";
+  telemetry.enable = true;
+  telemetry.opentelemetry.receiver.endpoint = "0.0.0.0:4317";
   networking.firewall.interfaces.wg0.allowedTCPPorts = [ 4317 ];
   networking.firewall.interfaces.wg0.allowedUDPPorts = [ 4317 ];


@@ -38,9 +38,9 @@
   components.network.wifi.enable = true;
   components.terminal.enable = true;
-  components.monitor.enable = true;
-  components.monitor.opentelemetry.exporter.endpoint = "10.100.0.1:4317"; # orbi
-  components.monitor.exporters.zfs.enable = false;
+  telemetry.enable = true;
+  telemetry.opentelemetry.exporter.endpoint = "10.100.0.1:4317"; # orbi
+  telemetry.prometheus.exporters.zfs.enable = false;
   home-manager.users.mainUser.home.sessionPath = [ "$HOME/.timewarrior/scripts" ];
   home-manager.users.mainUser.bugwarrior.config = {


@@ -60,9 +60,9 @@
   features.network.fail2ban.enable = true;
   features.boot.ssh.enable = true;
-  components.monitor.enable = true;
-  components.monitor.opentelemetry.receiver.endpoint = "0.0.0.0:4317";
-  components.monitor.opentelemetry.exporter.endpoint = "10.100.0.2:4317"; # chnungus
+  telemetry.enable = true;
+  telemetry.opentelemetry.receiver.endpoint = "0.0.0.0:4317";
+  telemetry.opentelemetry.exporter.endpoint = "10.100.0.2:4317"; # chnungus
   networking.firewall.interfaces.wg0.allowedTCPPorts = [ 4317 ];
   networking.firewall.interfaces.wg0.allowedUDPPorts = [ 4317 ];
   healthchecks.closed.public.ports.opentelemetry = [ 4317 ];


@@ -17,8 +17,6 @@
   features.boot.ssh.enable = true;
   features.boot.tor.enable = true;
-  components.monitor.enable = false;
   networking.hostName = "probe";
   users.users.root.openssh.authorizedKeys.keys = [
     "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJkqVvuJSvRMO5pG2CHNNBxjB7HlJudK4TQs3BhbOWOD"


@@ -19,7 +19,6 @@
   components.gui.wayland.enable = false;
   components.gui.xorg.enable = true;
   components.mainUser.enable = true;
-  components.monitor.enable = false;
   components.network.enable = true;
   components.network.wifi.enable = true;
   components.terminal.enable = true;


@@ -28,7 +28,6 @@
   components.network.enable = true;
   components.network.wifi.enable = true;
   components.mainUser.enable = true;
-  components.monitor.enable = false;
   users.users.mainUser.extraGroups = [ "video" ];