hook up chungus with colmena

feature/nixinite
Ingolf Wagner 2023-04-29 23:29:05 +02:00
parent a934150cd2
commit f8678722c8
Signed by: palo
GPG Key ID: 76BF5F1928B9618B
12 changed files with 554 additions and 8 deletions

View File

@@ -42,6 +42,26 @@
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1682788423,
"narHash": "sha256-qYHMvTtFG9XZoYvGMk9n5QmowJ1CgIb5i5/EEEFJFFo=",
"owner": "nix-community",
"repo": "disko",
"rev": "617c77a440aac0b99c888da42406c79253ab7ef4",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"doom-emacs": {
"flake": false,
"locked": {
@@ -874,6 +894,7 @@
"root": {
"inputs": {
"colmena": "colmena",
"disko": "disko",
"doom-emacs-nix": "doom-emacs-nix",
"emacs-overlay": "emacs-overlay_2",
"grocy-scanner": "grocy-scanner",
@@ -930,11 +951,11 @@
"secrets": {
"flake": false,
"locked": {
"lastModified": 1676307221,
"narHash": "sha256-6XX4HQHuQxRnD2p3M1fLBOpfl9wFKIGc51Lm/bGqPOU=",
"lastModified": 1682803277,
"narHash": "sha256-K8v9TNTgeyGob7bH23K/2ZBSBphXyJ8KwfYdNNEv3NI=",
"ref": "main",
"rev": "060661c725d1c9cdfe6c54692fd22193dfced4f2",
"revCount": 46,
"rev": "d57bd16c25bac2f2e709cb9b39ded1c01aca290e",
"revCount": 48,
"type": "git",
"url": "ssh://gitea@git.ingolf-wagner.de/palo/nixos-secrets.git"
},

View File

@@ -3,6 +3,7 @@
description = "my krops file";
inputs = {
secrets = {
url = "git+ssh://gitea@git.ingolf-wagner.de/palo/nixos-secrets.git?ref=main";
flake = false;
@@ -72,11 +73,16 @@
url = "github:kmonad/kmonad?dir=nix";
inputs.nixpkgs.follows = "nixpkgs";
};
disko = {
url = "github:nix-community/disko";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs =
{ self
, colmena
, disko
, doom-emacs-nix
, emacs-overlay
, grocy-scanner
@@ -90,9 +96,9 @@
, permown
, polygon-art
, private_assets
, retiolum
, secrets
, sops-nix
, retiolum
, landingpage
, kmonad
}:
@@ -180,6 +186,7 @@
(sopsModule name)
home-manager.nixosModules.home-manager
permown.nixosModules.permown
disko.nixosModules.disko
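# disko's NixOS module: provides the disko.devices options consumed by the per-host disko-config.nix files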
];
home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
@@ -255,6 +262,15 @@
];
};
chungus = { name, nodes, pkgs, ... }: {
#deployment.targetHost = "${name}.private";
deployment.targetHost = "192.168.178.31";
deployment.tags = [ "server" "online" "private" ];
imports = [
grocy-scanner.nixosModule
];
};
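# Typical colmena invocations for the node above (a sketch; exact flags depend on
# how this flake exposes its hive and on the pinned colmena version):
#   colmena apply --on chungus    # deploy just this host via 192.168.178.31
#   colmena apply --on @server    # deploy every node tagged "server"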
robi = { name, nodes, pkgs, ... }: {
deployment.targetHost = "${name}.private";
deployment.tags = [ "server" "online" "private" ];

View File

@@ -0,0 +1,76 @@
{ config, pkgs, lib, ... }: {
imports = [
./hardware-configuration.nix
../../system/server
./disko-config.nix
./packages.nix
./tinc.nix
#./mail-fetcher.nix
#./hass.nix
#./zigbee2mqtt.nix
#./syncthing.nix
#./wifi-access-point.nix
#./borg.nix
#./taskwarrior-pushover.nix
#./jellyfin.nix
#./wireguard.nix
# logging
./loki.nix
./loki-promtail.nix
./prometheus.nix
./grafana.nix
./telegraf.nix
#./home-display.nix
#./tdarr.nix
];
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.tmpOnTmpfs = true; # make /tmp a tmpfs (performance!)
boot.supportedFilesystems = [ "zfs" ];
# head -c4 /dev/urandom | od -A none -t x4
networking.hostId = "e439b116";
boot.zfs.extraPools = [ "zraid" ];
sops.defaultSopsFile = ../../secrets/chungus.yaml;
networking.hostName = "chungus";
programs.custom.zsh.enable = true;
users.users.root.shell = pkgs.zsh;
# TODO: rename to component.init.ssh
#configuration.init-ssh = {
# enable = "enabled";
# kernelModules = [ "e1000e" ];
#};
# just enable lan
#networking.dhcpcd.allowInterfaces = [ "enp0s25" ];
# nix-shell -p speedtest_cli --run speedtest
#configuration.fireqos = {
# enable = false;
# interface = "enp0s25";
# input = 200000;
# output = 2000;
# balance = false;
#};
services.printing.enable = false;
services.smartd.enable = true;
#home-manager.users.mailUser.home.stateVersion = "22.11";
}

View File

@@ -1,9 +1,7 @@
# nix run github:nix-community/disko -- --mode zap_create_mount ./disko-config.nix
# nix run github:nix-community/disko -- --mode create ./disko-config.nix --dry-run
# nix run github:nix-community/disko -- --mode mount ./disko-config.nix --dry-run
# nixos-generate-config --no-filesystems --root /mnt
# vim /mnt/configuration.nix
# nixos-install --root /mnt
# nixos-install
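# Note: with disko.nixosModules.disko imported (see flake.nix), evaluating this file
# also yields the matching fileSystems entries, so the create/mount commands above
# should only be needed for the initial installation.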
{ ... }:
let
raid_disks = {
@@ -97,6 +95,7 @@ in
};
# use boot.zfs.extraPools = [ "zraid" ] to mount this pool during boot time.
# or `zpool import -f zraid` once on the first boot and reboot
zraid = {
type = "zpool";
mode = "raidz2";

View File

@@ -0,0 +1,24 @@
{ config, ... }:
{
services.nginx.virtualHosts.${config.services.grafana.settings.server.domain} = {
extraConfig = ''
allow ${config.tinc.private.subnet};
deny all;
'';
locations."/" = {
proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
proxyWebsockets = true;
};
};
services.grafana = {
enable = true;
settings.server = {
domain = "grafana.pepe.private";
http_port = 2342;
http_addr = "localhost";
};
};
}
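To use the Loki and Prometheus instances wired up in this commit from Grafana, the data sources could also be provisioned declaratively. A minimal sketch, not part of this change, assuming the settings-style services.grafana.provision options in current nixpkgs and the ports configured here (3100 for Loki, config.services.prometheus.port for Prometheus):

{ config, ... }:
{
  services.grafana.provision = {
    enable = true;
    datasources.settings.datasources = [
      {
        name = "Prometheus";
        type = "prometheus";
        url = "http://localhost:${toString config.services.prometheus.port}";
      }
      {
        name = "Loki";
        type = "loki";
        url = "http://127.0.0.1:3100";
      }
    ];
  };
}

Declarative provisioning keeps the Grafana data sources in the same repository as the services they point at.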

View File

@@ -0,0 +1,31 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp0s31f6.useDHCP = lib.mkDefault true;
# networking.interfaces.tinc.private.useDHCP = lib.mkDefault true;
# networking.interfaces.wlp3s0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
powerManagement.cpuFreqGovernor = "powersave";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
# high-resolution display
hardware.video.hidpi.enable = lib.mkDefault true;
}

View File

@@ -0,0 +1,41 @@
{ config, ... }:
{
services.promtail = {
enable = true;
configuration = {
server = {
http_listen_port = 28183;
grpc_listen_port = 0;
};
positions.filename = "/tmp/positions.yaml";
clients = [
{ url = "http://127.0.0.1:3100/loki/api/v1/push"; }
];
scrape_configs = [
{
job_name = "journal";
journal = {
max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
};
};
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
{
source_labels = [ "__journal__transport" ];
target_label = "transport";
}
];
}
];
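# The journal scrape above labels every stream with job, host, unit and transport,
# so a LogQL query such as {host="chungus", unit="nginx.service"} (example only)
# selects a single service's log in Grafana/Loki.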
};
};
}

View File

@@ -0,0 +1,99 @@
{ config, pkgs, ... }:
{
services.loki = {
enable = true;
configuration = {
server = {
http_listen_port = 3100;
log_level = "warn";
};
auth_enabled = false;
ingester = {
lifecycler = {
address = "127.0.0.1";
ring = {
kvstore = {
store = "inmemory";
};
replication_factor = 1;
};
};
chunk_idle_period = "1h";
max_chunk_age = "1h";
chunk_target_size = 999999;
chunk_retain_period = "30s";
max_transfer_retries = 0;
};
schema_config = {
configs = [{
from = "2022-06-06";
store = "boltdb-shipper";
object_store = "filesystem";
schema = "v11";
index = {
prefix = "index_";
period = "24h";
};
}];
};
storage_config = {
boltdb_shipper = {
active_index_directory = "/var/lib/loki/boltdb-shipper-active";
cache_location = "/var/lib/loki/boltdb-shipper-cache";
cache_ttl = "24h";
shared_store = "filesystem";
};
filesystem = {
directory = "/var/lib/loki/chunks";
};
};
limits_config = {
reject_old_samples = true;
reject_old_samples_max_age = "168h";
};
chunk_store_config = {
max_look_back_period = "0s";
};
table_manager = {
retention_deletes_enabled = false;
retention_period = "0s";
};
compactor = {
working_directory = "/var/lib/loki";
shared_store = "filesystem";
compactor_ring = {
kvstore = {
store = "inmemory";
};
};
};
};
# user, group, dataDir, extraFlags, (configFile)
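#   e.g. services.loki.dataDir = "/var/lib/loki";   # the default, matching the paths above
#        services.loki.extraFlags = [ "-config.expand-env=true" ];   # sketch of a further knob, not set here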
};
#services.nginx = {
# enable = true;
# virtualHosts.loki = {
# serverName = "loki.pepe.private";
# locations."/" = {
# proxyWebsockets = true;
# proxyPass = "http://127.0.0.1:3100";
# #extraConfig = ''
# # access_log off;
# # allow ${config.tinc.private.subnet};
# # deny all;
# #'';
# };
# };
#};
}

View File

@@ -0,0 +1,8 @@
{ config, lib, pkgs, ... }:
{
environment.systemPackages = [
pkgs.mediainfo
pkgs.youtube-dl
];
}

View File

@@ -0,0 +1,132 @@
{ config, pkgs, lib, ... }: {
sops.secrets.hass_long_term_token.owner = "prometheus";
services.nginx = {
enable = true;
statusPage = true;
virtualHosts = {
"prometheus.pepe.private" = {
extraConfig = ''
allow ${config.tinc.private.subnet};
deny all;
'';
locations."/" = { proxyPass = "http://localhost:${toString config.services.prometheus.port}"; };
};
};
};
services.prometheus = {
checkConfig = "syntax-only";
enable = true;
# keep data for 30 days
extraFlags = [ "--storage.tsdb.retention.time=30d" ];
ruleFiles = [
(pkgs.writeText "prometheus-rules.yml" (builtins.toJSON {
groups = [
{
name = "core";
rules = [
{
alert = "InstanceDown";
expr = "up == 0";
for = "5m";
labels.severity = "page";
annotations = {
summary = "Instance {{ $labels.instance }} down";
description = "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.";
};
}
];
}
{
name = "home-assistant";
rules = [
{
record = "home_open_window_sum";
expr = ''sum( homeassistant_binary_sensor_state{entity=~"binary_sensor\\.window_02_contact|binary_sensor\\.window_03_contact|binary_sensor\\.window_04_contact|binary_sensor\\.window_05_contact|binary_sensor\\.window_06_contact|binary_sensor\\.window_07_contact"} )'';
}
] ++ (map
(number:
{
record = "home_at_least_n_windows_open";
expr = ''home_open_window_sum >= bool ${toString number}'';
labels.n = toString number; # label values must be strings
}) [ 1 2 3 ]);
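# For n = 2 the map above produces (sketch of one generated rule):
#   { record = "home_at_least_n_windows_open";
#     expr = "home_open_window_sum >= bool 2";
#     labels.n = "2"; }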
}
];
}))
];
#alertmanager = {
# enable = true;
# configuration = {
#};
#};
exporters = {
systemd.enable = true;
node = {
enable = true;
enabledCollectors = [ "systemd" ];
port = 9002;
};
};
scrapeConfigs = [
{
job_name = "netdata";
metrics_path = "/api/v1/allmetrics";
params.format = [ "prometheus" ];
scrape_interval = "5s";
static_configs = [
{
targets = [ "localhost:19999" ];
labels = {
service = "netdata";
server = "pepe";
};
}
];
}
{
job_name = "systemd";
static_configs = [{
targets = [ "localhost:${toString config.services.prometheus.exporters.systemd.port}" ];
labels = {
service = "systemd-exporter";
server = "pepe";
};
}];
}
{
job_name = "node";
static_configs = [{
targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
labels = {
service = "node-exporter";
server = "pepe";
};
}];
}
{
# see https://www.home-assistant.io/integrations/prometheus/
job_name = "home-assistant";
scrape_interval = "60s";
metrics_path = "/api/prometheus";
bearer_token_file = toString config.sops.secrets.hass_long_term_token.path;
static_configs = [{
targets = [ "localhost:8123" ];
labels = {
service = "hass";
server = "pepe";
};
}];
}
];
};
}

View File

@@ -0,0 +1,93 @@
{ pkgs, ... }:
let
urls = [
{ url = "https://bitwarden.ingolf-wagner.de"; path = ""; }
{ url = "https://flix.ingolf-wagner.de"; path = "web/index.html"; }
{ url = "https://git.ingolf-wagner.de"; path = ""; }
{ url = "https://ingolf-wagner.de"; path = ""; }
{ url = "https://nextcloud.ingolf-wagner.de"; path = "login"; }
{ url = "https://tech.ingolf-wagner.de"; path = ""; }
];
in
{
systemd.services.telegraf.path = [ pkgs.inetutils ];
services.telegraf = {
enable = true;
extraConfig = {
outputs.prometheus_client = {
listen = ":9273";
metric_version = 2;
};
# https://github.com/influxdata/telegraf/tree/master/plugins/inputs < all them plugins
inputs = {
cpu = { };
diskio = { };
x509_cert = [{
sources = (map (url: "${url.url}:443") urls);
interval = "30m"; # agent.interval = "10s" is default
}];
http_response =
let fullUrls = map ({ url, path }: "${url}/${path}") urls;
in [{ urls = fullUrls; }];
processes = { };
systemd_units = { };
internet_speed.interval = "50m";
nginx.urls = [ "http://localhost/nginx_status" ];
ping = [{ urls = [ "10.100.0.1" ]; }]; # actually important to make pepe visible over wireguard
};
};
};
services.prometheus.scrapeConfigs = [
{
# scrape the local telegraf prometheus_client output configured above
job_name = "telegraf";
metrics_path = "/metrics";
static_configs = [{
targets = [ "localhost:9273" ];
labels = {
service = "telegraf";
server = "pepe";
};
}];
}
];
services.prometheus.ruleFiles = [
(pkgs.writeText "telegraf.yml" (builtins.toJSON {
groups = [
{
name = "telegraf";
rules = [
{
alert = "HttpResponseNotOk";
expr = "0 * (http_response_http_response_code != 200) + 1";
for = "5m";
labels.severity = "page";
annotations = {
summary = "{{ $labels.exported_server }} does not return Ok";
description = "{{ $labels.exported_server }} does not return Ok for more than 5 minutes";
};
}
{
alert = "CertificatExpires";
expr = ''x509_cert_expiry{issuer_common_name="R3"} < ${toString (60 * 60 * 24 * 5)}'';
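# 60 * 60 * 24 * 5 = 432000 seconds, i.e. the alert fires once less than five days of validity remain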
for = "1d";
labels.severity = "page";
annotations = {
summary = "{{ $labels.san }} does Expire Soon";
description = "{{ $labels.san }} does expire in less than 5 days";
};
}
];
}
];
}))
];
}

View File

@@ -0,0 +1,6 @@
{
tinc.private.enable = true;
tinc.private.ipv4 = "10.23.42.28";
}