graylog: fixing stuff

This commit is contained in:
Ingolf Wagner 2021-07-15 23:09:55 +02:00
parent 1b2176d9de
commit 8627dac495
No known key found for this signature in database
GPG key ID: 76BF5F1928B9618B
10 changed files with 272 additions and 82 deletions

View file

@ -6,7 +6,7 @@
./hardware-configuration.nix
#./home-assistant.nix
./kodi.nix
#./kodi.nix
./syncthing.nix
./tinc.nix
#./wifi-access-point.nix

View file

@ -6,7 +6,7 @@
./tinc.nix
./syncthing.nix
./kodi.nix
#./kodi.nix
];

View file

@ -119,7 +119,6 @@ in {
"2","cirtical","Critical conditions"
"3","error","Error Condition"
"4","warning","May indicate that an error will occur if action is not taken."
"4","warn","May indicate that an error will occur if action is not taken."
"5","notice","Events that are unusual, but not error conditions."
"6","info","Normal operational messages that require no action."
"7","debug","Information useful to developers for debugging the application."

View file

@ -238,9 +238,39 @@ in {
environment.systemPackages = [ pkgs.smbclient ];
# send log to the host system's graylog (use tinc or wireguard if the host is not graylog)
services.SystemdJournal2Gelf.enable = true;
services.SystemdJournal2Gelf.graylogServer = "${hostAddress}:11201";
services.journalbeat = {
enable = true;
extraConfig = ''
journalbeat.inputs:
- paths: []
# Position to start reading from journal. Valid values: head, tail, cursor
seek: cursor
# Fallback position if no cursor data is available.
cursor_seek_fallback: tail
output.logstash:
# Boolean flag to enable or disable the output module.
enabled: true
# Graylog host and the beats input
hosts: ["${hostAddress}:5044"]
# If enabled only a subset of events in a batch of events is transferred per
# transaction. The number of events to be sent increases up to `bulk_max_size`
# if no error is encountered.
slow_start: true
# The number of seconds to wait before trying to reconnect to Graylog
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Graylog after a network error. The default is 60s.
backoff.max: 60s
'';
};
};
};
@ -255,9 +285,9 @@ in {
# open ports for logging
networking.firewall.interfaces."ve-nextcloud".allowedTCPPorts =
[ 11201 12304 12305 ];
[ 5044 12304 12305 ];
networking.firewall.interfaces."ve-nextcloud".allowedUDPPorts =
[ 11201 12304 12305 ];
[ 5044 12304 12305 ];
# host nginx setup
services.nginx = {

View file

@ -49,9 +49,38 @@ in {
config = { config, pkgs, lib, ... }: {
# send log to the host system's graylog (use tinc or wireguard if the host is not graylog)
services.SystemdJournal2Gelf.enable = true;
services.SystemdJournal2Gelf.graylogServer = "${hostAddress}:11201";
services.journalbeat = {
enable = true;
extraConfig = ''
journalbeat.inputs:
- paths: []
# Position to start reading from journal. Valid values: head, tail, cursor
seek: cursor
# Fallback position if no cursor data is available.
cursor_seek_fallback: tail
output.logstash:
# Boolean flag to enable or disable the output module.
enabled: true
# Graylog host and the beats input
hosts: ["${hostAddress}:5044"]
# If enabled only a subset of events in a batch of events is transferred per
# transaction. The number of events to be sent increases up to `bulk_max_size`
# if no error is encountered.
slow_start: true
# The number of seconds to wait before trying to reconnect to Graylog
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Graylog after a network error. The default is 60s.
backoff.max: 60s
'';
};
services.journald.extraConfig = "SystemMaxUse=1G";
@ -229,9 +258,9 @@ in {
# open ports for logging
networking.firewall.interfaces."ve-torrent".allowedTCPPorts =
[ 11201 12304 12305 ];
[ 5044 12304 12305 ];
networking.firewall.interfaces."ve-torrent".allowedUDPPorts =
[ 11201 12304 12305 ];
[ 5044 12304 12305 ];
# host nginx setup
services.nginx = {

View file

@ -1,5 +1,139 @@
{ lib, ... }: {
# send data to graylog
services.SystemdJournal2Gelf.enable = lib.mkDefault true;
services.SystemdJournal2Gelf.enable = lib.mkDefault false;
services.SystemdJournal2Gelf.graylogServer = "workhorse.private:11201";
services.journalbeat = {
enable = true;
# https://docs.graylog.org/en/4.0/pages/sending/journald.html
extraConfig = ''
journalbeat.inputs:
# Paths that should be crawled and fetched. Possible values files and directories.
# When setting a directory, all journals under it are merged.
# When empty starts to read from local journal.
- paths: []
# The number of seconds to wait before trying to read again from journals.
#backoff: 1s
# The maximum number of seconds to wait before attempting to read again from journals.
#max_backoff: 20s
# Position to start reading from journal. Valid values: head, tail, cursor
seek: tail
# Fallback position if no cursor data is available.
#cursor_seek_fallback: tail
# Exact matching for field values of events.
# Matching for nginx entries: "systemd.unit=nginx"
#include_matches: []
output.logstash:
# Boolean flag to enable or disable the output module.
enabled: true
# Graylog host and the beats input
hosts: ["workhorse.private:5044"]
# Number of workers per Graylog host.
#worker: 1
# Set gzip compression level.
#compression_level: 3
# Configure escaping HTML symbols in strings.
#escape_html: false
# Optional maximum time to live for a connection to Graylog, after which the
# connection will be re-established. A value of `0s` (the default) will
# disable this feature.
#
# Not yet supported for async connections (i.e. with the "pipelining" option set)
ttl: 30s
# Optionally load-balance events between Graylog hosts. Default is false.
#loadbalance: false
# If enabled only a subset of events in a batch of events is transferred per
# transaction. The number of events to be sent increases up to `bulk_max_size`
# if no error is encountered.
slow_start: true
# The number of seconds to wait before trying to reconnect to Graylog
# after a network error. After waiting backoff.init seconds, the Beat
# tries to reconnect. If the attempt fails, the backoff timer is increased
# exponentially up to backoff.max. After a successful connection, the backoff
# timer is reset. The default is 1s.
backoff.init: 1s
# The maximum number of seconds to wait before attempting to connect to
# Graylog after a network error. The default is 60s.
backoff.max: 60s
# SOCKS5 proxy server URL
#proxy_url: socks5://user:password@socks5-server:2233
# Resolve names locally when using a proxy server. Defaults to false.
#proxy_use_local_resolver: false
# Enable SSL support. SSL is automatically enabled if any SSL setting is set.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions from 1.1
# up to 1.3 are enabled.
#ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client certificate key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase:
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE-based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
# Configure a pin that can be used to do extra validation of the verified certificate chain,
# this allows you to ensure that a specific certificate is used to validate the chain of trust.
#
# The pin is a base64 encoded string of the SHA-256 fingerprint.
#ssl.ca_sha256: ""
# The number of times to retry publishing an event after a publishing failure.
# After the specified number of retries, the events are typically dropped.
# Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
# and retry until all events are published. Set max_retries to a value less
# than 0 to retry until all events are published. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Graylog request. The
# default is 2048.
bulk_max_size: 2048
# The number of seconds to wait for responses from the Graylog server before
# timing out. The default is 30s.
#timeout: 30s
'';
};
}

View file

@ -6,7 +6,7 @@ with builtins; {
resource.graylog_index_set.default = let
maxIndexSize = 200;
maxIndexCount = 20;
isDefault = false;
isDefault = true;
in {
title = "default";
description = ''
@ -18,7 +18,7 @@ with builtins; {
}MB of logs!
'';
default = isDefault;
index_prefix = "test-graylog";
index_prefix = "graylog";
rotation_strategy_class =
"org.graylog2.indexer.rotation.strategies.SizeBasedRotationStrategy";
retention_strategy_class =

View file

@ -4,18 +4,56 @@ with builtins; {
resource = {
graylog_input.journald = {
title = "Journald Logs";
# https://javadoc.io/doc/org.graylog2/graylog2-inputs/latest/index.html
type = "org.graylog2.inputs.gelf.udp.GELFUDPInput";
global = true;
attributes = toJSON ({
bind_address = "0.0.0.0";
decompress_size_limit = 8388608;
number_worker_threads = 4;
port = 11201;
recv_buffer_size = 262144;
});
graylog_input = {
journald = {
title = "Journald Logs";
# https://javadoc.io/doc/org.graylog2/graylog2-inputs/latest/index.html
type = "org.graylog2.inputs.gelf.udp.GELFUDPInput";
global = true;
attributes = toJSON ({
bind_address = "0.0.0.0";
decompress_size_limit = 8388608;
number_worker_threads = 4;
port = 11201;
recv_buffer_size = 262144;
});
};
journalbeat = {
title = "Journalbeat Logs";
# https://javadoc.io/doc/org.graylog2/graylog2-inputs/latest/index.html
type = "org.graylog.plugins.beats.Beats2Input";
global = true;
attributes = toJSON ({
bind_address = "0.0.0.0";
no_beats_prefix = true;
number_worker_threads = 4;
port = 5044;
recv_buffer_size = 1048576;
tcp_keepalive = false;
tls_cert_file = "";
tls_client_auth = "disabled";
tls_client_auth_cert_file = "";
tls_enable = false;
tls_key_file = "";
tls_key_password = "";
});
};
};
graylog_input_static_fields.journald = {
input_id = "\${graylog_input.journald.id}";
fields = {
from_journald = true;
systemdjournal2gelf = true;
};
};
graylog_input_static_fields.journalbeat = {
input_id = "\${graylog_input.journalbeat.id}";
fields = {
from_journald = true;
journalbeat = true;
};
};
graylog_stream.journald = {
@ -35,75 +73,27 @@ with builtins; {
inverted = false;
};
graylog_input_static_fields.journald = {
input_id = "\${graylog_input.journald.id}";
fields = { from_journald = true; };
};
graylog_pipeline_connection = {
journald = {
stream_id = "\${graylog_stream.journald.id}";
pipeline_ids = [
"\${graylog_pipeline.journald_fix_loglevel.id}"
#"\${graylog_pipeline.journald_fix_loglevel.id}"
"\${graylog_pipeline.journald_iptable_parse.id}"
"\${graylog_pipeline.journald_loglevel_int_to_str.id}"
#"\${graylog_pipeline.journald_loglevel_int_to_str.id}"
];
};
};
graylog_pipeline = {
journald_fix_loglevel.source = ''
pipeline "journald : fix loglevel"
stage 0 match either
rule "journald : lookup log level"
stage 1 match either
rule "journald : replace log level"
end
'';
journald_iptable_parse.source = ''
pipeline "journald : ip table parse"
stage 0 match either
rule "journald : iptables split"
end
'';
journald_loglevel_int_to_str.source = ''
pipeline "journald : loglevel int to str"
stage 9 match either
rule "journald : int to str"
end
'';
};
graylog_pipeline_rule = {
loglevelLookup.source = ''
rule "journald : lookup log level"
when
has_field("level")
then
let lookup = lookup_value("systemd-log-level-reverse",$message.level);
set_field("level_fix",lookup);
end
'';
loglevelReplace.source = ''
rule "journald : replace log level"
when
has_field("level_fix")
then
set_field("level",$message.level_fix);
end
'';
loglevelIntToStr.source = ''
rule "journald : int to str"
when
has_field("level")
then
let lookup = lookup_value("systemd_log_level",$message.level);
set_field("level_type",lookup);
end
'';
iptableSplit.source = ''
rule "journald : iptables split"
when

View file

@ -3,10 +3,7 @@ with builtins; {
resource = {
graylog_pipeline_connection = {
nextcloud = {
stream_id = "\${graylog_stream.journald.id}";
pipeline_ids = [ "\${graylog_pipeline.nextcloud.id}" ];
};
journald.pipeline_ids = [ "\${graylog_pipeline.nextcloud.id}" ];
};
graylog_pipeline = {
@ -14,6 +11,8 @@ with builtins; {
pipeline "nextcloud : parsing"
stage 10 match either
rule "nextcloud : parse level 1"
stage 11 match either
rule "nextcloud : parse level 2"
end
'';
};
@ -28,6 +27,15 @@ with builtins; {
set_fields(to_map(parsedJson),"nextcloud_");
end
'';
nextcloudLevel2.source = ''
rule "nextcloud : parse level 2"
when
has_field("nextcloud_message")
then
let parsedJson = parse_json(to_string($message.nextcloud_message));
set_fields(to_map(parsedJson),"nextcloud_message_");
end
'';
};
};

Binary file not shown.