graylog: fixing stuff
commit 8627dac495 (parent 1b2176d9de)
10 changed files with 272 additions and 82 deletions
@@ -6,7 +6,7 @@
     ./hardware-configuration.nix

     #./home-assistant.nix
-    ./kodi.nix
+    #./kodi.nix
     ./syncthing.nix
     ./tinc.nix
     #./wifi-access-point.nix
@@ -6,7 +6,7 @@

     ./tinc.nix
     ./syncthing.nix
-    ./kodi.nix
+    #./kodi.nix

   ];

@@ -119,7 +119,6 @@ in {
     "2","cirtical","Critical conditions"
     "3","error","Error Condition"
     "4","warning","May indicate that an error will occur if action is not taken."
-    "4","warn","May indicate that an error will occur if action is not taken."
     "5","notice","Events that are unusual, but not error conditions."
     "6","info","Normal operational messages that require no action."
     "7","debug","Information useful to developers for debugging the application."
@@ -238,9 +238,39 @@ in {

       environment.systemPackages = [ pkgs.smbclient ];

-      # send log to host systems graylog (use tinc or wireguard if host is not graylog)
-      services.SystemdJournal2Gelf.enable = true;
-      services.SystemdJournal2Gelf.graylogServer = "${hostAddress}:11201";
+      services.journalbeat = {
+        enable = true;
+        extraConfig = ''
+          journalbeat.inputs:
+          - paths: []
+          # Position to start reading from journal. Valid values: head, tail, cursor
+          seek: cursor
+          # Fallback position if no cursor data is available.
+          cursor_seek_fallback: tail
+          output.logstash:
+            # Boolean flag to enable or disable the output module.
+            enabled: true
+            # Graylog host and the beats input
+            hosts: ["${hostAddress}:5044"]
+
+            # If enabled only a subset of events in a batch of events is transferred per
+            # transaction. The number of events to be sent increases up to `bulk_max_size`
+            # if no error is encountered.
+            slow_start: true
+
+            # The number of seconds to wait before trying to reconnect to Graylog
+            # after a network error. After waiting backoff.init seconds, the Beat
+            # tries to reconnect. If the attempt fails, the backoff timer is increased
+            # exponentially up to backoff.max. After a successful connection, the backoff
+            # timer is reset. The default is 1s.
+            backoff.init: 1s
+
+            # The maximum number of seconds to wait before attempting to connect to
+            # Graylog after a network error. The default is 60s.
+            backoff.max: 60s
+        '';
+      };
     };

   };
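The `${hostAddress}` interpolation in this hunk refers to a Nix binding defined outside the shown lines. For a declarative nixos-container it normally corresponds to the host side of the container's veth pair, so journalbeat inside the container ships to the Graylog Beats input listening on the host. A minimal sketch of that wiring; the addresses and the container structure here are illustrative assumptions, only the option names are standard NixOS:

    let
      hostAddress = "192.168.100.10";   # host side of the veth pair (illustrative value)
      localAddress = "192.168.100.11";  # container side (illustrative value)
    in {
      containers.nextcloud = {
        privateNetwork = true;
        inherit hostAddress localAddress;
        config = { ... }: {
          # journalbeat inside the container sends its events over this link,
          # i.e. to hostAddress:5044, where the host's Graylog Beats input listens.
        };
      };
    }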
@@ -255,9 +285,9 @@ in {

   # open ports for logging
   networking.firewall.interfaces."ve-nextcloud".allowedTCPPorts =
-    [ 11201 12304 12305 ];
+    [ 5044 12304 12305 ];
   networking.firewall.interfaces."ve-nextcloud".allowedUDPPorts =
-    [ 11201 12304 12305 ];
+    [ 5044 12304 12305 ];

   # host nginx setup
   services.nginx = {
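Both rules now open the same port list (the Beats transport itself is TCP, so the UDP 5044 entry is presumably kept only for symmetry with 12304/12305). Not part of the commit, but the duplication could be factored out, a sketch assuming the same three ports stay open on both protocols:

    let logPorts = [ 5044 12304 12305 ]; in {
      networking.firewall.interfaces."ve-nextcloud" = {
        allowedTCPPorts = logPorts;
        allowedUDPPorts = logPorts;
      };
    }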
@@ -49,9 +49,38 @@ in {

     config = { config, pkgs, lib, ... }: {

-      # send log to host systems graylog (use tinc or wireguard if host is not graylog)
-      services.SystemdJournal2Gelf.enable = true;
-      services.SystemdJournal2Gelf.graylogServer = "${hostAddress}:11201";
+      services.journalbeat = {
+        enable = true;
+        extraConfig = ''
+          journalbeat.inputs:
+          - paths: []
+          # Position to start reading from journal. Valid values: head, tail, cursor
+          seek: cursor
+          # Fallback position if no cursor data is available.
+          cursor_seek_fallback: tail
+          output.logstash:
+            # Boolean flag to enable or disable the output module.
+            enabled: true
+            # Graylog host and the beats input
+            hosts: ["${hostAddress}:5044"]
+
+            # If enabled only a subset of events in a batch of events is transferred per
+            # transaction. The number of events to be sent increases up to `bulk_max_size`
+            # if no error is encountered.
+            slow_start: true
+
+            # The number of seconds to wait before trying to reconnect to Graylog
+            # after a network error. After waiting backoff.init seconds, the Beat
+            # tries to reconnect. If the attempt fails, the backoff timer is increased
+            # exponentially up to backoff.max. After a successful connection, the backoff
+            # timer is reset. The default is 1s.
+            backoff.init: 1s
+
+            # The maximum number of seconds to wait before attempting to connect to
+            # Graylog after a network error. The default is 60s.
+            backoff.max: 60s
+        '';
+      };

       services.journald.extraConfig = "SystemMaxUse=1G";

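The unchanged `services.journald.extraConfig` line matters here because journalbeat reads the local journal: capping it keeps the container's disk use bounded while the beat catches up. For reference, a sketch of what that option turns into on disk (assuming the usual NixOS journald template, where extraConfig is appended under the [Journal] section):

    {
      # As in the diff: cap the container's persistent journal at 1 GiB.
      services.journald.extraConfig = "SystemMaxUse=1G";

      # Rendered /etc/systemd/journald.conf, roughly:
      #   [Journal]
      #   ...NixOS defaults...
      #   SystemMaxUse=1G
    }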
@@ -229,9 +258,9 @@ in {

   # open ports for logging
   networking.firewall.interfaces."ve-torrent".allowedTCPPorts =
-    [ 11201 12304 12305 ];
+    [ 5044 12304 12305 ];
   networking.firewall.interfaces."ve-torrent".allowedUDPPorts =
-    [ 11201 12304 12305 ];
+    [ 5044 12304 12305 ];

   # host nginx setup
   services.nginx = {
@@ -1,5 +1,139 @@
 { lib, ... }: {

   # send data to graylog
-  services.SystemdJournal2Gelf.enable = lib.mkDefault true;
+  services.SystemdJournal2Gelf.enable = lib.mkDefault false;
   services.SystemdJournal2Gelf.graylogServer = "workhorse.private:11201";

+  services.journalbeat = {
+    enable = true;
+    # https://docs.graylog.org/en/4.0/pages/sending/journald.html
+    extraConfig = ''
+      journalbeat.inputs:
+      # Paths that should be crawled and fetched. Possible values files and directories.
+      # When setting a directory, all journals under it are merged.
+      # When empty starts to read from local journal.
+      - paths: []
+
+      # The number of seconds to wait before trying to read again from journals.
+      #backoff: 1s
+      # The maximum number of seconds to wait before attempting to read again from journals.
+      #max_backoff: 20s
+
+      # Position to start reading from journal. Valid values: head, tail, cursor
+      seek: tail
+
+      # Fallback position if no cursor data is available.
+      #cursor_seek_fallback: tail
+
+      # Exact matching for field values of events.
+      # Matching for nginx entries: "systemd.unit=nginx"
+      #include_matches: []
+
+      output.logstash:
+        # Boolean flag to enable or disable the output module.
+        enabled: true
+
+        # Graylog host and the beats input
+        hosts: ["workhorse.private:5044"]
+
+        # Number of workers per Graylog host.
+        #worker: 1
+
+        # Set gzip compression level.
+        #compression_level: 3
+
+        # Configure escaping HTML symbols in strings.
+        #escape_html: false
+
+        # Optional maximum time to live for a connection to Graylog, after which the
+        # connection will be re-established. A value of `0s` (the default) will
+        # disable this feature.
+        #
+        # Not yet supported for async connections (i.e. with the "pipelining" option set)
+        ttl: 30s
+
+        # Optionally load-balance events between Graylog hosts. Default is false.
+        #loadbalance: false
+
+        # If enabled only a subset of events in a batch of events is transferred per
+        # transaction. The number of events to be sent increases up to `bulk_max_size`
+        # if no error is encountered.
+        slow_start: true
+
+        # The number of seconds to wait before trying to reconnect to Graylog
+        # after a network error. After waiting backoff.init seconds, the Beat
+        # tries to reconnect. If the attempt fails, the backoff timer is increased
+        # exponentially up to backoff.max. After a successful connection, the backoff
+        # timer is reset. The default is 1s.
+        backoff.init: 1s
+
+        # The maximum number of seconds to wait before attempting to connect to
+        # Graylog after a network error. The default is 60s.
+        backoff.max: 60s
+
+        # SOCKS5 proxy server URL
+        #proxy_url: socks5://user:password@socks5-server:2233
+
+        # Resolve names locally when using a proxy server. Defaults to false.
+        #proxy_use_local_resolver: false
+
+        # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
+        #ssl.enabled: true
+
+        # Configure SSL verification mode. If `none` is configured, all server hosts
+        # and certificates will be accepted. In this mode, SSL based connections are
+        # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+        # `full`.
+        #ssl.verification_mode: full
+
+        # List of supported/valid TLS versions. By default all TLS versions from 1.1
+        # up to 1.3 are enabled.
+        #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3]
+
+        # Optional SSL configuration options. SSL is off by default.
+        # List of root certificates for HTTPS server verifications
+        #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+        # Certificate for SSL client authentication
+        #ssl.certificate: "/etc/pki/client/cert.pem"
+
+        # Client certificate key
+        #ssl.key: "/etc/pki/client/cert.key"
+
+        # Optional passphrase for decrypting the Certificate Key.
+        #ssl.key_passphrase:
+
+        # Configure cipher suites to be used for SSL connections
+        #ssl.cipher_suites: []
+
+        # Configure curve types for ECDHE-based cipher suites
+        #ssl.curve_types: []
+
+        # Configure what types of renegotiation are supported. Valid options are
+        # never, once, and freely. Default is never.
+        #ssl.renegotiation: never
+
+        # Configure a pin that can be used to do extra validation of the verified certificate chain,
+        # this allow you to ensure that a specific certificate is used to validate the chain of trust.
+        #
+        # The pin is a base64 encoded string of the SHA-256 fingerprint.
+        #ssl.ca_sha256: ""
+
+        # The number of times to retry publishing an event after a publishing failure.
+        # After the specified number of retries, the events are typically dropped.
+        # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
+        # and retry until all events are published. Set max_retries to a value less
+        # than 0 to retry until all events are published. The default is 3.
+        #max_retries: 3
+
+        # The maximum number of events to bulk in a single Graylog request. The
+        # default is 2048.
+        bulk_max_size: 2048
+
+        # The number of seconds to wait for responses from the Graylog server before
+        # timing out. The default is 30s.
+        #timeout: 30s
+    '';
+  };

 }
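This shared module flips SystemdJournal2Gelf to `lib.mkDefault false` and enables journalbeat on every machine that imports it. A hypothetical usage sketch; the import path is made up for illustration, only the option names come from the diff:

    {
      # Shared logging module from this repo (path is an assumption for the sketch).
      imports = [ ./modules/graylog-client.nix ];

      # Because the shared module uses lib.mkDefault, a single host can still opt
      # back into the old GELF/UDP path without editing the shared file:
      # services.SystemdJournal2Gelf.enable = true;
    }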
@@ -6,7 +6,7 @@ with builtins; {
   resource.graylog_index_set.default = let
     maxIndexSize = 200;
     maxIndexCount = 20;
-    isDefault = false;
+    isDefault = true;
   in {
     title = "default";
     description = ''
@@ -18,7 +18,7 @@ with builtins; {
     }MB of logs!
     '';
     default = isDefault;
-    index_prefix = "test-graylog";
+    index_prefix = "graylog";
     rotation_strategy_class =
       "org.graylog2.indexer.rotation.strategies.SizeBasedRotationStrategy";
     retention_strategy_class =
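With size-based rotation, the description string above is derived from the two `let` bindings: each index rotates at maxIndexSize MB and at most maxIndexCount indices are retained. A small Nix sketch of that arithmetic; the surrounding wording is assumed, since the hunk only shows the tail of the interpolated string:

    let
      maxIndexSize = 200;   # MB per index, as in the diff
      maxIndexCount = 20;   # indices kept, as in the diff
    in
      # toString (maxIndexSize * maxIndexCount) == "4000"
      "this index set keeps at most ${toString (maxIndexSize * maxIndexCount)}MB of logs!"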
@@ -4,18 +4,56 @@ with builtins; {

   resource = {

-    graylog_input.journald = {
-      title = "Journald Logs";
-      # https://javadoc.io/doc/org.graylog2/graylog2-inputs/latest/index.html
-      type = "org.graylog2.inputs.gelf.udp.GELFUDPInput";
-      global = true;
-      attributes = toJSON ({
-        bind_address = "0.0.0.0";
-        decompress_size_limit = 8388608;
-        number_worker_threads = 4;
-        port = 11201;
-        recv_buffer_size = 262144;
-      });
+    graylog_input = {
+      journald = {
+        title = "Journald Logs";
+        # https://javadoc.io/doc/org.graylog2/graylog2-inputs/latest/index.html
+        type = "org.graylog2.inputs.gelf.udp.GELFUDPInput";
+        global = true;
+        attributes = toJSON ({
+          bind_address = "0.0.0.0";
+          decompress_size_limit = 8388608;
+          number_worker_threads = 4;
+          port = 11201;
+          recv_buffer_size = 262144;
+        });
+      };
+      journalbeat = {
+        title = "Journalbeat Logs";
+        # https://javadoc.io/doc/org.graylog2/graylog2-inputs/latest/index.html
+        type = "org.graylog.plugins.beats.Beats2Input";
+        global = true;
+        attributes = toJSON ({
+          bind_address = "0.0.0.0";
+          no_beats_prefix = true;
+          number_worker_threads = 4;
+          port = 5044;
+          recv_buffer_size = 1048576;
+          tcp_keepalive = false;
+          tls_cert_file = "";
+          tls_client_auth = "disabled";
+          tls_client_auth_cert_file = "";
+          tls_enable = false;
+          tls_key_file = "";
+          tls_key_password = "";
+        });
+      };
+    };
+
+    graylog_input_static_fields.journald = {
+      input_id = "\${graylog_input.journald.id}";
+      fields = {
+        from_journald = true;
+        systemdjournal2gelf = true;
+      };
+    };
+
+    graylog_input_static_fields.journalbeat = {
+      input_id = "\${graylog_input.journalbeat.id}";
+      fields = {
+        from_journald = true;
+        journalbeat = true;
+      };
     };

     graylog_stream.journald = {
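The references such as "\${graylog_input.journalbeat.id}" are written with a backslash so that Nix leaves the ${...} untouched and the Graylog Terraform provider resolves it at apply time. A minimal sketch of the difference; the attribute names here are illustrative:

    let graylogPort = 5044; in {
      # Nix interpolates this one at evaluation time:
      nix_interpolated = "port ${toString graylogPort}";         # -> "port 5044"

      # The backslash keeps it literal, so Terraform sees the reference and
      # substitutes the real input id when the plan is applied:
      terraform_reference = "\${graylog_input.journalbeat.id}";
    }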
@@ -35,75 +73,27 @@ with builtins; {
       inverted = false;
     };

-    graylog_input_static_fields.journald = {
-      input_id = "\${graylog_input.journald.id}";
-      fields = { from_journald = true; };
-    };
-
     graylog_pipeline_connection = {
       journald = {
         stream_id = "\${graylog_stream.journald.id}";
         pipeline_ids = [
-          "\${graylog_pipeline.journald_fix_loglevel.id}"
+          #"\${graylog_pipeline.journald_fix_loglevel.id}"
           "\${graylog_pipeline.journald_iptable_parse.id}"
-          "\${graylog_pipeline.journald_loglevel_int_to_str.id}"
+          #"\${graylog_pipeline.journald_loglevel_int_to_str.id}"
         ];
       };
     };

     graylog_pipeline = {
-      journald_fix_loglevel.source = ''
-        pipeline "journald : fix loglevel"
-        stage 0 match either
-        rule "journald : lookup log level"
-        stage 1 match either
-        rule "journald : replace log level"
-        end
-      '';
-
       journald_iptable_parse.source = ''
         pipeline "journald : ip table parse"
         stage 0 match either
         rule "journald : iptables split"
         end
       '';

-      journald_loglevel_int_to_str.source = ''
-        pipeline "journald : loglevel int to str"
-        stage 9 match either
-        rule "journald : int to str"
-        end
-      '';
     };

     graylog_pipeline_rule = {
-      loglevelLookup.source = ''
-        rule "journald : lookup log level"
-        when
-          has_field("level")
-        then
-          let lookup = lookup_value("systemd-log-level-reverse",$message.level);
-          set_field("level_fix",lookup);
-        end
-      '';
-      loglevelReplace.source = ''
-        rule "journald : replace log level"
-        when
-          has_field("level_fix")
-        then
-          set_field("level",$message.level_fix);
-        end
-      '';
-      loglevelIntToStr.source = ''
-        rule "journald : int to str"
-        when
-          has_field("level")
-        then
-          let lookup = lookup_value("systemd_log_level",$message.level);
-          set_field("level_type",lookup);
-        end
-      '';
-
       iptableSplit.source = ''
         rule "journald : iptables split"
         when
@@ -3,10 +3,7 @@ with builtins; {
   resource = {

     graylog_pipeline_connection = {
-      nextcloud = {
-        stream_id = "\${graylog_stream.journald.id}";
-        pipeline_ids = [ "\${graylog_pipeline.nextcloud.id}" ];
-      };
+      journald.pipeline_ids = [ "\${graylog_pipeline.nextcloud.id}" ];
     };

     graylog_pipeline = {
@@ -14,6 +11,8 @@ with builtins; {
         pipeline "nextcloud : parsing"
         stage 10 match either
         rule "nextcloud : parse level 1"
+        stage 11 match either
+        rule "nextcloud : parse level 2"
         end
       '';
     };
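The stage number is what makes the second rule work: Graylog runs pipeline stages in ascending order, and rules in a later stage see fields set by earlier stages, so "parse level 2" at stage 11 can read the nextcloud_message field that stage 10 creates. A minimal illustration in the same terranix shape (names here are made up, not from the diff):

    {
      graylog_pipeline.example.source = ''
        pipeline "example : two stages"
        stage 10 match either
        rule "produce a field"
        stage 11 match either
        rule "consume that field"
        end
      '';
    }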
@@ -28,6 +27,15 @@ with builtins; {
           set_fields(to_map(parsedJson),"nextcloud_");
         end
       '';
+      nextcloudLevel2.source = ''
+        rule "nextcloud : parse level 2"
+        when
+          has_field("nextcloud_message")
+        then
+          let parsedJson = parse_json(to_string($message.nextcloud_message));
+          set_fields(to_map(parsedJson),"nextcloud_message_");
+        end
+      '';
     };

   };
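Taken together, the two rules unwrap Nextcloud's nested JSON logging: level 1 parses the outer log line into nextcloud_* fields, and level 2 parses the JSON that Nextcloud sometimes puts into its message field into nextcloud_message_* fields. A hypothetical example of the resulting fields; the concrete field names and values are assumptions for illustration, only the prefixes come from the rules above:

    {
      # set by "nextcloud : parse level 1" from the JSON log line
      nextcloud_level = 3;
      nextcloud_app = "core";
      nextcloud_message = ''{"Exception":"RuntimeException","Message":"something broke"}'';

      # set by "nextcloud : parse level 2" from the nested JSON above
      nextcloud_message_Exception = "RuntimeException";
      nextcloud_message_Message = "something broke";
    }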
Binary file not shown.