Compare commits
de068f6d0d ... 69f6794dae (4 commits)

Author | SHA1 | Date
---|---|---
Tristan Daniël Maat | 69f6794dae |
Tristan Daniël Maat | 8dc5e13363 |
Tristan Daniël Maat | 2413eb32fb |
Tristan Daniël Maat | 2f2292c376 |
@@ -14,9 +14,12 @@
    "${modulesPath}/profiles/minimal.nix"
    (import ../modules)

    ./services/backups.nix
    ./services/conduit.nix
    ./services/fail2ban.nix
    ./services/foundryvtt.nix
    ./services/gitea.nix
    ./services/metrics
    ./services/nextcloud.nix
    ./services/webserver.nix
    ./services/wireguard.nix
@@ -135,34 +138,45 @@
    recommendedProxySettings = true;
    clientMaxBodySize = "10G";
    domain = "tlater.net";

    statusPage = true; # For metrics, should be accessible only from localhost

    commonHttpConfig = ''
      log_format upstream_time '$remote_addr - $remote_user [$time_local] '
        '"$request" $status $body_bytes_sent '
        '"$http_referer" "$http_user_agent" '
        'rt=$request_time uct="$upstream_connect_time" '
        'uht="$upstream_header_time" urt="$upstream_response_time"';
    '';
  };

  services.logrotate = {
    enable = true;

    settings = lib.mapAttrs' (virtualHost: _:
      lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
        frequency = "daily";
        rotate = 2;
        compress = true;
        delaycompress = true;
        su = "${config.services.nginx.user} ${config.services.nginx.group}";
        postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
      })
    config.services.nginx.virtualHosts;
  };
  systemd.tmpfiles.rules =
    lib.mapAttrsToList (
      virtualHost: _:
      #
        "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
    )
    config.services.nginx.virtualHosts;

  security.acme = {
    defaults.email = "tm@tlater.net";
    acceptTerms = true;
  };

  services.fail2ban = {
    enable = true;
    extraPackages = [pkgs.ipset];
    banaction = "iptables-ipset-proto6-allports";
    bantime-increment.enable = true;

    jails = {
      nginx-botsearch = ''
        enabled = true
        logpath = /var/log/nginx/access.log
      '';
    };

    ignoreIP = [
      "127.0.0.0/8"
      "10.0.0.0/8"
      "172.16.0.0/12"
      "192.168.0.0/16"
    ];
  };

  # Remove some unneeded packages
  environment.defaultPackages = [];
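For a single virtual host, say a hypothetical example.com, the mapAttrs' call above boils down to one logrotate entry like the following (a sketch; only the host name is made up):

  services.logrotate.settings."/var/log/nginx/example.com/access.log" = {
    frequency = "daily";
    rotate = 2;
    compress = true;
    delaycompress = true;
    su = "${config.services.nginx.user} ${config.services.nginx.group}";
    postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
  };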
configuration/services/backups.nix (new file, 229 lines)
@@ -0,0 +1,229 @@
{
  config,
  pkgs,
  lib,
  ...
}: let
  inherit (lib) types optional singleton;
  mkShutdownScript = service:
    pkgs.writeShellScript "backup-${service}-shutdown" ''
      if systemctl is-active --quiet '${service}'; then
        touch '/tmp/${service}-was-active'
        systemctl stop '${service}'
      fi
    '';
  mkRestartScript = service:
    pkgs.writeShellScript "backup-${service}-restart" ''
      if [ -f '/tmp/${service}-was-active' ]; then
        rm '/tmp/${service}-was-active'
        systemctl start '${service}'
      fi
    '';
  writeScript = name: packages: text:
    lib.getExe (pkgs.writeShellApplication {
      inherit name text;
      runtimeInputs = packages;
    });
in {
  options = {
    services.backups = lib.mkOption {
      description = lib.mdDoc ''
        Configure restic backups with a specific tag.
      '';
      type = types.attrsOf (types.submodule ({
        config,
        name,
        ...
      }: {
        options = {
          user = lib.mkOption {
            type = types.str;
            description = ''
              The user as which to run the backup.
            '';
          };
          paths = lib.mkOption {
            type = types.listOf types.str;
            description = ''
              The paths to back up.
            '';
          };
          tag = lib.mkOption {
            type = types.str;
            description = ''
              The restic tag to mark the backup with.
            '';
            default = name;
          };
          preparation = {
            packages = lib.mkOption {
              type = types.listOf types.package;
              default = [];
              description = ''
                The list of packages to make available in the
                preparation script.
              '';
            };
            text = lib.mkOption {
              type = types.nullOr types.str;
              default = null;
              description = ''
                The preparation script to run before the backup.

                This should include things like database dumps and
                enabling maintenance modes. If a service needs to be
                shut down for backups, use `pauseServices` instead.
              '';
            };
          };
          cleanup = {
            packages = lib.mkOption {
              type = types.listOf types.package;
              default = [];
              description = ''
                The list of packages to make available in the
                cleanup script.
              '';
            };
            text = lib.mkOption {
              type = types.nullOr types.str;
              default = null;
              description = ''
                The cleanup script to run after the backup.

                This should do things like cleaning up database dumps
                and disabling maintenance modes.
              '';
            };
          };
          pauseServices = lib.mkOption {
            type = types.listOf types.str;
            default = [];
            description = ''
              The systemd services that need to be shut down before
              the backup can run. Services will be restarted after the
              backup is complete.

              This is intended to be used for services that do not
              support hot backups.
            '';
          };
        };
      }));
    };
  };

  config = lib.mkIf (config.services.backups != {}) {
    systemd.services =
      {
        restic-prune = {
          # Doesn't hurt to finish the ongoing prune
          restartIfChanged = false;

          environment = {
            RESTIC_PASSWORD_FILE = config.sops.secrets."restic/local-backups".path;
            RESTIC_REPOSITORY = "/var/lib/backups/";
            RESTIC_CACHE_DIR = "%C/restic-prune";
          };

          path = with pkgs; [
            restic
          ];

          script = ''
            # TODO(tlater): In an append-only setup, we should be
            # careful with this; an attacker could delete backups by
            # simply appending ad infinitum:
            # https://restic.readthedocs.io/en/stable/060_forget.html#security-considerations-in-append-only-mode
            restic forget --keep-last 3 --prune
            restic check
          '';

          serviceConfig = {
            DynamicUser = true;
            Group = "backup";

            CacheDirectory = "restic-prune";
            CacheDirectoryMode = "0700";
            ReadWritePaths = "/var/lib/backups/";

            # Ensure we don't leave behind any files with the
            # temporary UID of this service.
            ExecStopPost = "+${pkgs.coreutils}/bin/chown -R root:backup /var/lib/backups/";
          };
        };
      }
      // lib.mapAttrs' (name: backup:
        lib.nameValuePair "backup-${name}" {
          # Don't want to restart mid-backup
          restartIfChanged = false;

          environment = {
            RESTIC_CACHE_DIR = "%C/backup-${name}";
            RESTIC_PASSWORD_FILE = config.sops.secrets."restic/local-backups".path;
            # TODO(tlater): If I ever add more than one repo, service
            # shutdown/restarting will potentially break if multiple
            # backups for the same service overlap. A more clever
            # sentinel file with reference counts would probably solve
            # this.
            RESTIC_REPOSITORY = "/var/lib/backups/";
          };

          serviceConfig = {
            User = backup.user;
            Group = "backup";
            RuntimeDirectory = "backup-${name}";
            CacheDirectory = "backup-${name}";
            CacheDirectoryMode = "0700";
            PrivateTmp = true;

            ExecStart = [
              (lib.concatStringsSep " " (["${pkgs.restic}/bin/restic" "backup" "--tag" name] ++ backup.paths))
            ];

            ExecStartPre =
              map (service: "+${mkShutdownScript service}") backup.pauseServices
              ++ singleton (writeScript "backup-${name}-repo-init" [pkgs.restic pkgs.coreutils] ''
                restic snapshots || (restic init && chmod -R g+rwx "$RESTIC_REPOSITORY"/*)
              '')
              ++ optional (backup.preparation.text != null)
              (writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);

            # TODO(tlater): Add repo pruning/checking
            ExecStopPost =
              map (service: "+${mkRestartScript service}") backup.pauseServices
              ++ optional (backup.cleanup.text != null)
              (writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
          };
        })
      config.services.backups;

    systemd.timers =
      {
        restic-prune = {
          wantedBy = ["timers.target"];
          timerConfig.OnCalendar = "Thursday 03:00:00 UTC";
          # Don't make this persistent, in case the server was offline
          # for a while. This job cannot run at the same time as any
          # of the backup jobs.
        };
      }
      // lib.mapAttrs' (name: backup:
        lib.nameValuePair "backup-${name}" {
          wantedBy = ["timers.target"];
          timerConfig = {
            OnCalendar = "Wednesday 02:30:00 UTC";
            RandomizedDelaySec = "1h";
            FixedRandomDelay = true;
            Persistent = true;
          };
        })
      config.services.backups;

    users.groups.backup = {};

    systemd.tmpfiles.rules = [
      "d /var/lib/backups/ 0770 root backup"
    ];
  };
}
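A minimal consumer of the new services.backups option looks like the service modules further down in this diff; a sketch with a hypothetical service name:

  services.backups.myservice = {
    user = "root";                          # hypothetical user
    paths = ["/var/lib/myservice/"];        # hypothetical path
    # Only needed when the service cannot be backed up hot:
    pauseServices = ["myservice.service"];
  };

This yields a backup-myservice service/timer pair alongside the shared restic-prune unit defined above.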
@@ -173,6 +173,9 @@ in {
      # Various other security settings
      no-tlsv1
      no-tlsv1_1

      # Monitoring
      prometheus
    '';
  };

@@ -205,6 +208,7 @@ in {
    addSSL = true;
    extraConfig = ''
      merge_slashes off;
      access_log /var/log/nginx/${domain}/access.log upstream_time;
    '';

    locations = {

@@ -231,4 +235,14 @@ in {
      };
    };
  };

  services.backups.conduit = {
    user = "root";
    paths = [
      "/var/lib/private/matrix-conduit/"
    ];
    # Other services store their data in conduit, so no other services
    # need to be shut down currently.
    pauseServices = ["conduit.service"];
  };
}
configuration/services/fail2ban.nix (new file, 42 lines)
@@ -0,0 +1,42 @@
{pkgs, ...}: {
  services.fail2ban = {
    enable = true;
    extraPackages = [pkgs.ipset];
    banaction = "iptables-ipset-proto6-allports";
    bantime-increment.enable = true;

    jails = {
      nginx-botsearch = ''
        enabled = true
        logpath = /var/log/nginx/access.log
      '';
    };

    ignoreIP = [
      "127.0.0.0/8"
      "10.0.0.0/8"
      "172.16.0.0/12"
      "192.168.0.0/16"
    ];
  };

  # Allow metrics services to connect to the socket as well
  users.groups.fail2ban = {};
  systemd.services.fail2ban.serviceConfig = {
    ExecStartPost =
      "+"
      + (pkgs.writeShellScript "fail2ban-post-start" ''
        while ! [ -S /var/run/fail2ban/fail2ban.sock ]; do
          sleep 1
        done

        while ! ${pkgs.netcat}/bin/nc -zU /var/run/fail2ban/fail2ban.sock; do
          sleep 1
        done

        ${pkgs.coreutils}/bin/chown root:fail2ban /var/run/fail2ban /var/run/fail2ban/fail2ban.sock
        ${pkgs.coreutils}/bin/chmod 660 /var/run/fail2ban/fail2ban.sock
        ${pkgs.coreutils}/bin/chmod 710 /var/run/fail2ban
      '');
  };
}
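The socket chown/chmod above exists so that members of the new fail2ban group can reach the fail2ban control socket; a consumer opts in roughly like this (a sketch - the metrics module later in this diff does the equivalent through the exporter's serviceOpts):

  systemd.services.prometheus-fail2ban-exporter.serviceConfig.Group = "fail2ban";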
@@ -8,11 +8,11 @@
 in {
   imports = [flake-inputs.foundryvtt.nixosModules.foundryvtt];

-  services.foundryvtt = {
-    enable = true;
-    hostName = domain;
-    minifyStaticFiles = true;
-  };
+  # services.foundryvtt = {
+  #   enable = true;
+  #   hostName = domain;
+  #   minifyStaticFiles = true;
+  # };

   # Want to start it manually when I need it, not have it constantly
   # running

@@ -25,6 +25,7 @@ in {
    enableACME = true;
    extraConfig = ''
      add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
      access_log /var/log/nginx/${domain}/access.log upstream_time;
    '';

    locations."/" = {
@@ -1,4 +1,9 @@
-{config, ...}: let
+{
+  pkgs,
+  config,
+  lib,
+  ...
+}: let
   domain = "gitea.${config.services.nginx.domain}";
 in {
   services.gitea = {

@@ -15,11 +20,23 @@ in {
        SSH_PORT = 2222;
      };

      metrics = {
        ENABLED = true;
        TOKEN = "#metricstoken#";
      };
      service.DISABLE_REGISTRATION = true;
      session.COOKIE_SECURE = true;
    };
  };

  systemd.services.gitea.serviceConfig.ExecStartPre = let
    replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
    secretPath = config.sops.secrets."gitea/metrics-token".path;
    runConfig = "${config.services.gitea.customDir}/conf/app.ini";
  in [
    "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
  ];

  # Set up SSL
  services.nginx.virtualHosts."${domain}" = let
    httpAddress = config.services.gitea.settings.server.HTTP_ADDR;

@@ -29,9 +46,18 @@ in {
    enableACME = true;
    extraConfig = ''
      add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
      access_log /var/log/nginx/${domain}/access.log upstream_time;
    '';

    locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
    locations."/metrics" = {
      extraConfig = ''
        access_log off;
        allow 127.0.0.1;
        ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
        deny all;
      '';
    };
  };

  # Block repeated failed login attempts

@@ -52,4 +78,24 @@ in {
      enabled = true
    '';
  };

  services.backups.gitea = {
    user = "gitea";
    paths = [
      "/var/lib/gitea/gitea-db.sql"
      "/var/lib/gitea/repositories/"
      "/var/lib/gitea/data/"
      "/var/lib/gitea/custom/"
      # Conf is backed up via nix
    ];
    preparation = {
      packages = [config.services.postgresql.package];
      text = "pg_dump ${config.services.gitea.database.name} --file=/var/lib/gitea/gitea-db.sql";
    };
    cleanup = {
      packages = [pkgs.coreutils];
      text = "rm /var/lib/gitea/gitea-db.sql";
    };
    pauseServices = ["gitea.service"];
  };
}
configuration/services/metrics/default.nix (new file, 9 lines)
@@ -0,0 +1,9 @@
{
  imports = [
    ./options.nix

    ./exporters.nix
    ./grafana.nix
    ./victoriametrics.nix
  ];
}
configuration/services/metrics/exporters.nix (new file, 101 lines)
@@ -0,0 +1,101 @@
{
  config,
  pkgs,
  lib,
  ...
}: let
  yaml = pkgs.formats.yaml {};
in {
  services.prometheus = {
    exporters = {
      # Periodically check domain registration status
      domain = {
        enable = true;
        listenAddress = "127.0.0.1";
        extraFlags = let
          conf.domains = [
            "tlater.net"
            "tlater.com"
          ];
        in [
          "--config=${yaml.generate "domains.yml" conf}"
        ];
      };

      # System statistics
      node = {
        enable = true;
        listenAddress = "127.0.0.1";
      };
      systemd = {
        enable = true;
        listenAddress = "127.0.0.1";
        extraFlags = [
          # Disabled by default because only supported from systemd 235+
          "--systemd.collector.enable-restart-count"
          "--systemd.collector.enable-ip-accounting"
        ];
      };

      # Various nginx metrics
      nginx = {
        enable = true;
        listenAddress = "127.0.0.1";
      };

      nginxlog = {
        enable = true;
        listenAddress = "127.0.0.1";
        group = "nginx";

        settings.namespaces =
          lib.mapAttrsToList (name: virtualHost: {
            inherit name;
            metrics_override.prefix = "nginxlog";
            namespace_label = "vhost";

            format = lib.concatStringsSep " " [
              "$remote_addr - $remote_user [$time_local]"
              ''"$request" $status $body_bytes_sent''
              ''"$http_referer" "$http_user_agent"''
              ''rt=$request_time uct="$upstream_connect_time"''
              ''uht="$upstream_header_time" urt="$upstream_response_time"''
            ];

            source.files = [
              "/var/log/nginx/${name}/access.log"
            ];
          })
          config.services.nginx.virtualHosts;
      };
    };

    extraExporters = {
      fail2ban = let
        cfg = config.services.prometheus.extraExporters.fail2ban;
      in {
        port = 9191;
        serviceOpts = {
          after = ["fail2ban.service"];
          requires = ["fail2ban.service"];
          serviceConfig = {
            Group = "fail2ban";
            RestrictAddressFamilies = ["AF_UNIX" "AF_INET" "AF_INET6"];
            ExecStart = lib.concatStringsSep " " [
              "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
              "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
              "--web.listen-address='${cfg.listenAddress}:${toString cfg.port}'"
              "--collector.f2b.exit-on-socket-connection-error=true"
            ];
          };
        };
      };
    };

    # TODO(tlater):
    # - wireguard (?)
    # - postgres (?)
    # - blackbox (?) (curl to see if http and similar is up)
    # - ssl_exporter (?)
  };
}
configuration/services/metrics/grafana.nix (new file, 48 lines)
@@ -0,0 +1,48 @@
{config, ...}: let
  domain = "metrics.${config.services.nginx.domain}";
in {
  services.grafana = {
    enable = true;
    settings = {
      server.http_port = 3001; # Default overlaps with gitea

      security = {
        admin_user = "tlater";
        admin_password = "$__file{${config.sops.secrets."grafana/adminPassword".path}}";
        secret_key = "$__file{${config.sops.secrets."grafana/secretKey".path}}";
        cookie_secure = true;
        cookie_samesite = "strict";
        content_security_policy = true;
      };

      database = {
        user = "grafana";
        name = "grafana";
        type = "postgres";
        host = "/run/postgresql";
      };
    };

    provision = {
      enable = true;

      datasources.settings.datasources = [
        {
          name = "Victoriametrics - tlater.net";
          url = "http://localhost:8428";
          type = "prometheus";
        }
      ];
    };
  };

  services.nginx.virtualHosts."${domain}" = {
    forceSSL = true;
    enableACME = true;
    extraConfig = ''
      add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
      access_log /var/log/nginx/${domain}/access.log upstream_time;
    '';
    locations."/".proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
  };
}
configuration/services/metrics/options.nix (new file, 204 lines)
@@ -0,0 +1,204 @@
{
  pkgs,
  config,
  lib,
  ...
}: let
  inherit (lib) types mkOption mkDefault;
  yaml = pkgs.formats.yaml {};
in {
  options = {
    services.prometheus = {
      extraExporters = mkOption {
        type = types.attrsOf (types.submodule {
          options = {
            port = mkOption {
              type = types.int;
              description = "The port on which this exporter listens.";
            };
            listenAddress = mkOption {
              type = types.str;
              default = "127.0.0.1";
              description = "Address to listen on.";
            };
            serviceOpts = mkOption {
              type = types.attrs;
              description = "An attrset to be merged with the exporter's systemd service.";
            };
          };
        });
      };
    };

    services.victoriametrics.scrapeConfigs = mkOption {
      type = types.attrsOf (types.submodule ({
        name,
        self,
        ...
      }: {
        options = {
          job_name = mkOption {
            type = types.str;
            default = name;
          };

          extraSettings = mkOption {
            type = types.anything;
            description = ''
              Other settings to set for this scrape config.
            '';
            default = {};
          };

          targets = mkOption {
            type = types.listOf types.str;
            description = lib.mdDoc ''
              Addresses scrape targets for this config listen on.

              Shortcut for `static_configs = lib.singleton {targets = [<targets>];}`
            '';
            default = [];
          };

          static_configs = mkOption {
            default = [];
            type = types.listOf (types.submodule {
              options = {
                targets = mkOption {
                  type = types.listOf types.str;
                  description = lib.mdDoc ''
                    The addresses scrape targets for this config listen on.

                    Must in `listenAddress:port` format.
                  '';
                };
                labels = mkOption {
                  type = types.attrsOf types.str;
                  description = lib.mdDoc ''
                    Labels to apply to all targets defined for this static config.
                  '';
                  default = {};
                };
              };
            });
          };
        };
      }));
    };
  };

  config = {
    systemd.services = lib.mkMerge [
      (lib.mapAttrs' (name: exporter:
        lib.nameValuePair "prometheus-${name}-exporter" (lib.mkMerge [
          {
            # Shamelessly copied from upstream because the upstream
            # module is an intractable mess
            wantedBy = ["multi-user.target"];
            after = ["network.target"];
            serviceConfig.Restart = mkDefault "always";
            serviceConfig.PrivateTmp = mkDefault true;
            serviceConfig.WorkingDirectory = mkDefault /tmp;
            serviceConfig.DynamicUser = mkDefault true;
            # Hardening
            serviceConfig.CapabilityBoundingSet = mkDefault [""];
            serviceConfig.DeviceAllow = [""];
            serviceConfig.LockPersonality = true;
            serviceConfig.MemoryDenyWriteExecute = true;
            serviceConfig.NoNewPrivileges = true;
            serviceConfig.PrivateDevices = mkDefault true;
            serviceConfig.ProtectClock = mkDefault true;
            serviceConfig.ProtectControlGroups = true;
            serviceConfig.ProtectHome = true;
            serviceConfig.ProtectHostname = true;
            serviceConfig.ProtectKernelLogs = true;
            serviceConfig.ProtectKernelModules = true;
            serviceConfig.ProtectKernelTunables = true;
            serviceConfig.ProtectSystem = mkDefault "strict";
            serviceConfig.RemoveIPC = true;
            serviceConfig.RestrictAddressFamilies = ["AF_INET" "AF_INET6"];
            serviceConfig.RestrictNamespaces = true;
            serviceConfig.RestrictRealtime = true;
            serviceConfig.RestrictSUIDSGID = true;
            serviceConfig.SystemCallArchitectures = "native";
            serviceConfig.UMask = "0077";
          }
          exporter.serviceOpts
        ]))
      config.services.prometheus.extraExporters)

      {
        vmagent-scrape-exporters = let
          listenAddress = config.services.victoriametrics.listenAddress;
          vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
          promscrape = yaml.generate "prometheus.yml" {
            scrape_configs = lib.mapAttrsToList (_: scrape:
              lib.recursiveUpdate {
                inherit (scrape) job_name;
                static_configs =
                  scrape.static_configs
                  ++ lib.optional (scrape.targets != []) {targets = scrape.targets;};
              }
              scrape.extraSettings)
            config.services.victoriametrics.scrapeConfigs;
          };
        in {
          enable = true;
          path = [pkgs.victoriametrics];
          wantedBy = ["multi-user.target"];
          after = ["network.target" "victoriametrics.service"];
          serviceConfig = {
            ExecStart = [
              (lib.concatStringsSep " " [
                "${pkgs.victoriametrics}/bin/vmagent"
                "-promscrape.config=${promscrape}"
                "-remoteWrite.url=http://${vmAddr}/api/v1/write"
                "-remoteWrite.tmpDataPath=%t/vmagent"
              ])
            ];
            SupplementaryGroups = "metrics";

            DynamicUser = true;
            RuntimeDirectory = "vmagent";
            CapabilityBoundingSet = [""];
            DeviceAllow = [""];
            LockPersonality = true;
            MemoryDenyWriteExecute = true;
            NoNewPrivileges = true;
            PrivateDevices = true;
            ProtectClock = true;
            ProtectControlGroups = true;
            ProtectHome = true;
            ProtectHostname = true;
            ProtectKernelLogs = true;
            ProtectKernelModules = true;
            ProtectKernelTunables = true;
            ProtectSystem = "strict";
            RemoveIPC = true;
            RestrictAddressFamilies = ["AF_INET" "AF_INET6"];
            RestrictNamespaces = true;
            RestrictRealtime = true;
            RestrictSUIDSGID = true;
            SystemCallArchitectures = "native";
            UMask = "0077";
          };
        };
      }
    ];

    users.groups.metrics = {};

    services.victoriametrics.scrapeConfigs = let
      allExporters =
        lib.mapAttrs (name: exporter: {
          inherit (exporter) listenAddress port;
        }) ((lib.filterAttrs (_: exporter: builtins.isAttrs exporter && exporter.enable)
          config.services.prometheus.exporters)
        // config.services.prometheus.extraExporters);
    in
      lib.mapAttrs (_: exporter: {
        targets = ["${exporter.listenAddress}:${toString exporter.port}"];
      })
      allExporters;
  };
}
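Taken together, the two option trees defined here are consumed elsewhere in this diff roughly as follows (the exporter name, port and package are hypothetical; the coturn scrape target is the real one from victoriametrics.nix):

  # Registering an extra exporter wraps it in the hardened unit above and
  # automatically adds it as a vmagent scrape target.
  services.prometheus.extraExporters.some-exporter = {
    port = 9999; # hypothetical
    serviceOpts.serviceConfig.ExecStart = "${pkgs.some-exporter}/bin/some-exporter"; # hypothetical package
  };

  # Scraping an arbitrary address directly:
  services.victoriametrics.scrapeConfigs.coturn.targets = ["127.0.0.1:9641"];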
configuration/services/metrics/victoriametrics.nix (new file, 13 lines)
@@ -0,0 +1,13 @@
{config, ...}: {
  config.services.victoriametrics = {
    enable = true;

    scrapeConfigs = {
      gitea = {
        targets = ["127.0.0.1:${toString config.services.gitea.settings.server.HTTP_PORT}"];
        extraSettings.authorization.credentials_file = config.sops.secrets."gitea/metrics-token".path;
      };
      coturn.targets = ["127.0.0.1:9641"];
    };
  };
}
@@ -50,6 +50,9 @@ in {
  services.nginx.virtualHosts."${hostName}" = {
    forceSSL = true;
    enableACME = true;
    extraConfig = ''
      access_log /var/log/nginx/${hostName}/access.log upstream_time;
    '';
  };

  # Block repeated failed login attempts

@@ -74,4 +77,33 @@ in {
      bantime = 86400
    '';
  };

  services.backups.nextcloud = {
    user = "nextcloud";
    paths = [
      "/var/lib/nextcloud/nextcloud-db.sql"
      "/var/lib/nextcloud/data/"
      "/var/lib/nextcloud/config/config.php"
    ];
    preparation = {
      packages = [
        config.services.postgresql.package
        config.services.nextcloud.occ
      ];
      text = ''
        nextcloud-occ maintenance:mode --on
        pg_dump ${config.services.nextcloud.config.dbname} --file=/var/lib/nextcloud/nextcloud-db.sql
      '';
    };
    cleanup = {
      packages = [
        pkgs.coreutils
        config.services.nextcloud.occ
      ];
      text = ''
        rm /var/lib/nextcloud/nextcloud-db.sql
        nextcloud-occ maintenance:mode --off
      '';
    };
  };
}
@@ -16,6 +16,12 @@
    # that operation needs to be performed manually on the system as
    # well.
    ensureUsers = [
      {
        name = "grafana";
        ensurePermissions = {
          "DATABASE grafana" = "ALL PRIVILEGES";
        };
      }
      {
        name = "nextcloud";
        ensurePermissions = {

@@ -25,6 +31,7 @@
    ];

    ensureDatabases = [
      "grafana"
      "nextcloud"
    ];
  };
@@ -110,4 +110,12 @@ in {
      # ProtectHome = "read-only"; # See further up
    };
  };

  services.backups.starbound = {
    user = "root";
    paths = [
      "/var/lib/private/starbound/storage/universe/"
    ];
    pauseServices = ["starbound.service"];
  };
}
@@ -19,6 +19,7 @@ in {
    enableACME = true;
    extraConfig = ''
      add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
      access_log /var/log/nginx/${domain}/access.log upstream_time;
    '';

    locations."/".proxyPass = "http://${addr}:${toString port}";
@@ -3,22 +3,44 @@
   defaultSopsFile = ../keys/production.yaml;

   secrets = {
+    # Gitea
+    "gitea/metrics-token" = {
+      owner = "gitea";
+      group = "metrics";
+      mode = "0440";
+    };
+
+    # Grafana
+    "grafana/adminPassword" = {
+      owner = "grafana";
+      group = "grafana";
+    };
+    "grafana/secretKey" = {
+      owner = "grafana";
+      group = "grafana";
+    };
+
+    # Heisenbridge
+    "heisenbridge/as-token" = {};
+    "heisenbridge/hs-token" = {};
+
+    # Nextcloud
     "nextcloud/tlater" = {
       owner = "nextcloud";
       group = "nextcloud";
     };

-    "steam/tlater" = {};
-
-    "heisenbridge/as-token" = {};
-    "heisenbridge/hs-token" = {};
-
-    "wireguard/server-key" = {
+    # Restic
+    "restic/local-backups" = {
       owner = "root";
-      group = "systemd-network";
+      group = "backup";
       mode = "0440";
     };
+
+    # Steam
+    "steam/tlater" = {};
+
+    # Turn
+    "turn/env" = {};
     "turn/secret" = {
       owner = "turnserver";

@@ -29,6 +51,13 @@
     "turn/ssl-cert" = {
       owner = "turnserver";
     };
+
+    # Wireguard
+    "wireguard/server-key" = {
+      owner = "root";
+      group = "systemd-network";
+      mode = "0440";
+    };
   };
 };
}
flake.nix
@@ -78,7 +78,7 @@
   # Utility scripts #
   ###################
   packages.${system} = let
-    inherit (nixpkgs.legacyPackages.${system}) writeShellScript;
+    inherit (nixpkgs.legacyPackages.${system}) writeShellScript writeShellScriptBin;
     vm = nixpkgs.lib.nixosSystem {
       inherit system;
       specialArgs.flake-inputs = inputs;

@@ -106,6 +106,14 @@
       "${vm.config.system.build.vm}/bin/run-tlaternet-vm"
     '';

+    update-pkgs = let
+      nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
+    in
+      writeShellScriptBin "update-pkgs" ''
+        cd "$(git rev-parse --show-toplevel)/pkgs"
+        ${nvfetcher-bin} -o _sources_pkgs -c nvfetcher.toml
+      '';
+
     update-nextcloud-apps = let
       nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
     in
@@ -1,3 +1,5 @@
+metrics:
+    tlater: ENC[AES256_GCM,data:4nB0H45nlongb0x1OOKzNXIk96PovZ7OwENovnBZUwMl9ncfYwTHT30OlLsPA75w1govH0jyBRkn1Pe/qHzY1Zt53B8=,iv:AfZ4So6HnjOXzqiHM3WpOsQZJs2CEckuxGfyDxc4TNA=,tag:fIXOKJSVDLpLbiLd2zAu9w==,type:str]
 nextcloud:
     tlater: ENC[AES256_GCM,data:zNsPm4uFaIRe3LjcwmayRg==,iv:5wam6bP5zP708jC9UrLV0s8qspl3Pm4fPzbMFYBUyPQ=,tag:apnJUMeJwMn9q0NhO4ptmA==,type:str]
 steam:

@@ -7,6 +9,8 @@ heisenbridge:
     hs-token: ENC[AES256_GCM,data:u52WpkQFd/J7JFoE/rfNluebyZQLOokvkVdL7+AEAvrhJhrkJli1ztkD79lbC+6tGUH4tT3T+nX9wvGKnrRUQg==,iv:as+9fVuvMg2IoE2WIKD9mHi+znhNcWRh5Zq+yr0xcDQ=,tag:mZ7fh7U0MfgI8hyq/28Bcg==,type:str]
 wireguard:
     server-key: ENC[AES256_GCM,data:mXb7ZznJHf5CgV8rI4uzPBATMRbmd7LimgtCkQM9kAjbIaGwUBqJZBN3fXs=,iv:3Po1Orinzov9rnEm9cLzgJY1PeD+5Jl9115MriABHh8=,tag:E/2CjDO1JCvJzxCnqKcNyw==,type:str]
+restic:
+    local-backups: ENC[AES256_GCM,data:NLNVlR9G9bLSZOkMoPvkbBbAZlKkmiUbdWHOFDnaefuy9wNLH53ctOIyS0rSsQLaJCSBTpgPSWIIXUSuzoK/eA==,iv:DzuujmyJJP4GiE5z7KOOGUEzUgOwmtf/7UYhwkyLe9g=,tag:cElFhpVC7S6HYlB6UyN7PQ==,type:str]
 turn:
     env: ENC[AES256_GCM,data:kt5nhVo9pb/ZbPUEcqSYXxN9YMgQKnFb5VRfFFS/qoIaJ73uD2fuJKqcxAyVRrdLqnSAWSQBgTgunBzdP7xqLAK2qt8DYAQWHkIe9uxFbSXZpdmw,iv:9lq6SFwTFN4GGm6gPiJpUMasMdnHVF6XLGYrsyG3kjU=,tag:428Qf9DOiiHt/Wjb188b8g==,type:str]
     secret: ENC[AES256_GCM,data:si7ee6Xfhdgdyzbp6aQpF7pz3TmTBb7iQ82lRPVXNDg9JfHI+lbmgAsSnRLX5qMCA6P9R045sSMosqidL8QwRg==,iv:SrhpZKK8D45yxCEfDb9P3TwtA14+qEI+wcRqcN/a6pw=,tag:PiwV+mOL9xHJgJft6sc61g==,type:str]

@@ -19,8 +23,8 @@ sops:
     azure_kv: []
     hc_vault: []
     age: []
-    lastmodified: "2023-04-23T17:34:53Z"
-    mac: ENC[AES256_GCM,data:UaGB4uwmYGVbKud5KrvdKeYTnYrs8nnQsT590KIS/b/9JhpQo5JXFtHsm1AteEBg9ygmY6tYKDcK4AXwz/uR/m3CW5If03dBNG8F9Uy3dPL5KaebC/EsNVIaRavWTbSZgqhnBgYeM+HkeQPskSWuwviSNU0D7d1n98Q89Y0kQfA=,iv:kEsRh8hb1amd2qozyxwYHCHdX80c2mO5Mm7npKX3DKc=,tag:p5GPd0OZvowghT92pxxXeA==,type:str]
+    lastmodified: "2023-09-25T00:42:25Z"
+    mac: ENC[AES256_GCM,data:28o/elUKslgn5auYfr34N9fE7B6EoZ6njL6yT0emjfoTjsCADJOLcHfUDNWb3AMP3Z5e/w8WsxI7MpwuwUXRvZ6u9Kui1IBcQu/V6GEzpBVw7JkLHZvsUFHOj/uEBcPvON7pKfXtG3vdH8FF1cxeenFm1Z0cX4C0WrNaxumGknA=,iv:GYK0/JZtCkbVorus+9HQbtxAnIXviiNkoC9dMqTHflM=,tag:R3N5hf/UV2nqyOI50Imr6g==,type:str]
     pgp:
         - created_at: "2022-10-12T00:46:51Z"
           enc: |
@@ -1,3 +1,8 @@
+gitea:
+    metrics-token: ENC[AES256_GCM,data:J4QdfI1wKyM=,iv:8fqCbftyhj90eIVFxjEp9RXKC1y1IaLnV1r2MOdY15M=,tag:8W/juv1OZh4hJco02qXO6g==,type:str]
+grafana:
+    adminPassword: ENC[AES256_GCM,data:dYfaxUpQpzA=,iv:j5wSem8C5+V4c5qRzXQJhsU7/FOtpvrnaEyFBmW6zJ4=,tag:oc8n3TkEbjF2gjuOobZuLA==,type:str]
+    secretKey: ENC[AES256_GCM,data:Atruvh2MsNY=,iv:y2MaCUCEzGIydHp6G0DJHfk289S1is0twKm2oUYwDhM=,tag:nAWeg+YqaYqk6k22oBkAhQ==,type:str]
 nextcloud:
     tlater: ENC[AES256_GCM,data:91kDcO4hpng=,iv:ayuILRmRru4ZxTCur9H2xHuLjkDzwPdS/4lEog/tesU=,tag:qYhJxnNDcCwUM7xe7Tlcjw==,type:str]
 steam:

@@ -7,6 +12,8 @@ heisenbridge:
     hs-token: ENC[AES256_GCM,data:VBwvwomv0Xg=,iv:q6INtJ+rg+QiXj8uBdBzQYQZUBBXp+9odxDHwvu8Jxc=,tag:XKhm8nxygAkKaiVPJ2Fcdg==,type:str]
 wireguard:
     server-key: ENC[AES256_GCM,data:FvY897XdKoa/mckE8JQLCkklsnYD6Wz1wpsu5t3uhEnW3iarnDQxF9msuYU=,iv:jqGXfekM+Vs+J9b5nlZ5Skd1ZKHajoUo2Dc4tMYPm1w=,tag:EehikjI/FCU8wqtpvJRamQ==,type:str]
+restic:
+    local-backups: ENC[AES256_GCM,data:3QjEv03t7wE=,iv:y/6Lv4eUbZZfGPwUONykz8VNL62cAJuWaJy9yk3aAmk=,tag:wMlGsepuG9JjwtUKGWSibw==,type:str]
 turn:
     env: ENC[AES256_GCM,data:xjIz/AY109lyiL5N01p5T3HcYco/rM5CJSRTtg==,iv:16bW6OpyOK/QL0QPGQp/Baa9xyT8E3ZsYkwqmjuofk0=,tag:J5re3uKxIykw3YunvQWBgg==,type:str]
     secret: ENC[AES256_GCM,data:eQ7dAocoZtg=,iv:fgzjTPv30WqTKlLy+yMn5MsKQgjhPnwlGFFwYEg3gWs=,tag:1ze33U1NBkgMX/9SiaBNQg==,type:str]

@@ -19,8 +26,8 @@ sops:
     azure_kv: []
     hc_vault: []
     age: []
-    lastmodified: "2023-04-23T17:35:16Z"
-    mac: ENC[AES256_GCM,data:4cW8k6o3jET8k+yJGyApjOyuSUQb+d+4wX/RTNnpbt+867sExQrZUrOMif/u8S4WmcKVSJgvrzuxK9hpDPYhJ1d/5YuHH1Dyj7QDRdhbZYHhkpPus0ZVTEpSknZzx2eWH1ch/fyJJknlrBlfb/tz50Dv+w9mhkL7qteaIq+Vmsc=,iv:YMfAuGwu1kAM0wGkq3kzVMnC72yo7ZT04BuEwoLRPIA=,tag:6I1VRzteRaLuxN+sfLA5Mw==,type:str]
+    lastmodified: "2023-10-07T02:17:50Z"
+    mac: ENC[AES256_GCM,data:vZDq33YIn0Nf1FQ2+ySezox6igiw6zNFCu3l3kaIsBKo1797pohmAxj2Lcc+OmlBjj98khaBIlbQuA5ULM+uPN5ILaz3NuXD5PZtsV+rL2PsLNMW9FBSmJ0m0YQrt0nZ0tpzifn12XghcSK2IXv+FnxlfrAJCxDvr5tRm90uUwU=,iv:ct8CzIWjaoJ1UjZcdFSr8lZ626vA0RvM883V6H5plWc=,tag:waJNtp/UbRDOfyzNElrung==,type:str]
     pgp:
         - created_at: "2022-10-12T16:48:23Z"
           enc: |
pkgs/_sources_pkgs/generated.json (new file, 21 lines)
@@ -0,0 +1,21 @@
{
    "prometheus-fail2ban-exporter": {
        "cargoLocks": null,
        "date": null,
        "extract": null,
        "name": "prometheus-fail2ban-exporter",
        "passthru": null,
        "pinned": false,
        "src": {
            "deepClone": false,
            "fetchSubmodules": false,
            "leaveDotGit": false,
            "name": null,
            "rev": "v0.10.0",
            "sha256": "sha256-8nIW1XaHCBqQCoLkV1ZYE3NTbVZ6c+UOqYD08XQiv+4=",
            "type": "git",
            "url": "https://gitlab.com/hectorjsmith/fail2ban-prometheus-exporter"
        },
        "version": "v0.10.0"
    }
}
pkgs/_sources_pkgs/generated.nix (new file, 16 lines)
@@ -0,0 +1,16 @@
# This file was generated by nvfetcher, please do not modify it manually.
{ fetchgit, fetchurl, fetchFromGitHub, dockerTools }:
{
  prometheus-fail2ban-exporter = {
    pname = "prometheus-fail2ban-exporter";
    version = "v0.10.0";
    src = fetchgit {
      url = "https://gitlab.com/hectorjsmith/fail2ban-prometheus-exporter";
      rev = "v0.10.0";
      fetchSubmodules = false;
      deepClone = false;
      leaveDotGit = false;
      sha256 = "sha256-8nIW1XaHCBqQCoLkV1ZYE3NTbVZ6c+UOqYD08XQiv+4=";
    };
  };
}
@@ -7,6 +7,9 @@
in
  {
    starbound = callPackage ./starbound {};
    prometheus-fail2ban-exporter = callPackage ./prometheus/fail2ban-exporter.nix {
      sources = pkgs.callPackage ./_sources_pkgs/generated.nix {};
    };
  }
  // (
    # Add nextcloud apps
pkgs/nvfetcher.toml (new file, 3 lines)
@@ -0,0 +1,3 @@
[prometheus-fail2ban-exporter]
src.manual = "v0.10.0" # No gitlab support in nvfetcher
fetch.git = "https://gitlab.com/hectorjsmith/fail2ban-prometheus-exporter"
pkgs/prometheus/fail2ban-exporter.nix (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  buildGoModule,
  sources,
}:
buildGoModule {
  inherit (sources.prometheus-fail2ban-exporter) pname src version;
  vendorHash = "sha256-qU6opwhhvzbQOhfGVyiVgKhfCSB0Z4eSRAJnv6ht2I0=";
}