style(treewide): Apply linter suggestions

parent 5c6b697e55
commit 63b3cbe00b

20 changed files with 432 additions and 450 deletions

Changed paths: checks, configuration, flake.nix, modules/crowdsec, pkgs/crowdsec
@@ -8,6 +8,15 @@
let
pkgs = nixpkgs.legacyPackages.${system};

statix' = pkgs.statix.overrideAttrs (old: {
patches = old.patches ++ [
(pkgs.fetchpatch {
url = "https://github.com/oppiliappan/statix/commit/925dec39bb705acbbe77178b4d658fe1b752abbb.patch";
hash = "sha256-0wacO6wuYJ4ufN9PGucRVJucFdFFNF+NoHYIrLXsCWs=";
})
];
});

runNuCheck =
{
name,
@@ -44,7 +53,7 @@ nixpkgs.lib.recursiveUpdate {
pkgs.deadnix
pkgs.nixfmt-rfc-style
pkgs.shellcheck
pkgs.statix
statix'
];

check = ./lints.nu;

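For context, the `statix'` derivation above patches the linter itself before it joins the check environment. A minimal sketch of the same overrideAttrs-plus-fetchpatch pattern, using a hypothetical package name and patch URL (only the shape matters; the real hash would come from nix-prefetch or a first failed build):

{ pkgs }:

# Sketch only: `somepackage` and the patch URL are placeholders, not part
# of this commit.
pkgs.somepackage.overrideAttrs (old: {
  # Append to any existing patches rather than replacing them.
  patches = (old.patches or [ ]) ++ [
    (pkgs.fetchpatch {
      url = "https://example.org/some-fix.patch"; # hypothetical
      hash = pkgs.lib.fakeHash; # replace with the real hash once known
    })
  ];
})
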
@@ -1,7 +1,5 @@
{
config,
pkgs,
lib,
modulesPath,
flake-inputs,
...
@@ -31,13 +29,7 @@
./sops.nix
];

nixpkgs.overlays = [
(final: prev: {
local = import ../pkgs {
pkgs = prev;
};
})
];
nixpkgs.overlays = [ (_: prev: { local = import ../pkgs { pkgs = prev; }; }) ];

nix = {
extraOptions = ''

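The overlay collapse above also renames the unused `final` argument to `_`, which is what deadnix suggests for bindings that are never referenced. A standalone sketch of that rule (the `verbose`/`tidy` names are illustrative only):

let
  # Before: `final` is bound but never used, so deadnix flags it.
  verbose = final: prev: { local = import ../pkgs { pkgs = prev; }; };
  # After: the unused binding becomes `_`, keeping the two-argument shape
  # that nixpkgs.overlays expects.
  tidy = _: prev: { local = import ../pkgs { pkgs = prev; }; };
in
[ verbose tidy ]
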
@@ -6,26 +6,35 @@
boot.kernelParams = [ "nomodeset" ];

networking.hostName = "testvm";
# Sets the base domain for nginx to a local domain so that we can
# easily test locally with the VM.
services.nginx.domain = "dev.local";

services = {
# Sets the base domain for nginx to a local domain so that we can
# easily test locally with the VM.
nginx.domain = "dev.local";

# Don't run this
batteryManager.enable = lib.mkForce false;

openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];
};

# Use the staging secrets
sops.defaultSopsFile = lib.mkOverride 99 ../../keys/staging.yaml;

systemd.network.networks."10-eth0" = {
matchConfig.Name = "eth0";
gateway = [
"192.168.9.1"
];
gateway = [ "192.168.9.1" ];
networkConfig = {
Address = "192.168.9.2/24";
};
};

# Don't run this
services.batteryManager.enable = lib.mkForce false;

# Both so we have a predictable key for the staging env, as well as
# to have a static key for decrypting the sops secrets for the
# staging env.
@@ -34,14 +43,6 @@
source = ../../keys/hosts/staging.key;
};

services.openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];

virtualisation.vmVariant = {
virtualisation = {
memorySize = 3941;

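Most hunks in this commit follow the same statix suggestion: repeated `services.`-prefixed keys are merged into one nested attribute set. A reduced sketch of the rewrite, reusing two options from the hunk above; the resulting configuration is identical:

{ lib, ... }:
{
  # Before (flagged by statix as repeated keys):
  #   services.nginx.domain = "dev.local";
  #   services.batteryManager.enable = lib.mkForce false;

  # After: one merged attribute set, same evaluated result.
  services = {
    nginx.domain = "dev.local";
    batteryManager.enable = lib.mkForce false;
  };
}
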
@@ -1,40 +1,49 @@
{ config, lib, ... }:
{
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
clientMaxBodySize = "10G";
services = {
nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
clientMaxBodySize = "10G";

statusPage = true; # For metrics, should be accessible only from localhost
statusPage = true; # For metrics, should be accessible only from localhost

commonHttpConfig = ''
log_format upstream_time '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'rt=$request_time uct="$upstream_connect_time" '
'uht="$upstream_header_time" urt="$upstream_response_time"';
'';
};
commonHttpConfig = ''
log_format upstream_time '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'rt=$request_time uct="$upstream_connect_time" '
'uht="$upstream_header_time" urt="$upstream_response_time"';
'';
};

services.logrotate.settings =
{
# Override the default, just keep fewer logs
nginx.rotate = 6;
}
// lib.mapAttrs' (
virtualHost: _:
lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
frequency = "daily";
rotate = 2;
compress = true;
delaycompress = true;
su = "${config.services.nginx.user} ${config.services.nginx.group}";
postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
logrotate.settings =
{
# Override the default, just keep fewer logs
nginx.rotate = 6;
}
) config.services.nginx.virtualHosts;
// lib.mapAttrs' (
virtualHost: _:
lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
frequency = "daily";
rotate = 2;
compress = true;
delaycompress = true;
su = "${config.services.nginx.user} ${config.services.nginx.group}";
postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
}
) config.services.nginx.virtualHosts;

backups.acme = {
user = "acme";
paths = lib.mapAttrsToList (
virtualHost: _: "/var/lib/acme/${virtualHost}"
) config.services.nginx.virtualHosts;
};
};

systemd.tmpfiles.rules = lib.mapAttrsToList (
virtualHost: _:
@@ -66,11 +75,4 @@
systemd.services.nginx.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];

services.backups.acme = {
user = "acme";
paths = lib.mapAttrsToList (
virtualHost: _: "/var/lib/acme/${virtualHost}"
) config.services.nginx.virtualHosts;
};
}

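The logrotate block above is generated per virtual host with `lib.mapAttrs'`, which maps over an attribute set while renaming its keys via `lib.nameValuePair`. A reduced sketch of that construction (only `frequency` and `rotate` kept for brevity):

{ config, lib, ... }:
{
  services.logrotate.settings = lib.mapAttrs' (
    # Each nginx virtual host becomes an entry keyed by its access log path.
    vhost: _:
    lib.nameValuePair "/var/log/nginx/${vhost}/access.log" {
      frequency = "daily";
      rotate = 2;
    }
  ) config.services.nginx.virtualHosts;
}
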
@@ -57,7 +57,7 @@ in
'';
type = types.attrsOf (
types.submodule (
{ config, name, ... }:
{ name, ... }:
{
options = {
user = lib.mkOption {
@@ -246,7 +246,7 @@ in
};
}
// lib.mapAttrs' (
name: backup:
name: _:
lib.nameValuePair "backup-${name}" {
wantedBy = [ "timers.target" ];
timerConfig = {

@@ -18,33 +18,153 @@ in
./matrix-hookshot.nix
];

services.matrix-conduit = {
enable = true;
package = flake-inputs.continuwuity.packages.${pkgs.system}.default;
settings.global = {
address = "127.0.0.1";
server_name = domain;
new_user_displayname_suffix = "🦆";
allow_check_for_updates = true;
services = {
matrix-conduit = {
enable = true;
package = flake-inputs.continuwuity.packages.${pkgs.system}.default;
settings.global = {
address = "127.0.0.1";
server_name = domain;
new_user_displayname_suffix = "🦆";
allow_check_for_updates = true;

# Set up delegation: https://docs.conduit.rs/delegation.html#automatic-recommended
# This is primarily to make sliding sync work
well_known = {
client = "https://${domain}";
server = "${domain}:443";
# Set up delegation: https://docs.conduit.rs/delegation.html#automatic-recommended
# This is primarily to make sliding sync work
well_known = {
client = "https://${domain}";
server = "${domain}:443";
};

turn_uris =
let
address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
in
[
"turn:${address}?transport=udp"
"turn:${address}?transport=tcp"
"turns:${tls-address}?transport=udp"
"turns:${tls-address}?transport=tcp"
];
};
};

turn_uris =
let
address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
in
[
"turn:${address}?transport=udp"
"turn:${address}?transport=tcp"
"turns:${tls-address}?transport=udp"
"turns:${tls-address}?transport=tcp"
];
coturn = {
enable = true;
no-cli = true;
use-auth-secret = true;
static-auth-secret-file = config.sops.secrets."turn/secret".path;
realm = turn-realm;
relay-ips = [ "116.202.158.55" ];

# SSL config
pkey = "${config.security.acme.certs."tlater.net".directory}/key.pem";
cert = "${config.security.acme.certs."tlater.net".directory}/fullchain.pem";

# Based on suggestions from
# https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md
# and
# https://www.foxypossibilities.com/2018/05/19/setting-up-a-turn-sever-for-matrix-on-nixos/
no-tcp-relay = true;
secure-stun = true;
extraConfig = ''
# Deny various local IP ranges, see
# https://www.rtcsec.com/article/cve-2020-26262-bypass-of-coturns-access-control-protection/
no-multicast-peers
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=127.0.0.0-127.255.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=192.0.2.0-192.0.2.255
denied-peer-ip=192.88.99.0-192.88.99.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255 denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff

# *Allow* any IP addresses that we explicitly set as relay IPs
${concatMapStringsSep "\n" (ip: "allowed-peer-ip=${ip}") config.services.coturn.relay-ips}

# Various other security settings
no-tlsv1
no-tlsv1_1

# Monitoring
prometheus
'';
};

nginx.virtualHosts."${domain}" = {
useACMEHost = "tlater.net";

listen = [
{
addr = "0.0.0.0";
port = 80;
}
{
addr = "[::0]";
port = 80;
}
{
addr = "0.0.0.0";
port = 443;
ssl = true;
}
{
addr = "[::0]";
port = 443;
ssl = true;
}
{
addr = "0.0.0.0";
port = 8448;
ssl = true;
}
{
addr = "[::0]";
port = 8448;
ssl = true;
}
];

forceSSL = true;
enableHSTS = true;
extraConfig = ''
merge_slashes off;
'';

locations = {
"/_matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
# Recommended by conduit
extraConfig = ''
proxy_buffering off;
'';
};
"/.well-known/matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
};
};
};

backups.conduit = {
user = "root";
paths = [ "/var/lib/private/matrix-conduit/" ];
# Other services store their data in conduit, so no other services
# need to be shut down currently.
pauseServices = [ "conduit.service" ];
};
};

@@ -60,122 +180,4 @@ in
systemd.services.coturn.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];

services.coturn = {
enable = true;
no-cli = true;
use-auth-secret = true;
static-auth-secret-file = config.sops.secrets."turn/secret".path;
realm = turn-realm;
relay-ips = [ "116.202.158.55" ];

# SSL config
pkey = "${config.security.acme.certs."tlater.net".directory}/key.pem";
cert = "${config.security.acme.certs."tlater.net".directory}/fullchain.pem";

# Based on suggestions from
# https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md
# and
# https://www.foxypossibilities.com/2018/05/19/setting-up-a-turn-sever-for-matrix-on-nixos/
no-tcp-relay = true;
secure-stun = true;
extraConfig = ''
# Deny various local IP ranges, see
# https://www.rtcsec.com/article/cve-2020-26262-bypass-of-coturns-access-control-protection/
no-multicast-peers
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=127.0.0.0-127.255.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=192.0.2.0-192.0.2.255
denied-peer-ip=192.88.99.0-192.88.99.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255 denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff

# *Allow* any IP addresses that we explicitly set as relay IPs
${concatMapStringsSep "\n" (ip: "allowed-peer-ip=${ip}") config.services.coturn.relay-ips}

# Various other security settings
no-tlsv1
no-tlsv1_1

# Monitoring
prometheus
'';
};

services.nginx.virtualHosts."${domain}" = {
useACMEHost = "tlater.net";

listen = [
{
addr = "0.0.0.0";
port = 80;
}
{
addr = "[::0]";
port = 80;
}
{
addr = "0.0.0.0";
port = 443;
ssl = true;
}
{
addr = "[::0]";
port = 443;
ssl = true;
}
{
addr = "0.0.0.0";
port = 8448;
ssl = true;
}
{
addr = "[::0]";
port = 8448;
ssl = true;
}
];

forceSSL = true;
enableHSTS = true;
extraConfig = ''
merge_slashes off;
'';

locations = {
"/_matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
# Recommended by conduit
extraConfig = ''
proxy_buffering off;
'';
};
"/.well-known/matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
};
};
};

services.backups.conduit = {
user = "root";
paths = [ "/var/lib/private/matrix-conduit/" ];
# Other services store their data in conduit, so no other services
# need to be shut down currently.
pauseServices = [ "conduit.service" ];
};
}

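The coturn `extraConfig` above uses `concatMapStringsSep` to expand every relay IP into an `allowed-peer-ip=` line. A small sketch of that call evaluated in isolation, assuming a `<nixpkgs>` channel is available for `lib` and copying the relay list from the hunk:

let
  inherit (import <nixpkgs> { }) lib;
  relay-ips = [ "116.202.158.55" ]; # value taken from the hunk above
in
# Evaluates to "allowed-peer-ip=116.202.158.55"; with more IPs the
# generated lines are joined with newlines.
lib.concatMapStringsSep "\n" (ip: "allowed-peer-ip=${ip}") relay-ips
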
@@ -74,9 +74,7 @@ in
services.matrix-hookshot = {
enable = true;

serviceDependencies = [
"conduit.service"
];
serviceDependencies = [ "conduit.service" ];

registrationFile = "/run/matrix-hookshot/registration.yaml";

@@ -126,15 +124,11 @@ in
listeners = [
{
port = 9000;
resources = [
"webhooks"
];
resources = [ "webhooks" ];
}
{
port = 9001;
resources = [
"metrics"
];
resources = [ "metrics" ];
}
];

@@ -8,9 +8,7 @@
security.crowdsec = {
enable = true;

parserWhitelist = [
"10.45.249.2"
];
parserWhitelist = [ "10.45.249.2" ];

extraGroups = [
"systemd-journal"
@@ -21,25 +19,19 @@
{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [
"SYSLOG_IDENTIFIER=Nextcloud"
];
journalctl_filter = [ "SYSLOG_IDENTIFIER=Nextcloud" ];
}

{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [
"SYSLOG_IDENTIFIER=sshd-session"
];
journalctl_filter = [ "SYSLOG_IDENTIFIER=sshd-session" ];
}

{
labels.type = "nginx";
filenames =
[
"/var/log/nginx/*.log"
]
[ "/var/log/nginx/*.log" ]
++ lib.mapAttrsToList (
vHost: _: "/var/log/nginx/${vHost}/access.log"
) config.services.nginx.virtualHosts;

@@ -11,37 +11,39 @@ in
{
imports = [ flake-inputs.foundryvtt.nixosModules.foundryvtt ];

services.foundryvtt = {
enable = true;
hostName = domain;
minifyStaticFiles = true;
proxySSL = true;
proxyPort = 443;
package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_11;
services = {
foundryvtt = {
enable = true;
hostName = domain;
minifyStaticFiles = true;
proxySSL = true;
proxyPort = 443;
package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_11;
};

nginx.virtualHosts."${domain}" =
let
inherit (config.services.foundryvtt) port;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;

locations."/" = {
proxyWebsockets = true;
proxyPass = "http://localhost:${toString port}";
};
};

backups.foundryvtt = {
user = "foundryvtt";
paths = [ config.services.foundryvtt.dataDir ];
pauseServices = [ "foundryvtt.service" ];
};
};

# Want to start it manually when I need it, not have it constantly
# running
systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];

services.nginx.virtualHosts."${domain}" =
let
inherit (config.services.foundryvtt) port;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;

locations."/" = {
proxyWebsockets = true;
proxyPass = "http://localhost:${toString port}";
};
};

services.backups.foundryvtt = {
user = "foundryvtt";
paths = [ config.services.foundryvtt.dataDir ];
pauseServices = [ "foundryvtt.service" ];
};
}

@@ -8,24 +8,68 @@ let
domain = "gitea.${config.services.nginx.domain}";
in
{
services.forgejo = {
enable = true;
database.type = "postgres";
services = {
forgejo = {
enable = true;
database.type = "postgres";

settings = {
server = {
DOMAIN = domain;
HTTP_ADDR = "127.0.0.1";
ROOT_URL = "https://${domain}/";
SSH_PORT = 2222;
settings = {
server = {
DOMAIN = domain;
HTTP_ADDR = "127.0.0.1";
ROOT_URL = "https://${domain}/";
SSH_PORT = 2222;
};

metrics = {
ENABLED = true;
TOKEN = "#metricstoken#";
};
service.DISABLE_REGISTRATION = true;
session.COOKIE_SECURE = true;
};
};

# Set up SSL
nginx.virtualHosts."${domain}" =
let
httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
httpPort = config.services.forgejo.settings.server.HTTP_PORT;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;

locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
locations."/metrics" = {
extraConfig = ''
access_log off;
allow 127.0.0.1;
${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
deny all;
'';
};
};

metrics = {
ENABLED = true;
TOKEN = "#metricstoken#";
backups.forgejo = {
user = "forgejo";
paths = [
"/var/lib/forgejo/forgejo-db.sql"
"/var/lib/forgejo/repositories/"
"/var/lib/forgejo/data/"
"/var/lib/forgejo/custom/"
# Conf is backed up via nix
];
preparation = {
packages = [ config.services.postgresql.package ];
text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
};
service.DISABLE_REGISTRATION = true;
session.COOKIE_SECURE = true;
cleanup = {
packages = [ pkgs.coreutils ];
text = "rm /var/lib/forgejo/forgejo-db.sql";
};
pauseServices = [ "forgejo.service" ];
};
};

@@ -36,46 +80,4 @@ in
runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
in
[ "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'" ];

# Set up SSL
services.nginx.virtualHosts."${domain}" =
let
httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
httpPort = config.services.forgejo.settings.server.HTTP_PORT;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;

locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
locations."/metrics" = {
extraConfig = ''
access_log off;
allow 127.0.0.1;
${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
deny all;
'';
};
};

services.backups.forgejo = {
user = "forgejo";
paths = [
"/var/lib/forgejo/forgejo-db.sql"
"/var/lib/forgejo/repositories/"
"/var/lib/forgejo/data/"
"/var/lib/forgejo/custom/"
# Conf is backed up via nix
];
preparation = {
packages = [ config.services.postgresql.package ];
text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
};
cleanup = {
packages = [ pkgs.coreutils ];
text = "rm /var/lib/forgejo/forgejo-db.sql";
};
pauseServices = [ "forgejo.service" ];
};
}

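The `/metrics` location above only allows loopback access; the IPv6 rule is emitted conditionally with `lib.optionalString`, which returns its string when the condition holds and `""` otherwise. A minimal sketch of that idiom (the virtual host name here is a placeholder, not from this commit):

{ config, lib, ... }:
{
  # Only allow localhost; add the IPv6 loopback rule when IPv6 is enabled.
  services.nginx.virtualHosts."example".locations."/metrics".extraConfig = ''
    access_log off;
    allow 127.0.0.1;
    ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
    deny all;
  '';
}
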
@@ -74,7 +74,7 @@ in
listenAddress = "127.0.0.1";
group = "nginx";

settings.namespaces = lib.mapAttrsToList (name: virtualHost: {
settings.namespaces = lib.mapAttrsToList (name: _: {
inherit name;
metrics_override.prefix = "nginxlog";
namespace_label = "vhost";

@@ -38,7 +38,7 @@ in
services.victoriametrics.scrapeConfigs = mkOption {
type = types.attrsOf (
types.submodule (
{ name, self, ... }:
{ name, ... }:
{
options = {
job_name = mkOption {
@@ -106,35 +106,37 @@ in
# module is an intractable mess
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig.Restart = mkDefault "always";
serviceConfig.PrivateTmp = mkDefault true;
serviceConfig.WorkingDirectory = mkDefault /tmp;
serviceConfig.DynamicUser = mkDefault true;
# Hardening
serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
serviceConfig.DeviceAllow = [ "" ];
serviceConfig.LockPersonality = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.PrivateDevices = mkDefault true;
serviceConfig.ProtectClock = mkDefault true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectSystem = mkDefault "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictRealtime = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.UMask = "0077";
serviceConfig = {
Restart = mkDefault "always";
PrivateTmp = mkDefault true;
WorkingDirectory = mkDefault /tmp;
DynamicUser = mkDefault true;
# Hardening
CapabilityBoundingSet = mkDefault [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = mkDefault true;
ProtectClock = mkDefault true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = mkDefault "strict";
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
UMask = "0077";
};
}
exporter.serviceOpts
]
@@ -144,7 +146,7 @@ in
{
vmagent-scrape-exporters =
let
listenAddress = config.services.victoriametrics.listenAddress;
inherit (config.services.victoriametrics) listenAddress;
vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
promscrape = yaml.generate "prometheus.yml" {
scrape_configs = lib.mapAttrsToList (
@@ -153,7 +155,7 @@ in
inherit (scrape) job_name;
static_configs =
scrape.static_configs
++ lib.optional (scrape.targets != [ ]) { targets = scrape.targets; };
++ lib.optional (scrape.targets != [ ]) { inherit (scrape) targets; };
} scrape.extraSettings
) config.services.victoriametrics.scrapeConfigs;
};
@@ -212,7 +214,7 @@ in

services.victoriametrics.scrapeConfigs =
let
allExporters = lib.mapAttrs (name: exporter: { inherit (exporter) listenAddress port; }) (
allExporters = lib.mapAttrs (_: exporter: { inherit (exporter) listenAddress port; }) (
(lib.filterAttrs (
name: exporter:
# A bunch of deprecated exporters that need to be ignored

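Several hunks in this file swap `x = a.x;` bindings for `inherit (a) x;`, which statix prefers because it avoids restating the attribute name. The two forms are equivalent; a standalone sketch (the attribute set here is a stand-in for `config.services.victoriametrics`, and the address value is illustrative):

let
  victoriametrics = { listenAddress = ":8428"; }; # stand-in, not from this commit
  # Before: listenAddress = victoriametrics.listenAddress;
  # After:
  inherit (victoriametrics) listenAddress;
in
listenAddress # evaluates to ":8428"
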
@@ -1,8 +1,4 @@
{
config,
lib,
...
}:
{ config, lib, ... }:
let
cfg = config.services.victorialogs;
in

@@ -9,93 +9,95 @@ let
hostName = "nextcloud.${config.services.nginx.domain}";
in
{
services.nextcloud = {
inherit hostName;
services = {
nextcloud = {
inherit hostName;

package = nextcloud;
phpPackage = lib.mkForce (
pkgs.php.override {
packageOverrides = final: prev: {
extensions = prev.extensions // {
pgsql = prev.extensions.pgsql.overrideAttrs (old: {
configureFlags = [ "--with-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (old: {
configureFlags = [ "--with-pdo-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
package = nextcloud;
phpPackage = lib.mkForce (
pkgs.php.override {
packageOverrides = _: prev: {
extensions = prev.extensions // {
pgsql = prev.extensions.pgsql.overrideAttrs (_: {
configureFlags = [ "--with-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (_: {
configureFlags = [ "--with-pdo-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
};
};
};
}
);
enable = true;
maxUploadSize = "2G";
https = true;
}
);
enable = true;
maxUploadSize = "2G";
https = true;

configureRedis = true;
configureRedis = true;

config = {
dbtype = "pgsql";
dbhost = "/run/postgresql";
config = {
dbtype = "pgsql";
dbhost = "/run/postgresql";

adminuser = "tlater";
adminpassFile = config.sops.secrets."nextcloud/tlater".path;
adminuser = "tlater";
adminpassFile = config.sops.secrets."nextcloud/tlater".path;
};

settings = {
default_phone_region = "AT";
overwriteprotocol = "https";
};

phpOptions = {
"opcache.interned_strings_buffer" = "16";
};

extraApps = {
inherit (config.services.nextcloud.package.packages.apps)
calendar
contacts
cookbook
news
;
};
};

settings = {
default_phone_region = "AT";
overwriteprotocol = "https";
# Set up SSL
nginx.virtualHosts."${hostName}" = {
forceSSL = true;
useACMEHost = "tlater.net";
# The upstream module already adds HSTS
};

phpOptions = {
"opcache.interned_strings_buffer" = "16";
};

extraApps = {
inherit (config.services.nextcloud.package.packages.apps)
calendar
contacts
cookbook
news
;
backups.nextcloud = {
user = "nextcloud";
paths = [
"/var/lib/nextcloud/nextcloud-db.sql"
"/var/lib/nextcloud/data/"
"/var/lib/nextcloud/config/config.php"
];
preparation = {
packages = [
config.services.postgresql.package
config.services.nextcloud.occ
];
text = ''
nextcloud-occ maintenance:mode --on
pg_dump ${config.services.nextcloud.config.dbname} --file=/var/lib/nextcloud/nextcloud-db.sql
'';
};
cleanup = {
packages = [
pkgs.coreutils
config.services.nextcloud.occ
];
text = ''
rm /var/lib/nextcloud/nextcloud-db.sql
nextcloud-occ maintenance:mode --off
'';
};
};
};

# Ensure that this service doesn't start before postgres is ready
systemd.services.nextcloud-setup.after = [ "postgresql.service" ];

# Set up SSL
services.nginx.virtualHosts."${hostName}" = {
forceSSL = true;
useACMEHost = "tlater.net";
# The upstream module already adds HSTS
};

services.backups.nextcloud = {
user = "nextcloud";
paths = [
"/var/lib/nextcloud/nextcloud-db.sql"
"/var/lib/nextcloud/data/"
"/var/lib/nextcloud/config/config.php"
];
preparation = {
packages = [
config.services.postgresql.package
config.services.nextcloud.occ
];
text = ''
nextcloud-occ maintenance:mode --on
pg_dump ${config.services.nextcloud.config.dbname} --file=/var/lib/nextcloud/nextcloud-db.sql
'';
};
cleanup = {
packages = [
pkgs.coreutils
config.services.nextcloud.occ
];
text = ''
rm /var/lib/nextcloud/nextcloud-db.sql
nextcloud-occ maintenance:mode --off
'';
};
};
}

@@ -1,6 +1,6 @@
{ config, ... }:
let
domain = config.services.nginx.domain;
inherit (config.services.nginx) domain;
in
{
services.tlaternet-webserver = {

@@ -121,8 +121,6 @@
run-vm = {
type = "app";
program =
let
in
(pkgs.writeShellScript "" ''
${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
'').outPath;

@@ -267,9 +267,7 @@ in
};
};

systemd.packages = [
cfg.package
];
systemd.packages = [ cfg.package ];

environment = {
systemPackages = [

@@ -6,10 +6,11 @@
...
}:
let
inherit (flake-inputs.self.packages.${pkgs.system}) crowdsec-firewall-bouncer;

crowdsecCfg = config.security.crowdsec;
cfg = crowdsecCfg.remediationComponents.firewallBouncer;
settingsFormat = pkgs.formats.yaml { };
crowdsec-firewall-bouncer = flake-inputs.self.packages.${pkgs.system}.crowdsec-firewall-bouncer;
in
{
options.security.crowdsec.remediationComponents.firewallBouncer = {
@@ -31,9 +32,7 @@ in
security.crowdsec.remediationComponents.firewallBouncer.settings = {
mode = lib.mkDefault "${if config.networking.nftables.enable then "nftables" else "iptables"}";
log_mode = "stdout";
iptables_chains = [
"nixos-fw"
];
iptables_chains = [ "nixos-fw" ];

# Don't let users easily override this; unfortunately we need to
# set up this key through substitution at runtime.
@@ -78,9 +77,7 @@ in
requiredBy = [ "crowdsec.service" ];

path =
lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [
pkgs.ipset
]
lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [ pkgs.ipset ]
++ lib.optional (cfg.settings.mode == "iptables") pkgs.iptables
++ lib.optional (cfg.settings.mode == "nftables") pkgs.nftables;
};

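The remaining hunks are pure nixfmt-rfc-style reflows: short lists and attribute sets that fit within the line width are collapsed onto one line, with no semantic change. A sketch using the bouncer setting from the hunk above:

{
  # Before:
  #   iptables_chains = [
  #     "nixos-fw"
  #   ];
  # After (same value, one line):
  iptables_chains = [ "nixos-fw" ];
}
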
@@ -1,5 +1 @@
{
imports = [
./cs-firewall-bouncer.nix
];
}
{ imports = [ ./cs-firewall-bouncer.nix ]; }

@@ -1,4 +1 @@
{
sources,
}:
sources.crowdsec-hub.src
{ sources }: sources.crowdsec-hub.src
|
Loading…
Add table
Add a link
Reference in a new issue