Compare commits


17 commits

Author SHA1 Message Date
94ec261a94
chore(foundryvtt): Upgrade to version 13 2025-05-25 02:11:46 +08:00
f2cbeebbb5
flake.lock: Update
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/ec7c109a4f794fce09aad87239eab7f66540b888' (2025-05-15)
  → 'github:nix-community/disko/df522e787fdffc4f32ed3e1fca9ed0968a384d62' (2025-05-20)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/b965e4c283060415956ccd39eee4ca34a6a56cf8' (2025-05-16)
  → 'github:nixos/nixpkgs/c3ee76c437067f1ae09d6e530df46a3f80977992' (2025-05-24)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/e93ee1d900ad264d65e9701a5c6f895683433386' (2025-05-05)
  → 'github:Mic92/sops-nix/8d215e1c981be3aa37e47aeabd4e61bb069548fd' (2025-05-18)
2025-05-25 00:42:02 +08:00
fc6be0c4c2
chore(treewide): Upgrade to NixOS 25.05 2025-05-25 00:42:01 +08:00
b067bbc8c0
fix(immich): Set the correct backup attribute 2025-05-24 05:33:08 +08:00
913944cff3
feat(immich): Add immich service 2025-05-24 05:30:06 +08:00
8d0be61483
chore(ignore-revs): Ignore linter changes in blame 2025-05-20 20:49:40 +08:00
63b3cbe00b
style(treewide): Apply linter suggestions 2025-05-20 20:48:47 +08:00
5c6b697e55
feat(checks): Add linters 2025-05-20 20:48:46 +08:00
fc991a0b07
fix(metrics-exporters): Fix dbus socket access for DynamicUsers
This broke the systemd exporter, though I don't fully know what
caused the breakage.

See
https://discourse.nixos.org/t/systemd-exporter-couldnt-get-dbus-connection-read-unix-run-dbus-system-bus-socket-recvmsg-connection-reset-by-peer/
2025-05-20 01:07:38 +08:00
09b90433e6
feat(conduit): Switch from conduwuit to continuwuity 2025-05-17 15:46:53 +08:00
1bef207356
bump(crowdsec-up): Update hub 2025-05-17 15:46:53 +08:00
cfbc2999d7
flake.lock: Update
Flake lock file updates:

• Updated input 'conduwuit':
    'github:girlbossceo/conduwuit/00f7745ec4ebcea5f892376c5de5db1299f71696' (2025-04-04)
  → 'github:girlbossceo/conduwuit/d8311a5ff672fdc4729d956af5e3af8646b0670d' (2025-04-09)
• Updated input 'disko':
    'github:nix-community/disko/329d3d7e8bc63dd30c39e14e6076db590a6eabe6' (2025-04-02)
  → 'github:nix-community/disko/ec7c109a4f794fce09aad87239eab7f66540b888' (2025-05-15)
• Updated input 'foundryvtt':
    'github:reckenrode/nix-foundryvtt/a7fa493ba2c623cf90e83756b62285b3b58f18d2' (2025-02-16)
  → 'github:reckenrode/nix-foundryvtt/f1b401831d796dd94cf5a11b65fd169a199d4ff0' (2025-05-10)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/bdb91860de2f719b57eef819b5617762f7120c70' (2025-04-03)
  → 'github:nixos/nixpkgs/d6c9326e40bb557ebb8c040b4375590bc06413f8' (2025-05-16)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/a462b946265ed006720d02153882780b12a8376d' (2025-04-04)
  → 'github:nixos/nixpkgs/b965e4c283060415956ccd39eee4ca34a6a56cf8' (2025-05-16)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/cff8437c5fe8c68fc3a840a21bf1f4dc801da40d' (2025-04-04)
  → 'github:Mic92/sops-nix/e93ee1d900ad264d65e9701a5c6f895683433386' (2025-05-05)
2025-05-17 15:46:52 +08:00
89f9196ef0
fix(victorialogs): Use new upstream module 2025-04-04 21:40:08 +08:00
e37c589654
bump(crowdsec-hub): Update hub 2025-04-04 21:21:32 +08:00
b396835f88
flake.lock: Update
Flake lock file updates:

• Updated input 'conduwuit':
    'github:girlbossceo/conduwuit/0f81c1e1ccdcb0c5c6d5a27e82f16eb37b1e61c8' (2025-04-01)
  → 'github:girlbossceo/conduwuit/00f7745ec4ebcea5f892376c5de5db1299f71696' (2025-04-04)
• Updated input 'disko':
    'github:nix-community/disko/15dbf8cebd8e2655a883b74547108e089f051bf0' (2025-02-18)
  → 'github:nix-community/disko/329d3d7e8bc63dd30c39e14e6076db590a6eabe6' (2025-04-02)
• Updated input 'nixpkgs':
    'github:nixos/nixpkgs/11415c7ae8539d6292f2928317ee7a8410b28bb9' (2025-02-21)
  → 'github:nixos/nixpkgs/bdb91860de2f719b57eef819b5617762f7120c70' (2025-04-03)
• Updated input 'nixpkgs-unstable':
    'github:nixos/nixpkgs/8465e233b0668cf162c608a92e62e8d78c1ba7e4' (2025-02-22)
  → 'github:nixos/nixpkgs/a462b946265ed006720d02153882780b12a8376d' (2025-04-04)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/07af005bb7d60c7f118d9d9f5530485da5d1e975' (2025-02-11)
  → 'github:Mic92/sops-nix/cff8437c5fe8c68fc3a840a21bf1f4dc801da40d' (2025-04-04)
2025-04-04 21:21:31 +08:00
ee760bfa1b
feat(victoriametrics): Add missing scrape configs 2025-04-04 21:07:38 +08:00
3c6afa0c66
feat(matrix): Switch to conduwuit
This fixes support for the new sliding sync API.
2025-04-04 21:06:53 +08:00
29 changed files with 697 additions and 601 deletions

View file

@ -9,3 +9,6 @@ fd138d45e6a2cad89fead6e9f246ba282070d6b7
# Switch to alejandra formatting
046a88905ddfa7f9edc3291c310dbb985dee34f9
# Apply wide linting
63b3cbe00be80ccb4b221aad64eb657ae5c96d70

61
checks/default.nix Normal file
View file

@ -0,0 +1,61 @@
{
self,
nixpkgs,
deploy-rs,
system,
...
}:
let
pkgs = nixpkgs.legacyPackages.${system};
statix' = pkgs.statix.overrideAttrs (old: {
patches = old.patches ++ [
(pkgs.fetchpatch {
url = "https://github.com/oppiliappan/statix/commit/925dec39bb705acbbe77178b4d658fe1b752abbb.patch";
hash = "sha256-0wacO6wuYJ4ufN9PGucRVJucFdFFNF+NoHYIrLXsCWs=";
})
];
});
runNuCheck =
{
name,
packages,
check,
}:
pkgs.stdenvNoCC.mkDerivation {
inherit name;
src = nixpkgs.lib.cleanSourceWith {
src = self;
filter = nixpkgs.lib.cleanSourceFilter;
};
dontPatch = true;
dontConfigure = true;
dontBuild = true;
dontInstall = true;
dontFixup = true;
doCheck = true;
checkInputs = nixpkgs.lib.singleton pkgs.nushell ++ packages;
checkPhase = ''
nu ${check}
'';
};
in
nixpkgs.lib.recursiveUpdate {
lints = runNuCheck {
name = "lints";
packages = [
pkgs.deadnix
pkgs.nixfmt-rfc-style
pkgs.shellcheck
statix'
];
check = ./lints.nu;
};
} (deploy-rs.lib.${system}.deployChecks self.deploy)
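Note: this attribute set is what the flake.nix change near the end of this comparison imports; roughly, the wiring looks like this (a sketch based on that flake.nix diff, with inputs and system as they are used there):

# In flake.nix (see its diff further down): the per-system checks are
# now built from ./checks, which merges the lint derivation above
# with deploy-rs's deployChecks.
checks.${system} = import ./checks (inputs // { inherit system; });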

39
checks/lints.nu Normal file
View file

@ -0,0 +1,39 @@
#!/usr/bin/env nu
let shell_files = ls **/*.sh | get name
let nix_files = ls **/*.nix | where name !~ "hardware-configuration.nix|_sources" | get name
let linters = [
([shellcheck] ++ $shell_files)
([nixfmt --check --strict] ++ $nix_files)
([deadnix --fail] ++ $nix_files)
([statix check] ++ $nix_files)
]
mkdir $env.out
def run-linter [linterArgs: list<string>] {
print $'Running ($linterArgs.0)...'
let exit_code = try {
^$linterArgs.0 ...($linterArgs | skip 1)
$env.LAST_EXIT_CODE
} catch {|e| $e.exit_code}
[$linterArgs.0, $exit_code]
}
let results = $linters | each {|linter| run-linter $linter}
print 'Linter results:'
let success = $results | each {|result|
match $result.1 {
0 => {print $'(ansi green)($result.0)(ansi reset)'}
_ => {print $'(ansi red)($result.0)(ansi reset)'}
}
$result.1
} | math sum
exit $success

View file

@ -1,7 +1,5 @@
{
config,
pkgs,
lib,
modulesPath,
flake-inputs,
...
@ -21,6 +19,7 @@
./services/crowdsec.nix
./services/foundryvtt.nix
./services/gitea.nix
./services/immich.nix
./services/metrics
./services/nextcloud.nix
./services/webserver.nix
@ -31,13 +30,7 @@
./sops.nix
];
nixpkgs.overlays = [
(final: prev: {
local = import ../pkgs {
pkgs = prev;
};
})
];
nixpkgs.overlays = [ (_: prev: { local = import ../pkgs { pkgs = prev; }; }) ];
nix = {
extraOptions = ''

View file

@ -80,6 +80,17 @@
inherit mountOptions;
mountpoint = "/var";
};
"/volume/var/lib/private/matrix-conduit" = {
mountOptions = [
# Explicitly don't compress here, since
# conduwuit's database does compression by
# itself, and relies on being able to read the
# raw file data from disk (which is impossible
# if btrfs compresses it)
"noatime"
];
mountpoint = "/var/lib/private/matrix-conduit";
};
"/volume/nix-store" = {
inherit mountOptions;
mountpoint = "/nix";

View file

@ -6,26 +6,35 @@
boot.kernelParams = [ "nomodeset" ];
networking.hostName = "testvm";
# Sets the base domain for nginx to a local domain so that we can
# easily test locally with the VM.
services.nginx.domain = "dev.local";
services = {
# Sets the base domain for nginx to a local domain so that we can
# easily test locally with the VM.
nginx.domain = "dev.local";
# Don't run this
batteryManager.enable = lib.mkForce false;
openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];
};
# Use the staging secrets
sops.defaultSopsFile = lib.mkOverride 99 ../../keys/staging.yaml;
systemd.network.networks."10-eth0" = {
matchConfig.Name = "eth0";
gateway = [
"192.168.9.1"
];
gateway = [ "192.168.9.1" ];
networkConfig = {
Address = "192.168.9.2/24";
};
};
# Don't run this
services.batteryManager.enable = lib.mkForce false;
# Both so we have a predictable key for the staging env, as well as
# to have a static key for decrypting the sops secrets for the
# staging env.
@ -34,14 +43,6 @@
source = ../../keys/hosts/staging.key;
};
services.openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];
virtualisation.vmVariant = {
virtualisation = {
memorySize = 3941;

View file

@ -1,40 +1,49 @@
{ config, lib, ... }:
{
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
clientMaxBodySize = "10G";
services = {
nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
clientMaxBodySize = "10G";
statusPage = true; # For metrics, should be accessible only from localhost
statusPage = true; # For metrics, should be accessible only from localhost
commonHttpConfig = ''
log_format upstream_time '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'rt=$request_time uct="$upstream_connect_time" '
'uht="$upstream_header_time" urt="$upstream_response_time"';
'';
};
commonHttpConfig = ''
log_format upstream_time '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'rt=$request_time uct="$upstream_connect_time" '
'uht="$upstream_header_time" urt="$upstream_response_time"';
'';
};
services.logrotate.settings =
{
# Override the default, just keep fewer logs
nginx.rotate = 6;
}
// lib.mapAttrs' (
virtualHost: _:
lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
frequency = "daily";
rotate = 2;
compress = true;
delaycompress = true;
su = "${config.services.nginx.user} ${config.services.nginx.group}";
postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
logrotate.settings =
{
# Override the default, just keep fewer logs
nginx.rotate = 6;
}
) config.services.nginx.virtualHosts;
// lib.mapAttrs' (
virtualHost: _:
lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
frequency = "daily";
rotate = 2;
compress = true;
delaycompress = true;
su = "${config.services.nginx.user} ${config.services.nginx.group}";
postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
}
) config.services.nginx.virtualHosts;
backups.acme = {
user = "acme";
paths = lib.mapAttrsToList (
virtualHost: _: "/var/lib/acme/${virtualHost}"
) config.services.nginx.virtualHosts;
};
};
systemd.tmpfiles.rules = lib.mapAttrsToList (
virtualHost: _:
@ -66,11 +75,4 @@
systemd.services.nginx.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];
services.backups.acme = {
user = "acme";
paths = lib.mapAttrsToList (
virtualHost: _: "/var/lib/acme/${virtualHost}"
) config.services.nginx.virtualHosts;
};
}
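Note: the lib.mapAttrs' call above generates one logrotate entry per nginx virtual host. A sketch of what it expands to for a single, hypothetical vhost named example.tlater.net (the name is made up; the values are the ones in the diff):

services.logrotate.settings."/var/log/nginx/example.tlater.net/access.log" = {
  frequency = "daily";
  rotate = 2;
  compress = true;
  delaycompress = true;
  # nginx module user/group, normally "nginx nginx"
  su = "${config.services.nginx.user} ${config.services.nginx.group}";
  postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
};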

View file

@ -57,7 +57,7 @@ in
'';
type = types.attrsOf (
types.submodule (
{ config, name, ... }:
{ name, ... }:
{
options = {
user = lib.mkOption {
@ -246,7 +246,7 @@ in
};
}
// lib.mapAttrs' (
name: backup:
name: _:
lib.nameValuePair "backup-${name}" {
wantedBy = [ "timers.target" ];
timerConfig = {

View file

@ -1,4 +1,5 @@
{
pkgs,
config,
lib,
...
@ -16,159 +17,166 @@ in
./matrix-hookshot.nix
];
services.matrix-conduit = {
enable = true;
settings.global = {
address = "127.0.0.1";
server_name = domain;
database_backend = "rocksdb";
services = {
matrix-conduit = {
enable = true;
package = pkgs.matrix-continuwuity;
settings.global = {
address = "127.0.0.1";
server_name = domain;
new_user_displayname_suffix = "🦆";
allow_check_for_updates = true;
# Set up delegation: https://docs.conduit.rs/delegation.html#automatic-recommended
# This is primarily to make sliding sync work
well_known = {
client = "https://${domain}";
server = "${domain}:443";
# Set up delegation: https://docs.conduit.rs/delegation.html#automatic-recommended
# This is primarily to make sliding sync work
well_known = {
client = "https://${domain}";
server = "${domain}:443";
};
turn_uris =
let
address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
in
[
"turn:${address}?transport=udp"
"turn:${address}?transport=tcp"
"turns:${tls-address}?transport=udp"
"turns:${tls-address}?transport=tcp"
];
};
};
turn_uris =
let
address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
in
[
"turn:${address}?transport=udp"
"turn:${address}?transport=tcp"
"turns:${tls-address}?transport=udp"
"turns:${tls-address}?transport=tcp"
];
coturn = {
enable = true;
no-cli = true;
use-auth-secret = true;
static-auth-secret-file = config.sops.secrets."turn/secret".path;
realm = turn-realm;
relay-ips = [ "116.202.158.55" ];
# SSL config
pkey = "${config.security.acme.certs."tlater.net".directory}/key.pem";
cert = "${config.security.acme.certs."tlater.net".directory}/fullchain.pem";
# Based on suggestions from
# https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md
# and
# https://www.foxypossibilities.com/2018/05/19/setting-up-a-turn-sever-for-matrix-on-nixos/
no-tcp-relay = true;
secure-stun = true;
extraConfig = ''
# Deny various local IP ranges, see
# https://www.rtcsec.com/article/cve-2020-26262-bypass-of-coturns-access-control-protection/
no-multicast-peers
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=127.0.0.0-127.255.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=192.0.2.0-192.0.2.255
denied-peer-ip=192.88.99.0-192.88.99.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255 denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
# *Allow* any IP addresses that we explicitly set as relay IPs
${concatMapStringsSep "\n" (ip: "allowed-peer-ip=${ip}") config.services.coturn.relay-ips}
# Various other security settings
no-tlsv1
no-tlsv1_1
# Monitoring
prometheus
'';
};
nginx.virtualHosts."${domain}" = {
useACMEHost = "tlater.net";
listen = [
{
addr = "0.0.0.0";
port = 80;
}
{
addr = "[::0]";
port = 80;
}
{
addr = "0.0.0.0";
port = 443;
ssl = true;
}
{
addr = "[::0]";
port = 443;
ssl = true;
}
{
addr = "0.0.0.0";
port = 8448;
ssl = true;
}
{
addr = "[::0]";
port = 8448;
ssl = true;
}
];
forceSSL = true;
enableHSTS = true;
extraConfig = ''
merge_slashes off;
'';
locations = {
"/_matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
# Recommended by conduit
extraConfig = ''
proxy_buffering off;
'';
};
"/.well-known/matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
};
};
};
backups.conduit = {
user = "root";
paths = [ "/var/lib/private/matrix-conduit/" ];
# Other services store their data in conduit, so no other services
# need to be shut down currently.
pauseServices = [ "conduit.service" ];
};
};
# Pass in the TURN secret via EnvironmentFile, not supported by
# upstream module currently.
#
# See also https://gitlab.com/famedly/conduit/-/issues/314
systemd.services.conduit.serviceConfig.EnvironmentFile = config.sops.secrets."turn/env".path;
systemd.services.conduit.serviceConfig = {
ExecStart = lib.mkForce "${config.services.matrix-conduit.package}/bin/conduwuit";
# Pass in the TURN secret via EnvironmentFile, not supported by
# upstream module currently.
#
# See also https://gitlab.com/famedly/conduit/-/issues/314
EnvironmentFile = config.sops.secrets."turn/env".path;
};
systemd.services.coturn.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];
services.coturn = {
enable = true;
no-cli = true;
use-auth-secret = true;
static-auth-secret-file = config.sops.secrets."turn/secret".path;
realm = turn-realm;
relay-ips = [ "116.202.158.55" ];
# SSL config
pkey = "${config.security.acme.certs."tlater.net".directory}/key.pem";
cert = "${config.security.acme.certs."tlater.net".directory}/fullchain.pem";
# Based on suggestions from
# https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md
# and
# https://www.foxypossibilities.com/2018/05/19/setting-up-a-turn-sever-for-matrix-on-nixos/
no-tcp-relay = true;
secure-stun = true;
extraConfig = ''
# Deny various local IP ranges, see
# https://www.rtcsec.com/article/cve-2020-26262-bypass-of-coturns-access-control-protection/
no-multicast-peers
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=127.0.0.0-127.255.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=192.0.2.0-192.0.2.255
denied-peer-ip=192.88.99.0-192.88.99.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255 denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
# *Allow* any IP addresses that we explicitly set as relay IPs
${concatMapStringsSep "\n" (ip: "allowed-peer-ip=${ip}") config.services.coturn.relay-ips}
# Various other security settings
no-tlsv1
no-tlsv1_1
# Monitoring
prometheus
'';
};
services.nginx.virtualHosts."${domain}" = {
useACMEHost = "tlater.net";
listen = [
{
addr = "0.0.0.0";
port = 80;
}
{
addr = "[::0]";
port = 80;
}
{
addr = "0.0.0.0";
port = 443;
ssl = true;
}
{
addr = "[::0]";
port = 443;
ssl = true;
}
{
addr = "0.0.0.0";
port = 8448;
ssl = true;
}
{
addr = "[::0]";
port = 8448;
ssl = true;
}
];
forceSSL = true;
enableHSTS = true;
extraConfig = ''
merge_slashes off;
'';
locations = {
"/_matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
# Recommended by conduit
extraConfig = ''
proxy_buffering off;
'';
};
"/.well-known/matrix" = {
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
};
};
};
services.backups.conduit = {
user = "root";
paths = [ "/var/lib/private/matrix-conduit/" ];
# Other services store their data in conduit, so no other services
# need to be shut down currently.
pauseServices = [ "conduit.service" ];
};
}
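Note: stripped of the indentation churn, the conduwuit-to-continuwuity part of this file boils down to two settings, both visible in the diff above (a summary sketch, not additional configuration):

services.matrix-conduit.package = pkgs.matrix-continuwuity;
# continuwuity still ships its binary as `conduwuit`, while the
# upstream module presumably starts bin/conduit, hence the override:
systemd.services.conduit.serviceConfig.ExecStart =
  lib.mkForce "${config.services.matrix-conduit.package}/bin/conduwuit";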

View file

@ -29,16 +29,29 @@ let
};
# Encryption support
extraSettings = {
"de.sorunome.msc2409.push_ephemeral" = true;
push_ephemeral = true;
"org.matrix.msc3202" = true;
};
# TODO(tlater): Enable when
# https://github.com/matrix-org/matrix-hookshot/issues/1060 is
# fixed
# extraSettings = {
# "de.sorunome.msc2409.push_ephemeral" = true;
# push_ephemeral = true;
# "org.matrix.msc3202" = true;
# };
runtimeRegistration = "${cfg.registrationFile}";
};
in
{
# users = {
# users.matrix-hookshot = {
# home = "/run/matrix-hookshot";
# group = "matrix-hookshot";
# isSystemUser = true;
# };
# groups.matrix-hookshot = { };
# };
systemd.services.matrix-hookshot = {
serviceConfig = {
Type = lib.mkForce "exec";
@ -49,6 +62,7 @@ in
# Some library in matrix-hookshot wants a home directory
Environment = [ "HOME=/run/matrix-hookshot" ];
# User = "matrix-hookshot";
DynamicUser = true;
StateDirectory = "matrix-hookshot";
RuntimeDirectory = "matrix-hookshot";
@ -62,7 +76,11 @@ in
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
RestrictAddressFamilies = [
# "AF_UNIX"
"AF_INET"
"AF_INET6"
];
LockPersonality = true;
RestrictRealtime = true;
ProtectProc = "invisible";
@ -71,12 +89,15 @@ in
};
};
# services.redis.servers.matrix-hookshot = {
# enable = true;
# user = "matrix-hookshot";
# };
services.matrix-hookshot = {
enable = true;
serviceDependencies = [
"conduit.service"
];
serviceDependencies = [ "conduit.service" ];
registrationFile = "/run/matrix-hookshot/registration.yaml";
@ -91,6 +112,8 @@ in
bot.displayname = "Hookshot";
# cache.redisUri = "redis://${config.services.redis.servers.matrix-hookshot.unixSocket}";
generic = {
enabled = true;
outbound = false;
@ -100,7 +123,10 @@ in
allowJsTransformationFunctions = true;
};
encryption.storagePath = "/var/lib/matrix-hookshot/cryptostore";
# TODO(tlater): Enable when
# https://github.com/matrix-org/matrix-hookshot/issues/1060 is
# fixed
# encryption.storagePath = "/var/lib/matrix-hookshot/cryptostore";
permissions = [
{
@ -126,19 +152,15 @@ in
listeners = [
{
port = 9000;
resources = [
"webhooks"
];
resources = [ "webhooks" ];
}
{
port = 9001;
resources = [
"metrics"
];
resources = [ "metrics" ];
}
];
metrics.enable = true;
metrics.enabled = true;
};
};
}

View file

@ -8,9 +8,7 @@
security.crowdsec = {
enable = true;
parserWhitelist = [
"10.45.249.2"
];
parserWhitelist = [ "10.45.249.2" ];
extraGroups = [
"systemd-journal"
@ -21,25 +19,19 @@
{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [
"SYSLOG_IDENTIFIER=Nextcloud"
];
journalctl_filter = [ "SYSLOG_IDENTIFIER=Nextcloud" ];
}
{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [
"SYSLOG_IDENTIFIER=sshd-session"
];
journalctl_filter = [ "SYSLOG_IDENTIFIER=sshd-session" ];
}
{
labels.type = "nginx";
filenames =
[
"/var/log/nginx/*.log"
]
[ "/var/log/nginx/*.log" ]
++ lib.mapAttrsToList (
vHost: _: "/var/log/nginx/${vHost}/access.log"
) config.services.nginx.virtualHosts;

View file

@ -11,37 +11,39 @@ in
{
imports = [ flake-inputs.foundryvtt.nixosModules.foundryvtt ];
services.foundryvtt = {
enable = true;
hostName = domain;
minifyStaticFiles = true;
proxySSL = true;
proxyPort = 443;
package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_11;
services = {
foundryvtt = {
enable = true;
hostName = domain;
minifyStaticFiles = true;
proxySSL = true;
proxyPort = 443;
package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_13;
};
nginx.virtualHosts."${domain}" =
let
inherit (config.services.foundryvtt) port;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;
locations."/" = {
proxyWebsockets = true;
proxyPass = "http://localhost:${toString port}";
};
};
backups.foundryvtt = {
user = "foundryvtt";
paths = [ config.services.foundryvtt.dataDir ];
pauseServices = [ "foundryvtt.service" ];
};
};
# Want to start it manually when I need it, not have it constantly
# running
systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];
services.nginx.virtualHosts."${domain}" =
let
inherit (config.services.foundryvtt) port;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;
locations."/" = {
proxyWebsockets = true;
proxyPass = "http://localhost:${toString port}";
};
};
services.backups.foundryvtt = {
user = "foundryvtt";
paths = [ config.services.foundryvtt.dataDir ];
pauseServices = [ "foundryvtt.service" ];
};
}

View file

@ -8,24 +8,68 @@ let
domain = "gitea.${config.services.nginx.domain}";
in
{
services.forgejo = {
enable = true;
database.type = "postgres";
services = {
forgejo = {
enable = true;
database.type = "postgres";
settings = {
server = {
DOMAIN = domain;
HTTP_ADDR = "127.0.0.1";
ROOT_URL = "https://${domain}/";
SSH_PORT = 2222;
settings = {
server = {
DOMAIN = domain;
HTTP_ADDR = "127.0.0.1";
ROOT_URL = "https://${domain}/";
SSH_PORT = 2222;
};
metrics = {
ENABLED = true;
TOKEN = "#metricstoken#";
};
service.DISABLE_REGISTRATION = true;
session.COOKIE_SECURE = true;
};
};
# Set up SSL
nginx.virtualHosts."${domain}" =
let
httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
httpPort = config.services.forgejo.settings.server.HTTP_PORT;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;
locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
locations."/metrics" = {
extraConfig = ''
access_log off;
allow 127.0.0.1;
${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
deny all;
'';
};
};
metrics = {
ENABLED = true;
TOKEN = "#metricstoken#";
backups.forgejo = {
user = "forgejo";
paths = [
"/var/lib/forgejo/forgejo-db.sql"
"/var/lib/forgejo/repositories/"
"/var/lib/forgejo/data/"
"/var/lib/forgejo/custom/"
# Conf is backed up via nix
];
preparation = {
packages = [ config.services.postgresql.package ];
text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
};
service.DISABLE_REGISTRATION = true;
session.COOKIE_SECURE = true;
cleanup = {
packages = [ pkgs.coreutils ];
text = "rm /var/lib/forgejo/forgejo-db.sql";
};
pauseServices = [ "forgejo.service" ];
};
};
@ -36,46 +80,4 @@ in
runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
in
[ "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'" ];
# Set up SSL
services.nginx.virtualHosts."${domain}" =
let
httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
httpPort = config.services.forgejo.settings.server.HTTP_PORT;
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;
locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
locations."/metrics" = {
extraConfig = ''
access_log off;
allow 127.0.0.1;
${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
deny all;
'';
};
};
services.backups.forgejo = {
user = "forgejo";
paths = [
"/var/lib/forgejo/forgejo-db.sql"
"/var/lib/forgejo/repositories/"
"/var/lib/forgejo/data/"
"/var/lib/forgejo/custom/"
# Conf is backed up via nix
];
preparation = {
packages = [ config.services.postgresql.package ];
text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
};
cleanup = {
packages = [ pkgs.coreutils ];
text = "rm /var/lib/forgejo/forgejo-db.sql";
};
pauseServices = [ "forgejo.service" ];
};
}
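Note: the #metricstoken# placeholder set in the metrics settings above is only a stand-in; the fragment at the end of the diff rewrites it in the runtime app.ini at service start, so the real token never lands in the world-readable Nix store. Schematically (replaceSecretBin and secretPath are defined earlier in the full file, and the list is presumably assigned to the forgejo unit's ExecStartPre):

systemd.services.forgejo.serviceConfig.ExecStartPre =
  let
    runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
  in
  # The "+" prefix runs this step with full privileges so it can edit app.ini.
  [ "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'" ];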

View file

@ -0,0 +1,67 @@
{
pkgs,
config,
lib,
...
}:
let
hostName = "immich.${config.services.nginx.domain}";
in
{
services = {
immich = {
enable = true;
settings.server.externalDomain = "https://${hostName}";
environment.IMMICH_TELEMETRY_INCLUDE = "all";
};
nginx.virtualHosts.${hostName} =
let
local = "http://${config.services.immich.host}:${toString config.services.immich.port}";
in
{
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;
locations."/" = {
proxyPass = local;
proxyWebsockets = true;
};
locations."/metrics" = {
extraConfig = ''
access_log off;
allow 127.0.0.1;
${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
deny all;
'';
};
};
backups.immich =
let
db-dump = "${config.services.immich.mediaLocation}/immich-db.sql";
in
{
user = "immich";
paths = [ config.services.immich.mediaLocation ];
preparation = {
packages = [ config.services.postgresql.package ];
text = ''
pg_dump ${config.services.immich.database.name} --clean --if-exists --file=${db-dump}
'';
};
cleanup = {
packages = [ pkgs.coreutils ];
text = "rm ${db-dump}";
};
pauseServices = [
"immich-server.service"
"immich-machine-learning.service"
];
};
};
}
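Note: IMMICH_TELEMETRY_INCLUDE = "all" above is what actually exposes Prometheus metrics, and the two scrape targets added in the metrics options diff further down point at what appear to be immich's default metrics ports. As a sketch, with the port numbers taken from that diff:

services.immich.environment.IMMICH_TELEMETRY_INCLUDE = "all";
# Matching scrape targets; see the metrics options diff below.
services.victoriametrics.scrapeConfigs.immich.targets = [
  "127.0.0.1:8081" # immich server metrics (assumed default port)
  "127.0.0.1:8082" # immich microservices metrics (assumed default port)
];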

View file

@ -74,7 +74,7 @@ in
listenAddress = "127.0.0.1";
group = "nginx";
settings.namespaces = lib.mapAttrsToList (name: virtualHost: {
settings.namespaces = lib.mapAttrsToList (name: _: {
inherit name;
metrics_override.prefix = "nginxlog";
namespace_label = "vhost";
@ -97,4 +97,6 @@ in
# - postgres (?)
# - ssl_exporter (?)
};
services.dbus.implementation = "broker";
}
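Note: the single functional change here, services.dbus.implementation = "broker", is the fix described in commit fc991a0b07 above: exporters running with DynamicUser had their system-bus connection reset by the reference dbus daemon, and dbus-broker does not show the problem. A minimal sketch of the combination, assuming the stock nixpkgs systemd exporter module:

# Swap the system bus implementation for dbus-broker (the change in
# this diff); the classic daemon reset connections from DynamicUser
# services such as the systemd exporter.
services.dbus.implementation = "broker";

# Hypothetical minimal exporter that was affected; it runs with
# DynamicUser and reads unit state over the system bus.
services.prometheus.exporters.systemd.enable = true;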

View file

@ -1,9 +1,4 @@
{
pkgs,
config,
flake-inputs,
...
}:
{ pkgs, config, ... }:
let
domain = "metrics.${config.services.nginx.domain}";
in
@ -35,7 +30,7 @@ in
declarativePlugins = [
pkgs.grafanaPlugins.victoriametrics-metrics-datasource
flake-inputs.nixpkgs-unstable.legacyPackages.${pkgs.system}.grafanaPlugins.victoriametrics-logs-datasource
pkgs.grafanaPlugins.victoriametrics-logs-datasource
];
provision = {

View file

@ -38,7 +38,7 @@ in
services.victoriametrics.scrapeConfigs = mkOption {
type = types.attrsOf (
types.submodule (
{ name, self, ... }:
{ name, ... }:
{
options = {
job_name = mkOption {
@ -106,35 +106,37 @@ in
# module is an intractable mess
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig.Restart = mkDefault "always";
serviceConfig.PrivateTmp = mkDefault true;
serviceConfig.WorkingDirectory = mkDefault /tmp;
serviceConfig.DynamicUser = mkDefault true;
# Hardening
serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
serviceConfig.DeviceAllow = [ "" ];
serviceConfig.LockPersonality = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.PrivateDevices = mkDefault true;
serviceConfig.ProtectClock = mkDefault true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectSystem = mkDefault "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictRealtime = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.UMask = "0077";
serviceConfig = {
Restart = mkDefault "always";
PrivateTmp = mkDefault true;
WorkingDirectory = mkDefault /tmp;
DynamicUser = mkDefault true;
# Hardening
CapabilityBoundingSet = mkDefault [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = mkDefault true;
ProtectClock = mkDefault true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = mkDefault "strict";
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
UMask = "0077";
};
}
exporter.serviceOpts
]
@ -144,7 +146,7 @@ in
{
vmagent-scrape-exporters =
let
listenAddress = config.services.victoriametrics.listenAddress;
inherit (config.services.victoriametrics) listenAddress;
vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
promscrape = yaml.generate "prometheus.yml" {
scrape_configs = lib.mapAttrsToList (
@ -153,7 +155,7 @@ in
inherit (scrape) job_name;
static_configs =
scrape.static_configs
++ lib.optional (scrape.targets != [ ]) { targets = scrape.targets; };
++ lib.optional (scrape.targets != [ ]) { inherit (scrape) targets; };
} scrape.extraSettings
) config.services.victoriametrics.scrapeConfigs;
};
@ -212,7 +214,7 @@ in
services.victoriametrics.scrapeConfigs =
let
allExporters = lib.mapAttrs (name: exporter: { inherit (exporter) listenAddress port; }) (
allExporters = lib.mapAttrs (_: exporter: { inherit (exporter) listenAddress port; }) (
(lib.filterAttrs (
name: exporter:
# A bunch of deprecated exporters that need to be ignored

View file

@ -1,37 +1,22 @@
{
config,
pkgs,
lib,
...
}:
{ config, lib, ... }:
let
cfg = config.services.victorialogs;
pkg = pkgs.victoriametrics;
dirname = "victorialogs";
in
{
options.services.victorialogs =
let
inherit (lib.types) str;
in
{
listenAddress = lib.mkOption {
default = ":9428";
type = str;
};
bindAddress = lib.mkOption {
readOnly = true;
type = str;
description = ''
Final address on which victorialogs listens.
'';
};
};
options.services.victorialogs.bindAddress = lib.mkOption {
readOnly = true;
type = lib.types.str;
description = ''
Final address on which victorialogs listens.
'';
};
config = {
services.victorialogs.bindAddress =
(lib.optionalString (lib.hasPrefix ":" cfg.listenAddress) "127.0.0.1") + cfg.listenAddress;
services.victorialogs = {
enable = true;
bindAddress =
(lib.optionalString (lib.hasPrefix ":" cfg.listenAddress) "127.0.0.1") + cfg.listenAddress;
};
services.journald.upload = {
enable = true;
@ -40,71 +25,6 @@ in
NetworkTimeoutSec = "20s";
};
};
systemd.services."systemd-journal-upload".after = [ "victorialogs.service" ];
systemd.services.victorialogs = {
description = "VictoriaLogs log database";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
startLimitBurst = 5;
serviceConfig = {
ExecStart = lib.escapeShellArgs [
"${pkg}/bin/victoria-logs"
"-storageDataPath=/var/lib/${dirname}"
"-httpListenAddr=${cfg.listenAddress}"
];
DynamicUser = true;
RestartSec = 1;
Restart = "on-failure";
RuntimeDirectory = dirname;
RuntimeDirectoryMode = "0700";
StateDirectory = dirname;
StateDirectoryMode = "0700";
LimitNOFILE = 1048576;
# Hardening
DeviceAllow = [ "/dev/null rw" ];
DevicePolicy = "strict";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged"
];
};
postStart = lib.mkBefore ''
until ${lib.getBin pkgs.curl}/bin/curl -s -o /dev/null http://${cfg.bindAddress}/ping; do
sleep 1;
done
'';
};
};
}
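Note: a small worked example for the bindAddress derivation kept above (it survives from the removed local module): with the ":9428" default that the old option used, lib.hasPrefix ":" is true, so "127.0.0.1" is prepended and consumers such as journald-upload get a concrete loopback address. Sketch only, with lib taken from nixpkgs:

let
  listenAddress = ":9428"; # default used by the removed local option
  bindAddress =
    (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1")
    + listenAddress;
in
bindAddress # evaluates to "127.0.0.1:9428"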

View file

@ -84,9 +84,16 @@ in
in
[ "${address}:${toString port}" ];
immich.targets = [
"127.0.0.1:8081"
"127.0.0.1:8082"
];
# Configured in the hookshot listeners, but it's hard to filter
# the correct values out of that config.
matrixHookshot.targets = [ "127.0.0.1:9001" ];
victorialogs.targets = [ config.services.victorialogs.bindAddress ];
};
};
}

View file

@ -5,97 +5,99 @@
...
}:
let
nextcloud = pkgs.nextcloud30;
nextcloud = pkgs.nextcloud31;
hostName = "nextcloud.${config.services.nginx.domain}";
in
{
services.nextcloud = {
inherit hostName;
services = {
nextcloud = {
inherit hostName;
package = nextcloud;
phpPackage = lib.mkForce (
pkgs.php.override {
packageOverrides = final: prev: {
extensions = prev.extensions // {
pgsql = prev.extensions.pgsql.overrideAttrs (old: {
configureFlags = [ "--with-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (old: {
configureFlags = [ "--with-pdo-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
package = nextcloud;
phpPackage = lib.mkForce (
pkgs.php.override {
packageOverrides = _: prev: {
extensions = prev.extensions // {
pgsql = prev.extensions.pgsql.overrideAttrs (_: {
configureFlags = [ "--with-pgsql=${lib.getDev config.services.postgresql.package.pg_config}" ];
});
pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (_: {
configureFlags = [ "--with-pdo-pgsql=${lib.getDev config.services.postgresql.package.pg_config}" ];
});
};
};
};
}
);
enable = true;
maxUploadSize = "2G";
https = true;
}
);
enable = true;
maxUploadSize = "2G";
https = true;
configureRedis = true;
configureRedis = true;
config = {
dbtype = "pgsql";
dbhost = "/run/postgresql";
config = {
dbtype = "pgsql";
dbhost = "/run/postgresql";
adminuser = "tlater";
adminpassFile = config.sops.secrets."nextcloud/tlater".path;
adminuser = "tlater";
adminpassFile = config.sops.secrets."nextcloud/tlater".path;
};
settings = {
default_phone_region = "AT";
overwriteprotocol = "https";
};
phpOptions = {
"opcache.interned_strings_buffer" = "16";
};
extraApps = {
inherit (config.services.nextcloud.package.packages.apps)
calendar
contacts
cookbook
news
;
};
};
settings = {
default_phone_region = "AT";
overwriteprotocol = "https";
# Set up SSL
nginx.virtualHosts."${hostName}" = {
forceSSL = true;
useACMEHost = "tlater.net";
# The upstream module already adds HSTS
};
phpOptions = {
"opcache.interned_strings_buffer" = "16";
};
extraApps = {
inherit (config.services.nextcloud.package.packages.apps)
calendar
contacts
cookbook
news
;
backups.nextcloud = {
user = "nextcloud";
paths = [
"/var/lib/nextcloud/nextcloud-db.sql"
"/var/lib/nextcloud/data/"
"/var/lib/nextcloud/config/config.php"
];
preparation = {
packages = [
config.services.postgresql.package
config.services.nextcloud.occ
];
text = ''
nextcloud-occ maintenance:mode --on
pg_dump ${config.services.nextcloud.config.dbname} --file=/var/lib/nextcloud/nextcloud-db.sql
'';
};
cleanup = {
packages = [
pkgs.coreutils
config.services.nextcloud.occ
];
text = ''
rm /var/lib/nextcloud/nextcloud-db.sql
nextcloud-occ maintenance:mode --off
'';
};
};
};
# Ensure that this service doesn't start before postgres is ready
systemd.services.nextcloud-setup.after = [ "postgresql.service" ];
# Set up SSL
services.nginx.virtualHosts."${hostName}" = {
forceSSL = true;
useACMEHost = "tlater.net";
# The upstream module already adds HSTS
};
services.backups.nextcloud = {
user = "nextcloud";
paths = [
"/var/lib/nextcloud/nextcloud-db.sql"
"/var/lib/nextcloud/data/"
"/var/lib/nextcloud/config/config.php"
];
preparation = {
packages = [
config.services.postgresql.package
config.services.nextcloud.occ
];
text = ''
nextcloud-occ maintenance:mode --on
pg_dump ${config.services.nextcloud.config.dbname} --file=/var/lib/nextcloud/nextcloud-db.sql
'';
};
cleanup = {
packages = [
pkgs.coreutils
config.services.nextcloud.occ
];
text = ''
rm /var/lib/nextcloud/nextcloud-db.sql
nextcloud-occ maintenance:mode --off
'';
};
};
}

View file

@ -1,6 +1,6 @@
{ config, ... }:
let
domain = config.services.nginx.domain;
inherit (config.services.nginx) domain;
in
{
services.tlaternet-webserver = {

43
flake.lock generated
View file

@ -300,11 +300,11 @@
]
},
"locked": {
"lastModified": 1739841949,
"narHash": "sha256-lSOXdgW/1zi/SSu7xp71v+55D5Egz8ACv0STkj7fhbs=",
"lastModified": 1747742835,
"narHash": "sha256-kYL4GCwwznsypvsnA20oyvW8zB/Dvn6K5G/tgMjVMT4=",
"owner": "nix-community",
"repo": "disko",
"rev": "15dbf8cebd8e2655a883b74547108e089f051bf0",
"rev": "df522e787fdffc4f32ed3e1fca9ed0968a384d62",
"type": "github"
},
"original": {
@ -595,11 +595,11 @@
]
},
"locked": {
"lastModified": 1739712626,
"narHash": "sha256-u3m+awbdL+0BKk8IWidsWMr+R0ian3GZMUlH7623kd8=",
"lastModified": 1746877938,
"narHash": "sha256-N9J96pSPg4vbozV+ZZ++dwLnMIf2Le6ONNMO0kZCj1M=",
"owner": "reckenrode",
"repo": "nix-foundryvtt",
"rev": "a7fa493ba2c623cf90e83756b62285b3b58f18d2",
"rev": "f1b401831d796dd94cf5a11b65fd169a199d4ff0",
"type": "github"
},
"original": {
@ -744,34 +744,18 @@
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1740215764,
"narHash": "sha256-wzBbGGZ6i1VVBA/cDJaLfuuGYCUriD7fwsLgJJHRVRk=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "8465e233b0668cf162c608a92e62e8d78c1ba7e4",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable-small",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1740162160,
"narHash": "sha256-SSYxFhqCOb3aiPb6MmN68yEzBIltfom8IgRz7phHscM=",
"lastModified": 1748085680,
"narHash": "sha256-XG90Q/040NiV70gAVvoYbXg1lULbiwIzfkWmwSINyGQ=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "11415c7ae8539d6292f2928317ee7a8410b28bb9",
"rev": "4e6eeca5ed45465087274fc9dc6bc2011254a0f3",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.11-small",
"ref": "nixos-25.05-small",
"repo": "nixpkgs",
"type": "github"
}
@ -1022,7 +1006,6 @@
"disko": "disko",
"foundryvtt": "foundryvtt",
"nixpkgs": "nixpkgs_2",
"nixpkgs-unstable": "nixpkgs-unstable",
"sonnenshift": "sonnenshift",
"sops-nix": "sops-nix",
"tlaternet-webserver": "tlaternet-webserver"
@ -1096,11 +1079,11 @@
]
},
"locked": {
"lastModified": 1739262228,
"narHash": "sha256-7JAGezJ0Dn5qIyA2+T4Dt/xQgAbhCglh6lzCekTVMeU=",
"lastModified": 1747603214,
"narHash": "sha256-lAblXm0VwifYCJ/ILPXJwlz0qNY07DDYdLD+9H+Wc8o=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "07af005bb7d60c7f118d9d9f5530485da5d1e975",
"rev": "8d215e1c981be3aa37e47aeabd4e61bb069548fd",
"type": "github"
},
"original": {

View file

@ -2,8 +2,7 @@
description = "tlater.net host configuration";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11-small";
nixpkgs-unstable.url = "github:nixos/nixpkgs/nixos-unstable-small";
nixpkgs.url = "github:nixos/nixpkgs/nixos-25.05-small";
disko = {
url = "github:nix-community/disko";
inputs.nixpkgs.follows = "nixpkgs";
@ -92,7 +91,7 @@
#########
# Tests #
#########
checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
checks.${system} = import ./checks (inputs // { inherit system; });
###########################
# Garbage collection root #
@ -117,8 +116,6 @@
run-vm = {
type = "app";
program =
let
in
(pkgs.writeShellScript "" ''
${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
'').outPath;

View file

@ -247,10 +247,7 @@ in
online_client = {
# By default, we don't let crowdsec phone home, since
# this is usually within NixOS users' concerns.
#
# TODO: Enable when this option becomes available
# (1.6.4, current nixpkgs-unstable)
# sharing = lib.mkDefault false;
sharing = lib.mkDefault false;
credentials_path = cfg.centralApiCredentials;
};
};
@ -267,9 +264,7 @@ in
};
};
systemd.packages = [
cfg.package
];
systemd.packages = [ cfg.package ];
environment = {
systemPackages = [

View file

@ -6,10 +6,11 @@
...
}:
let
inherit (flake-inputs.self.packages.${pkgs.system}) crowdsec-firewall-bouncer;
crowdsecCfg = config.security.crowdsec;
cfg = crowdsecCfg.remediationComponents.firewallBouncer;
settingsFormat = pkgs.formats.yaml { };
crowdsec-firewall-bouncer = flake-inputs.self.packages.${pkgs.system}.crowdsec-firewall-bouncer;
in
{
options.security.crowdsec.remediationComponents.firewallBouncer = {
@ -31,9 +32,7 @@ in
security.crowdsec.remediationComponents.firewallBouncer.settings = {
mode = lib.mkDefault "${if config.networking.nftables.enable then "nftables" else "iptables"}";
log_mode = "stdout";
iptables_chains = [
"nixos-fw"
];
iptables_chains = [ "nixos-fw" ];
# Don't let users easily override this; unfortunately we need to
# set up this key through substitution at runtime.
@ -78,9 +77,7 @@ in
requiredBy = [ "crowdsec.service" ];
path =
lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [
pkgs.ipset
]
lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [ pkgs.ipset ]
++ lib.optional (cfg.settings.mode == "iptables") pkgs.iptables
++ lib.optional (cfg.settings.mode == "nftables") pkgs.nftables;
};

View file

@ -1,5 +1 @@
{
imports = [
./cs-firewall-bouncer.nix
];
}
{ imports = [ ./cs-firewall-bouncer.nix ]; }

View file

@ -21,7 +21,7 @@
},
"crowdsec-hub": {
"cargoLocks": null,
"date": "2025-02-22",
"date": "2025-05-17",
"extract": null,
"name": "crowdsec-hub",
"passthru": null,
@ -33,10 +33,10 @@
"name": null,
"owner": "crowdsecurity",
"repo": "hub",
"rev": "f9883cd6c7d1913c13e4a3a69d9a0b887a7d57df",
"sha256": "sha256-45pUln7Qj5luY9I9BE2qhzjH7kv4IbYvNoEX3/4AVVg=",
"rev": "850614b9fcd4298f559b422c5ac685a69aa2e5ff",
"sha256": "sha256-96MMwFN5KongQA3YJVSuk7Kanbr1gR94CCyiflmez2k=",
"type": "github"
},
"version": "f9883cd6c7d1913c13e4a3a69d9a0b887a7d57df"
"version": "850614b9fcd4298f559b422c5ac685a69aa2e5ff"
}
}

View file

@ -14,14 +14,14 @@
};
crowdsec-hub = {
pname = "crowdsec-hub";
version = "f9883cd6c7d1913c13e4a3a69d9a0b887a7d57df";
version = "850614b9fcd4298f559b422c5ac685a69aa2e5ff";
src = fetchFromGitHub {
owner = "crowdsecurity";
repo = "hub";
rev = "f9883cd6c7d1913c13e4a3a69d9a0b887a7d57df";
rev = "850614b9fcd4298f559b422c5ac685a69aa2e5ff";
fetchSubmodules = false;
sha256 = "sha256-45pUln7Qj5luY9I9BE2qhzjH7kv4IbYvNoEX3/4AVVg=";
sha256 = "sha256-96MMwFN5KongQA3YJVSuk7Kanbr1gR94CCyiflmez2k=";
};
date = "2025-02-22";
date = "2025-05-17";
};
}

View file

@ -1,4 +1 @@
{
sources,
}:
sources.crowdsec-hub.src
{ sources }: sources.crowdsec-hub.src