Compare commits

..

3 commits

Author SHA1 Message Date
d482b7ab3a
WIP: feat(webserver): Reimplement music player 2025-11-30 00:27:04 +08:00
17ff62f0b9
flake.lock: Update
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/af087d076d3860760b3323f6b583f4d828c1ac17' (2025-11-04)
  → 'github:nix-community/disko/2055a08fd0e2fd41318279a5355eb8a161accf26' (2025-11-28)
• Updated input 'nixpkgs':
    'https://releases.nixos.org/nixos/25.05/nixos-25.05.813095.1c8ba8d3f763/nixexprs.tar.xz?lastModified=1763948260&narHash=sha256-zZk7fn2ARAqmLwaYTpxBJmj81KIdz11NiWt7ydHHD/M%3D&rev=1c8ba8d3f7634acac4a2094eef7c32ad9106532c' (2025-11-24)
  → 'https://releases.nixos.org/nixos/25.05/nixos-25.05.813221.9a7b80b6f82a/nixexprs.tar.xz?lastModified=1764316264&narHash=sha256-UcoE0ISg9Nnzx/2n7VvQl3fRsLg%2BDcVa/ZGf/DZNHbs%3D&rev=9a7b80b6f82a71ea04270d7ba11b48855681c4b0' (2025-11-28)
• Updated input 'nixpkgs-unstable':
    'https://releases.nixos.org/nixos/unstable/nixos-25.11pre900642.050e09e09111/nixexprs.tar.xz?lastModified=1763835633&narHash=sha256-nzRnw0UkYQpDm0o20AKvG/5oHCXy5qEGOsFAVhB5NmA%3D&rev=050e09e091117c3d7328c7b2b7b577492c43c134' (2025-11-22)
  → 'https://releases.nixos.org/nixos/unstable/nixos-26.05pre903292.2fad6eac6077/nixexprs.tar.xz?lastModified=1764242076&narHash=sha256-6/1EG2fiKvLoUJ8FD7ymRx87e4zcfJTzAdUYgo4CDLA%3D&rev=2fad6eac6077f03fe109c4d4eb171cf96791faa4' (2025-11-27)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/877bb495a6f8faf0d89fc10bd142c4b7ed2bcc0b' (2025-11-20)
  → 'github:Mic92/sops-nix/c482a1c1bbe030be6688ed7dc84f7213f304f1ec' (2025-11-24)
2025-11-30 00:27:04 +08:00
823caecc59
feat(vm): Fix shutdown 2025-11-30 00:26:50 +08:00
44 changed files with 2198 additions and 1154 deletions

View file

@ -1,82 +1,47 @@
{ self, ... }:
{ flake-inputs }:
let
inherit (flake-inputs.nixpkgs) lib;
pkgs = flake-inputs.nixpkgs.legacyPackages.x86_64-linux;
checkLib = pkgs.callPackage ./lib.nix { };
in
{
perSystem =
x86_64-linux = lib.mergeAttrsList [
flake-inputs.self.nixosConfigurations.hetzner-1.config.serviceTests
{
inputs',
lib,
pkgs,
...
}:
let
mkLint =
{
name,
fileset,
checkInputs ? [ ],
script,
}:
pkgs.stdenvNoCC.mkDerivation {
inherit name;
nix = checkLib.mkLint {
name = "nix-lints";
fileset = lib.fileset.fileFilter (file: file.hasExt "nix") ../.;
src = lib.fileset.toSource {
root = ../.;
fileset = lib.fileset.difference fileset (
lib.fileset.fileFilter (
file: file.type != "regular" || file.name == "hardware-configuration.nix"
) ../.
);
};
checkInputs = lib.attrValues {
inherit (pkgs) deadnix nixfmt-rfc-style;
checkInputs = [ pkgs.nushell ] ++ checkInputs;
checkPhase = ''
nu -c '${script}' | tee $out
'';
dontPatch = true;
dontConfigure = true;
dontBuild = true;
dontInstall = true;
dontFixup = true;
doCheck = true;
};
in
{
checks = {
nix = mkLint {
name = "nix-lints";
fileset = lib.fileset.fileFilter (file: file.hasExt "nix") ../.;
checkInputs = lib.attrValues {
inherit (pkgs) deadnix nixfmt-rfc-style;
statix = pkgs.statix.overrideAttrs (old: {
patches = old.patches ++ [
(pkgs.fetchpatch {
url = "https://github.com/oppiliappan/statix/commit/925dec39bb705acbbe77178b4d658fe1b752abbb.patch";
hash = "sha256-0wacO6wuYJ4ufN9PGucRVJucFdFFNF+NoHYIrLXsCWs=";
})
];
});
};
script = /* bash */ ''
statix check **/*.nix
deadnix --fail **/*.nix
nixfmt --check --strict **/*.nix
'';
statix = pkgs.statix.overrideAttrs (old: {
patches = old.patches ++ [
(pkgs.fetchpatch {
url = "https://github.com/oppiliappan/statix/commit/925dec39bb705acbbe77178b4d658fe1b752abbb.patch";
hash = "sha256-0wacO6wuYJ4ufN9PGucRVJucFdFFNF+NoHYIrLXsCWs=";
})
];
});
};
lockfile = mkLint {
name = "nix-lockfile";
fileset = ../flake.lock;
checkInputs = lib.attrValues { inherit (inputs'.flint.packages) flint; };
script = ''
statix check **/*.nix
deadnix --fail **/*.nix
nixfmt --check --strict **/*.nix
'';
};
script = /* bash */ ''
flint --fail-if-multiple-versions
'';
};
}
// self.nixosConfigurations.hetzner-1.config.serviceTests;
};
lockfile = checkLib.mkLint {
name = "nix-lockfile";
fileset = ../flake.lock;
checkInputs = lib.attrValues { inherit (flake-inputs.flint.packages.x86_64-linux) flint; };
script = ''
flint --fail-if-multiple-versions
'';
};
}
];
}

35
checks/lib.nix Normal file
View file

@ -0,0 +1,35 @@
{ pkgs, lib, ... }:
{
mkLint =
{
name,
fileset,
checkInputs ? [ ],
script,
}:
pkgs.stdenvNoCC.mkDerivation {
inherit name;
src = lib.fileset.toSource {
root = ../.;
fileset = lib.fileset.difference fileset (
lib.fileset.fileFilter (
file: file.type != "regular" || file.name == "hardware-configuration.nix"
) ../.
);
};
checkInputs = [ pkgs.nushell ] ++ checkInputs;
checkPhase = ''
nu -c '${script}' | tee $out
'';
dontPatch = true;
dontConfigure = true;
dontBuild = true;
dontInstall = true;
dontFixup = true;
doCheck = true;
};
}

View file

@ -1,5 +1,4 @@
{
pkgs,
lib,
modulesPath,
flake-inputs,
@ -54,19 +53,6 @@
};
logrotate.enable = true;
postgresql = {
package = pkgs.postgresql_14;
enable = true;
# Only enable connections via the unix socket, and check with the
# OS to make sure the user matches the database name.
#
# See https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
authentication = ''
local sameuser all peer
'';
};
};
security = {

View file

@ -2,7 +2,6 @@
imports = [
./hardware-configuration.nix
./disko.nix
./vm.nix
];
# Intel's special encrypted memory<->CPU feature. Hetzner's BIOS

View file

@ -80,7 +80,7 @@
inherit mountOptions;
mountpoint = "/var";
};
"/volume/var/lib/private/continuwuity" = {
"/volume/var/lib/private/matrix-conduit" = {
mountOptions = [
# Explicitly don't compress here, since
# conduwuit's database does compression by
@ -89,7 +89,7 @@
# if btrfs compresses it)
"noatime"
];
mountpoint = "/var/lib/private/continuwuity";
mountpoint = "/var/lib/private/matrix-conduit";
};
"/volume/nix-store" = {
inherit mountOptions;

View file

@ -1,70 +0,0 @@
{ lib, ... }:
{
virtualisation.vmVariant = {
users.users.tlater.password = "insecure";
# Disable graphical tty so -curses works
boot.kernelParams = [ "nomodeset" ];
networking.hostName = lib.mkForce "testvm";
services = {
# Sets the base domain for nginx to a local domain so that we can
# easily test locally with the VM.
nginx.domain = lib.mkForce "dev.local";
# Don't run this
batteryManager.enable = lib.mkForce false;
btrfs.autoScrub.enable = lib.mkForce false;
openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];
};
# Use the staging secrets
sops.defaultSopsFile = lib.mkOverride 99 ../../../keys/staging.yaml;
systemd.network.networks."10-eth0" = {
matchConfig.Name = "eth0";
gateway = [ "192.168.9.1" ];
networkConfig = {
Address = "192.168.9.2/24";
};
};
# Both so we have a predictable key for the staging env, as well as
# to have a static key for decrypting the sops secrets for the
# staging env.
environment.etc."staging.key" = {
mode = "0400";
source = ../../../keys/hosts/staging.key;
};
# Pretend the acme renew succeeds.
#
# TODO(tlater): Set up pebble to retrieve certs "properly"
# instead
systemd.services."acme-order-renew-tlater.net".script = ''
touch out/acme-success
'';
virtualisation = {
memorySize = 3941;
cores = 2;
graphics = false;
diskSize = 1024 * 20;
qemu = {
networkingOptions = lib.mkForce [
"-device virtio-net,netdev=n1"
"-netdev bridge,id=n1,br=br0,helper=$(which qemu-bridge-helper)"
];
};
};
};
}

View file

@ -0,0 +1,63 @@
{ lib, ... }:
{
users.users.tlater.password = "insecure";
# Disable graphical tty so -curses works
boot.kernelParams = [ "nomodeset" ];
networking.hostName = "testvm";
systemd.services.matrix-hookshot.enable = lib.mkForce false;
services = {
# Sets the base domain for nginx to a local domain so that we can
# easily test locally with the VM.
nginx.domain = "dev.local";
# Don't run this
batteryManager.enable = lib.mkForce false;
openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];
};
# Use the staging secrets
sops.defaultSopsFile = lib.mkOverride 99 ../../keys/staging.yaml;
systemd.network.networks."10-eth0" = {
matchConfig.Name = "eth0";
gateway = [ "192.168.9.1" ];
networkConfig = {
Address = "192.168.9.2/24";
};
};
# Both so we have a predictable key for the staging env, as well as
# to have a static key for decrypting the sops secrets for the
# staging env.
environment.etc."staging.key" = {
mode = "0400";
source = ../../keys/hosts/staging.key;
};
virtualisation.vmVariant = {
virtualisation = {
memorySize = 3941;
cores = 2;
graphics = false;
diskSize = 1024 * 20;
};
virtualisation.qemu = {
networkingOptions = lib.mkForce [
"-device virtio-net,netdev=n1"
"-netdev bridge,id=n1,br=br0,helper=$(which qemu-bridge-helper)"
];
};
};
}

View file

@ -51,9 +51,20 @@
paths = [ "/var/lib/acme/tlater.net" ];
};
systemd.services.nginx.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];
systemd.services = {
nginx.serviceConfig.SupplementaryGroups = [ config.security.acme.certs."tlater.net".group ];
# Don't attempt to retrieve a certificate if the domain name
# doesn't *actually* match the cert name
#
# TODO(tlater): Set up pebble to retrieve certs "properly"
# instead
"acme-tlater.net".serviceConfig.ExecCondition =
let
confirm = ''[[ "tlater.net" = "${config.services.nginx.domain}" ]]'';
in
''${pkgs.runtimeShell} -c '${confirm}' '';
};
sops.secrets = {
"porkbun/api-key".owner = "acme";
@ -74,18 +85,10 @@
security.acme.certs."tlater.net".extraDomainNames = [ config.services.nginx.domain ];
# Pretend the acme renew succeeds.
#
# TODO(tlater): Set up pebble to retrieve certs "properly"
# instead
systemd.services."acme-order-renew-tlater.net".script = ''
touch out/acme-success
'';
services.nginx = {
domain = "testHost.test";
domain = "testHost";
virtualHosts."${config.services.nginx.domain}.local" = {
virtualHosts."${config.services.nginx.domain}" = {
useACMEHost = "tlater.net";
onlySSL = true;
enableHSTS = true;
@ -106,7 +109,6 @@
{ pkgs, ... }:
{
environment.systemPackages = [ pkgs.curl ];
networking.hosts."192.168.1.2" = [ "testHost.test" ];
};
};
@ -123,7 +125,7 @@
"--silent",
"--dump-header -",
"--cacert /certs/tlater.net/fullchain.pem",
"https://testHost.test",
"https://testHost",
"-o /dev/null"
]))

View file

@ -7,12 +7,15 @@
let
inherit (lib.strings) concatMapStringsSep;
cfg = config.services.matrix-continuwuity;
cfg = config.services.matrix-conduit;
domain = "matrix.${config.services.nginx.domain}";
turn-realm = "turn.${config.services.nginx.domain}";
in
{
imports = [ ./heisenbridge.nix ];
imports = [
./heisenbridge.nix
./matrix-hookshot.nix
];
networking.firewall = {
allowedTCPPorts = [
@ -45,14 +48,14 @@ in
};
services = {
matrix-continuwuity = {
matrix-conduit = {
enable = true;
package = pkgs.matrix-continuwuity;
settings.global = {
address = [ "127.0.0.1" ];
address = "127.0.0.1";
server_name = domain;
new_user_displayname_suffix = "🦆";
turn_secret_file = "/run/credentials/continuwuity.service/turn-secret";
allow_check_for_updates = true;
# Set up delegation: https://docs.conduit.rs/delegation.html#automatic-recommended
# This is primarily to make sliding sync work
@ -173,30 +176,35 @@ in
locations = {
"/_matrix" = {
proxyPass = "http://${lib.head cfg.settings.global.address}:${toString cfg.settings.global.port}";
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
# Recommended by conduit
extraConfig = ''
proxy_buffering off;
'';
};
"/.well-known/matrix" = {
proxyPass = "http://${lib.head cfg.settings.global.address}:${toString cfg.settings.global.port}";
proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
};
};
};
backups.conduit = {
user = "root";
paths = [ "/var/lib/private/matrix-continuwuity/" ];
paths = [ "/var/lib/private/matrix-conduit/" ];
# Other services store their data in conduit, so no other services
# need to be shut down currently.
pauseServices = [ "continuwuity.service" ];
pauseServices = [ "conduit.service" ];
};
};
systemd.services.continuwuity.serviceConfig.LoadCredential = "turn-secret:${
config.sops.secrets."turn/env".path
}";
systemd.services.conduit.serviceConfig = {
ExecStart = lib.mkForce "${config.services.matrix-conduit.package}/bin/conduwuit";
# Pass in the TURN secret via EnvironmentFile, not supported by
# upstream module currently.
#
# See also https://gitlab.com/famedly/conduit/-/issues/314
EnvironmentFile = config.sops.secrets."turn/env".path;
};
systemd.services.coturn.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group

View file

@ -5,7 +5,7 @@
...
}:
let
conduitCfg = config.services.matrix-continuwuity;
conduitCfg = config.services.matrix-conduit;
matrixLib = pkgs.callPackage ./lib.nix { };
in
{
@ -36,7 +36,7 @@ in
{
description = "Matrix<->IRC bridge";
wantedBy = [ "multi-user.target" ];
after = [ "continuwuity.service" ];
after = [ "conduit.service" ];
serviceConfig = {
Type = "exec";

View file

@ -0,0 +1,172 @@
{
pkgs,
lib,
config,
...
}:
let
matrixLib = pkgs.callPackage ./lib.nix { };
cfg = config.services.matrix-hookshot;
conduitCfg = config.services.matrix-conduit;
domain = conduitCfg.settings.global.server_name;
registration = matrixLib.writeRegistrationScript {
id = "matrix-hookshot";
url = "http://127.0.0.1:9993";
sender_localpart = "hookshot";
namespaces = {
aliases = [ ];
rooms = [ ];
users = [
{
regex = "@${cfg.settings.generic.userIdPrefix}.*:${domain}";
exclusive = true;
}
];
};
# Encryption support
# TODO(tlater): Enable when
# https://github.com/matrix-org/matrix-hookshot/issues/1060 is
# fixed
# extraSettings = {
# "de.sorunome.msc2409.push_ephemeral" = true;
# push_ephemeral = true;
# "org.matrix.msc3202" = true;
# };
runtimeRegistration = "${cfg.registrationFile}";
};
in
{
# users = {
# users.matrix-hookshot = {
# home = "/run/matrix-hookshot";
# group = "matrix-hookshot";
# isSystemUser = true;
# };
# groups.matrix-hookshot = { };
# };
systemd.services.matrix-hookshot = {
serviceConfig = {
Type = lib.mkForce "exec";
LoadCredential = "matrix-hookshot:/run/secrets/matrix-hookshot";
inherit (registration) ExecStartPre;
# Some library in matrix-hookshot wants a home directory
Environment = [ "HOME=/run/matrix-hookshot" ];
# User = "matrix-hookshot";
DynamicUser = true;
StateDirectory = "matrix-hookshot";
RuntimeDirectory = "matrix-hookshot";
RuntimeDirectoryMode = "0700";
RestrictNamespaces = true;
PrivateUsers = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [
# "AF_UNIX"
"AF_INET"
"AF_INET6"
];
LockPersonality = true;
RestrictRealtime = true;
ProtectProc = "invisible";
ProcSubset = "pid";
UMask = 77;
};
};
# services.redis.servers.matrix-hookshot = {
# enable = true;
# user = "matrix-hookshot";
# };
services.matrix-hookshot = {
enable = true;
serviceDependencies = [ "conduit.service" ];
registrationFile = "/run/matrix-hookshot/registration.yaml";
settings = {
bridge = {
inherit domain;
url = "http://localhost:${toString conduitCfg.settings.global.port}";
mediaUrl = conduitCfg.settings.global.well_known.client;
port = 9993;
bindAddress = "127.0.0.1";
};
bot.displayname = "Hookshot";
# cache.redisUri = "redis://${config.services.redis.servers.matrix-hookshot.unixSocket}";
generic = {
enabled = true;
outbound = false;
# Only allow webhooks from localhost for the moment
urlPrefix = "http://127.0.0.1:9000/webhook";
userIdPrefix = "_webhooks_";
allowJsTransformationFunctions = true;
};
# TODO(tlater): Enable when
# https://github.com/matrix-org/matrix-hookshot/issues/1060 is
# fixed
# encryption.storagePath = "/var/lib/matrix-hookshot/cryptostore";
permissions = [
{
actor = "matrix.tlater.net";
services = [
{
service = "*";
level = "notifications";
}
];
}
{
actor = "@tlater:matrix.tlater.net";
services = [
{
service = "*";
level = "admin";
}
];
}
];
listeners = [
{
port = 9000;
resources = [ "webhooks" ];
}
{
port = 9001;
resources = [ "metrics" ];
}
];
metrics.enabled = true;
};
};
sops.secrets = {
# Accessed via systemd cred through /run/secrets/matrix-hookshot
"matrix-hookshot/as-token" = { };
"matrix-hookshot/hs-token" = { };
};
}

View file

@ -0,0 +1,50 @@
{
"allowAdminCommands" : true,
"allowAdminCommandsFromAnyone" : false,
"allowAnonymousConnections" : true,
"allowAssetsMismatch" : true,
"anonymousConnectionsAreAdmin" : false,
"bannedIPs" : [],
"bannedUuids" : [],
"checkAssetsDigest" : false,
"clearPlayerFiles" : false,
"clearUniverseFiles" : false,
"clientIPJoinable" : false,
"clientP2PJoinable" : true,
"configurationVersion" : {
"basic" : 2,
"server" : 4
},
"crafting" : {
"filterHaveMaterials" : false
},
"gameServerBind" : "::",
"gameServerPort" : 21025,
"interactiveHighlight" : true,
"inventory" : {
"pickupToActionBar" : true
},
"maxPlayers" : 8,
"maxTeamSize" : 4,
"monochromeLighting" : false,
"playerBackupFileCount" : 3,
"queryServerBind" : "::",
"queryServerPort" : 21025,
"rconServerBind" : "::",
"rconServerPassword" : "",
"rconServerPort" : 21026,
"rconServerTimeout" : 1000,
"runQueryServer" : false,
"runRconServer" : false,
"safeScripts" : true,
"scriptInstructionLimit" : 10000000,
"scriptInstructionMeasureInterval" : 10000,
"scriptProfilingEnabled" : false,
"scriptRecursionLimit" : 100,
"serverFidelity" : "automatic",
"serverName" : "tlater.net",
"serverOverrideAssetsDigest" : null,
"serverUsers" : {
},
"tutorialMessages" : true
}

View file

@ -1,80 +1,45 @@
{ config, lib, ... }:
{
services = {
crowdsec = {
enable = true;
autoUpdateService = true;
pkgs,
config,
lib,
...
}:
{
security.crowdsec = {
enable = true;
settings = {
general.api.server = {
enable = true;
online_client.sharing = false;
};
parserWhitelist = [ "10.45.249.2" ];
lapi.credentialsFile = "/var/lib/crowdsec/state/local_credentials.yaml";
};
extraGroups = [
"systemd-journal"
"nginx"
];
hub = {
collections = [
"crowdsecurity/base-http-scenarios"
"crowdsecurity/http-cve"
"crowdsecurity/linux"
"crowdsecurity/nextcloud"
"crowdsecurity/nginx"
"crowdsecurity/sshd"
];
};
acquisitions = [
{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [ "SYSLOG_IDENTIFIER=Nextcloud" ];
}
localConfig = {
acquisitions = [
{
labels.type = "syslog";
journalctl_filter = [
"SYSLOG_IDENTIFIER=Nextcloud"
"SYSLOG_IDENTIFIER=sshd-session"
];
source = "journalctl";
}
{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [ "SYSLOG_IDENTIFIER=sshd-session" ];
}
{
labels.type = "nginx";
filenames = [
"/var/log/nginx/*.log"
]
++ lib.mapAttrsToList (
vHost: _: "/var/log/nginx/${vHost}/access.log"
) config.services.nginx.virtualHosts;
}
];
{
labels.type = "nginx";
filenames = [
"/var/log/nginx/*.log"
]
++ lib.mapAttrsToList (
vHost: _: "/var/log/nginx/${vHost}/access.log"
) config.services.nginx.virtualHosts;
}
];
parsers.s02Enrich = [
{
name = "nixos/parser-whitelist";
description = "Parser whitelist generated by the crowdsec NixOS module";
whitelist = {
reason = "Filtered by NixOS whitelist";
ip = [ "10.45.249.2" ];
};
}
];
postOverflows.s01Whitelist = [
{
description = "custom matrix whitelist";
name = "tetsumaki/matrix";
whitelist = {
reason = "whitelist false positive for matrix";
expression = [
"evt.Overflow.Alert.Events[0].GetMeta('target_fqdn') == '${config.services.matrix-continuwuity.settings.global.server_name}'"
"evt.Overflow.Alert.GetScenario() in ['crowdsecurity/http-probing', 'crowdsecurity/http-crawl-non_statics']"
];
};
}
];
};
};
crowdsec-firewall-bouncer = {
remediationComponents.firewallBouncer = {
enable = true;
settings.prometheus = {
enabled = true;
@ -82,23 +47,37 @@
listen_port = "60601";
};
};
victoriametrics.scrapeConfigs = {
crowdsec.targets =
let
cfg = config.services.crowdsec.settings.general;
address = cfg.prometheus.listen_addr;
port = cfg.prometheus.listen_port;
in
[ "${address}:${toString port}" ];
csFirewallBouncer.targets =
let
cfg = config.services.crowdsec-firewall-bouncer.settings;
address = cfg.prometheus.listen_addr;
port = cfg.prometheus.listen_port;
in
[ "${address}:${toString port}" ];
};
};
# Add whitelists for matrix
systemd.tmpfiles.settings."10-matrix" =
let
stateDir = config.security.crowdsec.stateDirectory;
in
{
"${stateDir}/config/postoverflows".d = {
user = "crowdsec";
group = "crowdsec";
mode = "0700";
};
"${stateDir}/config/postoverflows/s01-whitelist".d = {
user = "crowdsec";
group = "crowdsec";
mode = "0700";
};
"${stateDir}/config/postoverflows/s01-whitelist/matrix-whitelist.yaml"."L+".argument =
((pkgs.formats.yaml { }).generate "crowdsec-matrix-whitelist.yaml" {
name = "tetsumaki/matrix";
description = "custom matrix whitelist";
whitelist = {
reason = "whitelist false positive for matrix";
expression = [
"evt.Overflow.Alert.Events[0].GetMeta('target_fqdn') == '${config.services.matrix-conduit.settings.global.server_name}'"
"evt.Overflow.Alert.GetScenario() in ['crowdsecurity/http-probing', 'crowdsecurity/http-crawl-non_statics']"
];
};
}).outPath;
};
}

View file

@ -11,6 +11,8 @@
./ntfy-sh
./minecraft.nix
./nextcloud.nix
./postgres.nix
# ./starbound.nix -- Not currently used
./webserver.nix
./wireguard.nix
];

View file

@ -23,7 +23,7 @@ in
minifyStaticFiles = true;
proxySSL = true;
proxyPort = 443;
package = flake-inputs.foundryvtt.packages.${pkgs.stdenv.hostPlatform.system}.foundryvtt_13;
package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_13;
};
nginx.virtualHosts."${domain}" =

View file

@ -18,9 +18,6 @@ in
enable = true;
settings.server.externalDomain = "https://${hostName}";
# We're using vectorchord now
database.enableVectors = false;
environment.IMMICH_TELEMETRY_INCLUDE = "all";
};

View file

@ -8,93 +8,67 @@ in
443
];
services = {
grafana = {
enable = true;
settings = {
server = {
http_port = 3001; # Default overlaps with gitea
root_url = "https://metrics.tlater.net";
};
security = {
admin_user = "tlater";
admin_password = "$__file{${config.sops.secrets."grafana/adminPassword".path}}";
secret_key = "$__file{${config.sops.secrets."grafana/secretKey".path}}";
cookie_secure = true;
cookie_samesite = "strict";
content_security_policy = true;
};
database = {
user = "grafana";
name = "grafana";
type = "postgres";
host = "/run/postgresql";
};
services.grafana = {
enable = true;
settings = {
server = {
http_port = 3001; # Default overlaps with gitea
root_url = "https://metrics.tlater.net";
};
declarativePlugins = [
pkgs.grafanaPlugins.victoriametrics-metrics-datasource
pkgs.grafanaPlugins.victoriametrics-logs-datasource
];
security = {
admin_user = "tlater";
admin_password = "$__file{${config.sops.secrets."grafana/adminPassword".path}}";
secret_key = "$__file{${config.sops.secrets."grafana/secretKey".path}}";
cookie_secure = true;
cookie_samesite = "strict";
content_security_policy = true;
};
provision = {
enable = true;
datasources.settings.datasources = [
{
name = "Victoriametrics - tlater.net";
url = "http://localhost:8428";
type = "victoriametrics-metrics-datasource";
access = "proxy";
isDefault = true;
}
{
name = "Victorialogs - tlater.net";
url = "http://${config.services.victorialogs.bindAddress}";
type = "victoriametrics-logs-datasource";
access = "proxy";
}
];
alerting.contactPoints.settings.contactPoints = [
{
name = "ntfy";
receivers = [
{
uid = "ntfy";
type = "webhook";
settings.url = "http://${config.services.ntfy-sh.settings.listen-http}/local-alerts?template=grafana";
}
];
}
];
database = {
user = "grafana";
name = "grafana";
type = "postgres";
host = "/run/postgresql";
};
};
postgresql = {
ensureUsers = [
declarativePlugins = [
pkgs.grafanaPlugins.victoriametrics-metrics-datasource
pkgs.grafanaPlugins.victoriametrics-logs-datasource
];
provision = {
enable = true;
datasources.settings.datasources = [
{
name = "grafana";
ensureDBOwnership = true;
name = "Victoriametrics - tlater.net";
url = "http://localhost:8428";
type = "victoriametrics-metrics-datasource";
access = "proxy";
isDefault = true;
}
{
name = "Victorialogs - tlater.net";
url = "http://${config.services.victorialogs.bindAddress}";
type = "victoriametrics-logs-datasource";
access = "proxy";
}
];
ensureDatabases = [ "grafana" ];
};
};
nginx.virtualHosts."${domain}" = {
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;
locations = {
"/".proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
"/api/live" = {
proxyWebsockets = true;
proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
};
services.nginx.virtualHosts."${domain}" = {
forceSSL = true;
useACMEHost = "tlater.net";
enableHSTS = true;
locations = {
"/".proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
"/api/live" = {
proxyWebsockets = true;
proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
};
};
};

View file

@ -68,11 +68,31 @@ in
coturn.targets = [ "127.0.0.1:9641" ];
crowdsec.targets =
let
address = config.security.crowdsec.settings.prometheus.listen_addr;
port = config.security.crowdsec.settings.prometheus.listen_port;
in
[ "${address}:${toString port}" ];
csFirewallBouncer.targets =
let
address =
config.security.crowdsec.remediationComponents.firewallBouncer.settings.prometheus.listen_addr;
port =
config.security.crowdsec.remediationComponents.firewallBouncer.settings.prometheus.listen_port;
in
[ "${address}:${toString port}" ];
immich.targets = [
"127.0.0.1:8081"
"127.0.0.1:8082"
];
# Configured in the hookshot listeners, but it's hard to filter
# the correct values out of that config.
matrixHookshot.targets = [ "127.0.0.1:9001" ];
victorialogs.targets = [ config.services.victorialogs.bindAddress ];
};
};

View file

@ -5,7 +5,7 @@
...
}:
let
nextcloud = pkgs.nextcloud32;
nextcloud = pkgs.nextcloud31;
hostName = "nextcloud.${config.services.nginx.domain}";
in
{
@ -103,19 +103,8 @@ in
};
};
services.postgresql = {
ensureUsers = [
{
name = "nextcloud";
ensureDBOwnership = true;
}
];
ensureDatabases = [ "nextcloud" ];
};
# Ensure that this service doesn't start before postgres is ready
systemd.services.nextcloud-setup.after = [ "postgresql.target" ];
systemd.services.nextcloud-setup.after = [ "postgresql.service" ];
sops.secrets."nextcloud/tlater" = {
owner = "nextcloud";

View file

@ -17,6 +17,7 @@ in
services.ntfy-sh = {
enable = true;
package = flake-inputs.nixpkgs-unstable.legacyPackages.${pkgs.system}.ntfy-sh;
environmentFile = config.sops.secrets."ntfy/users".path;
@ -137,17 +138,17 @@ in
"curl",
"--silent",
"--show-error",
f"--max-time {2 + timeout}",
f"--max-time {timeout}",
"-u tlater:insecure",
f"http://ntfy.testHost/{topic}/json",
"-o messages"
"> messages"
]
client.succeed(f'{" ".join(systemd_invocation)} "{" ".join(curl)}"')
# Give some slack so the host doesn't send messages before
# we're listening
time.sleep(2)
time.sleep(1)
yield

View file

@ -0,0 +1,35 @@
{ pkgs, ... }:
{
services.postgresql = {
package = pkgs.postgresql_14;
enable = true;
# Only enable connections via the unix socket, and check with the
# OS to make sure the user matches the database name.
#
# See https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
authentication = ''
local sameuser all peer
'';
# Note: The following options with ensure.* are set-only; i.e.,
# when permissions/users/databases are removed from these lists,
# that operation needs to be performed manually on the system as
# well.
ensureUsers = [
{
name = "grafana";
ensureDBOwnership = true;
}
{
name = "nextcloud";
ensureDBOwnership = true;
}
];
ensureDatabases = [
"grafana"
"nextcloud"
];
};
}

View file

@ -0,0 +1,129 @@
{
flake-inputs,
pkgs,
lib,
...
}:
let
inherit (lib) concatStringsSep;
in
{
networking.firewall.allowedTCPPorts = [ 21025 ];
# Sadly, steam-run requires some X libs
environment.noXlibs = false;
systemd.services.starbound = {
description = "Starbound";
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${
flake-inputs.self.packages.${pkgs.system}.starbound
}/bin/launch-starbound ${./configs/starbound.json}";
Type = "simple";
# Credential loading for steam auth (if necessary; prefer
# anonymous login wherever possible).
LoadCredential = "steam:/run/secrets/steam/tlater";
# Security settings
DynamicUser = true;
# This is where the StateDirectory ends up
WorkingDirectory = "/var/lib/starbound";
# Creates /var/lib/starbound (or rather, a symlink there to
# /var/lib/private/starbound), and sets it up to be writeable to
# by the dynamic user.
StateDirectory = "starbound";
# Note some settings below are basically tautologous with
# `NoNewPrivileges`, but they all work slightly differently so
# add additional layers in case of bugs.
## THESE SETTINGS ARE A GOOD IDEA BUT THE STEAM CLIENT IS
## REALLY, REALLY BAD, AND FOR SOME REASON I NEED TO USE IT TO
## DOWNLOAD GAME SERVERS AS WELL:
##
# To guarantee the above (only permits 64-bit syscalls, 32-bit
# syscalls can circumvent the above restrictions).
#
# Obviously, if running a 32 bit game server, change this.
# SystemCallArchitectures = "native";
# Game servers shouldn't need to create new namespaces ever.
#
# TODO: Since steam uses namespaces for things *entirely
# unrelated* to installing game servers, we need to allow
# namespace access. Ideally I'd instead do this in an
# ExecStartPre, but alas, this isn't possible because of
# https://github.com/systemd/systemd/issues/19604.
#
# RestrictNamespaces = true;
# Don't need to let the game server see other user accounts
PrivateUsers = true;
# *Probably* not harmful for game servers, which probably don't update dynamically
ProtectHostname = true;
# Yeah, if a game server tries to edit the hardware clock something's fishy
ProtectClock = true;
# Don't let game servers modify kernel settings, duh
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
# Game servers shouldn't use cgroups themselves either
ProtectControlGroups = true;
# Most game servers will never need other socket types
RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
# Also a no-brainer, no game server should ever need this
LockPersonality = true;
# Some game servers will probably try to set this, but they
# don't need it. It's only required for audio processing and
# such, which the server end doesn't need to do.
RestrictRealtime = true;
# Don't allow a variety of syscalls that gameservers have no
# business using anyway
SystemCallFilter =
"~"
+ (concatStringsSep " " [
"@clock"
"@cpu-emulation"
"@debug"
"@keyring"
"@memlock"
"@module"
# "@mount" TODO: Consider adding when steamcmd is run in ExecStartPre
"@obsolete"
"@raw-io"
"@reboot"
# "@resources" TODO: Ditto
"@setuid"
"@swap"
]);
# Normally only "read-only", but steamcmd will puke if there is
# no home directory to write to (though the nix package will
# implicitly symlink to the path that we set in its override, so
# no actual files are created, besides a symlink).
ProtectHome = "tmpfs";
# Implied by DynamicUser anyway, but it doesn't hurt to add
# these explicitly, at least for reference.
RemoveIPC = true;
PrivateTmp = true;
PrivateDevices = true;
NoNewPrivileges = true;
RestrictSUIDSGID = true;
ProtectSystem = "strict";
# ProtectHome = "read-only"; # See further up
};
};
services.backups.starbound = {
user = "root";
paths = [ "/var/lib/private/starbound/storage/universe/" ];
pauseServices = [ "starbound.service" ];
};
# Accessed via systemd cred through /run/secrets/steam
sops.secrets."steam/tlater" = { };
}

View file

@ -20,7 +20,7 @@ in
after = [ "network.target" ];
script = ''
${lib.getExe flake-inputs.self.packages.${pkgs.stdenv.hostPlatform.system}.webserver}
${lib.getExe flake-inputs.self.packages.${pkgs.system}.webserver}
'';
environment = {

View file

@ -1,43 +0,0 @@
# flake-parts module exposing developer conveniences: `nix run` apps
# and `nix develop` shells.
{ self, ... }:
{
  # Systems on which to make dev utilities runnable; anything
  # NixOS-related encodes its own system.
  systems = [ "x86_64-linux" ];
  perSystem =
    {
      inputs',
      self',
      pkgs,
      lib,
      ...
    }:
    {
      apps = {
        # `nix run` with no attribute boots the test VM.
        default = self'.apps.runVm;
        runVm = {
          type = "app";
          program = lib.getExe self.nixosConfigurations.hetzner-1.config.system.build.vm;
          meta.description = "Run the test VM";
        };
      };
      devShells = {
        # Main shell: sops key handling plus deploy-rs for rollouts.
        default = pkgs.mkShell {
          # Consumed by sops-import-keys-hook to populate the GPG keyring.
          sopsPGPKeyDirs = [
            "./keys/hosts/"
            "./keys/users/"
          ];
          packages = lib.attrValues {
            inherit (inputs'.sops-nix.packages) sops-import-keys-hook sops-init-gpg-key;
            inherit (pkgs) deploy-rs;
          };
        };
        # Modpack management tooling for the minecraft server.
        minecraft = pkgs.mkShell { packages = lib.attrValues { inherit (pkgs) packwiz; }; };
        # Delegated to the webserver package's own dev shell definition.
        webserver = self'.packages.webserver.devShell;
      };
    };
}

84
flake.lock generated
View file

@ -46,9 +46,7 @@
"deploy-rs",
"flake-compat"
],
"flake-parts": [
"flake-parts"
],
"flake-parts": "flake-parts",
"nix-test-runner": "nix-test-runner",
"nixpkgs": [
"sonnenshift",
@ -79,11 +77,11 @@
"utils": "utils"
},
"locked": {
"lastModified": 1770019181,
"narHash": "sha256-hwsYgDnby50JNVpTRYlF3UR/Rrpt01OrxVuryF40CFY=",
"lastModified": 1762286984,
"narHash": "sha256-9I2H9x5We6Pl+DBYHjR1s3UT8wgwcpAH03kn9CqtdQc=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "77c906c0ba56aabdbc72041bf9111b565cdd6171",
"rev": "9c870f63e28ec1e83305f7f6cb73c941e699f74f",
"type": "github"
},
"original": {
@ -125,11 +123,11 @@
]
},
"locked": {
"lastModified": 1771355198,
"narHash": "sha256-89m5VKxIs8QNiIvLsxHu5NpyhDsoXTtoN801IAurnW4=",
"lastModified": 1764350888,
"narHash": "sha256-6Rp18zavTlnlZzcoLoBTJMBahL2FycVkw2rAEs3cQvo=",
"owner": "nix-community",
"repo": "disko",
"rev": "92fceb111901a6f13e81199be4fab95fce86a5c9",
"rev": "2055a08fd0e2fd41318279a5355eb8a161accf26",
"type": "github"
},
"original": {
@ -156,14 +154,18 @@
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
"nixpkgs-lib": [
"sonnenshift",
"crate2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1769996383,
"narHash": "sha256-AnYjnFWgS49RlqX7LrC4uA+sCCDBj0Ry/WOJ5XWAsa0=",
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "57928607ea566b5db3ad13af0e57e921e6b12381",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
@ -179,11 +181,11 @@
]
},
"locked": {
"lastModified": 1767431140,
"narHash": "sha256-ug37Jt6r8LP3161suTh6IW+fkx0a7kiSAhAPsVcPrkA=",
"lastModified": 1762437643,
"narHash": "sha256-nQ2ItqrkvOYEjJr1HcXkIEFS4SEy5q1ax0Y1CTuKhHs=",
"owner": "NotAShelf",
"repo": "flint",
"rev": "7832a5b5f5ef1243818f8f5e357ad1ee2d35d2b7",
"rev": "36c565edd971166718d21ae973c792b194ca737d",
"type": "github"
},
"original": {
@ -199,11 +201,11 @@
]
},
"locked": {
"lastModified": 1767491610,
"narHash": "sha256-/Nldo9ILD7T5aQKuyeUccNPXjhNBrovGXEoi5k7m9Bo=",
"lastModified": 1761916399,
"narHash": "sha256-wLZ8km5ftKlIDdHJrFiDQivXc5b+7DRxmBp2347H5g8=",
"owner": "reckenrode",
"repo": "nix-foundryvtt",
"rev": "35e789ba383fbfaa9039005b9b24669c5be6b8ab",
"rev": "8cceb7af3dfbe465b5108db5c098b097edf85790",
"type": "github"
},
"original": {
@ -253,30 +255,28 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1771208521,
"narHash": "sha256-G2qGwj2t77kM0hZatRrTp2+50obn4ssSXoLCrJKZtgQ=",
"rev": "fa56d7d6de78f5a7f997b0ea2bc6efd5868ad9e8",
"lastModified": 1764316264,
"narHash": "sha256-UcoE0ISg9Nnzx/2n7VvQl3fRsLg+DcVa/ZGf/DZNHbs=",
"rev": "9a7b80b6f82a71ea04270d7ba11b48855681c4b0",
"type": "tarball",
"url": "https://releases.nixos.org/nixos/25.11/nixos-25.11.6074.fa56d7d6de78/nixexprs.tar.xz"
"url": "https://releases.nixos.org/nixos/25.05/nixos-25.05.813221.9a7b80b6f82a/nixexprs.tar.xz?lastModified=1764316264&rev=9a7b80b6f82a71ea04270d7ba11b48855681c4b0"
},
"original": {
"type": "tarball",
"url": "https://channels.nixos.org/nixos-25.11/nixexprs.tar.xz"
"url": "https://channels.nixos.org/nixos-25.05/nixexprs.tar.xz"
}
},
"nixpkgs-lib": {
"nixpkgs-unstable": {
"locked": {
"lastModified": 1769909678,
"narHash": "sha256-cBEymOf4/o3FD5AZnzC3J9hLbiZ+QDT/KDuyHXVJOpM=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "72716169fe93074c333e8d0173151350670b824c",
"type": "github"
"lastModified": 1764242076,
"narHash": "sha256-6/1EG2fiKvLoUJ8FD7ymRx87e4zcfJTzAdUYgo4CDLA=",
"rev": "2fad6eac6077f03fe109c4d4eb171cf96791faa4",
"type": "tarball",
"url": "https://releases.nixos.org/nixos/unstable/nixos-26.05pre903292.2fad6eac6077/nixexprs.tar.xz?lastModified=1764242076&rev=2fad6eac6077f03fe109c4d4eb171cf96791faa4"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
"type": "tarball",
"url": "https://channels.nixos.org/nixos-unstable/nixexprs.tar.xz"
}
},
"pre-commit-hooks": {
@ -322,10 +322,10 @@
"inputs": {
"deploy-rs": "deploy-rs",
"disko": "disko",
"flake-parts": "flake-parts",
"flint": "flint",
"foundryvtt": "foundryvtt",
"nixpkgs": "nixpkgs",
"nixpkgs-unstable": "nixpkgs-unstable",
"sonnenshift": "sonnenshift",
"sops-nix": "sops-nix"
}
@ -338,11 +338,11 @@
]
},
"locked": {
"lastModified": 1764578400,
"narHash": "sha256-8V0SpIcYyjpP+nAHfYJDof7CofLTwVVDo5QLZ0epjOQ=",
"lastModified": 1763619077,
"narHash": "sha256-dlfamaoIzFEgwgtzPJuw5Tl5SqjbWcV8CsbP2hVBeuI=",
"ref": "refs/heads/main",
"rev": "bf17617899692c9c2bfebfce87320a4174e6dc28",
"revCount": 27,
"rev": "64a2c8a3743ea6897ecac6692fba8aebc3389fca",
"revCount": 26,
"type": "git",
"url": "ssh://git@github.com/sonnenshift/battery-manager"
},
@ -358,11 +358,11 @@
]
},
"locked": {
"lastModified": 1771166946,
"narHash": "sha256-UFc4lfGBr+wJmwgDGJDn1cVD6DTr0/8TdronNUiyXlU=",
"lastModified": 1764021963,
"narHash": "sha256-1m84V2ROwNEbqeS9t37/mkry23GBhfMt8qb6aHHmjuc=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "2d0cf89b4404529778bc82de7e42b5754e0fe4fa",
"rev": "c482a1c1bbe030be6688ed7dc84f7213f304f1ec",
"type": "github"
},
"original": {

147
flake.nix
View file

@ -1,7 +1,9 @@
{
description = "tlater.net host configuration";
inputs = {
nixpkgs.url = "https://channels.nixos.org/nixos-25.11/nixexprs.tar.xz";
flake-parts.url = "github:hercules-ci/flake-parts";
nixpkgs.url = "https://channels.nixos.org/nixos-25.05/nixexprs.tar.xz";
nixpkgs-unstable.url = "https://channels.nixos.org/nixos-unstable/nixexprs.tar.xz";
## Nix/OS utilities
@ -41,49 +43,142 @@
crate2nix.inputs = {
flake-compat.follows = "deploy-rs/flake-compat";
devshell.inputs.flake-utils.follows = "deploy-rs/utils";
flake-parts.follows = "flake-parts";
};
};
};
};
outputs =
{ flake-parts, ... }@inputs:
flake-parts.lib.mkFlake { inherit inputs; } (
{ self, ... }@args:
{
imports = [
(flake-parts.lib.importApply ./flakeModules/deploy-rs.nix args)
./checks
./dev-utils.nix
./pkgs
];
{
self,
nixpkgs,
sops-nix,
...
}@inputs:
let
system = "x86_64-linux";
flake.nixosConfigurations.hetzner-1 = inputs.nixpkgs.lib.nixosSystem {
vm = nixpkgs.lib.nixosSystem {
inherit system;
specialArgs.flake-inputs = inputs;
modules = [
./configuration
./configuration/hardware-specific/vm.nix
];
};
# deploy-rs unfortunately uses an `import nixpkgs`, and its
# library functions depend on an instantiated nixpkgs, so we
# can't get around multi-nixpkgs-eval.
inherit
(import nixpkgs {
inherit system;
overlays = [
inputs.deploy-rs.overlays.default
(_: prev: {
deploy-rs = {
inherit (nixpkgs.legacyPackages.${system}) deploy-rs;
inherit (prev.deploy-rs) lib;
};
})
];
})
deploy-rs
;
in
{
##################
# Configurations #
##################
nixosConfigurations = {
# The actual system definition
hetzner-1 = nixpkgs.lib.nixosSystem {
inherit system;
specialArgs.flake-inputs = inputs;
modules = [
./configuration
./configuration/hardware-specific/hetzner
];
};
};
deploy.nodes.hetzner-1 = {
############################
# Deployment configuration #
############################
deploy.nodes = {
hetzner-1 = {
hostname = "116.202.158.55";
profiles.system = {
user = "root";
activation = "nixos";
closure = self.nixosConfigurations.hetzner-1;
path = deploy-rs.lib.activate.nixos self.nixosConfigurations.hetzner-1;
};
sshUser = "tlater";
sshOpts = [
"-p"
"2222"
"-o"
"ForwardAgent=yes"
];
sshUser = "tlater";
sshOpts = [
"-p"
"2222"
"-o"
"ForwardAgent=yes"
];
};
};
#########
# Tests #
#########
checks = import ./checks { flake-inputs = inputs; };
###########################
# Garbage collection root #
###########################
packages.${system} = {
default = vm.config.system.build.vm;
}
// import ./pkgs {
pkgs = nixpkgs.legacyPackages.${system};
flake-inputs = inputs;
};
###################
# Utility scripts #
###################
apps.${system} = {
default = self.apps.${system}.run-vm;
run-vm = {
type = "app";
program =
(nixpkgs.legacyPackages.${system}.writeShellScript "" ''
${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
'').outPath;
};
};
###########################
# Development environment #
###########################
devShells.${system} = {
default = nixpkgs.legacyPackages.${system}.mkShell {
sopsPGPKeyDirs = [
"./keys/hosts/"
"./keys/users/"
];
packages = nixpkgs.lib.attrValues {
inherit (sops-nix.packages.${system}) sops-import-keys-hook sops-init-gpg-key;
inherit (deploy-rs) deploy-rs;
};
};
}
);
minecraft = nixpkgs.legacyPackages.${system}.mkShell {
packages = nixpkgs.lib.attrValues { inherit (nixpkgs.legacyPackages.${system}) packwiz; };
};
webserver = self.packages.${system}.webserver.devShell;
};
};
}

View file

@ -1,136 +0,0 @@
# flake-parts module providing typed `deploy.nodes.*` options and
# re-exporting them as the flake-level `deploy` attribute that
# deploy-rs expects. Each profile's activation `path` is derived
# automatically from its `closure` and `activation` kind.
{ lib, ... }@exportingFlake:
let
  inherit (lib) mkOption types;
  # Instantiates nixpkgs with the deploy-rs overlay for `system`, but
  # swaps the overlay's binary for the one already built in nixpkgs'
  # own package set (avoiding a from-source rebuild) while keeping the
  # overlay's `lib` (the activation helpers).
  deploy-rs-for-system =
    system:
    (import exportingFlake.inputs.nixpkgs {
      inherit system;
      overlays = [
        exportingFlake.inputs.deploy-rs.overlays.default
        (_final: prev: {
          deploy-rs = {
            inherit (exportingFlake.inputs.nixpkgs.legacyPackages.${system}) deploy-rs;
            inherit (prev.deploy-rs) lib;
          };
        })
      ];
    }).deploy-rs;
in
{ config, ... }:
let
  cfg = config.deploy;
in
{
  options.deploy =
    let
      # Options shared between nodes and profiles; all nullable so the
      # `apply` functions below can strip unset values from the final
      # attrset handed to deploy-rs.
      genericOptions =
        let
          mkGenericOption =
            type:
            mkOption {
              type = types.nullOr type;
              default = null;
            };
        in
        {
          options = {
            sshUser = mkGenericOption types.str;
            user = mkGenericOption types.str;
            sshOpts = mkGenericOption (types.listOf types.str);
            fastConnection = mkGenericOption types.bool;
            autoRollback = mkGenericOption types.bool;
            magicRollback = mkGenericOption types.bool;
            confirmTimeout = mkGenericOption types.int;
            activationTimeout = mkGenericOption types.int;
            tempPath = mkGenericOption types.str;
            interactiveSudo = mkGenericOption types.bool;
          };
        };
      profileModule =
        { config, ... }:
        {
          imports = [ genericOptions ];
          options = {
            # Which deploy-rs activation helper to apply to `closure`.
            activation = mkOption {
              type = types.oneOf [
                (types.enum [
                  "nixos"
                  "home-manager"
                  "darwin"
                  "noop"
                ])
              ];
            };
            # The system/profile derivation to deploy (e.g. a
            # nixosConfiguration).
            closure = mkOption { type = types.raw; };
            profilePath = mkOption {
              type = types.nullOr types.str;
              default = null;
            };
            # Computed below; not meant to be set by users.
            path = mkOption {
              type = types.raw;
              internal = true;
            };
          };
          config =
            let
              # The closure knows its own target system.
              inherit (config.closure.config.nixpkgs.hostPlatform) system;
              deploy-rs = deploy-rs-for-system system;
            in
            lib.mkMerge [
              (lib.mkIf (lib.elem config.activation [
                "nixos"
                "home-manager"
                "darwin"
                "noop"
              ]) { path = deploy-rs.lib.activate.${config.activation} config.closure; })
            ];
        };
      nodeModule = {
        imports = [ genericOptions ];
        options = {
          hostname = mkOption { type = types.str; };
          profilesOrder = mkOption {
            type = types.listOf types.str;
            default = [ ];
          };
          profiles = mkOption {
            type = types.attrsOf (types.submoduleWith { modules = [ profileModule ]; });
            # Strip the module-only keys (activation/closure) and any
            # nulls so the result matches deploy-rs' expected schema.
            apply = lib.mapAttrs (
              _: profile:
              lib.filterAttrs (
                name: val:
                !(lib.elem name [
                  "activation"
                  "closure"
                ])
                && val != null
              ) profile
            );
            default = { };
          };
        };
      };
    in
    {
      nodes = mkOption {
        default = { };
        type = types.attrsOf (types.submoduleWith { modules = [ nodeModule ]; });
        apply = lib.mapAttrs (_: node: lib.filterAttrs (_: val: val != null) node);
      };
    };
  # Only export a `deploy` output when at least one node is defined.
  config = lib.mkIf (cfg.nodes != { }) { flake.deploy.nodes = cfg.nodes; };
}

View file

@ -20,6 +20,9 @@ steam:
heisenbridge:
as-token: ENC[AES256_GCM,data:+2yo6T18j34622H8ZWblAFB2phLw1q0k0vUQEZ5sFj7dQaRnkEiAMi0R3p17Zq0pOtGEC0RRZuPLYkcZ1oKP0w==,iv:lGwrQYp//FufpmJocrLIVyy9RK7lEEVcpAi0wmkjr34=,tag:yV06UbhAYJQz36O2XdhY+A==,type:str]
hs-token: ENC[AES256_GCM,data:u52WpkQFd/J7JFoE/rfNluebyZQLOokvkVdL7+AEAvrhJhrkJli1ztkD79lbC+6tGUH4tT3T+nX9wvGKnrRUQg==,iv:as+9fVuvMg2IoE2WIKD9mHi+znhNcWRh5Zq+yr0xcDQ=,tag:mZ7fh7U0MfgI8hyq/28Bcg==,type:str]
matrix-hookshot:
as-token: ENC[AES256_GCM,data:nXTanPhDyDF7R3AllLqpM5dzljBrHwlh1KJnTGIi5PhbDY2lPj4+uXkMEwvm1u+hQjPyM7vKZPfK+0/dms6Y7A==,iv:fSakJN+yai0gfOJKFxxaxgyUtk0pNmIeqVgrdq92/24=,tag:Qc7+SUnm5/Nq5+QIScR9kQ==,type:str]
hs-token: ENC[AES256_GCM,data:Bwyj0JTTN0NNnwOs1zA8CqbtZSNcvlINeT7QVc2eJiHda92J6vQk7bSxy6KuqCN9DxlUsK13ggYjNORY2vic5w==,iv:Npnp8arYQ3Yb6CXrnKgE03hD7ZjGINPa/DwFI8D+5tA=,tag:FqNE6yI0nF4puEUw9MGAjQ==,type:str]
wireguard:
server-key: ENC[AES256_GCM,data:mXb7ZznJHf5CgV8rI4uzPBATMRbmd7LimgtCkQM9kAjbIaGwUBqJZBN3fXs=,iv:3Po1Orinzov9rnEm9cLzgJY1PeD+5Jl9115MriABHh8=,tag:E/2CjDO1JCvJzxCnqKcNyw==,type:str]
restic:
@ -29,8 +32,8 @@ turn:
env: ENC[AES256_GCM,data:kt5nhVo9pb/ZbPUEcqSYXxN9YMgQKnFb5VRfFFS/qoIaJ73uD2fuJKqcxAyVRrdLqnSAWSQBgTgunBzdP7xqLAK2qt8DYAQWHkIe9uxFbSXZpdmw,iv:9lq6SFwTFN4GGm6gPiJpUMasMdnHVF6XLGYrsyG3kjU=,tag:428Qf9DOiiHt/Wjb188b8g==,type:str]
secret: ENC[AES256_GCM,data:si7ee6Xfhdgdyzbp6aQpF7pz3TmTBb7iQ82lRPVXNDg9JfHI+lbmgAsSnRLX5qMCA6P9R045sSMosqidL8QwRg==,iv:SrhpZKK8D45yxCEfDb9P3TwtA14+qEI+wcRqcN/a6pw=,tag:PiwV+mOL9xHJgJft6sc61g==,type:str]
sops:
lastmodified: "2025-12-01T11:39:17Z"
mac: ENC[AES256_GCM,data:TwhGOW/V9/IoBifzh1MSwy/ff7ONTnxEmwERD8Yl2E27WG/6dTVz0/nIlZ8KsEKLC6vB2m+sJT+14Q9KCj4Cn/bWV1PmhytktGPxLQpgF55+pZlSK1aLUPLq0hwE93b4MAeOvzoOXtCQguh1dsB2RkinabFoMeZ2xJ7Kc+jHlfA=,iv:Ri8aEA4tssGDv2UuKeza8vs94IovM9GARLIEapb9Ya0=,tag:MDgAffj7ndmMwpw7mBXNRg==,type:str]
lastmodified: "2025-11-29T14:52:24Z"
mac: ENC[AES256_GCM,data:RC18s48jxRFQMtbmu74P7G4uhm2yHk9TB0wN7z4g8SNE3nfkYMvHAJqPr3A3dO+T33zkTFcSRm7fhWItUahTCW3fO10u6kDvWbnyjlSuAy86Tkz2iqeW4iSOzKswDptAgb/B+juAHhEMxDnkG5vpPlIcD0SVP89NlflXftogOqw=,iv:2vN2TJvzePzBJfUeBxvGXwGmRsB5sopqyWm9uUv/rzA=,tag:C6UOWrUxVsRMFncL1y1eTQ==,type:str]
pgp:
- created_at: "2025-10-03T21:38:48Z"
enc: |-

View file

@ -21,6 +21,9 @@ steam:
heisenbridge:
as-token: ENC[AES256_GCM,data:tXbOeo7nv8I=,iv:wJAKcOXX9nGIw4n38ThOoj29u7dUWhsxSQG/p79JlEw=,tag:rTVaGS2UuWcea1uBa8YX2g==,type:str]
hs-token: ENC[AES256_GCM,data:VBwvwomv0Xg=,iv:q6INtJ+rg+QiXj8uBdBzQYQZUBBXp+9odxDHwvu8Jxc=,tag:XKhm8nxygAkKaiVPJ2Fcdg==,type:str]
matrix-hookshot:
as-token: ENC[AES256_GCM,data:uSUOo4f2KqA=,iv:Xb9G8Ecv6m59m51kDw2bOfq3SMJt4g9/6/EdH74R+KM=,tag:K9MSfO2c2Y4rlf0eYrmTnw==,type:str]
hs-token: ENC[AES256_GCM,data:0KsyA06InL4=,iv:zAR0Y1fk8SyodcSLBHlQ8I+BAmttz9Hkd8Q3OREFqs4=,tag:t1Et8N/3seq95DeGoUd7Sw==,type:str]
wireguard:
server-key: ENC[AES256_GCM,data:FvY897XdKoa/mckE8JQLCkklsnYD6Wz1wpsu5t3uhEnW3iarnDQxF9msuYU=,iv:jqGXfekM+Vs+J9b5nlZ5Skd1ZKHajoUo2Dc4tMYPm1w=,tag:EehikjI/FCU8wqtpvJRamQ==,type:str]
restic:
@ -30,8 +33,8 @@ turn:
env: ENC[AES256_GCM,data:xjIz/AY109lyiL5N01p5T3HcYco/rM5CJSRTtg==,iv:16bW6OpyOK/QL0QPGQp/Baa9xyT8E3ZsYkwqmjuofk0=,tag:J5re3uKxIykw3YunvQWBgg==,type:str]
secret: ENC[AES256_GCM,data:eQ7dAocoZtg=,iv:fgzjTPv30WqTKlLy+yMn5MsKQgjhPnwlGFFwYEg3gWs=,tag:1ze33U1NBkgMX/9SiaBNQg==,type:str]
sops:
lastmodified: "2025-12-01T11:39:26Z"
mac: ENC[AES256_GCM,data:11VQAYk8Am0k8OO6BtU17qpuEhcJ8ylRhJWQNHVAsmi5BCFjD1zU3NkWhtSstPrBcqHMenG+9XuEzpNnbccHI2ru0qlILsQvNj5OKo96FnvYtzApYlApoAzOetCx08Lfxa4RGLN/XCUSuccjBIU2PZRWEK+z+Cm1wHUFeqc1xPc=,iv:6y9j55Cld+GoOVGWAqsEgURRna6dHA2mGZwHVA+ZOE8=,tag:bSZi3nYmYrn3nFT2+RBPUQ==,type:str]
lastmodified: "2025-11-29T11:54:33Z"
mac: ENC[AES256_GCM,data:SaTvwxfARVou/ZjrWfdC8J6je8l89Zuumdz7PkmY2Tl2CQVxZmEt4AyV4bWiCtWhJmfH1Qa8m4Q+DyqimjapgYT5cUB1yxlknp233bB/+5C5k3KozU2hmh80KYgR496FtQvI74p0qw/lw00CGCR3WHNcIc0dbTiDzC90HlOpafg=,iv:vxMCAjpgyWvxk18LalmFhwOb5b2ThCDq1KTaX2OPvpM=,tag:QMA+tC4hs/FBnuVDye38Vg==,type:str]
pgp:
- created_at: "2025-10-03T21:38:26Z"
enc: |-

View file

@ -0,0 +1,383 @@
# NixOS module for the crowdsec intrusion detection/prevention engine.
#
# Exposes `security.crowdsec.*` options, renders the daemon settings
# to /etc/crowdsec/config.yaml, and sets up two units running as the
# dedicated `crowdsec` user: a one-shot `crowdsec-setup` preparing
# mutable state (simulation config, local API credentials), and the
# `crowdsec` daemon itself.
{
  pkgs,
  lib,
  config,
  ...
}:
let
  cfg = config.security.crowdsec;
  settingsFormat = pkgs.formats.yaml { };
  # Pinned snapshot of the crowdsec hub (index of collections,
  # scenarios, parsers, ...). NOTE(review): this rev differs from the
  # one pinned in the repo's crowdsec-hub package — confirm which pin
  # is meant to be canonical.
  hub = pkgs.fetchFromGitHub {
    owner = "crowdsecurity";
    repo = "hub";
    rev = "7a3b4753f4577257c0cbeb8f8f90c7f17d2ae008";
    hash = "sha256-HB4jHyhiO8gjBkLmpo6bDbwhfm5m5nAtNlKhDkZjt2I=";
  };
  # Wrapper that re-executes cscli as the `crowdsec` user via sudo, so
  # any state files it touches keep the correct ownership.
  cscli = pkgs.writeShellScriptBin "cscli" ''
    export PATH="$PATH:${cfg.package}/bin/"
    sudo=exec
    if [ "$USER" != "crowdsec" ]; then
      sudo='exec /run/wrappers/bin/sudo -u crowdsec'
    fi
    $sudo ${cfg.package}/bin/cscli "$@"
  '';
  # Multi-document YAML stream of log acquisitions; JSON is valid
  # YAML, so each entry can be rendered with builtins.toJSON.
  acquisitions = ''
    ---
    ${lib.concatMapStringsSep "\n---\n" builtins.toJSON cfg.acquisitions}
    ---
  '';
in
{
  imports = [ ./remediations ];
  options.security.crowdsec =
    let
      inherit (lib.types)
        nullOr
        listOf
        package
        path
        str
        ;
    in
    {
      enable = lib.mkEnableOption "crowdsec";
      # Allows swapping in a patched or newer crowdsec build.
      package = lib.mkOption {
        type = package;
        default = pkgs.crowdsec;
      };
      stateDirectory = lib.mkOption {
        type = path;
        readOnly = true;
        description = ''
          The state directory of the crowdsec instance. Cannot be
          changed, but is exposed for downstream use.
        '';
      };
      settings = lib.mkOption {
        inherit (settingsFormat) type;
        default = { };
        description = ''
          The crowdsec configuration. Refer to
          <https://docs.crowdsec.net/docs/next/configuration/crowdsec_configuration/>
          for details on supported values.
        '';
      };
      parserWhitelist = lib.mkOption {
        type = listOf str;
        default = [ ];
        description = ''
          Set of IP addresses to add to a parser-based whitelist.
          Addresses can be specified either as plain IP addresses or
          in CIDR notation.
        '';
      };
      acquisitions = lib.mkOption {
        type = listOf settingsFormat.type;
        default = [ ];
        description = ''
          Log acquisitions.
        '';
      };
      extraGroups = lib.mkOption {
        type = listOf str;
        default = [ ];
        description = ''
          Additional groups to make the service part of.
          Required to permit reading from various log sources.
        '';
      };
      # NOTE(review): these options are declared but not consumed in
      # this module's visible config — confirm where hub items are
      # actually installed.
      hubConfigurations = {
        collections = lib.mkOption {
          type = listOf str;
          description = ''
            List of pre-made crowdsec collections to install.
          '';
        };
        scenarios = lib.mkOption {
          type = listOf str;
          description = ''
            List of pre-made crowdsec scenarios to install.
          '';
        };
        parsers = lib.mkOption {
          type = listOf str;
          description = ''
            List of pre-made crowdsec parsers to install.
          '';
        };
        postoverflows = lib.mkOption {
          type = listOf str;
          description = ''
            List of pre-made crowdsec postoverflows to install.
          '';
        };
        appsecConfigs = lib.mkOption {
          type = listOf str;
          description = ''
            List of pre-made crowdsec appsec configurations to install.
          '';
        };
        appsecRules = lib.mkOption {
          type = listOf str;
          description = ''
            List of pre-made crowdsec appsec rules to install.
          '';
        };
      };
      centralApiCredentials = lib.mkOption {
        type = nullOr path;
        default = null;
        description = ''
          The API key to access crowdsec's central API - this is
          required to access any of the shared blocklists.
          Use of this feature is optional, entering no API key (the
          default) turns all sharing or receiving of blocked IPs off.
          Note that adding the API key by itself does not enable
          sharing of blocked IPs with the central API. This limits the
          types of blocklists this instance can access.
          To also turn sharing blocked IPs on, set
          `api.server.online_client.sharing = true;`.
        '';
      };
      ctiApiKey = lib.mkOption {
        type = nullOr path;
        default = null;
        description = ''
          The API key for crowdsec's CTI offering.
        '';
      };
    };
  config = lib.mkIf cfg.enable {
    # Set up default settings; anything that *shouldn't* be changed is
    # set to the default priority so that users need to use
    # `lib.mkForce`.
    security.crowdsec = {
      stateDirectory = "/var/lib/crowdsec";
      settings = {
        common = {
          daemonize = true;
          # The default logs to files, which isn't the preferred way
          # on NixOS
          log_media = "stdout";
        };
        config_paths = {
          config_dir = "${cfg.stateDirectory}/config/";
          data_dir = "${cfg.stateDirectory}/data/";
          # This "config" file is intended to be written to using the
          # cscli tool, so you can temporarily make it so rules don't
          # do anything but log what they *would* do for
          # experimentation.
          simulation_path = "${cfg.stateDirectory}/config/simulation.yaml";
          pattern_dir = lib.mkDefault "${cfg.package}/share/crowdsec/config/patterns";
          # The hub is read-only store content; only mutable paths
          # live under the state directory.
          hub_dir = hub;
          index_path = "${hub}/.index.json";
          # Integrations aren't supported for now
          notification_dir = lib.mkDefault "/var/empty/";
          plugin_dir = lib.mkDefault "/var/empty/";
        };
        crowdsec_service.acquisition_path =
          # Using an if/else here because `mkMerge` does not work in
          # YAML-type options
          if cfg.acquisitions == [ ] then
            "${cfg.package}/share/crowdsec/config/acquis.yaml"
          else
            pkgs.writeText "acquis.yaml" acquisitions;
        cscli = {
          prometheus_uri = lib.mkDefault "127.0.0.1:6060";
        };
        db_config = {
          type = lib.mkDefault "sqlite";
          db_path = lib.mkDefault "${cfg.stateDirectory}/data/crowdsec.db";
          use_wal = lib.mkDefault true;
          flush = {
            max_items = lib.mkDefault 5000;
            max_age = lib.mkDefault "7d";
          };
        };
        api = {
          cti = {
            enabled = cfg.ctiApiKey != null;
            key = cfg.ctiApiKey;
          };
          # Credentials are generated by crowdsec-setup on first boot.
          client.credentials_path = "${cfg.stateDirectory}/local_credentials.yaml";
          server = {
            listen_uri = lib.mkDefault "127.0.0.1:8080";
            profiles_path = lib.mkDefault "${cfg.package}/share/crowdsec/config/profiles.yaml";
            console_path = lib.mkDefault "${cfg.package}/share/crowdsec/config/console.yaml";
            online_client = {
              # By default, we don't let crowdsec phone home, since
              # this is usually within NixOS users' concerns.
              sharing = lib.mkDefault false;
              credentials_path = cfg.centralApiCredentials;
            };
          };
        };
        # We enable prometheus by default, since cscli relies on it
        # for metrics
        prometheus = {
          enabled = lib.mkDefault true;
          level = lib.mkDefault "full";
          listen_addr = lib.mkDefault "127.0.0.1";
          listen_port = lib.mkDefault 6060;
        };
      };
    };
    # Pulls in the upstream-provided unit definitions.
    systemd.packages = [ cfg.package ];
    environment = {
      systemPackages = [
        # To add completions; sadly need to hand-roll this since
        # neither `symlinkJoin` nor `buildEnv` have collision
        # handling.
        (pkgs.runCommandNoCCLocal "cscli" { } ''
          mkdir -p $out
          ln -s ${cscli}/bin $out/bin
          ln -s ${cfg.package}/share $out/share
        '')
      ];
      etc."crowdsec/config.yaml".source = settingsFormat.generate "crowdsec-settings.yaml" cfg.settings;
    };
    systemd = {
      tmpfiles.settings."10-crowdsec" = {
        "${cfg.stateDirectory}".d = {
          user = "crowdsec";
          group = "crowdsec";
          mode = "0700";
        };
        # This must be created for the setup service to work
        "${cfg.stateDirectory}/config".d = {
          user = "crowdsec";
          group = "crowdsec";
          mode = "0700";
        };
        "${cfg.stateDirectory}/config/parsers".d = lib.mkIf (cfg.parserWhitelist != [ ]) {
          user = "crowdsec";
          group = "crowdsec";
          mode = "0700";
        };
        "${cfg.stateDirectory}/config/parsers/s02-enrich".d = lib.mkIf (cfg.parserWhitelist != [ ]) {
          user = "crowdsec";
          group = "crowdsec";
          mode = "0700";
        };
        # Symlink ("L+") the generated whitelist parser into the
        # stage-2 enrich directory crowdsec scans.
        "${cfg.stateDirectory}/config/parsers/s02-enrich/nixos-whitelist.yaml" =
          lib.mkIf (cfg.parserWhitelist != [ ])
            {
              "L+".argument =
                (settingsFormat.generate "crowdsec-nixos-whitelist.yaml" {
                  name = "nixos/parser-whitelist";
                  description = "Parser whitelist generated by the crowdsec NixOS module";
                  whitelist = {
                    reason = "Filtered by NixOS whitelist";
                    # Plain addresses and CIDR ranges go in separate keys.
                    ip = lib.lists.filter (ip: !(lib.hasInfix "/" ip)) cfg.parserWhitelist;
                    cidr = lib.lists.filter (ip: lib.hasInfix "/" ip) cfg.parserWhitelist;
                  };
                }).outPath;
            };
      };
      services = {
        crowdsec-setup = {
          # TODO(tlater): Depend on tmpfiles path for
          # /var/lib/crowdsec/config
          description = "Crowdsec database and config preparation";
          # Seeds mutable state on first boot only: the simulation
          # config copy and the local API machine credentials.
          script = ''
            if [ ! -e '${cfg.settings.config_paths.simulation_path}' ]; then
              cp '${cfg.package}/share/crowdsec/config/simulation.yaml' '${cfg.settings.config_paths.simulation_path}'
            fi
            if [ ! -e '${cfg.settings.api.client.credentials_path}' ]; then
              ${cfg.package}/bin/cscli machines add --auto --file '${cfg.settings.api.client.credentials_path}'
            fi
          '';
          serviceConfig = {
            User = "crowdsec";
            Group = "crowdsec";
            StateDirectory = "crowdsec";
            Type = "oneshot";
            RemainAfterExit = true;
          };
        };
        # Note that the service basics are already defined upstream
        crowdsec = {
          enable = true;
          after = [ "crowdsec-setup.service" ];
          bindsTo = [ "crowdsec-setup.service" ];
          wantedBy = [ "multi-user.target" ];
          serviceConfig = {
            User = "crowdsec";
            Group = "crowdsec";
            SupplementaryGroups = cfg.extraGroups;
            StateDirectory = "crowdsec";
          };
        };
      };
    };
    users = {
      users.crowdsec = {
        isSystemUser = true;
        home = cfg.stateDirectory;
        group = "crowdsec";
      };
      groups = {
        crowdsec = { };
      };
    };
  };
}

View file

@ -0,0 +1,87 @@
# NixOS module for crowdsec's firewall remediation component
# ("bouncer"): reads ban decisions from the local crowdsec API and
# maintains matching iptables/nftables/ipset rules.
{
  flake-inputs,
  pkgs,
  lib,
  config,
  ...
}:
let
  inherit (flake-inputs.self.packages.${pkgs.system}) crowdsec-firewall-bouncer;
  crowdsecCfg = config.security.crowdsec;
  cfg = crowdsecCfg.remediationComponents.firewallBouncer;
  settingsFormat = pkgs.formats.yaml { };
in
{
  options.security.crowdsec.remediationComponents.firewallBouncer = {
    enable = lib.mkEnableOption "cs-firewall-bouncer";
    settings = lib.mkOption {
      inherit (settingsFormat) type;
      default = { };
      description = ''
        The bouncer configuration. Refer to
        <https://docs.crowdsec.net/u/bouncers/firewall/> for details
        on supported values.
      '';
    };
  };
  config = lib.mkIf cfg.enable {
    security.crowdsec.remediationComponents.firewallBouncer.settings = {
      # Pick the backend matching the host's firewall implementation.
      # (Plain conditional; the previous string interpolation around
      # it was redundant.)
      mode = lib.mkDefault (if config.networking.nftables.enable then "nftables" else "iptables");
      log_mode = "stdout";
      iptables_chains = [ "nixos-fw" ];
      # Don't let users easily override this; unfortunately we need to
      # set up this key through substitution at runtime.
      api_key = lib.mkForce "\${API_KEY}";
      api_url = lib.mkDefault "http://${crowdsecCfg.settings.api.server.listen_uri}";
    };
    systemd = {
      # Pulls in the unit template shipped by the package.
      packages = [ crowdsec-firewall-bouncer ];
      services = {
        crowdsec-firewall-bouncer-setup = {
          description = "Crowdsec firewall bouncer config preparation";
          # Registers the bouncer with the local API on first boot,
          # then renders the config with the stored key substituted in.
          script = ''
            if [ ! -e '${crowdsecCfg.stateDirectory}/firewall_bouncer_credentials.yaml' ]; then
              ${crowdsecCfg.package}/bin/cscli -oraw bouncers add "cs-firewall-bouncer-$(${pkgs.coreutils}/bin/date +%s)" > \
                ${crowdsecCfg.stateDirectory}/firewall_bouncer_credentials.yaml
            fi
            # Stdout redirection is deliberately used to forcibly
            # overwrite the file if it exists
            API_KEY="$(<${crowdsecCfg.stateDirectory}/firewall_bouncer_credentials.yaml)" \
              ${lib.getExe pkgs.envsubst} \
              -i ${settingsFormat.generate "crowdsec-firewall-bouncer.yaml" cfg.settings} \
              > '${crowdsecCfg.stateDirectory}/config/crowdsec-firewall-bouncer.yaml'
          '';
          serviceConfig = {
            User = "crowdsec";
            Group = "crowdsec";
            Type = "oneshot";
            RemainAfterExit = true;
          };
        };
        crowdsec-firewall-bouncer = {
          enable = true;
          after = [ "crowdsec-firewall-bouncer-setup.service" ];
          bindsTo = [ "crowdsec-firewall-bouncer-setup.service" ];
          requiredBy = [ "crowdsec.service" ];
          # Provide only the firewall tooling the selected mode needs.
          path =
            lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [ pkgs.ipset ]
            ++ lib.optional (cfg.settings.mode == "iptables") pkgs.iptables
            ++ lib.optional (cfg.settings.mode == "nftables") pkgs.nftables;
        };
      };
    };
  };
}

View file

@ -0,0 +1 @@
{ imports = [ ./cs-firewall-bouncer.nix ]; }

View file

@ -1 +1,6 @@
{ imports = [ ./serviceTests/stub.nix ]; }
{
  # Flake check modules: the crowdsec module tests plus the service
  # test stub.
  imports = [
    ./crowdsec
    ./serviceTests/stub.nix
  ];
}

View file

@ -1,10 +1,8 @@
{
perSystem =
{ pkgs, ... }:
{
packages = pkgs.lib.packagesFromDirectoryRecursive {
inherit (pkgs) callPackage;
directory = ./packages;
};
};
{ pkgs, flake-inputs }:
let
  # Package scope handed to every package expression under ./packages:
  # the pinned nixpkgs, with ast-grep swapped in from the unstable
  # channel.
  scope = pkgs // {
    inherit (flake-inputs.nixpkgs-unstable.legacyPackages.${pkgs.system}) ast-grep;
  };
in
pkgs.lib.packagesFromDirectoryRecursive {
  directory = ./packages;
  callPackage = pkgs.lib.callPackageWith scope;
}

View file

@ -0,0 +1,51 @@
# crowdsec's firewall remediation component ("bouncer"); ships a
# systemd unit template that the NixOS module installs.
{
  lib,
  fetchFromGitHub,
  buildGoModule,
  envsubst,
  coreutils,
  writers,
  nix-update,
}:
let
  envsubstBin = lib.getExe envsubst;
in
buildGoModule (drv: {
  pname = "crowdsec-firewall-bouncer";
  # The upstream tag doubles as the version string.
  version = drv.src.rev;
  src = fetchFromGitHub {
    owner = "crowdsecurity";
    repo = "cs-firewall-bouncer";
    rev = "0.0.34";
    # SRI `hash` attribute, consistent with the sibling crowdsec-hub
    # package (was the legacy `sha256` alias).
    hash = "sha256-lDO9pwPkbI+FDTdXBv03c0p8wbkRUiIDNl1ip3AZo2g=";
  };
  vendorHash = "sha256-SbpclloBgd9vffC0lBduGRqPOqmzQ0J91/KeDHCh0jo=";
  # Render upstream's unit template with our config/binary paths, and
  # point its hardcoded /bin/sleep at coreutils.
  postInstall = ''
    mkdir -p $out/lib/systemd/system
    CFG=/var/lib/crowdsec/config BIN=$out/bin/cs-firewall-bouncer ${envsubstBin} \
        -i ./config/crowdsec-firewall-bouncer.service \
        -o $out/lib/systemd/system/crowdsec-firewall-bouncer.service
    substituteInPlace $out/lib/systemd/system/crowdsec-firewall-bouncer.service \
        --replace-fail /bin/sleep ${coreutils}/bin/sleep
  '';
  # `nix run .#crowdsec-firewall-bouncer.updateScript` bumps rev and
  # hashes via nix-update.
  passthru.updateScript =
    writers.writeNuBin "update-crowdsec-firewall-bouncer"
      {
        makeWrapperArgs = [
          "--prefix"
          "PATH"
          ":"
          (lib.makeBinPath [ nix-update ])
        ];
      }
      ''
        nix-update --flake --format crowdsec-firewall-bouncer
      '';
})

View file

@ -0,0 +1,38 @@
# Pinned snapshot of the crowdsec hub (index of collections,
# scenarios, parsers, ...), exposed as a plain file tree.
{
  lib,
  fetchFromGitHub,
  stdenvNoCC,
  writers,
  nix-update,
}:
# Using `mkDerivation` so nix-update can pick up the version
stdenvNoCC.mkDerivation (drv: {
  pname = "crowdsec-hub";
  version = drv.src.rev;
  src = fetchFromGitHub {
    owner = "crowdsecurity";
    repo = "hub";
    rev = "b63d9e925cfdd70f818a6a136ea53d5c8ca96d9a";
    hash = "sha256-FMArGnR/pI/QlzsznStp8vzs/LbXooVgLdoTw+eSbec=";
  };
  # The hub is pure data; just copy the source tree. Run the standard
  # phase hooks (nixpkgs convention) so hook-based tooling works.
  installPhase = ''
    runHook preInstall
    cp -r $src $out
    runHook postInstall
  '';
  # `nix run .#crowdsec-hub.updateScript` bumps rev and hash to the
  # latest branch head.
  passthru.updateScript =
    writers.writeNuBin "update-crowdsec-hub"
      {
        makeWrapperArgs = [
          "--prefix"
          "PATH"
          ":"
          (lib.makeBinPath [ nix-update ])
        ];
      }
      ''
        nix-update --flake --format --version=branch crowdsec-hub
      '';
})

File diff suppressed because it is too large Load diff

View file

@ -11,6 +11,7 @@ axum = { version = "0.8.7", features = ["macros"], optional = true }
console_error_panic_hook = { version = "0.1.7", optional = true }
figment = { version = "0.10.19", features = ["toml", "env"] }
leptos = "0.8.3"
leptos-use = "0.16.3"
leptos_axum = { version = "0.8.3", optional = true }
leptos_meta = "0.8.3"
leptos_router = "0.8.3"
@ -19,9 +20,9 @@ reqwest = "0.12.24"
serde = { version = "1.0.228", features = ["derive"] }
thiserror = "2.0.17"
tokio = { version = "1.48.0", features = ["rt-multi-thread"], optional = true }
url = { version = "2.5.7", features = ["serde"] }
url = "2.5.7"
wasm-bindgen = { version = "=0.2.100", optional = true }
web-sys = "^0.3.77"
web-sys = { version = "^0.3.77", features = ["AnalyserNode", "AudioContext", "AudioDestinationNode", "GainNode", "HtmlMediaElement", "MediaElementAudioSourceNode"] }
[features]
hydrate = [

View file

@ -237,11 +237,7 @@ rustPlatform.buildRustPackage (drv: {
buildPhase = ''
runHook preBuild
# dart-sass and wasm-pack want a home directory to put cache files
export HOME=$(mktemp -d)
cargo leptos build --release
runHook postBuild
'';
@ -292,7 +288,6 @@ rustPlatform.buildRustPackage (drv: {
(lib.makeBinPath [
ast-grep
nix-prefetch-github
cargo
])
];
} ./update.nu;

View file

@ -7,10 +7,12 @@ use leptos_router::{
mod homepage;
mod mail;
mod music_sample;
use crate::components::Navbar;
use homepage::HomePage;
use mail::Mail;
use music_sample::MusicSample;
pub fn shell(options: LeptosOptions) -> impl IntoView {
view! {
@ -49,6 +51,7 @@ pub fn App() -> impl IntoView {
<Routes fallback=|| "Page not found.".into_view()>
<Route path=StaticSegment("") view=HomePage />
<Route path=StaticSegment("mail") view=Mail />
<Route path=StaticSegment("music_sample") view=MusicSample />
</Routes>
</main>
</Router>

View file

@ -0,0 +1,75 @@
#![allow(dead_code, unused_variables)]
use leptos::{logging, prelude::*};
use leptos_meta::{Meta, Title};
use leptos_use::use_event_listener;
use ssr_safe::{MediaPlayer, MediaPlayerError};
mod ssr_safe;
/// Playback control bar rendered in the page footer.
///
/// Reads the shared [`MediaPlayer`] `LocalResource` from context
/// (provided by [`MusicSample`]) and shows a play button, the track
/// title, and an artist placeholder.
/// NOTE(review): WIP — the button has no click handler yet, and the
/// artist field is hardcoded.
#[component]
fn Controls() -> impl IntoView {
    // Supplied by the parent component via provide_context().
    let player: LocalResource<Result<MediaPlayer, MediaPlayerError>> = expect_context();

    // Log <audio> element errors to the console once the player
    // resource resolves. NOTE(review): the cleanup closure returned by
    // use_event_listener is discarded and the Effect re-runs whenever
    // `player` changes — confirm leptos-use's scope-based cleanup
    // prevents duplicate listener registrations.
    Effect::new(move || {
        let audio_element = if let Some(Ok(p)) = player.get() {
            Some(p.audio_element())
        } else {
            None
        };

        use_event_listener(audio_element, ssr_safe::media_events::error, |ev| {
            logging::error!("{:?}", ev);
        });
    });

    view! {
        <div class="notification">
            // Suspends until the MediaPlayer resource has resolved.
            <Suspense fallback=move || "Initializing audio player...">
                <ErrorBoundary fallback=|errors| { "Failed to initialize audio player" }>
                    <div class="level is-mobile">
                        <div class="level-left">
                            // The play/pause/etc button
                            <div class="level-item">
                                <button class="button" type="button">
                                    <span class="icon is-medium" />
                                </button>
                            </div>
                        </div>
                        // The title display
                        <div class="level-item">
                            {move || {
                                Ok::<_, MediaPlayerError>(player.get().transpose()?.map(|p| p.get_title()))
                            }}
                        </div>
                        // The artist display
                        <div class="level-right">
                            <div class="level-item">Artist</div>
                        </div>
                    </div>
                </ErrorBoundary>
            </Suspense>
        </div>
    }
}
/// Music player demo page.
///
/// Constructs the [`MediaPlayer`] as a `LocalResource` (client-only —
/// the Web Audio API does not exist during server-side rendering) and
/// shares it with child components through context.
#[component]
pub fn MusicSample() -> impl IntoView {
    let player = LocalResource::new(MediaPlayer::new);
    provide_context(player);

    view! {
        <Meta name="description" content="tlater.net music visualizer sample" />
        <Title text="tlater.net music player" />
        <section class="hero is-fullheight-with-navbar">
            // Placeholder for the future visualizer body.
            <div class="hero-body p-0">Body</div>
            <div class="hero-foot">
                <Controls />
            </div>
        </section>
    }
}

View file

@ -0,0 +1,121 @@
use leptos::{ev::EventDescriptor, logging};
use leptos_use::use_event_listener;
use web_sys::EventTarget;
pub const DEFAULT_MP3: &str = "/Mseq_-_Journey.mp3a";
/// Client-side audio playback handle.
///
/// Owns both the Web Audio context (with its processing graph, built in
/// [`MediaPlayer::new`]) and the `<audio>` element that streams the track.
/// `Clone` duplicates the JS handles, not the underlying audio state.
#[derive(Clone)]
pub struct MediaPlayer {
    // Web Audio context owning the gain/analyser processing graph.
    context: web_sys::AudioContext,
    // The underlying <audio> element the track is streamed through.
    audio_element: web_sys::HtmlAudioElement,
}
impl MediaPlayer {
    /// Create the player and wire up its Web Audio graph:
    ///
    /// ```text
    /// <audio> -> source -+-> analyser            (FFT data for visuals)
    ///                    +-> gain -> destination (audible output)
    /// ```
    ///
    /// `async` even though nothing is awaited yet — presumably so it can be
    /// passed directly to `LocalResource::new`, which expects a future
    /// (TODO confirm).
    ///
    /// # Errors
    ///
    /// Any failing Web Audio / DOM call is converted from its raw `JsValue`
    /// into a [`MediaPlayerError`] by the `?` operator.
    pub async fn new() -> Result<Self, MediaPlayerError> {
        let context = web_sys::AudioContext::new()?;
        let audio_element = web_sys::HtmlAudioElement::new_with_src(DEFAULT_MP3)?;
        let source_node = context.create_media_element_source(&audio_element)?;
        let gain_node = context.create_gain()?;
        let analyser_node = context.create_analyser()?;
        // 2048-sample FFT window, lightly smoothed between frames.
        analyser_node.set_fft_size(2048);
        analyser_node.set_smoothing_time_constant(0.8);
        source_node.connect_with_audio_node(&analyser_node)?;
        source_node.connect_with_audio_node(&gain_node)?;
        gain_node.connect_with_audio_node(&context.destination())?;
        Ok(Self {
            context,
            audio_element,
        })
    }
    /// NOTE(review): despite the name, this replaces the `<audio>` element's
    /// `src` URL — it does not set any title metadata. Consider renaming
    /// (e.g. `set_src`) once the API settles.
    pub fn set_title(&self, title: &str) {
        self.audio_element.set_src(title);
    }
    /// Title of the current track.
    pub fn get_title(&self) -> String {
        // Hardcoded for now, eventually I'll make this a proper
        // player again...
        "Journey".to_owned()
    }
    /// The `AudioContext` as a generic [`EventTarget`] (e.g. for
    /// `statechange` listeners).
    pub fn context(&self) -> EventTarget {
        self.context.clone().into()
    }
    /// The `<audio>` element as a generic [`EventTarget`] (e.g. for
    /// `error` listeners).
    pub fn audio_element(&self) -> EventTarget {
        self.audio_element.clone().into()
    }
    /// Attach `handler` for `event` on the `<audio>` element.
    ///
    /// Returns the closure produced by `use_event_listener` (per leptos-use,
    /// a stop function that detaches the listener when called).
    pub fn use_media_event<Ev, F>(&self, event: Ev, handler: F) -> impl Fn() + Clone + Send + Sync + use<Ev, F>
    where
        F: FnMut(<Ev as EventDescriptor>::EventType) + 'static,
        Ev: EventDescriptor + 'static,
    {
        use_event_listener(self.audio_element.clone(), event, handler)
    }
    /// Attach `handler` for the audio context's `statechange` event.
    ///
    /// Returns the closure produced by `use_event_listener` (per leptos-use,
    /// a stop function that detaches the listener when called).
    pub fn use_statechange<F>(&self, handler: F) -> impl Fn() + Clone + Send + Sync
    where
        F: FnMut(<media_events::statechange as EventDescriptor>::EventType) + 'static,
    {
        use_event_listener(self.context.clone(), media_events::statechange, handler)
    }
}
/// Errors raised while constructing or driving the [`MediaPlayer`].
#[derive(thiserror::Error, Debug, Clone)]
pub enum MediaPlayerError {
    // Placeholder: every JS-side failure currently collapses into this
    // single variant until proper error categorization is implemented.
    #[error("todo")]
    Todo,
}
impl From<web_sys::wasm_bindgen::JsValue> for MediaPlayerError {
fn from(value: web_sys::wasm_bindgen::JsValue) -> Self {
logging::error!("Some kind of error");
Self::Todo {}
}
}
pub mod media_events {
use leptos::ev::EventDescriptor;
use std::borrow::Cow;
#[derive(Copy, Clone, Debug)]
#[allow(non_camel_case_types)]
pub struct error;
impl EventDescriptor for error {
type EventType = web_sys::Event;
const BUBBLES: bool = false;
#[inline(always)]
fn name(&self) -> Cow<'static, str> {
"error".into()
}
#[inline(always)]
fn event_delegation_key(&self) -> Cow<'static, str> {
"$$$error".into()
}
}
#[derive(Copy, Clone, Debug)]
#[allow(non_camel_case_types)]
pub struct statechange;
impl EventDescriptor for statechange {
type EventType = web_sys::Event;
const BUBBLES: bool = false;
#[inline(always)]
fn name(&self) -> Cow<'static, str> {
"statechange".into()
}
#[inline(always)]
fn event_delegation_key(&self) -> Cow<'static, str> {
"$$$statechange".into()
}
}
}

View file

@ -46,11 +46,8 @@ iv.$family-monospace: "Hack", iv.$family-monospace;
@forward "bulma/sass/grid/columns";
@forward "bulma/sass/helpers/typography";
@forward "bulma/sass/helpers/color";
@forward "bulma/sass/layout/container";
@forward "bulma/sass/layout/section";
@forward "bulma/sass/helpers";
@forward "bulma/sass/layout";
@forward "bulma/sass/components/navbar" with (
$navbar-burger-color: iv.$grey-light,

View file

@ -5,7 +5,7 @@ let packages_with_updatescript = (
| from json
| $in.packages.x86_64-linux
| columns
| where {|p| nix eval $'.#($p)' --apply 'builtins.hasAttr "updateScript"' | $in == 'true' }
| filter {|p| nix eval $'.#($p)' --apply 'builtins.hasAttr "updateScript"' | $in == 'true' }
)
for $package in $packages_with_updatescript {