treewide: Start using nixpkgs-fmt formatting
parent 501c3466bc
commit fd138d45e6
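
The diff below converts every Nix file shown from the previous formatting style to the style emitted by nixpkgs-fmt: spaces inside list and attrset brackets ("[ x ]", "{ }"), leading commas in multi-line function argument sets, and let/in bodies broken onto their own lines. As an illustrative sketch of the two styles (a hypothetical module header, not a file from this repository):

-{
-  pkgs,
-  ...
-}: {
-  environment.systemPackages = [pkgs.hello];
-}
+{ pkgs
+, ...
+}: {
+  environment.systemPackages = [ pkgs.hello ];
+}

Assuming nixpkgs-fmt is available, the conversion can be reproduced by running `nixpkgs-fmt .` from the repository root; the formatter should be idempotent, so re-running it leaves the tree unchanged.
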
@@ -1,10 +1,9 @@
-{
-  config,
-  pkgs,
-  lib,
-  modulesPath,
-  flake-inputs,
-  ...
+{ config
+, pkgs
+, lib
+, modulesPath
+, flake-inputs
+, ...
 }: {
   imports = [
     flake-inputs.disko.nixosModules.disko

@@ -47,15 +46,15 @@
     '';

     # Enable remote builds from tlater
-    settings.trusted-users = ["@wheel"];
+    settings.trusted-users = [ "@wheel" ];
   };

   nixpkgs.config.allowUnfreePredicate = pkg:
-    builtins.elem (lib.getName pkg) ["steam-original" "steam-runtime" "steam-run" "steamcmd"];
+    builtins.elem (lib.getName pkg) [ "steam-original" "steam-runtime" "steam-run" "steamcmd" ];

   # Optimization for minecraft servers, see:
   # https://bugs.mojang.com/browse/MC-183518
-  boot.kernelParams = ["highres=off" "nohz=off"];
+  boot.kernelParams = [ "highres=off" "nohz=off" ];

   networking = {
     usePredictableInterfaceNames = false;

@@ -106,15 +105,15 @@

   users.users.tlater = {
     isNormalUser = true;
-    extraGroups = ["wheel"];
-    openssh.authorizedKeys.keyFiles = [../keys/tlater.pub];
+    extraGroups = [ "wheel" ];
+    openssh.authorizedKeys.keyFiles = [ ../keys/tlater.pub ];
   };

   services = {
     openssh = {
       enable = true;
       allowSFTP = false;
-      ports = [2222];
+      ports = [ 2222 ];
       startWhenNeeded = true;

       settings = {

@@ -133,14 +132,14 @@
     pam = {
       sshAgentAuth = {
         enable = true;
-        authorizedKeysFiles = ["/etc/ssh/authorized_keys.d/%u"];
+        authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ];
       };
       services.sudo.sshAgentAuth = true;
     };
   };

   # Remove some unneeded packages
-  environment.defaultPackages = [];
+  environment.defaultPackages = [ ];

   system.stateVersion = "20.09";
 }

@@ -8,7 +8,7 @@
   # disables it by default.
   #
   # TODO(tlater): See if would be useful for anything?
-  boot.kernelParams = ["nosgx"];
+  boot.kernelParams = [ "nosgx" ];

   networking.hostName = "hetzner-1";
   services.nginx.domain = "tlater.net";

@@ -1,82 +1,84 @@
 {
-  disko.devices.disk = let
-    bootPartition = {
-      size = "1M";
-      type = "EF02";
-    };
-
-    swapPartition = {
-      # 8G is apparently recommended for this much RAM, but we set up
-      # 4G on both disks for mirroring purposes.
-      #
-      # That'll still be 8G during normal operation, and it's probably
-      # not too bad to have slightly less swap if a disk dies.
-      size = "4G";
-      content = {
-        type = "swap";
-        randomEncryption = true;
-      };
-    };
-
-    mountOptions = ["compress=zstd" "noatime"];
-  in {
-    sda = {
-      type = "disk";
-      device = "/dev/sda";
-      content = {
-        type = "gpt";
-        partitions = {
-          boot = bootPartition;
-          swap = swapPartition;
-
-          disk1 = {
-            size = "100%";
-            # Empty partition to combine in RAID0 with the other disk
-          };
-        };
-      };
-    };
-
-    sdb = {
-      type = "disk";
-      device = "/dev/sdb";
-      content = {
-        type = "gpt";
-        partitions = {
-          boot = bootPartition;
-          swap = swapPartition;
-
-          disk2 = {
-            size = "100%";
-            content = {
-              type = "btrfs";
-              # Hack to get multi-device btrfs going
-              # See https://github.com/nix-community/disko/issues/99
-              extraArgs = ["-d" "raid1" "-m" "raid1" "--runtime-features" "quota" "/dev/sda3"];
-              subvolumes = {
-                "/volume" = {};
-                "/volume/root" = {
-                  inherit mountOptions;
-                  mountpoint = "/";
-                };
-                "/volume/home" = {
-                  inherit mountOptions;
-                  mountpoint = "/home";
-                };
-                "/volume/var" = {
-                  inherit mountOptions;
-                  mountpoint = "/var";
-                };
-                "/volume/nix-store" = {
-                  inherit mountOptions;
-                  mountpoint = "/nix";
-                };
-                "/snapshots" = {};
-              };
-            };
-          };
-        };
-      };
-    };
-  };
+  disko.devices.disk =
+    let
+      bootPartition = {
+        size = "1M";
+        type = "EF02";
+      };
+
+      swapPartition = {
+        # 8G is apparently recommended for this much RAM, but we set up
+        # 4G on both disks for mirroring purposes.
+        #
+        # That'll still be 8G during normal operation, and it's probably
+        # not too bad to have slightly less swap if a disk dies.
+        size = "4G";
+        content = {
+          type = "swap";
+          randomEncryption = true;
+        };
+      };
+
+      mountOptions = [ "compress=zstd" "noatime" ];
+    in
+    {
+      sda = {
+        type = "disk";
+        device = "/dev/sda";
+        content = {
+          type = "gpt";
+          partitions = {
+            boot = bootPartition;
+            swap = swapPartition;
+
+            disk1 = {
+              size = "100%";
+              # Empty partition to combine in RAID0 with the other disk
+            };
+          };
+        };
+      };
+
+      sdb = {
+        type = "disk";
+        device = "/dev/sdb";
+        content = {
+          type = "gpt";
+          partitions = {
+            boot = bootPartition;
+            swap = swapPartition;
+
+            disk2 = {
+              size = "100%";
+              content = {
+                type = "btrfs";
+                # Hack to get multi-device btrfs going
+                # See https://github.com/nix-community/disko/issues/99
+                extraArgs = [ "-d" "raid1" "-m" "raid1" "--runtime-features" "quota" "/dev/sda3" ];
+                subvolumes = {
+                  "/volume" = { };
+                  "/volume/root" = {
+                    inherit mountOptions;
+                    mountpoint = "/";
+                  };
+                  "/volume/home" = {
+                    inherit mountOptions;
+                    mountpoint = "/home";
+                  };
+                  "/volume/var" = {
+                    inherit mountOptions;
+                    mountpoint = "/var";
+                  };
+                  "/volume/nix-store" = {
+                    inherit mountOptions;
+                    mountpoint = "/nix";
+                  };
+                  "/snapshots" = { };
+                };
+              };
+            };
+          };
+        };
+      };
+    };
 }

@@ -1,8 +1,8 @@
-{lib, ...}: {
+{ lib, ... }: {
   users.users.tlater.password = "insecure";

   # Disable graphical tty so -curses works
-  boot.kernelParams = ["nomodeset"];
+  boot.kernelParams = [ "nomodeset" ];

   networking.hostName = "testvm";
   # Sets the base domain for nginx to a local domain so that we can

@@ -1,7 +1,6 @@
-{
-  config,
-  lib,
-  ...
+{ config
+, lib
+, ...
 }: {
   services.nginx = {
     enable = true;

@@ -27,31 +26,33 @@
       # Override the default, just keep fewer logs
       nginx.rotate = 6;
     }
-    // lib.mapAttrs' (virtualHost: _:
-      lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
-        frequency = "daily";
-        rotate = 2;
-        compress = true;
-        delaycompress = true;
-        su = "${config.services.nginx.user} ${config.services.nginx.group}";
-        postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
-      })
-    config.services.nginx.virtualHosts;
+    // lib.mapAttrs'
+      (virtualHost: _:
+        lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
+          frequency = "daily";
+          rotate = 2;
+          compress = true;
+          delaycompress = true;
+          su = "${config.services.nginx.user} ${config.services.nginx.group}";
+          postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
+        })
+      config.services.nginx.virtualHosts;

   systemd.tmpfiles.rules =
-    lib.mapAttrsToList (
-      virtualHost: _:
-      #
-        "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
-    )
-    config.services.nginx.virtualHosts;
+    lib.mapAttrsToList
+      (
+        virtualHost: _:
+          #
+          "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
+      )
+      config.services.nginx.virtualHosts;

   security.acme = {
     defaults.email = "tm@tlater.net";
     acceptTerms = true;

     certs."tlater.net" = {
-      extraDomainNames = ["*.tlater.net"];
+      extraDomainNames = [ "*.tlater.net" ];
       dnsProvider = "hetzner";
       group = "nginx";
       credentialFiles."HETZNER_API_KEY_FILE" = config.sops.secrets."hetzner-api".path;

@@ -62,6 +63,6 @@
     user = "acme";
     paths =
       lib.mapAttrsToList (virtualHost: _: "/var/lib/acme/${virtualHost}")
-      config.services.nginx.virtualHosts;
+        config.services.nginx.virtualHosts;
   };
 }

@@ -1,12 +1,11 @@
-{
-  pkgs,
-  config,
-  ...
+{ pkgs
+, config
+, ...
 }: {
   systemd.services.afvalcalendar = {
     description = "Enschede afvalcalendar -> ical converter";
-    wantedBy = ["multi-user.target"];
-    after = ["network.target"];
+    wantedBy = [ "multi-user.target" ];
+    after = [ "network.target" ];

     script = ''
       ${pkgs.local.afvalcalendar}/bin/afvalcalendar > /srv/afvalcalendar/afvalcalendar.ical

@@ -26,14 +25,14 @@
       ProtectKernelModules = true;
       ProtectKernelLogs = true;
       ProtectControlGroups = true;
-      RestrictAddressFamilies = ["AF_UNIX" "AF_INET" "AF_INET6"];
+      RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
       RestrictNamespaces = true;
       LockPersonality = true;
      MemoryDenyWriteExecute = true;
       RestrictRealtime = true;
       RestrictSUIDSGID = true;
       SystemCallArchitectures = "native";
-      SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"];
+      SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ];

       Umask = 0002;
       SupplementaryGroups = "afvalcalendar-hosting";

@@ -50,7 +49,7 @@
     root = "/srv/afvalcalendar";
   };

-  users.groups.afvalcalendar-hosting = {};
+  users.groups.afvalcalendar-hosting = { };
   systemd.tmpfiles.settings."10-afvalcalendar" = {
     "/srv/afvalcalendar".d = {
       user = "nginx";

@@ -1,9 +1,9 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
-}: let
+{ config
+, pkgs
+, lib
+, ...
+}:
+let
   inherit (lib) types optional singleton;
   mkShutdownScript = service:
     pkgs.writeShellScript "backup-${service}-shutdown" ''

@@ -42,17 +42,17 @@
     RESTIC_REPOSITORY = "rclone:storagebox:backups";
     RCLONE_CONFIG = rcloneConfig;
   };
-in {
+in
+{
   options = {
     services.backups = lib.mkOption {
       description = lib.mdDoc ''
         Configure restic backups with a specific tag.
       '';
-      type = types.attrsOf (types.submodule ({
-        config,
-        name,
-        ...
-      }: {
+      type = types.attrsOf (types.submodule ({ config
+                                             , name
+                                             , ...
+                                             }: {
         options = {
           user = lib.mkOption {
             type = types.str;

@@ -76,7 +76,7 @@ in {
           preparation = {
             packages = lib.mkOption {
               type = types.listOf types.package;
-              default = [];
+              default = [ ];
               description = ''
                 The list of packages to make available in the
                 preparation script.

@@ -97,7 +97,7 @@ in {
           cleanup = {
             packages = lib.mkOption {
               type = types.listOf types.package;
-              default = [];
+              default = [ ];
               description = ''
                 The list of packages to make available in the
                 cleanup script.

@@ -116,7 +116,7 @@ in {
           };
           pauseServices = lib.mkOption {
             type = types.listOf types.str;
-            default = [];
+            default = [ ];
             description = ''
               The systemd services that need to be shut down before
               the backup can run. Services will be restarted after the

@@ -131,7 +131,7 @@ in {
     };
   };

-  config = lib.mkIf (config.services.backups != {}) {
+  config = lib.mkIf (config.services.backups != { }) {
     systemd.services =
       {
         restic-prune = {

@@ -164,79 +164,81 @@ in {
           };
         };
       }
-      // lib.mapAttrs' (name: backup:
-        lib.nameValuePair "backup-${name}" {
-          # Don't want to restart mid-backup
-          restartIfChanged = false;
-
-          environment =
-            resticEnv
-            // {
-              RESTIC_CACHE_DIR = "%C/backup-${name}";
-            };
-
-          path = with pkgs; [
-            coreutils
-            openssh
-            rclone
-            restic
-          ];
-
-          # TODO(tlater): If I ever add more than one repo, service
-          # shutdown/restarting will potentially break if multiple
-          # backups for the same service overlap. A more clever
-          # sentinel file with reference counts would probably solve
-          # this.
-          serviceConfig = {
-            User = backup.user;
-            Group = "backup";
-            RuntimeDirectory = "backup-${name}";
-            CacheDirectory = "backup-${name}";
-            CacheDirectoryMode = "0700";
-            PrivateTmp = true;
-
-            ExecStart = [
-              (lib.concatStringsSep " " (["${pkgs.restic}/bin/restic" "backup" "--tag" name] ++ backup.paths))
-            ];
-
-            ExecStartPre =
-              map (service: "+${mkShutdownScript service}") backup.pauseServices
-              ++ singleton (writeScript "backup-${name}-repo-init" [] ''
-                restic snapshots || restic init
-              '')
-              ++ optional (backup.preparation.text != null)
-              (writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);
-
-            # TODO(tlater): Add repo pruning/checking
-            ExecStopPost =
-              map (service: "+${mkRestartScript service}") backup.pauseServices
-              ++ optional (backup.cleanup.text != null)
-              (writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
-          };
-        })
-      config.services.backups;
+      // lib.mapAttrs'
+        (name: backup:
+          lib.nameValuePair "backup-${name}" {
+            # Don't want to restart mid-backup
+            restartIfChanged = false;
+
+            environment =
+              resticEnv
+              // {
+                RESTIC_CACHE_DIR = "%C/backup-${name}";
+              };
+
+            path = with pkgs; [
+              coreutils
+              openssh
+              rclone
+              restic
+            ];
+
+            # TODO(tlater): If I ever add more than one repo, service
+            # shutdown/restarting will potentially break if multiple
+            # backups for the same service overlap. A more clever
+            # sentinel file with reference counts would probably solve
+            # this.
+            serviceConfig = {
+              User = backup.user;
+              Group = "backup";
+              RuntimeDirectory = "backup-${name}";
+              CacheDirectory = "backup-${name}";
+              CacheDirectoryMode = "0700";
+              PrivateTmp = true;
+
+              ExecStart = [
+                (lib.concatStringsSep " " ([ "${pkgs.restic}/bin/restic" "backup" "--tag" name ] ++ backup.paths))
+              ];
+
+              ExecStartPre =
+                map (service: "+${mkShutdownScript service}") backup.pauseServices
+                ++ singleton (writeScript "backup-${name}-repo-init" [ ] ''
+                  restic snapshots || restic init
+                '')
+                ++ optional (backup.preparation.text != null)
+                  (writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);
+
+              # TODO(tlater): Add repo pruning/checking
+              ExecStopPost =
+                map (service: "+${mkRestartScript service}") backup.pauseServices
+                ++ optional (backup.cleanup.text != null)
+                  (writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
+            };
+          })
+        config.services.backups;

     systemd.timers =
       {
         restic-prune = {
-          wantedBy = ["timers.target"];
+          wantedBy = [ "timers.target" ];
           timerConfig.OnCalendar = "Thursday 03:00:00 UTC";
           # Don't make this persistent, in case the server was offline
           # for a while. This job cannot run at the same time as any
           # of the backup jobs.
         };
       }
-      // lib.mapAttrs' (name: backup:
-        lib.nameValuePair "backup-${name}" {
-          wantedBy = ["timers.target"];
-          timerConfig = {
-            OnCalendar = "Wednesday 02:30:00 UTC";
-            RandomizedDelaySec = "1h";
-            FixedRandomDelay = true;
-            Persistent = true;
-          };
-        })
-      config.services.backups;
+      // lib.mapAttrs'
+        (name: backup:
+          lib.nameValuePair "backup-${name}" {
+            wantedBy = [ "timers.target" ];
+            timerConfig = {
+              OnCalendar = "Wednesday 02:30:00 UTC";
+              RandomizedDelaySec = "1h";
+              FixedRandomDelay = true;
+              Persistent = true;
+            };
+          })
+        config.services.backups;

     users = {
       # This user is only used to own the ssh key, because apparently

@@ -245,7 +247,7 @@ in {
         group = "backup";
         isSystemUser = true;
       };
-      groups.backup = {};
+      groups.backup = { };
     };
   };
 }

@@ -1,7 +1,6 @@
-{
-  config,
-  flake-inputs,
-  ...
+{ config
+, flake-inputs
+, ...
 }: {
   imports = [
     flake-inputs.sonnenshift.nixosModules.default

@@ -1,15 +1,16 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   inherit (lib.strings) concatMapStringsSep;

   cfg = config.services.matrix-conduit;
   domain = "matrix.${config.services.nginx.domain}";
   turn-realm = "turn.${config.services.nginx.domain}";
-in {
+in
+{
   services.matrix-conduit = {
     enable = true;
     settings.global = {

@@ -17,99 +18,103 @@ in {
       server_name = domain;
       database_backend = "rocksdb";

-      turn_uris = let
-        address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
-        tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
-      in [
-        "turn:${address}?transport=udp"
-        "turn:${address}?transport=tcp"
-        "turns:${tls-address}?transport=udp"
-        "turns:${tls-address}?transport=tcp"
-      ];
+      turn_uris =
+        let
+          address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
+          tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
+        in
+        [
+          "turn:${address}?transport=udp"
+          "turn:${address}?transport=tcp"
+          "turns:${tls-address}?transport=udp"
+          "turns:${tls-address}?transport=tcp"
+        ];
     };
   };

-  systemd.services.heisenbridge = let
-    replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
-    registrationFile = builtins.toFile "heisenbridge-registration.yaml" (builtins.toJSON {
-      id = "heisenbridge";
-      url = "http://127.0.0.1:9898";
-      as_token = "@AS_TOKEN@";
-      hs_token = "@HS_TOKEN@";
-      rate_limited = false;
-      sender_localpart = "heisenbridge";
-      namespaces = {
-        users = [
-          {
-            regex = "@irc_.*";
-            exclusive = true;
-          }
-          {
-            regex = "@heisenbridge:.*";
-            exclusive = true;
-          }
-        ];
-        aliases = [];
-        rooms = [];
-      };
-    });
-
-    # TODO(tlater): Starting with systemd 253 it will become possible
-    # to do the credential setup as part of ExecStartPre/preStart
-    # instead.
-    #
-    # This will also make it possible to actually set caps on the
-    # heisenbridge process using systemd, so that we can run the
-    # identd process.
-    execScript = pkgs.writeShellScript "heisenbridge" ''
-      cp ${registrationFile} "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      chmod 600 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
-      ${replaceSecretBin} '@AS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_as-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      ${replaceSecretBin} '@HS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_hs-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      chmod 400 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
-
-      ${pkgs.heisenbridge}/bin/heisenbridge \
-        --config $RUNTIME_DIRECTORY/heisenbridge-registration.yaml \
-        --owner @tlater:matrix.tlater.net \
-        'http://localhost:${toString cfg.settings.global.port}'
-    '';
-  in {
-    description = "Matrix<->IRC bridge";
-    wantedBy = ["multi-user.target"];
-    after = ["conduit.service"];
-
-    serviceConfig = {
-      Type = "simple";
-
-      LoadCredential = "heisenbridge:/run/secrets/heisenbridge";
-
-      ExecStart = execScript;
-
-      DynamicUser = true;
-      RuntimeDirectory = "heisenbridge";
-      RuntimeDirectoryMode = "0700";
-
-      RestrictNamespaces = true;
-      PrivateUsers = true;
-      ProtectHostname = true;
-      ProtectClock = true;
-      ProtectKernelTunables = true;
-      ProtectKernelModules = true;
-      ProtectKernelLogs = true;
-      ProtectControlGroups = true;
-      RestrictAddressFamilies = ["AF_INET AF_INET6"];
-      LockPersonality = true;
-      RestrictRealtime = true;
-      ProtectProc = "invisible";
-      ProcSubset = "pid";
-      UMask = 0077;
-
-      # For the identd port
-      # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
-      # AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
-    };
-  };
+  systemd.services.heisenbridge =
+    let
+      replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+      registrationFile = builtins.toFile "heisenbridge-registration.yaml" (builtins.toJSON {
+        id = "heisenbridge";
+        url = "http://127.0.0.1:9898";
+        as_token = "@AS_TOKEN@";
+        hs_token = "@HS_TOKEN@";
+        rate_limited = false;
+        sender_localpart = "heisenbridge";
+        namespaces = {
+          users = [
+            {
+              regex = "@irc_.*";
+              exclusive = true;
+            }
+            {
+              regex = "@heisenbridge:.*";
+              exclusive = true;
+            }
+          ];
+          aliases = [ ];
+          rooms = [ ];
+        };
+      });
+
+      # TODO(tlater): Starting with systemd 253 it will become possible
+      # to do the credential setup as part of ExecStartPre/preStart
+      # instead.
+      #
+      # This will also make it possible to actually set caps on the
+      # heisenbridge process using systemd, so that we can run the
+      # identd process.
+      execScript = pkgs.writeShellScript "heisenbridge" ''
+        cp ${registrationFile} "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        chmod 600 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
+        ${replaceSecretBin} '@AS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_as-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        ${replaceSecretBin} '@HS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_hs-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        chmod 400 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
+
+        ${pkgs.heisenbridge}/bin/heisenbridge \
+          --config $RUNTIME_DIRECTORY/heisenbridge-registration.yaml \
+          --owner @tlater:matrix.tlater.net \
+          'http://localhost:${toString cfg.settings.global.port}'
+      '';
+    in
+    {
+      description = "Matrix<->IRC bridge";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "conduit.service" ];

+      serviceConfig = {
+        Type = "simple";
+
+        LoadCredential = "heisenbridge:/run/secrets/heisenbridge";
+
+        ExecStart = execScript;
+
+        DynamicUser = true;
+        RuntimeDirectory = "heisenbridge";
+        RuntimeDirectoryMode = "0700";
+
+        RestrictNamespaces = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
+        LockPersonality = true;
+        RestrictRealtime = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        UMask = 0077;
+
+        # For the identd port
+        # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
+        # AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
+      };
+    };

   # Pass in the TURN secret via EnvironmentFile, not supported by
   # upstream module currently.
   #

@@ -249,6 +254,6 @@ in {
     ];
     # Other services store their data in conduit, so no other services
     # need to be shut down currently.
-    pauseServices = ["conduit.service"];
+    pauseServices = [ "conduit.service" ];
   };
 }

@@ -1,7 +1,7 @@
-{pkgs, ...}: {
+{ pkgs, ... }: {
   services.fail2ban = {
     enable = true;
-    extraPackages = [pkgs.ipset];
+    extraPackages = [ pkgs.ipset ];
    banaction = "iptables-ipset-proto6-allports";
    bantime-increment.enable = true;

@@ -21,7 +21,7 @@
   };

   # Allow metrics services to connect to the socket as well
-  users.groups.fail2ban = {};
+  users.groups.fail2ban = { };
   systemd.services.fail2ban.serviceConfig = {
     ExecStartPost =
       "+"

@@ -1,12 +1,13 @@
-{
-  lib,
-  config,
-  flake-inputs,
-  ...
-}: let
+{ lib
+, config
+, flake-inputs
+, ...
+}:
+let
   domain = "foundryvtt.${config.services.nginx.domain}";
-in {
-  imports = [flake-inputs.foundryvtt.nixosModules.foundryvtt];
+in
+{
+  imports = [ flake-inputs.foundryvtt.nixosModules.foundryvtt ];

   services.foundryvtt = {
     enable = true;

@@ -18,26 +19,28 @@ in {

   # Want to start it manually when I need it, not have it constantly
   # running
-  systemd.services.foundryvtt.wantedBy = lib.mkForce [];
+  systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];

-  services.nginx.virtualHosts."${domain}" = let
-    inherit (config.services.foundryvtt) port;
-  in {
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
-
-    locations."/" = {
-      proxyWebsockets = true;
-      proxyPass = "http://localhost:${toString port}";
-    };
-  };
+  services.nginx.virtualHosts."${domain}" =
+    let
+      inherit (config.services.foundryvtt) port;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
+
+      locations."/" = {
+        proxyWebsockets = true;
+        proxyPass = "http://localhost:${toString port}";
+      };
+    };

   services.backups.foundryvtt = {
     user = "foundryvtt";
     paths = [
       config.services.foundryvtt.dataDir
     ];
-    pauseServices = ["foundryvtt.service"];
+    pauseServices = [ "foundryvtt.service" ];
   };
 }

@@ -1,11 +1,12 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   domain = "gitea.${config.services.nginx.domain}";
-in {
+in
+{
   services.forgejo = {
     enable = true;
     database.type = "postgres";

@@ -27,33 +28,37 @@ in {
     };
   };

-  systemd.services.forgejo.serviceConfig.ExecStartPre = let
-    replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
-    secretPath = config.sops.secrets."forgejo/metrics-token".path;
-    runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
-  in [
-    "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
-  ];
+  systemd.services.forgejo.serviceConfig.ExecStartPre =
+    let
+      replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+      secretPath = config.sops.secrets."forgejo/metrics-token".path;
+      runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
+    in
+    [
+      "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
+    ];

   # Set up SSL
-  services.nginx.virtualHosts."${domain}" = let
-    httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
-    httpPort = config.services.forgejo.settings.server.HTTP_PORT;
-  in {
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
-
-    locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
-    locations."/metrics" = {
-      extraConfig = ''
-        access_log off;
-        allow 127.0.0.1;
-        ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
-        deny all;
-      '';
-    };
-  };
+  services.nginx.virtualHosts."${domain}" =
+    let
+      httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
+      httpPort = config.services.forgejo.settings.server.HTTP_PORT;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
+
+      locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
+      locations."/metrics" = {
+        extraConfig = ''
+          access_log off;
+          allow 127.0.0.1;
+          ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
+          deny all;
+        '';
+      };
+    };

   # Block repeated failed login attempts
   #

@@ -83,13 +88,13 @@ in {
       # Conf is backed up via nix
     ];
     preparation = {
-      packages = [config.services.postgresql.package];
+      packages = [ config.services.postgresql.package ];
       text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
     };
     cleanup = {
-      packages = [pkgs.coreutils];
+      packages = [ pkgs.coreutils ];
       text = "rm /var/lib/forgejo/forgejo-db.sql";
     };
-    pauseServices = ["forgejo.service"];
+    pauseServices = [ "forgejo.service" ];
   };
 }

@@ -1,25 +1,28 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
-}: let
-  yaml = pkgs.formats.yaml {};
-in {
+{ config
+, pkgs
+, lib
+, ...
+}:
+let
+  yaml = pkgs.formats.yaml { };
+in
+{
   services.prometheus = {
     exporters = {
       # Periodically check domain registration status
       domain = {
         enable = true;
         listenAddress = "127.0.0.1";
-        extraFlags = let
-          conf.domains = [
-            "tlater.net"
-            "tlater.com"
-          ];
-        in [
-          "--config=${yaml.generate "domains.yml" conf}"
-        ];
+        extraFlags =
+          let
+            conf.domains = [
+              "tlater.net"
+              "tlater.com"
+            ];
+          in
+          [
+            "--config=${yaml.generate "domains.yml" conf}"
+          ];
       };

       # System statistics

@@ -49,47 +52,50 @@ in {
         group = "nginx";

         settings.namespaces =
-          lib.mapAttrsToList (name: virtualHost: {
-            inherit name;
-            metrics_override.prefix = "nginxlog";
-            namespace_label = "vhost";
-
-            format = lib.concatStringsSep " " [
-              "$remote_addr - $remote_user [$time_local]"
-              ''"$request" $status $body_bytes_sent''
-              ''"$http_referer" "$http_user_agent"''
-              ''rt=$request_time uct="$upstream_connect_time"''
-              ''uht="$upstream_header_time" urt="$upstream_response_time"''
-            ];
-
-            source.files = [
-              "/var/log/nginx/${name}/access.log"
-            ];
-          })
-          config.services.nginx.virtualHosts;
+          lib.mapAttrsToList
+            (name: virtualHost: {
+              inherit name;
+              metrics_override.prefix = "nginxlog";
+              namespace_label = "vhost";
+
+              format = lib.concatStringsSep " " [
+                "$remote_addr - $remote_user [$time_local]"
+                ''"$request" $status $body_bytes_sent''
+                ''"$http_referer" "$http_user_agent"''
+                ''rt=$request_time uct="$upstream_connect_time"''
+                ''uht="$upstream_header_time" urt="$upstream_response_time"''
+              ];
+
+              source.files = [
+                "/var/log/nginx/${name}/access.log"
+              ];
+            })
+            config.services.nginx.virtualHosts;
       };
     };

     extraExporters = {
-      fail2ban = let
-        cfg = config.services.prometheus.extraExporters.fail2ban;
-      in {
-        port = 9191;
-        serviceOpts = {
-          after = ["fail2ban.service"];
-          requires = ["fail2ban.service"];
-          serviceConfig = {
-            Group = "fail2ban";
-            RestrictAddressFamilies = ["AF_UNIX" "AF_INET" "AF_INET6"];
-            ExecStart = lib.concatStringsSep " " [
-              "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
-              "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
-              "--web.listen-address='${cfg.listenAddress}:${toString cfg.port}'"
-              "--collector.f2b.exit-on-socket-connection-error=true"
-            ];
-          };
-        };
-      };
+      fail2ban =
+        let
+          cfg = config.services.prometheus.extraExporters.fail2ban;
+        in
+        {
+          port = 9191;
+          serviceOpts = {
+            after = [ "fail2ban.service" ];
+            requires = [ "fail2ban.service" ];
+            serviceConfig = {
+              Group = "fail2ban";
+              RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+              ExecStart = lib.concatStringsSep " " [
+                "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
+                "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
+                "--web.listen-address='${cfg.listenAddress}:${toString cfg.port}'"
+                "--collector.f2b.exit-on-socket-connection-error=true"
+              ];
+            };
+          };
+        };
     };

     # TODO(tlater):

@@ -1,6 +1,8 @@
-{config, ...}: let
+{ config, ... }:
+let
   domain = "metrics.${config.services.nginx.domain}";
-in {
+in
+{
   services.grafana = {
     enable = true;
     settings = {

@@ -1,12 +1,13 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   inherit (lib) types mkOption mkDefault;
-  yaml = pkgs.formats.yaml {};
-in {
+  yaml = pkgs.formats.yaml { };
+in
+{
   options = {
     services.prometheus = {
       extraExporters = mkOption {

@@ -31,11 +32,10 @@ in {
     };

     services.victoriametrics.scrapeConfigs = mkOption {
-      type = types.attrsOf (types.submodule ({
-        name,
-        self,
-        ...
-      }: {
+      type = types.attrsOf (types.submodule ({ name
+                                             , self
+                                             , ...
+                                             }: {
         options = {
           job_name = mkOption {
             type = types.str;

@@ -47,7 +47,7 @@ in {
            description = ''
              Other settings to set for this scrape config.
            '';
-            default = {};
+            default = { };
          };

          targets = mkOption {

@@ -57,11 +57,11 @@ in {

              Shortcut for `static_configs = lib.singleton {targets = [<targets>];}`
            '';
-            default = [];
+            default = [ ];
          };

          static_configs = mkOption {
-            default = [];
+            default = [ ];
            type = types.listOf (types.submodule {
              options = {
                targets = mkOption {

@@ -77,7 +77,7 @@ in {
                  description = lib.mdDoc ''
                    Labels to apply to all targets defined for this static config.
                  '';
-                  default = {};
+                  default = { };
                };
              };
            });

@@ -89,116 +89,125 @@ in {

   config = {
     systemd.services = lib.mkMerge [
-      (lib.mapAttrs' (name: exporter:
-        lib.nameValuePair "prometheus-${name}-exporter" (lib.mkMerge [
-          {
-            # Shamelessly copied from upstream because the upstream
-            # module is an intractable mess
-            wantedBy = ["multi-user.target"];
-            after = ["network.target"];
-            serviceConfig.Restart = mkDefault "always";
-            serviceConfig.PrivateTmp = mkDefault true;
-            serviceConfig.WorkingDirectory = mkDefault /tmp;
-            serviceConfig.DynamicUser = mkDefault true;
-            # Hardening
-            serviceConfig.CapabilityBoundingSet = mkDefault [""];
-            serviceConfig.DeviceAllow = [""];
-            serviceConfig.LockPersonality = true;
-            serviceConfig.MemoryDenyWriteExecute = true;
-            serviceConfig.NoNewPrivileges = true;
-            serviceConfig.PrivateDevices = mkDefault true;
-            serviceConfig.ProtectClock = mkDefault true;
-            serviceConfig.ProtectControlGroups = true;
-            serviceConfig.ProtectHome = true;
-            serviceConfig.ProtectHostname = true;