Use nixpkgs-fmt for formatting #114
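This PR mechanically reformats the repository's Nix files with nixpkgs-fmt; no expression changes meaning. The recurring differences visible in the hunks below are spaces inside list and attribute-set brackets, leading-comma function argument sets, and `let`/`in` bodies broken onto their own lines. As a rough sketch of the style change (an illustrative snippet, not a hunk from this diff):

    # before
    {pkgs, ...}: {
      environment.systemPackages = [pkgs.git];
    }

    # after (nixpkgs-fmt)
    { pkgs, ... }: {
      environment.systemPackages = [ pkgs.git ];
    }

Assuming nixpkgs-fmt is on PATH, the whole tree can be reformatted with `nixpkgs-fmt .`; if the flake exposes it as its `formatter` output, `nix fmt` does the same.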
|
@@ -1,10 +1,9 @@
-{
-  config,
-  pkgs,
-  lib,
-  modulesPath,
-  flake-inputs,
-  ...
-}: {
+{ config
+, pkgs
+, lib
+, modulesPath
+, flake-inputs
+, ...
+}: {
   imports = [
     flake-inputs.disko.nixosModules.disko
@@ -47,15 +46,15 @@
     '';

     # Enable remote builds from tlater
-    settings.trusted-users = ["@wheel"];
+    settings.trusted-users = [ "@wheel" ];
   };

   nixpkgs.config.allowUnfreePredicate = pkg:
-    builtins.elem (lib.getName pkg) ["steam-original" "steam-runtime" "steam-run" "steamcmd"];
+    builtins.elem (lib.getName pkg) [ "steam-original" "steam-runtime" "steam-run" "steamcmd" ];

   # Optimization for minecraft servers, see:
   # https://bugs.mojang.com/browse/MC-183518
-  boot.kernelParams = ["highres=off" "nohz=off"];
+  boot.kernelParams = [ "highres=off" "nohz=off" ];

   networking = {
     usePredictableInterfaceNames = false;
@@ -106,15 +105,15 @@

   users.users.tlater = {
     isNormalUser = true;
-    extraGroups = ["wheel"];
-    openssh.authorizedKeys.keyFiles = [../keys/tlater.pub];
+    extraGroups = [ "wheel" ];
+    openssh.authorizedKeys.keyFiles = [ ../keys/tlater.pub ];
   };

   services = {
     openssh = {
       enable = true;
       allowSFTP = false;
-      ports = [2222];
+      ports = [ 2222 ];
       startWhenNeeded = true;

       settings = {
@@ -133,14 +132,14 @@
     pam = {
       sshAgentAuth = {
         enable = true;
-        authorizedKeysFiles = ["/etc/ssh/authorized_keys.d/%u"];
+        authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ];
       };
       services.sudo.sshAgentAuth = true;
     };
   };

   # Remove some unneeded packages
-  environment.defaultPackages = [];
+  environment.defaultPackages = [ ];

   system.stateVersion = "20.09";
 }
@@ -8,7 +8,7 @@
   # disables it by default.
   #
   # TODO(tlater): See if would be useful for anything?
-  boot.kernelParams = ["nosgx"];
+  boot.kernelParams = [ "nosgx" ];

   networking.hostName = "hetzner-1";
   services.nginx.domain = "tlater.net";
@@ -1,82 +1,84 @@
 {
-  disko.devices.disk = let
-    bootPartition = {
-      size = "1M";
-      type = "EF02";
-    };
-
-    swapPartition = {
-      # 8G is apparently recommended for this much RAM, but we set up
-      # 4G on both disks for mirroring purposes.
-      #
-      # That'll still be 8G during normal operation, and it's probably
-      # not too bad to have slightly less swap if a disk dies.
-      size = "4G";
-      content = {
-        type = "swap";
-        randomEncryption = true;
-      };
-    };
-
-    mountOptions = ["compress=zstd" "noatime"];
-  in {
-    sda = {
-      type = "disk";
-      device = "/dev/sda";
-      content = {
-        type = "gpt";
-        partitions = {
-          boot = bootPartition;
-          swap = swapPartition;
-
-          disk1 = {
-            size = "100%";
-            # Empty partition to combine in RAID0 with the other disk
-          };
-        };
-      };
-    };
-
-    sdb = {
-      type = "disk";
-      device = "/dev/sdb";
-      content = {
-        type = "gpt";
-        partitions = {
-          boot = bootPartition;
-          swap = swapPartition;
-
-          disk2 = {
-            size = "100%";
-            content = {
-              type = "btrfs";
-              # Hack to get multi-device btrfs going
-              # See https://github.com/nix-community/disko/issues/99
-              extraArgs = ["-d" "raid1" "-m" "raid1" "--runtime-features" "quota" "/dev/sda3"];
-              subvolumes = {
-                "/volume" = {};
-                "/volume/root" = {
-                  inherit mountOptions;
-                  mountpoint = "/";
-                };
-                "/volume/home" = {
-                  inherit mountOptions;
-                  mountpoint = "/home";
-                };
-                "/volume/var" = {
-                  inherit mountOptions;
-                  mountpoint = "/var";
-                };
-                "/volume/nix-store" = {
-                  inherit mountOptions;
-                  mountpoint = "/nix";
-                };
-                "/snapshots" = {};
-              };
-            };
-          };
-        };
-      };
-    };
-  };
+  disko.devices.disk =
+    let
+      bootPartition = {
+        size = "1M";
+        type = "EF02";
+      };
+
+      swapPartition = {
+        # 8G is apparently recommended for this much RAM, but we set up
+        # 4G on both disks for mirroring purposes.
+        #
+        # That'll still be 8G during normal operation, and it's probably
+        # not too bad to have slightly less swap if a disk dies.
+        size = "4G";
+        content = {
+          type = "swap";
+          randomEncryption = true;
+        };
+      };
+
+      mountOptions = [ "compress=zstd" "noatime" ];
+    in
+    {
+      sda = {
+        type = "disk";
+        device = "/dev/sda";
+        content = {
+          type = "gpt";
+          partitions = {
+            boot = bootPartition;
+            swap = swapPartition;
+
+            disk1 = {
+              size = "100%";
+              # Empty partition to combine in RAID0 with the other disk
+            };
+          };
+        };
+      };
+
+      sdb = {
+        type = "disk";
+        device = "/dev/sdb";
+        content = {
+          type = "gpt";
+          partitions = {
+            boot = bootPartition;
+            swap = swapPartition;
+
+            disk2 = {
+              size = "100%";
+              content = {
+                type = "btrfs";
+                # Hack to get multi-device btrfs going
+                # See https://github.com/nix-community/disko/issues/99
+                extraArgs = [ "-d" "raid1" "-m" "raid1" "--runtime-features" "quota" "/dev/sda3" ];
+                subvolumes = {
+                  "/volume" = { };
+                  "/volume/root" = {
+                    inherit mountOptions;
+                    mountpoint = "/";
+                  };
+                  "/volume/home" = {
+                    inherit mountOptions;
+                    mountpoint = "/home";
+                  };
+                  "/volume/var" = {
+                    inherit mountOptions;
+                    mountpoint = "/var";
+                  };
+                  "/volume/nix-store" = {
+                    inherit mountOptions;
+                    mountpoint = "/nix";
+                  };
+                  "/snapshots" = { };
+                };
+              };
+            };
+          };
+        };
+      };
+    };
 }
@@ -1,8 +1,8 @@
-{lib, ...}: {
+{ lib, ... }: {
   users.users.tlater.password = "insecure";

   # Disable graphical tty so -curses works
-  boot.kernelParams = ["nomodeset"];
+  boot.kernelParams = [ "nomodeset" ];

   networking.hostName = "testvm";
   # Sets the base domain for nginx to a local domain so that we can
@@ -1,7 +1,6 @@
-{
-  config,
-  lib,
-  ...
-}: {
+{ config
+, lib
+, ...
+}: {
   services.nginx = {
     enable = true;
@@ -27,31 +26,33 @@
     # Override the default, just keep fewer logs
     nginx.rotate = 6;
   }
-  // lib.mapAttrs' (virtualHost: _:
-    lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
-      frequency = "daily";
-      rotate = 2;
-      compress = true;
-      delaycompress = true;
-      su = "${config.services.nginx.user} ${config.services.nginx.group}";
-      postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
-    })
-  config.services.nginx.virtualHosts;
+  // lib.mapAttrs'
+    (virtualHost: _:
+      lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
+        frequency = "daily";
+        rotate = 2;
+        compress = true;
+        delaycompress = true;
+        su = "${config.services.nginx.user} ${config.services.nginx.group}";
+        postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
+      })
+    config.services.nginx.virtualHosts;

   systemd.tmpfiles.rules =
-    lib.mapAttrsToList (
-      virtualHost: _:
-      #
-        "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
-    )
-    config.services.nginx.virtualHosts;
+    lib.mapAttrsToList
+      (
+        virtualHost: _:
+        #
+        "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
+      )
+      config.services.nginx.virtualHosts;

   security.acme = {
     defaults.email = "tm@tlater.net";
     acceptTerms = true;

     certs."tlater.net" = {
-      extraDomainNames = ["*.tlater.net"];
+      extraDomainNames = [ "*.tlater.net" ];
       dnsProvider = "hetzner";
       group = "nginx";
       credentialFiles."HETZNER_API_KEY_FILE" = config.sops.secrets."hetzner-api".path;
@@ -62,6 +63,6 @@
     user = "acme";
     paths =
       lib.mapAttrsToList (virtualHost: _: "/var/lib/acme/${virtualHost}")
       config.services.nginx.virtualHosts;
   };
 }
@@ -1,12 +1,11 @@
-{
-  pkgs,
-  config,
-  ...
-}: {
+{ pkgs
+, config
+, ...
+}: {
   systemd.services.afvalcalendar = {
     description = "Enschede afvalcalendar -> ical converter";
-    wantedBy = ["multi-user.target"];
-    after = ["network.target"];
+    wantedBy = [ "multi-user.target" ];
+    after = [ "network.target" ];

     script = ''
       ${pkgs.local.afvalcalendar}/bin/afvalcalendar > /srv/afvalcalendar/afvalcalendar.ical
@@ -26,14 +25,14 @@
       ProtectKernelModules = true;
       ProtectKernelLogs = true;
       ProtectControlGroups = true;
-      RestrictAddressFamilies = ["AF_UNIX" "AF_INET" "AF_INET6"];
+      RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
       RestrictNamespaces = true;
       LockPersonality = true;
       MemoryDenyWriteExecute = true;
       RestrictRealtime = true;
       RestrictSUIDSGID = true;
       SystemCallArchitectures = "native";
-      SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"];
+      SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ];

       Umask = 0002;
       SupplementaryGroups = "afvalcalendar-hosting";
@@ -50,7 +49,7 @@
     root = "/srv/afvalcalendar";
   };

-  users.groups.afvalcalendar-hosting = {};
+  users.groups.afvalcalendar-hosting = { };
   systemd.tmpfiles.settings."10-afvalcalendar" = {
     "/srv/afvalcalendar".d = {
       user = "nginx";
@@ -1,9 +1,9 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
-}: let
+{ config
+, pkgs
+, lib
+, ...
+}:
+let
   inherit (lib) types optional singleton;
   mkShutdownScript = service:
     pkgs.writeShellScript "backup-${service}-shutdown" ''
@@ -42,17 +42,17 @@
     RESTIC_REPOSITORY = "rclone:storagebox:backups";
     RCLONE_CONFIG = rcloneConfig;
   };
-in {
+in
+{
   options = {
     services.backups = lib.mkOption {
       description = lib.mdDoc ''
         Configure restic backups with a specific tag.
       '';
-      type = types.attrsOf (types.submodule ({
-        config,
-        name,
-        ...
-      }: {
+      type = types.attrsOf (types.submodule ({ config
+                                             , name
+                                             , ...
+                                             }: {
         options = {
           user = lib.mkOption {
             type = types.str;
@@ -76,7 +76,7 @@ in {
           preparation = {
             packages = lib.mkOption {
               type = types.listOf types.package;
-              default = [];
+              default = [ ];
               description = ''
                 The list of packages to make available in the
                 preparation script.
@@ -97,7 +97,7 @@ in {
           cleanup = {
             packages = lib.mkOption {
               type = types.listOf types.package;
-              default = [];
+              default = [ ];
               description = ''
                 The list of packages to make available in the
                 cleanup script.
@@ -116,7 +116,7 @@ in {
           };
           pauseServices = lib.mkOption {
             type = types.listOf types.str;
-            default = [];
+            default = [ ];
             description = ''
               The systemd services that need to be shut down before
               the backup can run. Services will be restarted after the
@@ -131,7 +131,7 @@ in {
       };
     };

-  config = lib.mkIf (config.services.backups != {}) {
+  config = lib.mkIf (config.services.backups != { }) {
     systemd.services =
       {
         restic-prune = {
@@ -164,79 +164,81 @@ in {
          };
        };
      }
-      // lib.mapAttrs' (name: backup:
-        lib.nameValuePair "backup-${name}" {
-          # Don't want to restart mid-backup
-          restartIfChanged = false;
-
-          environment =
-            resticEnv
-            // {
-              RESTIC_CACHE_DIR = "%C/backup-${name}";
-            };
-
-          path = with pkgs; [
-            coreutils
-            openssh
-            rclone
-            restic
-          ];
-
-          # TODO(tlater): If I ever add more than one repo, service
-          # shutdown/restarting will potentially break if multiple
-          # backups for the same service overlap. A more clever
-          # sentinel file with reference counts would probably solve
-          # this.
-          serviceConfig = {
-            User = backup.user;
-            Group = "backup";
-            RuntimeDirectory = "backup-${name}";
-            CacheDirectory = "backup-${name}";
-            CacheDirectoryMode = "0700";
-            PrivateTmp = true;
-
-            ExecStart = [
-              (lib.concatStringsSep " " (["${pkgs.restic}/bin/restic" "backup" "--tag" name] ++ backup.paths))
-            ];
-
-            ExecStartPre =
-              map (service: "+${mkShutdownScript service}") backup.pauseServices
-              ++ singleton (writeScript "backup-${name}-repo-init" [] ''
-                restic snapshots || restic init
-              '')
-              ++ optional (backup.preparation.text != null)
-              (writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);
-
-            # TODO(tlater): Add repo pruning/checking
-            ExecStopPost =
-              map (service: "+${mkRestartScript service}") backup.pauseServices
-              ++ optional (backup.cleanup.text != null)
-              (writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
-          };
-        })
-      config.services.backups;
+      // lib.mapAttrs'
+        (name: backup:
+          lib.nameValuePair "backup-${name}" {
+            # Don't want to restart mid-backup
+            restartIfChanged = false;
+
+            environment =
+              resticEnv
+              // {
+                RESTIC_CACHE_DIR = "%C/backup-${name}";
+              };
+
+            path = with pkgs; [
+              coreutils
+              openssh
+              rclone
+              restic
+            ];
+
+            # TODO(tlater): If I ever add more than one repo, service
+            # shutdown/restarting will potentially break if multiple
+            # backups for the same service overlap. A more clever
+            # sentinel file with reference counts would probably solve
+            # this.
+            serviceConfig = {
+              User = backup.user;
+              Group = "backup";
+              RuntimeDirectory = "backup-${name}";
+              CacheDirectory = "backup-${name}";
+              CacheDirectoryMode = "0700";
+              PrivateTmp = true;
+
+              ExecStart = [
+                (lib.concatStringsSep " " ([ "${pkgs.restic}/bin/restic" "backup" "--tag" name ] ++ backup.paths))
+              ];
+
+              ExecStartPre =
+                map (service: "+${mkShutdownScript service}") backup.pauseServices
+                ++ singleton (writeScript "backup-${name}-repo-init" [ ] ''
+                  restic snapshots || restic init
+                '')
+                ++ optional (backup.preparation.text != null)
+                  (writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);
+
+              # TODO(tlater): Add repo pruning/checking
+              ExecStopPost =
+                map (service: "+${mkRestartScript service}") backup.pauseServices
+                ++ optional (backup.cleanup.text != null)
+                  (writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
+            };
+          })
+        config.services.backups;

     systemd.timers =
       {
         restic-prune = {
-          wantedBy = ["timers.target"];
+          wantedBy = [ "timers.target" ];
           timerConfig.OnCalendar = "Thursday 03:00:00 UTC";
           # Don't make this persistent, in case the server was offline
           # for a while. This job cannot run at the same time as any
           # of the backup jobs.
         };
       }
-      // lib.mapAttrs' (name: backup:
-        lib.nameValuePair "backup-${name}" {
-          wantedBy = ["timers.target"];
-          timerConfig = {
-            OnCalendar = "Wednesday 02:30:00 UTC";
-            RandomizedDelaySec = "1h";
-            FixedRandomDelay = true;
-            Persistent = true;
-          };
-        })
-      config.services.backups;
+      // lib.mapAttrs'
+        (name: backup:
+          lib.nameValuePair "backup-${name}" {
+            wantedBy = [ "timers.target" ];
+            timerConfig = {
+              OnCalendar = "Wednesday 02:30:00 UTC";
+              RandomizedDelaySec = "1h";
+              FixedRandomDelay = true;
+              Persistent = true;
+            };
+          })
+        config.services.backups;

     users = {
       # This user is only used to own the ssh key, because apparently
@@ -245,7 +247,7 @@ in {
         group = "backup";
         isSystemUser = true;
       };
-      groups.backup = {};
+      groups.backup = { };
     };
   };
 }
@@ -1,7 +1,6 @@
-{
-  config,
-  flake-inputs,
-  ...
-}: {
+{ config
+, flake-inputs
+, ...
+}: {
   imports = [
     flake-inputs.sonnenshift.nixosModules.default
@@ -1,15 +1,16 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   inherit (lib.strings) concatMapStringsSep;

   cfg = config.services.matrix-conduit;
   domain = "matrix.${config.services.nginx.domain}";
   turn-realm = "turn.${config.services.nginx.domain}";
-in {
+in
+{
   services.matrix-conduit = {
     enable = true;
     settings.global = {
@@ -17,99 +18,103 @@ in {
       server_name = domain;
       database_backend = "rocksdb";

-      turn_uris = let
-        address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
-        tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
-      in [
-        "turn:${address}?transport=udp"
-        "turn:${address}?transport=tcp"
-        "turns:${tls-address}?transport=udp"
-        "turns:${tls-address}?transport=tcp"
-      ];
+      turn_uris =
+        let
+          address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
+          tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
+        in
+        [
+          "turn:${address}?transport=udp"
+          "turn:${address}?transport=tcp"
+          "turns:${tls-address}?transport=udp"
+          "turns:${tls-address}?transport=tcp"
+        ];
     };
   };

-  systemd.services.heisenbridge = let
-    replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
-    registrationFile = builtins.toFile "heisenbridge-registration.yaml" (builtins.toJSON {
-      id = "heisenbridge";
-      url = "http://127.0.0.1:9898";
-      as_token = "@AS_TOKEN@";
-      hs_token = "@HS_TOKEN@";
-      rate_limited = false;
-      sender_localpart = "heisenbridge";
-      namespaces = {
-        users = [
-          {
-            regex = "@irc_.*";
-            exclusive = true;
-          }
-          {
-            regex = "@heisenbridge:.*";
-            exclusive = true;
-          }
-        ];
-        aliases = [];
-        rooms = [];
-      };
-    });
-
-    # TODO(tlater): Starting with systemd 253 it will become possible
-    # to do the credential setup as part of ExecStartPre/preStart
-    # instead.
-    #
-    # This will also make it possible to actually set caps on the
-    # heisenbridge process using systemd, so that we can run the
-    # identd process.
-    execScript = pkgs.writeShellScript "heisenbridge" ''
-      cp ${registrationFile} "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      chmod 600 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
-      ${replaceSecretBin} '@AS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_as-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      ${replaceSecretBin} '@HS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_hs-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      chmod 400 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
-
-      ${pkgs.heisenbridge}/bin/heisenbridge \
-        --config $RUNTIME_DIRECTORY/heisenbridge-registration.yaml \
-        --owner @tlater:matrix.tlater.net \
-        'http://localhost:${toString cfg.settings.global.port}'
-    '';
-  in {
-    description = "Matrix<->IRC bridge";
-    wantedBy = ["multi-user.target"];
-    after = ["conduit.service"];
-
-    serviceConfig = {
-      Type = "simple";
-
-      LoadCredential = "heisenbridge:/run/secrets/heisenbridge";
-
-      ExecStart = execScript;
-
-      DynamicUser = true;
-      RuntimeDirectory = "heisenbridge";
-      RuntimeDirectoryMode = "0700";
-
-      RestrictNamespaces = true;
-      PrivateUsers = true;
-      ProtectHostname = true;
-      ProtectClock = true;
-      ProtectKernelTunables = true;
-      ProtectKernelModules = true;
-      ProtectKernelLogs = true;
-      ProtectControlGroups = true;
-      RestrictAddressFamilies = ["AF_INET AF_INET6"];
-      LockPersonality = true;
-      RestrictRealtime = true;
-      ProtectProc = "invisible";
-      ProcSubset = "pid";
-      UMask = 0077;
-
-      # For the identd port
-      # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
-      # AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
-    };
-  };
+  systemd.services.heisenbridge =
+    let
+      replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+      registrationFile = builtins.toFile "heisenbridge-registration.yaml" (builtins.toJSON {
+        id = "heisenbridge";
+        url = "http://127.0.0.1:9898";
+        as_token = "@AS_TOKEN@";
+        hs_token = "@HS_TOKEN@";
+        rate_limited = false;
+        sender_localpart = "heisenbridge";
+        namespaces = {
+          users = [
+            {
+              regex = "@irc_.*";
+              exclusive = true;
+            }
+            {
+              regex = "@heisenbridge:.*";
+              exclusive = true;
+            }
+          ];
+          aliases = [ ];
+          rooms = [ ];
+        };
+      });
+
+      # TODO(tlater): Starting with systemd 253 it will become possible
+      # to do the credential setup as part of ExecStartPre/preStart
+      # instead.
+      #
+      # This will also make it possible to actually set caps on the
+      # heisenbridge process using systemd, so that we can run the
+      # identd process.
+      execScript = pkgs.writeShellScript "heisenbridge" ''
+        cp ${registrationFile} "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        chmod 600 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
+        ${replaceSecretBin} '@AS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_as-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        ${replaceSecretBin} '@HS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_hs-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        chmod 400 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
+
+        ${pkgs.heisenbridge}/bin/heisenbridge \
+          --config $RUNTIME_DIRECTORY/heisenbridge-registration.yaml \
+          --owner @tlater:matrix.tlater.net \
+          'http://localhost:${toString cfg.settings.global.port}'
+      '';
+    in
+    {
+      description = "Matrix<->IRC bridge";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "conduit.service" ];
+
+      serviceConfig = {
+        Type = "simple";
+
+        LoadCredential = "heisenbridge:/run/secrets/heisenbridge";
+
+        ExecStart = execScript;
+
+        DynamicUser = true;
+        RuntimeDirectory = "heisenbridge";
+        RuntimeDirectoryMode = "0700";
+
+        RestrictNamespaces = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
+        LockPersonality = true;
+        RestrictRealtime = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        UMask = 0077;
+
+        # For the identd port
+        # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
+        # AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
+      };
+    };

   # Pass in the TURN secret via EnvironmentFile, not supported by
   # upstream module currently.
   #
@@ -249,6 +254,6 @@ in {
     ];
     # Other services store their data in conduit, so no other services
     # need to be shut down currently.
-    pauseServices = ["conduit.service"];
+    pauseServices = [ "conduit.service" ];
   };
 }
@@ -1,7 +1,7 @@
-{pkgs, ...}: {
+{ pkgs, ... }: {
   services.fail2ban = {
     enable = true;
-    extraPackages = [pkgs.ipset];
+    extraPackages = [ pkgs.ipset ];
     banaction = "iptables-ipset-proto6-allports";
     bantime-increment.enable = true;

@@ -21,7 +21,7 @@
   };

   # Allow metrics services to connect to the socket as well
-  users.groups.fail2ban = {};
+  users.groups.fail2ban = { };
   systemd.services.fail2ban.serviceConfig = {
     ExecStartPost =
       "+"
@@ -1,12 +1,13 @@
-{
-  lib,
-  config,
-  flake-inputs,
-  ...
-}: let
+{ lib
+, config
+, flake-inputs
+, ...
+}:
+let
   domain = "foundryvtt.${config.services.nginx.domain}";
-in {
-  imports = [flake-inputs.foundryvtt.nixosModules.foundryvtt];
+in
+{
+  imports = [ flake-inputs.foundryvtt.nixosModules.foundryvtt ];

   services.foundryvtt = {
     enable = true;
@@ -18,26 +19,28 @@ in {

   # Want to start it manually when I need it, not have it constantly
   # running
-  systemd.services.foundryvtt.wantedBy = lib.mkForce [];
+  systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];

-  services.nginx.virtualHosts."${domain}" = let
-    inherit (config.services.foundryvtt) port;
-  in {
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
-
-    locations."/" = {
-      proxyWebsockets = true;
-      proxyPass = "http://localhost:${toString port}";
-    };
-  };
+  services.nginx.virtualHosts."${domain}" =
+    let
+      inherit (config.services.foundryvtt) port;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
+
+      locations."/" = {
+        proxyWebsockets = true;
+        proxyPass = "http://localhost:${toString port}";
+      };
+    };

   services.backups.foundryvtt = {
     user = "foundryvtt";
     paths = [
       config.services.foundryvtt.dataDir
     ];
-    pauseServices = ["foundryvtt.service"];
+    pauseServices = [ "foundryvtt.service" ];
   };
 }
@@ -1,11 +1,12 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   domain = "gitea.${config.services.nginx.domain}";
-in {
+in
+{
   services.forgejo = {
     enable = true;
     database.type = "postgres";
@@ -27,33 +28,37 @@ in {
     };
   };

-  systemd.services.forgejo.serviceConfig.ExecStartPre = let
-    replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
-    secretPath = config.sops.secrets."forgejo/metrics-token".path;
-    runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
-  in [
-    "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
-  ];
+  systemd.services.forgejo.serviceConfig.ExecStartPre =
+    let
+      replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+      secretPath = config.sops.secrets."forgejo/metrics-token".path;
+      runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
+    in
+    [
+      "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
+    ];

   # Set up SSL
-  services.nginx.virtualHosts."${domain}" = let
-    httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
-    httpPort = config.services.forgejo.settings.server.HTTP_PORT;
-  in {
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
-
-    locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
-    locations."/metrics" = {
-      extraConfig = ''
-        access_log off;
-        allow 127.0.0.1;
-        ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
-        deny all;
-      '';
-    };
-  };
+  services.nginx.virtualHosts."${domain}" =
+    let
+      httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
+      httpPort = config.services.forgejo.settings.server.HTTP_PORT;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
+
+      locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
+      locations."/metrics" = {
+        extraConfig = ''
+          access_log off;
+          allow 127.0.0.1;
+          ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
+          deny all;
+        '';
+      };
+    };

   # Block repeated failed login attempts
   #
@@ -83,13 +88,13 @@ in {
       # Conf is backed up via nix
     ];
     preparation = {
-      packages = [config.services.postgresql.package];
+      packages = [ config.services.postgresql.package ];
       text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
     };
     cleanup = {
-      packages = [pkgs.coreutils];
+      packages = [ pkgs.coreutils ];
       text = "rm /var/lib/forgejo/forgejo-db.sql";
     };
-    pauseServices = ["forgejo.service"];
+    pauseServices = [ "forgejo.service" ];
   };
 }
@@ -1,25 +1,28 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
-}: let
-  yaml = pkgs.formats.yaml {};
-in {
+{ config
+, pkgs
+, lib
+, ...
+}:
+let
+  yaml = pkgs.formats.yaml { };
+in
+{
   services.prometheus = {
     exporters = {
       # Periodically check domain registration status
       domain = {
         enable = true;
         listenAddress = "127.0.0.1";
-        extraFlags = let
-          conf.domains = [
-            "tlater.net"
-            "tlater.com"
-          ];
-        in [
-          "--config=${yaml.generate "domains.yml" conf}"
-        ];
+        extraFlags =
+          let
+            conf.domains = [
+              "tlater.net"
+              "tlater.com"
+            ];
+          in
+          [
+            "--config=${yaml.generate "domains.yml" conf}"
+          ];
       };

       # System statistics
@@ -49,47 +52,50 @@ in {
         group = "nginx";

         settings.namespaces =
-          lib.mapAttrsToList (name: virtualHost: {
-            inherit name;
-            metrics_override.prefix = "nginxlog";
-            namespace_label = "vhost";
-
-            format = lib.concatStringsSep " " [
-              "$remote_addr - $remote_user [$time_local]"
-              ''"$request" $status $body_bytes_sent''
-              ''"$http_referer" "$http_user_agent"''
-              ''rt=$request_time uct="$upstream_connect_time"''
-              ''uht="$upstream_header_time" urt="$upstream_response_time"''
-            ];
-
-            source.files = [
-              "/var/log/nginx/${name}/access.log"
-            ];
-          })
-          config.services.nginx.virtualHosts;
+          lib.mapAttrsToList
+            (name: virtualHost: {
+              inherit name;
+              metrics_override.prefix = "nginxlog";
+              namespace_label = "vhost";
+
+              format = lib.concatStringsSep " " [
+                "$remote_addr - $remote_user [$time_local]"
+                ''"$request" $status $body_bytes_sent''
+                ''"$http_referer" "$http_user_agent"''
+                ''rt=$request_time uct="$upstream_connect_time"''
+                ''uht="$upstream_header_time" urt="$upstream_response_time"''
+              ];
+
+              source.files = [
+                "/var/log/nginx/${name}/access.log"
+              ];
+            })
+            config.services.nginx.virtualHosts;
       };
     };

     extraExporters = {
-      fail2ban = let
-        cfg = config.services.prometheus.extraExporters.fail2ban;
-      in {
-        port = 9191;
-        serviceOpts = {
-          after = ["fail2ban.service"];
-          requires = ["fail2ban.service"];
-          serviceConfig = {
-            Group = "fail2ban";
-            RestrictAddressFamilies = ["AF_UNIX" "AF_INET" "AF_INET6"];
-            ExecStart = lib.concatStringsSep " " [
-              "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
-              "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
-              "--web.listen-address='${cfg.listenAddress}:${toString cfg.port}'"
-              "--collector.f2b.exit-on-socket-connection-error=true"
-            ];
-          };
-        };
-      };
+      fail2ban =
+        let
+          cfg = config.services.prometheus.extraExporters.fail2ban;
+        in
+        {
+          port = 9191;
+          serviceOpts = {
+            after = [ "fail2ban.service" ];
+            requires = [ "fail2ban.service" ];
+            serviceConfig = {
+              Group = "fail2ban";
+              RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+              ExecStart = lib.concatStringsSep " " [
+                "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
+                "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
+                "--web.listen-address='${cfg.listenAddress}:${toString cfg.port}'"
+                "--collector.f2b.exit-on-socket-connection-error=true"
+              ];
+            };
+          };
+        };
     };
   };

   # TODO(tlater):
@@ -1,6 +1,8 @@
-{config, ...}: let
+{ config, ... }:
+let
   domain = "metrics.${config.services.nginx.domain}";
-in {
+in
+{
   services.grafana = {
     enable = true;
     settings = {
@@ -1,12 +1,13 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   inherit (lib) types mkOption mkDefault;
-  yaml = pkgs.formats.yaml {};
-in {
+  yaml = pkgs.formats.yaml { };
+in
+{
   options = {
     services.prometheus = {
       extraExporters = mkOption {
@@ -31,11 +32,10 @@ in {
       };

       services.victoriametrics.scrapeConfigs = mkOption {
-        type = types.attrsOf (types.submodule ({
-          name,
-          self,
-          ...
-        }: {
+        type = types.attrsOf (types.submodule ({ name
+                                               , self
+                                               , ...
+                                               }: {
           options = {
             job_name = mkOption {
               type = types.str;
@@ -47,7 +47,7 @@ in {
             description = ''
               Other settings to set for this scrape config.
             '';
-            default = {};
+            default = { };
           };

           targets = mkOption {
@@ -57,11 +57,11 @@ in {

             Shortcut for `static_configs = lib.singleton {targets = [<targets>];}`
             '';
-            default = [];
+            default = [ ];
           };

           static_configs = mkOption {
-            default = [];
+            default = [ ];
             type = types.listOf (types.submodule {
               options = {
                 targets = mkOption {
@@ -77,7 +77,7 @@ in {
                   description = lib.mdDoc ''
                     Labels to apply to all targets defined for this static config.
                   '';
-                  default = {};
+                  default = { };
                 };
               };
             });
@@ -89,116 +89,125 @@ in {

   config = {
     systemd.services = lib.mkMerge [
-      (lib.mapAttrs' (name: exporter:
-        lib.nameValuePair "prometheus-${name}-exporter" (lib.mkMerge [
-          {
-            # Shamelessly copied from upstream because the upstream
-            # module is an intractable mess
-            wantedBy = ["multi-user.target"];
-            after = ["network.target"];
-            serviceConfig.Restart = mkDefault "always";
-            serviceConfig.PrivateTmp = mkDefault true;
-            serviceConfig.WorkingDirectory = mkDefault /tmp;
-            serviceConfig.DynamicUser = mkDefault true;
-            # Hardening
-            serviceConfig.CapabilityBoundingSet = mkDefault [""];
-            serviceConfig.DeviceAllow = [""];
-            serviceConfig.LockPersonality = true;
-            serviceConfig.MemoryDenyWriteExecute = true;
-            serviceConfig.NoNewPrivileges = true;
-            serviceConfig.PrivateDevices = mkDefault true;
-            serviceConfig.ProtectClock = mkDefault true;
-            serviceConfig.ProtectControlGroups = true;
-            serviceConfig.ProtectHome = true;
-            serviceConfig.ProtectHostname = true;
-            serviceConfig.ProtectKernelLogs = true;
-            serviceConfig.ProtectKernelModules = true;
-            serviceConfig.ProtectKernelTunables = true;
-            serviceConfig.ProtectSystem = mkDefault "strict";
-            serviceConfig.RemoveIPC = true;
-            serviceConfig.RestrictAddressFamilies = ["AF_INET" "AF_INET6"];
-            serviceConfig.RestrictNamespaces = true;
-            serviceConfig.RestrictRealtime = true;
-            serviceConfig.RestrictSUIDSGID = true;
-            serviceConfig.SystemCallArchitectures = "native";
-            serviceConfig.UMask = "0077";
-          }
-          exporter.serviceOpts
-        ]))
-      config.services.prometheus.extraExporters)
+      (lib.mapAttrs'
+        (name: exporter:
+          lib.nameValuePair "prometheus-${name}-exporter" (lib.mkMerge [
+            {
+              # Shamelessly copied from upstream because the upstream
+              # module is an intractable mess
+              wantedBy = [ "multi-user.target" ];
+              after = [ "network.target" ];
+              serviceConfig.Restart = mkDefault "always";
+              serviceConfig.PrivateTmp = mkDefault true;
+              serviceConfig.WorkingDirectory = mkDefault /tmp;
+              serviceConfig.DynamicUser = mkDefault true;
+              # Hardening
+              serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
+              serviceConfig.DeviceAllow = [ "" ];
+              serviceConfig.LockPersonality = true;
+              serviceConfig.MemoryDenyWriteExecute = true;
+              serviceConfig.NoNewPrivileges = true;
+              serviceConfig.PrivateDevices = mkDefault true;
+              serviceConfig.ProtectClock = mkDefault true;
+              serviceConfig.ProtectControlGroups = true;
+              serviceConfig.ProtectHome = true;
+              serviceConfig.ProtectHostname = true;
+              serviceConfig.ProtectKernelLogs = true;
+              serviceConfig.ProtectKernelModules = true;
+              serviceConfig.ProtectKernelTunables = true;
+              serviceConfig.ProtectSystem = mkDefault "strict";
+              serviceConfig.RemoveIPC = true;
+              serviceConfig.RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+              serviceConfig.RestrictNamespaces = true;
+              serviceConfig.RestrictRealtime = true;
+              serviceConfig.RestrictSUIDSGID = true;
+              serviceConfig.SystemCallArchitectures = "native";
+              serviceConfig.UMask = "0077";
+            }
+            exporter.serviceOpts
+          ]))
+        config.services.prometheus.extraExporters)

       {
-        vmagent-scrape-exporters = let
-          listenAddress = config.services.victoriametrics.listenAddress;
-          vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
-          promscrape = yaml.generate "prometheus.yml" {
-            scrape_configs = lib.mapAttrsToList (_: scrape:
-              lib.recursiveUpdate {
-                inherit (scrape) job_name;
-                static_configs =
-                  scrape.static_configs
-                  ++ lib.optional (scrape.targets != []) {targets = scrape.targets;};
-              }
-              scrape.extraSettings)
-            config.services.victoriametrics.scrapeConfigs;
-          };
-        in {
-          enable = true;
-          path = [pkgs.victoriametrics];
-          wantedBy = ["multi-user.target"];
-          after = ["network.target" "victoriametrics.service"];
-          serviceConfig = {
-            ExecStart = [
-              (lib.concatStringsSep " " [
-                "${pkgs.victoriametrics}/bin/vmagent"
-                "-promscrape.config=${promscrape}"
-                "-remoteWrite.url=http://${vmAddr}/api/v1/write"
-                "-remoteWrite.tmpDataPath=%t/vmagent"
-              ])
-            ];
-            SupplementaryGroups = "metrics";
+        vmagent-scrape-exporters =
+          let
+            listenAddress = config.services.victoriametrics.listenAddress;
+            vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
+            promscrape = yaml.generate "prometheus.yml" {
+              scrape_configs = lib.mapAttrsToList
+                (_: scrape:
+                  lib.recursiveUpdate
+                    {
+                      inherit (scrape) job_name;
+                      static_configs =
+                        scrape.static_configs
+                        ++ lib.optional (scrape.targets != [ ]) { targets = scrape.targets; };
+                    }
+                    scrape.extraSettings)
+                config.services.victoriametrics.scrapeConfigs;
+            };
+          in
+          {
+            enable = true;
+            path = [ pkgs.victoriametrics ];
+            wantedBy = [ "multi-user.target" ];
+            after = [ "network.target" "victoriametrics.service" ];
+            serviceConfig = {
+              ExecStart = [
+                (lib.concatStringsSep " " [
+                  "${pkgs.victoriametrics}/bin/vmagent"
+                  "-promscrape.config=${promscrape}"
+                  "-remoteWrite.url=http://${vmAddr}/api/v1/write"
+                  "-remoteWrite.tmpDataPath=%t/vmagent"
+                ])
+              ];
+              SupplementaryGroups = "metrics";

-            DynamicUser = true;
-            RuntimeDirectory = "vmagent";
-            CapabilityBoundingSet = [""];
-            DeviceAllow = [""];
-            LockPersonality = true;
-            MemoryDenyWriteExecute = true;
-            NoNewPrivileges = true;
-            PrivateDevices = true;
-            ProtectClock = true;
-            ProtectControlGroups = true;
-            ProtectHome = true;
-            ProtectHostname = true;
-            ProtectKernelLogs = true;
-            ProtectKernelModules = true;
-            ProtectKernelTunables = true;
-            ProtectSystem = "strict";
-            RemoveIPC = true;
-            RestrictAddressFamilies = ["AF_INET" "AF_INET6"];
-            RestrictNamespaces = true;
-            RestrictRealtime = true;
-            RestrictSUIDSGID = true;
-            SystemCallArchitectures = "native";
-            UMask = "0077";
-          };
-        };
+              DynamicUser = true;
+              RuntimeDirectory = "vmagent";
+              CapabilityBoundingSet = [ "" ];
+              DeviceAllow = [ "" ];
+              LockPersonality = true;
+              MemoryDenyWriteExecute = true;
+              NoNewPrivileges = true;
+              PrivateDevices = true;
+              ProtectClock = true;
+              ProtectControlGroups = true;
+              ProtectHome = true;
+              ProtectHostname = true;
+              ProtectKernelLogs = true;
+              ProtectKernelModules = true;
+              ProtectKernelTunables = true;
+              ProtectSystem = "strict";
+              RemoveIPC = true;
+              RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+              RestrictNamespaces = true;
+              RestrictRealtime = true;
+              RestrictSUIDSGID = true;
+              SystemCallArchitectures = "native";
+              UMask = "0077";
+            };
+          };
       }
     ];

-    users.groups.metrics = {};
+    users.groups.metrics = { };

-    services.victoriametrics.scrapeConfigs = let
-      allExporters =
-        lib.mapAttrs (name: exporter: {
-          inherit (exporter) listenAddress port;
-        }) ((lib.filterAttrs (_: exporter: builtins.isAttrs exporter && exporter.enable)
-          config.services.prometheus.exporters)
-        // config.services.prometheus.extraExporters);
-    in
-      lib.mapAttrs (_: exporter: {
-        targets = ["${exporter.listenAddress}:${toString exporter.port}"];
-      })
-      allExporters;
+    services.victoriametrics.scrapeConfigs =
+      let
+        allExporters =
+          lib.mapAttrs
+            (name: exporter: {
+              inherit (exporter) listenAddress port;
+            })
+            ((lib.filterAttrs (_: exporter: builtins.isAttrs exporter && exporter.enable)
+              config.services.prometheus.exporters)
+            // config.services.prometheus.extraExporters);
+      in
+      lib.mapAttrs
+        (_: exporter: {
+          targets = [ "${exporter.listenAddress}:${toString exporter.port}" ];
+        })
+        allExporters;
   };
 }
@@ -1,4 +1,4 @@
-{config, ...}: {
+{ config, ... }: {
   config.services.victoriametrics = {
     enable = true;
     extraOptions = [
@@ -7,10 +7,10 @@

     scrapeConfigs = {
       forgejo = {
-        targets = ["127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT}"];
+        targets = [ "127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT}" ];
         extraSettings.authorization.credentials_file = config.sops.secrets."forgejo/metrics-token".path;
       };
-      coturn.targets = ["127.0.0.1:9641"];
+      coturn.targets = [ "127.0.0.1:9641" ];
     };
   };
 }
@@ -1,14 +1,15 @@
-{
-  pkgs,
-  config,
-  ...
-}: let
+{ pkgs
+, config
+, ...
+}:
+let
   # Update pending on rewrite of nextcloud news, though there is an
   # alpha to switch to if it becomes necessary:
   # https://github.com/nextcloud/news/issues/2610
   nextcloud = pkgs.nextcloud27;
   hostName = "nextcloud.${config.services.nginx.domain}";
-in {
+in
+{
   services.nextcloud = {
     inherit hostName;

@@ -42,7 +43,7 @@ in {
   };

   # Ensure that this service doesn't start before postgres is ready
-  systemd.services.nextcloud-setup.after = ["postgresql.service"];
+  systemd.services.nextcloud-setup.after = [ "postgresql.service" ];

   # Set up SSL
   services.nginx.virtualHosts."${hostName}" = {
@@ -1,4 +1,4 @@
-{pkgs, ...}: {
+{ pkgs, ... }: {
   services.postgresql = {
     package = pkgs.postgresql_14;
     enable = true;
@@ -1,16 +1,17 @@
-{
-  pkgs,
-  lib,
-  ...
-}: let
+{ pkgs
+, lib
+, ...
+}:
+let
   inherit (lib) concatStringsSep;
-in {
+in
+{
   # Sadly, steam-run requires some X libs
   environment.noXlibs = false;

   systemd.services.starbound = {
     description = "Starbound";
-    after = ["network.target"];
+    after = [ "network.target" ];

     serviceConfig = {
       ExecStart = "${pkgs.local.starbound}/bin/launch-starbound ${./configs/starbound.json}";
@@ -67,7 +68,7 @@ in {
       # Game servers shouldn't use cgroups themselves either
       ProtectControlGroups = true;
       # Most game servers will never need other socket types
-      RestrictAddressFamilies = ["AF_UNIX AF_INET AF_INET6"];
+      RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
       # Also a no-brainer, no game server should ever need this
       LockPersonality = true;
       # Some game servers will probably try to set this, but they
@@ -116,6 +117,6 @@ in {
     paths = [
       "/var/lib/private/starbound/storage/universe/"
     ];
-    pauseServices = ["starbound.service"];
+    pauseServices = [ "starbound.service" ];
   };
 }
@@ -1,6 +1,8 @@
-{config, ...}: let
+{ config, ... }:
+let
   domain = config.services.nginx.domain;
-in {
+in
+{
   services.tlaternet-webserver = {
     enable = true;
     listen = {
@@ -10,15 +12,17 @@ in {
   };

   # Set up SSL
-  services.nginx.virtualHosts."${domain}" = let
-    inherit (config.services.tlaternet-webserver.listen) addr port;
-  in {
-    serverAliases = ["www.${domain}"];
-
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
-
-    locations."/".proxyPass = "http://${addr}:${toString port}";
-  };
+  services.nginx.virtualHosts."${domain}" =
+    let
+      inherit (config.services.tlaternet-webserver.listen) addr port;
+    in
+    {
+      serverAliases = [ "www.${domain}" ];
+
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
+
+      locations."/".proxyPass = "http://${addr}:${toString port}";
+    };
 }
@@ -1,4 +1,4 @@
-{config, ...}: {
+{ config, ... }: {
   # iptables needs to permit forwarding from wg0 to wg0
   networking.firewall.extraCommands = ''
     iptables -A FORWARD -i wg0 -o wg0 -j ACCEPT
@@ -26,7 +26,7 @@
       {
         # yui
         wireguardPeerConfig = {
-          AllowedIPs = ["10.45.249.2/32"];
+          AllowedIPs = [ "10.45.249.2/32" ];
           PublicKey = "5mlnqEVJWks5OqgeFA2bLIrvST9TlCE81Btl+j4myz0=";
         };
       }
@@ -34,7 +34,7 @@
       {
         # yuanyuan
        wireguardPeerConfig = {
-          AllowedIPs = ["10.45.249.10/32"];
+          AllowedIPs = [ "10.45.249.10/32" ];
           PublicKey = "0UsFE2atz/O5P3OKQ8UHyyyGQNJbp1MeIWUJLuoerwE=";
         };
       }

@@ -31,8 +31,8 @@
     };

     # Heisenbridge
-    "heisenbridge/as-token" = {};
-    "heisenbridge/hs-token" = {};
+    "heisenbridge/as-token" = { };
+    "heisenbridge/hs-token" = { };

     "hetzner-api" = {
       owner = "acme";
@@ -62,10 +62,10 @@
     };

     # Steam
-    "steam/tlater" = {};
+    "steam/tlater" = { };

     # Turn
-    "turn/env" = {};
+    "turn/env" = { };
     "turn/secret" = {
       owner = "turnserver";
     };

flake.nix
@@ -32,126 +32,130 @@
     };
   };

-  outputs = {
-    self,
-    nixpkgs,
-    sops-nix,
-    nvfetcher,
-    deploy-rs,
-    ...
-  } @ inputs: let
-    system = "x86_64-linux";
-    pkgs = nixpkgs.legacyPackages.${system};
-  in {
-    ##################
-    # Configurations #
-    ##################
-    nixosConfigurations = {
-      # The actual system definition
-      hetzner-1 = nixpkgs.lib.nixosSystem {
-        inherit system;
-        specialArgs.flake-inputs = inputs;
-
-        modules = [
-          ./configuration
-          ./configuration/hardware-specific/hetzner
-        ];
-      };
-    };
-
-    ############################
-    # Deployment configuration #
-    ############################
-    deploy.nodes = {
-      hetzner-1 = {
-        hostname = "116.202.158.55";
-
-        profiles.system = {
-          user = "root";
-          path = deploy-rs.lib.${system}.activate.nixos self.nixosConfigurations.hetzner-1;
-        };
-
-        sshUser = "tlater";
-        sshOpts = ["-p" "2222" "-o" "ForwardAgent=yes"];
-      };
-    };
-
-    #########
-    # Tests #
-    #########
-    checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
-
-    ###################
-    # Utility scripts #
-    ###################
-    apps.${system} = {
-      default = self.apps.${system}.run-vm;
-
-      run-vm = {
-        type = "app";
-        program = let
-          vm = nixpkgs.lib.nixosSystem {
-            inherit system;
-            specialArgs.flake-inputs = inputs;
-
-            modules = [
-              ./configuration
-              ./configuration/hardware-specific/vm.nix
-            ];
-          };
-        in
-          (pkgs.writeShellScript "" ''
-            ${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
-          '')
-          .outPath;
-      };
-
-      update-pkgs = {
-        type = "app";
-        program = let
-          nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
-        in
-          (pkgs.writeShellScript "update-pkgs" ''
-            cd "$(git rev-parse --show-toplevel)/pkgs"
-            ${nvfetcher-bin} -o _sources_pkgs -c nvfetcher.toml
-          '')
-          .outPath;
-      };
-
-      update-nextcloud-apps = {
-        type = "app";
-        program = let
-          nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
-        in
-          (pkgs.writeShellScript "update-nextcloud-apps" ''
-            cd "$(git rev-parse --show-toplevel)/pkgs"
-            ${nvfetcher-bin} -o _sources_nextcloud -c nextcloud-apps.toml
-          '')
-          .outPath;
-      };
-    };
-
-    ###########################
-    # Development environment #
-    ###########################
-    devShells.${system}.default = nixpkgs.legacyPackages.${system}.mkShell {
-      sopsPGPKeyDirs = ["./keys/hosts/" "./keys/users/"];
-      nativeBuildInputs = [
-        sops-nix.packages.${system}.sops-import-keys-hook
-      ];
-
-      packages = with pkgs; [
-        sops-nix.packages.${system}.sops-init-gpg-key
-        deploy-rs.packages.${system}.default
-
-        cargo
-        clippy
-        rustc
-        rustfmt
-        rust-analyzer
-        pkg-config
-        openssl
-      ];
-    };
-  };
+  outputs =
+    { self
+    , nixpkgs
+    , sops-nix
+    , nvfetcher
+    , deploy-rs
+    , ...
+    } @ inputs:
+    let
+      system = "x86_64-linux";
+      pkgs = nixpkgs.legacyPackages.${system};
+    in
+    {
+      ##################
+      # Configurations #
+      ##################
+      nixosConfigurations = {
+        # The actual system definition
+        hetzner-1 = nixpkgs.lib.nixosSystem {
+          inherit system;
+          specialArgs.flake-inputs = inputs;
+
+          modules = [
+            ./configuration
+            ./configuration/hardware-specific/hetzner
+          ];
+        };
+      };
+
+      ############################
+      # Deployment configuration #
+      ############################
+      deploy.nodes = {
+        hetzner-1 = {
+          hostname = "116.202.158.55";
+
+          profiles.system = {
+            user = "root";
+            path = deploy-rs.lib.${system}.activate.nixos self.nixosConfigurations.hetzner-1;
+          };
+
+          sshUser = "tlater";
+          sshOpts = [ "-p" "2222" "-o" "ForwardAgent=yes" ];
+        };
+      };
+
+      #########
+      # Tests #
+      #########
+      checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
+
+      ###################
+      # Utility scripts #
+      ###################
+      apps.${system} = {
+        default = self.apps.${system}.run-vm;
+
+        run-vm = {
+          type = "app";
+          program =
+            let
+              vm = nixpkgs.lib.nixosSystem {
+                inherit system;
+                specialArgs.flake-inputs = inputs;
+
+                modules = [
+                  ./configuration
+                  ./configuration/hardware-specific/vm.nix
+                ];
+              };
+            in
+            (pkgs.writeShellScript "" ''
+              ${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
+            '').outPath;
+        };
+
+        update-pkgs = {
+          type = "app";
+          program =
+            let
+              nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
+            in
+            (pkgs.writeShellScript "update-pkgs" ''
+              cd "$(git rev-parse --show-toplevel)/pkgs"
+              ${nvfetcher-bin} -o _sources_pkgs -c nvfetcher.toml
+            '').outPath;
+        };
+
+        update-nextcloud-apps = {
+          type = "app";
+          program =
+            let
+              nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
+            in
+            (pkgs.writeShellScript "update-nextcloud-apps" ''
+              cd "$(git rev-parse --show-toplevel)/pkgs"
+              ${nvfetcher-bin} -o _sources_nextcloud -c nextcloud-apps.toml
+            '').outPath;
+        };
+      };
+
+      ###########################
+      # Development environment #
+      ###########################
+      devShells.${system}.default = nixpkgs.legacyPackages.${system}.mkShell {
+        sopsPGPKeyDirs = [ "./keys/hosts/" "./keys/users/" ];
+        nativeBuildInputs = [
+          sops-nix.packages.${system}.sops-import-keys-hook
+        ];
+
+        packages = with pkgs; [
+          sops-nix.packages.${system}.sops-init-gpg-key
+          deploy-rs.packages.${system}.default
+
+          nixpkgs-fmt
+
+          cargo
+          clippy
+          rustc
+          rustfmt
+          rust-analyzer
+          pkg-config
+          openssl
+        ];
+      };
+    };
 }
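With nixpkgs-fmt added to the dev shell packages above, the whole tree can be formatted from inside it. A minimal usage sketch, assuming nixpkgs-fmt's documented CLI (a directory argument formats the .nix files under it; --check only reports):

    # Enter the flake's dev shell, then format the repository in place
    nix develop
    nixpkgs-fmt .

    # Or verify formatting without rewriting anything
    nixpkgs-fmt --check .
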
@@ -1,8 +1,7 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
+{ config
+, pkgs
+, lib
+, ...
 }: {
   options = {
     services.nginx.domain = lib.mkOption {
@@ -10,36 +9,37 @@
       description = "The base domain name to append to virtual domain names";
     };

-    services.nginx.virtualHosts = let
-      extraVirtualHostOptions = {
-        name,
-        config,
-        ...
-      }: {
-        options = {
-          enableHSTS = lib.mkEnableOption "Enable HSTS";
-
-          addAccessLog = lib.mkOption {
-            type = lib.types.bool;
-            default = true;
-            description = ''
-              Add special logging to `/var/log/nginx/''${serverName}`
-            '';
-          };
-        };
-
-        config = {
-          extraConfig = lib.concatStringsSep "\n" [
-            (lib.optionalString config.enableHSTS ''
-              add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
-            '')
-            (lib.optionalString config.addAccessLog ''
-              access_log /var/log/nginx/${name}/access.log upstream_time;
-            '')
-          ];
-        };
-      };
-    in
+    services.nginx.virtualHosts =
+      let
+        extraVirtualHostOptions =
+          { name
+          , config
+          , ...
+          }: {
+            options = {
+              enableHSTS = lib.mkEnableOption "Enable HSTS";
+
+              addAccessLog = lib.mkOption {
+                type = lib.types.bool;
+                default = true;
+                description = ''
+                  Add special logging to `/var/log/nginx/''${serverName}`
+                '';
+              };
+            };
+
+            config = {
+              extraConfig = lib.concatStringsSep "\n" [
+                (lib.optionalString config.enableHSTS ''
+                  add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
+                '')
+                (lib.optionalString config.addAccessLog ''
+                  access_log /var/log/nginx/${name}/access.log upstream_time;
+                '')
+              ];
+            };
+          };
+      in
       lib.mkOption {
         type = lib.types.attrsOf (lib.types.submodule extraVirtualHostOptions);
       };
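These submodule options extend every entry of `services.nginx.virtualHosts`, so individual hosts can toggle them. A short usage sketch (the host name is hypothetical):

    services.nginx.virtualHosts."example.${config.services.nginx.domain}" = {
      enableHSTS = true;
      # The access log defaults to on; disable it per host like this
      addAccessLog = false;
    };
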
@@ -47,13 +47,15 @@

   config = {
     # Don't attempt to run acme if the domain name is not tlater.net
-    systemd.services = let
-      confirm = ''[[ "tlater.net" = ${config.services.nginx.domain} ]]'';
-    in
-      lib.mapAttrs' (cert: _:
-        lib.nameValuePair "acme-${cert}" {
-          serviceConfig.ExecCondition = ''${pkgs.runtimeShell} -c '${confirm}' '';
-        })
-      config.security.acme.certs;
+    systemd.services =
+      let
+        confirm = ''[[ "tlater.net" = ${config.services.nginx.domain} ]]'';
+      in
+      lib.mapAttrs'
+        (cert: _:
+          lib.nameValuePair "acme-${cert}" {
+            serviceConfig.ExecCondition = ''${pkgs.runtimeShell} -c '${confirm}' '';
+          })
+        config.security.acme.certs;
   };
 }

@@ -1,7 +1,6 @@
-{
-  pkgs,
-  rustPlatform,
-  ...
+{ pkgs
+, rustPlatform
+, ...
 }:
 rustPlatform.buildRustPackage {
   pname = "afvalcalendar";

@@ -1,22 +1,23 @@
-{
-  pkgs,
-  lib,
-}: let
+{ pkgs
+, lib
+,
+}:
+let
   inherit (builtins) fromJSON mapAttrs readFile;
   inherit (pkgs) callPackage;
 in
 {
-  starbound = callPackage ./starbound {};
+  starbound = callPackage ./starbound { };
   prometheus-fail2ban-exporter = callPackage ./prometheus/fail2ban-exporter.nix {
-    sources = pkgs.callPackage ./_sources_pkgs/generated.nix {};
+    sources = pkgs.callPackage ./_sources_pkgs/generated.nix { };
   };
-  afvalcalendar = callPackage ./afvalcalendar {};
+  afvalcalendar = callPackage ./afvalcalendar { };
 }
 // (
   # Add nextcloud apps
   let
-    mkNextcloudApp = pkgs.callPackage ./mkNextcloudApp.nix {};
+    mkNextcloudApp = pkgs.callPackage ./mkNextcloudApp.nix { };
     sources = fromJSON (readFile ./_sources_nextcloud/generated.json);
   in
   mapAttrs (_: source: mkNextcloudApp source) sources
 )

@@ -1,6 +1,6 @@
-{
-  fetchNextcloudApp,
-  lib,
+{ fetchNextcloudApp
+, lib
+,
 }: source:
 fetchNextcloudApp {
   url = source.src.url;

@@ -1,6 +1,6 @@
-{
-  buildGoModule,
-  sources,
+{ buildGoModule
+, sources
+,
 }:
 buildGoModule {
   inherit (sources.prometheus-fail2ban-exporter) pname src version;

@@ -1,34 +1,35 @@
-{
-  stdenv,
-  lib,
-  makeWrapper,
-  patchelf,
-  steamPackages,
-  replace-secret,
-}: let
+{ stdenv
+, lib
+, makeWrapper
+, patchelf
+, steamPackages
+, replace-secret
+,
+}:
+let
   # Use the directory in which starbound is installed so steamcmd
   # doesn't have to be reinstalled constantly (we're using DynamicUser
   # with StateDirectory to persist this).
   steamcmd = steamPackages.steamcmd.override {
     steamRoot = "/var/lib/starbound/.steamcmd";
   };
-  wrapperPath = lib.makeBinPath [patchelf steamcmd replace-secret];
+  wrapperPath = lib.makeBinPath [ patchelf steamcmd replace-secret ];
 in
 stdenv.mkDerivation {
   name = "starbound-update-script";
-  nativeBuildInputs = [makeWrapper];
+  nativeBuildInputs = [ makeWrapper ];
   dontUnpack = true;
   patchPhase = ''
     interpreter="$(cat $NIX_CC/nix-support/dynamic-linker)"
     substitute ${./launch-starbound.sh} launch-starbound --subst-var interpreter
   '';
   installPhase = ''
     mkdir -p $out/bin
     cp launch-starbound $out/bin/launch-starbound
     chmod +x $out/bin/launch-starbound
   '';
   postFixup = ''
     wrapProgram $out/bin/launch-starbound \
       --prefix PATH : "${wrapperPath}"
   '';
 }
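The steamRoot override above relies on /var/lib/starbound surviving between runs. A minimal sketch of the unit-side settings that provide this (the real values live in the starbound service module earlier in this diff):

    systemd.services.starbound.serviceConfig = {
      # Transient user, but StateDirectory keeps /var/lib/starbound around
      # (surfaced at /var/lib/private/starbound for DynamicUser units)
      DynamicUser = true;
      StateDirectory = "starbound";
    };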