Switch to nixfmt formatter #122
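This PR reformats every Nix file in the repository with nixfmt (RFC 166 style); the hunks below are mechanical formatting changes only. As a minimal sketch of how such a switch is typically wired up (an assumption for illustration, not part of the diff shown here), the flake's `formatter` output points at the new formatter so that `nix fmt` runs it over the tree:

{
  # Hypothetical wiring; the actual flake.nix in this repository may differ.
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs =
    { nixpkgs, ... }:
    let
      system = "x86_64-linux";
    in
    {
      # `nix fmt` picks this up and formats the whole tree with nixfmt.
      formatter.${system} = nixpkgs.legacyPackages.${system}.nixfmt-rfc-style;
    };
}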
@@ -1,10 +1,12 @@
-{ config
-, pkgs
-, lib
-, modulesPath
-, flake-inputs
-, ...
-}: {
+{
+config,
+pkgs,
+lib,
+modulesPath,
+flake-inputs,
+...
+}:
+{
 imports = [
 flake-inputs.disko.nixosModules.disko
 flake-inputs.sops-nix.nixosModules.sops
@@ -51,7 +53,10 @@

 # Optimization for minecraft servers, see:
 # https://bugs.mojang.com/browse/MC-183518
-boot.kernelParams = [ "highres=off" "nohz=off" ];
+boot.kernelParams = [
+"highres=off"
+"nohz=off"
+];

 networking = {
 usePredictableInterfaceNames = false;
@@ -25,9 +25,7 @@
 };
 }
 # IPv6
-{
-addressConfig.Address = "2a01:4f8:10b:3c85::2/64";
-}
+{ addressConfig.Address = "2a01:4f8:10b:3c85::2/64"; }
 ];

 networkConfig = {
@@ -19,7 +19,10 @@
 };
 };

-mountOptions = [ "compress=zstd" "noatime" ];
+mountOptions = [
+"compress=zstd"
+"noatime"
+];
 in
 {
 sda = {
@@ -54,7 +57,15 @@
 type = "btrfs";
 # Hack to get multi-device btrfs going
 # See https://github.com/nix-community/disko/issues/99
-extraArgs = [ "-d" "raid1" "-m" "raid1" "--runtime-features" "quota" "/dev/sda3" ];
+extraArgs = [
+"-d"
+"raid1"
+"-m"
+"raid1"
+"--runtime-features"
+"quota"
+"/dev/sda3"
+];
 subvolumes = {
 "/volume" = { };
 "/volume/root" = {
@@ -1,4 +1,5 @@
-{ lib, ... }: {
+{ lib, ... }:
+{
 users.users.tlater.password = "insecure";

 # Disable graphical tty so -curses works
@@ -1,7 +1,5 @@
-{ config
-, lib
-, ...
-}: {
+{ config, lib, ... }:
+{
 services.nginx = {
 enable = true;
 recommendedTlsSettings = true;
@@ -26,26 +24,23 @@
 # Override the default, just keep fewer logs
 nginx.rotate = 6;
 }
-// lib.mapAttrs'
-(virtualHost: _:
+// lib.mapAttrs' (
+virtualHost: _:
 lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
 frequency = "daily";
 rotate = 2;
 compress = true;
 delaycompress = true;
 su = "${config.services.nginx.user} ${config.services.nginx.group}";
 postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
-})
-config.services.nginx.virtualHosts;
+}
+) config.services.nginx.virtualHosts;

-systemd.tmpfiles.rules =
-lib.mapAttrsToList
-(
-virtualHost: _:
+systemd.tmpfiles.rules = lib.mapAttrsToList (
+virtualHost: _:
 #
 "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
-)
-config.services.nginx.virtualHosts;
+) config.services.nginx.virtualHosts;

 security.acme = {
 defaults.email = "tm@tlater.net";
@@ -61,8 +56,8 @@

 services.backups.acme = {
 user = "acme";
-paths =
-lib.mapAttrsToList (virtualHost: _: "/var/lib/acme/${virtualHost}")
-config.services.nginx.virtualHosts;
+paths = lib.mapAttrsToList (
+virtualHost: _: "/var/lib/acme/${virtualHost}"
+) config.services.nginx.virtualHosts;
 };
 }
@@ -1,7 +1,5 @@
-{ pkgs
-, config
-, ...
-}: {
+{ pkgs, config, ... }:
+{
 systemd.services.afvalcalendar = {
 description = "Enschede afvalcalendar -> ical converter";
 wantedBy = [ "multi-user.target" ];
@@ -25,16 +23,23 @@
 ProtectKernelModules = true;
 ProtectKernelLogs = true;
 ProtectControlGroups = true;
-RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+RestrictAddressFamilies = [
+"AF_UNIX"
+"AF_INET"
+"AF_INET6"
+];
 RestrictNamespaces = true;
 LockPersonality = true;
 MemoryDenyWriteExecute = true;
 RestrictRealtime = true;
 RestrictSUIDSGID = true;
 SystemCallArchitectures = "native";
-SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ];
+SystemCallFilter = [
+"@system-service"
+"~@privileged @resources @setuid @keyring"
+];

-Umask = 0002;
+Umask = 2;
 SupplementaryGroups = "afvalcalendar-hosting";

 ReadWritePaths = "/srv/afvalcalendar";
@@ -1,29 +1,35 @@
-{ config
-, pkgs
-, lib
-, ...
+{
+config,
+pkgs,
+lib,
+...
 }:
 let
 inherit (lib) types optional singleton;
-mkShutdownScript = service:
+mkShutdownScript =
+service:
 pkgs.writeShellScript "backup-${service}-shutdown" ''
 if systemctl is-active --quiet '${service}'; then
 touch '/tmp/${service}-was-active'
 systemctl stop '${service}'
 fi
 '';
-mkRestartScript = service:
+mkRestartScript =
+service:
 pkgs.writeShellScript "backup-${service}-restart" ''
 if [ -f '/tmp/${service}-was-active' ]; then
 rm '/tmp/${service}-was-active'
 systemctl start '${service}'
 fi
 '';
-writeScript = name: packages: text:
-lib.getExe (pkgs.writeShellApplication {
+writeScript =
+name: packages: text:
+lib.getExe (
+pkgs.writeShellApplication {
 inherit name text;
 runtimeInputs = packages;
-});
+}
+);

 # *NOT* a TOML file, for some reason quotes are interpreted
 # *literally
@@ -49,85 +55,87 @@ in
 description = lib.mdDoc ''
 Configure restic backups with a specific tag.
 '';
-type = types.attrsOf (types.submodule ({ config
-, name
-, ...
-}: {
+type = types.attrsOf (
+types.submodule (
+{ config, name, ... }:
+{
 options = {
 user = lib.mkOption {
 type = types.str;
 description = ''
 The user as which to run the backup.
 '';
 };
 paths = lib.mkOption {
 type = types.listOf types.str;
 description = ''
 The paths to back up.
 '';
 };
 tag = lib.mkOption {
 type = types.str;
 description = ''
 The restic tag to mark the backup with.
 '';
 default = name;
 };
 preparation = {
 packages = lib.mkOption {
 type = types.listOf types.package;
 default = [ ];
 description = ''
 The list of packages to make available in the
 preparation script.
 '';
 };
 text = lib.mkOption {
 type = types.nullOr types.str;
 default = null;
 description = ''
 The preparation script to run before the backup.

 This should include things like database dumps and
 enabling maintenance modes. If a service needs to be
 shut down for backups, use `pauseServices` instead.
 '';
 };
 };
 cleanup = {
 packages = lib.mkOption {
 type = types.listOf types.package;
 default = [ ];
 description = ''
 The list of packages to make available in the
 cleanup script.
 '';
 };
 text = lib.mkOption {
 type = types.nullOr types.str;
 default = null;
 description = ''
 The cleanup script to run after the backup.

 This should do things like cleaning up database dumps
 and disabling maintenance modes.
 '';
 };
 };
 pauseServices = lib.mkOption {
 type = types.listOf types.str;
 default = [ ];
 description = ''
 The systemd services that need to be shut down before
 the backup can run. Services will be restarted after the
 backup is complete.

 This is intended to be used for services that do not
 support hot backups.
 '';
 };
 };
-}));
+}
+)
+);
 };
 };

@@ -164,58 +172,68 @@ in
 };
 };
 }
-// lib.mapAttrs'
-(name: backup:
+// lib.mapAttrs' (
+name: backup:
 lib.nameValuePair "backup-${name}" {
 # Don't want to restart mid-backup
 restartIfChanged = false;

-environment =
-resticEnv
-// {
+environment = resticEnv // {
 RESTIC_CACHE_DIR = "%C/backup-${name}";
 };

 path = with pkgs; [
 coreutils
 openssh
 rclone
 restic
 ];

 # TODO(tlater): If I ever add more than one repo, service
 # shutdown/restarting will potentially break if multiple
 # backups for the same service overlap. A more clever
 # sentinel file with reference counts would probably solve
 # this.
 serviceConfig = {
 User = backup.user;
 Group = "backup";
 RuntimeDirectory = "backup-${name}";
 CacheDirectory = "backup-${name}";
 CacheDirectoryMode = "0700";
 PrivateTmp = true;

 ExecStart = [
-(lib.concatStringsSep " " ([ "${pkgs.restic}/bin/restic" "backup" "--tag" name ] ++ backup.paths))
+(lib.concatStringsSep " " (
+[
+"${pkgs.restic}/bin/restic"
+"backup"
+"--tag"
+name
+]
+++ backup.paths
+))
 ];

 ExecStartPre =
 map (service: "+${mkShutdownScript service}") backup.pauseServices
-++ singleton (writeScript "backup-${name}-repo-init" [ ] ''
+++ singleton (
+writeScript "backup-${name}-repo-init" [ ] ''
 restic snapshots || restic init
-'')
-++ optional (backup.preparation.text != null)
-(writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);
+''
+)
+++ optional (backup.preparation.text != null) (
+writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text
+);

 # TODO(tlater): Add repo pruning/checking
 ExecStopPost =
 map (service: "+${mkRestartScript service}") backup.pauseServices
-++ optional (backup.cleanup.text != null)
-(writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
+++ optional (backup.cleanup.text != null) (
+writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text
+);
 };
-})
-config.services.backups;
+}
+) config.services.backups;

 systemd.timers =
 {
@@ -227,18 +245,18 @@ in
 # of the backup jobs.
 };
 }
-// lib.mapAttrs'
-(name: backup:
+// lib.mapAttrs' (
+name: backup:
 lib.nameValuePair "backup-${name}" {
 wantedBy = [ "timers.target" ];
 timerConfig = {
 OnCalendar = "Wednesday 02:30:00 UTC";
 RandomizedDelaySec = "1h";
 FixedRandomDelay = true;
 Persistent = true;
 };
-})
-config.services.backups;
+}
+) config.services.backups;

 users = {
 # This user is only used to own the ssh key, because apparently
@@ -1,10 +1,6 @@
-{ config
-, flake-inputs
-, ...
-}: {
-imports = [
-flake-inputs.sonnenshift.nixosModules.default
-];
+{ config, flake-inputs, ... }:
+{
+imports = [ flake-inputs.sonnenshift.nixosModules.default ];

 services.batteryManager = {
 enable = true;
@@ -1,7 +1,8 @@
-{ pkgs
-, config
-, lib
-, ...
+{
+pkgs,
+config,
+lib,
+...
 }:
 let
 inherit (lib.strings) concatMapStringsSep;
@@ -42,28 +43,30 @@ in
 systemd.services.heisenbridge =
 let
 replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
-registrationFile = builtins.toFile "heisenbridge-registration.yaml" (builtins.toJSON {
+registrationFile = builtins.toFile "heisenbridge-registration.yaml" (
+builtins.toJSON {
 id = "heisenbridge";
 url = "http://127.0.0.1:9898";
 as_token = "@AS_TOKEN@";
 hs_token = "@HS_TOKEN@";
 rate_limited = false;
 sender_localpart = "heisenbridge";
 namespaces = {
 users = [
 {
 regex = "@irc_.*";
 exclusive = true;
 }
 {
 regex = "@heisenbridge:.*";
 exclusive = true;
 }
 ];
 aliases = [ ];
 rooms = [ ];
 };
-});
+}
+);

 # TODO(tlater): Starting with systemd 253 it will become possible
 # to do the credential setup as part of ExecStartPre/preStart
@@ -114,7 +117,7 @@ in
 RestrictRealtime = true;
 ProtectProc = "invisible";
 ProcSubset = "pid";
-UMask = 0077;
+UMask = 77;

 # For the identd port
 # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
@@ -134,9 +137,7 @@ in
 use-auth-secret = true;
 static-auth-secret-file = config.sops.secrets."turn/secret".path;
 realm = turn-realm;
-relay-ips = [
-"116.202.158.55"
-];
+relay-ips = [ "116.202.158.55" ];

 # SSL config
 #
@@ -245,9 +246,7 @@ in

 services.backups.conduit = {
 user = "root";
-paths = [
-"/var/lib/private/matrix-conduit/"
-];
+paths = [ "/var/lib/private/matrix-conduit/" ];
 # Other services store their data in conduit, so no other services
 # need to be shut down currently.
 pauseServices = [ "conduit.service" ];
@@ -1,4 +1,5 @@
-{ pkgs, ... }: {
+{ pkgs, ... }:
+{
 services.fail2ban = {
 enable = true;
 extraPackages = [ pkgs.ipset ];
@@ -1,8 +1,9 @@
-{ lib
-, config
-, flake-inputs
-, pkgs
-, ...
+{
+lib,
+config,
+flake-inputs,
+pkgs,
+...
 }:
 let
 domain = "foundryvtt.${config.services.nginx.domain}";
@@ -40,9 +41,7 @@ in

 services.backups.foundryvtt = {
 user = "foundryvtt";
-paths = [
-config.services.foundryvtt.dataDir
-];
+paths = [ config.services.foundryvtt.dataDir ];
 pauseServices = [ "foundryvtt.service" ];
 };
 }
@@ -1,7 +1,8 @@
-{ pkgs
-, config
-, lib
-, ...
+{
+pkgs,
+config,
+lib,
+...
 }:
 let
 domain = "gitea.${config.services.nginx.domain}";
@@ -34,9 +35,7 @@ in
 secretPath = config.sops.secrets."forgejo/metrics-token".path;
 runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
 in
-[
-"+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
-];
+[ "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'" ];

 # Set up SSL
 services.nginx.virtualHosts."${domain}" =
@@ -1,7 +1,8 @@
-{ config
-, pkgs
-, lib
-, ...
+{
+config,
+pkgs,
+lib,
+...
 }:
 let
 yaml = pkgs.formats.yaml { };
@@ -20,9 +21,7 @@ in
 "tlater.com"
 ];
 in
-[
-"--config=${yaml.generate "domains.yml" conf}"
-];
+[ "--config=${yaml.generate "domains.yml" conf}" ];
 };

 # System statistics
@@ -51,26 +50,21 @@ in
 listenAddress = "127.0.0.1";
 group = "nginx";

-settings.namespaces =
-lib.mapAttrsToList
-(name: virtualHost: {
+settings.namespaces = lib.mapAttrsToList (name: virtualHost: {
 inherit name;
 metrics_override.prefix = "nginxlog";
 namespace_label = "vhost";

 format = lib.concatStringsSep " " [
 "$remote_addr - $remote_user [$time_local]"
 ''"$request" $status $body_bytes_sent''
 ''"$http_referer" "$http_user_agent"''
 ''rt=$request_time uct="$upstream_connect_time"''
 ''uht="$upstream_header_time" urt="$upstream_response_time"''
 ];

-source.files = [
-"/var/log/nginx/${name}/access.log"
-];
-})
-config.services.nginx.virtualHosts;
+source.files = [ "/var/log/nginx/${name}/access.log" ];
+}) config.services.nginx.virtualHosts;
 };
 };

@@ -86,7 +80,11 @@ in
 requires = [ "fail2ban.service" ];
 serviceConfig = {
 Group = "fail2ban";
-RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+RestrictAddressFamilies = [
+"AF_UNIX"
+"AF_INET"
+"AF_INET6"
+];
 ExecStart = lib.concatStringsSep " " [
 "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
 "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
@@ -1,7 +1,8 @@
-{ pkgs
-, config
-, lib
-, ...
+{
+pkgs,
+config,
+lib,
+...
 }:
 let
 inherit (lib) types mkOption mkDefault;
@@ -11,87 +12,94 @@ in
 options = {
 services.prometheus = {
 extraExporters = mkOption {
-type = types.attrsOf (types.submodule {
+type = types.attrsOf (
+types.submodule {
 options = {
 port = mkOption {
 type = types.int;
 description = "The port on which this exporter listens.";
 };
 listenAddress = mkOption {
 type = types.str;
 default = "127.0.0.1";
 description = "Address to listen on.";
 };
 serviceOpts = mkOption {
 type = types.attrs;
 description = "An attrset to be merged with the exporter's systemd service.";
 };
 };
-});
+}
+);
 };
 };

 services.victoriametrics.scrapeConfigs = mkOption {
-type = types.attrsOf (types.submodule ({ name
-, self
-, ...
-}: {
+type = types.attrsOf (
+types.submodule (
+{ name, self, ... }:
+{
 options = {
 job_name = mkOption {
 type = types.str;
 default = name;
 };

 extraSettings = mkOption {
 type = types.anything;
 description = ''
 Other settings to set for this scrape config.
 '';
 default = { };
 };

 targets = mkOption {
 type = types.listOf types.str;
 description = lib.mdDoc ''
 Addresses scrape targets for this config listen on.

 Shortcut for `static_configs = lib.singleton {targets = [<targets>];}`
 '';
 default = [ ];
 };

 static_configs = mkOption {
 default = [ ];
-type = types.listOf (types.submodule {
+type = types.listOf (
+types.submodule {
 options = {
 targets = mkOption {
 type = types.listOf types.str;
 description = lib.mdDoc ''
 The addresses scrape targets for this config listen on.

 Must in `listenAddress:port` format.
 '';
 };
 labels = mkOption {
 type = types.attrsOf types.str;
 description = lib.mdDoc ''
 Labels to apply to all targets defined for this static config.
 '';
 default = { };
 };
 };
-});
+}
+);
 };
 };
-}));
+}
+)
+);
 };
 };

 config = {
 systemd.services = lib.mkMerge [
-(lib.mapAttrs'
-(name: exporter:
-lib.nameValuePair "prometheus-${name}-exporter" (lib.mkMerge [
+(lib.mapAttrs' (
+name: exporter:
+lib.nameValuePair "prometheus-${name}-exporter" (
+lib.mkMerge [
 {
 # Shamelessly copied from upstream because the upstream
 # module is an intractable mess
@@ -117,7 +125,10 @@ in
 serviceConfig.ProtectKernelTunables = true;
 serviceConfig.ProtectSystem = mkDefault "strict";
 serviceConfig.RemoveIPC = true;
-serviceConfig.RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+serviceConfig.RestrictAddressFamilies = [
+"AF_INET"
+"AF_INET6"
+];
 serviceConfig.RestrictNamespaces = true;
 serviceConfig.RestrictRealtime = true;
 serviceConfig.RestrictSUIDSGID = true;
@@ -125,8 +136,9 @@ in
 serviceConfig.UMask = "0077";
 }
 exporter.serviceOpts
-]))
-config.services.prometheus.extraExporters)
+]
+)
+) config.services.prometheus.extraExporters)

 {
 vmagent-scrape-exporters =
@@ -134,24 +146,25 @@ in
 listenAddress = config.services.victoriametrics.listenAddress;
 vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
 promscrape = yaml.generate "prometheus.yml" {
-scrape_configs = lib.mapAttrsToList
-(_: scrape:
-lib.recursiveUpdate
-{
+scrape_configs = lib.mapAttrsToList (
+_: scrape:
+lib.recursiveUpdate {
 inherit (scrape) job_name;
 static_configs =
 scrape.static_configs
 ++ lib.optional (scrape.targets != [ ]) { targets = scrape.targets; };
-}
-scrape.extraSettings)
-config.services.victoriametrics.scrapeConfigs;
+} scrape.extraSettings
+) config.services.victoriametrics.scrapeConfigs;
 };
 in
 {
 enable = true;
 path = [ pkgs.victoriametrics ];
 wantedBy = [ "multi-user.target" ];
-after = [ "network.target" "victoriametrics.service" ];
+after = [
+"network.target"
+"victoriametrics.service"
+];
 serviceConfig = {
 ExecStart = [
 (lib.concatStringsSep " " [
@@ -180,7 +193,10 @@ in
 ProtectKernelTunables = true;
 ProtectSystem = "strict";
 RemoveIPC = true;
-RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+RestrictAddressFamilies = [
+"AF_INET"
+"AF_INET6"
+];
 RestrictNamespaces = true;
 RestrictRealtime = true;
 RestrictSUIDSGID = true;
@@ -195,19 +211,15 @@ in

 services.victoriametrics.scrapeConfigs =
 let
-allExporters =
-lib.mapAttrs
-(name: exporter: {
-inherit (exporter) listenAddress port;
-})
-((lib.filterAttrs (_: exporter: builtins.isAttrs exporter && exporter.enable)
-config.services.prometheus.exporters)
-// config.services.prometheus.extraExporters);
+allExporters = lib.mapAttrs (name: exporter: { inherit (exporter) listenAddress port; }) (
+(lib.filterAttrs (
+_: exporter: builtins.isAttrs exporter && exporter.enable
+) config.services.prometheus.exporters)
+// config.services.prometheus.extraExporters
+);
 in
-lib.mapAttrs
-(_: exporter: {
+lib.mapAttrs (_: exporter: {
 targets = [ "${exporter.listenAddress}:${toString exporter.port}" ];
-})
-allExporters;
+}) allExporters;
 };
 }
@@ -1,9 +1,8 @@
-{ config, ... }: {
+{ config, ... }:
+{
 config.services.victoriametrics = {
 enable = true;
-extraOptions = [
-"-storage.minFreeDiskSpaceBytes=5GB"
-];
+extraOptions = [ "-storage.minFreeDiskSpaceBytes=5GB" ];

 scrapeConfigs = {
 forgejo = {
@@ -1,7 +1,8 @@
-{ pkgs
-, config
-, lib
-, ...
+{
+pkgs,
+config,
+lib,
+...
 }:
 let
 # Update pending on rewrite of nextcloud news, though there is an
@@ -15,8 +16,8 @@ in
 inherit hostName;

 package = nextcloud;
-phpPackage = lib.mkForce
-(pkgs.php.override {
+phpPackage = lib.mkForce (
+pkgs.php.override {
 packageOverrides = final: prev: {
 extensions = prev.extensions // {
 pgsql = prev.extensions.pgsql.overrideAttrs (old: {
@@ -27,7 +28,8 @@ in
 });
 };
 };
-});
+}
+);
 enable = true;
 maxUploadSize = "2G";
 https = true;
@@ -52,7 +54,14 @@ in
 };

 extraApps = {
-inherit (pkgs.local) bookmarks calendar contacts cookbook news notes;
+inherit (pkgs.local)
+bookmarks
+calendar
+contacts
+cookbook
+news
+notes
+;
 };
 };

@@ -1,4 +1,5 @@
-{ pkgs, ... }: {
+{ pkgs, ... }:
+{
 services.postgresql = {
 package = pkgs.postgresql_14;
 enable = true;
|
@ -1,7 +1,4 @@
|
||||||
{ pkgs
|
{ pkgs, lib, ... }:
|
||||||
, lib
|
|
||||||
, ...
|
|
||||||
}:
|
|
||||||
let
|
let
|
||||||
inherit (lib) concatStringsSep;
|
inherit (lib) concatStringsSep;
|
||||||
in
|
in
|
||||||
|
@ -114,9 +111,7 @@ in
|
||||||
|
|
||||||
services.backups.starbound = {
|
services.backups.starbound = {
|
||||||
user = "root";
|
user = "root";
|
||||||
paths = [
|
paths = [ "/var/lib/private/starbound/storage/universe/" ];
|
||||||
"/var/lib/private/starbound/storage/universe/"
|
|
||||||
];
|
|
||||||
pauseServices = [ "starbound.service" ];
|
pauseServices = [ "starbound.service" ];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,4 +1,5 @@
-{ config, ... }: {
+{ config, ... }:
+{
 # iptables needs to permit forwarding from wg0 to wg0
 networking.firewall.extraCommands = ''
 iptables -A FORWARD -i wg0 -o wg0 -j ACCEPT

flake.nix
@@ -33,13 +33,14 @@
 };

 outputs =
-{ self
-, nixpkgs
-, sops-nix
-, nvfetcher
-, deploy-rs
-, ...
-} @ inputs:
+{
+self,
+nixpkgs,
+sops-nix,
+nvfetcher,
+deploy-rs,
+...
+}@inputs:
 let
 system = "x86_64-linux";
 pkgs = nixpkgs.legacyPackages.${system};
@@ -84,7 +85,12 @@
 };

 sshUser = "tlater";
-sshOpts = [ "-p" "2222" "-o" "ForwardAgent=yes" ];
+sshOpts = [
+"-p"
+"2222"
+"-o"
+"ForwardAgent=yes"
+];
 };
 };

@@ -144,10 +150,11 @@
 # Development environment #
 ###########################
 devShells.${system}.default = nixpkgs.legacyPackages.${system}.mkShell {
-sopsPGPKeyDirs = [ "./keys/hosts/" "./keys/users/" ];
-nativeBuildInputs = [
-sops-nix.packages.${system}.sops-import-keys-hook
+sopsPGPKeyDirs = [
+"./keys/hosts/"
+"./keys/users/"
 ];
+nativeBuildInputs = [ sops-nix.packages.${system}.sops-import-keys-hook ];

 packages = with pkgs; [
 sops-nix.packages.${system}.sops-init-gpg-key
@@ -1,5 +1 @@
-{
-imports = [
-./nginxExtensions.nix
-];
-}
+{ imports = [ ./nginxExtensions.nix ]; }
@@ -1,8 +1,10 @@
-{ config
-, pkgs
-, lib
-, ...
-}: {
+{
+config,
+pkgs,
+lib,
+...
+}:
+{
 options = {
 services.nginx.domain = lib.mkOption {
 type = lib.types.str;
@@ -12,10 +14,8 @@
 services.nginx.virtualHosts =
 let
 extraVirtualHostOptions =
-{ name
-, config
-, ...
-}: {
+{ name, config, ... }:
+{
 options = {
 enableHSTS = lib.mkEnableOption "Enable HSTS";

@@ -40,9 +40,7 @@
 };
 };
 in
-lib.mkOption {
-type = lib.types.attrsOf (lib.types.submodule extraVirtualHostOptions);
-};
+lib.mkOption { type = lib.types.attrsOf (lib.types.submodule extraVirtualHostOptions); };
 };

 config = {
@@ -51,11 +49,11 @@
 let
 confirm = ''[[ "tlater.net" = ${config.services.nginx.domain} ]]'';
 in
-lib.mapAttrs'
-(cert: _:
+lib.mapAttrs' (
+cert: _:
 lib.nameValuePair "acme-${cert}" {
 serviceConfig.ExecCondition = ''${pkgs.runtimeShell} -c '${confirm}' '';
-})
-config.security.acme.certs;
+}
+) config.security.acme.certs;
 };
 }
@@ -1,19 +1,12 @@
-{ pkgs
-, rustPlatform
-, ...
-}:
+{ pkgs, rustPlatform, ... }:
 rustPlatform.buildRustPackage {
 pname = "afvalcalendar";
 version = "0.1.0";
 src = ./.;

-nativeBuildInputs = with pkgs; [
-pkg-config
-];
+nativeBuildInputs = with pkgs; [ pkg-config ];

-buildInputs = with pkgs; [
-openssl
-];
+buildInputs = with pkgs; [ openssl ];

 cargoHash = "sha256-JXx6aUKdKbUTBCwlBw5i1hZy8ofCfSrhLCwFzqdA8cI=";
 }
@@ -1,7 +1,4 @@
-{ pkgs
-, lib
-,
-}:
+{ pkgs, lib }:
 let
 inherit (builtins) fromJSON mapAttrs readFile;
 inherit (pkgs) callPackage;
@@ -13,7 +10,7 @@ in
 };
 afvalcalendar = callPackage ./afvalcalendar { };
 }
 // (
 # Add nextcloud apps
 let
 mkNextcloudApp = pkgs.callPackage ./mkNextcloudApp.nix { };
@@ -1,7 +1,5 @@
-{ fetchNextcloudApp
-, lib
-,
-}: source:
+{ fetchNextcloudApp, lib }:
+source:
 fetchNextcloudApp {
 url = source.src.url;
 sha256 = source.src.sha256;
@@ -1,7 +1,4 @@
-{ buildGoModule
-, sources
-,
-}:
+{ buildGoModule, sources }:
 buildGoModule {
 inherit (sources.prometheus-fail2ban-exporter) pname src version;
 vendorHash = "sha256-5o8p5p0U/c0WAIV5dACnWA3ThzSh2tt5LIFMb59i9GY=";
@@ -1,19 +1,21 @@
-{ stdenv
-, lib
-, makeWrapper
-, patchelf
-, steamPackages
-, replace-secret
-,
+{
+stdenv,
+lib,
+makeWrapper,
+patchelf,
+steamPackages,
+replace-secret,
 }:
 let
 # Use the directory in which starbound is installed so steamcmd
 # doesn't have to be reinstalled constantly (we're using DynamicUser
 # with StateDirectory to persist this).
-steamcmd = steamPackages.steamcmd.override {
-steamRoot = "/var/lib/starbound/.steamcmd";
-};
-wrapperPath = lib.makeBinPath [ patchelf steamcmd replace-secret ];
+steamcmd = steamPackages.steamcmd.override { steamRoot = "/var/lib/starbound/.steamcmd"; };
+wrapperPath = lib.makeBinPath [
+patchelf
+steamcmd
+replace-secret
+];
 in
 stdenv.mkDerivation {
 name = "starbound-update-script";