Compare commits
No commits in common. "8d0be61483178428191cf79f55705ff20cb2b8cb" and "fc991a0b07c2c1821bafd34fc0ffaff2b0bcc29f" have entirely different histories.
8d0be61483...fc991a0b07
22 changed files with 451 additions and 527 deletions
@@ -9,6 +9,3 @@ fd138d45e6a2cad89fead6e9f246ba282070d6b7
 
 # Switch to alejandra formatting
 046a88905ddfa7f9edc3291c310dbb985dee34f9
-
-# Apply wide linting
-63b3cbe00be80ccb4b221aad64eb657ae5c96d70
@@ -1,61 +0,0 @@
-{
-  self,
-  nixpkgs,
-  deploy-rs,
-  system,
-  ...
-}:
-let
-  pkgs = nixpkgs.legacyPackages.${system};
-
-  statix' = pkgs.statix.overrideAttrs (old: {
-    patches = old.patches ++ [
-      (pkgs.fetchpatch {
-        url = "https://github.com/oppiliappan/statix/commit/925dec39bb705acbbe77178b4d658fe1b752abbb.patch";
-        hash = "sha256-0wacO6wuYJ4ufN9PGucRVJucFdFFNF+NoHYIrLXsCWs=";
-      })
-    ];
-  });
-
-  runNuCheck =
-    {
-      name,
-      packages,
-      check,
-    }:
-    pkgs.stdenvNoCC.mkDerivation {
-      inherit name;
-
-      src = nixpkgs.lib.cleanSourceWith {
-        src = self;
-        filter = nixpkgs.lib.cleanSourceFilter;
-      };
-
-      dontPatch = true;
-      dontConfigure = true;
-      dontBuild = true;
-      dontInstall = true;
-      dontFixup = true;
-      doCheck = true;
-
-      checkInputs = nixpkgs.lib.singleton pkgs.nushell ++ packages;
-
-      checkPhase = ''
-        nu ${check}
-      '';
-    };
-in
-nixpkgs.lib.recursiveUpdate {
-  lints = runNuCheck {
-    name = "lints";
-
-    packages = [
-      pkgs.deadnix
-      pkgs.nixfmt-rfc-style
-      pkgs.shellcheck
-      statix'
-    ];
-
-    check = ./lints.nu;
-  };
-} (deploy-rs.lib.${system}.deployChecks self.deploy)
@@ -1,39 +0,0 @@
-#!/usr/bin/env nu
-
-let shell_files = ls **/*.sh | get name
-let nix_files = ls **/*.nix | where name !~ "hardware-configuration.nix|_sources" | get name
-
-let linters = [
-  ([shellcheck] ++ $shell_files)
-  ([nixfmt --check --strict] ++ $nix_files)
-  ([deadnix --fail] ++ $nix_files)
-  ([statix check] ++ $nix_files)
-]
-
-mkdir $env.out
-
-def run-linter [linterArgs: list<string>] {
-  print $'Running ($linterArgs.0)...'
-
-  let exit_code = try {
-    ^$linterArgs.0 ...($linterArgs | skip 1)
-    $env.LAST_EXIT_CODE
-  } catch {|e| $e.exit_code}
-
-  [$linterArgs.0, $exit_code]
-}
-
-let results = $linters | each {|linter| run-linter $linter}
-
-print 'Linter results:'
-
-let success = $results | each {|result|
-  match $result.1 {
-    0 => {print $'(ansi green)($result.0)(ansi reset)'}
-    _ => {print $'(ansi red)($result.0)(ansi reset)'}
-  }
-
-  $result.1
-} | math sum
-
-exit $success
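The two deleted files above made up the flake's custom `checks` output: a derivation that ran the Nushell lint script and merged in the deploy-rs checks. The flake.nix hunk near the end of this compare wires `checks` to deploy-rs directly instead. A minimal sketch of that replacement shape, assuming the same `deploy-rs` and `self` flake inputs:

  {
    # Per-system deploy-rs checks generated from the flake's deploy config;
    # this mirrors the replacement line shown in the flake.nix hunk further down.
    checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
  }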
@@ -1,5 +1,7 @@
 {
   config,
+  pkgs,
+  lib,
   modulesPath,
   flake-inputs,
   ...
@@ -29,7 +31,13 @@
     ./sops.nix
   ];
 
-  nixpkgs.overlays = [ (_: prev: { local = import ../pkgs { pkgs = prev; }; }) ];
+  nixpkgs.overlays = [
+    (final: prev: {
+      local = import ../pkgs {
+        pkgs = prev;
+      };
+    })
+  ];
 
   nix = {
     extraOptions = ''
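The overlay being reformatted above follows the standard two-argument overlay shape: `final` is the fixed point after all overlays, `prev` the package set before this one is applied. A minimal sketch of how the resulting `pkgs.local` attribute would be consumed (the package name is made up for illustration):

  { pkgs, ... }:
  {
    nixpkgs.overlays = [
      # Everything returned here is merged into the final package set.
      (final: prev: { local = import ../pkgs { pkgs = prev; }; })
    ];

    # The overlay output is then addressable as pkgs.local.<name>;
    # "example-tool" is a placeholder, not a package from this repository.
    environment.systemPackages = [ pkgs.local.example-tool ];
  }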
@@ -6,35 +6,26 @@
   boot.kernelParams = [ "nomodeset" ];
 
   networking.hostName = "testvm";
-
-  services = {
-    # Sets the base domain for nginx to a local domain so that we can
-    # easily test locally with the VM.
-    nginx.domain = "dev.local";
-
-    # Don't run this
-    batteryManager.enable = lib.mkForce false;
-
-    openssh.hostKeys = lib.mkForce [
-      {
-        type = "rsa";
-        bits = 4096;
-        path = "/etc/staging.key";
-      }
-    ];
-  };
+  # Sets the base domain for nginx to a local domain so that we can
+  # easily test locally with the VM.
+  services.nginx.domain = "dev.local";
 
   # Use the staging secrets
   sops.defaultSopsFile = lib.mkOverride 99 ../../keys/staging.yaml;
 
   systemd.network.networks."10-eth0" = {
     matchConfig.Name = "eth0";
-    gateway = [ "192.168.9.1" ];
+    gateway = [
+      "192.168.9.1"
+    ];
     networkConfig = {
       Address = "192.168.9.2/24";
     };
   };
 
+  # Don't run this
+  services.batteryManager.enable = lib.mkForce false;
+
   # Both so we have a predictable key for the staging env, as well as
   # to have a static key for decrypting the sops secrets for the
   # staging env.
@@ -43,6 +34,14 @@
     source = ../../keys/hosts/staging.key;
   };
 
+  services.openssh.hostKeys = lib.mkForce [
+    {
+      type = "rsa";
+      bits = 4096;
+      path = "/etc/staging.key";
+    }
+  ];
+
   virtualisation.vmVariant = {
     virtualisation = {
       memorySize = 3941;
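The staging overrides in this file rely on NixOS module priorities: a plain definition has priority 100 and `lib.mkForce` is `lib.mkOverride 50`, so the `lib.mkOverride 99` used for the sops file beats a normal definition elsewhere while still yielding to any `mkForce`. A minimal sketch using the same options as above:

  { lib, ... }:
  {
    # Plain assignment: priority 100.
    services.nginx.domain = "dev.local";

    # Priority 99: wins over plain assignments, loses to mkForce.
    sops.defaultSopsFile = lib.mkOverride 99 ../../keys/staging.yaml;

    # Priority 50: the strongest override used in this file.
    services.batteryManager.enable = lib.mkForce false;
  }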
@@ -1,50 +1,41 @@
 { config, lib, ... }:
 {
-  services = {
-    nginx = {
-      enable = true;
-      recommendedTlsSettings = true;
-      recommendedOptimisation = true;
-      recommendedGzipSettings = true;
-      recommendedProxySettings = true;
-      clientMaxBodySize = "10G";
-
-      statusPage = true; # For metrics, should be accessible only from localhost
-
-      commonHttpConfig = ''
-        log_format upstream_time '$remote_addr - $remote_user [$time_local] '
-        '"$request" $status $body_bytes_sent '
-        '"$http_referer" "$http_user_agent" '
-        'rt=$request_time uct="$upstream_connect_time" '
-        'uht="$upstream_header_time" urt="$upstream_response_time"';
-      '';
-    };
-
-    logrotate.settings =
-      {
-        # Override the default, just keep fewer logs
-        nginx.rotate = 6;
-      }
-      // lib.mapAttrs' (
-        virtualHost: _:
-        lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
-          frequency = "daily";
-          rotate = 2;
-          compress = true;
-          delaycompress = true;
-          su = "${config.services.nginx.user} ${config.services.nginx.group}";
-          postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
-        }
-      ) config.services.nginx.virtualHosts;
-
-    backups.acme = {
-      user = "acme";
-      paths = lib.mapAttrsToList (
-        virtualHost: _: "/var/lib/acme/${virtualHost}"
-      ) config.services.nginx.virtualHosts;
-    };
-  };
+  services.nginx = {
+    enable = true;
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+    recommendedProxySettings = true;
+    clientMaxBodySize = "10G";
+
+    statusPage = true; # For metrics, should be accessible only from localhost
+
+    commonHttpConfig = ''
+      log_format upstream_time '$remote_addr - $remote_user [$time_local] '
+      '"$request" $status $body_bytes_sent '
+      '"$http_referer" "$http_user_agent" '
+      'rt=$request_time uct="$upstream_connect_time" '
+      'uht="$upstream_header_time" urt="$upstream_response_time"';
+    '';
+  };
+
+  services.logrotate.settings =
+    {
+      # Override the default, just keep fewer logs
+      nginx.rotate = 6;
+    }
+    // lib.mapAttrs' (
+      virtualHost: _:
+      lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
+        frequency = "daily";
+        rotate = 2;
+        compress = true;
+        delaycompress = true;
+        su = "${config.services.nginx.user} ${config.services.nginx.group}";
+        postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
+      }
+    ) config.services.nginx.virtualHosts;
 
   systemd.tmpfiles.rules = lib.mapAttrsToList (
     virtualHost: _:
     #
@@ -75,4 +66,11 @@
   systemd.services.nginx.serviceConfig.SupplementaryGroups = [
     config.security.acme.certs."tlater.net".group
   ];
+
+  services.backups.acme = {
+    user = "acme";
+    paths = lib.mapAttrsToList (
+      virtualHost: _: "/var/lib/acme/${virtualHost}"
+    ) config.services.nginx.virtualHosts;
+  };
 }
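Both the logrotate entries and the ACME backup paths in this file are derived from `config.services.nginx.virtualHosts`. A minimal, self-contained sketch of the two library calls involved (the host names are invented):

  let
    lib = (import <nixpkgs> { }).lib;
    virtualHosts = { "example.org" = { }; "blog.example.org" = { }; };
  in
  {
    # mapAttrsToList: attribute set -> list
    paths = lib.mapAttrsToList (name: _: "/var/lib/acme/${name}") virtualHosts;

    # mapAttrs': attribute set -> attribute set with rewritten keys
    logrotate = lib.mapAttrs' (
      name: _: lib.nameValuePair "/var/log/nginx/${name}/access.log" { rotate = 2; }
    ) virtualHosts;
  }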
@@ -57,7 +57,7 @@ in
       '';
       type = types.attrsOf (
         types.submodule (
-          { name, ... }:
+          { config, name, ... }:
           {
             options = {
               user = lib.mkOption {
@@ -246,7 +246,7 @@ in
     };
   }
   // lib.mapAttrs' (
-    name: _:
+    name: backup:
     lib.nameValuePair "backup-${name}" {
       wantedBy = [ "timers.target" ];
       timerConfig = {
@@ -18,153 +18,33 @@ in
     ./matrix-hookshot.nix
   ];
 
-  services = {
-    matrix-conduit = {
-      enable = true;
-      package = flake-inputs.continuwuity.packages.${pkgs.system}.default;
-      settings.global = {
-        address = "127.0.0.1";
-        server_name = domain;
-        new_user_displayname_suffix = "🦆";
-        allow_check_for_updates = true;
-
-        # Set up delegation: https://docs.conduit.rs/delegation.html#automatic-recommended
-        # This is primarily to make sliding sync work
-        well_known = {
-          client = "https://${domain}";
-          server = "${domain}:443";
-        };
-
-        turn_uris =
-          let
-            address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
-            tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
-          in
-          [
-            "turn:${address}?transport=udp"
-            "turn:${address}?transport=tcp"
-            "turns:${tls-address}?transport=udp"
-            "turns:${tls-address}?transport=tcp"
-          ];
-      };
-    };
-
-    coturn = {
-      enable = true;
-      no-cli = true;
-      use-auth-secret = true;
-      static-auth-secret-file = config.sops.secrets."turn/secret".path;
-      realm = turn-realm;
-      relay-ips = [ "116.202.158.55" ];
-
-      # SSL config
-      pkey = "${config.security.acme.certs."tlater.net".directory}/key.pem";
-      cert = "${config.security.acme.certs."tlater.net".directory}/fullchain.pem";
-
-      # Based on suggestions from
-      # https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md
-      # and
-      # https://www.foxypossibilities.com/2018/05/19/setting-up-a-turn-sever-for-matrix-on-nixos/
-      no-tcp-relay = true;
-      secure-stun = true;
-      extraConfig = ''
-        # Deny various local IP ranges, see
-        # https://www.rtcsec.com/article/cve-2020-26262-bypass-of-coturns-access-control-protection/
-        no-multicast-peers
-        denied-peer-ip=0.0.0.0-0.255.255.255
-        denied-peer-ip=10.0.0.0-10.255.255.255
-        denied-peer-ip=100.64.0.0-100.127.255.255
-        denied-peer-ip=127.0.0.0-127.255.255.255
-        denied-peer-ip=169.254.0.0-169.254.255.255
-        denied-peer-ip=172.16.0.0-172.31.255.255
-        denied-peer-ip=192.0.0.0-192.0.0.255
-        denied-peer-ip=192.0.2.0-192.0.2.255
-        denied-peer-ip=192.88.99.0-192.88.99.255
-        denied-peer-ip=192.168.0.0-192.168.255.255
-        denied-peer-ip=198.18.0.0-198.19.255.255
-        denied-peer-ip=198.51.100.0-198.51.100.255
-        denied-peer-ip=203.0.113.0-203.0.113.255
-        denied-peer-ip=240.0.0.0-255.255.255.255 denied-peer-ip=::1
-        denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
-        denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
-        denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
-        denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
-        denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
-        denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
-        denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
-
-        # *Allow* any IP addresses that we explicitly set as relay IPs
-        ${concatMapStringsSep "\n" (ip: "allowed-peer-ip=${ip}") config.services.coturn.relay-ips}
-
-        # Various other security settings
-        no-tlsv1
-        no-tlsv1_1
-
-        # Monitoring
-        prometheus
-      '';
-    };
-
-    nginx.virtualHosts."${domain}" = {
-      useACMEHost = "tlater.net";
-
-      listen = [
-        {
-          addr = "0.0.0.0";
-          port = 80;
-        }
-        {
-          addr = "[::0]";
-          port = 80;
-        }
-        {
-          addr = "0.0.0.0";
-          port = 443;
-          ssl = true;
-        }
-        {
-          addr = "[::0]";
-          port = 443;
-          ssl = true;
-        }
-        {
-          addr = "0.0.0.0";
-          port = 8448;
-          ssl = true;
-        }
-        {
-          addr = "[::0]";
-          port = 8448;
-          ssl = true;
-        }
-      ];
-
-      forceSSL = true;
-      enableHSTS = true;
-      extraConfig = ''
-        merge_slashes off;
-      '';
-
-      locations = {
-        "/_matrix" = {
-          proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
-          # Recommended by conduit
-          extraConfig = ''
-            proxy_buffering off;
-          '';
-        };
-        "/.well-known/matrix" = {
-          proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
-        };
-      };
-    };
-
-    backups.conduit = {
-      user = "root";
-      paths = [ "/var/lib/private/matrix-conduit/" ];
-      # Other services store their data in conduit, so no other services
-      # need to be shut down currently.
-      pauseServices = [ "conduit.service" ];
+  services.matrix-conduit = {
+    enable = true;
+    package = flake-inputs.continuwuity.packages.${pkgs.system}.default;
+    settings.global = {
+      address = "127.0.0.1";
+      server_name = domain;
+      new_user_displayname_suffix = "🦆";
+      allow_check_for_updates = true;
+
+      # Set up delegation: https://docs.conduit.rs/delegation.html#automatic-recommended
+      # This is primarily to make sliding sync work
+      well_known = {
+        client = "https://${domain}";
+        server = "${domain}:443";
+      };
+
+      turn_uris =
+        let
+          address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
+          tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
+        in
+        [
+          "turn:${address}?transport=udp"
+          "turn:${address}?transport=tcp"
+          "turns:${tls-address}?transport=udp"
+          "turns:${tls-address}?transport=tcp"
+        ];
     };
   };
@@ -180,4 +60,122 @@ in
   systemd.services.coturn.serviceConfig.SupplementaryGroups = [
     config.security.acme.certs."tlater.net".group
   ];
 
+  services.coturn = {
+    enable = true;
+    no-cli = true;
+    use-auth-secret = true;
+    static-auth-secret-file = config.sops.secrets."turn/secret".path;
+    realm = turn-realm;
+    relay-ips = [ "116.202.158.55" ];
+
+    # SSL config
+    pkey = "${config.security.acme.certs."tlater.net".directory}/key.pem";
+    cert = "${config.security.acme.certs."tlater.net".directory}/fullchain.pem";
+
+    # Based on suggestions from
+    # https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md
+    # and
+    # https://www.foxypossibilities.com/2018/05/19/setting-up-a-turn-sever-for-matrix-on-nixos/
+    no-tcp-relay = true;
+    secure-stun = true;
+    extraConfig = ''
+      # Deny various local IP ranges, see
+      # https://www.rtcsec.com/article/cve-2020-26262-bypass-of-coturns-access-control-protection/
+      no-multicast-peers
+      denied-peer-ip=0.0.0.0-0.255.255.255
+      denied-peer-ip=10.0.0.0-10.255.255.255
+      denied-peer-ip=100.64.0.0-100.127.255.255
+      denied-peer-ip=127.0.0.0-127.255.255.255
+      denied-peer-ip=169.254.0.0-169.254.255.255
+      denied-peer-ip=172.16.0.0-172.31.255.255
+      denied-peer-ip=192.0.0.0-192.0.0.255
+      denied-peer-ip=192.0.2.0-192.0.2.255
+      denied-peer-ip=192.88.99.0-192.88.99.255
+      denied-peer-ip=192.168.0.0-192.168.255.255
+      denied-peer-ip=198.18.0.0-198.19.255.255
+      denied-peer-ip=198.51.100.0-198.51.100.255
+      denied-peer-ip=203.0.113.0-203.0.113.255
+      denied-peer-ip=240.0.0.0-255.255.255.255 denied-peer-ip=::1
+      denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
+      denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
+      denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
+      denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
+      denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+      denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+      denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+
+      # *Allow* any IP addresses that we explicitly set as relay IPs
+      ${concatMapStringsSep "\n" (ip: "allowed-peer-ip=${ip}") config.services.coturn.relay-ips}
+
+      # Various other security settings
+      no-tlsv1
+      no-tlsv1_1
+
+      # Monitoring
+      prometheus
+    '';
+  };
+
+  services.nginx.virtualHosts."${domain}" = {
+    useACMEHost = "tlater.net";
+
+    listen = [
+      {
+        addr = "0.0.0.0";
+        port = 80;
+      }
+      {
+        addr = "[::0]";
+        port = 80;
+      }
+      {
+        addr = "0.0.0.0";
+        port = 443;
+        ssl = true;
+      }
+      {
+        addr = "[::0]";
+        port = 443;
+        ssl = true;
+      }
+      {
+        addr = "0.0.0.0";
+        port = 8448;
+        ssl = true;
+      }
+      {
+        addr = "[::0]";
+        port = 8448;
+        ssl = true;
+      }
+    ];
+
+    forceSSL = true;
+    enableHSTS = true;
+    extraConfig = ''
+      merge_slashes off;
+    '';
+
+    locations = {
+      "/_matrix" = {
+        proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
+        # Recommended by conduit
+        extraConfig = ''
+          proxy_buffering off;
+        '';
+      };
+      "/.well-known/matrix" = {
+        proxyPass = "http://${cfg.settings.global.address}:${toString cfg.settings.global.port}";
+      };
+    };
+  };
+
+  services.backups.conduit = {
+    user = "root";
+    paths = [ "/var/lib/private/matrix-conduit/" ];
+    # Other services store their data in conduit, so no other services
+    # need to be shut down currently.
+    pauseServices = [ "conduit.service" ];
+  };
 }
@@ -74,7 +74,9 @@ in
   services.matrix-hookshot = {
     enable = true;
 
-    serviceDependencies = [ "conduit.service" ];
+    serviceDependencies = [
+      "conduit.service"
+    ];
 
     registrationFile = "/run/matrix-hookshot/registration.yaml";
@@ -124,11 +126,15 @@ in
     listeners = [
       {
         port = 9000;
-        resources = [ "webhooks" ];
+        resources = [
+          "webhooks"
+        ];
       }
       {
         port = 9001;
-        resources = [ "metrics" ];
+        resources = [
+          "metrics"
+        ];
      }
    ];
@@ -8,7 +8,9 @@
   security.crowdsec = {
     enable = true;
 
-    parserWhitelist = [ "10.45.249.2" ];
+    parserWhitelist = [
+      "10.45.249.2"
+    ];
 
     extraGroups = [
       "systemd-journal"
@@ -19,19 +21,25 @@
       {
         source = "journalctl";
         labels.type = "syslog";
-        journalctl_filter = [ "SYSLOG_IDENTIFIER=Nextcloud" ];
+        journalctl_filter = [
+          "SYSLOG_IDENTIFIER=Nextcloud"
+        ];
       }
 
       {
         source = "journalctl";
         labels.type = "syslog";
-        journalctl_filter = [ "SYSLOG_IDENTIFIER=sshd-session" ];
+        journalctl_filter = [
+          "SYSLOG_IDENTIFIER=sshd-session"
+        ];
       }
 
       {
         labels.type = "nginx";
         filenames =
-          [ "/var/log/nginx/*.log" ]
+          [
+            "/var/log/nginx/*.log"
+          ]
           ++ lib.mapAttrsToList (
             vHost: _: "/var/log/nginx/${vHost}/access.log"
           ) config.services.nginx.virtualHosts;
@@ -11,39 +11,37 @@ in
 {
   imports = [ flake-inputs.foundryvtt.nixosModules.foundryvtt ];
 
-  services = {
-    foundryvtt = {
-      enable = true;
-      hostName = domain;
-      minifyStaticFiles = true;
-      proxySSL = true;
-      proxyPort = 443;
-      package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_11;
-    };
-
-    nginx.virtualHosts."${domain}" =
-      let
-        inherit (config.services.foundryvtt) port;
-      in
-      {
-        forceSSL = true;
-        useACMEHost = "tlater.net";
-        enableHSTS = true;
-
-        locations."/" = {
-          proxyWebsockets = true;
-          proxyPass = "http://localhost:${toString port}";
-        };
-      };
-
-    backups.foundryvtt = {
-      user = "foundryvtt";
-      paths = [ config.services.foundryvtt.dataDir ];
-      pauseServices = [ "foundryvtt.service" ];
-    };
+  services.foundryvtt = {
+    enable = true;
+    hostName = domain;
+    minifyStaticFiles = true;
+    proxySSL = true;
+    proxyPort = 443;
+    package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_11;
   };
 
   # Want to start it manually when I need it, not have it constantly
   # running
   systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];
+
+  services.nginx.virtualHosts."${domain}" =
+    let
+      inherit (config.services.foundryvtt) port;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
+
+      locations."/" = {
+        proxyWebsockets = true;
+        proxyPass = "http://localhost:${toString port}";
+      };
+    };
+
+  services.backups.foundryvtt = {
+    user = "foundryvtt";
+    paths = [ config.services.foundryvtt.dataDir ];
+    pauseServices = [ "foundryvtt.service" ];
+  };
 }
@@ -8,68 +8,24 @@ let
   domain = "gitea.${config.services.nginx.domain}";
 in
 {
-  services = {
-    forgejo = {
-      enable = true;
-      database.type = "postgres";
-
-      settings = {
-        server = {
-          DOMAIN = domain;
-          HTTP_ADDR = "127.0.0.1";
-          ROOT_URL = "https://${domain}/";
-          SSH_PORT = 2222;
-        };
-
-        metrics = {
-          ENABLED = true;
-          TOKEN = "#metricstoken#";
-        };
-        service.DISABLE_REGISTRATION = true;
-        session.COOKIE_SECURE = true;
-      };
-    };
-
-    # Set up SSL
-    nginx.virtualHosts."${domain}" =
-      let
-        httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
-        httpPort = config.services.forgejo.settings.server.HTTP_PORT;
-      in
-      {
-        forceSSL = true;
-        useACMEHost = "tlater.net";
-        enableHSTS = true;
-
-        locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
-        locations."/metrics" = {
-          extraConfig = ''
-            access_log off;
-            allow 127.0.0.1;
-            ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
-            deny all;
-          '';
-        };
-      };
-
-    backups.forgejo = {
-      user = "forgejo";
-      paths = [
-        "/var/lib/forgejo/forgejo-db.sql"
-        "/var/lib/forgejo/repositories/"
-        "/var/lib/forgejo/data/"
-        "/var/lib/forgejo/custom/"
-        # Conf is backed up via nix
-      ];
-      preparation = {
-        packages = [ config.services.postgresql.package ];
-        text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
-      };
-      cleanup = {
-        packages = [ pkgs.coreutils ];
-        text = "rm /var/lib/forgejo/forgejo-db.sql";
-      };
-      pauseServices = [ "forgejo.service" ];
-    };
-  };
+  services.forgejo = {
+    enable = true;
+    database.type = "postgres";
+
+    settings = {
+      server = {
+        DOMAIN = domain;
+        HTTP_ADDR = "127.0.0.1";
+        ROOT_URL = "https://${domain}/";
+        SSH_PORT = 2222;
+      };
+
+      metrics = {
+        ENABLED = true;
+        TOKEN = "#metricstoken#";
+      };
+      service.DISABLE_REGISTRATION = true;
+      session.COOKIE_SECURE = true;
+    };
+  };
@@ -80,4 +36,46 @@ in
       runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
     in
     [ "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'" ];
+
+  # Set up SSL
+  services.nginx.virtualHosts."${domain}" =
+    let
+      httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
+      httpPort = config.services.forgejo.settings.server.HTTP_PORT;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
+
+      locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
+      locations."/metrics" = {
+        extraConfig = ''
+          access_log off;
+          allow 127.0.0.1;
+          ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
+          deny all;
+        '';
+      };
+    };
+
+  services.backups.forgejo = {
+    user = "forgejo";
+    paths = [
+      "/var/lib/forgejo/forgejo-db.sql"
+      "/var/lib/forgejo/repositories/"
+      "/var/lib/forgejo/data/"
+      "/var/lib/forgejo/custom/"
+      # Conf is backed up via nix
+    ];
+    preparation = {
+      packages = [ config.services.postgresql.package ];
+      text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
+    };
+    cleanup = {
+      packages = [ pkgs.coreutils ];
+      text = "rm /var/lib/forgejo/forgejo-db.sql";
+    };
+    pauseServices = [ "forgejo.service" ];
+  };
 }
@@ -74,7 +74,7 @@ in
     listenAddress = "127.0.0.1";
     group = "nginx";
 
-    settings.namespaces = lib.mapAttrsToList (name: _: {
+    settings.namespaces = lib.mapAttrsToList (name: virtualHost: {
       inherit name;
       metrics_override.prefix = "nginxlog";
      namespace_label = "vhost";
@@ -38,7 +38,7 @@ in
   services.victoriametrics.scrapeConfigs = mkOption {
     type = types.attrsOf (
       types.submodule (
-        { name, ... }:
+        { name, self, ... }:
         {
           options = {
             job_name = mkOption {
@@ -106,37 +106,35 @@ in
         # module is an intractable mess
         wantedBy = [ "multi-user.target" ];
         after = [ "network.target" ];
-        serviceConfig = {
-          Restart = mkDefault "always";
-          PrivateTmp = mkDefault true;
-          WorkingDirectory = mkDefault /tmp;
-          DynamicUser = mkDefault true;
-          # Hardening
-          CapabilityBoundingSet = mkDefault [ "" ];
-          DeviceAllow = [ "" ];
-          LockPersonality = true;
-          MemoryDenyWriteExecute = true;
-          NoNewPrivileges = true;
-          PrivateDevices = mkDefault true;
-          ProtectClock = mkDefault true;
-          ProtectControlGroups = true;
-          ProtectHome = true;
-          ProtectHostname = true;
-          ProtectKernelLogs = true;
-          ProtectKernelModules = true;
-          ProtectKernelTunables = true;
-          ProtectSystem = mkDefault "strict";
-          RemoveIPC = true;
-          RestrictAddressFamilies = [
-            "AF_INET"
-            "AF_INET6"
-          ];
-          RestrictNamespaces = true;
-          RestrictRealtime = true;
-          RestrictSUIDSGID = true;
-          SystemCallArchitectures = "native";
-          UMask = "0077";
-        };
+        serviceConfig.Restart = mkDefault "always";
+        serviceConfig.PrivateTmp = mkDefault true;
+        serviceConfig.WorkingDirectory = mkDefault /tmp;
+        serviceConfig.DynamicUser = mkDefault true;
+        # Hardening
+        serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
+        serviceConfig.DeviceAllow = [ "" ];
+        serviceConfig.LockPersonality = true;
+        serviceConfig.MemoryDenyWriteExecute = true;
+        serviceConfig.NoNewPrivileges = true;
+        serviceConfig.PrivateDevices = mkDefault true;
+        serviceConfig.ProtectClock = mkDefault true;
+        serviceConfig.ProtectControlGroups = true;
+        serviceConfig.ProtectHome = true;
+        serviceConfig.ProtectHostname = true;
+        serviceConfig.ProtectKernelLogs = true;
+        serviceConfig.ProtectKernelModules = true;
+        serviceConfig.ProtectKernelTunables = true;
+        serviceConfig.ProtectSystem = mkDefault "strict";
+        serviceConfig.RemoveIPC = true;
+        serviceConfig.RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+        ];
+        serviceConfig.RestrictNamespaces = true;
+        serviceConfig.RestrictRealtime = true;
+        serviceConfig.RestrictSUIDSGID = true;
+        serviceConfig.SystemCallArchitectures = "native";
+        serviceConfig.UMask = "0077";
       }
       exporter.serviceOpts
     ]
@@ -146,7 +144,7 @@ in
 {
   vmagent-scrape-exporters =
     let
-      inherit (config.services.victoriametrics) listenAddress;
+      listenAddress = config.services.victoriametrics.listenAddress;
       vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
       promscrape = yaml.generate "prometheus.yml" {
         scrape_configs = lib.mapAttrsToList (
@@ -155,7 +153,7 @@ in
             inherit (scrape) job_name;
             static_configs =
               scrape.static_configs
-              ++ lib.optional (scrape.targets != [ ]) { inherit (scrape) targets; };
+              ++ lib.optional (scrape.targets != [ ]) { targets = scrape.targets; };
           } scrape.extraSettings
         ) config.services.victoriametrics.scrapeConfigs;
       };
@@ -214,7 +212,7 @@ in
 
   services.victoriametrics.scrapeConfigs =
     let
-      allExporters = lib.mapAttrs (_: exporter: { inherit (exporter) listenAddress port; }) (
+      allExporters = lib.mapAttrs (name: exporter: { inherit (exporter) listenAddress port; }) (
         (lib.filterAttrs (
           name: exporter:
           # A bunch of deprecated exporters that need to be ignored
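Several hunks in this file swap `inherit`-style bindings for plain assignments (and back); the two spellings are interchangeable in Nix. A minimal sketch with made-up values:

  let
    exporter = { listenAddress = "127.0.0.1"; port = 9100; };

    # Identical attribute sets, written two ways:
    a = { inherit (exporter) listenAddress port; };
    b = { listenAddress = exporter.listenAddress; port = exporter.port; };
  in
  a == b # evaluates to true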
@@ -1,4 +1,8 @@
-{ config, lib, ... }:
+{
+  config,
+  lib,
+  ...
+}:
 let
   cfg = config.services.victorialogs;
 in
@@ -9,95 +9,93 @@ let
   hostName = "nextcloud.${config.services.nginx.domain}";
 in
 {
-  services = {
-    nextcloud = {
-      inherit hostName;
-
-      package = nextcloud;
-      phpPackage = lib.mkForce (
-        pkgs.php.override {
-          packageOverrides = _: prev: {
-            extensions = prev.extensions // {
-              pgsql = prev.extensions.pgsql.overrideAttrs (_: {
-                configureFlags = [ "--with-pgsql=${lib.getDev config.services.postgresql.package}" ];
-              });
-              pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (_: {
-                configureFlags = [ "--with-pdo-pgsql=${lib.getDev config.services.postgresql.package}" ];
-              });
-            };
-          };
-        }
-      );
-      enable = true;
-      maxUploadSize = "2G";
-      https = true;
-
-      configureRedis = true;
-
-      config = {
-        dbtype = "pgsql";
-        dbhost = "/run/postgresql";
-
-        adminuser = "tlater";
-        adminpassFile = config.sops.secrets."nextcloud/tlater".path;
-      };
-
-      settings = {
-        default_phone_region = "AT";
-        overwriteprotocol = "https";
-      };
-
-      phpOptions = {
-        "opcache.interned_strings_buffer" = "16";
-      };
-
-      extraApps = {
-        inherit (config.services.nextcloud.package.packages.apps)
-          calendar
-          contacts
-          cookbook
-          news
-          ;
-      };
-    };
-
-    # Set up SSL
-    nginx.virtualHosts."${hostName}" = {
-      forceSSL = true;
-      useACMEHost = "tlater.net";
-      # The upstream module already adds HSTS
-    };
-
-    backups.nextcloud = {
-      user = "nextcloud";
-      paths = [
-        "/var/lib/nextcloud/nextcloud-db.sql"
-        "/var/lib/nextcloud/data/"
-        "/var/lib/nextcloud/config/config.php"
-      ];
-      preparation = {
-        packages = [
-          config.services.postgresql.package
-          config.services.nextcloud.occ
-        ];
-        text = ''
-          nextcloud-occ maintenance:mode --on
-          pg_dump ${config.services.nextcloud.config.dbname} --file=/var/lib/nextcloud/nextcloud-db.sql
-        '';
-      };
-      cleanup = {
-        packages = [
-          pkgs.coreutils
-          config.services.nextcloud.occ
-        ];
-        text = ''
-          rm /var/lib/nextcloud/nextcloud-db.sql
-          nextcloud-occ maintenance:mode --off
-        '';
-      };
-    };
-  };
+  services.nextcloud = {
+    inherit hostName;
+
+    package = nextcloud;
+    phpPackage = lib.mkForce (
+      pkgs.php.override {
+        packageOverrides = final: prev: {
+          extensions = prev.extensions // {
+            pgsql = prev.extensions.pgsql.overrideAttrs (old: {
+              configureFlags = [ "--with-pgsql=${lib.getDev config.services.postgresql.package}" ];
+            });
+            pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (old: {
+              configureFlags = [ "--with-pdo-pgsql=${lib.getDev config.services.postgresql.package}" ];
+            });
+          };
+        };
+      }
+    );
+    enable = true;
+    maxUploadSize = "2G";
+    https = true;
+
+    configureRedis = true;
+
+    config = {
+      dbtype = "pgsql";
+      dbhost = "/run/postgresql";
+
+      adminuser = "tlater";
+      adminpassFile = config.sops.secrets."nextcloud/tlater".path;
+    };
+
+    settings = {
+      default_phone_region = "AT";
+      overwriteprotocol = "https";
+    };
+
+    phpOptions = {
+      "opcache.interned_strings_buffer" = "16";
+    };
+
+    extraApps = {
+      inherit (config.services.nextcloud.package.packages.apps)
+        calendar
+        contacts
+        cookbook
+        news
+        ;
+    };
+  };
 
   # Ensure that this service doesn't start before postgres is ready
   systemd.services.nextcloud-setup.after = [ "postgresql.service" ];
+
+  # Set up SSL
+  services.nginx.virtualHosts."${hostName}" = {
+    forceSSL = true;
+    useACMEHost = "tlater.net";
+    # The upstream module already adds HSTS
+  };
+
+  services.backups.nextcloud = {
+    user = "nextcloud";
+    paths = [
+      "/var/lib/nextcloud/nextcloud-db.sql"
+      "/var/lib/nextcloud/data/"
+      "/var/lib/nextcloud/config/config.php"
+    ];
+    preparation = {
+      packages = [
+        config.services.postgresql.package
+        config.services.nextcloud.occ
+      ];
+      text = ''
+        nextcloud-occ maintenance:mode --on
+        pg_dump ${config.services.nextcloud.config.dbname} --file=/var/lib/nextcloud/nextcloud-db.sql
+      '';
+    };
+    cleanup = {
+      packages = [
+        pkgs.coreutils
+        config.services.nextcloud.occ
+      ];
+      text = ''
+        rm /var/lib/nextcloud/nextcloud-db.sql
+        nextcloud-occ maintenance:mode --off
+      '';
+    };
+  };
 }
|
@ -1,6 +1,6 @@
|
||||||
{ config, ... }:
|
{ config, ... }:
|
||||||
let
|
let
|
||||||
inherit (config.services.nginx) domain;
|
domain = config.services.nginx.domain;
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
services.tlaternet-webserver = {
|
services.tlaternet-webserver = {
|
||||||
|
|
|
||||||
|
|
@@ -96,7 +96,7 @@
   #########
   # Tests #
   #########
-  checks.${system} = import ./checks (inputs // { inherit system; });
+  checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
 
   ###########################
   # Garbage collection root #
@@ -121,6 +121,8 @@
         run-vm = {
           type = "app";
           program =
+            let
+            in
             (pkgs.writeShellScript "" ''
               ${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
             '').outPath;
@@ -267,7 +267,9 @@ in
     };
   };
 
-  systemd.packages = [ cfg.package ];
+  systemd.packages = [
+    cfg.package
+  ];
 
   environment = {
     systemPackages = [
@@ -6,11 +6,10 @@
   ...
 }:
 let
-  inherit (flake-inputs.self.packages.${pkgs.system}) crowdsec-firewall-bouncer;
-
   crowdsecCfg = config.security.crowdsec;
   cfg = crowdsecCfg.remediationComponents.firewallBouncer;
   settingsFormat = pkgs.formats.yaml { };
+  crowdsec-firewall-bouncer = flake-inputs.self.packages.${pkgs.system}.crowdsec-firewall-bouncer;
 in
 {
   options.security.crowdsec.remediationComponents.firewallBouncer = {
@@ -32,7 +31,9 @@ in
   security.crowdsec.remediationComponents.firewallBouncer.settings = {
     mode = lib.mkDefault "${if config.networking.nftables.enable then "nftables" else "iptables"}";
     log_mode = "stdout";
-    iptables_chains = [ "nixos-fw" ];
+    iptables_chains = [
+      "nixos-fw"
+    ];
 
     # Don't let users easily override this; unfortunately we need to
     # set up this key through substitution at runtime.
@@ -77,7 +78,9 @@ in
     requiredBy = [ "crowdsec.service" ];
 
     path =
-      lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [ pkgs.ipset ]
+      lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [
+        pkgs.ipset
+      ]
       ++ lib.optional (cfg.settings.mode == "iptables") pkgs.iptables
       ++ lib.optional (cfg.settings.mode == "nftables") pkgs.nftables;
   };
|
@ -1 +1,5 @@
|
||||||
{ imports = [ ./cs-firewall-bouncer.nix ]; }
|
{
|
||||||
|
imports = [
|
||||||
|
./cs-firewall-bouncer.nix
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -1 +1,4 @@
|
||||||
{ sources }: sources.crowdsec-hub.src
|
{
|
||||||
|
sources,
|
||||||
|
}:
|
||||||
|
sources.crowdsec-hub.src
|
||||||
|
|
|
||||||