style(treewide): Apply linter suggestions
parent 5c6b697e55
commit 63b3cbe00b
20 changed files with 432 additions and 450 deletions
checks
configuration
flake.nix
modules/crowdsec
pkgs/crowdsec
@@ -8,6 +8,15 @@
let
pkgs = nixpkgs.legacyPackages.${system};

statix' = pkgs.statix.overrideAttrs (old: {
patches = old.patches ++ [
(pkgs.fetchpatch {
url = "https://github.com/oppiliappan/statix/commit/925dec39bb705acbbe77178b4d658fe1b752abbb.patch";
hash = "sha256-0wacO6wuYJ4ufN9PGucRVJucFdFFNF+NoHYIrLXsCWs=";
})
];
});

runNuCheck =
{
name,
@@ -44,7 +53,7 @@ nixpkgs.lib.recursiveUpdate {
pkgs.deadnix
pkgs.nixfmt-rfc-style
pkgs.shellcheck
pkgs.statix
statix'
];

check = ./lints.nu;

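The statix' definition above is the stock overrideAttrs + fetchpatch pattern for carrying an unreleased upstream fix on top of the packaged tool. A minimal standalone sketch of that pattern, with a placeholder package name, URL, and hash (none of these are from this commit):

{ pkgs }:

pkgs.somePackage.overrideAttrs (old: {
  # Append a patch pulled straight from an upstream commit; fetchpatch
  # normalizes it and pins it by hash (pkgs.lib.fakeHash is the usual
  # stand-in until the real hash is known).
  patches = (old.patches or [ ]) ++ [
    (pkgs.fetchpatch {
      url = "https://example.org/some-upstream-fix.patch";
      hash = pkgs.lib.fakeHash;
    })
  ];
})
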
@@ -1,7 +1,5 @@
{
config,
pkgs,
lib,
modulesPath,
flake-inputs,
...
@@ -31,13 +29,7 @@
./sops.nix
];

nixpkgs.overlays = [
(final: prev: {
local = import ../pkgs {
pkgs = prev;
};
})
];
nixpkgs.overlays = [ (_: prev: { local = import ../pkgs { pkgs = prev; }; }) ];

nix = {
extraOptions = ''

@@ -6,26 +6,35 @@
boot.kernelParams = [ "nomodeset" ];

networking.hostName = "testvm";

services = {
# Sets the base domain for nginx to a local domain so that we can
# easily test locally with the VM.
services.nginx.domain = "dev.local";
nginx.domain = "dev.local";

# Don't run this
batteryManager.enable = lib.mkForce false;

openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];
};

# Use the staging secrets
sops.defaultSopsFile = lib.mkOverride 99 ../../keys/staging.yaml;

systemd.network.networks."10-eth0" = {
matchConfig.Name = "eth0";
gateway = [
"192.168.9.1"
];
gateway = [ "192.168.9.1" ];
networkConfig = {
Address = "192.168.9.2/24";
};
};

# Don't run this
services.batteryManager.enable = lib.mkForce false;

# Both so we have a predictable key for the staging env, as well as
# to have a static key for decrypting the sops secrets for the
# staging env.
@@ -34,14 +43,6 @@
source = ../../keys/hosts/staging.key;
};

services.openssh.hostKeys = lib.mkForce [
{
type = "rsa";
bits = 4096;
path = "/etc/staging.key";
}
];

virtualisation.vmVariant = {
virtualisation = {
memorySize = 3941;

@@ -1,6 +1,7 @@
{ config, lib, ... }:
{
services.nginx = {
services = {
nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
@@ -19,7 +20,7 @@
'';
};

services.logrotate.settings =
logrotate.settings =
{
# Override the default, just keep fewer logs
nginx.rotate = 6;
@@ -36,6 +37,14 @@
}
) config.services.nginx.virtualHosts;

backups.acme = {
user = "acme";
paths = lib.mapAttrsToList (
virtualHost: _: "/var/lib/acme/${virtualHost}"
) config.services.nginx.virtualHosts;
};
};

systemd.tmpfiles.rules = lib.mapAttrsToList (
virtualHost: _:
#
@@ -66,11 +75,4 @@
systemd.services.nginx.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];

services.backups.acme = {
user = "acme";
paths = lib.mapAttrsToList (
virtualHost: _: "/var/lib/acme/${virtualHost}"
) config.services.nginx.virtualHosts;
};
}

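Most of the churn in this file comes from one recurring linter suggestion: merge repeated services.* keys into a single services attribute set. Reduced to a toy module (option names are illustrative, not taken from this repository), the rewrite looks like this:

{ ... }:

{
  # Before:
  #   services.nginx.enable = true;
  #   services.logrotate.settings.nginx.rotate = 6;
  # After, both options live under one `services` block:
  services = {
    nginx.enable = true;
    logrotate.settings.nginx.rotate = 6;
  };
}
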
@@ -57,7 +57,7 @@ in
'';
type = types.attrsOf (
types.submodule (
{ config, name, ... }:
{ name, ... }:
{
options = {
user = lib.mkOption {
@@ -246,7 +246,7 @@ in
};
}
// lib.mapAttrs' (
name: backup:
name: _:
lib.nameValuePair "backup-${name}" {
wantedBy = [ "timers.target" ];
timerConfig = {

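The second hunk above only renames the unused backup argument to _ (a deadnix suggestion); the lib.mapAttrs' call still builds one "backup-<name>" unit per configured backup. A repl-style sketch with made-up backup names:

let
  inherit (import <nixpkgs> { }) lib;
in
lib.mapAttrs' (name: _: lib.nameValuePair "backup-${name}" { wantedBy = [ "timers.target" ]; }) {
  forgejo = { };
  nextcloud = { };
}
# => { backup-forgejo = { wantedBy = [ "timers.target" ]; };
#      backup-nextcloud = { wantedBy = [ "timers.target" ]; }; }
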
@@ -18,7 +18,8 @@ in
./matrix-hookshot.nix
];

services.matrix-conduit = {
services = {
matrix-conduit = {
enable = true;
package = flake-inputs.continuwuity.packages.${pkgs.system}.default;
settings.global = {
@@ -48,20 +49,7 @@ in
};
};

systemd.services.conduit.serviceConfig = {
ExecStart = lib.mkForce "${config.services.matrix-conduit.package}/bin/conduwuit";
# Pass in the TURN secret via EnvironmentFile, not supported by
# upstream module currently.
#
# See also https://gitlab.com/famedly/conduit/-/issues/314
EnvironmentFile = config.sops.secrets."turn/env".path;
};

systemd.services.coturn.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];

services.coturn = {
coturn = {
enable = true;
no-cli = true;
use-auth-secret = true;
@@ -117,7 +105,7 @@ in
'';
};

services.nginx.virtualHosts."${domain}" = {
nginx.virtualHosts."${domain}" = {
useACMEHost = "tlater.net";

listen = [
@@ -171,11 +159,25 @@ in
};
};

services.backups.conduit = {
backups.conduit = {
user = "root";
paths = [ "/var/lib/private/matrix-conduit/" ];
# Other services store their data in conduit, so no other services
# need to be shut down currently.
pauseServices = [ "conduit.service" ];
};
};

systemd.services.conduit.serviceConfig = {
ExecStart = lib.mkForce "${config.services.matrix-conduit.package}/bin/conduwuit";
# Pass in the TURN secret via EnvironmentFile, not supported by
# upstream module currently.
#
# See also https://gitlab.com/famedly/conduit/-/issues/314
EnvironmentFile = config.sops.secrets."turn/env".path;
};

systemd.services.coturn.serviceConfig.SupplementaryGroups = [
config.security.acme.certs."tlater.net".group
];
}

@@ -74,9 +74,7 @@ in
services.matrix-hookshot = {
enable = true;

serviceDependencies = [
"conduit.service"
];
serviceDependencies = [ "conduit.service" ];

registrationFile = "/run/matrix-hookshot/registration.yaml";

@@ -126,15 +124,11 @@ in
listeners = [
{
port = 9000;
resources = [
"webhooks"
];
resources = [ "webhooks" ];
}
{
port = 9001;
resources = [
"metrics"
];
resources = [ "metrics" ];
}
];

@@ -8,9 +8,7 @@
security.crowdsec = {
enable = true;

parserWhitelist = [
"10.45.249.2"
];
parserWhitelist = [ "10.45.249.2" ];

extraGroups = [
"systemd-journal"
@@ -21,25 +19,19 @@
{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [
"SYSLOG_IDENTIFIER=Nextcloud"
];
journalctl_filter = [ "SYSLOG_IDENTIFIER=Nextcloud" ];
}

{
source = "journalctl";
labels.type = "syslog";
journalctl_filter = [
"SYSLOG_IDENTIFIER=sshd-session"
];
journalctl_filter = [ "SYSLOG_IDENTIFIER=sshd-session" ];
}

{
labels.type = "nginx";
filenames =
[
"/var/log/nginx/*.log"
]
[ "/var/log/nginx/*.log" ]
++ lib.mapAttrsToList (
vHost: _: "/var/log/nginx/${vHost}/access.log"
) config.services.nginx.virtualHosts;

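The filenames expression above prepends a wildcard entry to one access log path per nginx virtual host. Evaluated against a couple of hypothetical vhosts it would yield:

let
  inherit (import <nixpkgs> { }) lib;
  # Stand-in for config.services.nginx.virtualHosts:
  virtualHosts = {
    "example.org" = { };
    "git.example.org" = { };
  };
in
[ "/var/log/nginx/*.log" ]
++ lib.mapAttrsToList (vHost: _: "/var/log/nginx/${vHost}/access.log") virtualHosts
# => [ "/var/log/nginx/*.log"
#      "/var/log/nginx/example.org/access.log"
#      "/var/log/nginx/git.example.org/access.log" ]
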
@@ -11,7 +11,8 @@ in
{
imports = [ flake-inputs.foundryvtt.nixosModules.foundryvtt ];

services.foundryvtt = {
services = {
foundryvtt = {
enable = true;
hostName = domain;
minifyStaticFiles = true;
@@ -20,11 +21,7 @@ in
package = flake-inputs.foundryvtt.packages.${pkgs.system}.foundryvtt_11;
};

# Want to start it manually when I need it, not have it constantly
# running
systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];

services.nginx.virtualHosts."${domain}" =
nginx.virtualHosts."${domain}" =
let
inherit (config.services.foundryvtt) port;
in
@@ -39,9 +36,14 @@ in
};
};

services.backups.foundryvtt = {
backups.foundryvtt = {
user = "foundryvtt";
paths = [ config.services.foundryvtt.dataDir ];
pauseServices = [ "foundryvtt.service" ];
};
};

# Want to start it manually when I need it, not have it constantly
# running
systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];
}

@@ -8,7 +8,8 @@ let
domain = "gitea.${config.services.nginx.domain}";
in
{
services.forgejo = {
services = {
forgejo = {
enable = true;
database.type = "postgres";
@@ -29,16 +30,8 @@ in
};
};

systemd.services.forgejo.serviceConfig.ExecStartPre =
let
replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
secretPath = config.sops.secrets."forgejo/metrics-token".path;
runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
in
[ "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'" ];

# Set up SSL
services.nginx.virtualHosts."${domain}" =
nginx.virtualHosts."${domain}" =
let
httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
httpPort = config.services.forgejo.settings.server.HTTP_PORT;
@@ -59,7 +52,7 @@ in
};
};

services.backups.forgejo = {
backups.forgejo = {
user = "forgejo";
paths = [
"/var/lib/forgejo/forgejo-db.sql"
@@ -78,4 +71,13 @@ in
};
pauseServices = [ "forgejo.service" ];
};
};

systemd.services.forgejo.serviceConfig.ExecStartPre =
let
replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
secretPath = config.sops.secrets."forgejo/metrics-token".path;
runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
in
[ "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'" ];
}

@@ -74,7 +74,7 @@ in
listenAddress = "127.0.0.1";
group = "nginx";

settings.namespaces = lib.mapAttrsToList (name: virtualHost: {
settings.namespaces = lib.mapAttrsToList (name: _: {
inherit name;
metrics_override.prefix = "nginxlog";
namespace_label = "vhost";

@@ -38,7 +38,7 @@ in
services.victoriametrics.scrapeConfigs = mkOption {
type = types.attrsOf (
types.submodule (
{ name, self, ... }:
{ name, ... }:
{
options = {
job_name = mkOption {
@@ -106,35 +106,37 @@ in
# module is an intractable mess
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig.Restart = mkDefault "always";
serviceConfig.PrivateTmp = mkDefault true;
serviceConfig.WorkingDirectory = mkDefault /tmp;
serviceConfig.DynamicUser = mkDefault true;
serviceConfig = {
Restart = mkDefault "always";
PrivateTmp = mkDefault true;
WorkingDirectory = mkDefault /tmp;
DynamicUser = mkDefault true;
# Hardening
serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
serviceConfig.DeviceAllow = [ "" ];
serviceConfig.LockPersonality = true;
serviceConfig.MemoryDenyWriteExecute = true;
serviceConfig.NoNewPrivileges = true;
serviceConfig.PrivateDevices = mkDefault true;
serviceConfig.ProtectClock = mkDefault true;
serviceConfig.ProtectControlGroups = true;
serviceConfig.ProtectHome = true;
serviceConfig.ProtectHostname = true;
serviceConfig.ProtectKernelLogs = true;
serviceConfig.ProtectKernelModules = true;
serviceConfig.ProtectKernelTunables = true;
serviceConfig.ProtectSystem = mkDefault "strict";
serviceConfig.RemoveIPC = true;
serviceConfig.RestrictAddressFamilies = [
CapabilityBoundingSet = mkDefault [ "" ];
DeviceAllow = [ "" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = mkDefault true;
ProtectClock = mkDefault true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = mkDefault "strict";
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
serviceConfig.RestrictNamespaces = true;
serviceConfig.RestrictRealtime = true;
serviceConfig.RestrictSUIDSGID = true;
serviceConfig.SystemCallArchitectures = "native";
serviceConfig.UMask = "0077";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
UMask = "0077";
};
}
exporter.serviceOpts
]
@@ -144,7 +146,7 @@ in
{
vmagent-scrape-exporters =
let
listenAddress = config.services.victoriametrics.listenAddress;
inherit (config.services.victoriametrics) listenAddress;
vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
promscrape = yaml.generate "prometheus.yml" {
scrape_configs = lib.mapAttrsToList (
@@ -153,7 +155,7 @@ in
inherit (scrape) job_name;
static_configs =
scrape.static_configs
++ lib.optional (scrape.targets != [ ]) { targets = scrape.targets; };
++ lib.optional (scrape.targets != [ ]) { inherit (scrape) targets; };
} scrape.extraSettings
) config.services.victoriametrics.scrapeConfigs;
};
@@ -212,7 +214,7 @@ in

services.victoriametrics.scrapeConfigs =
let
allExporters = lib.mapAttrs (name: exporter: { inherit (exporter) listenAddress port; }) (
allExporters = lib.mapAttrs (_: exporter: { inherit (exporter) listenAddress port; }) (
(lib.filterAttrs (
name: exporter:
# A bunch of deprecated exporters that need to be ignored

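Two of the changes above are statix's inherit suggestion: a binding of the form x = someAttrs.x; becomes inherit (someAttrs) x;. In isolation, with cfg and scrape as stand-ins for the option values used here:

let
  cfg = { listenAddress = ":8428"; };
  scrape = { targets = [ "127.0.0.1:9100" ]; };

  # Instead of `listenAddress = cfg.listenAddress;`:
  inherit (cfg) listenAddress;
in
{
  inherit listenAddress;
  # Instead of `{ targets = scrape.targets; }`:
  static_config = { inherit (scrape) targets; };
}
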
@@ -1,8 +1,4 @@
{
config,
lib,
...
}:
{ config, lib, ... }:
let
cfg = config.services.victorialogs;
in

@@ -9,18 +9,19 @@ let
hostName = "nextcloud.${config.services.nginx.domain}";
in
{
services.nextcloud = {
services = {
nextcloud = {
inherit hostName;

package = nextcloud;
phpPackage = lib.mkForce (
pkgs.php.override {
packageOverrides = final: prev: {
packageOverrides = _: prev: {
extensions = prev.extensions // {
pgsql = prev.extensions.pgsql.overrideAttrs (old: {
pgsql = prev.extensions.pgsql.overrideAttrs (_: {
configureFlags = [ "--with-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (old: {
pdo_pgsql = prev.extensions.pdo_pgsql.overrideAttrs (_: {
configureFlags = [ "--with-pdo-pgsql=${lib.getDev config.services.postgresql.package}" ];
});
};
@@ -60,17 +61,14 @@ in
};
};

# Ensure that this service doesn't start before postgres is ready
systemd.services.nextcloud-setup.after = [ "postgresql.service" ];

# Set up SSL
services.nginx.virtualHosts."${hostName}" = {
nginx.virtualHosts."${hostName}" = {
forceSSL = true;
useACMEHost = "tlater.net";
# The upstream module already adds HSTS
};

services.backups.nextcloud = {
backups.nextcloud = {
user = "nextcloud";
paths = [
"/var/lib/nextcloud/nextcloud-db.sql"
@@ -98,4 +96,8 @@ in
'';
};
};
};

# Ensure that this service doesn't start before postgres is ready
systemd.services.nextcloud-setup.after = [ "postgresql.service" ];
}

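The packageOverrides and overrideAttrs argument changes above are deadnix fixes: lambda arguments that are never referenced (final, old) are rewritten to _. The same thing as a stripped-down overlay sketch (the package name is made up):

# `final` and the overridden attrs are unused, so both become `_`.
_: prev: {
  example = prev.example.overrideAttrs (_: {
    doCheck = false;
  });
}
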
@@ -1,6 +1,6 @@
{ config, ... }:
let
domain = config.services.nginx.domain;
inherit (config.services.nginx) domain;
in
{
services.tlaternet-webserver = {

@@ -121,8 +121,6 @@
run-vm = {
type = "app";
program =
let
in
(pkgs.writeShellScript "" ''
${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
'').outPath;

@@ -267,9 +267,7 @@ in
};
};

systemd.packages = [
cfg.package
];
systemd.packages = [ cfg.package ];

environment = {
systemPackages = [

@@ -6,10 +6,11 @@
...
}:
let
inherit (flake-inputs.self.packages.${pkgs.system}) crowdsec-firewall-bouncer;

crowdsecCfg = config.security.crowdsec;
cfg = crowdsecCfg.remediationComponents.firewallBouncer;
settingsFormat = pkgs.formats.yaml { };
crowdsec-firewall-bouncer = flake-inputs.self.packages.${pkgs.system}.crowdsec-firewall-bouncer;
in
{
options.security.crowdsec.remediationComponents.firewallBouncer = {
@@ -31,9 +32,7 @@ in
security.crowdsec.remediationComponents.firewallBouncer.settings = {
mode = lib.mkDefault "${if config.networking.nftables.enable then "nftables" else "iptables"}";
log_mode = "stdout";
iptables_chains = [
"nixos-fw"
];
iptables_chains = [ "nixos-fw" ];

# Don't let users easily override this; unfortunately we need to
# set up this key through substitution at runtime.
@@ -78,9 +77,7 @@ in
requiredBy = [ "crowdsec.service" ];

path =
lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [
pkgs.ipset
]
lib.optionals (cfg.settings.mode == "ipset" || cfg.settings.mode == "iptables") [ pkgs.ipset ]
++ lib.optional (cfg.settings.mode == "iptables") pkgs.iptables
++ lib.optional (cfg.settings.mode == "nftables") pkgs.nftables;
};

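The path list in the last hunk is assembled with lib.optionals/lib.optional; reduced to a standalone expression with a hypothetical mode value in place of cfg.settings.mode:

let
  pkgs = import <nixpkgs> { };
  inherit (pkgs) lib;
  mode = "iptables";
in
lib.optionals (mode == "ipset" || mode == "iptables") [ pkgs.ipset ]
++ lib.optional (mode == "iptables") pkgs.iptables
++ lib.optional (mode == "nftables") pkgs.nftables
# => [ ipset iptables ] (as derivations)
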
@@ -1,5 +1 @@
{
imports = [
./cs-firewall-bouncer.nix
];
}
{ imports = [ ./cs-firewall-bouncer.nix ]; }

@@ -1,4 +1 @@
{
sources,
}:
sources.crowdsec-hub.src
{ sources }: sources.crowdsec-hub.src