diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000..cee878d
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,8 @@
+# Run this command to always ignore formatting commits in `git blame`
+# git config blame.ignoreRevsFile .git-blame-ignore-revs
+
+# Switch to nixpkgs-fmt formatting
+fd138d45e6a2cad89fead6e9f246ba282070d6b7
+
+# Switch to alejandra formatting
+046a88905ddfa7f9edc3291c310dbb985dee34f9
diff --git a/configuration/default.nix b/configuration/default.nix
index 3b580eb..b933d19 100644
--- a/configuration/default.nix
+++ b/configuration/default.nix
@@ -1,10 +1,9 @@
-{
-  config,
-  pkgs,
-  lib,
-  modulesPath,
-  flake-inputs,
-  ...
+{ config
+, pkgs
+, lib
+, modulesPath
+, flake-inputs
+, ...
 }: {
   imports = [
     flake-inputs.disko.nixosModules.disko
@@ -47,15 +46,15 @@
     '';
 
     # Enable remote builds from tlater
-    settings.trusted-users = ["@wheel"];
+    settings.trusted-users = [ "@wheel" ];
   };
 
   nixpkgs.config.allowUnfreePredicate = pkg:
-    builtins.elem (lib.getName pkg) ["steam-original" "steam-runtime" "steam-run" "steamcmd"];
+    builtins.elem (lib.getName pkg) [ "steam-original" "steam-runtime" "steam-run" "steamcmd" ];
 
   # Optimization for minecraft servers, see:
   # https://bugs.mojang.com/browse/MC-183518
-  boot.kernelParams = ["highres=off" "nohz=off"];
+  boot.kernelParams = [ "highres=off" "nohz=off" ];
 
   networking = {
     usePredictableInterfaceNames = false;
@@ -106,15 +105,15 @@
 
   users.users.tlater = {
     isNormalUser = true;
-    extraGroups = ["wheel"];
-    openssh.authorizedKeys.keyFiles = [../keys/tlater.pub];
+    extraGroups = [ "wheel" ];
+    openssh.authorizedKeys.keyFiles = [ ../keys/tlater.pub ];
   };
 
   services = {
     openssh = {
       enable = true;
       allowSFTP = false;
-      ports = [2222];
+      ports = [ 2222 ];
       startWhenNeeded = true;
 
       settings = {
@@ -133,14 +132,14 @@
     pam = {
       sshAgentAuth = {
         enable = true;
-        authorizedKeysFiles = ["/etc/ssh/authorized_keys.d/%u"];
+        authorizedKeysFiles = [ "/etc/ssh/authorized_keys.d/%u" ];
       };
       services.sudo.sshAgentAuth = true;
     };
   };
 
   # Remove some unneeded packages
-  environment.defaultPackages = [];
+  environment.defaultPackages = [ ];
 
   system.stateVersion = "20.09";
 }
diff --git a/configuration/hardware-specific/hetzner/default.nix b/configuration/hardware-specific/hetzner/default.nix
index 5ecf63a..3106f19 100644
--- a/configuration/hardware-specific/hetzner/default.nix
+++ b/configuration/hardware-specific/hetzner/default.nix
@@ -8,7 +8,7 @@
   # disables it by default.
   #
   # TODO(tlater): See if this would be useful for anything?
-  boot.kernelParams = ["nosgx"];
+  boot.kernelParams = [ "nosgx" ];
 
   networking.hostName = "hetzner-1";
   services.nginx.domain = "tlater.net";
diff --git a/configuration/hardware-specific/hetzner/disko.nix b/configuration/hardware-specific/hetzner/disko.nix
index e404688..a2ea764 100644
--- a/configuration/hardware-specific/hetzner/disko.nix
+++ b/configuration/hardware-specific/hetzner/disko.nix
@@ -1,82 +1,84 @@
 {
-  disko.devices.disk = let
-    bootPartition = {
-      size = "1M";
-      type = "EF02";
-    };
-
-    swapPartition = {
-      # 8G is apparently recommended for this much RAM, but we set up
-      # 4G on both disks for mirroring purposes.
-      #
-      # That'll still be 8G during normal operation, and it's probably
-      # not too bad to have slightly less swap if a disk dies.
-      size = "4G";
-      content = {
-        type = "swap";
-        randomEncryption = true;
+  disko.devices.disk =
+    let
+      bootPartition = {
+        size = "1M";
+        type = "EF02";
       };
-    };
 
-    mountOptions = ["compress=zstd" "noatime"];
-  in {
-    sda = {
-      type = "disk";
-      device = "/dev/sda";
-      content = {
-        type = "gpt";
-        partitions = {
-          boot = bootPartition;
-          swap = swapPartition;
+      swapPartition = {
+        # 8G is apparently recommended for this much RAM, but we set up
+        # 4G on both disks for mirroring purposes.
+        #
+        # That'll still be 8G during normal operation, and it's probably
+        # not too bad to have slightly less swap if a disk dies.
+        size = "4G";
+        content = {
+          type = "swap";
+          randomEncryption = true;
+        };
+      };
 
-          disk1 = {
-            size = "100%";
-            # Empty partition to combine in RAID0 with the other disk
+      mountOptions = [ "compress=zstd" "noatime" ];
+    in
+    {
+      sda = {
+        type = "disk";
+        device = "/dev/sda";
+        content = {
+          type = "gpt";
+          partitions = {
+            boot = bootPartition;
+            swap = swapPartition;
+
+            disk1 = {
+              size = "100%";
+              # Empty partition to combine in RAID1 with the other disk
+            };
           };
         };
       };
-    };
 
-    sdb = {
-      type = "disk";
-      device = "/dev/sdb";
-      content = {
-        type = "gpt";
-        partitions = {
-          boot = bootPartition;
-          swap = swapPartition;
+      sdb = {
+        type = "disk";
+        device = "/dev/sdb";
+        content = {
+          type = "gpt";
+          partitions = {
+            boot = bootPartition;
+            swap = swapPartition;
 
-          disk2 = {
-            size = "100%";
-            content = {
-              type = "btrfs";
-              # Hack to get multi-device btrfs going
-              # See https://github.com/nix-community/disko/issues/99
-              extraArgs = ["-d" "raid1" "-m" "raid1" "--runtime-features" "quota" "/dev/sda3"];
-              subvolumes = {
-                "/volume" = {};
-                "/volume/root" = {
-                  inherit mountOptions;
-                  mountpoint = "/";
+            disk2 = {
+              size = "100%";
+              content = {
+                type = "btrfs";
+                # Hack to get multi-device btrfs going
+                # See https://github.com/nix-community/disko/issues/99
+                extraArgs = [ "-d" "raid1" "-m" "raid1" "--runtime-features" "quota" "/dev/sda3" ];
+                subvolumes = {
+                  "/volume" = { };
+                  "/volume/root" = {
+                    inherit mountOptions;
+                    mountpoint = "/";
+                  };
+                  "/volume/home" = {
+                    inherit mountOptions;
+                    mountpoint = "/home";
+                  };
+                  "/volume/var" = {
+                    inherit mountOptions;
+                    mountpoint = "/var";
+                  };
+                  "/volume/nix-store" = {
+                    inherit mountOptions;
+                    mountpoint = "/nix";
+                  };
+                  "/snapshots" = { };
                 };
-                "/volume/home" = {
-                  inherit mountOptions;
-                  mountpoint = "/home";
-                };
-                "/volume/var" = {
-                  inherit mountOptions;
-                  mountpoint = "/var";
-                };
-                "/volume/nix-store" = {
-                  inherit mountOptions;
-                  mountpoint = "/nix";
-                };
-                "/snapshots" = {};
               };
             };
           };
         };
       };
     };
-  };
 }
diff --git a/configuration/hardware-specific/vm.nix b/configuration/hardware-specific/vm.nix
index 7f1fcf1..86fcaed 100644
--- a/configuration/hardware-specific/vm.nix
+++ b/configuration/hardware-specific/vm.nix
@@ -1,8 +1,8 @@
-{lib, ...}: {
+{ lib, ... }: {
   users.users.tlater.password = "insecure";
 
   # Disable graphical tty so -curses works
-  boot.kernelParams = ["nomodeset"];
+  boot.kernelParams = [ "nomodeset" ];
 
   networking.hostName = "testvm";
   # Sets the base domain for nginx to a local domain so that we can
diff --git a/configuration/nginx.nix b/configuration/nginx.nix
index 82baab0..d696bba 100644
--- a/configuration/nginx.nix
+++ b/configuration/nginx.nix
@@ -1,7 +1,6 @@
-{
-  config,
-  lib,
-  ...
+{ config
+, lib
+, ...
 }: {
   services.nginx = {
     enable = true;
@@ -27,31 +26,33 @@
       # Override the default, just keep fewer logs
       nginx.rotate = 6;
     }
-    // lib.mapAttrs' (virtualHost: _:
-      lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
-        frequency = "daily";
-        rotate = 2;
-        compress = true;
-        delaycompress = true;
-        su = "${config.services.nginx.user} ${config.services.nginx.group}";
-        postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
-      })
-    config.services.nginx.virtualHosts;
+    // lib.mapAttrs'
+      (virtualHost: _:
+        lib.nameValuePair "/var/log/nginx/${virtualHost}/access.log" {
+          frequency = "daily";
+          rotate = 2;
+          compress = true;
+          delaycompress = true;
+          su = "${config.services.nginx.user} ${config.services.nginx.group}";
+          postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
+        })
+      config.services.nginx.virtualHosts;
 
   systemd.tmpfiles.rules =
-    lib.mapAttrsToList (
-      virtualHost: _:
-      #
-      "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
-    )
-    config.services.nginx.virtualHosts;
+    lib.mapAttrsToList
+      (
+        virtualHost: _:
+          #
+          "d /var/log/nginx/${virtualHost} 0750 ${config.services.nginx.user} ${config.services.nginx.group}"
+      )
+      config.services.nginx.virtualHosts;
 
   security.acme = {
     defaults.email = "tm@tlater.net";
     acceptTerms = true;
 
     certs."tlater.net" = {
-      extraDomainNames = ["*.tlater.net"];
+      extraDomainNames = [ "*.tlater.net" ];
       dnsProvider = "hetzner";
       group = "nginx";
       credentialFiles."HETZNER_API_KEY_FILE" = config.sops.secrets."hetzner-api".path;
@@ -62,6 +63,6 @@
     user = "acme";
     paths =
       lib.mapAttrsToList (virtualHost: _: "/var/lib/acme/${virtualHost}")
-      config.services.nginx.virtualHosts;
+        config.services.nginx.virtualHosts;
   };
 }
diff --git a/configuration/services/afvalcalendar.nix b/configuration/services/afvalcalendar.nix
index e27ba62..28e3a75 100644
--- a/configuration/services/afvalcalendar.nix
+++ b/configuration/services/afvalcalendar.nix
@@ -1,12 +1,11 @@
-{
-  pkgs,
-  config,
-  ...
+{ pkgs
+, config
+, ...
 }: {
   systemd.services.afvalcalendar = {
     description = "Enschede afvalcalendar -> ical converter";
-    wantedBy = ["multi-user.target"];
-    after = ["network.target"];
+    wantedBy = [ "multi-user.target" ];
+    after = [ "network.target" ];
 
     script = ''
       ${pkgs.local.afvalcalendar}/bin/afvalcalendar > /srv/afvalcalendar/afvalcalendar.ical
@@ -26,14 +25,14 @@
       ProtectKernelModules = true;
       ProtectKernelLogs = true;
       ProtectControlGroups = true;
-      RestrictAddressFamilies = ["AF_UNIX" "AF_INET" "AF_INET6"];
+      RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
       RestrictNamespaces = true;
       LockPersonality = true;
       MemoryDenyWriteExecute = true;
       RestrictRealtime = true;
       RestrictSUIDSGID = true;
       SystemCallArchitectures = "native";
-      SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"];
+      SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ];
 
       UMask = 0002;
       SupplementaryGroups = "afvalcalendar-hosting";
@@ -50,7 +49,7 @@
     root = "/srv/afvalcalendar";
   };
 
-  users.groups.afvalcalendar-hosting = {};
+  users.groups.afvalcalendar-hosting = { };
   systemd.tmpfiles.settings."10-afvalcalendar" = {
     "/srv/afvalcalendar".d = {
       user = "nginx";
diff --git a/configuration/services/backups.nix b/configuration/services/backups.nix
index 98aa473..7c77399 100644
--- a/configuration/services/backups.nix
+++ b/configuration/services/backups.nix
@@ -1,9 +1,9 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
-}: let
+{ config
+, pkgs
+, lib
+, ...
+}:
+let
   inherit (lib) types optional singleton;
   mkShutdownScript = service:
     pkgs.writeShellScript "backup-${service}-shutdown" ''
@@ -42,17 +42,17 @@
     RESTIC_REPOSITORY = "rclone:storagebox:backups";
     RCLONE_CONFIG = rcloneConfig;
   };
-in {
+in
+{
   options = {
     services.backups = lib.mkOption {
       description = lib.mdDoc ''
         Configure restic backups with a specific tag.
       '';
-      type = types.attrsOf (types.submodule ({
-        config,
-        name,
-        ...
-      }: {
+      type = types.attrsOf (types.submodule ({ config
+                                             , name
+                                             , ...
+                                             }: {
         options = {
           user = lib.mkOption {
             type = types.str;
@@ -76,7 +76,7 @@ in {
           preparation = {
             packages = lib.mkOption {
               type = types.listOf types.package;
-              default = [];
+              default = [ ];
               description = ''
                 The list of packages to make available in the
                 preparation script.
@@ -97,7 +97,7 @@ in {
           cleanup = {
             packages = lib.mkOption {
               type = types.listOf types.package;
-              default = [];
+              default = [ ];
               description = ''
                 The list of packages to make available in the
                 cleanup script.
@@ -116,7 +116,7 @@ in {
           };
           pauseServices = lib.mkOption {
             type = types.listOf types.str;
-            default = [];
+            default = [ ];
             description = ''
               The systemd services that need to be shut down before
               the backup can run. Services will be restarted after the
@@ -131,7 +131,7 @@ in {
     };
   };
 
-  config = lib.mkIf (config.services.backups != {}) {
+  config = lib.mkIf (config.services.backups != { }) {
     systemd.services =
       {
         restic-prune = {
@@ -164,79 +164,81 @@ in {
           };
         };
       }
-      // lib.mapAttrs' (name: backup:
-        lib.nameValuePair "backup-${name}" {
-          # Don't want to restart mid-backup
-          restartIfChanged = false;
+      // lib.mapAttrs'
+        (name: backup:
+          lib.nameValuePair "backup-${name}" {
+            # Don't want to restart mid-backup
+            restartIfChanged = false;
 
-          environment =
-            resticEnv
-            // {
-              RESTIC_CACHE_DIR = "%C/backup-${name}";
-            };
+            environment =
+              resticEnv
+              // {
+                RESTIC_CACHE_DIR = "%C/backup-${name}";
+              };
 
-          path = with pkgs; [
-            coreutils
-            openssh
-            rclone
-            restic
-          ];
-
-          # TODO(tlater): If I ever add more than one repo, service
-          # shutdown/restarting will potentially break if multiple
-          # backups for the same service overlap. A more clever
-          # sentinel file with reference counts would probably solve
-          # this.
-          serviceConfig = {
-            User = backup.user;
-            Group = "backup";
-            RuntimeDirectory = "backup-${name}";
-            CacheDirectory = "backup-${name}";
-            CacheDirectoryMode = "0700";
-            PrivateTmp = true;
-
-            ExecStart = [
-              (lib.concatStringsSep " " (["${pkgs.restic}/bin/restic" "backup" "--tag" name] ++ backup.paths))
+            path = with pkgs; [
+              coreutils
+              openssh
+              rclone
+              restic
             ];
 
-            ExecStartPre =
-              map (service: "+${mkShutdownScript service}") backup.pauseServices
-              ++ singleton (writeScript "backup-${name}-repo-init" [] ''
-                restic snapshots || restic init
-              '')
-              ++ optional (backup.preparation.text != null)
-              (writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);
+            # TODO(tlater): If I ever add more than one repo, service
+            # shutdown/restarting will potentially break if multiple
+            # backups for the same service overlap. A more clever
+            # sentinel file with reference counts would probably solve
+            # this.
+            serviceConfig = {
+              User = backup.user;
+              Group = "backup";
+              RuntimeDirectory = "backup-${name}";
+              CacheDirectory = "backup-${name}";
+              CacheDirectoryMode = "0700";
+              PrivateTmp = true;
 
-            # TODO(tlater): Add repo pruning/checking
-            ExecStopPost =
-              map (service: "+${mkRestartScript service}") backup.pauseServices
-              ++ optional (backup.cleanup.text != null)
-              (writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
-          };
-        })
-      config.services.backups;
+              ExecStart = [
+                (lib.concatStringsSep " " ([ "${pkgs.restic}/bin/restic" "backup" "--tag" name ] ++ backup.paths))
+              ];
+
+              ExecStartPre =
+                map (service: "+${mkShutdownScript service}") backup.pauseServices
+                ++ singleton (writeScript "backup-${name}-repo-init" [ ] ''
+                  restic snapshots || restic init
+                '')
+                ++ optional (backup.preparation.text != null)
+                  (writeScript "backup-${name}-prepare" backup.preparation.packages backup.preparation.text);
+
+              # TODO(tlater): Add repo pruning/checking
+              ExecStopPost =
+                map (service: "+${mkRestartScript service}") backup.pauseServices
+                ++ optional (backup.cleanup.text != null)
+                  (writeScript "backup-${name}-cleanup" backup.cleanup.packages backup.cleanup.text);
+            };
+          })
+        config.services.backups;
 
     systemd.timers =
       {
         restic-prune = {
-          wantedBy = ["timers.target"];
+          wantedBy = [ "timers.target" ];
           timerConfig.OnCalendar = "Thursday 03:00:00 UTC";
           # Don't make this persistent, in case the server was offline
           # for a while. This job cannot run at the same time as any
           # of the backup jobs.
         };
       }
-      // lib.mapAttrs' (name: backup:
-        lib.nameValuePair "backup-${name}" {
-          wantedBy = ["timers.target"];
-          timerConfig = {
-            OnCalendar = "Wednesday 02:30:00 UTC";
-            RandomizedDelaySec = "1h";
-            FixedRandomDelay = true;
-            Persistent = true;
-          };
-        })
-      config.services.backups;
+      // lib.mapAttrs'
+        (name: backup:
+          lib.nameValuePair "backup-${name}" {
+            wantedBy = [ "timers.target" ];
+            timerConfig = {
+              OnCalendar = "Wednesday 02:30:00 UTC";
+              RandomizedDelaySec = "1h";
+              FixedRandomDelay = true;
+              Persistent = true;
+            };
+          })
+        config.services.backups;
 
     users = {
       # This user is only used to own the ssh key, because apparently
@@ -245,7 +247,7 @@ in {
         group = "backup";
         isSystemUser = true;
       };
-      groups.backup = {};
+      groups.backup = { };
     };
   };
 }
diff --git a/configuration/services/battery-manager.nix b/configuration/services/battery-manager.nix
index 7f27931..7783a3b 100644
--- a/configuration/services/battery-manager.nix
+++ b/configuration/services/battery-manager.nix
@@ -1,7 +1,6 @@
-{
-  config,
-  flake-inputs,
-  ...
+{ config
+, flake-inputs
+, ...
 }: {
   imports = [
     flake-inputs.sonnenshift.nixosModules.default
diff --git a/configuration/services/conduit.nix b/configuration/services/conduit.nix
index 2462d9b..950165c 100644
--- a/configuration/services/conduit.nix
+++ b/configuration/services/conduit.nix
@@ -1,15 +1,16 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   inherit (lib.strings) concatMapStringsSep;
 
   cfg = config.services.matrix-conduit;
   domain = "matrix.${config.services.nginx.domain}";
   turn-realm = "turn.${config.services.nginx.domain}";
-in {
+in
+{
   services.matrix-conduit = {
     enable = true;
     settings.global = {
@@ -17,99 +18,103 @@ in {
       server_name = domain;
       database_backend = "rocksdb";
 
-      turn_uris = let
-        address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
-        tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
-      in [
-        "turn:${address}?transport=udp"
-        "turn:${address}?transport=tcp"
-        "turns:${tls-address}?transport=udp"
-        "turns:${tls-address}?transport=tcp"
-      ];
-    };
-  };
-
-  systemd.services.heisenbridge = let
-    replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
-    registrationFile = builtins.toFile "heisenbridge-registration.yaml" (builtins.toJSON {
-      id = "heisenbridge";
-      url = "http://127.0.0.1:9898";
-      as_token = "@AS_TOKEN@";
-      hs_token = "@HS_TOKEN@";
-      rate_limited = false;
-      sender_localpart = "heisenbridge";
-      namespaces = {
-        users = [
-          {
-            regex = "@irc_.*";
-            exclusive = true;
-          }
-          {
-            regex = "@heisenbridge:.*";
-            exclusive = true;
-          }
+      turn_uris =
+        let
+          address = "${config.services.coturn.realm}:${toString config.services.coturn.listening-port}";
+          tls-address = "${config.services.coturn.realm}:${toString config.services.coturn.tls-listening-port}";
+        in
+        [
+          "turn:${address}?transport=udp"
+          "turn:${address}?transport=tcp"
+          "turns:${tls-address}?transport=udp"
+          "turns:${tls-address}?transport=tcp"
         ];
-        aliases = [];
-        rooms = [];
-      };
-    });
-
-    # TODO(tlater): Starting with systemd 253 it will become possible
-    # to do the credential setup as part of ExecStartPre/preStart
-    # instead.
-    #
-    # This will also make it possible to actually set caps on the
-    # heisenbridge process using systemd, so that we can run the
-    # identd process.
-    execScript = pkgs.writeShellScript "heisenbridge" ''
-      cp ${registrationFile} "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      chmod 600 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
-      ${replaceSecretBin} '@AS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_as-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      ${replaceSecretBin} '@HS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_hs-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
-      chmod 400 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
-
-      ${pkgs.heisenbridge}/bin/heisenbridge \
-          --config $RUNTIME_DIRECTORY/heisenbridge-registration.yaml \
-          --owner @tlater:matrix.tlater.net \
-          'http://localhost:${toString cfg.settings.global.port}'
-    '';
-  in {
-    description = "Matrix<->IRC bridge";
-    wantedBy = ["multi-user.target"];
-    after = ["conduit.service"];
-
-    serviceConfig = {
-      Type = "simple";
-
-      LoadCredential = "heisenbridge:/run/secrets/heisenbridge";
-
-      ExecStart = execScript;
-
-      DynamicUser = true;
-      RuntimeDirectory = "heisenbridge";
-      RuntimeDirectoryMode = "0700";
-
-      RestrictNamespaces = true;
-      PrivateUsers = true;
-      ProtectHostname = true;
-      ProtectClock = true;
-      ProtectKernelTunables = true;
-      ProtectKernelModules = true;
-      ProtectKernelLogs = true;
-      ProtectControlGroups = true;
-      RestrictAddressFamilies = ["AF_INET AF_INET6"];
-      LockPersonality = true;
-      RestrictRealtime = true;
-      ProtectProc = "invisible";
-      ProcSubset = "pid";
-      UMask = 0077;
-
-      # For the identd port
-      # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
-      # AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
     };
   };
 
+  systemd.services.heisenbridge =
+    let
+      replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+      registrationFile = builtins.toFile "heisenbridge-registration.yaml" (builtins.toJSON {
+        id = "heisenbridge";
+        url = "http://127.0.0.1:9898";
+        as_token = "@AS_TOKEN@";
+        hs_token = "@HS_TOKEN@";
+        rate_limited = false;
+        sender_localpart = "heisenbridge";
+        namespaces = {
+          users = [
+            {
+              regex = "@irc_.*";
+              exclusive = true;
+            }
+            {
+              regex = "@heisenbridge:.*";
+              exclusive = true;
+            }
+          ];
+          aliases = [ ];
+          rooms = [ ];
+        };
+      });
+
+      # TODO(tlater): Starting with systemd 253 it will become possible
+      # to do the credential setup as part of ExecStartPre/preStart
+      # instead.
+      #
+      # This will also make it possible to actually set caps on the
+      # heisenbridge process using systemd, so that we can run the
+      # identd process.
+      execScript = pkgs.writeShellScript "heisenbridge" ''
+        cp ${registrationFile} "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        chmod 600 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
+        ${replaceSecretBin} '@AS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_as-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        ${replaceSecretBin} '@HS_TOKEN@' "$CREDENTIALS_DIRECTORY/heisenbridge_hs-token" "$RUNTIME_DIRECTORY/heisenbridge-registration.yaml"
+        chmod 400 $RUNTIME_DIRECTORY/heisenbridge-registration.yaml
+
+        ${pkgs.heisenbridge}/bin/heisenbridge \
+            --config $RUNTIME_DIRECTORY/heisenbridge-registration.yaml \
+            --owner @tlater:matrix.tlater.net \
+            'http://localhost:${toString cfg.settings.global.port}'
+      '';
+    in
+    {
+      description = "Matrix<->IRC bridge";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "conduit.service" ];
+
+      serviceConfig = {
+        Type = "simple";
+
+        LoadCredential = "heisenbridge:/run/secrets/heisenbridge";
+
+        ExecStart = execScript;
+
+        DynamicUser = true;
+        RuntimeDirectory = "heisenbridge";
+        RuntimeDirectoryMode = "0700";
+
+        RestrictNamespaces = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
+        LockPersonality = true;
+        RestrictRealtime = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        UMask = 0077;
+
+        # For the identd port
+        # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
+        # AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
+      };
+    };
+
   # Pass in the TURN secret via EnvironmentFile, not supported by
   # upstream module currently.
   #
@@ -249,6 +254,6 @@ in {
     ];
     # Other services store their data in conduit, so no other services
     # need to be shut down currently.
-    pauseServices = ["conduit.service"];
+    pauseServices = [ "conduit.service" ];
   };
 }
diff --git a/configuration/services/fail2ban.nix b/configuration/services/fail2ban.nix
index ace3219..1811046 100644
--- a/configuration/services/fail2ban.nix
+++ b/configuration/services/fail2ban.nix
@@ -1,7 +1,7 @@
-{pkgs, ...}: {
+{ pkgs, ... }: {
   services.fail2ban = {
     enable = true;
-    extraPackages = [pkgs.ipset];
+    extraPackages = [ pkgs.ipset ];
     banaction = "iptables-ipset-proto6-allports";
     bantime-increment.enable = true;
 
@@ -21,7 +21,7 @@
   };
 
   # Allow metrics services to connect to the socket as well
-  users.groups.fail2ban = {};
+  users.groups.fail2ban = { };
   systemd.services.fail2ban.serviceConfig = {
     ExecStartPost =
       "+"
diff --git a/configuration/services/foundryvtt.nix b/configuration/services/foundryvtt.nix
index ac206fc..e69d2dd 100644
--- a/configuration/services/foundryvtt.nix
+++ b/configuration/services/foundryvtt.nix
@@ -1,12 +1,13 @@
-{
-  lib,
-  config,
-  flake-inputs,
-  ...
-}: let
+{ lib
+, config
+, flake-inputs
+, ...
+}:
+let
   domain = "foundryvtt.${config.services.nginx.domain}";
-in {
-  imports = [flake-inputs.foundryvtt.nixosModules.foundryvtt];
+in
+{
+  imports = [ flake-inputs.foundryvtt.nixosModules.foundryvtt ];
 
   services.foundryvtt = {
     enable = true;
@@ -18,26 +19,28 @@ in {
 
   # Want to start it manually when I need it, not have it constantly
   # running
-  systemd.services.foundryvtt.wantedBy = lib.mkForce [];
+  systemd.services.foundryvtt.wantedBy = lib.mkForce [ ];
 
-  services.nginx.virtualHosts."${domain}" = let
-    inherit (config.services.foundryvtt) port;
-  in {
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
+  services.nginx.virtualHosts."${domain}" =
+    let
+      inherit (config.services.foundryvtt) port;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
 
-    locations."/" = {
-      proxyWebsockets = true;
-      proxyPass = "http://localhost:${toString port}";
+      locations."/" = {
+        proxyWebsockets = true;
+        proxyPass = "http://localhost:${toString port}";
+      };
     };
-  };
 
   services.backups.foundryvtt = {
     user = "foundryvtt";
     paths = [
       config.services.foundryvtt.dataDir
     ];
-    pauseServices = ["foundryvtt.service"];
+    pauseServices = [ "foundryvtt.service" ];
   };
 }
diff --git a/configuration/services/gitea.nix b/configuration/services/gitea.nix
index 26fe2f8..4ef6238 100644
--- a/configuration/services/gitea.nix
+++ b/configuration/services/gitea.nix
@@ -1,11 +1,12 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   domain = "gitea.${config.services.nginx.domain}";
-in {
+in
+{
   services.forgejo = {
     enable = true;
     database.type = "postgres";
@@ -27,33 +28,37 @@ in {
     };
   };
 
-  systemd.services.forgejo.serviceConfig.ExecStartPre = let
-    replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
-    secretPath = config.sops.secrets."forgejo/metrics-token".path;
-    runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
-  in [
-    "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
-  ];
+  systemd.services.forgejo.serviceConfig.ExecStartPre =
+    let
+      replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+      secretPath = config.sops.secrets."forgejo/metrics-token".path;
+      runConfig = "${config.services.forgejo.customDir}/conf/app.ini";
+    in
+    [
+      "+${replaceSecretBin} '#metricstoken#' '${secretPath}' '${runConfig}'"
+    ];
 
   # Set up SSL
-  services.nginx.virtualHosts."${domain}" = let
-    httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
-    httpPort = config.services.forgejo.settings.server.HTTP_PORT;
-  in {
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
+  services.nginx.virtualHosts."${domain}" =
+    let
+      httpAddress = config.services.forgejo.settings.server.HTTP_ADDR;
+      httpPort = config.services.forgejo.settings.server.HTTP_PORT;
+    in
+    {
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
 
-    locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
-    locations."/metrics" = {
-      extraConfig = ''
-        access_log off;
-        allow 127.0.0.1;
-        ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
-        deny all;
-      '';
+      locations."/".proxyPass = "http://${httpAddress}:${toString httpPort}";
+      locations."/metrics" = {
+        extraConfig = ''
+          access_log off;
+          allow 127.0.0.1;
+          ${lib.optionalString config.networking.enableIPv6 "allow ::1;"}
+          deny all;
+        '';
+      };
     };
-  };
 
   # Block repeated failed login attempts
   #
@@ -83,13 +88,13 @@ in {
       # Conf is backed up via nix
     ];
     preparation = {
-      packages = [config.services.postgresql.package];
+      packages = [ config.services.postgresql.package ];
       text = "pg_dump ${config.services.forgejo.database.name} --file=/var/lib/forgejo/forgejo-db.sql";
     };
     cleanup = {
-      packages = [pkgs.coreutils];
+      packages = [ pkgs.coreutils ];
       text = "rm /var/lib/forgejo/forgejo-db.sql";
     };
-    pauseServices = ["forgejo.service"];
+    pauseServices = [ "forgejo.service" ];
   };
 }
diff --git a/configuration/services/metrics/exporters.nix b/configuration/services/metrics/exporters.nix
index f3054db..e17be8e 100644
--- a/configuration/services/metrics/exporters.nix
+++ b/configuration/services/metrics/exporters.nix
@@ -1,25 +1,28 @@
+{ config
+, pkgs
+, lib
+, ...
+}:
+let
+  yaml = pkgs.formats.yaml { };
+in
 {
-  config,
-  pkgs,
-  lib,
-  ...
-}: let
-  yaml = pkgs.formats.yaml {};
-in {
   services.prometheus = {
     exporters = {
       # Periodically check domain registration status
       domain = {
         enable = true;
         listenAddress = "127.0.0.1";
-        extraFlags = let
-          conf.domains = [
-            "tlater.net"
-            "tlater.com"
+        extraFlags =
+          let
+            conf.domains = [
+              "tlater.net"
+              "tlater.com"
+            ];
+          in
+          [
+            "--config=${yaml.generate "domains.yml" conf}"
           ];
-        in [
-          "--config=${yaml.generate "domains.yml" conf}"
-        ];
       };
 
       # System statistics
@@ -49,47 +52,50 @@ in {
         group = "nginx";
 
         settings.namespaces =
-          lib.mapAttrsToList (name: virtualHost: {
-            inherit name;
-            metrics_override.prefix = "nginxlog";
-            namespace_label = "vhost";
+          lib.mapAttrsToList
+            (name: virtualHost: {
+              inherit name;
+              metrics_override.prefix = "nginxlog";
+              namespace_label = "vhost";
 
-            format = lib.concatStringsSep " " [
-              "$remote_addr - $remote_user [$time_local]"
-              ''"$request" $status $body_bytes_sent''
-              ''"$http_referer" "$http_user_agent"''
-              ''rt=$request_time uct="$upstream_connect_time"''
-              ''uht="$upstream_header_time" urt="$upstream_response_time"''
-            ];
+              format = lib.concatStringsSep " " [
+                "$remote_addr - $remote_user [$time_local]"
+                ''"$request" $status $body_bytes_sent''
+                ''"$http_referer" "$http_user_agent"''
+                ''rt=$request_time uct="$upstream_connect_time"''
+                ''uht="$upstream_header_time" urt="$upstream_response_time"''
+              ];
 
-            source.files = [
-              "/var/log/nginx/${name}/access.log"
-            ];
-          })
-          config.services.nginx.virtualHosts;
+              source.files = [
+                "/var/log/nginx/${name}/access.log"
+              ];
+            })
+            config.services.nginx.virtualHosts;
       };
     };
 
     extraExporters = {
-      fail2ban = let
-        cfg = config.services.prometheus.extraExporters.fail2ban;
-      in {
-        port = 9191;
-        serviceOpts = {
-          after = ["fail2ban.service"];
-          requires = ["fail2ban.service"];
-          serviceConfig = {
-            Group = "fail2ban";
-            RestrictAddressFamilies = ["AF_UNIX" "AF_INET" "AF_INET6"];
-            ExecStart = lib.concatStringsSep " " [
-              "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
-              "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
-              "--web.listen-address='${cfg.listenAddress}:${toString cfg.port}'"
-              "--collector.f2b.exit-on-socket-connection-error=true"
-            ];
+      fail2ban =
+        let
+          cfg = config.services.prometheus.extraExporters.fail2ban;
+        in
+        {
+          port = 9191;
+          serviceOpts = {
+            after = [ "fail2ban.service" ];
+            requires = [ "fail2ban.service" ];
+            serviceConfig = {
+              Group = "fail2ban";
+              RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+              ExecStart = lib.concatStringsSep " " [
+                "${pkgs.local.prometheus-fail2ban-exporter}/bin/fail2ban-prometheus-exporter"
+                "--collector.f2b.socket=/var/run/fail2ban/fail2ban.sock"
+                "--web.listen-address='${cfg.listenAddress}:${toString cfg.port}'"
+                "--collector.f2b.exit-on-socket-connection-error=true"
+              ];
+            };
           };
         };
-      };
     };
 
     # TODO(tlater):
diff --git a/configuration/services/metrics/grafana.nix b/configuration/services/metrics/grafana.nix
index eb5106e..d13fe7b 100644
--- a/configuration/services/metrics/grafana.nix
+++ b/configuration/services/metrics/grafana.nix
@@ -1,6 +1,8 @@
-{config, ...}: let
+{ config, ... }:
+let
   domain = "metrics.${config.services.nginx.domain}";
-in {
+in
+{
   services.grafana = {
     enable = true;
     settings = {
diff --git a/configuration/services/metrics/options.nix b/configuration/services/metrics/options.nix
index 81f0865..552aec8 100644
--- a/configuration/services/metrics/options.nix
+++ b/configuration/services/metrics/options.nix
@@ -1,12 +1,13 @@
-{
-  pkgs,
-  config,
-  lib,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, ...
+}:
+let
   inherit (lib) types mkOption mkDefault;
-  yaml = pkgs.formats.yaml {};
-in {
+  yaml = pkgs.formats.yaml { };
+in
+{
   options = {
     services.prometheus = {
       extraExporters = mkOption {
@@ -31,11 +32,10 @@ in {
     };
 
     services.victoriametrics.scrapeConfigs = mkOption {
-      type = types.attrsOf (types.submodule ({
-        name,
-        self,
-        ...
-      }: {
+      type = types.attrsOf (types.submodule ({ name
+                                             , self
+                                             , ...
+                                             }: {
         options = {
           job_name = mkOption {
             type = types.str;
@@ -47,7 +47,7 @@ in {
             description = ''
               Other settings to set for this scrape config.
             '';
-            default = {};
+            default = { };
           };
 
           targets = mkOption {
@@ -57,11 +57,11 @@ in {
 
               Shortcut for `static_configs = lib.singleton {targets = [<targets>];}`
             '';
-            default = [];
+            default = [ ];
           };
 
           static_configs = mkOption {
-            default = [];
+            default = [ ];
             type = types.listOf (types.submodule {
               options = {
                 targets = mkOption {
@@ -77,7 +77,7 @@ in {
                   description = lib.mdDoc ''
                     Labels to apply to all targets defined for this static config.
                   '';
-                  default = {};
+                  default = { };
                 };
               };
             });
@@ -89,116 +89,125 @@ in {
 
   config = {
     systemd.services = lib.mkMerge [
-      (lib.mapAttrs' (name: exporter:
-        lib.nameValuePair "prometheus-${name}-exporter" (lib.mkMerge [
-          {
-            # Shamelessly copied from upstream because the upstream
-            # module is an intractable mess
-            wantedBy = ["multi-user.target"];
-            after = ["network.target"];
-            serviceConfig.Restart = mkDefault "always";
-            serviceConfig.PrivateTmp = mkDefault true;
-            serviceConfig.WorkingDirectory = mkDefault /tmp;
-            serviceConfig.DynamicUser = mkDefault true;
-            # Hardening
-            serviceConfig.CapabilityBoundingSet = mkDefault [""];
-            serviceConfig.DeviceAllow = [""];
-            serviceConfig.LockPersonality = true;
-            serviceConfig.MemoryDenyWriteExecute = true;
-            serviceConfig.NoNewPrivileges = true;
-            serviceConfig.PrivateDevices = mkDefault true;
-            serviceConfig.ProtectClock = mkDefault true;
-            serviceConfig.ProtectControlGroups = true;
-            serviceConfig.ProtectHome = true;
-            serviceConfig.ProtectHostname = true;
-            serviceConfig.ProtectKernelLogs = true;
-            serviceConfig.ProtectKernelModules = true;
-            serviceConfig.ProtectKernelTunables = true;
-            serviceConfig.ProtectSystem = mkDefault "strict";
-            serviceConfig.RemoveIPC = true;
-            serviceConfig.RestrictAddressFamilies = ["AF_INET" "AF_INET6"];
-            serviceConfig.RestrictNamespaces = true;
-            serviceConfig.RestrictRealtime = true;
-            serviceConfig.RestrictSUIDSGID = true;
-            serviceConfig.SystemCallArchitectures = "native";
-            serviceConfig.UMask = "0077";
-          }
-          exporter.serviceOpts
-        ]))
-      config.services.prometheus.extraExporters)
+      (lib.mapAttrs'
+        (name: exporter:
+          lib.nameValuePair "prometheus-${name}-exporter" (lib.mkMerge [
+            {
+              # Shamelessly copied from upstream because the upstream
+              # module is an intractable mess
+              wantedBy = [ "multi-user.target" ];
+              after = [ "network.target" ];
+              serviceConfig.Restart = mkDefault "always";
+              serviceConfig.PrivateTmp = mkDefault true;
+              serviceConfig.WorkingDirectory = mkDefault /tmp;
+              serviceConfig.DynamicUser = mkDefault true;
+              # Hardening
+              serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
+              serviceConfig.DeviceAllow = [ "" ];
+              serviceConfig.LockPersonality = true;
+              serviceConfig.MemoryDenyWriteExecute = true;
+              serviceConfig.NoNewPrivileges = true;
+              serviceConfig.PrivateDevices = mkDefault true;
+              serviceConfig.ProtectClock = mkDefault true;
+              serviceConfig.ProtectControlGroups = true;
+              serviceConfig.ProtectHome = true;
+              serviceConfig.ProtectHostname = true;
+              serviceConfig.ProtectKernelLogs = true;
+              serviceConfig.ProtectKernelModules = true;
+              serviceConfig.ProtectKernelTunables = true;
+              serviceConfig.ProtectSystem = mkDefault "strict";
+              serviceConfig.RemoveIPC = true;
+              serviceConfig.RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+              serviceConfig.RestrictNamespaces = true;
+              serviceConfig.RestrictRealtime = true;
+              serviceConfig.RestrictSUIDSGID = true;
+              serviceConfig.SystemCallArchitectures = "native";
+              serviceConfig.UMask = "0077";
+            }
+            exporter.serviceOpts
+          ]))
+        config.services.prometheus.extraExporters)
 
       {
-        vmagent-scrape-exporters = let
-          listenAddress = config.services.victoriametrics.listenAddress;
-          vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
-          promscrape = yaml.generate "prometheus.yml" {
-            scrape_configs = lib.mapAttrsToList (_: scrape:
-              lib.recursiveUpdate {
-                inherit (scrape) job_name;
-                static_configs =
-                  scrape.static_configs
-                  ++ lib.optional (scrape.targets != []) {targets = scrape.targets;};
-              }
-              scrape.extraSettings)
-            config.services.victoriametrics.scrapeConfigs;
-          };
-        in {
-          enable = true;
-          path = [pkgs.victoriametrics];
-          wantedBy = ["multi-user.target"];
-          after = ["network.target" "victoriametrics.service"];
-          serviceConfig = {
-            ExecStart = [
-              (lib.concatStringsSep " " [
-                "${pkgs.victoriametrics}/bin/vmagent"
-                "-promscrape.config=${promscrape}"
-                "-remoteWrite.url=http://${vmAddr}/api/v1/write"
-                "-remoteWrite.tmpDataPath=%t/vmagent"
-              ])
-            ];
-            SupplementaryGroups = "metrics";
+        vmagent-scrape-exporters =
+          let
+            listenAddress = config.services.victoriametrics.listenAddress;
+            vmAddr = (lib.optionalString (lib.hasPrefix ":" listenAddress) "127.0.0.1") + listenAddress;
+            promscrape = yaml.generate "prometheus.yml" {
+              scrape_configs = lib.mapAttrsToList
+                (_: scrape:
+                  lib.recursiveUpdate
+                    {
+                      inherit (scrape) job_name;
+                      static_configs =
+                        scrape.static_configs
+                        ++ lib.optional (scrape.targets != [ ]) { targets = scrape.targets; };
+                    }
+                    scrape.extraSettings)
+                config.services.victoriametrics.scrapeConfigs;
+            };
+          in
+          {
+            enable = true;
+            path = [ pkgs.victoriametrics ];
+            wantedBy = [ "multi-user.target" ];
+            after = [ "network.target" "victoriametrics.service" ];
+            serviceConfig = {
+              ExecStart = [
+                (lib.concatStringsSep " " [
+                  "${pkgs.victoriametrics}/bin/vmagent"
+                  "-promscrape.config=${promscrape}"
+                  "-remoteWrite.url=http://${vmAddr}/api/v1/write"
+                  "-remoteWrite.tmpDataPath=%t/vmagent"
+                ])
+              ];
+              SupplementaryGroups = "metrics";
 
-            DynamicUser = true;
-            RuntimeDirectory = "vmagent";
-            CapabilityBoundingSet = [""];
-            DeviceAllow = [""];
-            LockPersonality = true;
-            MemoryDenyWriteExecute = true;
-            NoNewPrivileges = true;
-            PrivateDevices = true;
-            ProtectClock = true;
-            ProtectControlGroups = true;
-            ProtectHome = true;
-            ProtectHostname = true;
-            ProtectKernelLogs = true;
-            ProtectKernelModules = true;
-            ProtectKernelTunables = true;
-            ProtectSystem = "strict";
-            RemoveIPC = true;
-            RestrictAddressFamilies = ["AF_INET" "AF_INET6"];
-            RestrictNamespaces = true;
-            RestrictRealtime = true;
-            RestrictSUIDSGID = true;
-            SystemCallArchitectures = "native";
-            UMask = "0077";
+              DynamicUser = true;
+              RuntimeDirectory = "vmagent";
+              CapabilityBoundingSet = [ "" ];
+              DeviceAllow = [ "" ];
+              LockPersonality = true;
+              MemoryDenyWriteExecute = true;
+              NoNewPrivileges = true;
+              PrivateDevices = true;
+              ProtectClock = true;
+              ProtectControlGroups = true;
+              ProtectHome = true;
+              ProtectHostname = true;
+              ProtectKernelLogs = true;
+              ProtectKernelModules = true;
+              ProtectKernelTunables = true;
+              ProtectSystem = "strict";
+              RemoveIPC = true;
+              RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+              RestrictNamespaces = true;
+              RestrictRealtime = true;
+              RestrictSUIDSGID = true;
+              SystemCallArchitectures = "native";
+              UMask = "0077";
+            };
           };
-        };
       }
     ];
 
-    users.groups.metrics = {};
+    users.groups.metrics = { };
 
-    services.victoriametrics.scrapeConfigs = let
-      allExporters =
-        lib.mapAttrs (name: exporter: {
-          inherit (exporter) listenAddress port;
-        }) ((lib.filterAttrs (_: exporter: builtins.isAttrs exporter && exporter.enable)
-          config.services.prometheus.exporters)
-        // config.services.prometheus.extraExporters);
-    in
-      lib.mapAttrs (_: exporter: {
-        targets = ["${exporter.listenAddress}:${toString exporter.port}"];
-      })
-      allExporters;
+    services.victoriametrics.scrapeConfigs =
+      let
+        allExporters =
+          lib.mapAttrs
+            (name: exporter: {
+              inherit (exporter) listenAddress port;
+            })
+            ((lib.filterAttrs (_: exporter: builtins.isAttrs exporter && exporter.enable)
+              config.services.prometheus.exporters)
+            // config.services.prometheus.extraExporters);
+      in
+      lib.mapAttrs
+        (_: exporter: {
+          targets = [ "${exporter.listenAddress}:${toString exporter.port}" ];
+        })
+        allExporters;
   };
 }
diff --git a/configuration/services/metrics/victoriametrics.nix b/configuration/services/metrics/victoriametrics.nix
index daf3f94..695b89e 100644
--- a/configuration/services/metrics/victoriametrics.nix
+++ b/configuration/services/metrics/victoriametrics.nix
@@ -1,4 +1,4 @@
-{config, ...}: {
+{ config, ... }: {
   config.services.victoriametrics = {
     enable = true;
     extraOptions = [
@@ -7,10 +7,10 @@
 
     scrapeConfigs = {
       forgejo = {
-        targets = ["127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT}"];
+        targets = [ "127.0.0.1:${toString config.services.forgejo.settings.server.HTTP_PORT}" ];
         extraSettings.authorization.credentials_file = config.sops.secrets."forgejo/metrics-token".path;
       };
-      coturn.targets = ["127.0.0.1:9641"];
+      coturn.targets = [ "127.0.0.1:9641" ];
     };
   };
 }
diff --git a/configuration/services/nextcloud.nix b/configuration/services/nextcloud.nix
index 2c7fe4f..7c4fcf4 100644
--- a/configuration/services/nextcloud.nix
+++ b/configuration/services/nextcloud.nix
@@ -1,14 +1,15 @@
-{
-  pkgs,
-  config,
-  ...
-}: let
+{ pkgs
+, config
+, ...
+}:
+let
   # Update pending on rewrite of nextcloud news, though there is an
   # alpha to switch to if it becomes necessary:
   # https://github.com/nextcloud/news/issues/2610
   nextcloud = pkgs.nextcloud27;
   hostName = "nextcloud.${config.services.nginx.domain}";
-in {
+in
+{
   services.nextcloud = {
     inherit hostName;
 
@@ -42,7 +43,7 @@ in {
   };
 
   # Ensure that this service doesn't start before postgres is ready
-  systemd.services.nextcloud-setup.after = ["postgresql.service"];
+  systemd.services.nextcloud-setup.after = [ "postgresql.service" ];
 
   # Set up SSL
   services.nginx.virtualHosts."${hostName}" = {
diff --git a/configuration/services/postgres.nix b/configuration/services/postgres.nix
index 018dc6e..62dfb01 100644
--- a/configuration/services/postgres.nix
+++ b/configuration/services/postgres.nix
@@ -1,4 +1,4 @@
-{pkgs, ...}: {
+{ pkgs, ... }: {
   services.postgresql = {
     package = pkgs.postgresql_14;
     enable = true;
diff --git a/configuration/services/starbound.nix b/configuration/services/starbound.nix
index 6da890e..3b54ee9 100644
--- a/configuration/services/starbound.nix
+++ b/configuration/services/starbound.nix
@@ -1,16 +1,17 @@
-{
-  pkgs,
-  lib,
-  ...
-}: let
+{ pkgs
+, lib
+, ...
+}:
+let
   inherit (lib) concatStringsSep;
-in {
+in
+{
   # Sadly, steam-run requires some X libs
   environment.noXlibs = false;
 
   systemd.services.starbound = {
     description = "Starbound";
-    after = ["network.target"];
+    after = [ "network.target" ];
 
     serviceConfig = {
       ExecStart = "${pkgs.local.starbound}/bin/launch-starbound ${./configs/starbound.json}";
@@ -67,7 +68,7 @@ in {
       # Game servers shouldn't use cgroups themselves either
       ProtectControlGroups = true;
       # Most game servers will never need other socket types
-      RestrictAddressFamilies = ["AF_UNIX AF_INET AF_INET6"];
+      RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
       # Also a no-brainer, no game server should ever need this
       LockPersonality = true;
       # Some game servers will probably try to set this, but they
@@ -116,6 +117,6 @@ in {
     paths = [
       "/var/lib/private/starbound/storage/universe/"
     ];
-    pauseServices = ["starbound.service"];
+    pauseServices = [ "starbound.service" ];
   };
 }
diff --git a/configuration/services/webserver.nix b/configuration/services/webserver.nix
index 387df57..e6b49b3 100644
--- a/configuration/services/webserver.nix
+++ b/configuration/services/webserver.nix
@@ -1,6 +1,8 @@
-{config, ...}: let
+{ config, ... }:
+let
   domain = config.services.nginx.domain;
-in {
+in
+{
   services.tlaternet-webserver = {
     enable = true;
     listen = {
@@ -10,15 +12,17 @@ in {
   };
 
   # Set up SSL
-  services.nginx.virtualHosts."${domain}" = let
-    inherit (config.services.tlaternet-webserver.listen) addr port;
-  in {
-    serverAliases = ["www.${domain}"];
+  services.nginx.virtualHosts."${domain}" =
+    let
+      inherit (config.services.tlaternet-webserver.listen) addr port;
+    in
+    {
+      serverAliases = [ "www.${domain}" ];
 
-    forceSSL = true;
-    useACMEHost = "tlater.net";
-    enableHSTS = true;
+      forceSSL = true;
+      useACMEHost = "tlater.net";
+      enableHSTS = true;
 
-    locations."/".proxyPass = "http://${addr}:${toString port}";
-  };
+      locations."/".proxyPass = "http://${addr}:${toString port}";
+    };
 }
diff --git a/configuration/services/wireguard.nix b/configuration/services/wireguard.nix
index 1ae6aac..057a2e9 100644
--- a/configuration/services/wireguard.nix
+++ b/configuration/services/wireguard.nix
@@ -1,4 +1,4 @@
-{config, ...}: {
+{ config, ... }: {
   # iptables needs to permit forwarding from wg0 to wg0
   networking.firewall.extraCommands = ''
     iptables -A FORWARD -i wg0 -o wg0 -j ACCEPT
@@ -26,7 +26,7 @@
           {
             # yui
             wireguardPeerConfig = {
-              AllowedIPs = ["10.45.249.2/32"];
+              AllowedIPs = [ "10.45.249.2/32" ];
               PublicKey = "5mlnqEVJWks5OqgeFA2bLIrvST9TlCE81Btl+j4myz0=";
             };
           }
@@ -34,7 +34,7 @@
           {
             # yuanyuan
             wireguardPeerConfig = {
-              AllowedIPs = ["10.45.249.10/32"];
+              AllowedIPs = [ "10.45.249.10/32" ];
               PublicKey = "0UsFE2atz/O5P3OKQ8UHyyyGQNJbp1MeIWUJLuoerwE=";
             };
           }
diff --git a/configuration/sops.nix b/configuration/sops.nix
index 0746133..bc21834 100644
--- a/configuration/sops.nix
+++ b/configuration/sops.nix
@@ -31,8 +31,8 @@
       };
 
       # Heisenbridge
-      "heisenbridge/as-token" = {};
-      "heisenbridge/hs-token" = {};
+      "heisenbridge/as-token" = { };
+      "heisenbridge/hs-token" = { };
 
       "hetzner-api" = {
         owner = "acme";
@@ -62,10 +62,10 @@
       };
 
       # Steam
-      "steam/tlater" = {};
+      "steam/tlater" = { };
 
       # Turn
-      "turn/env" = {};
+      "turn/env" = { };
       "turn/secret" = {
         owner = "turnserver";
       };
diff --git a/flake.nix b/flake.nix
index 09a74ac..afdc668 100644
--- a/flake.nix
+++ b/flake.nix
@@ -32,126 +32,130 @@
     };
   };
 
-  outputs = {
-    self,
-    nixpkgs,
-    sops-nix,
-    nvfetcher,
-    deploy-rs,
-    ...
-  } @ inputs: let
-    system = "x86_64-linux";
-    pkgs = nixpkgs.legacyPackages.${system};
-  in {
-    ##################
-    # Configurations #
-    ##################
-    nixosConfigurations = {
-      # The actual system definition
-      hetzner-1 = nixpkgs.lib.nixosSystem {
-        inherit system;
-        specialArgs.flake-inputs = inputs;
+  outputs =
+    { self
+    , nixpkgs
+    , sops-nix
+    , nvfetcher
+    , deploy-rs
+    , ...
+    } @ inputs:
+    let
+      system = "x86_64-linux";
+      pkgs = nixpkgs.legacyPackages.${system};
+    in
+    {
+      ##################
+      # Configurations #
+      ##################
+      nixosConfigurations = {
+        # The actual system definition
+        hetzner-1 = nixpkgs.lib.nixosSystem {
+          inherit system;
+          specialArgs.flake-inputs = inputs;
 
-        modules = [
-          ./configuration
-          ./configuration/hardware-specific/hetzner
+          modules = [
+            ./configuration
+            ./configuration/hardware-specific/hetzner
+          ];
+        };
+      };
+
+      ############################
+      # Deployment configuration #
+      ############################
+      deploy.nodes = {
+        hetzner-1 = {
+          hostname = "116.202.158.55";
+
+          profiles.system = {
+            user = "root";
+            path = deploy-rs.lib.${system}.activate.nixos self.nixosConfigurations.hetzner-1;
+          };
+
+          sshUser = "tlater";
+          sshOpts = [ "-p" "2222" "-o" "ForwardAgent=yes" ];
+        };
+      };
+
+      #########
+      # Tests #
+      #########
+      checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
+
+      ###################
+      # Utility scripts #
+      ###################
+      apps.${system} = {
+        default = self.apps.${system}.run-vm;
+
+        run-vm = {
+          type = "app";
+          program =
+            let
+              vm = nixpkgs.lib.nixosSystem {
+                inherit system;
+                specialArgs.flake-inputs = inputs;
+
+                modules = [
+                  ./configuration
+                  ./configuration/hardware-specific/vm.nix
+                ];
+              };
+            in
+            (pkgs.writeShellScript "" ''
+              ${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
+            '').outPath;
+        };
+
+        update-pkgs = {
+          type = "app";
+          program =
+            let
+              nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
+            in
+            (pkgs.writeShellScript "update-pkgs" ''
+              cd "$(git rev-parse --show-toplevel)/pkgs"
+              ${nvfetcher-bin} -o _sources_pkgs -c nvfetcher.toml
+            '').outPath;
+        };
+
+        update-nextcloud-apps = {
+          type = "app";
+          program =
+            let
+              nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
+            in
+            (pkgs.writeShellScript "update-nextcloud-apps" ''
+              cd "$(git rev-parse --show-toplevel)/pkgs"
+              ${nvfetcher-bin} -o _sources_nextcloud -c nextcloud-apps.toml
+            '').outPath;
+        };
+      };
+
+      ###########################
+      # Development environment #
+      ###########################
+      devShells.${system}.default = nixpkgs.legacyPackages.${system}.mkShell {
+        sopsPGPKeyDirs = [ "./keys/hosts/" "./keys/users/" ];
+        nativeBuildInputs = [
+          sops-nix.packages.${system}.sops-import-keys-hook
+        ];
+
+        packages = with pkgs; [
+          sops-nix.packages.${system}.sops-init-gpg-key
+          deploy-rs.packages.${system}.default
+
+          nixpkgs-fmt
+
+          cargo
+          clippy
+          rustc
+          rustfmt
+          rust-analyzer
+          pkg-config
+          openssl
         ];
       };
     };
-
-    ############################
-    # Deployment configuration #
-    ############################
-    deploy.nodes = {
-      hetzner-1 = {
-        hostname = "116.202.158.55";
-
-        profiles.system = {
-          user = "root";
-          path = deploy-rs.lib.${system}.activate.nixos self.nixosConfigurations.hetzner-1;
-        };
-
-        sshUser = "tlater";
-        sshOpts = ["-p" "2222" "-o" "ForwardAgent=yes"];
-      };
-    };
-
-    #########
-    # Tests #
-    #########
-    checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) deploy-rs.lib;
-
-    ###################
-    # Utility scripts #
-    ###################
-    apps.${system} = {
-      default = self.apps.${system}.run-vm;
-
-      run-vm = {
-        type = "app";
-        program = let
-          vm = nixpkgs.lib.nixosSystem {
-            inherit system;
-            specialArgs.flake-inputs = inputs;
-
-            modules = [
-              ./configuration
-              ./configuration/hardware-specific/vm.nix
-            ];
-          };
-        in
-          (pkgs.writeShellScript "" ''
-            ${vm.config.system.build.vm.outPath}/bin/run-testvm-vm
-          '')
-          .outPath;
-      };
-
-      update-pkgs = {
-        type = "app";
-        program = let
-          nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
-        in
-          (pkgs.writeShellScript "update-pkgs" ''
-            cd "$(git rev-parse --show-toplevel)/pkgs"
-            ${nvfetcher-bin} -o _sources_pkgs -c nvfetcher.toml
-          '')
-          .outPath;
-      };
-
-      update-nextcloud-apps = {
-        type = "app";
-        program = let
-          nvfetcher-bin = "${nvfetcher.packages.${system}.default}/bin/nvfetcher";
-        in
-          (pkgs.writeShellScript "update-nextcloud-apps" ''
-            cd "$(git rev-parse --show-toplevel)/pkgs"
-            ${nvfetcher-bin} -o _sources_nextcloud -c nextcloud-apps.toml
-          '')
-          .outPath;
-      };
-    };
-
-    ###########################
-    # Development environment #
-    ###########################
-    devShells.${system}.default = nixpkgs.legacyPackages.${system}.mkShell {
-      sopsPGPKeyDirs = ["./keys/hosts/" "./keys/users/"];
-      nativeBuildInputs = [
-        sops-nix.packages.${system}.sops-import-keys-hook
-      ];
-
-      packages = with pkgs; [
-        sops-nix.packages.${system}.sops-init-gpg-key
-        deploy-rs.packages.${system}.default
-
-        cargo
-        clippy
-        rustc
-        rustfmt
-        rust-analyzer
-        pkg-config
-        openssl
-      ];
-    };
-  };
 }
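
Note: the flake.nix rewrite is the leading-comma convention at its most visible: each formal of the `outputs` pattern gets `, name` on its own line, with `@ inputs` kept against the closing brace. A self-contained sketch of the same shape (the attribute values are placeholder strings; evaluating it returns "a"):

    ({ self
     , nixpkgs
     , ...
     } @ inputs: inputs.self) { self = "a"; nixpkgs = "b"; }

nixpkgs-fmt also attaches `.outPath` directly to the closing parenthesis of each `writeShellScript` call instead of giving it its own line; the selected attribute is unchanged.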
diff --git a/modules/nginxExtensions.nix b/modules/nginxExtensions.nix
index 9fe489a..3603756 100644
--- a/modules/nginxExtensions.nix
+++ b/modules/nginxExtensions.nix
@@ -1,8 +1,7 @@
-{
-  config,
-  pkgs,
-  lib,
-  ...
+{ config
+, pkgs
+, lib
+, ...
 }: {
   options = {
     services.nginx.domain = lib.mkOption {
@@ -10,36 +9,37 @@
       description = "The base domain name to append to virtual domain names";
     };
 
-    services.nginx.virtualHosts = let
-      extraVirtualHostOptions = {
-        name,
-        config,
-        ...
-      }: {
-        options = {
-          enableHSTS = lib.mkEnableOption "Enable HSTS";
+    services.nginx.virtualHosts =
+      let
+        extraVirtualHostOptions =
+          { name
+          , config
+          , ...
+          }: {
+            options = {
+              enableHSTS = lib.mkEnableOption "Enable HSTS";
 
-          addAccessLog = lib.mkOption {
-            type = lib.types.bool;
-            default = true;
-            description = ''
-              Add special logging to `/var/log/nginx/''${serverName}`
-            '';
+              addAccessLog = lib.mkOption {
+                type = lib.types.bool;
+                default = true;
+                description = ''
+                  Add special logging to `/var/log/nginx/''${serverName}`
+                '';
+              };
+            };
+
+            config = {
+              extraConfig = lib.concatStringsSep "\n" [
+                (lib.optionalString config.enableHSTS ''
+                  add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
+                '')
+                (lib.optionalString config.addAccessLog ''
+                  access_log /var/log/nginx/${name}/access.log upstream_time;
+                '')
+              ];
+            };
           };
-        };
-
-        config = {
-          extraConfig = lib.concatStringsSep "\n" [
-            (lib.optionalString config.enableHSTS ''
-              add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
-            '')
-            (lib.optionalString config.addAccessLog ''
-              access_log /var/log/nginx/${name}/access.log upstream_time;
-            '')
-          ];
-        };
-      };
-    in
+      in
       lib.mkOption {
         type = lib.types.attrsOf (lib.types.submodule extraVirtualHostOptions);
       };
@@ -47,13 +47,15 @@
 
   config = {
     # Don't attempt to run acme if the domain name is not tlater.net
-    systemd.services = let
-      confirm = ''[[ "tlater.net" = ${config.services.nginx.domain} ]]'';
-    in
-      lib.mapAttrs' (cert: _:
-        lib.nameValuePair "acme-${cert}" {
-          serviceConfig.ExecCondition = ''${pkgs.runtimeShell} -c '${confirm}' '';
-        })
-      config.security.acme.certs;
+    systemd.services =
+      let
+        confirm = ''[[ "tlater.net" = ${config.services.nginx.domain} ]]'';
+      in
+      lib.mapAttrs'
+        (cert: _:
+          lib.nameValuePair "acme-${cert}" {
+            serviceConfig.ExecCondition = ''${pkgs.runtimeShell} -c '${confirm}' '';
+          })
+        config.security.acme.certs;
   };
 }
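
Note: in nginxExtensions.nix, the call `lib.mapAttrs' (cert: _: ...) config.security.acme.certs` becomes one argument per line, each indented under the function. The same layout with `builtins.mapAttrs` so the sketch evaluates without nixpkgs (it yields { a = 2; b = 3; }):

    builtins.mapAttrs
      (name: value: value + 1)
      { a = 1; b = 2; }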
diff --git a/pkgs/afvalcalendar/default.nix b/pkgs/afvalcalendar/default.nix
index 12e2af5..12b868c 100644
--- a/pkgs/afvalcalendar/default.nix
+++ b/pkgs/afvalcalendar/default.nix
@@ -1,7 +1,6 @@
-{
-  pkgs,
-  rustPlatform,
-  ...
+{ pkgs
+, rustPlatform
+, ...
 }:
 rustPlatform.buildRustPackage {
   pname = "afvalcalendar";
diff --git a/pkgs/default.nix b/pkgs/default.nix
index 3162787..132d0f5 100644
--- a/pkgs/default.nix
+++ b/pkgs/default.nix
@@ -1,22 +1,23 @@
-{
-  pkgs,
-  lib,
-}: let
+{ pkgs
+, lib
+,
+}:
+let
   inherit (builtins) fromJSON mapAttrs readFile;
   inherit (pkgs) callPackage;
 in
-  {
-    starbound = callPackage ./starbound {};
-    prometheus-fail2ban-exporter = callPackage ./prometheus/fail2ban-exporter.nix {
-      sources = pkgs.callPackage ./_sources_pkgs/generated.nix {};
-    };
-    afvalcalendar = callPackage ./afvalcalendar {};
-  }
+{
+  starbound = callPackage ./starbound { };
+  prometheus-fail2ban-exporter = callPackage ./prometheus/fail2ban-exporter.nix {
+    sources = pkgs.callPackage ./_sources_pkgs/generated.nix { };
+  };
+  afvalcalendar = callPackage ./afvalcalendar { };
+}
   // (
-    # Add nextcloud apps
-    let
-      mkNextcloudApp = pkgs.callPackage ./mkNextcloudApp.nix {};
-      sources = fromJSON (readFile ./_sources_nextcloud/generated.json);
-    in
-      mapAttrs (_: source: mkNextcloudApp source) sources
-  )
+  # Add nextcloud apps
+  let
+    mkNextcloudApp = pkgs.callPackage ./mkNextcloudApp.nix { };
+    sources = fromJSON (readFile ./_sources_nextcloud/generated.json);
+  in
+  mapAttrs (_: source: mkNextcloudApp source) sources
+)
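
Note: the lone `,` line in the new pkgs/default.nix pattern is not a typo. The old pattern ended in a trailing comma, which Nix permits in argument sets, and nixpkgs-fmt preserves it on its own line rather than deleting it. An equivalence check (`f`, `g`, and the string arguments are hypothetical):

    let
      f = { pkgs, lib, }: pkgs;
      g = { pkgs
          , lib
          ,
          }: pkgs;
    in
    f { pkgs = "p"; lib = "l"; } == g { pkgs = "p"; lib = "l"; }

Both calls evaluate to "p"; dropping the trailing comma by hand would silence the odd-looking line without changing behavior. The same lone comma appears in mkNextcloudApp.nix and fail2ban-exporter.nix below.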
diff --git a/pkgs/mkNextcloudApp.nix b/pkgs/mkNextcloudApp.nix
index 9bf6b26..7453f44 100644
--- a/pkgs/mkNextcloudApp.nix
+++ b/pkgs/mkNextcloudApp.nix
@@ -1,6 +1,6 @@
-{
-  fetchNextcloudApp,
-  lib,
+{ fetchNextcloudApp
+, lib
+,
 }: source:
 fetchNextcloudApp {
   url = source.src.url;
diff --git a/pkgs/prometheus/fail2ban-exporter.nix b/pkgs/prometheus/fail2ban-exporter.nix
index b2c6a25..b74e35d 100644
--- a/pkgs/prometheus/fail2ban-exporter.nix
+++ b/pkgs/prometheus/fail2ban-exporter.nix
@@ -1,6 +1,6 @@
-{
-  buildGoModule,
-  sources,
+{ buildGoModule
+, sources
+,
 }:
 buildGoModule {
   inherit (sources.prometheus-fail2ban-exporter) pname src version;
diff --git a/pkgs/starbound/default.nix b/pkgs/starbound/default.nix
index 304f0f3..a8689f3 100644
--- a/pkgs/starbound/default.nix
+++ b/pkgs/starbound/default.nix
@@ -1,34 +1,35 @@
-{
-  stdenv,
-  lib,
-  makeWrapper,
-  patchelf,
-  steamPackages,
-  replace-secret,
-}: let
+{ stdenv
+, lib
+, makeWrapper
+, patchelf
+, steamPackages
+, replace-secret
+,
+}:
+let
   # Use the directory in which starbound is installed so steamcmd
   # doesn't have to be reinstalled constantly (we're using DynamicUser
   # with StateDirectory to persist this).
   steamcmd = steamPackages.steamcmd.override {
     steamRoot = "/var/lib/starbound/.steamcmd";
   };
-  wrapperPath = lib.makeBinPath [patchelf steamcmd replace-secret];
+  wrapperPath = lib.makeBinPath [ patchelf steamcmd replace-secret ];
 in
-  stdenv.mkDerivation {
-    name = "starbound-update-script";
-    nativeBuildInputs = [makeWrapper];
-    dontUnpack = true;
-    patchPhase = ''
-      interpreter="$(cat $NIX_CC/nix-support/dynamic-linker)"
-      substitute ${./launch-starbound.sh} launch-starbound --subst-var interpreter
-    '';
-    installPhase = ''
-      mkdir -p $out/bin
-      cp launch-starbound $out/bin/launch-starbound
-      chmod +x $out/bin/launch-starbound
-    '';
-    postFixup = ''
-      wrapProgram $out/bin/launch-starbound \
-          --prefix PATH : "${wrapperPath}"
-    '';
-  }
+stdenv.mkDerivation {
+  name = "starbound-update-script";
+  nativeBuildInputs = [ makeWrapper ];
+  dontUnpack = true;
+  patchPhase = ''
+    interpreter="$(cat $NIX_CC/nix-support/dynamic-linker)"
+    substitute ${./launch-starbound.sh} launch-starbound --subst-var interpreter
+  '';
+  installPhase = ''
+    mkdir -p $out/bin
+    cp launch-starbound $out/bin/launch-starbound
+    chmod +x $out/bin/launch-starbound
+  '';
+  postFixup = ''
+    wrapProgram $out/bin/launch-starbound \
+        --prefix PATH : "${wrapperPath}"
+  '';
+}
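
Note: the starbound phases (`patchPhase`, `installPhase`, `postFixup`) moved two columns left, but Nix's indented strings strip the common leading whitespace, so the shell text they produce should be identical. A small check under that assumption (the `echo` body is a placeholder; both bindings evaluate to "echo hi\n"):

    let
      shallow = ''
        echo hi
      '';
      deep =
        ''
          echo hi
        '';
    in
    shallow == deep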