diff --git a/configuration/services/gitea.nix b/configuration/services/gitea.nix
index 0abdb49..4ca6454 100644
--- a/configuration/services/gitea.nix
+++ b/configuration/services/gitea.nix
@@ -17,7 +17,7 @@
containers = {
gitea = {
- image = "gitea/gitea:latest";
+ image = "docker.io/gitea/gitea:latest";
volumes = [ "gitea:/data:Z" "/etc/localtime:/etc/localtime:ro" ];
dependsOn = [ "postgres" ];
@@ -35,6 +35,11 @@
DOMAIN = "gitea.tlater.net";
SSH_PORT = "2221";
};
+
+ extraOptions = [
+ "--replace"
+ "--label" "io.containers.autoupdate=image"
+ ];
};
postgres = {
diff --git a/modules/default.nix b/modules/default.nix
index 0bc1f1c..4b9cea4 100644
--- a/modules/default.nix
+++ b/modules/default.nix
@@ -1,5 +1,5 @@
{ ... }:
{
- imports = [ ./virtualisation/pods.nix ];
+ imports = [ ./virtualisation/pods.nix ./virtualisation/oci-containers.nix ];
}
diff --git a/modules/virtualisation/oci-containers.nix b/modules/virtualisation/oci-containers.nix
new file mode 100644
index 0000000..f37dcaf
--- /dev/null
+++ b/modules/virtualisation/oci-containers.nix
@@ -0,0 +1,350 @@
+# Pulled from my own modified fork of nixpkgs, awaiting merge
+# upstream.
+
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+let
+ cfg = config.virtualisation.oci-containers;
+ proxy_env = config.networking.proxy.envVars;
+
+ defaultBackend = options.virtualisation.oci-containers.backend.default;
+
+ containerOptions =
+ { ... }: {
+
+ options = {
+
+ image = mkOption {
+ type = with types; str;
+ description = "OCI image to run.";
+ example = "library/hello-world";
+ };
+
+ imageFile = mkOption {
+ type = with types; nullOr package;
+ default = null;
+ description = ''
+ Path to an image file to load instead of pulling from a registry.
+ If defined, do not pull from registry.
+
+ You still need to set the image attribute, as it
+ will be used as the image name for docker to start a container.
+ '';
+ example = literalExample "pkgs.dockerTools.buildImage {...};";
+ };
+
+ cmd = mkOption {
+ type = with types; listOf str;
+ default = [];
+ description = "Commandline arguments to pass to the image's entrypoint.";
+ example = literalExample ''
+ ["--port=9000"]
+ '';
+ };
+
+ entrypoint = mkOption {
+ type = with types; nullOr str;
+ description = "Override the default entrypoint of the image.";
+ default = null;
+ example = "/bin/my-app";
+ };
+
+ environment = mkOption {
+ type = with types; attrsOf str;
+ default = {};
+ description = "Environment variables to set for this container.";
+ example = literalExample ''
+ {
+ DATABASE_HOST = "db.example.com";
+ DATABASE_PORT = "3306";
+ }
+ '';
+ };
+
+ environmentFiles = mkOption {
+ type = with types; listOf path;
+ default = [];
+ description = "Environment files for this container.";
+ example = literalExample ''
+ [
+ /path/to/.env
+ /path/to/.env.secret
+ ]
+ '';
+ };
+
+ log-driver = mkOption {
+ type = types.str;
+ default = "journald";
+ description = ''
+ Logging driver for the container. The default of
+ "journald" means that the container's logs will be
+ handled as part of the systemd unit.
+
+ For more details and a full list of logging drivers, refer to respective backends documentation.
+
+ For Docker:
+ Docker engine documentation
+
+ For Podman:
+ Refer to the docker-run(1) man page.
+ '';
+ };
+
+ ports = mkOption {
+ type = with types; listOf str;
+ default = [];
+ description = ''
+ Network ports to publish from the container to the outer host.
+
+ Valid formats:
+
+
+
+
+ <ip>:<hostPort>:<containerPort>
+
+
+
+
+ <ip>::<containerPort>
+
+
+
+
+ <hostPort>:<containerPort>
+
+
+
+
+ <containerPort>
+
+
+
+
+ Both hostPort and
+ containerPort can be specified as a range of
+ ports. When specifying ranges for both, the number of container
+ ports in the range must match the number of host ports in the
+ range. Example: 1234-1236:1234-1236/tcp
+
+ When specifying a range for hostPort only, the
+ containerPort must not be a
+ range. In this case, the container port is published somewhere
+ within the specified hostPort range. Example:
+ 1234-1236:1234/tcp
+
+ Refer to the
+
+ Docker engine documentation for full details.
+ '';
+ example = literalExample ''
+ [
+ "8080:9000"
+ ]
+ '';
+ };
+
+ user = mkOption {
+ type = with types; nullOr str;
+ default = null;
+ description = ''
+ Override the username or UID (and optionally groupname or GID) used
+ in the container.
+ '';
+ example = "nobody:nogroup";
+ };
+
+ volumes = mkOption {
+ type = with types; listOf str;
+ default = [];
+ description = ''
+ List of volumes to attach to this container.
+
+ Note that this is a list of "src:dst" strings to
+ allow for src to refer to
+ /nix/store paths, which would be difficult with an
+ attribute set. There are also a variety of mount options available
+ as a third field; please refer to the
+
+ docker engine documentation for details.
+ '';
+ example = literalExample ''
+ [
+ "volume_name:/path/inside/container"
+ "/path/on/host:/path/inside/container"
+ ]
+ '';
+ };
+
+ workdir = mkOption {
+ type = with types; nullOr str;
+ default = null;
+ description = "Override the default working directory for the container.";
+ example = "/var/lib/hello_world";
+ };
+
+ dependsOn = mkOption {
+ type = with types; listOf str;
+ default = [];
+ description = ''
+ Define which other containers this one depends on. They will be added to both After and Requires for the unit.
+
+ Use the same name as the attribute under virtualisation.oci-containers.containers.
+ '';
+ example = literalExample ''
+ virtualisation.oci-containers.containers = {
+ node1 = {};
+ node2 = {
+ dependsOn = [ "node1" ];
+ }
+ }
+ '';
+ };
+
+ extraOptions = mkOption {
+ type = with types; listOf str;
+ default = [];
+ description = "Extra options for ${defaultBackend} run.";
+ example = literalExample ''
+ ["--network=host"]
+ '';
+ };
+
+ autoStart = mkOption {
+ type = types.bool;
+ default = true;
+ description = ''
+ When enabled, the container is automatically started on boot.
+ If this option is set to false, the container has to be started on-demand via its service.
+ '';
+ };
+ };
+ };
+
+ mkService = name: container: let
+ dependsOn = map (x: "${cfg.backend}-${x}.service") container.dependsOn;
+ in {
+ wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
+ after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ] ++ dependsOn;
+ requires = dependsOn;
+ environment = proxy_env;
+
+ path =
+ if cfg.backend == "docker" then [ config.virtualisation.docker.package ]
+ else if cfg.backend == "podman" then [ config.virtualisation.podman.package ]
+ else throw "Unhandled backend: ${cfg.backend}";
+
+ preStart = ''
+ ${cfg.backend} rm -f ${name} || true
+ ${optionalString (container.imageFile != null) ''
+ ${cfg.backend} load -i ${container.imageFile}
+ ''}
+ '';
+
+ # Podman likes knowing what systemd unit launched its container,
+ # so that it can auto-update containers and restart them.
+ #
+ # Sadly, the NixOS `script` option doesn't expose the systemd
+ # syntax `%n` that would expose the unit location, so we pass it
+ # as $1 in a scriptArg.
+ scriptArgs = lib.optionalString (cfg.backend == "podman") "%n";
+
+ script = concatStringsSep " \\\n " ([
+ "exec ${cfg.backend} run"
+ "--rm"
+ "--name=${escapeShellArg name}"
+ "--log-driver=${container.log-driver}"
+ ] ++ optional (container.entrypoint != null)
+ "--entrypoint=${escapeShellArg container.entrypoint}"
+ ++ (mapAttrsToList (k: v: "-e ${escapeShellArg k}=${escapeShellArg v}") container.environment)
+ ++ optional (cfg.backend == "podman") ''-e 'PODMAN_SYSTEMD_UNIT'="$1"''
+ ++ map (f: "--env-file ${escapeShellArg f}") container.environmentFiles
+ ++ map (p: "-p ${escapeShellArg p}") container.ports
+ ++ optional (container.user != null) "-u ${escapeShellArg container.user}"
+ ++ map (v: "-v ${escapeShellArg v}") container.volumes
+ ++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
+ ++ map escapeShellArg container.extraOptions
+ ++ [container.image]
+ ++ map escapeShellArg container.cmd
+ );
+
+ preStop = "[ \"$SERVICE_RESULT\" = success ] || ${cfg.backend} stop ${name}";
+ postStop = "${cfg.backend} rm -f ${name} || true";
+
+ serviceConfig = {
+ StandardOutput = "null";
+ StandardError = "null";
+
+ ### There is no generalized way of supporting `reload` for docker
+ ### containers. Some containers may respond well to SIGHUP sent to their
+ ### init process, but it is not guaranteed; some apps have other reload
+ ### mechanisms, some don't have a reload signal at all, and some docker
+ ### images just have broken signal handling. The best compromise in this
+ ### case is probably to leave ExecReload undefined, so `systemctl reload`
+ ### will at least result in an error instead of potentially undefined
+ ### behaviour.
+ ###
+ ### Advanced users can still override this part of the unit to implement
+ ### a custom reload handler, since the result of all this is a normal
+ ### systemd service from the perspective of the NixOS module system.
+ ###
+ # ExecReload = ...;
+ ###
+
+ TimeoutStartSec = 0;
+ TimeoutStopSec = 120;
+ Restart = "always";
+ };
+ };
+
+in {
+ imports = [
+ (
+ lib.mkChangedOptionModule
+ [ "docker-containers" ]
+ [ "virtualisation" "oci-containers" ]
+ (oldcfg: {
+ backend = "docker";
+ containers = lib.mapAttrs (n: v: builtins.removeAttrs (v // {
+ extraOptions = v.extraDockerOptions or [];
+ }) [ "extraDockerOptions" ]) oldcfg.docker-containers;
+ })
+ )
+ ];
+ disabledModules = [ "virtualisation/oci-containers.nix" ];
+
+ options.virtualisation.oci-containers = {
+
+ backend = mkOption {
+ type = types.enum [ "podman" "docker" ];
+ default =
+ # TODO: Once https://github.com/NixOS/nixpkgs/issues/77925 is resolved default to podman
+ # if versionAtLeast config.system.stateVersion "20.09" then "podman"
+ # else "docker";
+ "docker";
+ description = "The underlying Docker implementation to use.";
+ };
+
+ containers = mkOption {
+ default = {};
+ type = types.attrsOf (types.submodule containerOptions);
+ description = "OCI (Docker) containers to run as systemd services.";
+ };
+
+ };
+
+ config = lib.mkIf (cfg.containers != {}) (lib.mkMerge [
+ {
+ systemd.services = mapAttrs' (n: v: nameValuePair "${cfg.backend}-${n}" (mkService n v)) cfg.containers;
+ }
+ (lib.mkIf (cfg.backend == "podman") {
+ virtualisation.podman.enable = true;
+ })
+ (lib.mkIf (cfg.backend == "docker") {
+ virtualisation.docker.enable = true;
+ })
+ ]);
+
+}