Switch over to nix flakes and introduce podman pods #26

Manually merged
tlater merged 5 commits from tlater/flake into master 2021-04-12 02:16:38 +01:00
28 changed files with 681 additions and 801 deletions

1
.gitattributes vendored
View file

@ -1 +0,0 @@
*.tar.gz filter=lfs diff=lfs merge=lfs -text

2
.gitignore vendored
View file

@ -1,2 +1,2 @@
/result /result
/tlater.net.qcow2 /tlaternet.qcow2

View file

@ -1,11 +0,0 @@
result: etc/nixos/configuration.nix
nix-build '<nixpkgs/nixos>' -A vm -k -I nixos-config=$^
tlaternet.qcow2:
nix-shell -p qemu --run 'qemu-img create -f qcow2 $@ 10G'
run: result tlaternet.qcow2
QEMU_OPTS="-m 4096 -nographic" QEMU_NET_OPTS="hostfwd=tcp::2222-:2222,hostfwd=tcp::8000-:80,hostfwd=tcp::25565-:25565" ./result/bin/run-tlaternet-vm
format: $(wildcard etc/nixos/**/*.nix)
nix-shell -p nixpkgs-fmt --run 'nixpkgs-fmt $^'

View file

@ -6,51 +6,30 @@ This is the NixOS configuration for [tlater.net](https://tlater.net/).
### Building ### Building
To test locally in a VM, [nix](https://nixos.org/nix/) is Build the VM with:
required. Using a properly-configured nix, a qemu-based VM running the
server can be created by first applying the following patch to disable
hardware-specific configuration:
```patch
diff --git a/etc/nixos/configuration.nix b/etc/nixos/configuration.nix
index 387113d..aabee88 100644
--- a/etc/nixos/configuration.nix
+++ b/etc/nixos/configuration.nix
@@ -2,8 +2,8 @@
{
imports = [
- ./hardware-configuration.nix
- ./linode.nix
+ # ./hardware-configuration.nix
+ # ./linode.nix
];
networking = {
```
Then building the VM with:
``` ```
nix-build '<nixpkgs/nixos>' -A vm -k -I nixos-config=./configuration.nix nixos-rebuild build-vm --flake '.#vm'
``` ```
### Running ### Running
To invoke the VM, use: Running should *mostly* be as simple as running the command the build
script echoes.
``` One caveat: create a larger disk image first. This can be done by
QEMU_NET_OPTS="hostfwd=tcp::2222-:2222,hostfwd=tcp::8000-:80" ./result/bin/run-tlater.net-vm running the following in the repository root:
```bash
qemu-img create -f qcow2 ./tlaternet.qcow2 20G
``` ```
This will set up a qemu VM with ports 2222 linked to the ssh port, and Everything else should be handled by the devShell.
8000 to the http port. If other ports are required, adjust the
environment variable (notably, ssl is provided by the image, although
it should not work since it is unlikely that letsencrypt will supply
any certificates).
Note that other environment variables are available (such as one for ### New services
disabling the qt GUI, probably handy for eventual CI). They are listed
under "Building a service as a VM (for testing)" Whenever a new service is added, append an appropriate
[here](https://nixos.wiki/wiki/Cheatsheet) (not linked since the page `,hostfwd=::3<port>:<port>` to the `QEMU_NET_OPTS` specified in
isn't set up very nicely). `flake.nix` to bind the service to a host port.
There is no way to test this without binding to the host port, sadly.

71
configuration/default.nix Normal file
View file

@ -0,0 +1,71 @@
{ config, pkgs, ... }:

# Base system configuration shared by the real host and the test VM:
# flakes-enabled nix, networking/firewall, the tlater user, sshd, the nginx
# reverse proxy with ACME certificates, and the podman container backend.
{
imports =
[ ./services/gitea.nix ./services/nextcloud.nix ./services/webserver.nix ];
# Enable the (still experimental on 20.09) flakes feature set; this
# configuration is built with `nixos-rebuild --flake`.
nix = {
package = pkgs.nixFlakes;
extraOptions = ''
experimental-features = nix-command flakes
'';
};
networking = {
hostName = "tlaternet";
usePredictableInterfaceNames = false;
useDHCP = false;
interfaces.eth0.useDHCP = true;
# 80/443: nginx; 2222: host sshd; 2221: gitea ssh; 25565: minecraft.
firewall.allowedTCPPorts = [ 80 443 2222 2221 25565 ];
};
time.timeZone = "Europe/London";
users.users.tlater = {
isNormalUser = true;
extraGroups = [ "wheel" ];
openssh.authorizedKeys.keyFiles = [ ../keys/tlater.pub ];
};
# Key-only ssh on the non-standard port 2222 (2221 is taken by gitea).
services.openssh = {
enable = true;
allowSFTP = false;
passwordAuthentication = false;
permitRootLogin = "no";
ports = [ 2222 ];
startWhenNeeded = true;
};
# Reverse proxy terminating TLS for all services; each vhost forwards to a
# locally published container port.
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
clientMaxBodySize = "10G";
virtualHosts = let
# Helper: an ACME-enabled vhost proxying "/" to a local port, with
# `extra` merged in for per-host settings.
host = port: extra:
{
forceSSL = true;
enableACME = true;
locations."/" = { proxyPass = "http://localhost:${toString port}"; };
} // extra;
in {
"tlater.net" = host 3002 { serverAliases = [ "www.tlater.net" ]; };
"gitea.tlater.net" = host 3000 { };
"nextcloud.tlater.net" = host 3001 { };
};
};
security.acme = {
email = "tm@tlater.net";
acceptTerms = true;
};
virtualisation.oci-containers.backend = "podman";
system.stateVersion = "20.09";
}

View file

@ -2,9 +2,7 @@
{ {
# Required for the lish console # Required for the lish console
boot = { boot.kernelParams = [ "console=ttyS0,19200n8" ];
kernelParams = [ "console=ttyS0,19200n8" ];
};
boot.loader = { boot.loader = {
# Timeout to allow lish to connect # Timeout to allow lish to connect

View file

@ -32,7 +32,7 @@ http {
#gzip on; #gzip on;
upstream php-handler { upstream php-handler {
server nextcloud:9000; server nextcloud-nextcloud:9000;
} }
server { server {

View file

@ -0,0 +1,38 @@
{ config, ... }:

# Gitea running as a podman pod (see modules/virtualisation/pods.nix): a
# gitea container backed by a postgres container, with the web UI (3000,
# reverse-proxied by the host nginx) and gitea's ssh (2221) published.
{
virtualisation.pods.gitea = {
hostname = "gitea.tlater.net";
publish = [ "3000:3000" "2221:2221" ];
containers = {
gitea = {
image = "gitea/gitea:latest";
# Named volume for gitea state; localtime mounted read-only so the
# container shares the host timezone.
volumes = [ "gitea:/data:Z" "/etc/localtime:/etc/localtime:ro" ];
dependsOn = [ "postgres" ];
environment = {
DB_TYPE = "postgres";
# Pod containers are named "<pod>-<container>" by the pods module,
# so the database is reachable as "gitea-postgres".
DB_HOST = "gitea-postgres:5432";
DB_NAME = "gitea";
DB_USER = "gitea";
# NOTE(review): database password committed in plain text — consider
# moving it out of the repository.
DB_PASSWD = "/qNDDK9WCMuubfA7D8DFwfl9T+Gy2IMDvPhiNpcxZjY=";
RUN_MODE = "prod";
DOMAIN = "gitea.tlater.net";
SSH_PORT = "2221";
};
};
postgres = {
image = "postgres:alpine";
environment = {
POSTGRES_DB = "gitea";
POSTGRES_USER = "gitea";
# Must match DB_PASSWD in the gitea container above.
POSTGRES_PASSWORD = "/qNDDK9WCMuubfA7D8DFwfl9T+Gy2IMDvPhiNpcxZjY=";
};
volumes = [ "gitea-db-data-new:/var/lib/postgresql/data" ];
};
};
};
}

View file

@ -0,0 +1,52 @@
{ config, ... }:

# Nextcloud running as a podman pod: the fpm-based nextcloud container, a
# cron sidecar, an nginx front container (published on host port 3001 and
# reverse-proxied by the host nginx), and postgres.
{
virtualisation.pods.nextcloud = {
hostname = "nextcloud.tlater.net";
publish = [ "3001:80" ];
containers = {
nextcloud = {
image = "nextcloud:fpm-alpine";
dependsOn = [ "postgres" ];
# Named volumes for app/config/data state.
volumes = [
"nextcloud-apps:/var/www/html/custom_apps"
"nextcloud-config:/var/www/html/config"
"nextcloud-data:/var/www/html/data"
];
environment = {
POSTGRES_DB = "nextcloud";
POSTGRES_USER = "nextcloud";
# Pod containers are named "<pod>-<container>" by the pods module.
POSTGRES_HOST = "nextcloud-postgres";
# NOTE(review): password committed in plain text — consider moving
# it out of the repository.
POSTGRES_PASSWORD = "rI7t7Nek1yGA9ucrRc7Uhy0jcjwPjnXa8me4o8tJON8=";
# TLS is terminated by the host nginx, so tell nextcloud to
# generate https URLs.
OVERWRITEPROTOCOL = "https";
};
};
# Runs nextcloud's periodic background jobs via the image's /cron.sh.
cron = {
image = "nextcloud:fpm-alpine";
entrypoint = "/cron.sh";
dependsOn = [ "postgres" "nextcloud" ];
# Share the nextcloud container's volumes (its podman name is
# "nextcloud-nextcloud").
extraOptions = [ "--volumes-from=nextcloud-nextcloud" ];
};
# Front webserver for the pod; its config forwards php requests to the
# fpm container (see configs/nginx-nextcloud.conf).
nginx = {
image = "nginx:alpine";
dependsOn = [ "nextcloud" ];
volumes =
[ "${./configs/nginx-nextcloud.conf}:/etc/nginx/nginx.conf:ro" ];
extraOptions = [ "--volumes-from=nextcloud-nextcloud" ];
};
postgres = {
image = "postgres:alpine";
environment = {
POSTGRES_DB = "nextcloud";
POSTGRES_USER = "nextcloud";
# Must match POSTGRES_PASSWORD in the nextcloud container above.
POSTGRES_PASSWORD = "rI7t7Nek1yGA9ucrRc7Uhy0jcjwPjnXa8me4o8tJON8=";
};
volumes = [ "nextcloud-db-data-new:/var/lib/postgresql/data" ];
};
};
};
}

View file

@ -0,0 +1,27 @@
{ config, pkgs, ... }:

# The tlater.net webserver itself, packaged as a locally-built docker image
# and run as an oci-container behind the host nginx (host port 3002).
{
virtualisation.oci-containers.containers.webserver = {
image = "tlaternet/webserver";
# Build the image with dockerTools instead of pulling from a registry;
# `image` above is the name the loaded image is started under.
imageFile = pkgs.dockerTools.buildImage {
name = "tlaternet/webserver";
tag = "latest";
# pkgs.tlaternet-webserver / pkgs.tlaternet-templates come from the
# flake-input overlay defined in flake.nix.
contents = pkgs.tlaternet-webserver.webserver;
config = {
Cmd = [ "tlaternet-webserver" ];
Volumes = { "/srv/mail" = { }; };
Env = [
"ROCKET_PORT=80"
"ROCKET_TEMPLATE_DIR=${pkgs.tlaternet-templates.templates}/browser/"
];
ExposedPorts = { "80" = { }; };
};
};
ports = [ "3002:80" ];
# Named volume so /srv/mail survives container recreation.
volumes = [ "tlaternet-mail:/srv/mail" ];
extraOptions = [ "--hostname=tlater.net" ];
};
}

View file

@ -1,68 +0,0 @@
{ config, pkgs, ... }:
{
imports = [
./hardware-configuration.nix
./linode.nix
<nixpkgs/nixos/modules/profiles/headless.nix>
./modules/networked-docker-containers.nix
# FIXME: It'd be much nicer if these were imported further down,
# and set inside the docker-containers set, instead of setting the
# docker-containers set here.
./services/nginx.nix
./services/gitea.nix
./services/nextcloud.nix
./services/tlaternet.nix
./services/minecraft.nix
];
networking = {
hostName = "tlaternet";
usePredictableInterfaceNames = false;
# useDHCP is deprecated
useDHCP = false;
interfaces.eth0.useDHCP = true;
firewall = {
enable = true;
allowedTCPPorts = [
80
443
2222
2221
25565
];
};
};
time.timeZone = "Europe/London";
users.users = {
tlater = {
isNormalUser = true;
extraGroups = [ "wheel" "docker" ];
openssh.authorizedKeys.keyFiles = [ ./keys/tlater.pub ];
};
};
services = {
openssh = {
enable = true;
allowSFTP = false;
passwordAuthentication = false;
permitRootLogin = "no";
ports = [ 2222 ];
startWhenNeeded = true;
};
};
virtualisation.docker = {
enable = true;
autoPrune.enable = true;
};
system.stateVersion = "19.09";
}

BIN
etc/nixos/derivations/dist.tar.gz (Stored with Git LFS)

Binary file not shown.

@ -1 +0,0 @@
Subproject commit 114a87885e1b3852c4cf6fe9854c86045770b3e4

View file

@ -1,15 +0,0 @@
{ pkgs ? import <nixpkgs> {} }:
with pkgs;
# TODO: Once https://github.com/svanderburg/node2nix/issues/184 is
# fixed, use a proper nixos package instead of a tarball.
stdenv.mkDerivation {
pname = "tlaternet-templates";
version = "1.0";
src = ./dist.tar.gz;
installPhase = ''
mkdir -p $out/srv/
mv browser $out/srv/web
'';
}

View file

@ -1,306 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.networked-docker-containers;
networkedDockerContainer =
{ ... }: {
options = {
image = mkOption {
type = with types; str;
description = "Docker image to run.";
example = "library/hello-world";
};
imageFile = mkOption {
type = with types; nullOr package;
default = null;
description = ''
Path to an image file to load instead of pulling from a registry.
If defined, do not pull from registry.
You still need to set the <literal>image</literal> attribute, as it
will be used as the image name for docker to start a container.
'';
example = literalExample "pkgs.dockerTools.buildDockerImage {...};";
};
cmd = mkOption {
type = with types; listOf str;
default = [];
description = "Commandline arguments to pass to the image's entrypoint.";
example = literalExample ''
["--port=9000"]
'';
};
entrypoint = mkOption {
type = with types; nullOr str;
description = "Overwrite the default entrypoint of the image.";
default = null;
example = "/bin/my-app";
};
environment = mkOption {
type = with types; attrsOf str;
default = {};
description = "Environment variables to set for this container.";
example = literalExample ''
{
DATABASE_HOST = "db.example.com";
DATABASE_PORT = "3306";
}
'';
};
log-driver = mkOption {
type = types.str;
default = "none";
description = ''
Logging driver for the container. The default of
<literal>"none"</literal> means that the container's logs will be
handled as part of the systemd unit. Setting this to
<literal>"journald"</literal> will result in duplicate logging, but
the container's logs will be visible to the <command>docker
logs</command> command.
For more details and a full list of logging drivers, refer to the
<link xlink:href="https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver">
Docker engine documentation</link>
'';
};
networks = mkOption {
type = with types; listOf str;
default = [];
description = ''
Docker networks to create and connect this container to.
The first network in this list will be connected with
<literal>--network=</literal>, others after container
creation with <command>docker network connect</command>.
Any networks will be created if they do not exist before
the container is started.
'';
};
ports = mkOption {
type = with types; listOf str;
default = [];
description = ''
Network ports to publish from the container to the outer host.
Valid formats:
<itemizedlist>
<listitem>
<para>
<literal>&lt;ip&gt;:&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
</para>
</listitem>
<listitem>
<para>
<literal>&lt;ip&gt;::&lt;containerPort&gt;</literal>
</para>
</listitem>
<listitem>
<para>
<literal>&lt;hostPort&gt;:&lt;containerPort&gt;</literal>
</para>
</listitem>
<listitem>
<para>
<literal>&lt;containerPort&gt;</literal>
</para>
</listitem>
</itemizedlist>
Both <literal>hostPort</literal> and
<literal>containerPort</literal> can be specified as a range of
ports. When specifying ranges for both, the number of container
ports in the range must match the number of host ports in the
range. Example: <literal>1234-1236:1234-1236/tcp</literal>
When specifying a range for <literal>hostPort</literal> only, the
<literal>containerPort</literal> must <emphasis>not</emphasis> be a
range. In this case, the container port is published somewhere
within the specified <literal>hostPort</literal> range. Example:
<literal>1234-1236:1234/tcp</literal>
Refer to the
<link xlink:href="https://docs.docker.com/engine/reference/run/#expose-incoming-ports">
Docker engine documentation</link> for full details.
'';
example = literalExample ''
[
"8080:9000"
]
'';
};
user = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Override the username or UID (and optionally groupname or GID) used
in the container.
'';
example = "nobody:nogroup";
};
volumes = mkOption {
type = with types; listOf str;
default = [];
description = ''
List of volumes to attach to this container.
Note that this is a list of <literal>"src:dst"</literal> strings to
allow for <literal>src</literal> to refer to
<literal>/nix/store</literal> paths, which would difficult with an
attribute set. There are also a variety of mount options available
as a third field; please refer to the
<link xlink:href="https://docs.docker.com/engine/reference/run/#volume-shared-filesystems">
docker engine documentation</link> for details.
'';
example = literalExample ''
[
"volume_name:/path/inside/container"
"/path/on/host:/path/inside/container"
]
'';
};
workdir = mkOption {
type = with types; nullOr str;
default = null;
description = "Override the default working directory for the container.";
example = "/var/lib/hello_world";
};
dependsOn = mkOption {
type = with types; listOf str;
default = [];
description = ''
Define which other containers this one depends on. They will be added to both After and Requires for the unit.
Use the same name as the attribute under <literal>services.docker-containers</literal>.
'';
example = literalExample ''
services.docker-containers = {
node1 = {};
node2 = {
dependsOn = [ "node1" ];
}
}
'';
};
extraDockerOptions = mkOption {
type = with types; listOf str;
default = [];
description = "Extra options for <command>docker run</command>.";
example = literalExample ''
["--network=host"]
'';
};
};
};
mkService = name: container: let
mkAfter = map (x: "docker-${x}.service") container.dependsOn;
in
rec {
wantedBy = [ "multi-user.target" ];
after = [ "docker.service" "docker.socket" "docker-networks.service" ] ++ mkAfter;
requires = after;
serviceConfig = {
ExecStart = [ "${pkgs.docker}/bin/docker start -a ${name}" ];
ExecStartPre = [
"-${pkgs.docker}/bin/docker rm -f ${name}"
"-${pkgs.docker}/bin/docker image prune -f"
] ++ (
optional (container.imageFile != null)
[ "${pkgs.docker}/bin/docker load -i ${container.imageFile}" ]
) ++ [
(
concatStringsSep " \\\n " (
[
"${pkgs.docker}/bin/docker create"
"--rm"
"--name=${name}"
"--log-driver=${container.log-driver}"
] ++ optional (container.entrypoint != null)
"--entrypoint=${escapeShellArg container.entrypoint}"
++ (mapAttrsToList (k: v: "-e ${escapeShellArg k}=${escapeShellArg v}") container.environment)
++ map (p: "-p ${escapeShellArg p}") container.ports
++ optional (container.user != null) "-u ${escapeShellArg container.user}"
++ map (v: "-v ${escapeShellArg v}") container.volumes
++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
++ optional (container.networks != []) "--network=${escapeShellArg (builtins.head container.networks)}"
++ map escapeShellArg container.extraDockerOptions
++ [ container.image ]
++ map escapeShellArg container.cmd
)
)
] ++ map (n: "${pkgs.docker}/bin/docker network connect ${escapeShellArg n} ${name}") (drop 1 container.networks);
ExecStop = ''${pkgs.bash}/bin/sh -c "[ $SERVICE_RESULT = success ] || ${pkgs.docker}/bin/docker stop ${name}"'';
ExecStopPost = "-${pkgs.docker}/bin/docker rm -f ${name}";
### There is no generalized way of supporting `reload` for docker
### containers. Some containers may respond well to SIGHUP sent to their
### init process, but it is not guaranteed; some apps have other reload
### mechanisms, some don't have a reload signal at all, and some docker
### images just have broken signal handling. The best compromise in this
### case is probably to leave ExecReload undefined, so `systemctl reload`
### will at least result in an error instead of potentially undefined
### behaviour.
###
### Advanced users can still override this part of the unit to implement
### a custom reload handler, since the result of all this is a normal
### systemd service from the perspective of the NixOS module system.
###
# ExecReload = ...;
###
TimeoutStartSec = 0;
TimeoutStopSec = 120;
Restart = "no";
};
};
in
{
options.networked-docker-containers = mkOption {
default = {};
type = types.attrsOf (types.submodule networkedDockerContainer);
description = "Docker containers to run as systemd services.";
};
config = mkIf (cfg != {}) {
systemd.services = mapAttrs' (n: v: nameValuePair "docker-${n}" (mkService n v)) cfg // {
"docker-networks" = rec {
after = [ "docker.service" "docker.socket" ];
requires = after;
serviceConfig = {
Type = "oneshot";
ExecStart = map (
n: ''${pkgs.bash}/bin/sh -c "${pkgs.docker}/bin/docker network inspect ${escapeShellArg n} > /dev/null || \
${pkgs.docker}/bin/docker network create ${escapeShellArg n}"''
) (unique (flatten (mapAttrsToList (_: c: c.networks) cfg)));
};
};
};
virtualisation.docker.enable = true;
};
}

Binary file not shown.

View file

@ -1 +0,0 @@
client_max_body_size 16G;

View file

@ -1,51 +0,0 @@
{ ... }:
{
networked-docker-containers = {
gitea = {
image = "gitea/gitea:latest";
ports = [
"2221:2221"
];
volumes = [
"gitea:/data:Z"
"/etc/timezone:/etc/timezone:ro"
"/etc/localtime:/etc/localtime:ro"
];
environment = {
VIRTUAL_PORT = "3000";
VIRTUAL_HOST = "gitea.tlater.net";
LETSENCRYPT_HOST = "gitea.tlater.net";
DB_TYPE = "postgres";
DB_HOST = "gitea-postgres:5432";
DB_NAME = "gitea";
DB_USER = "gitea";
DB_PASSWD = "/qNDDK9WCMuubfA7D8DFwfl9T+Gy2IMDvPhiNpcxZjY=";
RUN_MODE = "prod";
DOMAIN = "gitea.tlater.net";
SSH_PORT = "2221";
};
networks = [
"webproxy"
"gitea"
];
};
gitea-postgres = {
image = "postgres:alpine";
environment = {
POSTGRES_DB = "gitea";
POSTGRES_USER = "gitea";
POSTGRES_PASSWORD = "/qNDDK9WCMuubfA7D8DFwfl9T+Gy2IMDvPhiNpcxZjY=";
};
volumes = [
"gitea-db-data-new:/var/lib/postgresql/data"
];
networks = [
"gitea"
];
};
};
}

View file

@ -1,130 +0,0 @@
{ pkgs, ... }:
let
entrypoint = pkgs.writeScript "entrypoint.sh" ''
#!${pkgs.bash}/bin/bash
${pkgs.busybox}/bin/mkdir -p /var/lib/
${pkgs.gzip}/bin/gzip -dc ${./configs/minecraft.tar.gz} | ${pkgs.gnutar}/bin/tar -xf - -C /var/lib
echo 'eula=true' > /var/lib/minecraft/eula.txt
${pkgs.busybox}/bin/cp -f ${properties} /var/lib/minecraft/server.properties
$@
'';
ops = pkgs.writeText "ops.json" (builtins.toJSON [
{
uuid = "140d177a-966f-41b8-a4c0-e305babd291b";
name = "TLATER";
level = 4;
bypassesPlayerLimit = true;
}
]);
whitelist = pkgs.writeText "whitelist.json" (builtins.toJSON [
{
uuid = "59cd1648-14a4-4bcf-8f5a-2e1bde678f2c";
name = "romino25";
}
{
uuid = "0ab6e3d1-544a-47e7-8538-2e6c248e49a4";
name = "lasi25";
}
{
uuid = "140d177a-966f-41b8-a4c0-e305babd291b";
name = "TLATER";
}
]);
properties = pkgs.writeText "server.properties" ''
#Minecraft server properties
#Sun Jul 19 16:04:54 GMT 2020
max-tick-time=60000
generator-settings=
allow-nether=true
force-gamemode=false
gamemode=0
enable-query=false
player-idle-timeout=0
difficulty=1
spawn-monsters=true
op-permission-level=4
pvp=false
snooper-enabled=true
level-type=DEFAULT
hardcore=false
enable-command-block=false
max-players=4
network-compression-threshold=256
resource-pack-sha1=
max-world-size=29999984
server-port=25565
server-ip=
spawn-npcs=true
allow-flight=true
level-name=world
view-distance=15
resource-pack=
spawn-animals=true
white-list=true
generate-structures=true
online-mode=true
max-build-height=256
level-seed=
prevent-proxy-connections=false
use-native-transport=true
motd=Adventures met die broers
enable-rcon=false
'';
in
{
docker-containers = {
minecraft = {
image = "tlaternet/minecraft";
imageFile = pkgs.dockerTools.buildImage {
name = "tlaternet/minecraft";
tag = "latest";
config = {
Entrypoint = [ "${entrypoint}" ];
Cmd = [ "${pkgs.jre}/bin/java"
"-Xms2G"
"-Xmx2G"
# Using recommended flags from https://mcflags.emc.gs
"-XX:+UseG1GC"
"-XX:+ParallelRefProcEnabled"
"-XX:MaxGCPauseMillis=200"
"-XX:+UnlockExperimentalVMOptions"
"-XX:+DisableExplicitGC"
"-XX:+AlwaysPreTouch"
"-XX:G1NewSizePercent=30"
"-XX:G1MaxNewSizePercent=40"
"-XX:G1HeapRegionSize=8M"
"-XX:G1ReservePercent=20"
"-XX:G1HeapWastePercent=5"
"-XX:G1MixedGCCountTarget=4"
"-XX:InitiatingHeapOccupancyPercent=15"
"-XX:G1MixedGCLiveThresholdPercent=90"
"-XX:G1RSetUpdatingPauseTimePercent=5"
"-XX:SurvivorRatio=32"
"-XX:+PerfDisableSharedMem"
"-XX:MaxTenuringThreshold=1"
"-jar"
"/var/lib/minecraft/forge-1.12.2-14.23.5.2854.jar"
"nogui"
];
Volumes = {
"/var/lib/minecraft/world" = {};
};
WorkingDir = "/var/lib/minecraft";
ExposedPorts = {
"25565" = {};
};
};
};
ports = [
"25565:25565"
];
volumes = [
"minecraft:/var/lib/minecraft/world"
"${ops}:/var/lib/minecraft/ops.json:ro"
"${whitelist}:/var/lib/minecraft/whitelist.json:ro"
];
};
};
}

View file

@ -1,76 +0,0 @@
{ ... }:
{
networked-docker-containers = {
nextcloud = {
image = "nextcloud:fpm-alpine";
dependsOn = ["nextcloud-postgres"];
volumes = [
"nextcloud-apps:/var/www/html/custom_apps"
"nextcloud-config:/var/www/html/config"
"nextcloud-data:/var/www/html/data"
];
environment = {
POSTGRES_DB = "nextcloud";
POSTGRES_USER = "nextcloud";
POSTGRES_HOST = "nextcloud-postgres";
POSTGRES_PASSWORD = "rI7t7Nek1yGA9ucrRc7Uhy0jcjwPjnXa8me4o8tJON8=";
OVERWRITEPROTOCOL = "https";
};
networks = [
"nextcloud"
];
extraDockerOptions = [
"--domainname=nextcloud.tlater.net"
];
};
nextcloud-cron = {
image = "nextcloud:fpm-alpine";
entrypoint = "/cron.sh";
dependsOn = ["nextcloud-postgres"];
extraDockerOptions = [
"--volumes-from"
"nextcloud"
];
networks = [
"nextcloud"
];
};
nextcloud-nginx = {
image = "nginx:alpine";
dependsOn = ["nextcloud"];
environment = {
LETSENCRYPT_HOST = "nextcloud.tlater.net";
VIRTUAL_HOST = "nextcloud.tlater.net";
};
volumes = [
"${./configs/nginx-nextcloud.conf}:/etc/nginx/nginx.conf:ro"
];
networks = [
"webproxy"
"nextcloud"
];
extraDockerOptions = [
"--volumes-from"
"nextcloud"
];
};
nextcloud-postgres = {
image = "postgres:alpine";
environment = {
POSTGRES_DB = "nextcloud";
POSTGRES_USER = "nextcloud";
POSTGRES_PASSWORD = "rI7t7Nek1yGA9ucrRc7Uhy0jcjwPjnXa8me4o8tJON8=";
};
volumes = [
"nextcloud-db-data-new:/var/lib/postgresql/data"
];
networks = [
"nextcloud"
];
};
};
}

View file

@ -1,47 +0,0 @@
{ ... }:
{
networked-docker-containers = {
nginx-proxy = {
image = "jwilder/nginx-proxy:alpine";
ports = [
"80:80"
"443:443"
];
volumes = [
"${./configs/nginx-proxy.conf}:/etc/nginx/conf.d/general.conf:ro"
# So that we can watch new containers come up
"/var/run/docker.sock:/tmp/docker.sock:ro"
# So that we can access generated certs
"nginx-certs:/etc/nginx/certs:ro"
# So that we can write challenge files for letsencrypt auth
"nginx-challenges:/usr/share/nginx/html"
# So that we can modify config on-the-fly to set up challenge
# files
"nginx-conf:/etc/nginx/vhost.d"
];
environment = {
DHPARAM_GENERATION = "false"; # Provided by nginx-proxy-letsencrypt
};
networks = [
"webproxy"
];
};
nginx-proxy-letsencrypt = {
image = "jrcs/letsencrypt-nginx-proxy-companion";
dependsOn = ["nginx-proxy"];
volumes = [
"/var/run/docker.sock:/var/run/docker.sock:ro"
"nginx-certs:/etc/nginx/certs"
];
environment = {
DEFAULT_EMAIL = "tm@tlater.net";
};
extraDockerOptions = [
"--volumes-from"
"nginx-proxy"
];
};
};
}

View file

@ -1,45 +0,0 @@
{ pkgs, ... }:
let
tlaternet = import ../derivations/tlaternet { inherit pkgs; };
tlaternet-templates = import ../derivations/tlaternet-templates.nix { inherit pkgs; };
in
{
networked-docker-containers = {
web = {
image = "tlaternet/web";
imageFile = pkgs.dockerTools.buildImage {
name = "tlaternet/web";
tag = "latest";
contents = tlaternet;
config = {
Cmd = [ "${tlaternet}/bin/tlaternet" ];
Volumes = {
"/srv/mail" = {};
};
Env = [
"ROCKET_PORT=80"
"ROCKET_TEMPLATE_DIR=${tlaternet-templates}/srv/web"
];
ExposedPorts = {
"80" = {};
};
};
};
volumes = [
"tlaternet-mail:/srv/mail"
];
environment = {
VIRTUAL_HOST = "tlater.net,www.tlater.net";
LETSENCRYPT_HOST = "tlater.net,www.tlater.net";
};
networks = [
"webproxy"
];
extraDockerOptions = [
"--domainname=tlater.net"
];
};
};
}

170
flake.lock Normal file
View file

@ -0,0 +1,170 @@
{
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1617631617,
"narHash": "sha256-PARRCz55qN3gy07VJZIlFeOX420d0nGF0RzGI/9hVlw=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b2c27d1a81b0dc266270fa8aeecebbd1807fc610",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"naersk": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1614785451,
"narHash": "sha256-TPw8kQvr2UNCuvndtY+EjyXp6Q5GEW2l9UafXXh1XmI=",
"owner": "nmattia",
"repo": "naersk",
"rev": "e0fe990b478a66178a58c69cf53daec0478ca6f9",
"type": "github"
},
"original": {
"owner": "nmattia",
"repo": "naersk",
"type": "github"
}
},
"nixos-hardware": {
"locked": {
"lastModified": 1617690895,
"narHash": "sha256-5TUizPI+ibn/LBzevTIIyIZ1XeLl3HU0PTRk7H6AKTQ=",
"owner": "nixos",
"repo": "nixos-hardware",
"rev": "7c00c8b5cab5dedb6519eabba7ca6d069e2dfdae",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "master",
"repo": "nixos-hardware",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1618149891,
"narHash": "sha256-Sz3DzI1k49Puq+F5KRBsaN3gRXHDzCTG6AwK29Znw0M=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "a7ff7a57c96588fd89370568b72751dd15d24e72",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-20.09",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1617899217,
"narHash": "sha256-gd5JHH7IkeoIQ/oiGZSqDpGdGt7DMRJTQ8JiD8+hdOQ=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "9e377a6ce42dccd9b624ae4ce8f978dc892ba0e2",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs",
"tlaternet-templates": "tlaternet-templates",
"tlaternet-webserver": "tlaternet-webserver"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1617071065,
"narHash": "sha256-9JXhxwlc/ZJaO4aZ3cUwQwlK7ZRamjV+BvOTvdXrggs=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "38766381042021f547a168ebb3f10305dc6fde08",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"tlaternet-templates": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-unstable": "nixpkgs-unstable"
},
"locked": {
"lastModified": 1618117315,
"narHash": "sha256-RSqbWv20zNDhCx1VARxEjrYH1pNv+H2pY8dQ29tdNjA=",
"ref": "master",
"rev": "6da1d644ac02143172d20e0d3e9fcd7a0c8720ef",
"revCount": 60,
"type": "git",
"url": "https://gitea.tlater.net/tlaternet/tlaternet-templates.git"
},
"original": {
"type": "git",
"url": "https://gitea.tlater.net/tlaternet/tlaternet-templates.git"
}
},
"tlaternet-webserver": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"naersk": "naersk",
"nixpkgs": [
"nixpkgs"
],
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1617992257,
"narHash": "sha256-TfcLtS/1Niv21NN5e9VGUbK0fpoOvgmx1caO4LBcTas=",
"ref": "master",
"rev": "ff25f151d3c170c7290b83be5cbdb1fd84261997",
"revCount": 14,
"type": "git",
"url": "https://gitea.tlater.net/tlaternet/tlaternet.git"
},
"original": {
"type": "git",
"url": "https://gitea.tlater.net/tlaternet/tlaternet.git"
}
}
},
"root": "root",
"version": 7
}

78
flake.nix Normal file
View file

@ -0,0 +1,78 @@
{
  description = "tlater.net host configuration";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs/nixos-20.09";
    nixos-hardware.url = "github:nixos/nixos-hardware/master";
    flake-utils.url = "github:numtide/flake-utils";

    tlaternet-webserver = {
      url = "git+https://gitea.tlater.net/tlaternet/tlaternet.git";
      inputs = {
        flake-utils.follows = "flake-utils";
        nixpkgs.follows = "nixpkgs";
      };
    };

    tlaternet-templates = {
      url = "git+https://gitea.tlater.net/tlaternet/tlaternet-templates.git";
      inputs = {
        flake-utils.follows = "flake-utils";
        nixpkgs.follows = "nixpkgs";
      };
    };
  };

  outputs = { nixpkgs, nixos-hardware, flake-utils, tlaternet-webserver
    , tlaternet-templates, ... }@inputs:
    let
      # Expose the webserver/template packages from the flake inputs as
      # pkgs.tlaternet-webserver / pkgs.tlaternet-templates (consumed by
      # configuration/services/webserver.nix).
      overlays = [
        (final: prev: {
          tlaternet-webserver =
            tlaternet-webserver.legacyPackages.${prev.system}.packages;
          tlaternet-templates =
            tlaternet-templates.legacyPackages.${prev.system}.packages;
        })
      ];
    in {
      nixosConfigurations = {
        # The real host: shared configuration plus linode- and
        # hardware-specific modules and the headless profile.
        tlaternet = nixpkgs.lib.nixosSystem {
          system = "x86_64-linux";
          modules = [
            ({ ... }: { nixpkgs.overlays = overlays; })
            (import ./modules)
            (import ./configuration)
            (import ./configuration/linode.nix)
            (import ./configuration/hardware-configuration.nix)
            # The nixpkgs flake has no `modules` output
            # (`nixpkgs.modules.headless` does not exist), so import the
            # headless profile by store path, mirroring the old
            # <nixpkgs/nixos/modules/profiles/headless.nix> import.
            "${nixpkgs}/nixos/modules/profiles/headless.nix"
          ];
        };

        # Local test VM (`nixos-rebuild build-vm --flake '.#vm'`): the same
        # configuration without the hardware-specific modules, with a known
        # login password for the tlater user.
        vm = nixpkgs.lib.nixosSystem {
          system = "x86_64-linux";
          modules = [
            ({ ... }: { nixpkgs.overlays = overlays; })
            (import ./modules)
            (import ./configuration)
            ({ ... }: {
              users.users.tlater.password = "insecure";

              # virtualisation.memorySize = 3941;
              # virtualisation.cores = 2;
            })
          ];
        };
      };
    } // flake-utils.lib.eachDefaultSystem (system: {
      # Dev shell: formatting/LFS tooling plus the QEMU settings used when
      # running the test VM (host 3022 -> guest ssh 2222, 3080 -> 80, etc.).
      devShell = with nixpkgs.legacyPackages.${system};
        mkShell {
          buildInputs = [ nixfmt git-lfs ];

          shellHook = ''
            export QEMU_OPTS="-m 3941 -smp 2"
            export QEMU_NET_OPTS="hostfwd=::3022-:2222,hostfwd=::3080-:80,hostfwd=::3443-:443,hostfwd=::3021-:2221,hostfwd=::25565-:25565"
          '';
        };
    });
}

5
modules/default.nix Normal file
View file

@ -0,0 +1,5 @@
{ ... }:

# Entry point for local NixOS modules; currently only the podman pods module.
{
imports = [ ./virtualisation/pods.nix ];
}

View file

@ -0,0 +1,220 @@
{ lib, config, options, ... }:

# NixOS module that runs podman pods as systemd services (modelled on what
# `podman generate systemd` emits) and wires containers declared inside a
# pod into virtualisation.oci-containers with a `--pod=` option.
with lib;
let
cfg = config.virtualisation.pods;

# Render a repeatable CLI flag: list-to-args "dns" [ "a" "b" ] yields
# "--dns='a' --dns='b'".
list-to-args = arg: list:
concatStringsSep " " (map (e: "--${arg}=${escapeShellArg e}") list);

# Render an optional flag; emits the empty string when the value is null.
# NOTE(review): the boolean-typed options below (infra, no-hosts) would be
# passed through escapeShellArg/string interpolation here, which does not
# accept booleans — confirm these options work when actually set.
possibly-unset-arg = arg: val:
(optionalString (val != null) "--${arg}=${escapeShellArg val}");

# Build the systemd service definition for one pod.
mkPod = name: pod: rec {
path = [ config.virtualisation.podman.package ];
# NOTE(review): `wants` pulls in network.target while `after` orders
# against network-online.target — confirm the asymmetry is intended.
wants = [ "network.target" ];
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" "default.target" ];
environment.PODMAN_SYSTEMD_UNIT = "%n";
# Create (or, with --replace, recreate) the pod before starting it; the
# infra conmon pidfile is what systemd tracks for this forking service.
preStart = concatStringsSep " " [
"mkdir -p /run/podman/pods/ ;"
"podman pod create"
"--infra-conmon-pidfile=${escapeShellArg "/run/podman/pods/${name}.pid"}"
"--name=${escapeShellArg name}"
"--replace"
(list-to-args "add-host" pod.added-hosts)
(possibly-unset-arg "cgroup-parent" pod.cgroup-parent)
(list-to-args "dns" pod.dns)
(list-to-args "dns-opt" pod.dns-opt)
(list-to-args "dns-search" pod.dns-search)
(possibly-unset-arg "hostname" pod.hostname)
(possibly-unset-arg "infra" pod.infra)
(possibly-unset-arg "infra-command" pod.infra-command)
(possibly-unset-arg "infra-image" pod.infra-image)
(possibly-unset-arg "ip" pod.ip)
(possibly-unset-arg "mac-address" pod.mac-address)
(possibly-unset-arg "network" pod.network)
(possibly-unset-arg "network-alias" pod.network-alias)
(possibly-unset-arg "no-hosts" pod.no-hosts)
(list-to-args "publish" pod.publish)
(list-to-args "share" pod.share)
];
script = "podman pod start ${escapeShellArg name}";
preStop = "podman pod stop ${escapeShellArg name}";
# `podman generate systemd` generates a second stop after the
# first; not sure why but clearly it's recommended.
postStop = preStop;
serviceConfig = rec {
Type = "forking";
TimeoutStopSec = 70;
Restart = "on-failure";
PIDFile = "/run/podman/pods/${name}.pid";
};
};
in {
# One attribute per pod; most options mirror `podman pod create` flags.
options.virtualisation.pods = mkOption {
type = with types;
attrsOf (submodule {
options = {
added-hosts = mkOption {
type = listOf str;
default = [ ];
description =
"Additional hosts to add to /etc/hosts for each container.";
example = literalExample ''
[ "database:10.0.0.1" ]
'';
};
cgroup-parent = mkOption {
type = nullOr str;
default = null;
description =
"The cgroups path under which the pod cgroup will be created.";
};
dns = mkOption {
type = listOf str;
default = [ ];
description = "The dns servers to set in /etc/resolv.conf.";
};
dns-opt = mkOption {
type = listOf str;
default = [ ];
description = "dns options to set in /etc/resolv.conf.";
};
dns-search = mkOption {
type = listOf str;
default = [ ];
description = "Search domains to set in /etc/resolv.conf.";
};
hostname = mkOption {
type = nullOr str;
default = null;
description = "The pod hostname.";
};
infra = mkOption {
type = nullOr bool;
default = null;
description = "Whether to create the infra container for the pod.";
};
infra-command = mkOption {
type = nullOr str;
default = null;
description = "The command to run in the infra container.";
};
infra-image = mkOption {
type = nullOr str;
default = null;
description = "The image to use for the infra container.";
};
ip = mkOption {
type = nullOr str;
default = null;
description = "A static IP address for the pod network.";
};
# TODO: set up label file stuff.
#
# labels = mkOption {};
mac-address = mkOption {
type = nullOr str;
default = null;
description = "A static mac address for the pod network.";
};
network = mkOption {
type = nullOr str;
default = null;
description = "Network configuration for the pod.";
};
network-alias = mkOption {
type = nullOr str;
default = null;
description = "DNS alias for the pod.";
};
no-hosts = mkOption {
type = nullOr bool;
default = null;
description = "Whether to disable /etc/hosts creation for the pod.";
};
publish = mkOption {
type = listOf str;
default = [ ];
description = "List of ports to publish from the pod.";
};
share = mkOption {
type = listOf str;
default = [ ];
description = "List of kernel namespaces to share.";
};
# Reuse the oci-containers option declaration so pod containers
# accept exactly the same settings as plain oci-containers.
containers = options.virtualisation.oci-containers.containers;
};
});
default = { };
description = "Podman pods to run as systemd services.";
};
config = let
# Merge a list of attribute sets together
#
# TODO: See if there's a generic version for this somewhere in the
# pkgs lib?
mergeAttrs = attrList: foldr (a: b: a // b) { } attrList;
# Create services for all defined pods
pod-services = mapAttrs' (n: v: nameValuePair "pod-${n}" (mkPod n v)) cfg;
# Override the systemd-specific settings of containers defined in
# pods.
#
# I.e., make a systemd unit dependency on the pod service.
# (oci-containers names its units "podman-<container>", and pod
# containers are registered below as "<pod>-<container>".)
pod-container-services = mergeAttrs (mapAttrsToList (pname: pod:
mapAttrs' (cname: container:
nameValuePair "podman-${pname}-${cname}" rec {
after = [ "pod-${pname}.service" ];
requires = after;
}) pod.containers) cfg);
# Override the oci-container settings for containers defined in pods.
#
# I.e., set the --pod=podname setting, and update the dependsOn so
# it points to containers in the same pod.
podifyContainer = container: podname:
container // {
dependsOn =
map (dependency: "${podname}-${dependency}") container.dependsOn;
extraOptions = container.extraOptions ++ [ "--pod=${podname}" ];
};
in lib.mkIf (cfg != { }) {
virtualisation.podman.enable = true;
virtualisation.oci-containers.backend = "podman";
systemd.services = pod-services // pod-container-services;
# Register every pod container under the name "<pod>-<container>" so
# names stay unique across pods.
virtualisation.oci-containers.containers = mergeAttrs (mapAttrsToList
(pname: pod:
mapAttrs' (cname: container:
nameValuePair "${pname}-${cname}" (podifyContainer container pname))
pod.containers) cfg);
};
}
}