even more backups of things
This commit is contained in: parent b8d125d448, commit 630f9b0074
46 changed files with 1166 additions and 197 deletions
@@ -1,6 +1,6 @@
{ config, pkgs, lib, ... }:
{
sops.defaultSopsFile = ./secrets/common.yaml;
#sops.defaultSopsFile = ./secrets/common.yaml;
networking.hostName = "lithium";
networking.domain = lib.mkForce config.vars.domain;
environment.systemPackages = with pkgs; [
@@ -16,6 +16,7 @@
./hardware.nix
./configuration.nix
./semi-secret-vars.nix

./services/caddy.nix
./services/tailscale.nix
./services/kanidm.nix
@@ -25,12 +26,18 @@
./services/miniflux
./services/forgejo.nix

./services/immich.nix

./services/calibre-web.nix

# Monitoring
./services/monitoring
./services/smartd.nix

# Game Servers
./services/palworld

# TODO: Add Karakeep with yt-dlp + https://news.ycombinator.com/item?id=45595084

# Services running in virtual machines
#./microvms
18 hosts/lithium/microvms/default.nix Normal file
@@ -0,0 +1,18 @@
{ config, pkgs, lib, ... }:
{
  microvm.autostart = [
    "palworld-server"
  ];

  microvm = {
    interfaces = [
      { type = "user"; id = "main-net"; }
      { type = "macvtap"; id = "vm-palworld"; }
    ];

    # Interface Name on the Host
    # Ethernet Address of MicroVM's interface.
    # Locally administered have one of 2/6/A/E in the second nibble.
    #interfaces = [{type = "tap";id = "vm-palworld";mac = "02:00:00:00:00:01";}];
  };
}
107 hosts/lithium/microvms/palworld/default.nix Normal file
@@ -0,0 +1,107 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
{
|
||||
# Host Firewall
|
||||
networking.firewall.allowedUDPPorts = [ 8211 ];
|
||||
#networking.nat = {
|
||||
#enable = true;
|
||||
#enableIPv6 = true;
|
||||
#externalInterface = "eth0";
|
||||
#internalInterfaces = [ "microvm" ];
|
||||
#};
|
||||
|
||||
microvm.vms.palworld-server = {
|
||||
# Basic Requirements
|
||||
# https://docs.palworldgame.com/getting-started/requirements
|
||||
#hypervisor = "qemu";
|
||||
vcpu = 4;
|
||||
memory = 16348;
|
||||
|
||||
# Networking
|
||||
interfaces = [{ type = "user"; id = "main-net"; }];
|
||||
|
||||
# Interface Name on the Host
|
||||
# Ethernet Address of MicroVM's interface.
|
||||
# Locally administered have one of 2/6/A/E in the second nibble.
|
||||
#interfaces = [{type = "tap";id = "vm-palworld";mac = "02:00:00:00:00:01";}];
|
||||
#forwardPorts = [
|
||||
#{ proto = "udp"; from = "host"; host.port = 8211; guest.port = 8211; }
|
||||
# Optional: If you need RCON or other ports, add them here
|
||||
# { proto = "tcp"; from = "host"; host.port = 25575; guest.port = 25575; }
|
||||
#];
|
||||
|
||||
# Persistent Data
|
||||
sharedDirectories = [
|
||||
{
|
||||
source = "/var/lib/palworld-data";
|
||||
target = "/var/lib/palworld-server";
|
||||
readonly = false;
|
||||
}
|
||||
];
|
||||
|
||||
# VM NixOS Configuration
|
||||
config = {
|
||||
imports = [ pkgs.nixosModules.notDetected ];
|
||||
|
||||
networking.hostName = "palworld-vm";
|
||||
time.timeZone = "America/Chicago";
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
steamcmd
|
||||
#glibc
|
||||
#gnumake
|
||||
#cff
|
||||
];
|
||||
|
||||
# Pre-VM-Start
|
||||
binScripts.tap-up = lib.mkAfter ''
|
||||
${lib.getExe' pkgs.iproute2 "ip"} link set dev 'vm-ixp-as11201p' master 'ixp-peering'
|
||||
'';
|
||||
|
||||
# Service Definition
|
||||
systemd.services.palworld-dedicated = {
|
||||
description = "Palworld Dedicated Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = "palworld";
|
||||
Group = "palworld";
|
||||
# Working Directory points to where steamcmd installs the server
|
||||
WorkingDirectory = "/var/lib/palworld-server/Pal/Binaries/Win64";
|
||||
ExecStart = ''
|
||||
${pkgs.steam-run}/bin/steam-run ${pkgs.bash}/bin/bash -c '\
|
||||
${pkgs.steamcmd}/bin/steamcmd \
|
||||
+force_install_dir /var/lib/palworld-server \
|
||||
+login anonymous \
|
||||
+app_update 2394010 validate \
|
||||
+quit \
|
||||
&& \
|
||||
./PalServer.sh -userperfthreads -NoAsyncLoadingThread -UseNvidiaServers -nosteamclient \
|
||||
-Players=8 -Port=8211 -queryport=27015 -PublicPort=8211 -PublicIP=\"\" -RCONEnabled=False
|
||||
'
|
||||
'';
|
||||
Restart = "on-failure";
|
||||
RestartSec = "5s";
|
||||
LimitNPROC = 10000;
|
||||
LimitNOFILE = 100000;
|
||||
};
|
||||
};
|
||||
|
||||
# User and Group Configuration
|
||||
users.users.palworld = {
|
||||
isSystemUser = true;
|
||||
group = "palworld";
|
||||
createHome = false;
|
||||
};
|
||||
users.groups.palworld = {};
|
||||
|
||||
# Firewall Configuration
|
||||
networking.firewall.allowedUDPPorts = [ 8211 ];
|
||||
|
||||
# Ensure correct permissions for shared directory
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /var/lib/palworld-server 0755 palworld palworld -"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
50 hosts/lithium/microvms/valheim.nix Normal file
@@ -0,0 +1,50 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
{
|
||||
microvm.vms.valheim = {
|
||||
autoStart = true;
|
||||
memorySize = 4096;
|
||||
vcpu = 2;
|
||||
|
||||
forwardPorts = [
|
||||
{ from = "host"; hostPort = 2456; guestPort = 2456; proto = "udp"; }
|
||||
{ from = "host"; hostPort = 2457; guestPort = 2457; proto = "udp"; }
|
||||
];
|
||||
|
||||
# NOTE: For games with large save files, choose a path in "/tank" for
|
||||
# storage.
|
||||
sharedDirectories = [
|
||||
{
|
||||
hostPath = "/srv/game-data-valheim";
|
||||
guestPath = "/data";
|
||||
tag = "valheim-data";
|
||||
readOnly = false;
|
||||
}
|
||||
];
|
||||
|
||||
packages = [ pkgs.steamcmd pkgs.steam-run ];
|
||||
|
||||
users.users.valheim = {
|
||||
isNormalUser = true;
|
||||
home = "/home/valheim";
|
||||
extraGroups = [ "wheel" ];
|
||||
};
|
||||
|
||||
systemd.services.valheim = {
|
||||
description = "Valheim Dedicated Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
WorkingDirectory = "/data";
|
||||
ExecStart = ''
|
||||
${pkgs.steam-run}/bin/steam-run ./valheim_server.x86_64 \
|
||||
-name "Valheim NixOS" \
|
||||
-port 2456 \
|
||||
-world "FlatEarth" \
|
||||
-password "secret" \
|
||||
-public 1
|
||||
'';
|
||||
Restart = "always";
|
||||
User = "valheim";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
8 hosts/lithium/private-config.nix Normal file
@@ -0,0 +1,8 @@
{ inputs, ... }:
# let secretsPath = builtins.toString inputs.nixos-secrets; in
{
  #imports = [ inputs.nixos-secrets.nixosModules.private-config ];

  # Enables a whole litany of private settings.
  private-config.enable = true;
}
||||
|
|
@ -1,27 +1,30 @@
|
|||
kanidm:
|
||||
admin-password: ENC[AES256_GCM,data:wNE9qWAjfp8tf29sn1Q6GYrbw8g=,iv:uzg971jGIVyEkEbcOm2W8dy4wVgWiL+4Ph/f/bnieI0=,tag:/yY1okvnJLYGw2OLBd2Zdg==,type:str]
|
||||
idm-admin-password: ENC[AES256_GCM,data:jIWaXUgHjhp0bP/DrF1m+plzcvE=,iv:nNpIkg9FTbCncih1/pAk4o7teuk7Gf/nPXyrnpFx4no=,tag:WhhsjtEdyS3Zw4F7uF9APg==,type:str]
|
||||
admin-password: ENC[AES256_GCM,data:Hvmo6YG2ZCoYdQOBOyPiS2XAm6I=,iv:qKu5vlT0HEqK3Mx3zgAA0OUA+B63rEXnq/P059mrweI=,tag:K+iTc8R30ClP8egzIEtXKA==,type:str]
|
||||
idm-admin-password: ENC[AES256_GCM,data:Dvz2o6gY/G3igJFhaIQ4gj/OG/8=,iv:hDE+y8SKqRU8tNxnd7q4CE3GfOHOkMcODpqc43KiPfc=,tag:fU8AyFokVUhERUvi6W6bYw==,type:str]
|
||||
forgejo-admin-password: ENC[AES256_GCM,data:d7a2pzSpaeZ0CQ==,iv:XB6Y41egclWzmyZe3g2Z9U1NcCilw3VTZNlym94h3IU=,tag:vR6jrXiBciPXzUmvxHzQNw==,type:str]
|
||||
miniflux:
|
||||
oauth2_client_secret: ENC[AES256_GCM,data:tZk6Ru5MQk+VJ/ulZNtKirL2lfLmOeVKsDbJfLly1SBwXC6HdjBx+yAFCc7YVX+2,iv:GIIMhV/sIALjoUZsMMXAJl968owAlULJ1JLpShqa3RM=,tag:c2i+LvGJKQm82fsUEHF/BA==,type:str]
|
||||
sops:
|
||||
age:
|
||||
- recipient: age1mv8xtvkuuw3hphq5ytaekz7p8a4kht79uajyhy534uy9e5472fhqj5zpxu
|
||||
enc: |
|
||||
-----BEGIN AGE ENCRYPTED FILE-----
|
||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB5cFlReGMxV1R3QW1Vd1RU
|
||||
WTgzNm5tbGhld3RMTGpMU1M2eVdoU0hmZGtNCmZLSTNOMk9IMDh6K2svRmRveGw3
|
||||
dlFUZ2lzTDBJWnBSTEhVTmVDOHVTdW8KLS0tIDhLWVZWSVJYM2x4YTlZWWZZZVNh
|
||||
T2drb0p6TjZrZldpU0VUd0xmcVJUSk0KMjX3vr/74/HU7fmulefUHiNzwX8LcAes
|
||||
ob3fabhMk9lmbuQk21rpoWbz3PNTfCQH63q+h7gLJTCCW2ISTvh/KQ==
|
||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA4WUlyeStvZ28xVmV4VlNK
|
||||
d1J1Z0twaTFzSEFENit0S210eTJFYTR1c1FRCnQvcFRCMUtFMkJxUDlRUTNxcTlK
|
||||
dE5aM3U4Y0xvRE5jZlhrbjVtVk5rOHMKLS0tIHZISVlVcW9yT3hWcUtsVmN5TmRv
|
||||
bXk5aSs4Wk9nM1p5d3FVSFBFKzVYZVEKYP2KQZIYm+zuI6OTfy85cEhj3gJWoKNu
|
||||
jxd8vxwSbDmsXQK+mT8MsA9s+A9AhzGcZQ0rIQM/yKWFKSXt4kJ9rg==
|
||||
-----END AGE ENCRYPTED FILE-----
|
||||
- recipient: age148yre4vaxp6lm59rft24te46szawqyguf8znkrtpq7ud8tpteauqxkwyjl
|
||||
enc: |
|
||||
-----BEGIN AGE ENCRYPTED FILE-----
|
||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBpK3VvNUhHNmJUQVZlREJl
|
||||
c1oyZWx4ekNEM0VqL3NKanNmSmZtQXNpcWdVCmpWcklLVnhWUisvcGZzV1NHN3p4
|
||||
bW4wL09wL01XaHpveGdmbU4rbEp5NmsKLS0tIElhQVRmS05xUmJIZlI0S1dyWGhV
|
||||
OGtKdHVwbWY2akJTQkF4YzlnNWQzNU0K81PyJ1tOvwOohNu9iUkS8vE7UXFRnJab
|
||||
8OLHtzX7FrkIH8rO2D5vEL9gPmxUtNKc9Ad3sndQls/yfg4wJAYedA==
|
||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBHQk1kYjU1bXZ0aUc3cGVQ
|
||||
N1hwclNpRkJWQlVhaU9EOEFNRkRQMmhLakNjCjAwbE1UYkxxbzNxQjV5Y1I0TU0r
|
||||
cmZBUTZsTzMxZmFZZUxnRmw3T205aDAKLS0tIGdJTHBHVmk4TmlBQ1RHYkJ3aWw3
|
||||
bktSejFIdS8wS2R4RHRFMEwwb2pBN1kK8EoLo1E/DiFmpCf/v0kGLPcqIB1qZDd1
|
||||
Uf7ccMFqvQH8wGVuyqqwiZ2SconvK7hHC5U9qgi6bZa/t4aW8eeU/g==
|
||||
-----END AGE ENCRYPTED FILE-----
|
||||
lastmodified: "2025-06-07T02:02:46Z"
|
||||
mac: ENC[AES256_GCM,data:7mWOon8Hs2oU40l1dx4tVE8yXgcKoxfUAzY8zbtTzXqCOhzTzhY4OZAZiu4RiUSIOm7dMdQbH9zSx0j+5e2k9QflfJDDM3rWfunTa7L8Bm8k9b/WjS0Fnb7OV0InO6tLxQwkTamMcc7ORrKxwHB5PwuXD+efeWNXveHo5GYgF+M=,iv:seh2Pzt+AmmxyD5hwh3VkLQTDMq0Gh9mV6J3QrtxcmM=,tag:jpXn2fht8wArB2KD/ZmbyA==,type:str]
|
||||
lastmodified: "2025-10-03T06:48:03Z"
|
||||
mac: ENC[AES256_GCM,data:tglsrKi8Ydifc08LLA/KHzqWI3u9+Tn9kkERI3XKp/vSy4a90T2fU9f/lcyYbOXXuOOCL364wYq7ETmsTzOmb5Eo2Wtcu9Hsn820rLXuxNu7kyz9os/0eL/IThrPQtKov4/IdzoGQCqZvd9kcxTN1UPlRnd9aiHwsKmoFFFNZlE=,iv:y9+JQRXZf/6GabaQ1nQk3dL7qKMQxyzvIsXU2yVmlZo=,tag:klmWeIP068BpsSi8u/tE4A==,type:str]
|
||||
unencrypted_suffix: _unencrypted
|
||||
version: 3.10.2
|
||||
|
|
|
|||
11 hosts/lithium/services/README.md Normal file
@@ -0,0 +1,11 @@
# hosts/lithium/services

The idea is that each `*.nix` or each `./*/default.nix` file would contain all
necessary details for a service to bring itself up and be running.

One thing I have overlooked thus far is that nothing tests for the existence of a
reverse proxy and bails out if one isn't available. Practically, if Caddy isn't
running, most of these services should not run either, or at the very least the
blocks pertaining to setting up reverse-proxy details don't need to run.

There's a way of doing that with things like lib.mkDefault and so forth.
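One way to express the dependency the README above describes is to gate each service and its virtual-host block on Caddy being enabled. This is a minimal sketch, not part of the commit; audiobookshelf is used as the example and the domain is a placeholder.

```nix
{ config, lib, ... }:
{
  # Only bring the service (and its reverse-proxy block) up when Caddy is enabled.
  config = lib.mkIf config.services.caddy.enable {
    services.audiobookshelf.enable = lib.mkDefault true;
    services.caddy.virtualHosts."audiobooks.example.com".extraConfig = ''
      reverse_proxy :${toString config.services.audiobookshelf.port}
    '';
  };
}
```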
65 hosts/lithium/services/acme-dns.nix Normal file
@@ -0,0 +1,65 @@
|
|||
{ inputs, config, ... }:
|
||||
let
|
||||
homelabDomain = inputs.nixos-secrets.homelabDomain;
|
||||
certDir = config.security.acme.certs."${homelabDomain}".directory;
|
||||
in
|
||||
{
|
||||
sops.secrets."cloudflare/dns_api_token" = {
|
||||
mode = "0440";
|
||||
group = config.services.caddy.group;
|
||||
restartUnits = [ "caddy.service" "ddclient.service" ];
|
||||
};
|
||||
|
||||
|
||||
# TODO: Consider defining reverse proxy all in one location.
|
||||
# All the ports and domains would be visible in one place.
|
||||
security.acme = {
|
||||
acceptTerms = true;
|
||||
defaults = {
|
||||
# NOTE: Uncomment the following line for testing, comment for production.
|
||||
server = "https://acme-staging-v02.api.letsencrypt.org/directory";
|
||||
dnsProvider = "cloudflare";
|
||||
dnsResolver = "1.1.1.1:53";
|
||||
dnsPropagationCheck = true;
|
||||
credentialFiles = {
|
||||
CLOUDFLARE_DNS_API_TOKEN_FILE = config.sops.secrets."cloudflare/dns_api_token".path;
|
||||
};
|
||||
group = config.services.caddy.group;
|
||||
#reloadServices = [ "caddy" ];
|
||||
email = "admin+acme@${homelabDomain}"; # NOTE: This email is /dev/null;
|
||||
#keyType = "ec384";
|
||||
};
|
||||
};
|
||||
|
||||
services.ddclient = {
|
||||
enable = true;
|
||||
protocol = "cloudflare";
|
||||
usev4 = "webv4, webv4=https://cloudflare.com/cdn-cgi/trace, web-skip='ip='";
|
||||
username = "token";
|
||||
#secretsFile = config.sops.secrets."cloudflare/dns_api_token".path;
|
||||
passwordFile = config.sops.secrets."cloudflare/dns_api_token".path;
|
||||
zone = homelabDomain;
|
||||
domains = [
|
||||
homelabDomain
|
||||
"*.${homelabDomain}"
|
||||
"id.${homelabDomain}"
|
||||
"status.${homelabDomain}"
|
||||
"grafana.${homelabDomain}"
|
||||
"feeds.${homelabDomain}"
|
||||
"git.${homelabDomain}"
|
||||
"tv.${homelabDomain}"
|
||||
"demo.${homelabDomain}" # Testing to see if the DNS record is set.
|
||||
];
|
||||
};
|
||||
|
||||
# NOTE: Issue a single cert /w subdomain wildcard
|
||||
# At the expense of individual service security, some public details about
|
||||
# attack surface remain slightly more private in https://crt.sh/
|
||||
security.acme.certs."${homelabDomain}" = {
|
||||
#group = config.services.caddy.group;
|
||||
domain = "${homelabDomain}";
|
||||
extraDomainNames = [ "*.${homelabDomain}" ];
|
||||
};
|
||||
# Nginx useACMEHost provides the DNS-01 challenge.
|
||||
# security.acme.certs."${homelabDomain}".directory
|
||||
}
|
||||
20 hosts/lithium/services/audiobookshelf.nix Normal file
@@ -0,0 +1,20 @@
{ inputs, config, pkgs, lib, ... }:
let
  homelabDomain = inputs.nixos-secrets.homelabDomain;
  svcDomain = "audiobooks.${homelabDomain}";
  svcPort = config.services.audiobookshelf.port; # Prevent a conflict
in
{
  services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
    reverse_proxy :${toString svcPort}
  '';

  services.audiobookshelf = {
    enable = true;
    openFirewall = true;
    port = 8000;

    # NOTE: Path to AudioBookShelf config & metadata inside of `/var/lib`
    dataDir = "audiobookshelf";
  };
}
@@ -1,13 +1,22 @@
{ config, pkgs, ... }:
{ inputs, config, pkgs, lib, ... }:
let
homelabDomain = inputs.nixos-secrets.homelabDomain;
certDir = config.security.acme.certs."${homelabDomain}".directory;
in
{
sops.secrets.caddy_env = {
sopsFile = ../secrets/caddy.env;
format = "dotenv";
services.nginx.enable = lib.mkForce false;

sops.secrets.cloudflare_env = {
mode = "0440";
owner = config.services.caddy.user;
sopsFile = "${inputs.nixos-secrets}/lithium/cloudflare.env";
format = "dotenv";
group = config.services.caddy.group;
restartUnits = [ "caddy.service" ];
};

# TODO: Revert to using Caddy DNS for the whole thing.
# TODO: Add another cloudflare DDNS provider.
# TODO: Add Metrics with Prometheus & Grafana
services.caddy = {
enable = true;
package = pkgs.caddy.withPlugins {
@@ -16,26 +25,33 @@
"github.com/mholt/caddy-dynamicdns@v0.0.0-20250430031602-b846b9e8fb83"
"github.com/caddy-dns/cloudflare@v0.2.1"
];

# NOTE: Built on 6/4/2025
hash = "sha256-swskhAr7yFJX+qy0FR54nqJarTOojwhV2Mbk7+fyS0I=";
# NOTE: Built on 9/30/2025
hash = "sha256-xuwNkxZop+RnzFtM9DEwah95nPSyx8KgM+Eu4EJ9kqI=";
};
# NOTE: Use Staging CA while testing, check `systemctl status caddy`
# to see if everything is working.
# acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";

# TODO: Add Metrics with Prometheus & Grafana
environmentFile = config.sops.secrets.caddy_env.path;

environmentFile = config.sops.secrets.cloudflare_env.path;
# NOTE: DNS provider settings
# https://caddy.community/t/how-to-use-dns-provider-modules-in-caddy-2/8148
globalConfig = ''
# acme_dns cloudflare {env.CLOUDFLARE_API_TOKEN}
#acme_dns cloudflare {$CLOUDFLARE_DNS_API_TOKEN}
dynamic_dns {
provider cloudflare {env.CLOUDFLARE_API_TOKEN}
provider cloudflare {$CLOUDFLARE_DNS_API_TOKEN}
check_interval 30m
ttl 5m
domains {
${config.networking.domain} @
${homelabDomain} @
}
dynamic_domains
}
'';

};
networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall = {
allowedTCPPorts = [ 80 443 ];
allowedUDPPorts = [ 443 ];
};

}
|
|
|
|||
|
|
@@ -1,11 +1,13 @@
{ inputs, config, pkgs, lib, ... }:
let
homelabDomain = inputs.nixos-secrets.homelabDomain;
certDir = config.security.acme.certs."${homelabDomain}".directory;
#certDir = config.security.acme.certs."${homelabDomain}".directory;
svcDomain = "books.${homelabDomain}";
svcHttpPort = config.services.calibre-web.listen.port;
web_data_dir = "calibre-web";
# TODO: I want the actual media stored in the tank.
library_path = "/tank/media/library/books";
#library_path = "/var/lib/calibre-library";
in
{
# TODO: This isn't the right place for this, but we need to guarantee that a
@@ -14,19 +16,25 @@ in
users.groups.media = {};

services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
reverse_proxy :${toString svcHttpPort}
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy localhost:8883
'';

# reverse_proxy :${toString svcHttpPort}
# encode {
# zstd
# gzip
# minimum_length 1024
# }
# '';

# NOTE: Needs some manual setup in the Web-UI, and I encountered issues connecting even with the firewall enabled.
# The following command is what I used to forward the port:
# ssh -f -N -L localhost:8883:localhost:8883 jml@lithium
services.calibre-web = {
enable = true;
listen.port = 8083;
listen.port = 8883;
# NOTE: Don't need to open calibre-web port, it's served by reverse_proxy
openFirewall = false;
openFirewall = true; # TODO: Temporarily opened to allow configuration from inside my network.

user = "calibre-web";
group = "calibre-web";
@@ -38,6 +46,7 @@ in
options = {
enableBookUploading = true;
enableBookConversion = true;
# NOTE: If I don't already have an extant calibreLibrary, I need to leave this null or the app won't launch.
calibreLibrary = library_path;
};
};
|
|
|
|||
|
|
@@ -41,6 +41,7 @@ in
server = {
DOMAIN = svcDomain;
ROOT_URL = "https://${svcDomain}";
HTTP_PORT = 3000;
};
# NOTE: Actions support is based on: https://github.com/nektos/act
#actions = {
@@ -49,6 +50,7 @@ in
#};
actions.ENABLED = false;
# NOTE: Registration is handled with kanidm.
# Registration button link is at /user/sign_up
service = {
REGISTER_EMAIL_CONFIRM = false;
DISABLE_REGISTRATION = false;
@@ -87,13 +89,15 @@ in
services.kanidm.provision.systems.oauth2.forgejo = {
displayName = "forgejo";
# TODO: Get this from Forgejo
originUrl = "https://git.${homelabDomain}/user/oauth2/${homelabDomain}/callback";
# originUrl = "https://git.${homelabDomain}/user/oauth2/${homelabDomain}/callback";
originUrl = "${config.services.forgejo.settings.server.ROOT_URL}/user/oauth2/kanidm/callback";
originLanding = "https://git.${homelabDomain}/";
#basicSecretFile = "TODO!SETME";
scopeMaps."git.users" = [
"openid"
"email"
"profile"
"groups"
];
# WARNING: PKCE is currently not supported by gitea/forgejo,
# see https://github.com/go-gitea/gitea/issues/21376
@@ -137,5 +141,5 @@ in
# TODO: Consider automatically creating admin account and password...
# https://wiki.nixos.org/wiki/Forgejo#Ensure_users
# Might be necessary to generate a token for kanidm
#sops.secrets.forgejo-admin-password.owner = "forgejo";
sops.secrets."forgejo/admin-password".owner = "forgejo";
}
|
|
|
|||
0 hosts/lithium/services/forgejo/actions-runner.nix Normal file
0 hosts/lithium/services/forgejo/default.nix Normal file
0 hosts/lithium/services/forgejo/forgejo.nix Normal file
42 hosts/lithium/services/game_servers.nix Normal file
@@ -0,0 +1,42 @@
{ pkgs, ... }:
{
  # TODO
  # systemd.services.<name>.serviceConfig.{MemoryMax,CPUQuota}
  systemd.services.valheim-server = {
    description = "Valheim dedicated server";
    after = [ "network.target" ];
    wantedBy = [ "multi-user.target" ];

    serviceConfig = {
      Type = "simple";
      User = "valheim";
      Group = "valheim";
      ExecStart = "${pkgs.steamcmd}/bin/steamcmd +login anonymous +force_install_dir /home/valheim/server +app_update 896660 validate +exit && /home/valheim/server/valheim_server.x86_64";
    };
  };

  users.users.valheim = {
    isSystemUser = true;
    group = "valheim";
    home = "/home/valheim";
  };
  users.groups.valheim = {};

  networking.firewall = {
    allowedTCPPorts = [ 7777 2456 ];
    allowedUDPPorts = [ 7777 2457 ];
  };

  services.restic.backups.gameservers = {
    user = "root";
    # TODO: Pick a real backup directory.
    repository = "/backup/gameservers";
    paths = [
      "/var/lib/terraria"
      "/home/valheim/server"
    ];
    timerConfig = {
      OnCalendar = "daily";
    };
  };
}
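For the MemoryMax/CPUQuota TODO at the top of game_servers.nix, resource limits can be attached directly to the unit's serviceConfig. A sketch; the specific limits are assumptions, not values from this commit.

```nix
{ ... }:
{
  systemd.services.valheim-server.serviceConfig = {
    MemoryMax = "4G";   # hard memory ceiling for the unit
    CPUQuota = "200%";  # at most two cores' worth of CPU time
  };
}
```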
@@ -1,24 +0,0 @@
{ config, pkgs, ... }:
let
  svcDomain = "grafana.${config.networking.domain}";
  svcPort = config.services.grafana.settings.server.http_port;
in
{
  services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
    reverse_proxy :${svcPort}
  '';

  services.grafana = {
    enable = true;
    settings = {
      server = {
        http_addr = "127.0.0.1";
        http_port = 3000;
        enforce_domain = true;
        enable_gzip = true;
        domain = svcDomain;
      };
      analytics.reporting_enabled = false; # NOTE: Disable Telemetry
    };
  };
}
||||
7 hosts/lithium/services/home-assistant.nix Normal file
@@ -0,0 +1,7 @@
{ config, pkgs, ... }:
{
  services.home-assistant = {
    enable = true;
    openFirewall = true;
  };
}
|
|
@ -4,29 +4,47 @@ let
|
|||
svcDomain = "photos.${homelabDomain}";
|
||||
photoStorageDir = "/tank/shares/photos";
|
||||
svcPort = config.services.immich.port;
|
||||
# https://docs.immich.app/install/config-file/
|
||||
jsonSettings = {
|
||||
server.externalDomain = "https://${svcDomain}";
|
||||
oauth = {
|
||||
enabled = true;
|
||||
issuerUrl = "https://"; # TODO: the kanidm url?
|
||||
clientId = "immich";
|
||||
clientSecret = config.sops.placeholder."immich/oauth2_client_secret";
|
||||
scope = "openid email profile";
|
||||
signingAlgorithm = "ES256";
|
||||
storageLabelClaim = "email";
|
||||
buttonText = "Login with Kanidm";
|
||||
autoLaunch = true;
|
||||
mobileOverrideEnabled = true;
|
||||
mobileRedirectUri = "https://${svcDomain}/api/oauth/mobile-redirect/";
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
# NOTE: The following repo contains a highly mature immich setup on nixos.
|
||||
# https://github.com/xinyangli/nixos-config/blob/a8b5bea68caea573801ccfdb8ceacb7a8f2b0190/machines/agate/services/immich.nix
|
||||
services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
|
||||
reverse_proxy :${svcPort}
|
||||
reverse_proxy :${toString svcPort}
|
||||
'';
|
||||
|
||||
# NOTE: Primarily to contain DB_PASSWORD to make it possible to backup and restore the DB.
|
||||
sops.secrets.immich_env = {
|
||||
sopsFile = ../../secrets/immich.env;
|
||||
format = "dotenv";
|
||||
# sops.secrets.immich_env = {
|
||||
# sopsFile = ../../secrets/immich.env;
|
||||
# format = "dotenv";
|
||||
# mode = "0440";
|
||||
# owner = "immich";
|
||||
# group = "immich";
|
||||
# restartUnits = [ "immich.service" ];
|
||||
# };
|
||||
sops.secrets."immich/oauth2_client_secret" = { };
|
||||
sops.templates."immich.json" = {
|
||||
mode = "0440";
|
||||
owner = "immich";
|
||||
group = "immich";
|
||||
restartUnits = [ "immich.service" ];
|
||||
};
|
||||
sops.secrets."immich/oauth2_client_secret" = {
|
||||
owner = "immich";
|
||||
group = "kanidm";
|
||||
mode = "0440";
|
||||
restartUnits = [ "immich.service" "kanidm.service" ];
|
||||
owner = config.services.immich.user;
|
||||
group = config.services.immich.group;
|
||||
content = builtins.toJSON jsonSettings;
|
||||
};
|
||||
|
||||
users.users.immich = {
|
||||
|
|
@ -45,27 +63,12 @@ in
|
|||
enable = true;
|
||||
openFirewall = true;
|
||||
port = 2283; # default
|
||||
secretsFile = config.sops.secrets."immich_secrets.env".path;
|
||||
#secretsFile = config.sops.secrets.immich_env.path;
|
||||
|
||||
# TODO: Build this directory with permissions for the immich user.
|
||||
mediaLocation = "/tank/shares/photos";
|
||||
|
||||
# https://docs.immich.app/install/config-file/
|
||||
settings = {
|
||||
# TODO: Setup OAuth with Kanidm
|
||||
oauth = {
|
||||
enabled = true;
|
||||
issuerUrl = "https://"; # TODO: the kanidm url?
|
||||
clientId = "immich";
|
||||
clientSecret = config.sops.placeholder."immich/oauth2_client_secret";
|
||||
scope = "openid email profile";
|
||||
signingAlgorithm = "ES256";
|
||||
storageLabelClaim = "email";
|
||||
buttonText = "Login with Kanidm";
|
||||
autoLaunch = true;
|
||||
mobileOverrideEnabled = true;
|
||||
mobileRedirectUri = "https://${svcDomain}/api/oauth/mobile-redirect/";
|
||||
};
|
||||
environment = {
|
||||
IMMICH_CONFIG_FILE = config.sops.templates."immich.json".path;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -1,33 +1,91 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
{ inputs, config, pkgs, lib, ... }:
|
||||
let
|
||||
svcDomain = "id.${config.networking.domain}";
|
||||
caddyCertsRoot = "${config.services.caddy.dataDir}/.local/share/caddy/certificates";
|
||||
caddyCertsDir = "${caddyCertsRoot}/acme-v02.api.letsencrypt.org-directory";
|
||||
certsDir = "/var/lib/kanidm/certs";
|
||||
homelabDomain = inputs.nixos-secrets.homelabDomain;
|
||||
svcDomain = "id.${homelabDomain}";
|
||||
kanidmCertDir = "/var/lib/kanidm/certs";
|
||||
caddyCertStore = "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${svcDomain}";
|
||||
#kcertloc = "${caddyCertsStore}/${svcDomain}/";
|
||||
certRenewalScript = pkgs.writeShellScript "copy-kanidm-cert-hook" ''
|
||||
set -Eeuo pipefail
|
||||
mkdir -p ${kanidmCertDir}
|
||||
cp ${caddyCertStore}/${svcDomain}.crt ${kanidmCertDir}/cert.pem
|
||||
cp ${caddyCertStore}/${svcDomain}.key ${kanidmCertDir}/key.pem
|
||||
|
||||
chown kanidm:kanidm ${kanidmCertDir}/*.pem
|
||||
|
||||
${pkgs.systemd}/bin/systemctl restart kanidm.service
|
||||
'';
|
||||
kanidmCertCopier = "kanidm-cert-copier";
|
||||
in
|
||||
{
|
||||
# NOTE: Domains are serious when they are the root of identity/authnz.
|
||||
# Recommendation from Kanidm docs for "Maximum" security is to maintain
|
||||
# Both `example.com` and `id.example-auth.com`, the latter for idm infra exclusively.
|
||||
# I consider that to be untenable and even more risky.
|
||||
# The next recommendation is to follow a pattern like so
|
||||
# id.example.com
|
||||
# australia.id.example.com
|
||||
# id-test.example.com
|
||||
# australia.id-test.example.com
|
||||
|
||||
|
||||
# Example of yoinking certs from caddy:
|
||||
# https://github.com/marcusramberg/nix-config/blob/e558914dd3705150511c5ef76278fc50bb4604f3/nixos/kanidm.nix#L3
|
||||
|
||||
# TODO: If possible, consider specifying the cert location here instead of the following kludge.
|
||||
services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
|
||||
reverse_proxy :8443 {
|
||||
header_up Host {host}
|
||||
header_up X-Real-IP {http.request.header.CF-Connecting-IP}
|
||||
transport http {
|
||||
tls_server_name ${svcDomain}
|
||||
}
|
||||
}
|
||||
'';
|
||||
|
||||
# NOTE: Attempted kludge due to caddy generating (and therefore owning the certs)
|
||||
# NOTE: Cleanup old rules
|
||||
# systemd.tmpfiles.rules = lib.filter(rule: ! (lib.strings.hasPrefix "C ${kanidmCertDir}" rule)) config.systemd.tmpfiles.rules;
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${certsDir} 0750 kanidm caddy -"
|
||||
"C ${certsDir}/cert.pem - kanidm - - ${caddyCertsDir}/${svcDomain}/${svcDomain}.crt"
|
||||
"C ${certsDir}/key.key - kanidm - - ${caddyCertsDir}/${svcDomain}/${svcDomain}.key"
|
||||
"d ${kanidmCertDir} 0750 kanidm kanidm -"
|
||||
];
|
||||
systemd.services.kanidm = {
|
||||
after = [ "systemd-tmpfiles-setup.service" ];
|
||||
requires = [ "caddy.service" "systemd-tmpfiles-setup.service" ];
|
||||
# NOTE: Include automation for copying cert files on renewal.
|
||||
# systemd.services.caddy.serviceConfig = {
|
||||
# ExecStartPost = [
|
||||
# "${certRenewalScript}/bin/copy-kanidm-cert-hook"
|
||||
# ];
|
||||
# ExecReload = [
|
||||
# "${pkgs.caddy}/bin/caddy reload --config ${config.services.caddy.configFile}"
|
||||
# "${certRenewalScript}/bin/copy-kanidm-cert-hook"
|
||||
# ];
|
||||
# };
|
||||
systemd.services.${kanidmCertCopier} = {
|
||||
description = "Copy Caddy certificates for Kanidm";
|
||||
requires = [ "caddy.service" ];
|
||||
after = [ "caddy.service" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
ExecStart = "${certRenewalScript}";
|
||||
};
|
||||
};
|
||||
# systemd.services.caddy.wantedBy = [ "multi-user.target" ];
|
||||
# systemd.services.caddy.wants = [ kanidmCertCopier ];
|
||||
systemd.services.caddy.reloadTriggers = [ kanidmCertCopier ];
|
||||
systemd.timers.kanidm-cert-copier-daily = {
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnBootSec = "5min";
|
||||
OnCalendar = "daily";
|
||||
Unit = kanidmCertCopier;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
# systemd.services.kanidm = {
|
||||
# after = [ kanidmCertCopier ];
|
||||
# requires = [ kanidmCertCopier ];
|
||||
# };
|
||||
users.users.kanidm.extraGroups = [
|
||||
"caddy"
|
||||
];
|
||||
|
|
@ -43,21 +101,25 @@ in
|
|||
};
|
||||
};
|
||||
|
||||
|
||||
services.kanidm = {
|
||||
package = pkgs.kanidmWithSecretProvisioning;
|
||||
package = pkgs.kanidmWithSecretProvisioning_1_7;
|
||||
enableServer = true;
|
||||
serverSettings = {
|
||||
# NOTE: Required to start the server: https://kanidm.github.io/kanidm/stable/server_configuration.html
|
||||
# domain, origin, tls_chain, tls_key
|
||||
domain = svcDomain;
|
||||
origin = "https://${svcDomain}";
|
||||
tls_chain = "${certsDir}/cert.pem";
|
||||
tls_key = "${certsDir}/key.key";
|
||||
tls_chain = "${kanidmCertDir}/cert.pem";
|
||||
tls_key = "${kanidmCertDir}/key.pem";
|
||||
# tls_chain = "${caddyCertStore}/${svcDomain}.crt";
|
||||
# tls_key = "${caddyCertStore}/${svcDomain}.key";
|
||||
|
||||
# NOTE: Optional Settings
|
||||
# TODO: Configure the rest of the binding properly, should be 363 and maybe 8443
|
||||
ldapbindaddress = "127.0.0.1:3636"; # For Jellyfin LDAP integration.
|
||||
|
||||
# trust_x_forwarded_for = true;
|
||||
#trust_x_forwarded_for = true;
|
||||
};
|
||||
|
||||
enableClient = true;
|
||||
|
|
@ -74,6 +136,7 @@ in
|
|||
home_alias = "name";
|
||||
};
|
||||
|
||||
# TODO: Migrate the secrets from here to `nixos-secrets`
|
||||
# NOTE: There are manual steps required as root to allow a user to set
|
||||
# their own credentials, or to configure an account as posix. As-is this
|
||||
# module doesn't support provisioning a complete user /w credentials.
|
||||
|
|
@ -82,7 +145,9 @@ in
|
|||
# https://kanidm.github.io/kanidm/stable/accounts/authentication_and_credentials.html#onboarding-a-new-person--resetting-credentials
|
||||
provision = {
|
||||
enable = true;
|
||||
autoRemove = false;
|
||||
autoRemove = true;
|
||||
acceptInvalidCerts = true;
|
||||
|
||||
adminPasswordFile = config.sops.secrets."kanidm/admin-password".path;
|
||||
idmAdminPasswordFile = config.sops.secrets."kanidm/idm-admin-password".path;
|
||||
|
||||
|
|
@ -98,6 +163,8 @@ in
|
|||
"git.users"
|
||||
"git.admins"
|
||||
"tv.users"
|
||||
"immich.users"
|
||||
"miniflux.users"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
|
@ -107,6 +174,8 @@ in
|
|||
"git.admins" = {};
|
||||
"tv.users" = {};
|
||||
"tv.admins" = {};
|
||||
"immich.users" = {};
|
||||
"miniflux.users" = {};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
{ config, pkgs, ... }:
|
||||
let
|
||||
svcDomain = "feeds.${config.networking.domain}";
|
||||
svcPort = "8080";
|
||||
homelabDomain = config.networking.domain;
|
||||
svcDomain = "feeds.${homelabDomain}";
|
||||
svcPort = "8081"; # Prevent a Conflict
|
||||
in
|
||||
{
|
||||
services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
|
||||
|
|
@ -22,32 +23,57 @@ in
|
|||
group = "miniflux";
|
||||
restartUnits = [ "miniflux.service" ];
|
||||
};
|
||||
services.kanidm.provision = {
|
||||
groups = {};
|
||||
systems.oauth2.miniflux = {
|
||||
displayName = "Miniflux Feed Reader";
|
||||
originUrl = "https://${fqdn}/callback";
|
||||
public = true; # enforces PKCE
|
||||
preferShortUsername = true;
|
||||
scopeMaps.pages_users = ["openid" "email" "profile"];
|
||||
claimMaps."${permissionsMap}".valuesByGroup.pages_admin = ["admin"];
|
||||
};
|
||||
sops.secrets."miniflux/oauth2_client_secret" = {
|
||||
owner = "miniflux";
|
||||
group = "kanidm";
|
||||
mode = "0440";
|
||||
restartUnits = [ "miniflux.service" "kanidm.service" ];
|
||||
};
|
||||
#services.kanidm.provision = {
|
||||
#groups = {};
|
||||
#systems.oauth2.miniflux = {
|
||||
#displayName = "Miniflux Feed Reader";
|
||||
#originUrl = "https://${fqdn}/callback";
|
||||
#public = true; # enforces PKCE
|
||||
#preferShortUsername = true;
|
||||
#scopeMaps.pages_users = ["openid" "email" "profile"];
|
||||
#claimMaps."${permissionsMap}".valuesByGroup.pages_admin = ["admin"];
|
||||
#};
|
||||
#};
|
||||
# NOTE: Currently requires some web-interface configuration
|
||||
services.miniflux = {
|
||||
enable = true;
|
||||
adminCredentialsFile = config.sops.secrets.miniflux_env.path;
|
||||
config = {
|
||||
BASE_URL = "https://${svcDomain}";
|
||||
CREATE_ADMIN = 0;
|
||||
DISABLE_LOCAL_AUTH = 1;
|
||||
#CREATE_ADMIN = 0;
|
||||
#DISABLE_LOCAL_AUTH = 1;
|
||||
OAUTH2_PROVIDER = "oidc";
|
||||
OAUTH2_OIDC_PROVIDER_NAME = "Kanidm";
|
||||
OAUTH2_OIDC_DISCOVERY_ENDPOINT = "https://id.${config.networking.domain}";
|
||||
OAUTH2_CLIENT_ID = "miniflux";
|
||||
OAUTH2_CLIENT_SECRET_FILE = config.sops.secrets."miniflux/oauth2_client_secret".path;
|
||||
OAUTH2_REDIRECT_URL = "https://${svcDomain}/oauth2/oidc/callback";
|
||||
OAUTH2_USER_CREATION = 1;
|
||||
OAUTH2_OIDC_DISCOVERY_ENDPOINT = "https://id.${homelabDomain}/oauth2/openid/miniflux";
|
||||
#OAUTH2_USER_CREATION = 1;
|
||||
CLEANUP_FREQUENCY = 48;
|
||||
LISTEN_ADDR = "localhost:${svcPort}";
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
services.kanidm.provision.systems.oauth2.miniflux = {
|
||||
displayName = "miniflux";
|
||||
originUrl = "https://${svcDomain}/oauth2/oidc/callback";
|
||||
originLanding = "https://${svcDomain}/";
|
||||
basicSecretFile = config.sops.secrets."miniflux/oauth2_client_secret".path;
|
||||
scopeMaps."miniflux.users" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
"groups"
|
||||
];
|
||||
# WARNING: PKCE is currently not supported by gitea/forgejo,
|
||||
# see https://github.com/go-gitea/gitea/issues/21376
|
||||
allowInsecureClientDisablePkce = true;
|
||||
preferShortUsername = true;
|
||||
};
|
||||
}
|
||||
|
|
|
|||
14 hosts/lithium/services/monitoring/README.md Normal file
@@ -0,0 +1,14 @@
# hosts/lithium/services/monitoring

This is a Grafana/Prometheus Monitoring Stack.
Why? Basically for the sake of it.

## Diagram

```mermaid
```

## References

- https://gist.github.com/rickhull/895b0cb38fdd537c1078a858cf15d63e
- https://xeiaso.net/blog/prometheus-grafana-loki-nixos-2020-11-20/
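Since the stack wires Grafana to the local Prometheus, the datasource can also be provisioned declaratively. This is a sketch, not part of this commit; it assumes Prometheus is reachable locally on its configured port 9090 (see prometheus.nix below).

```nix
{ ... }:
{
  services.grafana.provision.datasources.settings.datasources = [
    {
      name = "Prometheus";
      type = "prometheus";
      url = "http://127.0.0.1:9090";
      isDefault = true;
    }
  ];
}
```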
|
|
@@ -1,6 +1,8 @@
{ config, pkgs, ... }:
{ inputs, config, pkgs, ... }:
let
svcDomain = "grafana.${config.networking.domain}";
homelabDomain = inputs.nixos-secrets.homelabDomain;
#svcDomain = "grafana.${config.networking.domain}";
svcDomain = "grafana.${homelabDomain}";
svcPort = config.services.grafana.settings.server.http_port;
in
{
||||
|
|
|
|||
4
hosts/lithium/services/monitoring/loki-local-config.yaml
Normal file
4
hosts/lithium/services/monitoring/loki-local-config.yaml
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
auth_enabled: false
|
||||
|
||||
server:
|
||||
http_listen_port: 3100
|
||||
15 hosts/lithium/services/monitoring/loki.nix Normal file
|
|
@ -0,0 +1,15 @@
|
|||
{ ... }:
|
||||
{
|
||||
services.loki = {
|
||||
enable = true;
|
||||
#configFile = "./loki-local-config.yaml";
|
||||
# Nix Object representing the data that might otherwise be in a YAML config
|
||||
# https://github.com/grafana/loki/blob/main/cmd/loki/loki-local-config.yaml
|
||||
configuration = {
|
||||
auth_enabled = false;
|
||||
server = {
|
||||
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -1,8 +1,8 @@
|
|||
{ config, pkgs, ... }:
|
||||
#let
|
||||
let
|
||||
#svcDomain = "status.${config.networking.domain}";
|
||||
#svcPort = config.services.prometheus.exporters.node.port;
|
||||
#in
|
||||
svcPort = config.services.prometheus.exporters.node.port;
|
||||
in
|
||||
{
|
||||
#services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
|
||||
#reverse_proxy :${svcPort}
|
||||
|
|
@ -10,20 +10,27 @@
|
|||
|
||||
services.prometheus = {
|
||||
enable = true;
|
||||
port = 9090;
|
||||
#globalConfig.scrape_interval = "10s"; # "1m"
|
||||
#scrapeConfigs = [
|
||||
#{
|
||||
#job_name = "node";
|
||||
#static_configs = [{
|
||||
# targets = [ "localhost:${toString svcPort}" ];
|
||||
#}];
|
||||
#}
|
||||
#];
|
||||
|
||||
exporters = {
|
||||
# Export data about this host
|
||||
node = {
|
||||
enable = true;
|
||||
enabledCollectors = [ "systemd" ];
|
||||
port = 9091;
|
||||
};
|
||||
};
|
||||
|
||||
# Read data from the export
|
||||
scrapeConfigs = [
|
||||
{
|
||||
job_name = "node-lithium";
|
||||
static_configs = [{
|
||||
targets = [ "localhost:${toString svcPort}" ];
|
||||
}];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
#services.prometheus.exporters.node = {
|
||||
#enable = true;
|
||||
#port = 9000;
|
||||
#enabledCollectors = [ "systemd" ];
|
||||
#};
|
||||
}
|
||||
|
|
|
|||
79 hosts/lithium/services/old-kanidm.nix Normal file
|
|
@ -0,0 +1,79 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
let
|
||||
cfg = config.services.kanidm;
|
||||
authDomain = "auth.${config.networking.domain}";
|
||||
certsDir = config.security.acme.certs."${authDomain}".directory;
|
||||
in
|
||||
{
|
||||
# TODO: Pull in the appropriate sops-nix secrets and get this baby rolling.
|
||||
# https://github.com/search?q=language%3ANix+services.kanidm&type=code
|
||||
services.kanidm = {
|
||||
# NOTE: Pin a specific kanidm version, we don't want issues from auto-updating.
|
||||
package = pkgs.kanidm_1_6;
|
||||
enableServer = true;
|
||||
|
||||
# TODO: Initial kanidm setup.
|
||||
# I sort of want users to be able to create their own accounts and what I
|
||||
# don't want is for any of their account information to be leaked here as
|
||||
# it can be used for remote logins.
|
||||
# So kanidm accounts aside from the administration will be "impure".
|
||||
# I vastly prefer people being able to set their own credentials:
|
||||
# https://kanidm.github.io/kanidm/stable/accounts/authentication_and_credentials.html#onboarding-a-new-person--resetting-credentials
|
||||
provision = {
|
||||
enable = true;
|
||||
autoRemove = false;
|
||||
# TODO: Add secrets from `sops-nix`.
|
||||
adminPasswordFile = "TODO!SETME";
|
||||
idmAdminPasswordFile = "TODO!SETME";
|
||||
|
||||
persons = {
|
||||
# https://kanidm.github.io/kanidm/stable/accounts/authentication_and_credentials.html#resetting-person-account-credentials
|
||||
# Needs to be a member of idm_people_admins and idm_high_privilege to prevent idm_service_desk from tampering.
|
||||
zenware = {
|
||||
displayName = "zenware";
|
||||
legalName = "zenware";
|
||||
mailAddresses = [ "zenware@${config.networking.domain} "];
|
||||
groups = [
|
||||
"idm_high_privilege"
|
||||
"git.users"
|
||||
"git.admins"
|
||||
];
|
||||
};
|
||||
# TODO: Make an idm_service_desk account.
|
||||
};
|
||||
groups = {
|
||||
# This group is `git` because it could be forgejo, gitea, etc.
|
||||
"git.users" = {};
|
||||
"git.admins" = {};
|
||||
};
|
||||
systems.oauth2 = {
|
||||
forgejo = {
|
||||
displayName = "forgejo";
|
||||
originUrl = "TODO!SETME";
|
||||
originLanding = "TODO!SETME";
|
||||
basicSecretFile = "TODO!SETME";
|
||||
scopeMaps."git.users" = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
# WARNING: PKCE is currently not supported by gitea/forgejo,
|
||||
# see https://github.com/go-gitea/gitea/issues/21376
|
||||
allowInsecureClientDisablePkce = true;
|
||||
preferShortUsername = true;
|
||||
claimMaps.groups = {
|
||||
joinType = "array";
|
||||
valuesByGroup."git.admins" = [ "admin" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
#enableClient = false;
|
||||
clientSettings = {
|
||||
uri = "https://${authDomain}";
|
||||
verify_hostnames = true;
|
||||
verify_ca = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -34,13 +34,15 @@ in
|
|||
'';
|
||||
WorkingDirectory = "/home/palworld";
|
||||
Restart = "always";
|
||||
RuntimeMaxSec = "1d";
|
||||
RuntimeMaxSec = "1d"; # NOTE: This thing has memory leaks, restart to save ourselves.
|
||||
User = "palworld";
|
||||
};
|
||||
};
|
||||
|
||||
# NOTE: Config is stashed at the following directory.
|
||||
# /home/palworld/.steam/steam/Steamapps/common/PalServer/Pal/Saved/Config/LinuxServer/PalWorldSettings.ini
|
||||
# TODO: There are benefits to including the meat of the configuration inside the 'nix' file.
|
||||
# Namely that it will result in actually updating the config when I rebuild.
|
||||
environment.etc."palworld/PalWorldSettings.ini" = {
|
||||
target = "/home/palworld/.steam/steam/Steamapps/common/PalServer/Pal/Saved/Config/LinuxServer/PalWorldSettings.ini";
|
||||
text = palworldSettings;
|
||||
|
|
|
|||
|
|
@ -1,32 +1,39 @@
|
|||
{ }:
|
||||
{ config, ... }:
|
||||
{
|
||||
services.smartd = {
|
||||
enable = true;
|
||||
devices = [
|
||||
{
|
||||
device = "ata-CT500MX500SSD1_2206E607D6AA";
|
||||
device = "/dev/disk/by-id/ata-CT500MX500SSD1_2206E607D6AA";
|
||||
}
|
||||
{
|
||||
device = "ata-CT500MX500SSD1_2206E607D728";
|
||||
device = "/dev/disk/by-id/ata-CT500MX500SSD1_2206E607D728";
|
||||
}
|
||||
{
|
||||
device = "ata-ST16000NM001G-2KK103_ZL2B73HT";
|
||||
device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL2B73HT";
|
||||
}
|
||||
{
|
||||
device = "ata-ST16000NM001G-2KK103_ZL2PSELL";
|
||||
device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL2PSELL";
|
||||
}
|
||||
{
|
||||
device = "ata-ST16000NM001G-2KK103_ZL2B4RSM";
|
||||
device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL2B4RSM";
|
||||
}
|
||||
{
|
||||
device = "ata-ST16000NM001G-2KK103_ZL23XYMM";
|
||||
device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL23XYMM";
|
||||
}
|
||||
{
|
||||
device = "nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244331X";
|
||||
device = "/dev/disk/by-id/nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244331X";
|
||||
}
|
||||
{
|
||||
device = "nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244303V";
|
||||
device = "/dev/disk/by-id/nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244303V";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
services.prometheus.exporters.smartctl = {
|
||||
enable = config.services.smartd.enable;
|
||||
openFirewall = config.services.smartd.enable;
|
||||
# https://github.com/prometheus-community/smartctl_exporter?tab=readme-ov-file#why-is-root-required-cant-i-add-a-user-to-the-disk-group
|
||||
user = "root";
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -13,6 +13,8 @@ in
|
|||
enable = true;
|
||||
# NOTE: NixOS Attributes here resolve into these ENV vars:
|
||||
# https://github.com/louislam/uptime-kuma/wiki/Environment-Variables
|
||||
# settings = {};
|
||||
settings = {
|
||||
PORT = "4000";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,13 +1,15 @@
{ inputs, config, ... }:
{ inputs, ... }:
let
secretsPath = builtins.toString inputs.nixos-secrets;
in
{
imports = [
inputs.sops-nix.nixosModules.sops
];
#imports = [ inputs.sops-nix.nixosModules.sops ];

sops = {
defaultSopsFile = "${secretsPath}/${config.hostname}/secrets.yaml";
#defaultSopsFile = "${secretsPath}/${config.hostname}/secrets.yaml";
#defaultSopsFile = "${secretsPath}/global/secrets.yaml";
# TODO: Make this test the hostname.
#defaultSopsFile = "${secretsPath}/lithium/secrets/common.yaml";
defaultSopsFile = "${secretsPath}/lithium/secrets.yaml";
};
}
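For the "test the hostname" TODO above, the per-host secrets file can be derived from the machine's own hostname instead of being hard-coded. A sketch, an assumption rather than part of this commit:

```nix
{ inputs, config, ... }:
let
  secretsPath = builtins.toString inputs.nixos-secrets;
in
{
  # Pick the secrets file that matches this host, e.g. lithium/secrets.yaml.
  sops.defaultSopsFile = "${secretsPath}/${config.networking.hostName}/secrets.yaml";
}
```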
||||
|
|
|
|||