even more backups of things
Commit 630f9b0074 (parent b8d125d448)
46 changed files with 1166 additions and 197 deletions

hosts/lithium/services/README.md (Normal file, 11 lines)
@@ -0,0 +1,11 @@
# hosts/lithium/services

The idea is that each `*.nix` or `./*/default.nix` file contains all the
details necessary for a service to bring itself up and keep running.

One thing I have overlooked thus far: nothing tests for the existence of a
reverse proxy and bails out if one isn't available. Practically, if Caddy isn't
running, most of these services should also not run; at the very least, the
blocks pertaining to setting up reverse proxy details don't need to run.

There's a way of doing that with things like `lib.mkDefault` and so forth.
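As a rough sketch of that idea (not something this commit implements; the host name and port below are made up), each service module could wrap its reverse-proxy block in `lib.mkIf` so the block only materializes when Caddy is enabled:

```nix
{ config, lib, ... }:
{
  # Hypothetical example: only emit the vhost when a reverse proxy exists.
  config = lib.mkIf config.services.caddy.enable {
    services.caddy.virtualHosts."example.home.arpa".extraConfig = ''
      reverse_proxy :8000
    '';
  };
}
```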

hosts/lithium/services/acme-dns.nix (Normal file, 65 lines)
@@ -0,0 +1,65 @@
{ inputs, config, ... }:
let
  homelabDomain = inputs.nixos-secrets.homelabDomain;
  certDir = config.security.acme.certs."${homelabDomain}".directory;
in
{
  sops.secrets."cloudflare/dns_api_token" = {
    mode = "0440";
    group = config.services.caddy.group;
    restartUnits = [ "caddy.service" "ddclient.service" ];
  };

  # TODO: Consider defining reverse proxy all in one location.
  # All the ports and domains would be visible in one place.
  security.acme = {
    acceptTerms = true;
    defaults = {
      # NOTE: Uncomment the following line for testing, comment for production.
      server = "https://acme-staging-v02.api.letsencrypt.org/directory";
      dnsProvider = "cloudflare";
      dnsResolver = "1.1.1.1:53";
      dnsPropagationCheck = true;
      credentialFiles = {
        CLOUDFLARE_DNS_API_TOKEN_FILE = config.sops.secrets."cloudflare/dns_api_token".path;
      };
      group = config.services.caddy.group;
      #reloadServices = [ "caddy" ];
      email = "admin+acme@${homelabDomain}"; # NOTE: This email is /dev/null;
      #keyType = "ec384";
    };
  };

  services.ddclient = {
    enable = true;
    protocol = "cloudflare";
    usev4 = "webv4, webv4=https://cloudflare.com/cdn-cgi/trace, web-skip='ip='";
    username = "token";
    #secretsFile = config.sops.secrets."cloudflare/dns_api_token".path;
    passwordFile = config.sops.secrets."cloudflare/dns_api_token".path;
    zone = homelabDomain;
    domains = [
      homelabDomain
      "*.${homelabDomain}"
      "id.${homelabDomain}"
      "status.${homelabDomain}"
      "grafana.${homelabDomain}"
      "feeds.${homelabDomain}"
      "git.${homelabDomain}"
      "tv.${homelabDomain}"
      "demo.${homelabDomain}" # Testing to see if the DNS record is set.
    ];
  };

  # NOTE: Issue a single cert /w subdomain wildcard.
  # At the expense of individual service security, some public details about
  # attack surface remain slightly more private in https://crt.sh/
  security.acme.certs."${homelabDomain}" = {
    #group = config.services.caddy.group;
    domain = "${homelabDomain}";
    extraDomainNames = [ "*.${homelabDomain}" ];
  };
  # Nginx useACMEHost provides the DNS-01 challenge.
  # security.acme.certs."${homelabDomain}".directory
}
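The `certDir` binding above is otherwise unused; a sketch of how a Caddy vhost could consume the wildcard cert directly (assumptions: the NixOS acme module's `cert.pem`/`key.pem` layout inside the cert directory, and a made-up backend port):

```nix
services.caddy.virtualHosts."demo.${homelabDomain}".extraConfig = ''
  # Serve with the wildcard cert issued above instead of letting Caddy
  # obtain its own certificate for this host.
  tls ${certDir}/cert.pem ${certDir}/key.pem
  reverse_proxy :8080
'';
```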

hosts/lithium/services/audiobookshelf.nix (Normal file, 20 lines)
@@ -0,0 +1,20 @@
{ inputs, config, pkgs, lib, ... }:
let
  homelabDomain = inputs.nixos-secrets.homelabDomain;
  svcDomain = "audiobooks.${homelabDomain}";
  svcPort = config.services.audiobookshelf.port; # Prevent a Conflict
in
{
  services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
    reverse_proxy :${toString svcPort}
  '';

  services.audiobookshelf = {
    enable = true;
    openFirewall = true;
    port = 8000;

    # NOTE: Path to AudioBookShelf config & metadata inside of `/var/lib`
    dataDir = "audiobookshelf";
  };
}

@@ -1,13 +1,22 @@
{ config, pkgs, ... }:
{ inputs, config, pkgs, lib, ... }:
let
  homelabDomain = inputs.nixos-secrets.homelabDomain;
  certDir = config.security.acme.certs."${homelabDomain}".directory;
in
{
  sops.secrets.caddy_env = {
    sopsFile = ../secrets/caddy.env;
    format = "dotenv";
  services.nginx.enable = lib.mkForce false;

  sops.secrets.cloudflare_env = {
    mode = "0440";
    owner = config.services.caddy.user;
    sopsFile = "${inputs.nixos-secrets}/lithium/cloudflare.env";
    format = "dotenv";
    group = config.services.caddy.group;
    restartUnits = [ "caddy.service" ];
  };

  # TODO: Revert to using Caddy DNS for the whole thing.
  # TODO: Add another cloudflare DDNS provider.
  # TODO: Add Metrics with Prometheus & Grafana
  services.caddy = {
    enable = true;
    package = pkgs.caddy.withPlugins {

@@ -16,26 +25,33 @@
"github.com/mholt/caddy-dynamicdns@v0.0.0-20250430031602-b846b9e8fb83"
|
||||
"github.com/caddy-dns/cloudflare@v0.2.1"
|
||||
];
|
||||
|
||||
# NOTE: Built on 6/4/2025
|
||||
hash = "sha256-swskhAr7yFJX+qy0FR54nqJarTOojwhV2Mbk7+fyS0I=";
|
||||
# NOTE: Built on 9/30/2025
|
||||
hash = "sha256-xuwNkxZop+RnzFtM9DEwah95nPSyx8KgM+Eu4EJ9kqI=";
|
||||
};
|
||||
# NOTE: Use Staging CA while testing, check `systemctl status caddy`
|
||||
# to see if everything is working.
|
||||
# acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";
|
||||
|
||||
# TODO: Add Metrics with Prometheus & Grafana
|
||||
environmentFile = config.sops.secrets.caddy_env.path;
|
||||
|
||||
environmentFile = config.sops.secrets.cloudflare_env.path;
|
||||
# NOTE: DNS provider settings
|
||||
# https://caddy.community/t/how-to-use-dns-provider-modules-in-caddy-2/8148
|
||||
globalConfig = ''
|
||||
# acme_dns cloudflare {env.CLOUDFLARE_API_TOKEN}
|
||||
#acme_dns cloudflare {$CLOUDFLARE_DNS_API_TOKEN}
|
||||
dynamic_dns {
|
||||
provider cloudflare {env.CLOUDFLARE_API_TOKEN}
|
||||
provider cloudflare {$CLOUDFLARE_DNS_API_TOKEN}
|
||||
check_interval 30m
|
||||
ttl 5m
|
||||
domains {
|
||||
${config.networking.domain} @
|
||||
${homelabDomain} @
|
||||
}
|
||||
dynamic_domains
|
||||
}
|
||||
'';
|
||||
|
||||
};
|
||||
networking.firewall.allowedTCPPorts = [ 80 443 ];
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [ 80 443 ];
|
||||
allowedUDPPorts = [ 443 ];
|
||||
};
|
||||
|
||||
}
|
||||
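One way the "Add Metrics with Prometheus & Grafana" TODO could be approached (a sketch; assumes Caddy's admin endpoint on its default `localhost:2019`, and the scrape job name is made up):

```nix
{
  # Enable per-host metrics in the Caddyfile global options.
  services.caddy.globalConfig = ''
    servers {
      metrics
    }
  '';
  # Scrape Caddy's admin endpoint from the Prometheus instance defined
  # in services/monitoring/prometheus.nix.
  services.prometheus.scrapeConfigs = [{
    job_name = "caddy";
    static_configs = [{ targets = [ "localhost:2019" ]; }];
  }];
}
```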

@@ -1,11 +1,13 @@
{ inputs, config, pkgs, lib, ... }:
let
  homelabDomain = inputs.nixos-secrets.homelabDomain;
  certDir = config.security.acme.certs."${homelabDomain}".directory;
  #certDir = config.security.acme.certs."${homelabDomain}".directory;
  svcDomain = "books.${homelabDomain}";
  svcHttpPort = config.services.calibre-web.listen.port;
  web_data_dir = "calibre-web";
  # TODO: I want the actual media stored in the tank.
  library_path = "/tank/media/library/books";
  #library_path = "/var/lib/calibre-library";
in
{
  # TODO: This isn't the right place for this, but we need to guarantee that a

@@ -14,19 +16,25 @@ in
  users.groups.media = {};

  services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
    reverse_proxy :${toString svcHttpPort}
    encode {
      zstd
      gzip
      minimum_length 1024
    }
    reverse_proxy localhost:8883
  '';

  # reverse_proxy :${toString svcHttpPort}
  # encode {
  #   zstd
  #   gzip
  #   minimum_length 1024
  # }
  # '';

  # NOTE: Needs some manual setup in the Web-UI, and I encountered issues connecting even with the firewall enabled.
  # The following command is what I used to forward the port:
  # ssh -f -N -L localhost:8883:localhost:8883 jml@lithium
  services.calibre-web = {
    enable = true;
    listen.port = 8083;
    listen.port = 8883;
    # NOTE: Don't need to open calibre-web port, it's served by reverse_proxy
    openFirewall = false;
    openFirewall = true; # TODO: Temporarily opened to allow configuration from inside my network.

    user = "calibre-web";
    group = "calibre-web";

@@ -38,6 +46,7 @@ in
    options = {
      enableBookUploading = true;
      enableBookConversion = true;
      # NOTE: If I don't already have an extant calibreLibrary, I need to leave this null or the app won't launch.
      calibreLibrary = library_path;
    };
  };

@@ -41,6 +41,7 @@ in
    server = {
      DOMAIN = svcDomain;
      ROOT_URL = "https://${svcDomain}";
      HTTP_PORT = 3000;
    };
    # NOTE: Actions support is based on: https://github.com/nektos/act
    #actions = {

@@ -49,6 +50,7 @@ in
    #};
    actions.ENABLED = false;
    # NOTE: Registration is handled with kanidm.
    # Registration button link is at /user/sign_up
    service = {
      REGISTER_EMAIL_CONFIRM = false;
      DISABLE_REGISTRATION = false;

@@ -87,13 +89,15 @@ in
  services.kanidm.provision.systems.oauth2.forgejo = {
    displayName = "forgejo";
    # TODO: Get this from Forgejo
    originUrl = "https://git.${homelabDomain}/user/oauth2/${homelabDomain}/callback";
    # originUrl = "https://git.${homelabDomain}/user/oauth2/${homelabDomain}/callback";
    originUrl = "${config.services.forgejo.settings.server.ROOT_URL}/user/oauth2/kanidm/callback";
    originLanding = "https://git.${homelabDomain}/";
    #basicSecretFile = "TODO!SETME";
    scopeMaps."git.users" = [
      "openid"
      "email"
      "profile"
      "groups"
    ];
    # WARNING: PKCE is currently not supported by gitea/forgejo,
    # see https://github.com/go-gitea/gitea/issues/21376

@@ -137,5 +141,5 @@ in
  # TODO: Consider automatically creating admin account and password...
  # https://wiki.nixos.org/wiki/Forgejo#Ensure_users
  # Might be necessary to generate a token for kanidm
  #sops.secrets.forgejo-admin-password.owner = "forgejo";
  sops.secrets."forgejo/admin-password".owner = "forgejo";
}

hosts/lithium/services/forgejo/actions-runner.nix (Normal file, 0 lines)
hosts/lithium/services/forgejo/default.nix (Normal file, 0 lines)
hosts/lithium/services/forgejo/forgejo.nix (Normal file, 0 lines)

hosts/lithium/services/game_servers.nix (Normal file, 42 lines)
@@ -0,0 +1,42 @@
{ pkgs, ... }:
{
  # TODO
  # systemd.services.<name>.serviceConfig.{MemoryMax,CPUQuota}
  systemd.services.valheim-server = {
    description = "Valheim dedicated server";
    after = [ "network.target" ];
    wantedBy = [ "multi-user.target" ];

    serviceConfig = {
      Type = "simple";
      User = "valheim";
      Group = "valheim";
      # NOTE: ExecStart is not run through a shell, so the steamcmd update is
      # split out into ExecStartPre rather than chained with `&&`.
      ExecStartPre = "${pkgs.steamcmd}/bin/steamcmd +login anonymous +force_install_dir /home/valheim/server +app_update 896660 validate +exit";
      ExecStart = "/home/valheim/server/valheim_server.x86_64";
    };
  };

  users.users.valheim = {
    isSystemUser = true;
    group = "valheim";
    home = "/home/valheim";
  };
  users.groups.valheim = {};

  networking.firewall = {
    allowedTCPPorts = [ 7777 2456 ];
    allowedUDPPorts = [ 7777 2457 ];
  };

  services.restic.backups.gameservers = {
    user = "root";
    # TODO: Pick a real backup directory.
    repository = "/backup/gameservers";
    paths = [
      "/var/lib/terraria"
      "/home/valheim/server"
    ];
    timerConfig = {
      OnCalendar = "daily";
    };
  };
}
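For the resource-limit TODO at the top of that file, a sketch of what it might look like (the limit values are made up):

```nix
{
  # Cap the game server so a runaway process can't starve the host.
  systemd.services.valheim-server.serviceConfig = {
    MemoryMax = "8G";   # hard memory ceiling for the unit
    CPUQuota = "200%";  # at most two cores' worth of CPU time
  };
}
```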

@@ -1,24 +0,0 @@
{ config, pkgs, ... }:
let
  svcDomain = "grafana.${config.networking.domain}";
  svcPort = config.services.grafana.settings.server.http_port;
in
{
  services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
    reverse_proxy :${svcPort}
  '';

  services.grafana = {
    enable = true;
    settings = {
      server = {
        http_addr = "127.0.0.1";
        http_port = 3000;
        enforce_domain = true;
        enable_gzip = true;
        domain = svcDomain;
      };
      analytics.reporting_enabled = false; # NOTE: Disable Telemetry
    };
  };
}

hosts/lithium/services/home-assistant.nix (Normal file, 7 lines)
@@ -0,0 +1,7 @@
{ config, pkgs, ... }:
{
  services.home-assistant = {
    enable = true;
    openFirewall = true;
  };
}

@@ -4,29 +4,47 @@ let
svcDomain = "photos.${homelabDomain}";
|
||||
photoStorageDir = "/tank/shares/photos";
|
||||
svcPort = config.services.immich.port;
|
||||
# https://docs.immich.app/install/config-file/
|
||||
jsonSettings = {
|
||||
server.externalDomain = "https://${svcDomain}";
|
||||
oauth = {
|
||||
enabled = true;
|
||||
issuerUrl = "https://"; # TODO: the kanidm url?
|
||||
clientId = "immich";
|
||||
clientSecret = config.sops.placeholder."immich/oauth2_client_secret";
|
||||
scope = "openid email profile";
|
||||
signingAlgorithm = "ES256";
|
||||
storageLabelClaim = "email";
|
||||
buttonText = "Login with Kanidm";
|
||||
autoLaunch = true;
|
||||
mobileOverrideEnabled = true;
|
||||
mobileRedirectUri = "https://${svcDomain}/api/oauth/mobile-redirect/";
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
# NOTE: The following repo contains a highly mature immich setup on nixos.
|
||||
# https://github.com/xinyangli/nixos-config/blob/a8b5bea68caea573801ccfdb8ceacb7a8f2b0190/machines/agate/services/immich.nix
|
||||
services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
|
||||
reverse_proxy :${svcPort}
|
||||
reverse_proxy :${toString svcPort}
|
||||
'';
|
||||
|
||||
# NOTE: Primarily to contain DB_PASSWORD to make it possible to backup and restore the DB.
|
||||
sops.secrets.immich_env = {
|
||||
sopsFile = ../../secrets/immich.env;
|
||||
format = "dotenv";
|
||||
# sops.secrets.immich_env = {
|
||||
# sopsFile = ../../secrets/immich.env;
|
||||
# format = "dotenv";
|
||||
# mode = "0440";
|
||||
# owner = "immich";
|
||||
# group = "immich";
|
||||
# restartUnits = [ "immich.service" ];
|
||||
# };
|
||||
sops.secrets."immich/oauth2_client_secret" = { };
|
||||
sops.templates."immich.json" = {
|
||||
mode = "0440";
|
||||
owner = "immich";
|
||||
group = "immich";
|
||||
restartUnits = [ "immich.service" ];
|
||||
};
|
||||
sops.secrets."immich/oauth2_client_secret" = {
|
||||
owner = "immich";
|
||||
group = "kanidm";
|
||||
mode = "0440";
|
||||
restartUnits = [ "immich.service" "kanidm.service" ];
|
||||
owner = config.services.immich.user;
|
||||
group = config.services.immich.group;
|
||||
content = builtins.toJSON jsonSettings;
|
||||
};
|
||||
|
||||
users.users.immich = {
|
||||
|
|
@ -45,27 +63,12 @@ in
|
|||
enable = true;
|
||||
openFirewall = true;
|
||||
port = 2283; # default
|
||||
secretsFile = config.sops.secrets."immich_secrets.env".path;
|
||||
#secretsFile = config.sops.secrets.immich_env.path;
|
||||
|
||||
# TODO: Build this directory with permissions for the immich user.
|
||||
mediaLocation = "/tank/shares/photos";
|
||||
|
||||
# https://docs.immich.app/install/config-file/
|
||||
settings = {
|
||||
# TODO: Setup OAuth with Kanidm
|
||||
oauth = {
|
||||
enabled = true;
|
||||
issuerUrl = "https://"; # TODO: the kanidm url?
|
||||
clientId = "immich";
|
||||
clientSecret = config.sops.placeholder."immich/oauth2_client_secret";
|
||||
scope = "openid email profile";
|
||||
signingAlgorithm = "ES256";
|
||||
storageLabelClaim = "email";
|
||||
buttonText = "Login with Kanidm";
|
||||
autoLaunch = true;
|
||||
mobileOverrideEnabled = true;
|
||||
mobileRedirectUri = "https://${svcDomain}/api/oauth/mobile-redirect/";
|
||||
};
|
||||
environment = {
|
||||
IMMICH_CONFIG_FILE = config.sops.templates."immich.json".path;
|
||||
};
|
||||
};
|
||||
|
||||

@@ -1,33 +1,91 @@
{ config, pkgs, lib, ... }:
{ inputs, config, pkgs, lib, ... }:
let
  svcDomain = "id.${config.networking.domain}";
  caddyCertsRoot = "${config.services.caddy.dataDir}/.local/share/caddy/certificates";
  caddyCertsDir = "${caddyCertsRoot}/acme-v02.api.letsencrypt.org-directory";
  certsDir = "/var/lib/kanidm/certs";
  homelabDomain = inputs.nixos-secrets.homelabDomain;
  svcDomain = "id.${homelabDomain}";
  kanidmCertDir = "/var/lib/kanidm/certs";
  caddyCertStore = "${config.services.caddy.dataDir}/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/${svcDomain}";
  #kcertloc = "${caddyCertsStore}/${svcDomain}/";
  certRenewalScript = pkgs.writeShellScript "copy-kanidm-cert-hook" ''
    set -Eeuo pipefail
    mkdir -p ${kanidmCertDir}
    cp ${caddyCertStore}/${svcDomain}.crt ${kanidmCertDir}/cert.pem
    cp ${caddyCertStore}/${svcDomain}.key ${kanidmCertDir}/key.pem

    chown kanidm:kanidm ${kanidmCertDir}/*.pem

    ${pkgs.systemd}/bin/systemctl restart kanidm.service
  '';
  kanidmCertCopier = "kanidm-cert-copier";
in
{
  # NOTE: Domains are serious when they are the root of identity/authnz.
  # Recommendation from Kanidm docs for "Maximum" security is to maintain
  # both `example.com` and `id.example-auth.com`, the latter for idm infra exclusively.
  # I consider that to be untenable and even more risky.
  # The next recommendation is to follow a pattern like so:
  # id.example.com
  # australia.id.example.com
  # id-test.example.com
  # australia.id-test.example.com

  # Example of yoinking certs from caddy:
  # https://github.com/marcusramberg/nix-config/blob/e558914dd3705150511c5ef76278fc50bb4604f3/nixos/kanidm.nix#L3

  # TODO: If possible, consider specifying the cert location here instead of the following kludge.
  services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
    reverse_proxy :8443 {
      header_up Host {host}
      header_up X-Real-IP {http.request.header.CF-Connecting-IP}
      transport http {
        tls_server_name ${svcDomain}
      }
    }
  '';

  # NOTE: Attempted kludge due to caddy generating (and therefore owning) the certs
  # NOTE: Cleanup old rules
  # systemd.tmpfiles.rules = lib.filter (rule: ! (lib.strings.hasPrefix "C ${kanidmCertDir}" rule)) config.systemd.tmpfiles.rules;
  systemd.tmpfiles.rules = [
    "d ${certsDir} 0750 kanidm caddy -"
    "C ${certsDir}/cert.pem - kanidm - - ${caddyCertsDir}/${svcDomain}/${svcDomain}.crt"
    "C ${certsDir}/key.key - kanidm - - ${caddyCertsDir}/${svcDomain}/${svcDomain}.key"
    "d ${kanidmCertDir} 0750 kanidm kanidm -"
  ];
  systemd.services.kanidm = {
    after = [ "systemd-tmpfiles-setup.service" ];
    requires = [ "caddy.service" "systemd-tmpfiles-setup.service" ];
  # NOTE: Include automation for copying cert files on renewal.
  # systemd.services.caddy.serviceConfig = {
  #   ExecStartPost = [
  #     "${certRenewalScript}/bin/copy-kanidm-cert-hook"
  #   ];
  #   ExecReload = [
  #     "${pkgs.caddy}/bin/caddy reload --config ${config.services.caddy.configFile}"
  #     "${certRenewalScript}/bin/copy-kanidm-cert-hook"
  #   ];
  # };
  systemd.services.${kanidmCertCopier} = {
    description = "Copy Caddy certificates for Kanidm";
    requires = [ "caddy.service" ];
    after = [ "caddy.service" ];

    serviceConfig = {
      Type = "oneshot";
      User = "root";
      ExecStart = "${certRenewalScript}";
    };
  };
  # systemd.services.caddy.wantedBy = [ "multi-user.target" ];
  # systemd.services.caddy.wants = [ kanidmCertCopier ];
  systemd.services.caddy.reloadTriggers = [ kanidmCertCopier ];
  systemd.timers.kanidm-cert-copier-daily = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnBootSec = "5min";
      OnCalendar = "daily";
      # NOTE: Timer Unit= needs the full unit name, including the .service suffix.
      Unit = "${kanidmCertCopier}.service";
    };
  };

  # systemd.services.kanidm = {
  #   after = [ kanidmCertCopier ];
  #   requires = [ kanidmCertCopier ];
  # };
  users.users.kanidm.extraGroups = [
    "caddy"
  ];

@@ -43,21 +101,25 @@ in
    };
  };

  services.kanidm = {
    package = pkgs.kanidmWithSecretProvisioning;
    package = pkgs.kanidmWithSecretProvisioning_1_7;
    enableServer = true;
    serverSettings = {
      # NOTE: Required to start the server: https://kanidm.github.io/kanidm/stable/server_configuration.html
      # domain, origin, tls_chain, tls_key
      domain = svcDomain;
      origin = "https://${svcDomain}";
      tls_chain = "${certsDir}/cert.pem";
      tls_key = "${certsDir}/key.key";
      tls_chain = "${kanidmCertDir}/cert.pem";
      tls_key = "${kanidmCertDir}/key.pem";
      # tls_chain = "${caddyCertStore}/${svcDomain}.crt";
      # tls_key = "${caddyCertStore}/${svcDomain}.key";

      # NOTE: Optional Settings
      # TODO: Configure the rest of the binding properly, should be 363 and maybe 8443
      ldapbindaddress = "127.0.0.1:3636"; # For Jellyfin LDAP integration.

      # trust_x_forwarded_for = true;
      #trust_x_forwarded_for = true;
    };

    enableClient = true;

@@ -74,6 +136,7 @@ in
      home_alias = "name";
    };

    # TODO: Migrate the secrets from here to `nixos-secrets`
    # NOTE: There are manual steps required as root to allow a user to set
    # their own credentials, or to configure an account as posix. As-is this
    # module doesn't support provisioning a complete user /w credentials.
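For reference, those manual steps look roughly like this with the kanidm CLI (a sketch from memory of the kanidm docs, not verified against the pinned version; check `kanidm person --help`):

```sh
# Generate a one-time credential-reset link the user can open themselves.
kanidm person credential create-reset-token zenware --name idm_admin

# Mark the account as a posix account so LDAP binds (e.g. Jellyfin) can use it.
kanidm person posix set zenware --name idm_admin
```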

@@ -82,7 +145,9 @@
    # https://kanidm.github.io/kanidm/stable/accounts/authentication_and_credentials.html#onboarding-a-new-person--resetting-credentials
    provision = {
      enable = true;
      autoRemove = false;
      autoRemove = true;
      acceptInvalidCerts = true;

      adminPasswordFile = config.sops.secrets."kanidm/admin-password".path;
      idmAdminPasswordFile = config.sops.secrets."kanidm/idm-admin-password".path;

@@ -98,6 +163,8 @@ in
            "git.users"
            "git.admins"
            "tv.users"
            "immich.users"
            "miniflux.users"
          ];
        };
      };

@@ -107,6 +174,8 @@ in
        "git.admins" = {};
        "tv.users" = {};
        "tv.admins" = {};
        "immich.users" = {};
        "miniflux.users" = {};
      };
    };
  };

@@ -1,7 +1,8 @@
{ config, pkgs, ... }:
let
  svcDomain = "feeds.${config.networking.domain}";
  svcPort = "8080";
  homelabDomain = config.networking.domain;
  svcDomain = "feeds.${homelabDomain}";
  svcPort = "8081"; # Prevent a Conflict
in
{
  services.caddy.virtualHosts."${svcDomain}".extraConfig = ''

@@ -22,32 +23,57 @@ in
    group = "miniflux";
    restartUnits = [ "miniflux.service" ];
  };
  services.kanidm.provision = {
    groups = {};
    systems.oauth2.miniflux = {
      displayName = "Miniflux Feed Reader";
      originUrl = "https://${fqdn}/callback";
      public = true; # enforces PKCE
      preferShortUsername = true;
      scopeMaps.pages_users = ["openid" "email" "profile"];
      claimMaps."${permissionsMap}".valuesByGroup.pages_admin = ["admin"];
    };
  sops.secrets."miniflux/oauth2_client_secret" = {
    owner = "miniflux";
    group = "kanidm";
    mode = "0440";
    restartUnits = [ "miniflux.service" "kanidm.service" ];
  };
  #services.kanidm.provision = {
  #groups = {};
  #systems.oauth2.miniflux = {
  #displayName = "Miniflux Feed Reader";
  #originUrl = "https://${fqdn}/callback";
  #public = true; # enforces PKCE
  #preferShortUsername = true;
  #scopeMaps.pages_users = ["openid" "email" "profile"];
  #claimMaps."${permissionsMap}".valuesByGroup.pages_admin = ["admin"];
  #};
  #};
  # NOTE: Currently requires some web-interface configuration
  services.miniflux = {
    enable = true;
    adminCredentialsFile = config.sops.secrets.miniflux_env.path;
    config = {
      BASE_URL = "https://${svcDomain}";
      CREATE_ADMIN = 0;
      DISABLE_LOCAL_AUTH = 1;
      #CREATE_ADMIN = 0;
      #DISABLE_LOCAL_AUTH = 1;
      OAUTH2_PROVIDER = "oidc";
      OAUTH2_OIDC_PROVIDER_NAME = "Kanidm";
      OAUTH2_OIDC_DISCOVERY_ENDPOINT = "https://id.${config.networking.domain}";
      OAUTH2_CLIENT_ID = "miniflux";
      OAUTH2_CLIENT_SECRET_FILE = config.sops.secrets."miniflux/oauth2_client_secret".path;
      OAUTH2_REDIRECT_URL = "https://${svcDomain}/oauth2/oidc/callback";
      OAUTH2_USER_CREATION = 1;
      OAUTH2_OIDC_DISCOVERY_ENDPOINT = "https://id.${homelabDomain}/oauth2/openid/miniflux";
      #OAUTH2_USER_CREATION = 1;
      CLEANUP_FREQUENCY = 48;
      LISTEN_ADDR = "localhost:${svcPort}";
    };
  };

  services.kanidm.provision.systems.oauth2.miniflux = {
    displayName = "miniflux";
    originUrl = "https://${svcDomain}/oauth2/oidc/callback";
    originLanding = "https://${svcDomain}/";
    basicSecretFile = config.sops.secrets."miniflux/oauth2_client_secret".path;
    scopeMaps."miniflux.users" = [
      "openid"
      "email"
      "profile"
      "groups"
    ];
    # WARNING: PKCE is currently not supported by gitea/forgejo,
    # see https://github.com/go-gitea/gitea/issues/21376
    allowInsecureClientDisablePkce = true;
    preferShortUsername = true;
  };
}

hosts/lithium/services/monitoring/README.md (Normal file, 14 lines)
@@ -0,0 +1,14 @@
# hosts/lithium/services/monitoring

This is a Grafana/Prometheus Monitoring Stack.
Why? Basically for the sake of it.

## Diagram

```mermaid
```

## References

- https://gist.github.com/rickhull/895b0cb38fdd537c1078a858cf15d63e
- https://xeiaso.net/blog/prometheus-grafana-loki-nixos-2020-11-20/
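A possible starting point for that empty diagram (a sketch; the components and ports are just the ones defined by the modules in this directory):

```mermaid
flowchart LR
    node[node exporter :9091] --> prom[Prometheus :9090]
    loki[Loki :3100] --> grafana[Grafana :3000]
    prom --> grafana
    caddy[Caddy reverse proxy] --> grafana
```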

@@ -1,6 +1,8 @@
{ config, pkgs, ... }:
{ inputs, config, pkgs, ... }:
let
  svcDomain = "grafana.${config.networking.domain}";
  homelabDomain = inputs.nixos-secrets.homelabDomain;
  #svcDomain = "grafana.${config.networking.domain}";
  svcDomain = "grafana.${homelabDomain}";
  svcPort = config.services.grafana.settings.server.http_port;
in
{
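One piece the stack still needs is wiring Grafana to the Prometheus and Loki instances defined alongside it; a sketch using the Grafana module's provisioning options (assumption: the ports below match prometheus.nix and loki.nix in this directory):

```nix
{
  # Declaratively provision datasources instead of clicking through the UI.
  services.grafana.provision.datasources.settings.datasources = [
    {
      name = "Prometheus";
      type = "prometheus";
      url = "http://localhost:9090";
      isDefault = true;
    }
    {
      name = "Loki";
      type = "loki";
      url = "http://localhost:3100";
    }
  ];
}
```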

hosts/lithium/services/monitoring/loki-local-config.yaml (Normal file, 4 lines)
@@ -0,0 +1,4 @@
auth_enabled: false

server:
  http_listen_port: 3100

hosts/lithium/services/monitoring/loki.nix (Normal file, 15 lines)
@@ -0,0 +1,15 @@
{ ... }:
{
  services.loki = {
    enable = true;
    #configFile = "./loki-local-config.yaml";
    # Nix Object representing the data that might otherwise be in a YAML config
    # https://github.com/grafana/loki/blob/main/cmd/loki/loki-local-config.yaml
    configuration = {
      auth_enabled = false;
      server = {
        http_listen_port = 3100; # Mirrors loki-local-config.yaml above.
      };
    };
  };
}

@@ -1,8 +1,8 @@
{ config, pkgs, ... }:
#let
let
  #svcDomain = "status.${config.networking.domain}";
  #svcPort = config.services.prometheus.exporters.node.port;
#in
  svcPort = config.services.prometheus.exporters.node.port;
in
{
  #services.caddy.virtualHosts."${svcDomain}".extraConfig = ''
  #reverse_proxy :${svcPort}

@@ -10,20 +10,27 @@

  services.prometheus = {
    enable = true;
    port = 9090;
    #globalConfig.scrape_interval = "10s"; # "1m"
    #scrapeConfigs = [
    #{
    #job_name = "node";
    #static_configs = [{
    # targets = [ "localhost:${toString svcPort}" ];
    #}];
    #}
    #];

    exporters = {
      # Export data about this host
      node = {
        enable = true;
        enabledCollectors = [ "systemd" ];
        port = 9091;
      };
    };

    # Read data from the export
    scrapeConfigs = [
      {
        job_name = "node-lithium";
        static_configs = [{
          targets = [ "localhost:${toString svcPort}" ];
        }];
      }
    ];
  };

  #services.prometheus.exporters.node = {
  #enable = true;
  #port = 9000;
  #enabledCollectors = [ "systemd" ];
  #};
}

hosts/lithium/services/old-kanidm.nix (Normal file, 79 lines)
@@ -0,0 +1,79 @@
{ config, pkgs, lib, ... }:
let
  cfg = config.services.kanidm;
  authDomain = "auth.${config.networking.domain}";
  certsDir = config.security.acme.certs."${authDomain}".directory;
in
{
  # TODO: Pull in the appropriate sops-nix secrets and get this baby rolling.
  # https://github.com/search?q=language%3ANix+services.kanidm&type=code
  services.kanidm = {
    # NOTE: Pin a specific kanidm version, we don't want issues from auto-updating.
    package = pkgs.kanidm_1_6;
    enableServer = true;

    # TODO: Initial kanidm setup.
    # I sort of want users to be able to create their own accounts and what I
    # don't want is for any of their account information to be leaked here as
    # it can be used for remote logins.
    # So kanidm accounts aside from the administration will be "impure".
    # I vastly prefer people being able to set their own credentials:
    # https://kanidm.github.io/kanidm/stable/accounts/authentication_and_credentials.html#onboarding-a-new-person--resetting-credentials
    provision = {
      enable = true;
      autoRemove = false;
      # TODO: Add secrets from `sops-nix`.
      adminPasswordFile = "TODO!SETME";
      idmAdminPasswordFile = "TODO!SETME";

      persons = {
        # https://kanidm.github.io/kanidm/stable/accounts/authentication_and_credentials.html#resetting-person-account-credentials
        # Needs to be a member of idm_people_admins and idm_high_privilege to prevent idm_service_desk from tampering.
        zenware = {
          displayName = "zenware";
          legalName = "zenware";
          mailAddresses = [ "zenware@${config.networking.domain}" ];
          groups = [
            "idm_high_privilege"
            "git.users"
            "git.admins"
          ];
        };
        # TODO: Make an idm_service_desk account.
      };
      groups = {
        # This group is `git` because it could be forgejo, gitea, etc.
        "git.users" = {};
        "git.admins" = {};
      };
      systems.oauth2 = {
        forgejo = {
          displayName = "forgejo";
          originUrl = "TODO!SETME";
          originLanding = "TODO!SETME";
          basicSecretFile = "TODO!SETME";
          scopeMaps."git.users" = [
            "openid"
            "email"
            "profile"
          ];
          # WARNING: PKCE is currently not supported by gitea/forgejo,
          # see https://github.com/go-gitea/gitea/issues/21376
          allowInsecureClientDisablePkce = true;
          preferShortUsername = true;
          claimMaps.groups = {
            joinType = "array";
            valuesByGroup."git.admins" = [ "admin" ];
          };
        };
      };
    };

    #enableClient = false;
    clientSettings = {
      uri = "https://${authDomain}";
      verify_hostnames = true;
      verify_ca = true;
    };
  };
}

@@ -34,13 +34,15 @@ in
      '';
      WorkingDirectory = "/home/palworld";
      Restart = "always";
      RuntimeMaxSec = "1d";
      RuntimeMaxSec = "1d"; # NOTE: This thing has memory leaks, restart to save ourselves.
      User = "palworld";
    };
  };

  # NOTE: Config is stashed at the following directory.
  # /home/palworld/.steam/steam/Steamapps/common/PalServer/Pal/Saved/Config/LinuxServer/PalWorldSettings.ini
  # TODO: There are benefits to including the meat of the configuration inside the 'nix' file.
  # Namely that it will result in actually updating the config when I rebuild.
  environment.etc."palworld/PalWorldSettings.ini" = {
    target = "/home/palworld/.steam/steam/Steamapps/common/PalServer/Pal/Saved/Config/LinuxServer/PalWorldSettings.ini";
    text = palworldSettings;

@@ -1,32 +1,39 @@
{ }:
{ config, ... }:
{
  services.smartd = {
    enable = true;
    devices = [
      {
        device = "ata-CT500MX500SSD1_2206E607D6AA";
        device = "/dev/disk/by-id/ata-CT500MX500SSD1_2206E607D6AA";
      }
      {
        device = "ata-CT500MX500SSD1_2206E607D728";
        device = "/dev/disk/by-id/ata-CT500MX500SSD1_2206E607D728";
      }
      {
        device = "ata-ST16000NM001G-2KK103_ZL2B73HT";
        device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL2B73HT";
      }
      {
        device = "ata-ST16000NM001G-2KK103_ZL2PSELL";
        device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL2PSELL";
      }
      {
        device = "ata-ST16000NM001G-2KK103_ZL2B4RSM";
        device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL2B4RSM";
      }
      {
        device = "ata-ST16000NM001G-2KK103_ZL23XYMM";
        device = "/dev/disk/by-id/ata-ST16000NM001G-2KK103_ZL23XYMM";
      }
      {
        device = "nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244331X";
        device = "/dev/disk/by-id/nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244331X";
      }
      {
        device = "nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244303V";
        device = "/dev/disk/by-id/nvme-Samsung_SSD_960_EVO_500GB_S3X4NB0K244303V";
      }
    ];
  };

  services.prometheus.exporters.smartctl = {
    enable = config.services.smartd.enable;
    openFirewall = config.services.smartd.enable;
    # https://github.com/prometheus-community/smartctl_exporter?tab=readme-ov-file#why-is-root-required-cant-i-add-a-user-to-the-disk-group
    user = "root";
  };
}

@@ -13,6 +13,8 @@ in
    enable = true;
    # NOTE: NixOS Attributes here resolve into these ENV vars:
    # https://github.com/louislam/uptime-kuma/wiki/Environment-Variables
    # settings = {};
    settings = {
      PORT = "4000";
    };
  };
}
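The ddclient config in acme-dns.nix registers `status.${homelabDomain}`, but no vhost for it appears in this commit; a sketch of the missing reverse-proxy block (assumptions: uptime-kuma is the service intended behind `status.`, and the port matches the PORT setting above):

```nix
services.caddy.virtualHosts."status.${homelabDomain}".extraConfig = ''
  reverse_proxy :4000
'';
```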