Go big or go home

This commit is contained in:
parent 50fa2ef9ed
commit c204541f38
@@ -4,6 +4,8 @@
  outputs = { self, ... }: {
    overlay = import ./overlay.nix;

    nixosModule = import ./module.nix;

    lib = import ./lib.nix;
  };
}
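For orientation, a downstream flake might consume these outputs roughly as sketched below; the input name `fudo-nixos` and the source path are hypothetical, not part of this commit:

{
  inputs.fudo-nixos.url = "path:/path/to/this/repo";   # hypothetical input

  outputs = { self, nixpkgs, fudo-nixos, ... }: {
    nixosConfigurations.example-host = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        fudo-nixos.nixosModule                           # ./module.nix
        { nixpkgs.overlays = [ fudo-nixos.overlay ]; }   # ./overlay.nix
      ];
    };
  };
}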
lib.nix (12 lines changed)
@@ -1,10 +1,10 @@
{ pkgs, ... }:

{
-  ip = import ./lib/ip.nix { inherit pkgs; };
-  dns = import ./lib/dns.nix { inherit pkgs; };
-  passwd = import ./lib/passwd.nix { inherit pkgs; };
-  lisp = import ./lib/lisp.nix { inherit pkgs; };
-  network = import ./lib/network.nix { inherit pkgs; };
-  fs = import ./lib/filesystem.nix { inherit pkgs; };
+  ip = import ./lib/lib/ip.nix { inherit pkgs; };
+  dns = import ./lib/lib/dns.nix { inherit pkgs; };
+  passwd = import ./lib/lib/passwd.nix { inherit pkgs; };
+  lisp = import ./lib/lib/lisp.nix { inherit pkgs; };
+  network = import ./lib/lib/network.nix { inherit pkgs; };
+  fs = import ./lib/lib/filesystem.nix { inherit pkgs; };
}
@@ -0,0 +1,11 @@
{ lib, config, pkgs, ... }:

{
  imports = [
    ./instance.nix

    ./fudo

    ./informis
  ];
}
@ -0,0 +1,206 @@
|
|||
{ config, lib, pkgs, ... } @ toplevel:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
domainOpts = { name, ... }: let
|
||||
domain = name;
|
||||
in {
|
||||
options = with types; {
|
||||
email = mkOption {
|
||||
type = str;
|
||||
description = "Domain administrator email.";
|
||||
default = "admin@${domain}";
|
||||
};
|
||||
|
||||
extra-domains = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of domains to add to this certificate.";
|
||||
default = [];
|
||||
};
|
||||
|
||||
local-copies = let
|
||||
localCopyOpts = { name, ... }: let
|
||||
copy = name;
|
||||
in {
|
||||
options = with types; let
|
||||
target-path = "/run/ssl-certificates/${domain}/${copy}";
|
||||
in {
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User to which this copy belongs.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Group to which this copy belongs.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
service = mkOption {
|
||||
type = str;
|
||||
description = "systemd job to copy certs.";
|
||||
default = "fudo-acme-${domain}-${copy}-certs.service";
|
||||
};
|
||||
|
||||
certificate = mkOption {
|
||||
type = str;
|
||||
description = "Full path to the local copy certificate.";
|
||||
default = "${target-path}/cert.pem";
|
||||
};
|
||||
|
||||
full-certificate = mkOption {
|
||||
type = str;
|
||||
description = "Full path to the local copy certificate.";
|
||||
default = "${target-path}/fullchain.pem";
|
||||
};
|
||||
|
||||
chain = mkOption {
|
||||
type = str;
|
||||
description = "Full path to the local copy certificate.";
|
||||
default = "${target-path}/chain.pem";
|
||||
};
|
||||
|
||||
private-key = mkOption {
|
||||
type = str;
|
||||
description = "Full path to the local copy certificate.";
|
||||
default = "${target-path}/key.pem";
|
||||
};
|
||||
|
||||
dependent-services = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of systemd services depending on this copy.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
part-of = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of systemd targets to which this copy belongs.";
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
in mkOption {
|
||||
type = attrsOf (submodule localCopyOpts);
|
||||
description = "Map of copies to make for use by services.";
|
||||
default = {};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
head-or-null = lst: if (lst == []) then null else head lst;
|
||||
rm-service-ext = filename:
|
||||
head-or-null (builtins.match "^(.+)\\.service$" filename);
|
||||
|
||||
concatMapAttrs = f: attrs:
|
||||
foldr (a: b: a // b) {} (mapAttrsToList f attrs);
|
||||
|
||||
cfg = config.fudo.acme;
|
||||
hasLocalDomains = hasAttr hostname cfg.host-domains;
|
||||
localDomains = if hasLocalDomains then
|
||||
cfg.host-domains.${hostname} else {};
|
||||
|
||||
optionalStringOr = str: default:
|
||||
if (str != null) then str else default;
|
||||
|
||||
in {
|
||||
options.fudo.acme = with types; {
|
||||
host-domains = mkOption {
|
||||
type = attrsOf (attrsOf (submodule domainOpts));
|
||||
description = "Map of host to domains to domain options.";
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
security.acme.certs = mapAttrs (domain: domainOpts: {
|
||||
email = domainOpts.email;
|
||||
extraDomainNames = domainOpts.extra-domains;
|
||||
}) localDomains;
|
||||
|
||||
# Assume that if we're acquiring SSL certs, we have a real IP for the
|
||||
# host. nginx must have an acme dir for security.acme to work.
|
||||
services.nginx = mkIf hasLocalDomains {
|
||||
enable = true;
|
||||
|
||||
recommendedGzipSettings = true;
|
||||
recommendedOptimisation = true;
|
||||
recommendedTlsSettings = true;
|
||||
recommendedProxySettings = true;
|
||||
|
||||
virtualHosts.${config.instance.host-fqdn} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
# Just...force override if you want this to point somewhere.
|
||||
locations."/" = {
|
||||
return = "403 Forbidden";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 80 443 ];
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = let
|
||||
copies = concatMapAttrs (domain: domainOpts:
|
||||
domainOpts.local-copies) localDomains;
|
||||
perms = copyOpts: if (copyOpts.group != null) then "0550" else "0500";
|
||||
copy-paths = mapAttrsToList (copy: copyOpts:
|
||||
let
|
||||
dir-entry = copyOpts: file: "d \"${dirOf file}\" ${perms copyOpts} ${copyOpts.user} ${optionalStringOr copyOpts.group "-"} - -";
|
||||
in map (dir-entry copyOpts) [
|
||||
copyOpts.certificate
|
||||
copyOpts.full-certificate
|
||||
copyOpts.chain
|
||||
copyOpts.private-key
|
||||
]) copies;
|
||||
in unique (concatMap (i: unique i) copy-paths);
|
||||
|
||||
services = concatMapAttrs (domain: domainOpts:
|
||||
concatMapAttrs (copy: copyOpts: let
|
||||
key-perms = copyOpts: if (copyOpts.group != null) then "0440" else "0400";
|
||||
source = config.security.acme.certs.${domain}.directory;
|
||||
target = copyOpts.path;
|
||||
owners =
|
||||
if (copyOpts.group != null) then
|
||||
"${copyOpts.user}:${copyOpts.group}"
|
||||
else copyOpts.user;
|
||||
install-certs = pkgs.writeShellScript "fudo-install-${domain}-${copy}-certs.sh" ''
|
||||
cp ${source}/cert.pem ${copyOpts.certificate}
|
||||
chmod 0444 ${copyOpts.certificate}
|
||||
chown ${owners} ${copyOpts.certificate}
|
||||
|
||||
cp ${source}/full.pem ${copyOpts.full-certificate}
|
||||
chmod 0444 ${copyOpts.full-certificate}
|
||||
chown ${owners} ${copyOpts.full-certificate}
|
||||
|
||||
cp ${source}/chain.pem ${copyOpts.chain}
|
||||
chmod 0444 ${copyOpts.chain}
|
||||
chown ${owners} ${copyOpts.chain}
|
||||
|
||||
cp ${source}/key.pem ${copyOpts.private-key}
|
||||
chmod ${key-perms copyOpts} ${copyOpts.private-key}
|
||||
chown ${owners} ${copyOpts.private-key}
|
||||
'';
|
||||
|
||||
service-name = rm-service-ext copyOpts.service;
|
||||
in {
|
||||
${service-name} = {
|
||||
description = "Copy ${domain} ACME certs for ${copy}.";
|
||||
after = [ "acme-${domain}.service" ];
|
||||
before = copyOpts.dependent-services;
|
||||
wantedBy = [ "multi-user.target" ] ++ copyOpts.dependent-services;
|
||||
partOf = copyOpts.part-of;
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = install-certs;
|
||||
RemainAfterExit = true;
|
||||
StandardOutput = "journal";
|
||||
};
|
||||
};
|
||||
}) domainOpts.local-copies) localDomains;
|
||||
};
|
||||
};
|
||||
}
|
|
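As a usage sketch for the module above (the hostname, domain, and the postfix copy are hypothetical), a host could request a certificate plus a service-owned local copy like this:

{
  fudo.acme.host-domains."my-host"."example.com" = {
    email = "hostmaster@example.com";
    extra-domains = [ "www.example.com" ];
    local-copies.postfix = {
      user = "postfix";
      group = "postfix";
      dependent-services = [ "postfix.service" ];
    };
  };
}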
@ -0,0 +1,69 @@
|
|||
# Starts an Nginx server on $HOSTNAME just to get a cert for this host
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.acme;
|
||||
|
||||
# wwwRoot = hostname:
|
||||
# pkgs.writeTextFile {
|
||||
# name = "index.html";
|
||||
|
||||
# text = ''
|
||||
# <html>
|
||||
# <head>
|
||||
# <title>${hostname}</title>
|
||||
# </head>
|
||||
# <body>
|
||||
# <h1>${hostname}</h1>
|
||||
# </body>
|
||||
# </html>
|
||||
# '';
|
||||
# destination = "/www";
|
||||
# };
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.acme = {
|
||||
enable = mkEnableOption "Fetch ACME certs for supplied local hostnames.";
|
||||
|
||||
hostnames = mkOption {
|
||||
type = with types; listOf str;
|
||||
description = "A list of hostnames mapping to this host, for which to acquire SSL certificates.";
|
||||
default = [];
|
||||
example = [
|
||||
"my.hostname.com"
|
||||
"alt.hostname.com"
|
||||
];
|
||||
};
|
||||
|
||||
admin-address = mkOption {
|
||||
type = types.str;
|
||||
description = "The admin address in charge of these addresses.";
|
||||
default = "admin@fudo.org";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
|
||||
virtualHosts = listToAttrs
|
||||
(map
|
||||
(hostname:
|
||||
nameValuePair hostname
|
||||
{
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
# root = (wwwRoot hostname) + ("/" + "www");
|
||||
})
|
||||
cfg.hostnames);
|
||||
};
|
||||
|
||||
security.acme.certs = listToAttrs
|
||||
(map (hostname: nameValuePair hostname { email = cfg.admin-address; })
|
||||
cfg.hostnames);
|
||||
};
|
||||
}
|
|
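A minimal sketch of enabling the per-hostname certificates above, reusing the example hostnames from the option declarations:

{
  fudo.acme = {
    enable = true;
    hostnames = [ "my.hostname.com" "alt.hostname.com" ];
    admin-address = "admin@fudo.org";
  };
}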
@ -0,0 +1,67 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.authentication;
|
||||
in {
|
||||
options.fudo.authentication = {
|
||||
enable = mkEnableOption "Use Fudo users & groups from LDAP.";
|
||||
|
||||
ssl-ca-certificate = mkOption {
|
||||
type = types.str;
|
||||
description = "Path to the CA certificate to use to bind to the server.";
|
||||
};
|
||||
|
||||
bind-passwd-file = mkOption {
|
||||
type = types.str;
|
||||
description = "Path to a file containing the password used to bind to the server.";
|
||||
};
|
||||
|
||||
ldap-url = mkOption {
|
||||
type = types.str;
|
||||
description = "URL of the LDAP server.";
|
||||
example = "ldaps://auth.fudo.org";
|
||||
};
|
||||
|
||||
base = mkOption {
|
||||
type = types.str;
|
||||
description = "The LDAP base in which to look for users.";
|
||||
default = "dc=fudo,dc=org";
|
||||
};
|
||||
|
||||
bind-dn = mkOption {
|
||||
type = types.str;
|
||||
description = "The DN with which to bind the LDAP server.";
|
||||
default = "cn=auth_reader,dc=fudo,dc=org";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
users.ldap = {
|
||||
enable = true;
|
||||
base = cfg.base;
|
||||
bind = {
|
||||
distinguishedName = cfg.bind-dn;
|
||||
passwordFile = cfg.bind-passwd-file;
|
||||
timeLimit = 5;
|
||||
};
|
||||
loginPam = true;
|
||||
nsswitch = true;
|
||||
server = cfg.ldap-url;
|
||||
timeLimit = 5;
|
||||
useTLS = true;
|
||||
extraConfig = ''
|
||||
TLS_CACERT ${cfg.ssl-ca-certificate}
|
||||
TLS_REQCERT allow
|
||||
'';
|
||||
|
||||
daemon = {
|
||||
enable = true;
|
||||
extraConfig = ''
|
||||
tls_cacertfile ${cfg.ssl-ca-certificate}
|
||||
tls_reqcert allow
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
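A hedged example of wiring the LDAP options above into a host; the certificate and password paths are hypothetical, the rest follow the declared defaults and examples:

{
  fudo.authentication = {
    enable = true;
    ldap-url = "ldaps://auth.fudo.org";
    base = "dc=fudo,dc=org";
    bind-dn = "cn=auth_reader,dc=fudo,dc=org";
    bind-passwd-file = "/run/keys/ldap-bind.passwd";     # hypothetical path
    ssl-ca-certificate = "/etc/ssl/certs/fudo-ca.pem";   # hypothetical path
  };
}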
@ -0,0 +1,154 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.backplane.dns;
|
||||
|
||||
powerdns-conf-dir = "${cfg.powerdns.home}/conf.d";
|
||||
|
||||
clientHostOpts = { name, ... }: {
|
||||
options = with types; {
|
||||
password-file = mkOption {
|
||||
type = path;
|
||||
description =
|
||||
"Location (on the build host) of the file containing the host password.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
serviceOpts = { name, ... }: {
|
||||
options = with types; {
|
||||
password-file = mkOption {
|
||||
type = path;
|
||||
description =
|
||||
"Location (on the build host) of the file containing the service password.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
databaseOpts = { ... }: {
|
||||
options = with types; {
|
||||
host = mkOption {
|
||||
type = str;
|
||||
description = "Hostname or IP of the PostgreSQL server.";
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = str;
|
||||
description = "Database to use for DNS backplane.";
|
||||
default = "backplane_dns";
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
type = str;
|
||||
description = "Database user for DNS backplane.";
|
||||
default = "backplane_dns";
|
||||
};
|
||||
|
||||
password-file = mkOption {
|
||||
type = str;
|
||||
description = "File containing password for database user.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.backplane = with types; {
|
||||
|
||||
client-hosts = mkOption {
|
||||
type = attrsOf (submodule clientHostOpts);
|
||||
description = "List of backplane client options.";
|
||||
default = {};
|
||||
};
|
||||
|
||||
services = mkOption {
|
||||
type = attrsOf (submodule serviceOpts);
|
||||
description = "List of backplane service options.";
|
||||
default = {};
|
||||
};
|
||||
|
||||
backplane-host = mkOption {
|
||||
type = types.str;
|
||||
description = "Hostname of the backplane XMPP server.";
|
||||
};
|
||||
|
||||
dns = {
|
||||
enable = mkEnableOption "Enable backplane dynamic DNS server.";
|
||||
|
||||
port = mkOption {
|
||||
type = port;
|
||||
description = "Port on which to serve authoritative DNS requests.";
|
||||
default = 53;
|
||||
};
|
||||
|
||||
listen-v4-addresses = mkOption {
|
||||
type = listOf str;
|
||||
description = "IPv4 addresses on which to listen for dns requests.";
|
||||
default = [ "0.0.0.0" ];
|
||||
};
|
||||
|
||||
listen-v6-addresses = mkOption {
|
||||
type = listOf str;
|
||||
description = "IPv6 addresses on which to listen for dns requests.";
|
||||
example = [ "[abcd::1]" ];
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
required-services = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of services required before the DNS server can start.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User as which to run DNS backplane listener service.";
|
||||
default = "backplane-dns";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group as which to run DNS backplane listener service.";
|
||||
default = "backplane-dns";
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = submodule databaseOpts;
|
||||
description = "Database settings for the DNS server.";
|
||||
};
|
||||
|
||||
powerdns = {
|
||||
home = mkOption {
|
||||
type = str;
|
||||
description = "Directory at which to store powerdns configuration and state.";
|
||||
default = "/run/backplane-dns/powerdns";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "Username as which to run PowerDNS.";
|
||||
default = "backplane-powerdns";
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = submodule databaseOpts;
|
||||
description = "Database settings for the DNS server.";
|
||||
};
|
||||
};
|
||||
|
||||
backplane-role = {
|
||||
role = mkOption {
|
||||
type = types.str;
|
||||
description = "Backplane XMPP role name for the DNS server.";
|
||||
default = "service-dns";
|
||||
};
|
||||
|
||||
password-file = mkOption {
|
||||
type = types.str;
|
||||
description = "File containing XMPP password for backplane role.";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
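A rough sketch of setting the backplane options declared above; all hostnames, entry names, and secret paths are hypothetical:

{
  fudo.backplane = {
    backplane-host = "backplane.fudo.org";
    client-hosts.my-host.password-file = ./secrets/my-host.passwd;
    services.service-dns.password-file = ./secrets/service-dns.passwd;

    dns = {
      enable = true;
      listen-v4-addresses = [ "0.0.0.0" ];
      database = {
        host = "localhost";
        password-file = "/run/keys/backplane-dns-db.passwd";
      };
      powerdns.database = {
        host = "localhost";
        password-file = "/run/keys/powerdns-db.passwd";
      };
      backplane-role.password-file = "/run/keys/dns-role.passwd";
    };
  };
}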
@@ -0,0 +1,10 @@
{ config, pkgs, lib, ... }:

with lib;
{
  imports = [
    ./common.nix
    ./dns.nix
    ./jabber.nix
  ];
}
@ -0,0 +1,143 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
backplane-cfg = config.fudo.backplane;
|
||||
|
||||
cfg = backplane-cfg.dns;
|
||||
|
||||
powerdns-conf-dir = "${cfg.powerdns.home}/conf.d";
|
||||
|
||||
in {
|
||||
config = mkIf cfg.enable {
|
||||
users = {
|
||||
users = {
|
||||
"${cfg.user}" = {
|
||||
isSystemUser = true;
|
||||
group = cfg.group;
|
||||
createHome = true;
|
||||
home = "/var/home/${cfg.user}";
|
||||
};
|
||||
${cfg.powerdns.user} = {
|
||||
isSystemUser = true;
|
||||
home = cfg.powerdns.home;
|
||||
createHome = true;
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
${cfg.group} = { members = [ cfg.user ]; };
|
||||
${cfg.powerdns.user} = { members = [ cfg.powerdns.user ]; };
|
||||
};
|
||||
};
|
||||
|
||||
fudo = {
|
||||
system.services = {
|
||||
backplane-powerdns-config-generator = {
|
||||
description =
|
||||
"Generate postgres configuration for backplane DNS server.";
|
||||
requires = cfg.required-services;
|
||||
type = "oneshot";
|
||||
restartIfChanged = true;
|
||||
partOf = [ "backplane-dns.target" ];
|
||||
|
||||
readWritePaths = [ powerdns-conf-dir ];
|
||||
|
||||
# This builds the config in a bash script, to avoid storing the password
|
||||
# in the nix store at any point
|
||||
script = let
|
||||
user = cfg.powerdns.user;
|
||||
db = cfg.powerdns.database;
|
||||
in ''
|
||||
TMPDIR=$(${pkgs.coreutils}/bin/mktemp -d -t pdns-XXXXXXXXXX)
|
||||
TMPCONF=$TMPDIR/pdns.local.gpgsql.conf
|
||||
|
||||
if [ ! -f ${cfg.database.password-file} ]; then
|
||||
echo "${cfg.database.password-file} does not exist!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
touch $TMPCONF
|
||||
chmod go-rwx $TMPCONF
|
||||
chown ${user} $TMPCONF
|
||||
PASSWORD=$(cat ${db.password-file})
|
||||
echo "launch+=gpgsql" >> $TMPCONF
|
||||
echo "gpgsql-host=${db.host}" >> $TMPCONF
|
||||
echo "gpgsql-dbname=${db.database}" >> $TMPCONF
|
||||
echo "gpgsql-user=${db.username}" >> $TMPCONF
|
||||
echo "gpgsql-password=$PASSWORD" >> $TMPCONF
|
||||
echo "gpgsql-dnssec=yes" >> $TMPCONF
|
||||
|
||||
mv $TMPCONF ${powerdns-conf-dir}/pdns.local.gpgsql.conf
|
||||
rm -rf $TMPDIR
|
||||
|
||||
exit 0
|
||||
'';
|
||||
};
|
||||
|
||||
backplane-dns = {
|
||||
description = "Fudo DNS Backplane Server";
|
||||
restartIfChanged = true;
|
||||
path = with pkgs; [ backplane-dns-server ];
|
||||
execStart = "launch-backplane-dns.sh";
|
||||
pidFile = "/run/backplane-dns.$USERNAME.pid";
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
partOf = [ "backplane-dns.target" ];
|
||||
requires = cfg.required-services ++ [ "postgresql.service" ];
|
||||
environment = {
|
||||
FUDO_DNS_BACKPLANE_XMPP_HOSTNAME = backplane-cfg.backplane-host;
|
||||
FUDO_DNS_BACKPLANE_XMPP_USERNAME = cfg.backplane-role.role;
|
||||
FUDO_DNS_BACKPLANE_XMPP_PASSWORD_FILE = cfg.backplane-role.password-file;
|
||||
FUDO_DNS_BACKPLANE_DATABASE_HOSTNAME = cfg.database.host;
|
||||
FUDO_DNS_BACKPLANE_DATABASE_NAME = cfg.database.database;
|
||||
FUDO_DNS_BACKPLANE_DATABASE_USERNAME =
|
||||
cfg.database.username;
|
||||
FUDO_DNS_BACKPLANE_DATABASE_PASSWORD_FILE =
|
||||
cfg.database.password-file;
|
||||
|
||||
CL_SOURCE_REGISTRY =
|
||||
pkgs.lib.fudo.lisp.lisp-source-registry pkgs.backplane-dns-server;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${powerdns-conf-dir} 0700 ${cfg.powerdns.user} - - -"
|
||||
];
|
||||
|
||||
targets = {
|
||||
backplane-dns = {
|
||||
description = "Fudo DNS backplane services.";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = cfg.required-services ++ [ "postgresql.service" ];
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
backplane-powerdns = let
|
||||
pdns-config-dir = pkgs.writeTextDir "pdns.conf" ''
|
||||
local-address=${lib.concatStringsSep ", " cfg.listen-v4-addresses}
|
||||
local-ipv6=${lib.concatStringsSep ", " cfg.listen-v6-addresses}
|
||||
local-port=${toString cfg.port}
|
||||
launch=
|
||||
include-dir=${powerdns-conf-dir}/
|
||||
'';
|
||||
in {
|
||||
description = "Backplane PowerDNS name server";
|
||||
requires = [
|
||||
"postgresql.service"
|
||||
"backplane-powerdns-config-generator.service"
|
||||
];
|
||||
after = [ "network.target" ];
|
||||
path = with pkgs; [ powerdns postgresql ];
|
||||
serviceConfig = {
|
||||
ExecStart = "pdns_server --setuid=${cfg.powerdns.user} --setgid=${cfg.powerdns.user} --chroot=${cfg.powerdns.home} --socket-dir=/ --daemon=no --guardian=no --disable-syslog --write-pid=no --config-dir=${pdns-config-dir}";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,90 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
{
|
||||
config = mkIf config.fudo.jabber.enable {
|
||||
fudo = let
|
||||
cfg = config.fudo.backplane;
|
||||
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
backplane-server = cfg.backplane-host;
|
||||
|
||||
generate-auth-file = name: files: let
|
||||
make-entry = name: passwd-file:
|
||||
''("${name}" . "${readFile passwd-file}")'';
|
||||
entries = mapAttrsToList make-entry files;
|
||||
content = concatStringsSep "\n" entries;
|
||||
in pkgs.writeText "${name}-backplane-auth.scm" "'(${content})";
|
||||
|
||||
host-auth-file = generate-auth-file "host"
|
||||
(mapAttrs (hostname: hostOpts: hostOpts.password-file)
|
||||
cfg.client-hosts);
|
||||
|
||||
service-auth-file = generate-auth-file "service"
|
||||
(mapAttrs (service: serviceOpts: serviceOpts.password-file)
|
||||
cfg.services);
|
||||
|
||||
in {
|
||||
secrets.host-secrets.${hostname} = {
|
||||
backplane-host-auth = {
|
||||
source-file = host-auth-file;
|
||||
target-file = "/var/backplane/host-passwords.scm";
|
||||
user = config.fudo.jabber.user;
|
||||
};
|
||||
backplane-service-auth = {
|
||||
source-file = service-auth-file;
|
||||
target-file = "/var/backplane/service-passwords.scm";
|
||||
user = config.fudo.jabber.user;
|
||||
};
|
||||
};
|
||||
|
||||
jabber = {
|
||||
environment = {
|
||||
FUDO_HOST_PASSWD_FILE =
  config.fudo.secrets.host-secrets.${hostname}.backplane-host-auth.target-file;
FUDO_SERVICE_PASSWD_FILE =
  config.fudo.secrets.host-secrets.${hostname}.backplane-service-auth.target-file;
|
||||
};
|
||||
|
||||
sites.${backplane-server} = {
|
||||
site-config = {
|
||||
auth_method = "external";
|
||||
extauth_program =
|
||||
"${pkgs.guile}/bin/guile -s ${pkgs.backplane-auth}/backplane-auth.scm";
|
||||
extauth_pool_size = 3;
|
||||
auth_use_cache = true;
|
||||
|
||||
modules = {
|
||||
mod_adhoc = {};
|
||||
mod_caps = {};
|
||||
mod_carboncopy = {};
|
||||
mod_client_state = {};
|
||||
mod_configure = {};
|
||||
mod_disco = {};
|
||||
mod_fail2ban = {};
|
||||
mod_last = {};
|
||||
mod_offline = {
|
||||
access_max_user_messages = 5000;
|
||||
};
|
||||
mod_ping = {};
|
||||
mod_pubsub = {
|
||||
access_createnode = "pubsub_createnode";
|
||||
ignore_pep_from_offline = true;
|
||||
last_item_cache = false;
|
||||
plugins = [
|
||||
"flat"
|
||||
"pep"
|
||||
];
|
||||
};
|
||||
mod_roster = {};
|
||||
mod_stream_mgmt = {};
|
||||
mod_time = {};
|
||||
mod_version = {};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,262 @@
|
|||
{ pkgs, lib, config, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.chat;
|
||||
mattermost-config-target = "/run/chat/mattermost/mattermost-config.json";
|
||||
|
||||
in {
|
||||
options.fudo.chat = with types; {
|
||||
enable = mkEnableOption "Enable chat server";
|
||||
|
||||
hostname = mkOption {
|
||||
type = str;
|
||||
description = "Hostname at which this chat server is accessible.";
|
||||
example = "chat.mydomain.com";
|
||||
};
|
||||
|
||||
site-name = mkOption {
|
||||
type = str;
|
||||
description = "The name of this chat server.";
|
||||
example = "My Fancy Chat Site";
|
||||
};
|
||||
|
||||
smtp = {
|
||||
server = mkOption {
|
||||
type = str;
|
||||
description = "SMTP server to use for sending notification emails.";
|
||||
example = "mail.my-site.com";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "Username with which to connect to the SMTP server.";
|
||||
};
|
||||
|
||||
password-file = mkOption {
|
||||
type = str;
|
||||
description =
|
||||
"Path to a file containing the password to use while connecting to the SMTP server.";
|
||||
};
|
||||
};
|
||||
|
||||
state-directory = mkOption {
|
||||
type = str;
|
||||
description = "Path at which to store server state data.";
|
||||
default = "/var/lib/mattermost";
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = (submodule {
|
||||
options = {
|
||||
name = mkOption {
|
||||
type = str;
|
||||
description = "Database name.";
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
type = str;
|
||||
description = "Database host.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "Database user.";
|
||||
};
|
||||
|
||||
password-file = mkOption {
|
||||
type = str;
|
||||
description = "Path to file containing database password.";
|
||||
};
|
||||
};
|
||||
});
|
||||
description = "Database configuration.";
|
||||
example = {
|
||||
name = "my_database";
|
||||
hostname = "my.database.com";
|
||||
user = "db_user";
|
||||
password-file = /path/to/some/file.pw;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable (let
|
||||
pkg = pkgs.mattermost;
|
||||
default-config = builtins.fromJSON (readFile "${pkg}/config/config.json");
|
||||
modified-config = recursiveUpdate default-config {
|
||||
ServiceSettings.SiteURL = "https://${cfg.hostname}";
|
||||
ServiceSettings.ListenAddress = "127.0.0.1:8065";
|
||||
TeamSettings.SiteName = cfg.site-name;
|
||||
EmailSettings = {
|
||||
RequireEmailVerification = true;
|
||||
SMTPServer = cfg.smtp.server;
|
||||
SMTPPort = 587;
|
||||
EnableSMTPAuth = true;
|
||||
SMTPUsername = cfg.smtp.user;
|
||||
SMTPPassword = "__SMTP_PASSWD__";
|
||||
SendEmailNotifications = true;
|
||||
ConnectionSecurity = "STARTTLS";
|
||||
FeedbackEmail = "chat@fudo.org";
|
||||
FeedbackName = "Admin";
|
||||
};
|
||||
EnableEmailInvitations = true;
|
||||
SqlSettings.DriverName = "postgres";
|
||||
SqlSettings.DataSource = "postgres://${
|
||||
cfg.database.user
|
||||
}:__DATABASE_PASSWORD__@${
|
||||
cfg.database.hostname
|
||||
}:5432/${
|
||||
cfg.database.name
|
||||
}";
|
||||
};
|
||||
mattermost-config-file-template =
|
||||
pkgs.writeText "mattermost-config.json.template" (builtins.toJSON modified-config);
|
||||
mattermost-user = "mattermost";
|
||||
mattermost-group = "mattermost";
|
||||
|
||||
generate-mattermost-config = target: template: smtp-passwd-file: db-passwd-file:
|
||||
pkgs.writeScript "mattermost-config-generator.sh" ''
|
||||
SMTP_PASSWD=$( cat ${smtp-passwd-file} )
|
||||
DATABASE_PASSWORD=$( cat ${db-passwd-file} )
|
||||
sed -e "s/__SMTP_PASSWD__/$SMTP_PASSWD/" -e "s/__DATABASE_PASSWORD__/$DATABASE_PASSWORD/" ${template} > ${target}
|
||||
'';
|
||||
|
||||
in {
|
||||
users = {
|
||||
users = {
|
||||
${mattermost-user} = {
|
||||
isSystemUser = true;
|
||||
group = mattermost-group;
|
||||
};
|
||||
};
|
||||
|
||||
groups = { ${mattermost-group} = { members = [ mattermost-user ]; }; };
|
||||
};
|
||||
|
||||
fudo.system.services.mattermost = {
|
||||
description = "Mattermost Chat Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
|
||||
preStart = ''
|
||||
${generate-mattermost-config
|
||||
mattermost-config-target
|
||||
mattermost-config-file-template
|
||||
cfg.smtp.password-file
|
||||
cfg.database.password-file}
|
||||
cp ${mattermost-config-target} ${cfg.state-directory}/config/config.json
|
||||
cp -uRL ${pkg}/client ${cfg.state-directory}
|
||||
chown ${mattermost-user}:${mattermost-group} ${cfg.state-directory}/client
|
||||
chmod 0750 ${cfg.state-directory}/client
|
||||
'';
|
||||
execStart = "${pkg}/bin/mattermost";
|
||||
workingDirectory = cfg.state-directory;
|
||||
user = mattermost-user;
|
||||
group = mattermost-group;
|
||||
};
|
||||
|
||||
systemd = {
|
||||
|
||||
tmpfiles.rules = [
|
||||
"d ${cfg.state-directory} 0750 ${mattermost-user} ${mattermost-group} - -"
|
||||
"d ${cfg.state-directory}/config 0750 ${mattermost-user} ${mattermost-group} - -"
|
||||
"L ${cfg.state-directory}/bin - - - - ${pkg}/bin"
|
||||
"L ${cfg.state-directory}/fonts - - - - ${pkg}/fonts"
|
||||
"L ${cfg.state-directory}/i18n - - - - ${pkg}/i18n"
|
||||
"L ${cfg.state-directory}/templates - - - - ${pkg}/templates"
|
||||
];
|
||||
|
||||
# services.mattermost = {
|
||||
# description = "Mattermost Chat Server";
|
||||
# wantedBy = [ "multi-user.target" ];
|
||||
# after = [ "network.target" ];
|
||||
|
||||
# preStart = ''
|
||||
# ${generate-mattermost-config
|
||||
# mattermost-config-target
|
||||
# mattermost-config-file-template
|
||||
# cfg.smtp.password-file
|
||||
# cfg.database.password-file}
|
||||
# cp ${cfg.smtp.password-file} ${cfg.state-directory}/config/config.json
|
||||
# cp -uRL ${pkg}/client ${cfg.state-directory}
|
||||
# chown ${mattermost-user}:${mattermost-group} ${cfg.state-directory}/client
|
||||
# chmod 0750 ${cfg.state-directory}/client
|
||||
# '';
|
||||
|
||||
# serviceConfig = {
|
||||
# PermissionsStartOnly = true;
|
||||
# ExecStart = "${pkg}/bin/mattermost";
|
||||
# WorkingDirectory = cfg.state-directory;
|
||||
# Restart = "always";
|
||||
# RestartSec = "10";
|
||||
# LimitNOFILE = "49152";
|
||||
# User = mattermost-user;
|
||||
# Group = mattermost-group;
|
||||
# };
|
||||
# };
|
||||
};
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
|
||||
appendHttpConfig = ''
|
||||
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=mattermost_cache:10m max_size=3g inactive=120m use_temp_path=off;
|
||||
'';
|
||||
|
||||
virtualHosts = {
|
||||
"${cfg.hostname}" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
locations."/" = {
|
||||
proxyPass = "http://127.0.0.1:8065";
|
||||
|
||||
extraConfig = ''
|
||||
client_max_body_size 50M;
|
||||
proxy_set_header Connection "";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-By $server_addr:$server_port;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Frame-Options SAMEORIGIN;
|
||||
proxy_buffers 256 16k;
|
||||
proxy_buffer_size 16k;
|
||||
proxy_read_timeout 600s;
|
||||
proxy_cache mattermost_cache;
|
||||
proxy_cache_revalidate on;
|
||||
proxy_cache_min_uses 2;
|
||||
proxy_cache_use_stale timeout;
|
||||
proxy_cache_lock on;
|
||||
proxy_http_version 1.1;
|
||||
'';
|
||||
};
|
||||
|
||||
locations."~ /api/v[0-9]+/(users/)?websocket$" = {
|
||||
proxyPass = "http://127.0.0.1:8065";
|
||||
|
||||
extraConfig = ''
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
client_max_body_size 50M;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-By $server_addr:$server_port;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Frame-Options SAMEORIGIN;
|
||||
proxy_buffers 256 16k;
|
||||
proxy_buffer_size 16k;
|
||||
client_body_timeout 60;
|
||||
send_timeout 300;
|
||||
lingering_timeout 5;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_send_timeout 300;
|
||||
proxy_read_timeout 90s;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
|
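A configuration sketch for the chat module, built from the examples embedded in the option declarations; the SMTP user and secret paths are hypothetical:

{
  fudo.chat = {
    enable = true;
    hostname = "chat.mydomain.com";
    site-name = "My Fancy Chat Site";
    smtp = {
      server = "mail.my-site.com";
      user = "chat-mailer";                          # hypothetical
      password-file = "/run/keys/chat-smtp.passwd";  # hypothetical
    };
    database = {
      name = "my_database";
      hostname = "my.database.com";
      user = "db_user";
      password-file = "/run/keys/chat-db.passwd";    # hypothetical
    };
  };
}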
@ -0,0 +1,131 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.client.dns;
|
||||
|
||||
ssh-key-files =
|
||||
map (host-key: host-key.path) config.services.openssh.hostKeys;
|
||||
|
||||
ssh-key-args = concatStringsSep " " (map (file: "-f ${file}") ssh-key-files);
|
||||
|
||||
in {
|
||||
options.fudo.client.dns = {
|
||||
ipv4 = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Report host external IPv4 address to Fudo DynDNS server.";
|
||||
};
|
||||
|
||||
ipv6 = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Report host external IPv6 address to Fudo DynDNS server.";
|
||||
};
|
||||
|
||||
sshfp = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Report host SSH fingerprints to the Fudo DynDNS server.";
|
||||
};
|
||||
|
||||
domain = mkOption {
|
||||
type = types.str;
|
||||
description = "Domain under which this host is registered.";
|
||||
default = "fudo.link";
|
||||
};
|
||||
|
||||
server = mkOption {
|
||||
type = types.str;
|
||||
description = "Backplane DNS server to which changes will be reported.";
|
||||
default = "backplane.fudo.org";
|
||||
};
|
||||
|
||||
password-file = mkOption {
|
||||
type = types.str;
|
||||
description = "File containing host password for backplane.";
|
||||
example = "/path/to/secret.passwd";
|
||||
};
|
||||
|
||||
frequency = mkOption {
|
||||
type = types.str;
|
||||
description =
|
||||
"Frequency at which to report the local IP(s) to backplane.";
|
||||
default = "*:0/15";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
description =
|
||||
"User as which to run the client script (must have access to password file).";
|
||||
default = "backplane-dns-client";
|
||||
};
|
||||
|
||||
external-interface = mkOption {
|
||||
type = with types; nullOr str;
|
||||
description =
|
||||
"Interface with which this host communicates with the larger internet.";
|
||||
default = null;
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
|
||||
users.users = {
|
||||
"${cfg.user}" = {
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
home = "/var/home/${cfg.user}";
|
||||
};
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d /var/home 755 root - - -"
|
||||
"d /var/home/${cfg.user} 700 ${cfg.user} - - -"
|
||||
];
|
||||
|
||||
timers.backplane-dns-client = {
|
||||
enable = true;
|
||||
description = "Report local IP addresses to Fudo backplane.";
|
||||
partOf = [ "backplane-dns-client.service" ];
|
||||
wantedBy = [ "timers.target" ];
|
||||
requires = [ "network-online.target" ];
|
||||
timerConfig = { OnCalendar = cfg.frequency; };
|
||||
};
|
||||
|
||||
services.backplane-dns-client-pw-file = {
|
||||
enable = true;
|
||||
requiredBy = [ "backplane-dns-client.service" ];
|
||||
reloadIfChanged = true;
|
||||
serviceConfig = { Type = "oneshot"; };
|
||||
script = ''
|
||||
chmod 400 ${cfg.password-file}
|
||||
chown ${cfg.user} ${cfg.password-file}
|
||||
'';
|
||||
};
|
||||
|
||||
services.backplane-dns-client = {
|
||||
enable = true;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
StandardOutput = "journal";
|
||||
User = cfg.user;
|
||||
ExecStart = pkgs.writeShellScript "start-backplane-dns-client.sh" ''
|
||||
${pkgs.backplane-dns-client}/bin/backplane-dns-client ${
|
||||
optionalString cfg.ipv4 "-4"
|
||||
} ${optionalString cfg.ipv6 "-6"} ${
|
||||
optionalString cfg.sshfp ssh-key-args
|
||||
} ${
|
||||
optionalString (cfg.external-interface != null)
|
||||
"--interface=${cfg.external-interface}"
|
||||
} --domain=${cfg.domain} --server=${cfg.server} --password-file=${cfg.password-file}
|
||||
'';
|
||||
};
|
||||
# Needed to generate SSH fingerprints
|
||||
path = [ pkgs.openssh ];
|
||||
reloadIfChanged = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
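A hedged example of the DynDNS client options above; the interface name and password path are hypothetical, the rest follow the declared defaults:

{
  fudo.client.dns = {
    ipv4 = true;
    ipv6 = true;
    sshfp = true;
    domain = "fudo.link";
    server = "backplane.fudo.org";
    password-file = "/path/to/secret.passwd";
    external-interface = "enp2s0";   # hypothetical interface
  };
}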
@@ -0,0 +1,5 @@
# General Fudo config, shared across packages
{ config, lib, pkgs, ... }:

with lib;
{ }
@@ -0,0 +1,49 @@
{ config, lib, pkgs, ... }:

with lib; {
  imports = [
    ./acme-certs.nix
    ./acme-for-hostname.nix
    ./authentication.nix
    ./backplane
    ./chat.nix
    ./client/dns.nix
    ./deploy.nix
    ./distributed-builds.nix
    ./dns.nix
    ./domains.nix
    ./garbage-collector.nix
    ./git.nix
    ./global.nix
    ./grafana.nix
    ./hosts.nix
    ./host-filesystems.nix
    ./initrd-network.nix
    ./ipfs.nix
    ./jabber.nix
    ./kdc.nix
    ./ldap.nix
    ./local-network.nix
    ./mail.nix
    ./mail-container.nix
    ./minecraft-server.nix
    ./netinfo-email.nix
    ./networks.nix
    ./node-exporter.nix
    ./nsd.nix
    ./password.nix
    ./postgres.nix
    ./prometheus.nix
    ./secrets.nix
    ./secure-dns-proxy.nix
    ./sites.nix
    ./slynk.nix
    ./ssh.nix
    ./system.nix
    ./system-networking.nix
    ./users.nix
    ./vpn.nix
    ./webmail.nix
    ./wireless-networks.nix
  ];
}
@@ -0,0 +1,13 @@
{ config, lib, pkgs, ... }:

with lib;
let
  site-cfg = config.fudo.sites.${config.instance.local-site};

in {
  config = {
    users.users.root.openssh.authorizedKeys.keys =
      mkIf (site-cfg.deploy-pubkeys != null)
        site-cfg.deploy-pubkeys;
  };
}
@ -0,0 +1,48 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
site-cfg = config.fudo.sites.${config.instance.local-site};
|
||||
|
||||
has-build-servers = (length (attrNames site-cfg.build-servers)) > 0;
|
||||
|
||||
build-keypair = config.fudo.secrets.host-secrets.${hostname}.build-keypair;
|
||||
|
||||
enable-distributed-builds =
|
||||
site-cfg.enable-distributed-builds && has-build-servers && build-keypair != null;
|
||||
|
||||
local-build-cfg = if (hasAttr hostname site-cfg.build-servers) then
|
||||
site-cfg.build-servers.${hostname}
|
||||
else null;
|
||||
|
||||
in {
|
||||
config = {
|
||||
nix = mkIf enable-distributed-builds {
|
||||
buildMachines = mapAttrsToList (hostname: buildOpts: {
|
||||
hostName = "${hostname}.${domain-name}";
|
||||
maxJobs = buildOpts.max-jobs;
|
||||
speedFactor = buildOpts.speed-factor;
|
||||
supportedFeatures = buildOpts.supportedFeatures;
|
||||
sshKey = build-keypair.private-key;
|
||||
sshUser = buildOpts.user;
|
||||
}) site-cfg.build-servers;
|
||||
distributedBuilds = true;
|
||||
|
||||
trustedUsers = mkIf (local-build-cfg != null) [
|
||||
local-build-cfg.build-user
|
||||
];
|
||||
};
|
||||
|
||||
users.users = mkIf (local-build-cfg != null) {
|
||||
${local-build-cfg.build-user} = {
|
||||
isSystemUser = true;
|
||||
openssh.authorizedKeys.keyFiles =
|
||||
concatLists
|
||||
(mapAttrsToList (host: hostOpts: hostOpts.build-pubkeys)
|
||||
config.instance.local-hosts);
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,178 @@
|
|||
{ lib, config, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.dns;
|
||||
|
||||
join-lines = concatStringsSep "\n";
|
||||
|
||||
domainOpts = { domain, ... }: {
|
||||
options = with types; {
|
||||
dnssec = mkOption {
|
||||
type = bool;
|
||||
description = "Enable DNSSEC security for this zone.";
|
||||
default = true;
|
||||
};
|
||||
|
||||
dmarc-report-address = mkOption {
|
||||
type = nullOr str;
|
||||
description = "The email to use to recieve DMARC reports, if any.";
|
||||
example = "admin-user@domain.com";
|
||||
default = null;
|
||||
};
|
||||
|
||||
network-definition = mkOption {
|
||||
type = submodule (import ../types/network-definition.nix);
|
||||
description = "Definition of network to be served by local server.";
|
||||
};
|
||||
|
||||
default-host = mkOption {
|
||||
type = str;
|
||||
description = "The host to which the domain should map by default.";
|
||||
};
|
||||
|
||||
mx = mkOption {
|
||||
type = listOf str;
|
||||
description = "The hosts which act as the domain mail exchange.";
|
||||
default = [];
|
||||
};
|
||||
|
||||
gssapi-realm = mkOption {
|
||||
type = nullOr str;
|
||||
description = "The GSSAPI realm of this domain.";
|
||||
default = null;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networkHostOpts = import ../types/network-host.nix { inherit lib; };
|
||||
|
||||
hostRecords = hostname: nethost-data: let
|
||||
# FIXME: RP doesn't work.
|
||||
# generic-host-records = let
|
||||
# host-data = if (hasAttr hostname config.fudo.hosts) then config.fudo.hosts.${hostname} else null;
|
||||
# in
|
||||
# if (host-data == null) then [] else (
|
||||
# (map (sshfp: "${hostname} IN SSHFP ${sshfp}") host-data.ssh-fingerprints) ++ (optional (host-data.rp != null) "${hostname} IN RP ${host-data.rp}")
|
||||
# );
|
||||
sshfp-records = if (hasAttr hostname config.fudo.hosts) then (map (sshfp: "${hostname} IN SSHFP ${sshfp}") config.fudo.hosts.${hostname}.ssh-fingerprints) else [];
|
||||
a-record = optional (nethost-data.ipv4-address != null) "${hostname} IN A ${nethost-data.ipv4-address}";
|
||||
aaaa-record = optional (nethost-data.ipv6-address != null) "${hostname} IN AAAA ${nethost-data.ipv6-address}";
|
||||
description-record = optional (nethost-data.description != null) "${hostname} IN TXT \"${nethost-data.description}\"";
|
||||
in
|
||||
join-lines (a-record ++ aaaa-record ++ description-record ++ sshfp-records);
|
||||
|
||||
makeSrvRecords = protocol: type: records:
|
||||
join-lines (map (record:
|
||||
"_${type}._${protocol} IN SRV ${toString record.priority} ${
|
||||
toString record.weight
|
||||
} ${toString record.port} ${toString record.host}.") records);
|
||||
|
||||
makeSrvProtocolRecords = protocol: types:
|
||||
join-lines (mapAttrsToList (makeSrvRecords protocol) types);
|
||||
|
||||
cnameRecord = alias: host: "${alias} IN CNAME ${host}";
|
||||
|
||||
mxRecords = mxs: concatStringsSep "\n" (map (mx: "@ IN MX 10 ${mx}.") mxs);
|
||||
|
||||
dmarcRecord = dmarc-email:
|
||||
optionalString (dmarc-email != null) ''
|
||||
_dmarc IN TXT "v=DMARC1;p=quarantine;sp=quarantine;rua=mailto:${dmarc-email};"'';
|
||||
|
||||
nsRecords = domain: ns-hosts:
|
||||
join-lines
|
||||
(mapAttrsToList (host: _: "@ IN NS ${host}.${domain}.") ns-hosts);
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.dns = with types; {
|
||||
enable = mkEnableOption "Enable master DNS services.";
|
||||
|
||||
# FIXME: This should allow for AAAA addresses too...
|
||||
nameservers = mkOption {
|
||||
type = attrsOf (submodule networkHostOpts);
|
||||
description = "Map of domain nameserver FQDNs to IP.";
|
||||
example = {
|
||||
"ns1.domain.com" = {
|
||||
ipv4-address = "1.1.1.1";
|
||||
description = "my fancy dns server";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
identity = mkOption {
|
||||
type = str;
|
||||
description = "The identity (CH TXT ID.SERVER) of this host.";
|
||||
};
|
||||
|
||||
domains = mkOption {
|
||||
type = attrsOf (submodule domainOpts);
|
||||
default = { };
|
||||
description = "A map of domain to domain options.";
|
||||
};
|
||||
|
||||
listen-ips = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of IPs on which to listen for DNS queries.";
|
||||
example = [ "1.2.3.4" ];
|
||||
};
|
||||
|
||||
state-directory = mkOption {
|
||||
type = str;
|
||||
description = "Path at which to store nameserver state, including DNSSEC keys.";
|
||||
default = "/var/lib/nsd";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [ 53 ];
|
||||
allowedUDPPorts = [ 53 ];
|
||||
};
|
||||
|
||||
fudo.nsd = {
|
||||
enable = true;
|
||||
identity = cfg.identity;
|
||||
interfaces = cfg.listen-ips;
|
||||
stateDir = cfg.state-directory;
|
||||
zones = mapAttrs' (dom: dom-cfg: let
|
||||
net-cfg = dom-cfg.network-definition;
|
||||
in nameValuePair "${dom}." {
|
||||
dnssec = dom-cfg.dnssec;
|
||||
|
||||
data = ''
|
||||
$ORIGIN ${dom}.
|
||||
$TTL 12h
|
||||
|
||||
@ IN SOA ns1.${dom}. hostmaster.${dom}. (
|
||||
${toString config.instance.build-timestamp}
|
||||
30m
|
||||
2m
|
||||
3w
|
||||
5m)
|
||||
|
||||
${optionalString (dom-cfg.default-host != null)
|
||||
"@ IN A ${dom-cfg.default-host}"}
|
||||
|
||||
${mxRecords dom-cfg.mx}
|
||||
|
||||
$TTL 6h
|
||||
|
||||
${optionalString (dom-cfg.gssapi-realm != null)
|
||||
''_kerberos IN TXT "${dom-cfg.gssapi-realm}"''}
|
||||
|
||||
${nsRecords dom cfg.nameservers}
|
||||
${join-lines (mapAttrsToList hostRecords cfg.nameservers)}
|
||||
|
||||
${dmarcRecord dom-cfg.dmarc-report-address}
|
||||
|
||||
${join-lines
|
||||
(mapAttrsToList makeSrvProtocolRecords net-cfg.srv-records)}
|
||||
${join-lines (mapAttrsToList hostRecords net-cfg.hosts)}
|
||||
${join-lines (mapAttrsToList cnameRecord net-cfg.aliases)}
|
||||
${join-lines net-cfg.verbatim-dns-records}
|
||||
'';
|
||||
}) cfg.domains;
|
||||
};
|
||||
};
|
||||
}
|
|
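A sketch of a master DNS configuration using the options above; addresses reuse the declared examples, and the required network-definition is left as a hypothetical import:

{
  fudo.dns = {
    enable = true;
    identity = "ns1.domain.com";
    listen-ips = [ "1.2.3.4" ];
    nameservers."ns1.domain.com" = {
      ipv4-address = "1.1.1.1";
      description = "my fancy dns server";
    };
    domains."domain.com" = {
      default-host = "1.2.3.4";
      mx = [ "mail.domain.com" ];
      dmarc-report-address = "admin-user@domain.com";
      # network-definition = import ./networks/domain-com.nix;   # hypothetical
    };
  };
}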
@ -0,0 +1,69 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
domain = config.instance.local-domain;
|
||||
cfg = config.fudo.domains.${domain};
|
||||
|
||||
served-domain = cfg.primary-nameserver != null;
|
||||
|
||||
is-primary = hostname == cfg.primary-nameserver;
|
||||
|
||||
create-srv-record = port: hostname: {
|
||||
port = port;
|
||||
host = hostname;
|
||||
};
|
||||
|
||||
in {
|
||||
config = {
|
||||
fudo.dns = mkIf is-primary (let
|
||||
primary-ip = pkgs.lib.fudo.network.host-ipv4 config hostname;
|
||||
all-ips = pkgs.lib.fudo.network.host-ips config hostname;
|
||||
in {
|
||||
enable = true;
|
||||
identity = "${hostname}.${domain}";
|
||||
nameservers = {
|
||||
ns1 = {
|
||||
ipv4-address = primary-ip;
|
||||
description = "Primary ${domain} nameserver";
|
||||
};
|
||||
};
|
||||
|
||||
# Deliberately leaving out localhost so the primary nameserver
|
||||
# can use a custom recursor
|
||||
listen-ips = all-ips;
|
||||
|
||||
domains = {
|
||||
${domain} = {
|
||||
dnssec = true;
|
||||
default-host = primary-ip;
|
||||
gssapi-realm = cfg.gssapi-realm;
|
||||
mx = optional (cfg.primary-mailserver != null)
|
||||
cfg.primary-mailserver;
|
||||
# TODO: there's no guarantee this exists...
|
||||
dmarc-report-address = "dmarc-report@${domain}";
|
||||
|
||||
network-definition = let
|
||||
network = config.fudo.networks.${domain};
|
||||
in network // {
|
||||
srv-records = {
|
||||
tcp = {
|
||||
domain = [{
|
||||
host = "ns1.${domain}";
|
||||
port = 53;
|
||||
}];
|
||||
};
|
||||
udp = {
|
||||
domain = [{
|
||||
host = "ns1.${domain}";
|
||||
port = 53;
|
||||
}];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
domain = config.instance.local-domain;
|
||||
cfg = config.fudo.domains.${domain};
|
||||
|
||||
in {
|
||||
config = let
|
||||
hostname = config.instance.hostname;
|
||||
is-master = hostname == cfg.kerberos-master;
|
||||
is-slave = elem hostname cfg.kerberos-slaves;
|
||||
|
||||
kerberized-domain = cfg.kerberos-master != null;
|
||||
|
||||
in {
|
||||
fudo = {
|
||||
auth.kdc = mkIf (is-master || is-slave) {
|
||||
enable = true;
|
||||
realm = cfg.gssapi-realm;
|
||||
# TODO: Also bind to ::1?
|
||||
bind-addresses =
|
||||
(pkgs.lib.fudo.network.host-ips config hostname) ++
|
||||
[ "127.0.0.1" ] ++ (optional config.networking.enableIPv6 "::1");
|
||||
master-config = mkIf is-master {
|
||||
acl = let
|
||||
admin-entries = genAttrs cfg.local-admins
|
||||
(admin: {
|
||||
perms = [ "add" "change-password" "list" ];
|
||||
});
|
||||
in admin-entries // {
|
||||
"*/root" = { perms = [ "all" ]; };
|
||||
};
|
||||
};
|
||||
slave-config = mkIf is-slave {
|
||||
master-host = cfg.kerberos-master;
|
||||
# You gotta provide the keytab yourself, sorry...
|
||||
};
|
||||
};
|
||||
|
||||
dns.domains.${domain} = {
|
||||
network-definition = mkIf kerberized-domain {
|
||||
srv-records = let
|
||||
get-fqdn = hostname:
|
||||
"${hostname}.${config.fudo.hosts.${hostname}.domain}";
|
||||
|
||||
create-srv-record = port: hostname: {
|
||||
port = port;
|
||||
host = hostname;
|
||||
};
|
||||
|
||||
all-servers = map get-fqdn
|
||||
([cfg.kerberos-master] ++ cfg.kerberos-slaves);
|
||||
|
||||
master-servers =
|
||||
map get-fqdn [cfg.kerberos-master];
|
||||
|
||||
in {
|
||||
tcp = {
|
||||
kerberos = map (create-srv-record 88) all-servers;
|
||||
kerberos-adm = map (create-srv-record 749) master-servers;
|
||||
};
|
||||
udp = {
|
||||
kerberos = map (create-srv-record 88) all-servers;
|
||||
kerberos-master = map (create-srv-record 88) master-servers;
|
||||
kpasswd = map (create-srv-record 464) master-servers;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
domain = config.instance.local-domain;
|
||||
|
||||
domainOpts = { name, ... }: let
|
||||
domain = name;
|
||||
in {
|
||||
options = with types; {
|
||||
domain = mkOption {
|
||||
type = str;
|
||||
description = "Domain name.";
|
||||
default = domain;
|
||||
};
|
||||
|
||||
local-networks = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of networks to be considered trusted on this network.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
local-users = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of users who should have local (i.e. login) access to _all_ hosts in this domain.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
local-admins = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of users who should have admin access to _all_ hosts in this domain.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
local-groups = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of groups which should exist within this domain.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
admin-email = mkOption {
|
||||
type = str;
|
||||
description = "Email for the administrator of this domain.";
|
||||
default = "admin@${domain}";
|
||||
};
|
||||
|
||||
gssapi-realm = mkOption {
|
||||
type = str;
|
||||
description = "GSSAPI (i.e. Kerberos) realm of this domain.";
|
||||
default = toUpper domain;
|
||||
};
|
||||
|
||||
kerberos-master = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Hostname of the Kerberos master server for the domain, if applicable.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
kerberos-slaves = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of hosts acting as Kerberos slaves for the domain.";
|
||||
default = [];
|
||||
};
|
||||
|
||||
primary-nameserver = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Hostname of the primary nameserver for this domain.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
primary-mailserver = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Hostname of the primary mail server for this domain.";
|
||||
default = null;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.domains = mkOption {
|
||||
type = with types; attrsOf (submodule domainOpts);
|
||||
description = "Domain configurations for all domains known to the system.";
|
||||
default = { };
|
||||
};
|
||||
|
||||
imports = [
|
||||
./domain/kerberos.nix
|
||||
./domain/dns.nix
|
||||
];
|
||||
}
|
|
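A hedged example of a domain entry using the options above; the usernames and hostnames are hypothetical:

{
  fudo.domains."fudo.org" = {
    local-admins = [ "alice" ];
    local-users = [ "alice" "bob" ];
    kerberos-master = "kdc0";
    kerberos-slaves = [ "kdc1" ];
    primary-nameserver = "ns0";
    primary-mailserver = "mail0";
  };
}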
@@ -0,0 +1,35 @@
{ config, lib, pkgs, ... }:

with lib;
let cfg = config.fudo.garbage-collector;

in {

  options.fudo.garbage-collector = {
    enable = mkEnableOption "Enable periodic NixOS garbage collection";

    timing = mkOption {
      type = types.str;
      default = "weekly";
      description =
        "Period (systemd format) at which to run garbage collector.";
    };

    age = mkOption {
      type = types.str;
      default = "30d";
      description = "Age of garbage to collect (e.g. 30d).";
    };
  };

  config = mkIf cfg.enable {
    fudo.system.services.fudo-garbage-collector = {
      description = "Collect NixOS garbage older than ${cfg.age}.";
      onCalendar = cfg.timing;
      type = "oneshot";
      script =
        "${pkgs.nix}/bin/nix-collect-garbage --delete-older-than ${cfg.age}";
      addressFamilies = [ ];
    };
  };
}
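Enabling the collector is then a short sketch; the values shown are simply the declared defaults:

{
  fudo.garbage-collector = {
    enable = true;
    timing = "weekly";
    age = "30d";
  };
}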
@ -0,0 +1,171 @@
|
|||
{ pkgs, lib, config, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.git;
|
||||
|
||||
databaseOpts = { ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
description = "Database name.";
|
||||
};
|
||||
hostname = mkOption {
|
||||
type = types.str;
|
||||
description = "Hostname of the database server.";
|
||||
};
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
description = "Database username.";
|
||||
};
|
||||
password-file = mkOption {
|
||||
type = types.path;
|
||||
description = "File containing the database user's password.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
sshOpts = { ... }:
|
||||
with types; {
|
||||
options = {
|
||||
listen-ip = mkOption {
|
||||
type = str;
|
||||
description = "IP on which to listen for SSH connections.";
|
||||
};
|
||||
|
||||
listen-port = mkOption {
|
||||
type = port;
|
||||
description =
|
||||
"Port on which to listen for SSH connections, on <listen-ip>.";
|
||||
default = 22;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.git = with types; {
|
||||
enable = mkEnableOption "Enable Fudo git web server.";
|
||||
|
||||
hostname = mkOption {
|
||||
type = str;
|
||||
description = "Hostname at which this git server is accessible.";
|
||||
example = "git.fudo.org";
|
||||
};
|
||||
|
||||
site-name = mkOption {
|
||||
type = str;
|
||||
description = "Name to use for the git server.";
|
||||
default = "Fudo Git";
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = (submodule databaseOpts);
|
||||
description = "Gitea database options.";
|
||||
};
|
||||
|
||||
repository-dir = mkOption {
|
||||
type = str;
|
||||
description = "Path at which to store repositories.";
|
||||
example = "/srv/git/repo";
|
||||
};
|
||||
|
||||
state-dir = mkOption {
|
||||
type = str;
|
||||
description = "Path at which to store server state.";
|
||||
example = "/srv/git/state";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = with types; nullOr str;
|
||||
description = "System user as which to run.";
|
||||
default = "git";
|
||||
};
|
||||
|
||||
local-port = mkOption {
|
||||
type = port;
|
||||
description =
|
||||
"Local port to which the Gitea server will bind. Not globally accessible.";
|
||||
default = 3543;
|
||||
};
|
||||
|
||||
ssh = mkOption {
|
||||
type = nullOr (submodule sshOpts);
|
||||
description = "SSH listen configuration.";
|
||||
default = null;
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
security.acme.certs.${cfg.hostname}.email =
|
||||
let domain-name = config.fudo.hosts.${config.instance.hostname}.domain;
|
||||
in config.fudo.domains.${domain-name}.admin-email;
|
||||
|
||||
networking.firewall.allowedTCPPorts =
|
||||
mkIf (cfg.ssh != null) [ cfg.ssh.listen-port ];
|
||||
|
||||
environment.systemPackages = with pkgs; let
|
||||
gitea-admin = writeShellScriptBin "gitea-admin" ''
|
||||
TMP=$(mktemp -d /tmp/gitea-XXXXXXXX)
|
||||
${gitea}/bin/gitea --custom-path ${cfg.state-dir}/custom --config ${cfg.state-dir}/custom/conf/app.ini --work-path $TMP $@
|
||||
'';
|
||||
in [
|
||||
gitea-admin
|
||||
];
|
||||
|
||||
services = {
|
||||
gitea = {
|
||||
enable = true;
|
||||
appName = cfg.site-name;
|
||||
database = {
|
||||
createDatabase = false;
|
||||
host = cfg.database.hostname;
|
||||
name = cfg.database.name;
|
||||
user = cfg.database.user;
|
||||
passwordFile = cfg.database.password-file;
|
||||
type = "postgres";
|
||||
};
|
||||
domain = cfg.hostname;
|
||||
httpAddress = "127.0.0.1";
|
||||
httpPort = cfg.local-port;
|
||||
repositoryRoot = cfg.repository-dir;
|
||||
stateDir = cfg.state-dir;
|
||||
rootUrl = "https://${cfg.hostname}/";
|
||||
user = mkIf (cfg.user != null) cfg.user;
|
||||
ssh = {
|
||||
enable = true;
|
||||
clonePort = cfg.ssh.listen-port;
|
||||
};
|
||||
settings = mkIf (cfg.ssh != null) {
|
||||
server = {
|
||||
SSH_DOMAIN = cfg.hostname;
|
||||
SSH_LISTEN_PORT = cfg.ssh.listen-port;
|
||||
SSH_LISTEN_HOST = cfg.ssh.listen-ip;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
nginx = {
|
||||
enable = true;
|
||||
|
||||
virtualHosts = {
|
||||
"${cfg.hostname}" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
locations."/" = {
|
||||
proxyPass = "http://127.0.0.1:${toString cfg.local-port}";
|
||||
|
||||
extraConfig = ''
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-By $server_addr:$server_port;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
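A sketch of a Gitea deployment through this module; the database host, credentials, and SSH listen IP are hypothetical:

{
  fudo.git = {
    enable = true;
    hostname = "git.fudo.org";
    site-name = "Fudo Git";
    repository-dir = "/srv/git/repo";
    state-dir = "/srv/git/state";
    database = {
      name = "gitea";                               # hypothetical
      hostname = "db.fudo.org";                     # hypothetical
      user = "gitea";
      password-file = "/run/keys/gitea-db.passwd";  # hypothetical
    };
    ssh = {
      listen-ip = "1.2.3.4";                        # hypothetical
      listen-port = 22;
    };
  };
}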
@@ -0,0 +1,5 @@
{ config, lib, pkgs, ... }:

with lib; {
  config = { };
}
@ -0,0 +1,143 @@
|
|||
# NOTE: this assumes that postgres is running locally.
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.grafana;
|
||||
fudo-cfg = config.fudo.common;
|
||||
|
||||
database-name = "grafana";
|
||||
database-user = "grafana";
|
||||
|
||||
databaseOpts = { ... }: {
|
||||
options = {
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
description = "Database name.";
|
||||
};
|
||||
hostname = mkOption {
|
||||
type = types.str;
|
||||
description = "Hostname of the database server.";
|
||||
};
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
description = "Database username.";
|
||||
};
|
||||
password-file = mkOption {
|
||||
type = types.path;
|
||||
description = "File containing the database user's password.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.grafana = {
|
||||
enable = mkEnableOption "Fudo Metrics Display Service";
|
||||
|
||||
hostname = mkOption {
|
||||
type = types.str;
|
||||
description = "Grafana site hostname.";
|
||||
example = "fancy-graphs.fudo.org";
|
||||
};
|
||||
|
||||
smtp-username = mkOption {
|
||||
type = types.str;
|
||||
description = "Username with which to send email.";
|
||||
};
|
||||
|
||||
smtp-password-file = mkOption {
|
||||
type = types.path;
|
||||
description = "Path to a file containing the email user's password.";
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = (types.submodule databaseOpts);
|
||||
description = "Grafana database configuration.";
|
||||
};
|
||||
|
||||
admin-password-file = mkOption {
|
||||
type = types.path;
|
||||
description = "Path to a file containing the admin user's password.";
|
||||
};
|
||||
|
||||
secret-key-file = mkOption {
|
||||
type = types.path;
|
||||
description = "Path to a file containing the server's secret key, used for signatures.";
|
||||
};
|
||||
|
||||
prometheus-host = mkOption {
|
||||
type = types.str;
|
||||
description = "The URL of the prometheus data source.";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
security.acme.certs.${cfg.hostname}.email = fudo-cfg.admin-email;
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
|
||||
virtualHosts = {
|
||||
"${cfg.hostname}" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
locations."/" = {
|
||||
proxyPass = "http://127.0.0.1:3000";
|
||||
|
||||
extraConfig = ''
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-By $server_addr:$server_port;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.grafana = {
|
||||
enable = true;
|
||||
|
||||
addr = "127.0.0.1";
|
||||
protocol = "http";
|
||||
port = 3000;
|
||||
domain = "${cfg.hostname}";
|
||||
rootUrl = "https://${cfg.hostname}/";
|
||||
|
||||
security = {
|
||||
adminPasswordFile = cfg.admin-password-file;
|
||||
secretKeyFile = cfg.secret-key-file;
|
||||
};
|
||||
|
||||
smtp = {
|
||||
enable = true;
|
||||
fromAddress = "metrics@fudo.org";
|
||||
host = "mail.fudo.org:25";
|
||||
user = cfg.smtp-username;
|
||||
passwordFile = cfg.smtp-password-file;
|
||||
};
|
||||
|
||||
database = {
|
||||
host = cfg.database.hostname;
|
||||
name = cfg.database.name;
|
||||
user = cfg.database.user;
|
||||
passwordFile = cfg.database.password-file;
|
||||
type = "postgres";
|
||||
};
|
||||
|
||||
provision.datasources = [
|
||||
{
|
||||
editable = false;
|
||||
isDefault = true;
|
||||
name = cfg.prometheus-host;
|
||||
type = "prometheus";
|
||||
url = "https://${cfg.prometheus-host}/";
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
host-filesystems = config.fudo.hosts.${hostname}.encrypted-filesystems;
|
||||
|
||||
optionalOrDefault = str: default: if (str != null) then str else default;
|
||||
|
||||
filesystemsToMountpointLists = mapAttrsToList
|
||||
(fs: fsOpts: fsOpts.mountpoints);
|
||||
|
||||
concatMapAttrs = f: as: concatMap (i: i) (mapAttrsToList f as);
|
||||
|
||||
concatMapAttrsToList = f: attrs:
|
||||
concatMap (i: i) (mapAttrsToList f attrs);
|
||||
|
||||
in {
|
||||
config = {
|
||||
users.groups = let
|
||||
site-name = config.instance.local-site;
|
||||
site-hosts = filterAttrs
|
||||
(hostname: hostOpts: hostOpts.site == site-name)
|
||||
config.fudo.hosts;
|
||||
site-mountpoints = concatMapAttrsToList
|
||||
(host: hostOpts: concatMapAttrsToList
|
||||
(fs: fsOpts: attrValues fsOpts.mountpoints)
|
||||
hostOpts.encrypted-filesystems)
|
||||
site-hosts;
|
||||
in listToAttrs
|
||||
(map (mp: nameValuePair mp.group { members = mp.users; })
|
||||
site-mountpoints);
|
||||
|
||||
systemd = {
|
||||
# Ensure the mountpoints exist
|
||||
tmpfiles.rules = let
|
||||
mpPerms = mpOpts: if mpOpts.world-readable then "755" else "750";
|
||||
mountpointToPath = mp: mpOpts:
|
||||
"d '${mp}' ${mpPerms mpOpts} root ${optionalOrDefault mpOpts.group "-"} - -";
|
||||
filesystemsToMountpointLists = mapAttrsToList
|
||||
(fs: fsOpts: fsOpts.mountpoints);
|
||||
mountpointListsToPaths = concatMap
|
||||
(mps: mapAttrsToList mountpointToPath mps);
|
||||
in mountpointListsToPaths (filesystemsToMountpointLists host-filesystems);
|
||||
|
||||
# Actual mounts of decrypted filesystems
|
||||
mounts = let
|
||||
filesystems = mapAttrsToList
|
||||
(fs: opts: { filesystem = fs; opts = opts; })
|
||||
host-filesystems;
|
||||
|
||||
mounts = concatMap
|
||||
(fs: mapAttrsToList
|
||||
(mp: mp-opts:
|
||||
{
|
||||
what = "/dev/mapper/${fs.filesystem}";
|
||||
type = fs.opts.filesystem-type;
|
||||
where = mp;
|
||||
options = concatStringsSep "," (fs.opts.options ++ mp-opts.options);
|
||||
description = "${fs.opts.filesystem-type} filesystem on ${fs.filesystem} mounted to ${mp}";
|
||||
requires = [ "${fs.filesystem}-decrypt.service" ];
|
||||
partOf = [ "${fs.filesystem}.target" ];
|
||||
wantedBy = [ "${fs.filesystem}.target" ];
|
||||
})
|
||||
fs.opts.mountpoints)
|
||||
filesystems;
|
||||
in mounts;
|
||||
|
||||
# Jobs to decrypt the encrypted devices
|
||||
services = mapAttrs' (filesystem-name: opts:
|
||||
nameValuePair "${filesystem-name}-decrypt"
|
||||
{
|
||||
description = "Decrypt the ${filesystem-name} filesystem when the key is available at ${opts.key-path}";
|
||||
path = with pkgs; [ cryptsetup ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStart = pkgs.writeShellScript "decrypt-${filesystem-name}.sh" ''
|
||||
[ -e /dev/mapper/${filesystem-name} ] || cryptsetup open --type luks --key-file ${opts.key-path} ${opts.encrypted-device} ${filesystem-name}
|
||||
'';
|
||||
ExecStartPost = pkgs.writeShellScript "remove-${filesystem-name}-key.sh" ''
|
||||
rm ${opts.key-path}
|
||||
'';
|
||||
ExecStop = pkgs.writeShellScript "close-${filesystem-name}.sh" ''
|
||||
cryptsetup close /dev/mapper/${filesystem-name}
|
||||
'';
|
||||
};
|
||||
restartIfChanged = true;
|
||||
})
|
||||
host-filesystems;
|
||||
|
||||
# Watch the path of the key, trigger decrypt when it's available
|
||||
paths = let
|
||||
decryption-jobs = mapAttrs' (filesystem-name: opts:
|
||||
nameValuePair "${filesystem-name}-decrypt"
|
||||
{
|
||||
wantedBy = [ "default.target" ];
|
||||
description = "Watch for decryption key, then decrypt the target filesystem.";
|
||||
pathConfig = {
|
||||
PathExists = opts.key-path;
|
||||
Unit = "${filesystem-name}-decrypt.service";
|
||||
};
|
||||
}) host-filesystems;
|
||||
|
||||
post-decryption-jobs = mapAttrs' (filesystem-name: opts:
|
||||
nameValuePair "${filesystem-name}-mount"
|
||||
{
|
||||
wantedBy = [ "default.target" ];
|
||||
description = "Mount ${filesystem-name} filesystems once the decrypted device is available.";
|
||||
pathConfig = {
|
||||
PathExists = "/dev/mapper/${filesystem-name}";
|
||||
Unit = "${filesystem-name}.target";
|
||||
};
|
||||
}) host-filesystems;
|
||||
in decryption-jobs // post-decryption-jobs;
|
||||
|
||||
targets = mapAttrs (filesystem-name: opts:
|
||||
{
|
||||
description = "${filesystem-name} enabled and available.";
|
||||
}) host-filesystems;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,127 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
mapOptional = f: val: if (val != null) then (f val) else null;
|
||||
|
||||
host = import ../types/host.nix { inherit lib; };
|
||||
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
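# Derivation whose output is the SHA-512 crypt hash (openssl passwd -6) of the given
# string; used below to expose a hash of the build seed in /etc.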
generate-string-hash = name: str: let
|
||||
string-hash-pkg = pkgs.stdenv.mkDerivation {
|
||||
name = "${name}-string-hash";
|
||||
phases = "installPhase";
|
||||
buildInputs = [ pkgs.openssl ];
|
||||
installPhase = "openssl passwd -6 ${str} > $out";
|
||||
};
|
||||
in string-hash-pkg;
|
||||
|
||||
in {
|
||||
options.fudo.hosts = with types;
|
||||
mkOption {
|
||||
type = attrsOf (submodule host.hostOpts);
|
||||
description = "Host configurations for all hosts known to the system.";
|
||||
default = { };
|
||||
};
|
||||
|
||||
config = let
|
||||
hostname = config.instance.hostname;
|
||||
host-cfg = config.fudo.hosts.${hostname};
|
||||
site-name = host-cfg.site;
|
||||
site = config.fudo.sites.${site-name};
|
||||
domain-name = host-cfg.domain;
|
||||
domain = config.fudo.domains.${domain-name};
|
||||
has-build-servers = (length (attrNames site.build-servers)) > 0;
|
||||
has-build-keys = (length host-cfg.build-pubkeys) > 0;
|
||||
|
||||
in {
|
||||
security.sudo.extraConfig = ''
|
||||
# I get it, I get it
|
||||
Defaults lecture = never
|
||||
'';
|
||||
|
||||
networking = {
|
||||
hostName = config.instance.hostname;
|
||||
domain = domain-name;
|
||||
nameservers = site.nameservers;
|
||||
# This will cause a loop on the gateway itself
|
||||
#defaultGateway = site.gateway-v4;
|
||||
#defaultGateway6 = site.gateway-v6;
|
||||
|
||||
firewall = mkIf ((length host-cfg.external-interfaces) > 0) {
|
||||
enable = true;
|
||||
allowedTCPPorts = [ 22 2112 ]; # Make sure _at least_ SSH is allowed
|
||||
trustedInterfaces = let
|
||||
all-interfaces = attrNames config.networking.interfaces;
|
||||
in subtractLists host-cfg.external-interfaces all-interfaces;
|
||||
};
|
||||
|
||||
hostId = mkIf (host-cfg.machine-id != null)
|
||||
(substring 0 8 host-cfg.machine-id);
|
||||
};
|
||||
|
||||
environment = {
|
||||
etc = {
|
||||
# NixOS generates a stupid hosts file, just force it
|
||||
hosts = let
|
||||
host-entries = mapAttrsToList
|
||||
(ip: hostnames: "${ip} ${concatStringsSep " " hostnames}")
|
||||
config.fudo.system.hostfile-entries;
|
||||
in mkForce {
|
||||
text = ''
|
||||
127.0.0.1 ${hostname}.${domain-name} ${hostname} localhost
|
||||
127.0.0.2 ${hostname} localhost
|
||||
::1 ${hostname}.${domain-name} ${hostname} localhost
|
||||
${concatStringsSep "\n" host-entries}
|
||||
'';
|
||||
user = "root";
|
||||
group = "root";
|
||||
mode = "0444";
|
||||
};
|
||||
|
||||
machine-id = mkIf (host-cfg.machine-id != null) {
|
||||
text = host-cfg.machine-id;
|
||||
user = "root";
|
||||
group = "root";
|
||||
mode = "0444";
|
||||
};
|
||||
|
||||
current-system-packages.text = with builtins; let
|
||||
packages = map (p: "${p.name}")
|
||||
config.environment.systemPackages;
|
||||
sorted-unique = sort lessThan (unique packages);
|
||||
in concatStringsSep "\n" sorted-unique;
|
||||
|
||||
build-timestamp.text = toString config.instance.build-timestamp;
|
||||
build-seed-hash.source =
|
||||
generate-string-hash "build-seed" config.instance.build-seed;
|
||||
};
|
||||
|
||||
systemPackages = with pkgs;
|
||||
mkIf (host-cfg.docker-server) [ docker nix-prefetch-docker ];
|
||||
};
|
||||
|
||||
time.timeZone = site.timezone;
|
||||
|
||||
krb5.libdefaults.default_realm = domain.gssapi-realm;
|
||||
|
||||
services = {
|
||||
cron.mailto = domain.admin-email;
|
||||
fail2ban.ignoreIP = config.instance.local-networks;
|
||||
};
|
||||
|
||||
virtualisation.docker = mkIf (host-cfg.docker-server) {
|
||||
enable = true;
|
||||
enableOnBoot = true;
|
||||
autoPrune.enable = true;
|
||||
};
|
||||
|
||||
programs.adb.enable = host-cfg.android-dev;
|
||||
users.groups.adbusers = mkIf host-cfg.android-dev {
|
||||
members = config.instance.local-admins;
|
||||
};
|
||||
|
||||
boot.tmpOnTmpfs = host-cfg.tmp-on-tmpfs;
|
||||
};
|
||||
}
|
|
@ -0,0 +1,143 @@
|
|||
# THROW THIS AWAY, NOT USED
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.hosts.local-network;
|
||||
|
||||
# FIXME: this isn't used, is it?
|
||||
gatewayServerOpts = { ... }: {
|
||||
options = {
|
||||
enable = mkEnableOption "Turn this host into a network gateway.";
|
||||
|
||||
internal-interfaces = mkOption {
|
||||
type = with types; listOf str;
|
||||
description =
|
||||
"List of internal interfaces from which to forward traffic.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
external-interface = mkOption {
|
||||
type = types.str;
|
||||
description =
|
||||
"Interface facing public internet, to which traffic is forwarded.";
|
||||
};
|
||||
|
||||
external-tcp-ports = mkOption {
|
||||
type = with types; listOf port;
|
||||
description = "List of TCP ports to open to the outside world.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
external-udp-ports = mkOption {
|
||||
type = with types; listOf port;
|
||||
description = "List of UDP ports to open to the outside world.";
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dnsOverHttpsProxy = {
|
||||
options = {
|
||||
enable = mkEnableOption "Enable a DNS-over-HTTPS proxy server.";
|
||||
|
||||
listen-port = mkOption {
|
||||
type = types.port;
|
||||
description = "Port on which to listen for DNS requests.";
|
||||
default = 53;
|
||||
};
|
||||
|
||||
upstream-dns = mkOption {
|
||||
type = with types; listOf str;
|
||||
description = "List of DoH DNS servers to use for recursion.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
bootstrap-dns = mkOption {
|
||||
type = types.str;
|
||||
description = "DNS server used to bootstrap the proxy server.";
|
||||
default = "1.1.1.1";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networkDhcpServerOpts = {
|
||||
options = {
|
||||
enable = mkEnableOption "Enable local DHCP server.";
|
||||
|
||||
dns-servers = mkOption {
|
||||
type = with types; listOf str;
|
||||
description = "List of DNS servers for clients to use.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
listen-interfaces = mkOption {
|
||||
type = with types; listOf str;
|
||||
description = "List of interfaces on which to serve DHCP requests.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
server-ip = mkOption {
|
||||
type = types.str;
|
||||
description = "IP address of the server host.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networkServerOpts = {
|
||||
options = {
|
||||
enable = mkEnableOption "Enable local networking server (DNS & DHCP).";
|
||||
|
||||
domain = mkOption {
|
||||
type = types.str;
|
||||
description = "Local network domain which this host will serve.";
|
||||
};
|
||||
|
||||
dns-listen-addrs = mkOption {
|
||||
type = with types; listOf str;
|
||||
description = "List of IP addresses on which to listen for requests.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
dhcp = mkOption {
|
||||
type = types.submodule networkDhcpServerOpts;
|
||||
description = "Local DHCP server options.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.hosts.local-network = with types; {
|
||||
recursive-resolvers = mkOption {
|
||||
type = listOf str;
|
||||
description = "DNS server to use for recursive lookups.";
|
||||
example = "1.2.3.4 port 53";
|
||||
};
|
||||
|
||||
gateway-server = mkOption {
|
||||
type = submodule gatewayServerOpts;
|
||||
description = "Gateway server options.";
|
||||
};
|
||||
|
||||
dns-over-https-proxy = mkOption {
|
||||
type = submodule dnsOverHttpsProxy;
|
||||
description = "DNS-over-HTTPS proxy server.";
|
||||
};
|
||||
|
||||
networkServerOpts = mkOption {
|
||||
type = submodule networkServerOpts;
|
||||
description = "Networking (DNS & DHCP) server for a local network.";
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
fudo.secure-dns-proxy = mkIf cfg.dns-over-https-proxy.enable {
|
||||
enable = true;
|
||||
port = cfg.dns-over-https-proxy.listen-port;
|
||||
upstream-dns = cfg.dns-over-https-proxy.upstream-dns;
|
||||
bootstrap-dns = cfg.dns-over-https-proxy.bootstrap-dns;
|
||||
listen-ips = cfg.dns-over-https-proxy.listen-ips;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,117 @@
|
|||
lib: site: config: version:
|
||||
with lib;
|
||||
let
|
||||
db-config = optionalString (config.database != null)
|
||||
''
|
||||
type = "${config.database.type}"
|
||||
pdo_dsn = "${config.database.type}:host=${config.database.hostname};port=${toString config.database.port};dbname=${config.database.name}"
|
||||
pdo_user = "${config.database.user}"
|
||||
pdo_password = "${fileContents config.database.password-file}"
|
||||
'';
|
||||
|
||||
in ''
|
||||
[webmail]
|
||||
title = "${config.title}"
|
||||
loading_description = "${config.title}"
|
||||
favicon_url = "https://${site}/favicon.ico"
|
||||
theme = "${config.theme}"
|
||||
allow_themes = On
|
||||
allow_user_background = Off
|
||||
language = "en"
|
||||
language_admin = "en"
|
||||
allow_languages_on_settings = On
|
||||
allow_additional_accounts = On
|
||||
allow_additional_identities = On
|
||||
messages_per_page = ${toString config.messages-per-page}
|
||||
attachment_size_limit = ${toString config.max-upload-size}
|
||||
|
||||
[interface]
|
||||
show_attachment_thumbnail = On
|
||||
new_move_to_folder_button = On
|
||||
|
||||
[branding]
|
||||
|
||||
[contacts]
|
||||
enable = On
|
||||
allow_sync = On
|
||||
sync_interval = 20
|
||||
suggestions_limit = 10
|
||||
${db-config}
|
||||
|
||||
[security]
|
||||
csrf_protection = On
|
||||
custom_server_signature = "RainLoop"
|
||||
x_frame_options_header = ""
|
||||
openpgp = On
|
||||
|
||||
admin_login = "admin"
|
||||
admin_password = ""
|
||||
allow_admin_panel = Off
|
||||
allow_two_factor_auth = On
|
||||
force_two_factor_auth = Off
|
||||
hide_x_mailer_header = Off
|
||||
admin_panel_host = ""
|
||||
admin_panel_key = "admin"
|
||||
content_security_policy = ""
|
||||
core_install_access_domain = ""
|
||||
|
||||
[login]
|
||||
default_domain = "${config.domain}"
|
||||
allow_languages_on_login = On
|
||||
determine_user_language = On
|
||||
determine_user_domain = Off
|
||||
welcome_page = Off
|
||||
hide_submit_button = On
|
||||
|
||||
[plugins]
|
||||
enable = Off
|
||||
|
||||
[defaults]
|
||||
view_editor_type = "${config.edit-mode}"
|
||||
view_layout = ${if (config.layout-mode == "bottom") then "2" else "1"}
|
||||
contacts_autosave = On
|
||||
mail_use_threads = ${if config.enable-threading then "On" else "Off"}
|
||||
allow_draft_autosave = On
|
||||
mail_reply_same_folder = Off
|
||||
show_images = On
|
||||
|
||||
[logs]
|
||||
enable = ${if config.debug then "On" else "Off"}
|
||||
|
||||
[debug]
|
||||
enable = ${if config.debug then "On" else "Off"}
|
||||
hide_passwords = On
|
||||
filename = "log-{date:Y-m-d}.txt"
|
||||
|
||||
[social]
|
||||
google_enable = Off
|
||||
fb_enable = Off
|
||||
twitter_enable = Off
|
||||
dropbox_enable = Off
|
||||
|
||||
[cache]
|
||||
enable = On
|
||||
index = "v1"
|
||||
fast_cache_driver = "files"
|
||||
fast_cache_index = "v1"
|
||||
http = On
|
||||
http_expires = 3600
|
||||
server_uids = On
|
||||
|
||||
[labs]
|
||||
allow_mobile_version = ${if config.enable-mobile then "On" else "Off"}
|
||||
check_new_password_strength = On
|
||||
allow_gravatar = On
|
||||
allow_prefetch = On
|
||||
allow_smart_html_links = On
|
||||
cache_system_data = On
|
||||
date_from_headers = On
|
||||
autocreate_system_folders = On
|
||||
allow_ctrl_enter_on_compose = On
|
||||
favicon_status = On
|
||||
use_local_proxy_for_external_images = On
|
||||
detect_image_exif_orientation = On
|
||||
|
||||
[version]
|
||||
current = "${version}"
|
||||
''
|
|
@ -0,0 +1,87 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
initrd-cfg = config.fudo.hosts.${hostname}.initrd-network;
|
||||
|
||||
read-lines = filename: splitString "\n" (fileContents filename);
|
||||
|
||||
concatLists = lsts: concatMap (i: i) lsts;
|
||||
|
||||
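# Derivation that runs ssh-keygen -r against the initrd host public key and strips the
# leading hostname, leaving bare SSHFP record data for the DNS zone.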
gen-sshfp-records-pkg = hostname: pubkey: let
|
||||
pubkey-file = builtins.toFile "${hostname}-initrd-ssh-pubkey" pubkey;
|
||||
in pkgs.stdenv.mkDerivation {
|
||||
name = "${hostname}-initrd-ssh-firngerprint";
|
||||
|
||||
phases = [ "installPhase" ];
|
||||
|
||||
buildInputs = with pkgs; [ openssh ];
|
||||
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
ssh-keygen -r REMOVEME -f "${pubkey-file}" | sed 's/^REMOVEME IN SSHFP //' >> $out/initrd-ssh-pubkey.sshfp
|
||||
'';
|
||||
};
|
||||
|
||||
gen-sshfp-records = hostname: pubkey: let
|
||||
sshfp-record-pkg = gen-sshfp-records-pkg hostname pubkey;
|
||||
in read-lines "${sshfp-record-pkg}/initrd-ssh-pubkey.sshfp";
|
||||
|
||||
in {
|
||||
config = {
|
||||
boot = mkIf (initrd-cfg != null) {
|
||||
kernelParams = let
|
||||
site = config.fudo.sites.${config.instance.local-site};
|
||||
site-gateway = site.gateway-v4;
|
||||
netmask =
|
||||
pkgs.lib.fudo.ip.maskFromV32Network site.network;
|
||||
in [
|
||||
"ip=${initrd-cfg.ip}:${site-gateway}:${netmask}:${hostname}:${initrd-cfg.interface}"
|
||||
];
|
||||
initrd = {
|
||||
network = {
|
||||
enable = true;
|
||||
|
||||
ssh = let
|
||||
admin-ssh-keys =
|
||||
concatMap (admin: config.fudo.users.${admin}.ssh-authorized-keys)
|
||||
config.instance.local-admins;
|
||||
in {
|
||||
enable = true;
|
||||
port = 22;
|
||||
authorizedKeys = admin-ssh-keys;
|
||||
hostKeys = [
|
||||
initrd-cfg.keypair.private-key-file
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
fudo = {
|
||||
local-network = let
|
||||
initrd-network-hosts =
|
||||
filterAttrs
|
||||
(hostname: hostOpts: hostOpts.initrd-network != null)
|
||||
config.instance.local-hosts;
|
||||
in {
|
||||
network-definition.hosts = mapAttrs'
|
||||
(hostname: hostOpts: nameValuePair "${hostname}-recovery"
|
||||
{
|
||||
ipv4-address = hostOpts.initrd-network.ip;
|
||||
description = "${hostname} initrd host";
|
||||
})
|
||||
initrd-network-hosts;
|
||||
|
||||
extra-records = let
|
||||
recs = (mapAttrsToList
|
||||
(hostname: hostOpts: map
|
||||
(sshfp: "${hostname} IN SSHFP ${sshfp}")
|
||||
(gen-sshfp-records hostname hostOpts.initrd-network.keypair.public-key))
|
||||
initrd-network-hosts);
|
||||
in concatLists recs;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,66 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.ipfs;
|
||||
|
||||
user-group-entry = group: user:
|
||||
nameValuePair user { extraGroups = [ group ]; };
|
||||
|
||||
in {
|
||||
options.fudo.ipfs = with types; {
|
||||
enable = mkEnableOption "Fudo IPFS";
|
||||
|
||||
users = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of users with IPFS access.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User as which to run IPFS user.";
|
||||
default = "ipfs";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group as which to run IPFS user.";
|
||||
default = "ipfs";
|
||||
};
|
||||
|
||||
api-address = mkOption {
|
||||
type = str;
|
||||
description = "Address on which to listen for requests.";
|
||||
default = "/ip4/127.0.0.1/tcp/5001";
|
||||
};
|
||||
|
||||
automount = mkOption {
|
||||
type = bool;
|
||||
description = "Whether to automount /ipfs and /ipns on boot.";
|
||||
default = true;
|
||||
};
|
||||
|
||||
data-dir = mkOption {
|
||||
type = str;
|
||||
description = "Path to store data for IPFS.";
|
||||
default = "/var/lib/ipfs";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
users.users =
|
||||
mapAttrs user-group-entry config.instance.local-users;
|
||||
|
||||
services.ipfs = {
|
||||
enable = true;
|
||||
apiAddress = cfg.api-address;
|
||||
autoMount = cfg.automount;
|
||||
enableGC = true;
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
dataDir = cfg.data-dir;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,236 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
siteOpts = { ... }: with types; {
|
||||
options = {
|
||||
enableACME = mkOption {
|
||||
type = bool;
|
||||
description = "Use ACME to get SSL certificates for this site.";
|
||||
default = true;
|
||||
};
|
||||
|
||||
site-config = mkOption {
|
||||
type = attrs;
|
||||
description = "Site-specific configuration.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
concatMapAttrs = f: attrs:
|
||||
foldr (a: b: a // b) {} (mapAttrsToList f attrs);
|
||||
|
||||
concatMapAttrsToList = f: attr:
|
||||
concatMap (i: i) (attrValues (mapAttrs f attr));
|
||||
|
||||
host-domains = config.fudo.acme.host-domains.${hostname};
|
||||
|
||||
siteCerts = site: let
|
||||
cert-copy = host-domains.${site}.local-copies.ejabberd;
|
||||
in [
|
||||
cert-copy.certificate
|
||||
cert-copy.private-key
|
||||
cert-copy.chain
|
||||
];
|
||||
|
||||
siteCertService = site:
|
||||
host-domains.${site}.local-copies.ejabberd.service;
|
||||
|
||||
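# The ejabberd config is rendered as JSON (a subset of YAML) with secret names left as
# placeholders; the generator service defined below substitutes the real values at runtime.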
config-file-template = let
|
||||
jabber-config = {
|
||||
loglevel = cfg.log-level;
|
||||
|
||||
access_rules = {
|
||||
c2s = { allow = "all"; };
|
||||
announce = { allow = "admin"; };
|
||||
configure = { allow = "admin"; };
|
||||
pubsub_createnode = { allow = "local"; };
|
||||
};
|
||||
|
||||
acl = {
|
||||
admin = {
|
||||
user = concatMap
|
||||
(admin: map (site: "${admin}@${site}")
|
||||
(attrNames cfg.sites))
|
||||
cfg.admins;
|
||||
};
|
||||
};
|
||||
|
||||
hosts = attrNames cfg.sites;
|
||||
|
||||
listen = map (ip: {
|
||||
port = cfg.port;
|
||||
module = "ejabberd_c2s";
|
||||
ip = ip;
|
||||
starttls = true;
|
||||
starttls_required = true;
|
||||
}) cfg.listen-ips;
|
||||
|
||||
certfiles = concatMapAttrsToList
|
||||
(site: siteOpts:
|
||||
if (siteOpts.enableACME) then
|
||||
(siteCerts site)
|
||||
else [])
|
||||
cfg.sites;
|
||||
|
||||
host_config =
|
||||
mapAttrs (site: siteOpts: siteOpts.site-config)
|
||||
cfg.sites;
|
||||
};
|
||||
|
||||
config-file = builtins.toJSON jabber-config;
|
||||
in pkgs.writeText "ejabberd.config.yml.template" config-file;
|
||||
|
||||
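# Produce a script that reads each secret file into a shell variable, then sed-replaces
# the corresponding placeholder names in the template and writes the final config to target.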
enter-secrets = template: secrets: target: let
|
||||
secret-readers = concatStringsSep "\n"
|
||||
(mapAttrsToList
|
||||
(secret: file: "${secret}=$(cat ${file})")
|
||||
secrets);
|
||||
secret-swappers = map
|
||||
(secret: "sed s/${secret}/\$${secret}/g")
|
||||
(attrNames secrets);
|
||||
swapper = concatStringsSep " | " secret-swappers;
|
||||
in pkgs.writeShellScript "ejabberd-generate-config.sh" ''
|
||||
cat ${template} | ${swapper} > ${target}
|
||||
'';
|
||||
|
||||
cfg = config.fudo.jabber;
|
||||
|
||||
in {
|
||||
options.fudo.jabber = with types; {
|
||||
enable = mkEnableOption "Enable ejabberd server.";
|
||||
|
||||
listen-ips = mkOption {
|
||||
type = listOf str;
|
||||
description = "IPs on which to listen for Jabber connections.";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = port;
|
||||
description = "Port on which to listen for Jabber connections.";
|
||||
default = 5222;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User as which to run the ejabberd server.";
|
||||
default = "ejabberd";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group as which to run the ejabberd server.";
|
||||
default = "ejabberd";
|
||||
};
|
||||
|
||||
admins = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of admin users for the server.";
|
||||
default = [];
|
||||
};
|
||||
|
||||
sites = mkOption {
|
||||
type = attrsOf (submodule siteOpts);
|
||||
description = "List of sites on which to listen for Jabber connections.";
|
||||
};
|
||||
|
||||
secret-files = mkOption {
|
||||
type = attrsOf str;
|
||||
description = "Map of secret-name to file. File contents will be subbed for the name in the config.";
|
||||
default = {};
|
||||
};
|
||||
|
||||
config-file = mkOption {
|
||||
type = str;
|
||||
description = "Location at which to generate the configuration file.";
|
||||
default = "/run/ejabberd/ejabberd.yaml";
|
||||
};
|
||||
|
||||
log-level = mkOption {
|
||||
type = int;
|
||||
description = ''
|
||||
Log level at which to run the server.
|
||||
|
||||
See: https://docs.ejabberd.im/admin/guide/troubleshooting/
|
||||
'';
|
||||
default = 3;
|
||||
};
|
||||
|
||||
environment = mkOption {
|
||||
type = attrsOf str;
|
||||
description = "Environment variables to set for the ejabberd daemon.";
|
||||
default = {};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
users = {
|
||||
users.${cfg.user} = {
|
||||
isSystemUser = true;
|
||||
};
|
||||
|
||||
groups.${cfg.group} = {
|
||||
members = [ cfg.user ];
|
||||
};
|
||||
};
|
||||
|
||||
fudo = {
|
||||
acme.host-domains.${hostname} = mapAttrs (site: siteCfg:
|
||||
mkIf siteCfg.enableACME {
|
||||
local-copies.ejabberd = {
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
};
|
||||
}) cfg.sites;
|
||||
|
||||
system = let
|
||||
config-dir = dirOf cfg.config-file;
|
||||
in {
|
||||
ensure-directories.${config-dir} = {
|
||||
user = cfg.user;
|
||||
perms = "0700";
|
||||
};
|
||||
|
||||
services.ejabberd-config-generator = let
|
||||
config-generator =
|
||||
enter-secrets config-file-template cfg.secret-files cfg.config-file;
|
||||
in {
|
||||
script = "${config-generator}";
|
||||
readWritePaths = [ config-dir ];
|
||||
workingDirectory = config-dir;
|
||||
user = cfg.user;
|
||||
description = "Generate ejabberd config file with necessary passwords.";
|
||||
postStart = ''
|
||||
chown ${cfg.user} ${cfg.config-file}
|
||||
chmod 0400 ${cfg.config-file}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"D '${dirOf cfg.config-file}' 0550 ${cfg.user} ${cfg.group} - -"
|
||||
];
|
||||
|
||||
services = {
|
||||
ejabberd = {
|
||||
wants = map (site: siteCertService site) (attrNames cfg.sites);
|
||||
requires = [ "ejabberd-config-generator.service" ];
|
||||
environment = cfg.environment;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.ejabberd = {
|
||||
enable = true;
|
||||
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
|
||||
configFile = cfg.config-file;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,532 @@
|
|||
{ config, lib, pkgs, ... } @ toplevel:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.auth.kdc;
|
||||
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
localhost-ips = let
|
||||
addr-only = addrinfo: addrinfo.address;
|
||||
interface = config.networking.interfaces.lo;
|
||||
in
|
||||
(map addr-only interface.ipv4.addresses) ++
|
||||
(map addr-only interface.ipv6.addresses);
|
||||
|
||||
host-ips =
|
||||
(pkgs.lib.fudo.network.host-ips hostname) ++ localhost-ips;
|
||||
|
||||
state-directory = toplevel.config.fudo.auth.kdc.state-directory;
|
||||
|
||||
database-file = "${state-directory}/principals.db";
|
||||
iprop-log = "${state-directory}/iprop.log";
|
||||
|
||||
master-server = cfg.master-config != null;
|
||||
slave-server = cfg.slave-config != null;
|
||||
|
||||
get-fqdn = hostname:
|
||||
"${hostname}.${config.fudo.hosts.${hostname}.domain}";
|
||||
|
||||
kdc-conf = generate-kdc-conf {
|
||||
realm = cfg.realm;
|
||||
db-file = database-file;
|
||||
key-file = cfg.master-key-file;
|
||||
acl-data = if master-server then cfg.master-config.acl else null;
|
||||
};
|
||||
|
||||
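# One-shot script: creates the realm if the principal database is missing, ensures host,
# service, and iprop principals exist, and extracts the keytabs used by kadmind, kpasswdd,
# and ipropd.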
initialize-db =
|
||||
{ realm, user, group, kdc-conf, key-file, db-name, max-lifetime, max-renewal,
|
||||
primary-keytab, kadmin-keytab, kpasswd-keytab, ipropd-keytab, local-hostname }: let
|
||||
|
||||
kadmin-cmd = "kadmin -l -c ${kdc-conf} --";
|
||||
|
||||
get-domain-hosts = domain: let
|
||||
host-in-subdomain = host: hostOpts:
|
||||
(builtins.match "(.+[.])?${domain}$" hostOpts.domain) != null;
|
||||
in attrNames (filterAttrs host-in-subdomain config.fudo.hosts);
|
||||
|
||||
get-host-principals = realm: hostname: let
|
||||
host = config.fudo.hosts.${hostname};
|
||||
in map (service: "${service}/${hostname}.${host.domain}@${realm}")
|
||||
host.kerberos-services;
|
||||
|
||||
add-principal-str = principal:
|
||||
"${kadmin-cmd} add --random-key --use-defaults ${principal}";
|
||||
|
||||
test-existence = principal:
|
||||
"[[ $( ${kadmin-cmd} get ${principal} ) ]]";
|
||||
|
||||
exists-or-add = principal: ''
|
||||
if ${test-existence principal}; then
|
||||
echo "skipping ${principal}, already exists"
|
||||
else
|
||||
${add-principal-str principal}
|
||||
fi
|
||||
'';
|
||||
|
||||
ensure-host-principals = realm:
|
||||
concatStringsSep "\n"
|
||||
(map exists-or-add
|
||||
(concatMap (get-host-principals realm)
|
||||
(get-domain-hosts (toLower realm))));
|
||||
|
||||
slave-hostnames = map get-fqdn cfg.master-config.slave-hosts;
|
||||
|
||||
ensure-iprop-principals = concatStringsSep "\n"
|
||||
(map (host: exists-or-add "iprop/${host}@${realm}")
([ local-hostname ] ++ slave-hostnames));
|
||||
|
||||
copy-slave-principals-file = let
|
||||
slave-principals = map
|
||||
(host: "iprop/${hostname}@${cfg.realm}")
|
||||
slave-hostnames;
|
||||
slave-principals-file = pkgs.writeText "heimdal-slave-principals"
|
||||
(concatStringsSep "\n" slave-principals);
|
||||
in optionalString (slave-principals-file != null) ''
|
||||
cp ${slave-principals-file} ${state-directory}/slaves
|
||||
# Since it's copied from /nix/store, this is by default read-only,
|
||||
# which causes updates to fail.
|
||||
chmod u+w ${state-directory}/slaves
|
||||
'';
|
||||
|
||||
in pkgs.writeShellScript "initialize-kdc-db.sh" ''
|
||||
TMP=$(mktemp -d -t kdc-XXXXXXXX)
|
||||
if [ ! -e ${database-file} ]; then
|
||||
## CHANGING HOW THIS WORKS
|
||||
## Now we expect the key to be provided
|
||||
# kstash --key-file=${key-file} --random-key
|
||||
${kadmin-cmd} init --realm-max-ticket-life="${max-lifetime}" --realm-max-renewable-life="${max-renewal}" ${realm}
|
||||
fi
|
||||
|
||||
${ensure-host-principals realm}
|
||||
|
||||
${ensure-iprop-principals}
|
||||
|
||||
echo "*** BEGIN EXTRACTING KEYTABS"
|
||||
echo "*** You can probably ignore the 'principal does not exist' errors that follow,"
|
||||
echo "*** they're just testing for principal existence before creating those that"
|
||||
echo "*** don't already exist"
|
||||
|
||||
${kadmin-cmd} ext_keytab --keytab=$TMP/primary.keytab "*/${local-hostname}@${realm}"
|
||||
mv $TMP/primary.keytab ${primary-keytab}
|
||||
${kadmin-cmd} ext_keytab --keytab=$TMP/kadmin.keytab kadmin/admin@${realm}
|
||||
mv $TMP/kadmin.keytab ${kadmin-keytab}
|
||||
${kadmin-cmd} ext_keytab --keytab=$TMP/kpasswd.keytab kadmin/changepw@${realm}
|
||||
mv $TMP/kpasswd.keytab ${kpasswd-keytab}
|
||||
${kadmin-cmd} ext_keytab --keytab=$TMP/ipropd.keytab iprop/${local-hostname}@${realm}
|
||||
mv $TMP/ipropd.keytab ${ipropd-keytab}
|
||||
|
||||
echo "*** END EXTRACTING KEYTABS"
|
||||
|
||||
${copy-slave-principals-file}
|
||||
'';
|
||||
|
||||
generate-kdc-conf = { realm, db-file, key-file, acl-data }:
|
||||
pkgs.writeText "kdc.conf" ''
|
||||
[kdc]
|
||||
database = {
|
||||
dbname = sqlite:${db-file}
|
||||
realm = ${realm}
|
||||
mkey_file = ${key-file}
|
||||
${optionalString (acl-data != null)
|
||||
"acl_file = ${generate-acl-file acl-data}"}
|
||||
log_file = ${iprop-log}
|
||||
}
|
||||
|
||||
[realms]
|
||||
${realm} = {
|
||||
enable-http = false
|
||||
}
|
||||
|
||||
[logging]
|
||||
kdc = FILE:${state-directory}/kerberos.log
|
||||
default = FILE:${state-directory}/kerberos.log
|
||||
'';
|
||||
|
||||
aclEntry = { principal, ... }: {
|
||||
options = with types; {
|
||||
perms = let
|
||||
perms = [
|
||||
"change-password"
|
||||
"add"
|
||||
"list"
|
||||
"delete"
|
||||
"modify"
|
||||
"get"
|
||||
"get-keys"
|
||||
"all"
|
||||
];
|
||||
in mkOption {
|
||||
type = listOf (enum perms);
|
||||
description = "List of permissions.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
target = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Target principals.";
|
||||
default = null;
|
||||
example = "hosts/*@REALM.COM";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
generate-acl-file = acl-entries: let
|
||||
perms-to-permstring = perms: concatStringsSep "," perms;
|
||||
in
|
||||
pkgs.writeText "kdc.acl" (concatStringsSep "\n" (mapAttrsToList
|
||||
(principal: opts:
|
||||
"${principal} ${perms-to-permstring opts.perms}${
|
||||
optionalString (opts.target != null) " ${opts.target}" }")
|
||||
acl-entries));
|
||||
|
||||
kadmin-local = kdc-conf:
|
||||
pkgs.writeShellScriptBin "kadmin.local" ''
|
||||
${pkgs.heimdalFull}/bin/kadmin -l -c ${kdc-conf} $@
|
||||
'';
|
||||
|
||||
masterOpts = { ... }: {
|
||||
options = with types; {
|
||||
acl = mkOption {
|
||||
type = attrsOf (submodule aclEntry);
|
||||
description = "Mapping of pricipals to a list of permissions.";
|
||||
default = { "*/admin" = [ "all" ]; };
|
||||
example = {
|
||||
"*/root" = [ "all" ];
|
||||
"admin-user" = [ "add" "list" "modify" ];
|
||||
};
|
||||
};
|
||||
|
||||
kadmin-keytab = mkOption {
|
||||
type = str;
|
||||
description = "Location at which to store keytab for kadmind.";
|
||||
default = "${state-directory}/kadmind.keytab";
|
||||
};
|
||||
|
||||
kpasswdd-keytab = mkOption {
|
||||
type = str;
|
||||
description = "Location at which to store keytab for kpasswdd.";
|
||||
default = "${state-directory}/kpasswdd.keytab";
|
||||
};
|
||||
|
||||
ipropd-keytab = mkOption {
|
||||
type = str;
|
||||
description = "Location at which to store keytab for ipropd master.";
|
||||
default = "${state-directory}/ipropd.keytab";
|
||||
};
|
||||
|
||||
slave-hosts = mkOption {
|
||||
type = listOf str;
|
||||
description = ''
|
||||
A list of hosts to which the database should be propagated.
|
||||
|
||||
Must exist in the Fudo Host database.
|
||||
'';
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
slaveOpts = { ... }: {
|
||||
options = with types; {
|
||||
master-host = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
Host from which to receive database updates.
|
||||
|
||||
Must exist in the Fudo Host database.
|
||||
'';
|
||||
};
|
||||
|
||||
ipropd-keytab = mkOption {
|
||||
type = str;
|
||||
description = "Location at which to find keytab for ipropd slave.";
|
||||
default = "${state-directory}/ipropd.keytab";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.auth.kdc = with types; {
|
||||
enable = mkEnableOption "Fudo KDC";
|
||||
|
||||
realm = mkOption {
|
||||
type = str;
|
||||
description = "The realm for which we are the acting KDC.";
|
||||
};
|
||||
|
||||
bind-addresses = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of IP addresses on which to bind.";
|
||||
default = host-ips;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User as which to run Heimdal servers.";
|
||||
default = "kerberos";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group as which to run Heimdal servers.";
|
||||
default = "kerberos";
|
||||
};
|
||||
|
||||
state-directory = mkOption {
|
||||
type = str;
|
||||
description = "Path at which to store kerberos database.";
|
||||
default = "/var/lib/kerberos";
|
||||
};
|
||||
|
||||
master-key-file = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
File containing the master key for the realm.
|
||||
|
||||
Must be provided!
|
||||
'';
|
||||
};
|
||||
|
||||
primary-keytab = mkOption {
|
||||
type = str;
|
||||
description = "Location of host master keytab.";
|
||||
default = "${state-directory}/host.keytab";
|
||||
};
|
||||
|
||||
master-config = mkOption {
|
||||
type = nullOr (submodule masterOpts);
|
||||
description = "Configuration for the master KDC server.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
slave-config = mkOption {
|
||||
type = nullOr (submodule slaveOpts);
|
||||
description = "Configuration for slave KDC servers.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
max-ticket-lifetime = mkOption {
|
||||
type = str;
|
||||
description = "Maximum lifetime of a single ticket in this realm.";
|
||||
default = "1d";
|
||||
};
|
||||
|
||||
max-ticket-renewal = mkOption {
|
||||
type = str;
|
||||
description = "Maximum time a ticket may be renewed in this realm.";
|
||||
default = "7d";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = master-server || slave-server;
|
||||
message =
|
||||
"For the KDC to be enabled, a master OR slave config must be provided.";
|
||||
}
|
||||
{
|
||||
assertion = !(master-server && slave-server);
|
||||
message =
|
||||
"Only one of master-config and slave-config may be provided.";
|
||||
}
|
||||
];
|
||||
|
||||
users = {
|
||||
users.${cfg.user} = {
|
||||
isSystemUser = true;
|
||||
home = state-directory;
|
||||
group = cfg.group;
|
||||
};
|
||||
|
||||
groups.${cfg.group} = { members = [ cfg.user ]; };
|
||||
};
|
||||
|
||||
krb5 = {
|
||||
libdefaults = {
|
||||
# Stick to ~/.k5login
|
||||
# k5login_directory = cfg.k5login-directory;
|
||||
ticket_lifetime = cfg.max-ticket-lifetime;
|
||||
renew_lifetime = cfg.max-ticket-renewal;
|
||||
};
|
||||
# Sorry, port 80 isn't available!
|
||||
realms.${cfg.realm}.enable-http = false;
|
||||
extraConfig = ''
|
||||
default = FILE:${state-directory}/kerberos.log
|
||||
'';
|
||||
};
|
||||
|
||||
environment = {
|
||||
systemPackages = [ pkgs.heimdalFull (kadmin-local kdc-conf) ];
|
||||
|
||||
## This shouldn't be necessary...every host gets a krb5.keytab
|
||||
# etc = {
|
||||
# "krb5.keytab" = {
|
||||
# user = "root";
|
||||
# group = "root";
|
||||
# mode = "0400";
|
||||
# source = cfg.primary-keytab;
|
||||
# };
|
||||
# };
|
||||
};
|
||||
|
||||
fudo.system = {
|
||||
ensure-directories = {
|
||||
"${state-directory}" = {
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
perms = "0740";
|
||||
};
|
||||
};
|
||||
|
||||
services = if master-server then {
|
||||
|
||||
heimdal-kdc = let
|
||||
listen-addrs = concatStringsSep " "
|
||||
(map (addr: "--addresses=${addr}") cfg.bind-addresses);
|
||||
in {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
description =
|
||||
"Heimdal Kerberos Key Distribution Center (ticket server).";
|
||||
execStart = "${pkgs.heimdalFull}/libexec/heimdal/kdc -c ${kdc-conf} --ports=88 ${listen-addrs}";
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
workingDirectory = state-directory;
|
||||
privateNetwork = false;
|
||||
addressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
requiredCapabilities = [ "CAP_NET_BIND_SERVICE" ];
|
||||
environment = { KRB5_CONFIG = "/etc/krb5.conf"; };
|
||||
};
|
||||
|
||||
heimdal-kdc-init = let
|
||||
init-cmd = initialize-db {
|
||||
realm = cfg.realm;
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
kdc-conf = kdc-conf;
|
||||
key-file = cfg.master-key-file;
|
||||
db-name = database-file;
|
||||
max-lifetime = cfg.max-ticket-lifetime;
|
||||
max-renewal = cfg.max-ticket-renewal;
|
||||
primary-keytab = cfg.primary-keytab;
|
||||
kadmin-keytab = cfg.master-config.kadmin-keytab;
|
||||
kpasswd-keytab = cfg.master-config.kpasswdd-keytab;
|
||||
ipropd-keytab = cfg.master-config.ipropd-keytab;
|
||||
local-hostname =
|
||||
"${config.instance.hostname}.${config.instance.local-domain}";
|
||||
};
|
||||
in {
|
||||
requires = [ "heimdal-kdc.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
description = "Initialization script for Heimdal KDC.";
|
||||
type = "oneshot";
|
||||
execStart = "${init-cmd}";
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
path = with pkgs; [ heimdalFull ];
|
||||
protectSystem = "full";
|
||||
addressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
workingDirectory = state-directory;
|
||||
environment = { KRB5_CONFIG = "/etc/krb5.conf"; };
|
||||
};
|
||||
|
||||
heimdal-ipropd-master = mkIf (length cfg.master-config.slave-hosts > 0) {
|
||||
requires = [ "heimdal-kdc.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
description = "Propagate changes to the master KDC DB to all slaves.";
|
||||
path = with pkgs; [ heimdalFull ];
|
||||
execStart = "${pkgs.heimdalFull}/libexec/heimdal/ipropd-master -c ${kdc-conf} -k ${cfg.master.ipropd-keytab}";
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
workingDirectory = state-directory;
|
||||
privateNetwork = false;
|
||||
addressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
environment = { KRB5_CONFIG = "/etc/krb5.conf"; };
|
||||
};
|
||||
|
||||
} else {
|
||||
|
||||
heimdal-kdc-slave = let
|
||||
listen-addrs = concatStringsSep " "
|
||||
(map (addr: "--addresses=${addr}") cfg.bind-addresses);
|
||||
command =
|
||||
"${pkgs.heimdalFull}/libexec/heimdal/kdc -c ${kdc-conf} --ports=88 ${listen-addrs}";
|
||||
in {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
description =
|
||||
"Heimdal Slave Kerberos Key Distribution Center (ticket server).";
|
||||
execStart = command;
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
workingDirectory = state-directory;
|
||||
privateNetwork = false;
|
||||
addressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
requiredCapabilities = [ "CAP_NET_BIND_SERVICE" ];
|
||||
environment = { KRB5_CONFIG = "/etc/krb5.conf"; };
|
||||
};
|
||||
|
||||
heimdal-ipropd-slave = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
description = "Receive changes propagated from the KDC master server.";
|
||||
path = with pkgs; [ heimdalFull ];
|
||||
execStart = concatStringsSep " " [
|
||||
"${pkgs.heimdalFull}/libexec/heimdal/ipropd-slave"
|
||||
"--config-file=${kdc-conf}"
|
||||
"--keytab=${cfg.slave-config.ipropd-keytab}"
|
||||
"--realm=${cfg.realm}"
|
||||
"--hostname=${get-fqdn hostname}"
|
||||
"--port=2121"
|
||||
"--verbose"
|
||||
(get-fqdn cfg.slave-config.master-host)
|
||||
];
|
||||
user = cfg.user;
|
||||
group = cfg.group;
|
||||
workingDirectory = state-directory;
|
||||
privateNetwork = false;
|
||||
addressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
requiredCapabilities = [ "CAP_NET_BIND_SERVICE" ];
|
||||
environment = { KRB5_CONFIG = "/etc/krb5.conf"; };
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.xinetd = mkIf master-server {
|
||||
enable = true;
|
||||
|
||||
services = [
|
||||
{
|
||||
name = "kerberos-adm";
|
||||
user = cfg.user;
|
||||
server = "${pkgs.heimdalFull}/libexec/heimdal/kadmind";
|
||||
protocol = "tcp";
|
||||
serverArgs =
|
||||
"--config-file=${kdc-conf} --keytab=${cfg.master-config.kadmin-keytab}";
|
||||
}
|
||||
{
|
||||
name = "kpasswd";
|
||||
user = cfg.user;
|
||||
server = "${pkgs.heimdalFull}/libexec/heimdal/kpasswdd";
|
||||
protocol = "udp";
|
||||
serverArgs =
|
||||
"--config-file=${kdc-conf} --keytab=${cfg.master-config.kpasswdd-keytab}";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
networking = {
|
||||
firewall = {
|
||||
allowedTCPPorts = [ 88 ] ++
|
||||
(optionals master-server [ 749 ]) ++
|
||||
(optionals slave-server [ 2121 ]);
|
||||
allowedUDPPorts = [ 88 ] ++
|
||||
(optionals master-server [ 464 ]) ++
|
||||
(optionals slave-server [ 2121 ]);
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,460 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
|
||||
cfg = config.fudo.auth.ldap-server;
|
||||
|
||||
user-type = import ../types/user.nix { inherit lib; };
|
||||
|
||||
stringJoin = concatStringsSep;
|
||||
|
||||
getUserGidNumber = user: group-map: group-map.${user.primary-group}.gid;
|
||||
|
||||
attrOr = attrs: attr: value: if attrs ? ${attr} then attrs.${attr} else value;
|
||||
|
||||
ca-path = "${cfg.state-directory}/ca.pem";
|
||||
|
||||
build-ca-script = target: ca-cert: site-chain: let
|
||||
user = config.services.openldap.user;
|
||||
group = config.services.openldap.group;
|
||||
in pkgs.writeShellScript "build-openldap-ca-script.sh" ''
|
||||
cat ${site-chain} ${ca-cert} > ${target}
|
||||
chmod 440 ${target}
|
||||
chown ${user}:${group} ${target}
|
||||
'';
|
||||
|
||||
mkHomeDir = username: user-opts:
|
||||
if (user-opts.primary-group == "admin") then
|
||||
"/home/${username}"
|
||||
else
|
||||
"/home/${user-opts.primary-group}/${username}";
|
||||
|
||||
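# Render the LDIF entry for a regular (posixAccount) member.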
userLdif = base: name: group-map: opts: ''
|
||||
dn: uid=${name},ou=members,${base}
|
||||
uid: ${name}
|
||||
objectClass: account
|
||||
objectClass: shadowAccount
|
||||
objectClass: posixAccount
|
||||
cn: ${opts.common-name}
|
||||
uidNumber: ${toString (opts.uid)}
|
||||
gidNumber: ${toString (getUserGidNumber opts group-map)}
|
||||
homeDirectory: ${mkHomeDir name opts}
|
||||
description: ${opts.description}
|
||||
shadowLastChange: 12230
|
||||
shadowMax: 99999
|
||||
shadowWarning: 7
|
||||
userPassword: ${opts.ldap-hashed-passwd}
|
||||
'';
|
||||
|
||||
systemUserLdif = base: name: opts: ''
|
||||
dn: cn=${name},${base}
|
||||
objectClass: organizationalRole
|
||||
objectClass: simpleSecurityObject
|
||||
cn: ${name}
|
||||
description: ${opts.description}
|
||||
userPassword: ${opts.ldap-hashed-password}
|
||||
'';
|
||||
|
||||
toMemberList = userList:
|
||||
stringJoin "\n" (map (username: "memberUid: ${username}") userList);
|
||||
|
||||
groupLdif = base: name: opts: ''
|
||||
dn: cn=${name},ou=groups,${base}
|
||||
objectClass: posixGroup
|
||||
cn: ${name}
|
||||
gidNumber: ${toString (opts.gid)}
|
||||
description: ${opts.description}
|
||||
${toMemberList opts.members}
|
||||
'';
|
||||
|
||||
systemUsersLdif = base: user-map:
|
||||
stringJoin "\n"
|
||||
(mapAttrsToList (name: opts: systemUserLdif base name opts) user-map);
|
||||
|
||||
groupsLdif = base: group-map:
|
||||
stringJoin "\n"
|
||||
(mapAttrsToList (name: opts: groupLdif base name opts) group-map);
|
||||
|
||||
usersLdif = base: group-map: user-map:
|
||||
stringJoin "\n"
|
||||
(mapAttrsToList (name: opts: userLdif base name group-map opts) user-map);
|
||||
|
||||
in {
|
||||
|
||||
options = with types; {
|
||||
fudo = {
|
||||
auth = {
|
||||
ldap-server = {
|
||||
enable = mkEnableOption "Fudo Authentication";
|
||||
|
||||
kerberos-host = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
The name of the host to use for Kerberos authentication.
|
||||
'';
|
||||
};
|
||||
|
||||
kerberos-keytab = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
The path to a keytab for the LDAP server, containing a principal for ldap/<hostname>.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl-certificate = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
The path to the SSL certificate to use for the server.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl-chain = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
The path to the SSL chain leading to the certificate for the server.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl-private-key = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
The path to the SSL key to use for the server.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl-ca-certificate = mkOption {
|
||||
type = nullOr str;
|
||||
description = ''
|
||||
The path to the SSL CA cert used to sign the certificate.
|
||||
'';
|
||||
default = null;
|
||||
};
|
||||
|
||||
organization = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
The name to use for the organization.
|
||||
'';
|
||||
};
|
||||
|
||||
base = mkOption {
|
||||
type = str;
|
||||
description = "The base dn of the LDAP server.";
|
||||
example = "dc=fudo,dc=org";
|
||||
};
|
||||
|
||||
rootpw-file = mkOption {
|
||||
default = "";
|
||||
type = str;
|
||||
description = ''
|
||||
The path to a file containing the root password for this database.
|
||||
'';
|
||||
};
|
||||
|
||||
listen-uris = mkOption {
|
||||
type = listOf str;
|
||||
description = ''
|
||||
A list of URIs on which the ldap server should listen.
|
||||
'';
|
||||
example = [ "ldap://auth.fudo.org" "ldaps://auth.fudo.org" ];
|
||||
};
|
||||
|
||||
users = mkOption {
|
||||
type = attrsOf (submodule user-type.userOpts);
|
||||
example = {
|
||||
tester = {
|
||||
uid = 10099;
|
||||
common-name = "Joe Blow";
|
||||
hashed-password = "<insert password hash>";
|
||||
};
|
||||
};
|
||||
description = ''
|
||||
Users to be added to the Fudo LDAP database.
|
||||
'';
|
||||
default = { };
|
||||
};
|
||||
|
||||
groups = mkOption {
|
||||
default = { };
|
||||
type = attrsOf (submodule user-type.groupOpts);
|
||||
example = {
|
||||
admin = {
|
||||
gid = 1099;
|
||||
members = [ "tester" ];
|
||||
};
|
||||
};
|
||||
description = ''
|
||||
Groups to be added to the Fudo LDAP database.
|
||||
'';
|
||||
};
|
||||
|
||||
system-users = mkOption {
|
||||
default = { };
|
||||
type = attrsOf (submodule user-type.systemUserOpts);
|
||||
example = {
|
||||
replicator = {
|
||||
description = "System user for database sync";
|
||||
ldap-hashed-password = "<insert password hash>";
|
||||
};
|
||||
};
|
||||
description = "System users to be added to the Fudo LDAP database.";
|
||||
};
|
||||
|
||||
state-directory = mkOption {
|
||||
type = str;
|
||||
description = "Path at which to store openldap database & state.";
|
||||
};
|
||||
|
||||
systemd-target = mkOption {
|
||||
type = str;
|
||||
description = "Systemd target for running ldap server.";
|
||||
default = "fudo-ldap-server.target";
|
||||
};
|
||||
|
||||
required-services = mkOption {
|
||||
type = listOf str;
|
||||
description = "Systemd services on which the server depends.";
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
environment = {
|
||||
etc = {
|
||||
"openldap/sasl2/slapd.conf" = {
|
||||
mode = "0400";
|
||||
user = config.services.openldap.user;
|
||||
group = config.services.openldap.group;
|
||||
text = ''
|
||||
mech_list: gssapi external
|
||||
keytab: ${cfg.kerberos-keytab}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [ 389 636 ];
|
||||
allowedUDPPorts = [ 389 ];
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = let
|
||||
ca-dir = dirOf ca-path;
|
||||
user = config.services.openldap.user;
|
||||
group = config.services.openldap.group;
|
||||
in [
|
||||
"d ${ca-dir} 0700 ${user} ${group} - -"
|
||||
];
|
||||
|
||||
services.openldap = {
|
||||
partOf = [ cfg.systemd-target ];
|
||||
requires = cfg.required-services;
|
||||
environment.KRB5_KTNAME = cfg.kerberos-keytab;
|
||||
preStart = mkBefore
|
||||
"${build-ca-script ca-path
|
||||
cfg.ssl-chain
|
||||
cfg.ssl-ca-certificate}";
|
||||
serviceConfig = {
|
||||
PrivateDevices = true;
|
||||
PrivateTmp = true;
|
||||
PrivateMounts = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectSystem = true;
|
||||
ProtectHostname = true;
|
||||
ProtectHome = true;
|
||||
ProtectClock = true;
|
||||
ProtectKernelLogs = true;
|
||||
KeyringMode = "private";
|
||||
# RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
|
||||
Restart = "on-failure";
|
||||
LockPersonality = true;
|
||||
RestrictRealtime = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
SystemCallFilter = concatStringsSep " " [
|
||||
"~@clock"
|
||||
"@debug"
|
||||
"@module"
|
||||
"@mount"
|
||||
"@raw-io"
|
||||
"@reboot"
|
||||
"@swap"
|
||||
# "@privileged"
|
||||
"@resources"
|
||||
"@cpu-emulation"
|
||||
"@obsolete"
|
||||
];
|
||||
UMask = "7007";
|
||||
InaccessiblePaths = [ "/home" "/root" ];
|
||||
LimitNOFILE = 49152;
|
||||
PermissionsStartOnly = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.openldap = {
|
||||
enable = true;
|
||||
urlList = cfg.listen-uris;
|
||||
|
||||
settings = let
|
||||
makePermEntry = dn: perm: "by ${dn} ${perm}";
|
||||
|
||||
makeAccessLine = target: perm-map: let
|
||||
perm-entries = mapAttrsToList makePermEntry perm-map;
|
||||
in "to ${target} ${concatStringsSep " " perm-entries}";
|
||||
|
||||
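# Convert a { target -> { who -> access } } map into olcAccess rules, prefixed with {n}
# indices so OpenLDAP keeps them in order.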
makeAccess = access-map: let
|
||||
access-lines = mapAttrsToList makeAccessLine;
|
||||
numbered-access-lines = imap0 (i: line: "{${toString i}}${line}");
|
||||
in numbered-access-lines (access-lines access-map);
|
||||
|
||||
in {
|
||||
attrs = {
|
||||
cn = "config";
|
||||
objectClass = "olcGlobal";
|
||||
olcPidFile = "/run/slapd/slapd.pid";
|
||||
olcTLSCertificateFile = cfg.ssl-certificate;
|
||||
olcTLSCertificateKeyFile = cfg.ssl-private-key;
|
||||
olcTLSCACertificateFile = ca-path;
|
||||
olcSaslSecProps = "noplain,noanonymous";
|
||||
olcAuthzRegexp = let
|
||||
authz-regex-entry = i: { regex, target }:
|
||||
"{${toString i}}\"${regex}\" \"${target}\"";
|
||||
in imap0 authz-regex-entry [
|
||||
{
|
||||
regex = "^uid=auth/([^.]+).fudo.org,cn=fudo.org,cn=gssapi,cn=auth$";
|
||||
target = "cn=$1,ou=hosts,dc=fudo,dc=org";
|
||||
}
|
||||
{
|
||||
regex = "^uid=[^,/]+/root,cn=fudo.org,cn=gssapi,cn=auth$";
|
||||
target = "cn=admin,dc=fudo,dc=org";
|
||||
}
|
||||
{
|
||||
regex = "^uid=([^,/]+),cn=fudo.org,cn=gssapi,cn=auth$";
|
||||
target = "uid=$1,ou=members,dc=fudo,dc=org";
|
||||
}
|
||||
{
|
||||
regex = "^uid=host/([^,/]+),cn=fudo.org,cn=gssapi,cn=auth$";
|
||||
target = "cn=$1,ou=hosts,dc=fudo,dc=org";
|
||||
}
|
||||
{
|
||||
regex = "^gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth$";
|
||||
target = "cn=admin,dc=fudo,dc=org";
|
||||
}
|
||||
];
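# Sketch of the resulting mappings (hypothetical principals): a GSSAPI
# identity such as uid=host/somebox,cn=fudo.org,cn=gssapi,cn=auth is rewritten
# to cn=somebox,ou=hosts,dc=fudo,dc=org, while uid=alice,... maps to
# uid=alice,ou=members,dc=fudo,dc=org.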
|
||||
};
|
||||
children = {
|
||||
"cn=schema" = {
|
||||
includes = [
|
||||
"${pkgs.openldap}/etc/schema/core.ldif"
|
||||
"${pkgs.openldap}/etc/schema/cosine.ldif"
|
||||
"${pkgs.openldap}/etc/schema/inetorgperson.ldif"
|
||||
"${pkgs.openldap}/etc/schema/nis.ldif"
|
||||
];
|
||||
};
|
||||
"olcDatabase={-1}frontend" = {
|
||||
attrs = {
|
||||
objectClass = [ "olcDatabaseConfig" "olcFrontendConfig" ];
|
||||
olcDatabase = "{-1}frontend";
|
||||
olcAccess = makeAccess {
|
||||
"*" = {
|
||||
"dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" = "manage";
|
||||
"*" = "none";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
"olcDatabase={0}config" = {
|
||||
attrs = {
|
||||
objectClass = [ "olcDatabaseConfig" ];
|
||||
olcDatabase = "{0}config";
|
||||
olcAccess = makeAccess {
|
||||
"*" = {
|
||||
"dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" = "manage";
|
||||
"*" = "none";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
"olcDatabase={1}mdb" = {
|
||||
attrs = {
|
||||
objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
|
||||
olcDatabase = "{1}mdb";
|
||||
olcSuffix = cfg.base;
|
||||
# olcRootDN = "cn=admin,${cfg.base}";
|
||||
# olcRootPW = FIXME; # NOTE: this should be hashed...
|
||||
olcDbDirectory = "${cfg.state-directory}/database";
|
||||
olcDbIndex = [ "objectClass eq" "uid eq" ];
|
||||
olcAccess = makeAccess {
|
||||
"attrs=userPassword,shadowLastChange" = {
|
||||
"dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" = "manage";
|
||||
"dn.exact=cn=auth_reader,${cfg.base}" = "read";
|
||||
"dn.exact=cn=replicator,${cfg.base}" = "read";
|
||||
"self" = "write";
|
||||
"*" = "auth";
|
||||
};
|
||||
"dn=cn=admin,ou=groups,${cfg.base}" = {
|
||||
"dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" = "manage";
|
||||
"users" = "read";
|
||||
"*" = "none";
|
||||
};
|
||||
"dn.subtree=ou=groups,${cfg.base} attrs=memberUid" = {
|
||||
"dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" = "manage";
|
||||
"dn.regex=cn=[a-zA-Z][a-zA-Z0-9_]+,ou=hosts,${cfg.base}" = "write";
|
||||
"users" = "read";
|
||||
"*" = "none";
|
||||
};
|
||||
"dn.subtree=ou=members,${cfg.base} attrs=cn,sn,homeDirectory,loginShell,gecos,description,homeDirectory,uidNumber,gidNumber" = {
|
||||
"dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" = "manage";
|
||||
"dn.exact=cn=user_db_reader,${cfg.base}" = "read";
|
||||
"users" = "read";
|
||||
"*" = "none";
|
||||
};
|
||||
"*" = {
|
||||
"dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" = "manage";
|
||||
"users" = "read";
|
||||
"*" = "none";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
declarativeContents = {
|
||||
"dc=fudo,dc=org" = ''
|
||||
dn: ${cfg.base}
|
||||
objectClass: top
|
||||
objectClass: dcObject
|
||||
objectClass: organization
|
||||
o: ${cfg.organization}
|
||||
|
||||
dn: ou=groups,${cfg.base}
|
||||
objectClass: organizationalUnit
|
||||
description: ${cfg.organization} groups
|
||||
|
||||
dn: ou=members,${cfg.base}
|
||||
objectClass: organizationalUnit
|
||||
description: ${cfg.organization} members
|
||||
|
||||
dn: cn=admin,${cfg.base}
|
||||
objectClass: organizationalRole
|
||||
cn: admin
|
||||
description: "Admin User"
|
||||
|
||||
${systemUsersLdif cfg.base cfg.system-users}
|
||||
${groupsLdif cfg.base cfg.groups}
|
||||
${usersLdif cfg.base cfg.groups cfg.users}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,238 @@
|
|||
{ lib, config, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.local-network;
|
||||
|
||||
join-lines = concatStringsSep "\n";
|
||||
|
||||
traceout = out: builtins.trace out out;
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.local-network = with types; {
|
||||
|
||||
enable = mkEnableOption "Enable local network configuration (DHCP & DNS).";
|
||||
|
||||
domain = mkOption {
|
||||
type = str;
|
||||
description = "The domain to use for the local network.";
|
||||
};
|
||||
|
||||
dns-servers = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of domain name servers to pass to local clients.";
|
||||
};
|
||||
|
||||
dhcp-interfaces = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of interfaces on which to serve DHCP.";
|
||||
};
|
||||
|
||||
dns-listen-ips = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of IPs on which to server DNS queries.";
|
||||
};
|
||||
|
||||
gateway = mkOption {
|
||||
type = str;
|
||||
description = "The gateway to use for the local network.";
|
||||
};
|
||||
|
||||
network = mkOption {
|
||||
type = str;
|
||||
description = "Network to treat as local.";
|
||||
example = "10.0.0.0/16";
|
||||
};
|
||||
|
||||
dhcp-dynamic-network = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
The network from which to dynamically allocate IPs via DHCP.
|
||||
|
||||
Must be a subnet of <network>.
|
||||
'';
|
||||
example = "10.0.1.0/24";
|
||||
};
|
||||
|
||||
enable-reverse-mappings = mkOption {
|
||||
type = bool;
|
||||
description = "Genereate PTR reverse lookup records.";
|
||||
default = false;
|
||||
};
|
||||
|
||||
recursive-resolver = mkOption {
|
||||
type = str;
|
||||
description = "DNS nameserver to use for recursive resolution.";
|
||||
default = "1.1.1.1 port 53";
|
||||
};
|
||||
|
||||
search-domains = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of domains which clients should consider local.";
|
||||
example = [ "my-domain.com" "other-domain.com" ];
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
network-definition = let
|
||||
networkOpts = import ../types/network-definition.nix { inherit lib; };
|
||||
in mkOption {
|
||||
type = submodule networkOpts;
|
||||
description = "Definition of network to be served by local server.";
|
||||
default = { };
|
||||
};
|
||||
|
||||
extra-records = mkOption {
|
||||
type = listOf str;
|
||||
description = "Extra records to add to the local zone.";
|
||||
default = [ ];
|
||||
};
|
||||
};
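# Minimal usage sketch (all values hypothetical):
#   fudo.local-network = {
#     enable = true;
#     domain = "lan.example.com";
#     network = "10.0.0.0/16";
#     dhcp-dynamic-network = "10.0.1.0/24";
#     gateway = "10.0.0.1";
#     dns-servers = [ "10.0.0.1" ];
#     dns-listen-ips = [ "10.0.0.1" ];
#     dhcp-interfaces = [ "enp2s0" ];
#   };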
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
fudo.system.hostfile-entries = let
|
||||
other-hosts = filterAttrs
|
||||
(hostname: hostOpts: hostname != config.instance.hostname)
|
||||
cfg.network-definition.hosts;
|
||||
in mapAttrs' (hostname: hostOpts:
|
||||
nameValuePair hostOpts.ipv4-address ["${hostname}.${cfg.domain}" hostname])
|
||||
other-hosts;
|
||||
|
||||
services.dhcpd4 = let network = cfg.network-definition;
|
||||
in {
|
||||
enable = true;
|
||||
|
||||
machines = mapAttrsToList (hostname: hostOpts: {
|
||||
ethernetAddress = hostOpts.mac-address;
|
||||
hostName = hostname;
|
||||
ipAddress = hostOpts.ipv4-address;
|
||||
}) (filterAttrs (host: hostOpts:
|
||||
hostOpts.mac-address != null && hostOpts.ipv4-address != null)
|
||||
network.hosts);
|
||||
|
||||
interfaces = cfg.dhcp-interfaces;
|
||||
|
||||
extraConfig = ''
|
||||
subnet ${pkgs.lib.fudo.ip.getNetworkBase cfg.network} netmask ${
|
||||
pkgs.lib.fudo.ip.maskFromV32Network cfg.network
|
||||
} {
|
||||
authoritative;
|
||||
option subnet-mask ${pkgs.lib.fudo.ip.maskFromV32Network cfg.network};
|
||||
option broadcast-address ${pkgs.lib.fudo.ip.networkMaxIp cfg.network};
|
||||
option routers ${cfg.gateway};
|
||||
option domain-name-servers ${concatStringsSep " " cfg.dns-servers};
|
||||
option domain-name "${cfg.domain}";
|
||||
option domain-search "${
|
||||
concatStringsSep " " ([ cfg.domain ] ++ cfg.search-domains)
|
||||
}";
|
||||
range ${pkgs.lib.fudo.ip.networkMinIp cfg.dhcp-dynamic-network} ${
|
||||
pkgs.lib.fudo.ip.networkMaxButOneIp cfg.dhcp-dynamic-network
|
||||
};
|
||||
}
|
||||
'';
|
||||
};
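# Rough sketch of the rendered subnet stanza (hypothetical values, assuming
# the pkgs.lib.fudo.ip helpers behave as their names suggest) for
# network = "10.0.0.0/16" with gateway "10.0.0.1":
#   subnet 10.0.0.0 netmask 255.255.0.0 {
#     authoritative;
#     option subnet-mask 255.255.0.0;
#     option routers 10.0.0.1;
#     ...
#   }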
|
||||
|
||||
services.bind = let
|
||||
blockHostsToZone = block: hosts-data: {
|
||||
master = true;
|
||||
name = "${block}.in-addr.arpa";
|
||||
file = let
|
||||
# We should add these...but need a domain to assign them to.
|
||||
# ip-last-el = ip: toInt (last (splitString "." ip));
|
||||
# used-els = map (host-data: ip-last-el host-data.ipv4-address) hosts-data;
|
||||
# unused-els = subtractLists used-els (map toString (range 1 255));
|
||||
|
||||
in pkgs.writeText "db.${block}-zone" ''
|
||||
$ORIGIN ${block}.in-addr.arpa.
|
||||
$TTL 1h
|
||||
|
||||
@ IN SOA ns1.${cfg.domain}. hostmaster.${cfg.domain}. (
|
||||
${toString config.instance.build-timestamp}
|
||||
1800
|
||||
900
|
||||
604800
|
||||
1800)
|
||||
|
||||
@ IN NS ns1.${cfg.domain}.
|
||||
|
||||
${join-lines (map hostPtrRecord hosts-data)}
|
||||
'';
|
||||
};
|
||||
|
||||
ipToBlock = ip:
|
||||
concatStringsSep "." (reverseList (take 3 (splitString "." ip)));
|
||||
compactHosts =
|
||||
mapAttrsToList (host: data: data // { host = host; }) network.hosts;
|
||||
hostsByBlock =
|
||||
groupBy (host-data: ipToBlock host-data.ipv4-address) compactHosts;
|
||||
hostPtrRecord = host-data:
|
||||
"${
|
||||
last (splitString "." host-data.ipv4-address)
|
||||
} IN PTR ${host-data.host}.${cfg.domain}.";
|
||||
|
||||
blockZones = mapAttrsToList blockHostsToZone hostsByBlock;
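# Example of the grouping above (hypothetical host): for a host at 10.0.0.3,
# ipToBlock returns "0.0.10", so it lands in zone 0.0.10.in-addr.arpa with a
# PTR record of the form "3 IN PTR somehost.<domain>.".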
|
||||
|
||||
hostARecord = host: data: "${host} IN A ${data.ipv4-address}";
|
||||
hostSshFpRecords = host: data:
|
||||
let
|
||||
ssh-fingerprints = if (hasAttr host known-hosts) then
|
||||
known-hosts.${host}.ssh-fingerprints
|
||||
else
|
||||
[ ];
|
||||
in join-lines
|
||||
(map (sshfp: "${host} IN SSHFP ${sshfp}") ssh-fingerprints);
|
||||
cnameRecord = alias: host: "${alias} IN CNAME ${host}";
|
||||
|
||||
network = cfg.network-definition;
|
||||
|
||||
known-hosts = config.fudo.hosts;
|
||||
|
||||
in {
|
||||
enable = true;
|
||||
cacheNetworks = [ cfg.network "localhost" "localnets" ];
|
||||
forwarders = [ cfg.recursive-resolver ];
|
||||
listenOn = cfg.dns-listen-ips;
|
||||
extraOptions = concatStringsSep "\n" [
|
||||
"dnssec-enable yes;"
|
||||
"dnssec-validation yes;"
|
||||
"auth-nxdomain no;"
|
||||
"recursion yes;"
|
||||
"allow-recursion { any; };"
|
||||
];
|
||||
zones = [{
|
||||
master = true;
|
||||
name = cfg.domain;
|
||||
file = pkgs.writeText "${cfg.domain}-zone" ''
|
||||
@ IN SOA ns1.${cfg.domain}. hostmaster.${cfg.domain}. (
|
||||
${toString config.instance.build-timestamp}
|
||||
5m
|
||||
2m
|
||||
6w
|
||||
5m)
|
||||
|
||||
$TTL 1h
|
||||
|
||||
@ IN NS ns1.${cfg.domain}.
|
||||
|
||||
$ORIGIN ${cfg.domain}.
|
||||
|
||||
$TTL 30m
|
||||
|
||||
${optionalString (network.gssapi-realm != null)
|
||||
''_kerberos IN TXT "${network.gssapi-realm}"''}
|
||||
|
||||
${join-lines
|
||||
(imap1 (i: server-ip: "ns${toString i} IN A ${server-ip}")
|
||||
cfg.dns-servers)}
|
||||
${join-lines (mapAttrsToList hostARecord network.hosts)}
|
||||
${join-lines (mapAttrsToList hostSshFpRecords network.hosts)}
|
||||
${join-lines (mapAttrsToList cnameRecord network.aliases)}
|
||||
${join-lines network.verbatim-dns-records}
|
||||
${pkgs.lib.fudo.dns.srvRecordsToBindZone network.srv-records}
|
||||
${join-lines cfg.extra-records}
|
||||
'';
|
||||
}] ++ blockZones;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,221 @@
|
|||
{ pkgs, lib, config, ... }:
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
cfg = config.fudo.mail-server;
|
||||
container-maildir = "/var/lib/mail";
|
||||
container-statedir = "/var/lib/mail-state";
|
||||
|
||||
# Don't bother with group-id, nixos doesn't seem to use it anyway
|
||||
container-mail-user = "mailer";
|
||||
container-mail-user-id = 542;
|
||||
container-mail-group = "mailer";
|
||||
|
||||
build-timestamp = config.instance.build-timestamp;
|
||||
build-seed = config.instance.build-seed;
|
||||
site = config.instance.local-site;
|
||||
domain = cfg.domain;
|
||||
|
||||
local-networks = config.instance.local-networks;
|
||||
|
||||
in rec {
|
||||
config = mkIf (cfg.enableContainer) {
|
||||
# Disable postfix on this host--it'll be run in the container instead
|
||||
services.postfix.enable = false;
|
||||
|
||||
services.nginx = mkIf cfg.monitoring {
|
||||
enable = true;
|
||||
|
||||
virtualHosts = let
|
||||
proxy-headers = ''
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Host $host;
|
||||
'';
|
||||
trusted-network-string =
|
||||
optionalString ((length local-networks) > 0)
|
||||
(concatStringsSep "\n"
|
||||
(map (network: "allow ${network};")
|
||||
local-networks)) + ''
|
||||
|
||||
deny all;'';
|
||||
|
||||
in {
|
||||
"${cfg.mail-hostname}" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
locations."/metrics/postfix" = {
|
||||
proxyPass = "http://127.0.0.1:9154/metrics";
|
||||
|
||||
extraConfig = ''
|
||||
${proxy-headers}
|
||||
|
||||
${trusted-network-string}
|
||||
'';
|
||||
};
|
||||
|
||||
locations."/metrics/dovecot" = {
|
||||
proxyPass = "http://127.0.0.1:9166/metrics";
|
||||
|
||||
extraConfig = ''
|
||||
${proxy-headers}
|
||||
|
||||
${trusted-network-string}
|
||||
'';
|
||||
};
|
||||
|
||||
locations."/metrics/rspamd" = {
|
||||
proxyPass = "http://127.0.0.1:7980/metrics";
|
||||
|
||||
extraConfig = ''
|
||||
${proxy-headers}
|
||||
|
||||
${trusted-network-string}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
containers.mail-server = {
|
||||
|
||||
autoStart = true;
|
||||
|
||||
bindMounts = {
|
||||
"${container-maildir}" = {
|
||||
hostPath = cfg.mail-directory;
|
||||
isReadOnly = false;
|
||||
};
|
||||
|
||||
"${container-statedir}" = {
|
||||
hostPath = cfg.state-directory;
|
||||
isReadOnly = false;
|
||||
};
|
||||
|
||||
"/run/mail/certs/postfix/cert.pem" = {
|
||||
hostPath = cfg.ssl.certificate;
|
||||
isReadOnly = true;
|
||||
};
|
||||
|
||||
"/run/mail/certs/postfix/key.pem" = {
|
||||
hostPath = cfg.ssl.private-key;
|
||||
isReadOnly = true;
|
||||
};
|
||||
|
||||
"/run/mail/certs/dovecot/cert.pem" = {
|
||||
hostPath = cfg.ssl.certificate;
|
||||
isReadOnly = true;
|
||||
};
|
||||
|
||||
"/run/mail/certs/dovecot/key.pem" = {
|
||||
hostPath = cfg.ssl.private-key;
|
||||
isReadOnly = true;
|
||||
};
|
||||
|
||||
"/run/mail/passwords/dovecot/ldap-reader.passwd" = {
|
||||
hostPath = cfg.dovecot.ldap.reader-password-file;
|
||||
isReadOnly = true;
|
||||
};
|
||||
};
|
||||
|
||||
config = { config, pkgs, ... }: {
|
||||
|
||||
imports = let
|
||||
initialize-host = import ../../initialize.nix;
|
||||
profile = "container";
|
||||
in [
|
||||
./mail.nix
|
||||
|
||||
(initialize-host {
|
||||
inherit
|
||||
lib
|
||||
pkgs
|
||||
build-timestamp
|
||||
site
|
||||
domain
|
||||
profile;
|
||||
hostname = "mail-container";
|
||||
})
|
||||
];
|
||||
|
||||
instance.build-seed = build-seed;
|
||||
|
||||
environment.etc = {
|
||||
"mail-server/postfix/cert.pem" = {
|
||||
source = "/run/mail/certs/postfix/cert.pem";
|
||||
user = config.services.postfix.user;
|
||||
mode = "0444";
|
||||
};
|
||||
"mail-server/postfix/key.pem" = {
|
||||
source = "/run/mail/certs/postfix/key.pem";
|
||||
user = config.services.postfix.user;
|
||||
mode = "0400";
|
||||
};
|
||||
"mail-server/dovecot/cert.pem" = {
|
||||
source = "/run/mail/certs/dovecot/cert.pem";
|
||||
user = config.services.dovecot2.user;
|
||||
mode = "0444";
|
||||
};
|
||||
"mail-server/dovecot/key.pem" = {
|
||||
source = "/run/mail/certs/dovecot/key.pem";
|
||||
user = config.services.dovecot2.user;
|
||||
mode = "0400";
|
||||
};
|
||||
|
||||
## The pre-script runs as root anyway...
|
||||
# "mail-server/dovecot/ldap-reader.passwd" = {
|
||||
# source = "/run/mail/passwords/dovecot/ldap-reader.passwd";
|
||||
# user = config.services.dovecot2.user;
|
||||
# mode = "0400";
|
||||
# };
|
||||
};
|
||||
|
||||
fudo = {
|
||||
|
||||
mail-server = {
|
||||
enable = true;
|
||||
mail-hostname = cfg.mail-hostname;
|
||||
domain = cfg.domain;
|
||||
|
||||
debug = cfg.debug;
|
||||
monitoring = cfg.monitoring;
|
||||
|
||||
state-directory = container-statedir;
|
||||
mail-directory = container-maildir;
|
||||
|
||||
postfix = {
|
||||
ssl-certificate = "/etc/mail-server/postfix/cert.pem";
|
||||
ssl-private-key = "/etc/mail-server/postfix/key.pem";
|
||||
};
|
||||
|
||||
dovecot = {
|
||||
ssl-certificate = "/etc/mail-server/dovecot/cert.pem";
|
||||
ssl-private-key = "/etc/mail-server/dovecot/key.pem";
|
||||
ldap = {
|
||||
server-urls = cfg.dovecot.ldap.server-urls;
|
||||
reader-dn = cfg.dovecot.ldap.reader-dn;
|
||||
reader-password-file = "/run/mail/passwords/dovecot/ldap-reader.passwd";
|
||||
};
|
||||
};
|
||||
|
||||
local-domains = cfg.local-domains;
|
||||
|
||||
alias-users = cfg.alias-users;
|
||||
user-aliases = cfg.user-aliases;
|
||||
sender-blacklist = cfg.sender-blacklist;
|
||||
recipient-blacklist = cfg.recipient-blacklist;
|
||||
trusted-networks = cfg.trusted-networks;
|
||||
|
||||
mail-user = container-mail-user;
|
||||
mail-user-id = container-mail-user-id;
|
||||
mail-group = container-mail-group;
|
||||
|
||||
clamav.enable = cfg.clamav.enable;
|
||||
|
||||
dkim.signing = cfg.dkim.signing;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,225 @@
|
|||
{ config, lib, pkgs, environment, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
inherit (lib.strings) concatStringsSep;
|
||||
cfg = config.fudo.mail-server;
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.mail-server = with types; {
|
||||
enable = mkEnableOption "Fudo Email Server";
|
||||
|
||||
enableContainer = mkEnableOption ''
|
||||
Run the mail server in a container.
|
||||
|
||||
Mutually exclusive with mail-server.enable.
|
||||
'';
|
||||
|
||||
domain = mkOption {
|
||||
type = str;
|
||||
description = "The main and default domain name for this email server.";
|
||||
};
|
||||
|
||||
mail-hostname = mkOption {
|
||||
type = str;
|
||||
description = "The domain name to use for the mail server.";
|
||||
};
|
||||
|
||||
ldap-url = mkOption {
|
||||
type = str;
|
||||
description = "URL of the LDAP server to use for authentication.";
|
||||
example = "ldaps://auth.fudo.org/";
|
||||
};
|
||||
|
||||
monitoring = mkEnableOption "Enable monitoring for the mail server.";
|
||||
|
||||
mail-user = mkOption {
|
||||
type = str;
|
||||
description = "User to use for mail delivery.";
|
||||
default = "mailuser";
|
||||
};
|
||||
|
||||
# No group id, because NixOS doesn't seem to use it
|
||||
mail-group = mkOption {
|
||||
type = str;
|
||||
description = "Group to use for mail delivery.";
|
||||
default = "mailgroup";
|
||||
};
|
||||
|
||||
mail-user-id = mkOption {
|
||||
type = int;
|
||||
description = "UID of mail-user.";
|
||||
};
|
||||
|
||||
local-domains = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of domains for which we accept mail.";
|
||||
default = ["localhost" "localhost.localdomain"];
|
||||
example = [
|
||||
"localhost"
|
||||
"localhost.localdomain"
|
||||
"somedomain.com"
|
||||
"otherdomain.org"
|
||||
];
|
||||
};
|
||||
|
||||
mail-directory = mkOption {
|
||||
type = str;
|
||||
description = "Path to use for mail storage.";
|
||||
};
|
||||
|
||||
state-directory = mkOption {
|
||||
type = str;
|
||||
description = "Path to use for state data.";
|
||||
};
|
||||
|
||||
trusted-networks = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of trusted networks, for which we will happily relay without auth.";
|
||||
example = [
|
||||
"10.0.0.0/16"
|
||||
"192.168.0.0/24"
|
||||
];
|
||||
};
|
||||
|
||||
sender-blacklist = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of email addresses for whom we will not send email.";
|
||||
default = [];
|
||||
example = [
|
||||
"baduser@test.com"
|
||||
"change-pw@test.com"
|
||||
];
|
||||
};
|
||||
|
||||
recipient-blacklist = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of email addresses for whom we will not accept email.";
|
||||
default = [];
|
||||
example = [
|
||||
"baduser@test.com"
|
||||
"change-pw@test.com"
|
||||
];
|
||||
};
|
||||
|
||||
message-size-limit = mkOption {
|
||||
type = int;
|
||||
description = "Size of max email in megabytes.";
|
||||
default = 30;
|
||||
};
|
||||
|
||||
user-aliases = mkOption {
|
||||
type = attrsOf (listOf str);
|
||||
description = "A map of real user to list of alias emails.";
|
||||
default = {};
|
||||
example = {
|
||||
someuser = ["alias0" "alias1"];
|
||||
};
|
||||
};
|
||||
|
||||
alias-users = mkOption {
|
||||
type = attrsOf (listOf str);
|
||||
description = "A map of email alias to a list of users.";
|
||||
example = {
|
||||
alias = ["realuser0" "realuser1"];
|
||||
};
|
||||
};
|
||||
|
||||
mailboxes = mkOption {
|
||||
description = ''
|
||||
The mailboxes for dovecot.
|
||||
|
||||
Depending on the mail client used, it might be necessary to change some mailbox names.
|
||||
'';
|
||||
default = {
|
||||
Trash = {
|
||||
auto = "create";
|
||||
specialUse = "Trash";
|
||||
autoexpunge = "30d";
|
||||
};
|
||||
Junk = {
|
||||
auto = "create";
|
||||
specialUse = "Junk";
|
||||
autoexpunge = "60d";
|
||||
};
|
||||
Drafts = {
|
||||
auto = "create";
|
||||
specialUse = "Drafts";
|
||||
autoexpunge = "60d";
|
||||
};
|
||||
Sent = {
|
||||
auto = "subscribe";
|
||||
specialUse = "Sent";
|
||||
};
|
||||
Archive = {
|
||||
auto = "no";
|
||||
specialUse = "Archive";
|
||||
};
|
||||
Flagged = {
|
||||
auto = "no";
|
||||
specialUse = "Flagged";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
debug = mkOption {
|
||||
description = "Enable debugging on mailservers.";
|
||||
type = bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
max-user-connections = mkOption {
|
||||
description = "Max simultaneous connections per user.";
|
||||
type = int;
|
||||
default = 20;
|
||||
};
|
||||
|
||||
ssl = {
|
||||
certificate = mkOption {
|
||||
type = str;
|
||||
description = "Path to the ssl certificate for the mail server to use.";
|
||||
};
|
||||
|
||||
private-key = mkOption {
|
||||
type = str;
|
||||
description = "Path to the ssl private key for the mail server to use.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
imports = [
|
||||
./mail/dkim.nix
|
||||
./mail/dovecot.nix
|
||||
./mail/postfix.nix
|
||||
./mail/rspamd.nix
|
||||
./mail/clamav.nix
|
||||
];
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${cfg.mail-directory} 775 ${cfg.mail-user} ${cfg.mail-group} - -"
|
||||
"d ${cfg.state-directory} 775 root ${cfg.mail-group} - -"
|
||||
];
|
||||
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [ 25 110 143 587 993 995 ];
|
||||
};
|
||||
|
||||
users = {
|
||||
users = {
|
||||
${cfg.mail-user} = {
|
||||
isSystemUser = true;
|
||||
uid = cfg.mail-user-id;
|
||||
group = cfg.mail-group;
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
${cfg.mail-group} = {
|
||||
members = [ cfg.mail-user ];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let cfg = config.fudo.mail-server;
|
||||
|
||||
in {
|
||||
options.fudo.mail-server.clamav = {
|
||||
enable = mkOption {
|
||||
description = "Enable virus scanning with ClamAV.";
|
||||
type = types.bool;
|
||||
default = true;
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf (cfg.enable && cfg.clamav.enable) {
|
||||
|
||||
services.clamav = {
|
||||
daemon = {
|
||||
enable = true;
|
||||
settings = { PhishingScanURLs = "no"; };
|
||||
};
|
||||
updater.enable = true;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,114 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.fudo.mail-server;
|
||||
|
||||
createDomainDkimCert = dom:
|
||||
let
|
||||
dkim_key = "${cfg.dkim.key-directory}/${dom}.${cfg.dkim.selector}.key";
|
||||
dkim_txt = "${cfg.dkim.key-directory}/${dom}.${cfg.dkim.selector}.txt";
|
||||
in
|
||||
''
|
||||
if [ ! -f "${dkim_key}" ] || [ ! -f "${dkim_txt}" ]
|
||||
then
|
||||
${cfg.dkim.package}/bin/opendkim-genkey -s "${cfg.dkim.selector}" \
|
||||
-d "${dom}" \
|
||||
--bits="${toString cfg.dkim.key-bits}" \
|
||||
--directory="${cfg.dkim.key-directory}"
|
||||
mv "${cfg.dkim.key-directory}/${cfg.dkim.selector}.private" "${dkim_key}"
|
||||
mv "${cfg.dkim.key-directory}/${cfg.dkim.selector}.txt" "${dkim_txt}"
|
||||
echo "Generated key for domain ${dom} selector ${cfg.dkim.selector}"
|
||||
fi
|
||||
'';
|
||||
|
||||
createAllCerts = lib.concatStringsSep "\n" (map createDomainDkimCert cfg.local-domains);
|
||||
|
||||
keyTable = pkgs.writeText "opendkim-KeyTable"
|
||||
(lib.concatStringsSep "\n" (lib.flip map cfg.local-domains
|
||||
(dom: "${dom} ${dom}:${cfg.dkim.selector}:${cfg.dkim.key-directory}/${dom}.${cfg.dkim.selector}.key")));
|
||||
signingTable = pkgs.writeText "opendkim-SigningTable"
|
||||
(lib.concatStringsSep "\n" (lib.flip map cfg.local-domains (dom: "${dom} ${dom}")));
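# Sketch of the generated tables for a hypothetical domain example.com with
# the default selector "mail" and key directory /var/dkim:
#   KeyTable:     example.com example.com:mail:/var/dkim/example.com.mail.key
#   SigningTable: example.com example.com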
|
||||
|
||||
dkim = config.services.opendkim;
|
||||
args = [ "-f" "-l" ] ++ lib.optionals (dkim.configFile != null) [ "-x" dkim.configFile ];
|
||||
in
|
||||
{
|
||||
|
||||
options.fudo.mail-server.dkim = {
|
||||
signing = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Enable dkim signatures for mail.";
|
||||
};
|
||||
|
||||
key-directory = mkOption {
|
||||
type = types.str;
|
||||
default = "/var/dkim";
|
||||
description = "Path to use to store DKIM keys.";
|
||||
};
|
||||
|
||||
selector = mkOption {
|
||||
type = types.str;
|
||||
default = "mail";
|
||||
description = "Name to use for mail-signing keys.";
|
||||
};
|
||||
|
||||
key-bits = mkOption {
|
||||
type = types.int;
|
||||
default = 2048;
|
||||
description = ''
|
||||
Number of bits to use for generated DKIM keys. RFC 6376 advises a minimum of 1024 bits.

If you have already deployed a key with a different number of bits than specified
here, you should use a different selector. To get this module to generate a key
with the new number of bits, either change the selector or delete the old key file.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.opendkim;
|
||||
description = "OpenDKIM package to use.";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf (cfg.dkim.signing && cfg.enable) {
|
||||
services.opendkim = {
|
||||
enable = true;
|
||||
selector = cfg.dkim.selector;
|
||||
domains = "csl:${builtins.concatStringsSep "," cfg.local-domains}";
|
||||
configFile = pkgs.writeText "opendkim.conf" (''
|
||||
Canonicalization relaxed/simple
|
||||
UMask 0002
|
||||
Socket ${dkim.socket}
|
||||
KeyTable file:${keyTable}
|
||||
SigningTable file:${signingTable}
|
||||
'' + (lib.optionalString cfg.debug ''
|
||||
Syslog yes
|
||||
SyslogSuccess yes
|
||||
LogWhy yes
|
||||
''));
|
||||
};
|
||||
|
||||
users.users = {
|
||||
"${config.services.postfix.user}" = {
|
||||
extraGroups = [ "${config.services.opendkim.group}" ];
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.opendkim = {
|
||||
preStart = lib.mkForce createAllCerts;
|
||||
serviceConfig = {
|
||||
ExecStart = lib.mkForce "${cfg.dkim.package}/bin/opendkim ${escapeShellArgs args}";
|
||||
PermissionsStartOnly = lib.mkForce false;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d '${cfg.dkim.key-directory}' - ${config.services.opendkim.user} ${config.services.opendkim.group} - -"
|
||||
];
|
||||
};
|
||||
}
|
|
@ -0,0 +1,314 @@
|
|||
{ config, lib, pkgs, environment, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.mail-server;
|
||||
|
||||
sieve-path = "${cfg.state-directory}/dovecot/imap_sieve";
|
||||
|
||||
pipe-bin = pkgs.stdenv.mkDerivation {
|
||||
name = "pipe_bin";
|
||||
src = ./dovecot/pipe_bin;
|
||||
buildInputs = with pkgs; [ makeWrapper coreutils bash rspamd ];
|
||||
buildCommand = ''
|
||||
mkdir -p $out/pipe/bin
|
||||
cp $src/* $out/pipe/bin/
|
||||
chmod a+x $out/pipe/bin/*
|
||||
patchShebangs $out/pipe/bin
|
||||
|
||||
for file in $out/pipe/bin/*; do
|
||||
wrapProgram $file \
|
||||
--set PATH "${pkgs.coreutils}/bin:${pkgs.rspamd}/bin"
|
||||
done
|
||||
'';
|
||||
};
|
||||
|
||||
ldap-conf-template = ldap-cfg:
|
||||
let
|
||||
ssl-config = if (ldap-cfg.ca == null) then ''
|
||||
tls = no
|
||||
tls_require_cert = try
|
||||
'' else ''
|
||||
tls_ca_cert_file = ${ldap-cfg.ca}
|
||||
tls = yes
|
||||
tls_require_cert = try
|
||||
'';
|
||||
in
|
||||
pkgs.writeText "dovecot2-ldap-config.conf.template" ''
|
||||
uris = ${concatStringsSep " " ldap-cfg.server-urls}
|
||||
ldap_version = 3
|
||||
dn = ${ldap-cfg.reader-dn}
|
||||
dnpass = __LDAP_READER_PASSWORD__
|
||||
auth_bind = yes
|
||||
auth_bind_userdn = uid=%u,ou=members,dc=fudo,dc=org
|
||||
base = dc=fudo,dc=org
|
||||
${ssl-config}
|
||||
'';
|
||||
|
||||
ldap-conf-generator = ldap-cfg: let
|
||||
template = ldap-conf-template ldap-cfg;
|
||||
target-dir = dirOf ldap-cfg.generated-ldap-config;
|
||||
target = ldap-cfg.generated-ldap-config;
|
||||
in pkgs.writeScript "dovecot2-ldap-password-swapper.sh" ''
|
||||
mkdir -p ${target-dir}
|
||||
touch ${target}
|
||||
chmod 600 ${target}
|
||||
chown ${config.services.dovecot2.user} ${target}
|
||||
LDAP_READER_PASSWORD=$( cat "${ldap-cfg.reader-password-file}" )
# Double quotes are required so the shell expands $LDAP_READER_PASSWORD into the config
sed "s/__LDAP_READER_PASSWORD__/$LDAP_READER_PASSWORD/" '${template}' > ${target}
|
||||
'';
|
||||
|
||||
ldap-passwd-entry = ldap-config: ''
|
||||
passdb {
|
||||
driver = ldap
|
||||
args = ${ldap-conf "ldap-passdb.conf" ldap-config}
|
||||
}
|
||||
'';
|
||||
|
||||
ldapOpts = {
|
||||
options = with types; {
|
||||
ca = mkOption {
|
||||
type = nullOr str;
|
||||
description = "The path to the CA cert used to sign the LDAP server certificate.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
base = mkOption {
|
||||
type = str;
|
||||
description = "Base of the LDAP server database.";
|
||||
example = "dc=fudo,dc=org";
|
||||
};
|
||||
|
||||
server-urls = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of LDAP server URLs used for authentication.";
|
||||
};
|
||||
|
||||
reader-dn = mkOption {
|
||||
type = str;
|
||||
description = ''
|
||||
DN to use for reading user information. Needs access to homeDirectory,
|
||||
uidNumber, gidNumber, and uid, but not password attributes.
|
||||
'';
|
||||
};
|
||||
|
||||
reader-password-file = mkOption {
|
||||
type = str;
|
||||
description = "Password for the user specified in ldap-reader-dn.";
|
||||
};
|
||||
|
||||
generated-ldap-config = mkOption {
|
||||
type = str;
|
||||
description = "Path at which to store the generated LDAP config file, including password.";
|
||||
default = "/run/dovecot2/config/ldap.conf";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dovecot-user = config.services.dovecot2.user;
|
||||
|
||||
in {
|
||||
options.fudo.mail-server.dovecot = with types; {
|
||||
ssl-private-key = mkOption {
|
||||
type = str;
|
||||
description = "Location of the server SSL private key.";
|
||||
};
|
||||
|
||||
ssl-certificate = mkOption {
|
||||
type = str;
|
||||
description = "Location of the server SSL certificate.";
|
||||
};
|
||||
|
||||
ldap = mkOption {
|
||||
type = nullOr (submodule ldapOpts);
|
||||
default = null;
|
||||
description = ''
|
||||
LDAP auth server configuration. If omitted, the server will use local authentication.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
services.prometheus.exporters.dovecot = mkIf cfg.monitoring {
|
||||
enable = true;
|
||||
scopes = ["user" "global"];
|
||||
listenAddress = "127.0.0.1";
|
||||
port = 9166;
|
||||
socketPath = "/var/run/dovecot2/old-stats";
|
||||
};
|
||||
|
||||
services.dovecot2 = {
|
||||
enable = true;
|
||||
enableImap = true;
|
||||
enableLmtp = true;
|
||||
enablePop3 = true;
|
||||
enablePAM = cfg.dovecot.ldap == null;
|
||||
|
||||
createMailUser = true;
|
||||
|
||||
mailUser = cfg.mail-user;
|
||||
mailGroup = cfg.mail-group;
|
||||
mailLocation = "maildir:${cfg.mail-directory}/%u/";
|
||||
|
||||
sslServerCert = cfg.dovecot.ssl-certificate;
|
||||
sslServerKey = cfg.dovecot.ssl-private-key;
|
||||
|
||||
modules = [ pkgs.dovecot_pigeonhole ];
|
||||
protocols = [ "sieve" ];
|
||||
|
||||
sieveScripts = {
|
||||
after = builtins.toFile "spam.sieve" ''
|
||||
require "fileinto";
|
||||
|
||||
if header :is "X-Spam" "Yes" {
|
||||
fileinto "Junk";
|
||||
stop;
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
mailboxes = cfg.mailboxes;
|
||||
|
||||
extraConfig = ''
|
||||
#Extra Config
|
||||
|
||||
${optionalString cfg.monitoring ''
|
||||
# The prometheus exporter still expects an older style of metrics
|
||||
mail_plugins = $mail_plugins old_stats
|
||||
service old-stats {
|
||||
unix_listener old-stats {
|
||||
user = dovecot-exporter
|
||||
group = dovecot-exporter
|
||||
}
|
||||
}
|
||||
''}
|
||||
|
||||
${lib.optionalString cfg.debug ''
|
||||
mail_debug = yes
|
||||
auth_debug = yes
|
||||
verbose_ssl = yes
|
||||
''}
|
||||
|
||||
protocol imap {
|
||||
mail_max_userip_connections = ${toString cfg.max-user-connections}
|
||||
mail_plugins = $mail_plugins imap_sieve
|
||||
}
|
||||
|
||||
protocol pop3 {
|
||||
mail_max_userip_connections = ${toString cfg.max-user-connections}
|
||||
}
|
||||
|
||||
protocol lmtp {
|
||||
mail_plugins = $mail_plugins sieve
|
||||
}
|
||||
|
||||
mail_access_groups = ${cfg.mail-group}
|
||||
ssl = required
|
||||
|
||||
# When looking up usernames, just use the name, not the full address
|
||||
auth_username_format = %n
|
||||
|
||||
service lmtp {
|
||||
# Enable logging in debug mode
|
||||
${optionalString cfg.debug "executable = lmtp -L"}
|
||||
|
||||
# Unix socket for postfix to deliver messages via lmtp
|
||||
unix_listener dovecot-lmtp {
|
||||
user = "postfix"
|
||||
group = ${cfg.mail-group}
|
||||
mode = 0600
|
||||
}
|
||||
|
||||
# Drop privs, since all mail is owned by one user
|
||||
# user = ${cfg.mail-user}
|
||||
# group = ${cfg.mail-group}
|
||||
user = root
|
||||
}
|
||||
|
||||
auth_mechanisms = login plain
|
||||
|
||||
${optionalString (cfg.dovecot.ldap != null) ''
|
||||
passdb {
|
||||
driver = ldap
|
||||
args = ${cfg.dovecot.ldap.generated-ldap-config}
|
||||
}
|
||||
''}
|
||||
userdb {
|
||||
driver = static
|
||||
args = uid=${toString cfg.mail-user-id} home=${cfg.mail-directory}/%u
|
||||
}
|
||||
|
||||
# Used by postfix to authorize users
|
||||
service auth {
|
||||
unix_listener auth {
|
||||
mode = 0660
|
||||
user = "${config.services.postfix.user}"
|
||||
group = ${cfg.mail-group}
|
||||
}
|
||||
|
||||
unix_listener auth-userdb {
|
||||
mode = 0660
|
||||
user = "${config.services.postfix.user}"
|
||||
group = ${cfg.mail-group}
|
||||
}
|
||||
}
|
||||
|
||||
service auth-worker {
|
||||
user = root
|
||||
}
|
||||
|
||||
service imap {
|
||||
vsz_limit = 1024M
|
||||
}
|
||||
|
||||
namespace inbox {
|
||||
separator = "/"
|
||||
inbox = yes
|
||||
}
|
||||
|
||||
plugin {
|
||||
sieve_plugins = sieve_imapsieve sieve_extprograms
|
||||
sieve = file:/var/sieve/%u/scripts;active=/var/sieve/%u/active.sieve
|
||||
sieve_default = file:/var/sieve/%u/default.sieve
|
||||
sieve_default_name = default
|
||||
# From elsewhere to Spam folder
|
||||
imapsieve_mailbox1_name = Junk
|
||||
imapsieve_mailbox1_causes = COPY
|
||||
imapsieve_mailbox1_before = file:${sieve-path}/report-spam.sieve
|
||||
# From Spam folder to elsewhere
|
||||
imapsieve_mailbox2_name = *
|
||||
imapsieve_mailbox2_from = Junk
|
||||
imapsieve_mailbox2_causes = COPY
|
||||
imapsieve_mailbox2_before = file:${sieve-path}/report-ham.sieve
|
||||
sieve_pipe_bin_dir = ${pipe-bin}/pipe/bin
|
||||
sieve_global_extensions = +vnd.dovecot.pipe +vnd.dovecot.environment
|
||||
}
|
||||
|
||||
recipient_delimiter = +
|
||||
|
||||
lmtp_save_to_detail_mailbox = yes
|
||||
|
||||
lda_mailbox_autosubscribe = yes
|
||||
lda_mailbox_autocreate = yes
|
||||
'';
|
||||
};
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = [
|
||||
"d ${sieve-path} 750 ${dovecot-user} ${cfg.mail-group} - -"
|
||||
];
|
||||
|
||||
services.dovecot2.preStart = ''
|
||||
rm -f ${sieve-path}/*
|
||||
cp -p ${./dovecot/imap_sieve}/*.sieve ${sieve-path}
|
||||
for k in ${sieve-path}/*.sieve ; do
|
||||
${pkgs.dovecot_pigeonhole}/bin/sievec "$k"
|
||||
done
|
||||
|
||||
${optionalString (cfg.dovecot.ldap != null)
|
||||
(ldap-conf-generator cfg.dovecot.ldap)}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
require ["vnd.dovecot.pipe", "copy", "imapsieve", "environment", "variables"];
|
||||
|
||||
if environment :matches "imap.mailbox" "*" {
|
||||
set "mailbox" "${1}";
|
||||
}
|
||||
|
||||
if string "${mailbox}" "Trash" {
|
||||
stop;
|
||||
}
|
||||
|
||||
if environment :matches "imap.user" "*" {
|
||||
set "username" "${1}";
|
||||
}
|
||||
|
||||
pipe :copy "sa-learn-ham.sh" [ "${username}" ];
|
|
@ -0,0 +1,7 @@
|
|||
require ["vnd.dovecot.pipe", "copy", "imapsieve", "environment", "variables"];
|
||||
|
||||
if environment :matches "imap.user" "*" {
|
||||
set "username" "${1}";
|
||||
}
|
||||
|
||||
pipe :copy "sa-learn-spam.sh" [ "${username}" ];
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
set -o errexit
|
||||
exec rspamc -h /run/rspamd/worker-controller.sock learn_ham
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
set -o errexit
|
||||
exec rspamc -h /run/rspamd/worker-controller.sock learn_spam
|
|
@ -0,0 +1,319 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
inherit (lib.strings) concatStringsSep;
|
||||
|
||||
cfg = config.fudo.mail-server;
|
||||
|
||||
# The final newline is important
|
||||
write-entries = filename: entries:
|
||||
let
|
||||
entries-string = (concatStringsSep "\n" entries);
|
||||
in builtins.toFile filename ''
|
||||
${entries-string}
|
||||
'';
|
||||
|
||||
make-user-aliases = entries:
|
||||
concatStringsSep "\n"
|
||||
(mapAttrsToList (user: aliases:
|
||||
concatStringsSep "\n"
|
||||
(map (alias: "${alias} ${user}") aliases))
|
||||
entries);
|
||||
|
||||
make-alias-users = domains: entries:
|
||||
concatStringsSep "\n"
|
||||
(flatten
|
||||
(mapAttrsToList (alias: users:
|
||||
(map (domain:
|
||||
"${alias}@${domain} ${concatStringsSep "," users}")
|
||||
domains))
|
||||
entries));
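# Sketch of the generated virtual-map entries (hypothetical values): with
# user-aliases = { bob = [ "robert" ]; } and alias-users = { postmaster = [ "bob" ]; }
# over the domain example.com, the map gains the lines
#   robert bob
#   postmaster@example.com bob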
|
||||
|
||||
policyd-spf = pkgs.writeText "policyd-spf.conf" (
|
||||
cfg.postfix.policy-spf-extra-config
|
||||
+ (lib.optionalString cfg.debug ''
|
||||
debugLevel = 4
|
||||
''));
|
||||
|
||||
submission-header-cleanup-rules = pkgs.writeText "submission_header_cleanup_rules" (''
|
||||
# Removes sensitive headers from mails handed in via the submission port.
|
||||
# See https://thomas-leister.de/mailserver-debian-stretch/
|
||||
# Uses "pcre" style regex.
|
||||
|
||||
/^Received:/ IGNORE
|
||||
/^X-Originating-IP:/ IGNORE
|
||||
/^X-Mailer:/ IGNORE
|
||||
/^User-Agent:/ IGNORE
|
||||
/^X-Enigmail:/ IGNORE
|
||||
'');
|
||||
blacklist-postfix-entry = sender: "${sender} REJECT";
|
||||
blacklist-postfix-file = filename: entries:
|
||||
write-entries filename entries;
|
||||
sender-blacklist-file = blacklist-postfix-file "reject_senders"
|
||||
(map blacklist-postfix-entry cfg.sender-blacklist);
|
||||
recipient-blacklist-file = blacklist-postfix-file "reject_recipients"
|
||||
(map blacklist-postfix-entry cfg.recipient-blacklist);
|
||||
|
||||
# A list of domains for which we accept mail
|
||||
virtual-mailbox-map-file = write-entries "virtual_mailbox_map"
|
||||
(map (domain: "@${domain} OK") (cfg.local-domains ++ [cfg.domain]));
|
||||
|
||||
sender-login-map-file = let
|
||||
escapeDot = (str: replaceStrings ["."] ["\\."] str);
|
||||
in write-entries "sender_login_maps"
|
||||
(map (domain: "/^(.*)@${escapeDot domain}$/ \${1}") (cfg.local-domains ++ [cfg.domain]));
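# Each generated line is a pcre map entry like (hypothetical domain)
#   /^(.*)@example\.com$/ ${1}
# i.e. a sender address must be owned by the SASL login matching its local part.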
|
||||
|
||||
mapped-file = name: "hash:/var/lib/postfix/conf/${name}";
|
||||
|
||||
pcre-file = name: "pcre:/var/lib/postfix/conf/${name}";
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.mail-server.postfix = {
|
||||
|
||||
ssl-private-key = mkOption {
|
||||
type = types.str;
|
||||
description = "Location of the server SSL private key.";
|
||||
};
|
||||
|
||||
ssl-certificate = mkOption {
|
||||
type = types.str;
|
||||
description = "Location of the server SSL certificate.";
|
||||
};
|
||||
|
||||
policy-spf-extra-config = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
example = ''
|
||||
skip_addresses = 127.0.0.0/8,::ffff:127.0.0.0/104,::1
|
||||
'';
|
||||
description = ''
|
||||
Extra configuration options for policyd-spf. Among other things, this can be
used to skip SPF checking for some IP addresses.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
services.prometheus.exporters.postfix = mkIf cfg.monitoring {
|
||||
enable = true;
|
||||
systemd.enable = true;
|
||||
showqPath = "/var/lib/postfix/queue/public/showq";
|
||||
user = config.services.postfix.user;
|
||||
group = config.services.postfix.group;
|
||||
};
|
||||
|
||||
services.postfix = {
|
||||
enable = true;
|
||||
domain = cfg.domain;
|
||||
origin = cfg.domain;
|
||||
hostname = cfg.mail-hostname;
|
||||
destination = ["localhost" "localhost.localdomain"];
|
||||
# destination = ["localhost" "localhost.localdomain" cfg.hostname] ++
|
||||
# cfg.local-domains;;
|
||||
|
||||
enableHeaderChecks = true;
|
||||
enableSmtp = true;
|
||||
enableSubmission = true;
|
||||
|
||||
mapFiles."reject_senders" = sender-blacklist-file;
|
||||
mapFiles."reject_recipients" = recipient-blacklist-file;
|
||||
mapFiles."virtual_mailbox_map" = virtual-mailbox-map-file;
|
||||
mapFiles."sender_login_map" = sender-login-map-file;
|
||||
|
||||
# TODO: enable!
|
||||
# headerChecks = [ { action = "REDIRECT spam@example.com"; pattern = "/^X-Spam-Flag:/"; } ];
|
||||
networks = cfg.trusted-networks;
|
||||
|
||||
virtual = ''
|
||||
${make-user-aliases cfg.user-aliases}
|
||||
|
||||
${make-alias-users ([cfg.domain] ++ cfg.local-domains) cfg.alias-users}
|
||||
'';
|
||||
|
||||
sslCert = cfg.postfix.ssl-certificate;
|
||||
sslKey = cfg.postfix.ssl-private-key;
|
||||
|
||||
config = {
|
||||
virtual_mailbox_domains = cfg.local-domains ++ [cfg.domain];
|
||||
# virtual_mailbox_base = "${cfg.mail-directory}/";
|
||||
virtual_mailbox_maps = mapped-file "virtual_mailbox_map";
|
||||
|
||||
virtual_uid_maps = "static:${toString cfg.mail-user-id}";
|
||||
virtual_gid_maps = "static:${toString config.users.groups."${cfg.mail-group}".gid}";
|
||||
|
||||
virtual_transport = "lmtp:unix:/run/dovecot2/dovecot-lmtp";
|
||||
|
||||
# NOTE: it's important that this ends with /, to indicate Maildir format!
|
||||
# mail_spool_directory = "${cfg.mail-directory}/";
|
||||
message_size_limit = toString(cfg.message-size-limit * 1024 * 1024);
|
||||
|
||||
smtpd_banner = "${cfg.mail-hostname} ESMTP NO UCE";
|
||||
|
||||
tls_eecdh_strong_curve = "prime256v1";
|
||||
tls_eecdh_ultra_curve = "secp384r1";
|
||||
|
||||
policy-spf_time_limit = "3600s";
|
||||
|
||||
smtp_host_lookup = "dns, native";
|
||||
|
||||
smtpd_sasl_type = "dovecot";
|
||||
smtpd_sasl_path = "/run/dovecot2/auth";
|
||||
smtpd_sasl_auth_enable = "yes";
|
||||
smtpd_sasl_local_domain = "fudo.org";
|
||||
|
||||
smtpd_sasl_security_options = "noanonymous";
|
||||
smtpd_sasl_tls_security_options = "noanonymous";
|
||||
|
||||
smtpd_sender_login_maps = (pcre-file "sender_login_map");
|
||||
|
||||
disable_vrfy_command = "yes";
|
||||
|
||||
recipient_delimiter = "+";
|
||||
|
||||
milter_protocol = "6";
|
||||
milter_mail_macros = "i {mail_addr} {client_addr} {client_name} {auth_type} {auth_authen} {auth_author} {mail_addr} {mail_host} {mail_mailer}";
|
||||
|
||||
smtpd_milters = [
|
||||
"unix:/run/rspamd/rspamd-milter.sock"
|
||||
"unix:/var/run/opendkim/opendkim.sock"
|
||||
];
|
||||
|
||||
non_smtpd_milters = [
|
||||
"unix:/run/rspamd/rspamd-milter.sock"
|
||||
"unix:/var/run/opendkim/opendkim.sock"
|
||||
];
|
||||
|
||||
smtpd_relay_restrictions = [
|
||||
"permit_mynetworks"
|
||||
"permit_sasl_authenticated"
|
||||
"reject_unauth_destination"
|
||||
"reject_unauth_pipelining"
|
||||
"reject_unauth_destination"
|
||||
"reject_unknown_sender_domain"
|
||||
];
|
||||
|
||||
smtpd_sender_restrictions = [
|
||||
"check_sender_access ${mapped-file "reject_senders"}"
|
||||
"permit_mynetworks"
|
||||
"permit_sasl_authenticated"
|
||||
"reject_unknown_sender_domain"
|
||||
];
|
||||
|
||||
smtpd_recipient_restrictions = [
|
||||
"check_sender_access ${mapped-file "reject_recipients"}"
|
||||
"permit_mynetworks"
|
||||
"permit_sasl_authenticated"
|
||||
"check_policy_service unix:private/policy-spf"
|
||||
"reject_unknown_recipient_domain"
|
||||
"reject_unauth_pipelining"
|
||||
"reject_unauth_destination"
|
||||
"reject_invalid_hostname"
|
||||
"reject_non_fqdn_hostname"
|
||||
"reject_non_fqdn_sender"
|
||||
"reject_non_fqdn_recipient"
|
||||
];
|
||||
|
||||
smtpd_helo_restrictions = [
|
||||
"permit_mynetworks"
|
||||
"reject_invalid_hostname"
|
||||
"permit"
|
||||
];
|
||||
|
||||
# Handled by submission
|
||||
smtpd_tls_security_level = "may";
|
||||
|
||||
smtpd_tls_eecdh_grade = "ultra";
|
||||
|
||||
# Disable obsolete protocols
|
||||
smtpd_tls_protocols = [
|
||||
"TLSv1.2"
|
||||
"TLSv1.1"
|
||||
"!TLSv1"
|
||||
"!SSLv2"
|
||||
"!SSLv3"
|
||||
];
|
||||
smtp_tls_protocols = [
|
||||
"TLSv1.2"
|
||||
"TLSv1.1"
|
||||
"!TLSv1"
|
||||
"!SSLv2"
|
||||
"!SSLv3"
|
||||
];
|
||||
smtpd_tls_mandatory_protocols = [
|
||||
"TLSv1.2"
|
||||
"TLSv1.1"
|
||||
"!TLSv1"
|
||||
"!SSLv2"
|
||||
"!SSLv3"
|
||||
];
|
||||
smtp_tls_mandatory_protocols = [
|
||||
"TLSv1.2"
|
||||
"TLSv1.1"
|
||||
"!TLSv1"
|
||||
"!SSLv2"
|
||||
"!SSLv3"
|
||||
];
|
||||
|
||||
smtp_tls_ciphers = "high";
|
||||
smtpd_tls_ciphers = "high";
|
||||
smtp_tls_mandatory_ciphers = "high";
|
||||
smtpd_tls_mandatory_ciphers = "high";
|
||||
|
||||
smtpd_tls_mandatory_exclude_ciphers = ["MD5" "DES" "ADH" "RC4" "PSD" "SRP" "3DES" "eNULL" "aNULL"];
|
||||
smtpd_tls_exclude_ciphers = ["MD5" "DES" "ADH" "RC4" "PSD" "SRP" "3DES" "eNULL" "aNULL"];
|
||||
smtp_tls_mandatory_exclude_ciphers = ["MD5" "DES" "ADH" "RC4" "PSD" "SRP" "3DES" "eNULL" "aNULL"];
|
||||
smtp_tls_exclude_ciphers = ["MD5" "DES" "ADH" "RC4" "PSD" "SRP" "3DES" "eNULL" "aNULL"];
|
||||
|
||||
tls_preempt_cipherlist = "yes";
|
||||
|
||||
smtpd_tls_auth_only = "yes";
|
||||
|
||||
smtpd_tls_loglevel = "1";
|
||||
|
||||
tls_random_source = "dev:/dev/urandom";
|
||||
};
|
||||
|
||||
submissionOptions = {
|
||||
smtpd_tls_security_level = "encrypt";
|
||||
smtpd_sasl_auth_enable = "yes";
|
||||
smtpd_sasl_type = "dovecot";
|
||||
smtpd_sasl_path = "/run/dovecot2/auth";
|
||||
smtpd_sasl_security_options = "noanonymous";
|
||||
smtpd_sasl_local_domain = cfg.domain;
|
||||
smtpd_client_restrictions = "permit_sasl_authenticated,reject";
|
||||
smtpd_sender_restrictions = "reject_sender_login_mismatch,reject_unknown_sender_domain";
|
||||
smtpd_recipient_restrictions = "reject_non_fqdn_recipient,reject_unknown_recipient_domain,permit_sasl_authenticated,reject";
|
||||
cleanup_service_name = "submission-header-cleanup";
|
||||
};
|
||||
|
||||
masterConfig = {
|
||||
"policy-spf" = {
|
||||
type = "unix";
|
||||
privileged = true;
|
||||
chroot = false;
|
||||
command = "spawn";
|
||||
args = [ "user=nobody" "argv=${pkgs.pypolicyd-spf}/bin/policyd-spf" "${policyd-spf}"];
|
||||
};
|
||||
"submission-header-cleanup" = {
|
||||
type = "unix";
|
||||
private = false;
|
||||
chroot = false;
|
||||
maxproc = 0;
|
||||
command = "cleanup";
|
||||
args = ["-o" "header_checks=pcre:${submission-header-cleanup-rules}"];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Postfix requires dovecot lmtp socket, dovecot auth socket and certificate to work
|
||||
systemd.services.postfix = {
|
||||
after = [ "dovecot2.service" ]
|
||||
++ (lib.optional cfg.dkim.signing "opendkim.service");
|
||||
requires = [ "dovecot2.service" ]
|
||||
++ (lib.optional cfg.dkim.signing "opendkim.service");
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.mail-server;
|
||||
|
||||
in {
|
||||
config = mkIf cfg.enable {
|
||||
services.prometheus.exporters.rspamd.enable = true;
|
||||
|
||||
services.rspamd = {
|
||||
|
||||
enable = true;
|
||||
|
||||
locals = {
|
||||
"milter_headers.conf" = {
|
||||
text = ''
|
||||
extended_spam_headers = yes;
|
||||
'';
|
||||
};
|
||||
|
||||
"antivirus.conf" = {
|
||||
text = ''
|
||||
clamav {
|
||||
action = "reject";
|
||||
symbol = "CLAM_VIRUS";
|
||||
type = "clamav";
|
||||
log_clean = true;
|
||||
servers = "/run/clamav/clamd.ctl";
|
||||
scan_mime_parts = false; # scan mail as a whole unit, not parts. seems to be needed to work at all
|
||||
}
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
overrides = {
|
||||
"milter_headers.conf" = {
|
||||
text = ''
|
||||
extended_spam_headers = true;
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
workers.rspamd_proxy = {
|
||||
type = "rspamd_proxy";
|
||||
bindSockets = [{
|
||||
socket = "/run/rspamd/rspamd-milter.sock";
|
||||
mode = "0664";
|
||||
}];
|
||||
count = 1; # Do not spawn too many processes of this type
|
||||
extraConfig = ''
|
||||
milter = yes; # Enable milter mode
|
||||
timeout = 120s; # Needed for Milter usually
|
||||
|
||||
upstream "local" {
|
||||
default = yes; # Self-scan upstreams are always default
|
||||
self_scan = yes; # Enable self-scan
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
workers.controller = {
|
||||
type = "controller";
|
||||
count = 1;
|
||||
bindSockets = [
|
||||
"localhost:11334"
|
||||
{
|
||||
socket = "/run/rspamd/worker-controller.sock";
|
||||
mode = "0666";
|
||||
}
|
||||
];
|
||||
includes = [];
|
||||
};
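# The worker-controller socket above is the same socket the dovecot sieve pipe
# scripts (sa-learn-ham.sh / sa-learn-spam.sh) use to feed rspamc learn_ham /
# learn_spam.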
|
||||
};
|
||||
|
||||
systemd.services.rspamd = {
|
||||
requires = (optional cfg.clamav.enable "clamav-daemon.service");
|
||||
after = (optional cfg.clamav.enable "clamav-daemon.service");
|
||||
};
|
||||
|
||||
systemd.services.postfix = {
|
||||
after = [ "rspamd.service" ];
|
||||
requires = [ "rspamd.service" ];
|
||||
};
|
||||
|
||||
users.extraUsers.${config.services.postfix.user}.extraGroups = [ config.services.rspamd.group ];
|
||||
};
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.minecraft-server;
|
||||
|
||||
in {
|
||||
options.fudo.minecraft-server = {
|
||||
enable = mkEnableOption "Start a minecraft server.";
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
description = "Minecraft package to use.";
|
||||
default = pkgs.minecraft-server_1_15_1;
|
||||
};
|
||||
|
||||
data-dir = mkOption {
|
||||
type = types.path;
|
||||
description = "Path at which to store minecraft data.";
|
||||
};
|
||||
|
||||
world-name = mkOption {
|
||||
type = types.str;
|
||||
description = "Name of the server world (used in saves etc).";
|
||||
};
|
||||
|
||||
motd = mkOption {
|
||||
type = types.str;
|
||||
description = "Welcome message for newcomers.";
|
||||
};
|
||||
|
||||
game-mode = mkOption {
|
||||
type = types.enum ["survival" "creative" "adventure" "spectator"];
|
||||
description = "Game mode of the server.";
|
||||
default = "survival";
|
||||
};
|
||||
|
||||
difficulty = mkOption {
|
||||
type = types.int;
|
||||
description = "Difficulty level, where 0 is peaceful and 3 is hard.";
|
||||
default = 2;
|
||||
};
|
||||
};
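# Minimal usage sketch (hypothetical values):
#   fudo.minecraft-server = {
#     enable = true;
#     data-dir = "/var/lib/minecraft";
#     world-name = "fudo";
#     motd = "Welcome to the Fudo server!";
#   };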
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [
|
||||
cfg.package
|
||||
];
|
||||
|
||||
services.minecraft-server = {
|
||||
enable = true;
|
||||
package = cfg.package;
|
||||
dataDir = cfg.data-dir;
|
||||
eula = true;
|
||||
declarative = true;
|
||||
serverProperties = {
|
||||
level-name = cfg.world-name;
|
||||
motd = cfg.motd;
|
||||
difficulty = cfg.difficulty;
|
||||
gamemode = cfg.game-mode;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,93 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.netinfo-email;
|
||||
|
||||
make-script = server: port: target: pkgs.writeText "netinfo-script.rb" ''
|
||||
#!${pkgs.ruby}/bin/ruby
|
||||
|
||||
require 'net/smtp'
|
||||
|
||||
raise RuntimeError.new("NETINFO_SMTP_USERNAME not set!") if not ENV['NETINFO_SMTP_USERNAME']
|
||||
user = ENV['NETINFO_SMTP_USERNAME']
|
||||
|
||||
raise RuntimeError.new("NETINFO_SMTP_PASSWD not set!") if not ENV['NETINFO_SMTP_PASSWD']
|
||||
passwd = ENV['NETINFO_SMTP_PASSWD']
|
||||
|
||||
hostname = `${pkgs.inetutils}/bin/hostname -f`.strip
|
||||
date = `${pkgs.coreutils}/bin/date +%Y-%m-%d`.strip
|
||||
email_date = `${pkgs.coreutils}/bin/date`
|
||||
ipinfo = `${pkgs.iproute}/bin/ip addr`
|
||||
|
||||
message = <<EOM
|
||||
From: #{user}@fudo.org
|
||||
To: ${target}
|
||||
Subject: #{hostname} network info for #{date}
|
||||
Date: #{email_date}
|
||||
|
||||
#{ipinfo}
|
||||
EOM
|
||||
|
||||
smtp = Net::SMTP.new("${server}", ${toString port})
|
||||
smtp.enable_starttls
|
||||
|
||||
smtp.start('localhost', user, passwd) do |server|
|
||||
server.send_message(message, "#{user}@fudo.org", ["${target}"])
|
||||
end
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.netinfo-email = {
|
||||
enable = mkEnableOption "Enable netinfo email (hacky way to keep track of a host's IP";
|
||||
|
||||
smtp-server = mkOption {
|
||||
type = types.str;
|
||||
default = "mail.fudo.org";
|
||||
};
|
||||
|
||||
smtp-port = mkOption {
|
||||
type = types.port;
|
||||
default = 587;
|
||||
};
|
||||
|
||||
env-file = mkOption {
|
||||
type = types.str;
|
||||
description = "Path to file containing NETINFO_SMTP_USERNAME and NETINFO_SMTP_PASSWD";
|
||||
};
|
||||
|
||||
target-email = mkOption {
|
||||
type = types.str;
|
||||
default = "network-info@fudo.link";
|
||||
description = "Email to which to send network info report.";
|
||||
};
|
||||
};
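# Minimal usage sketch (hypothetical path); the env file must define
# NETINFO_SMTP_USERNAME and NETINFO_SMTP_PASSWD:
#   fudo.netinfo-email = {
#     enable = true;
#     env-file = "/run/keys/netinfo-smtp.env";
#   };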
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd = {
|
||||
timers.netinfo = {
|
||||
enable = true;
|
||||
description = "Send network info to ${cfg.target-email}";
|
||||
partOf = ["netinfo.service"];
|
||||
wantedBy = [ "timers.target" ];
|
||||
requires = [ "network-online.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "daily";
|
||||
};
|
||||
};
|
||||
|
||||
services.netinfo = {
|
||||
enable = true;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
StandardOutput = "journal";
|
||||
EnvironmentFile = cfg.env-file;
|
||||
};
|
||||
script = ''
|
||||
${pkgs.ruby}/bin/ruby ${make-script cfg.smtp-server cfg.smtp-port cfg.target-email}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -0,0 +1,25 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
with types;
|
||||
let networkOpts = import ../types/network-definition.nix { inherit lib; };
|
||||
|
||||
in {
|
||||
options.fudo.networks = mkOption {
|
||||
type = attrsOf (submodule networkOpts);
|
||||
description = "A map of networks to network definitions.";
|
||||
default = { };
|
||||
};
|
||||
|
||||
config = let
|
||||
domain-name = config.instance.local-domain;
|
||||
local-networks = map (network: "ip4:${network}")
|
||||
config.fudo.domains.${domain-name}.local-networks;
|
||||
local-net-string = concatStringsSep " " local-networks;
|
||||
in {
|
||||
fudo.networks.${domain-name}.verbatim-dns-records = [
|
||||
''@ IN TXT "v=spf1 mx ${local-net-string} -all"''
|
||||
''@ IN SPF "v=spf1 mx ${local-net-string} -all"''
|
||||
];
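# For hypothetical local-networks = [ "10.0.0.0/16" ] this renders as
#   @ IN TXT "v=spf1 mx ip4:10.0.0.0/16 -all"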
|
||||
};
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
{ lib, config, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
inherit (lib.strings) concatStringsSep;
|
||||
|
||||
cfg = config.fudo.node-exporter;
|
||||
fudo-cfg = config.fudo.common;
|
||||
|
||||
allow-network = network: "allow ${network};";
|
||||
|
||||
in {
|
||||
options.fudo.node-exporter = {
|
||||
enable = mkEnableOption "Enable a Prometheus node exporter with some reasonable settings.";
|
||||
|
||||
hostname = mkOption {
|
||||
type = types.str;
|
||||
description = "Hostname from which to export statistics.";
|
||||
};
|
||||
};
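# Minimal usage sketch (hypothetical hostname):
#   fudo.node-exporter = {
#     enable = true;
#     hostname = "stats.example.com";
#   };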
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
security.acme.certs.${cfg.hostname}.email = fudo-cfg.admin-email;
|
||||
|
||||
services = {
|
||||
# This'll run an exporter at localhost:9100
|
||||
prometheus.exporters.node = {
|
||||
enable = true;
|
||||
enabledCollectors = [ "systemd" ];
|
||||
listenAddress = "127.0.0.1";
|
||||
port = 9100;
|
||||
user = "node";
|
||||
};
|
||||
|
||||
# ...And this'll expose the above to the outside world, or at least the
|
||||
# list of trusted networks, with SSL protection.
|
||||
nginx = {
|
||||
enable = true;
|
||||
|
||||
virtualHosts."${cfg.hostname}" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
locations."/metrics/node" = {
|
||||
extraConfig = ''
|
||||
${concatStringsSep "\n" (map allow-network fudo-cfg.local-networks)}
|
||||
allow 127.0.0.0/16;
|
||||
deny all;
|
||||
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Host $host;
|
||||
'';
|
||||
|
||||
proxyPass = "http://127.0.0.1:9100/metrics";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
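# Illustrative sketch (not part of this commit): enabling the node-exporter
# module above on a host. The hostname is an assumed FQDN; it must resolve to
# this host so the ACME/nginx virtual host can serve /metrics/node over TLS.
{ ... }: {
  fudo.node-exporter = {
    enable = true;
    hostname = "stats.example.fudo.org";
  };
}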
@@ -0,0 +1,978 @@
|
|||
### NOTE:
|
||||
## This is a copy of the upstream version, which allows for overriding the state directory
|
||||
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.fudo.nsd;
|
||||
|
||||
username = "nsd";
|
||||
stateDir = cfg.stateDir;
|
||||
pidFile = stateDir + "/var/nsd.pid";
|
||||
|
||||
# build nsd with the options needed for the given config
|
||||
nsdPkg = pkgs.nsd.override {
|
||||
bind8Stats = cfg.bind8Stats;
|
||||
ipv6 = cfg.ipv6;
|
||||
ratelimit = cfg.ratelimit.enable;
|
||||
rootServer = cfg.rootServer;
|
||||
zoneStats = length (collect (x: (x.zoneStats or null) != null) cfg.zones) > 0;
|
||||
};
|
||||
|
||||
mkZoneFileName = name: if name == "." then "root" else name;
|
||||
|
||||
# replaces include: directives for keys with fake keys for nsd-checkconf
|
||||
injectFakeKeys = keys: concatStrings
|
||||
(mapAttrsToList
|
||||
(keyName: keyOptions: ''
|
||||
fakeKey="$(${pkgs.bind}/bin/tsig-keygen -a ${escapeShellArgs [ keyOptions.algorithm keyName ]} | grep -oP "\s*secret \"\K.*(?=\";)")"
|
||||
sed "s@^\s*include:\s*\"${stateDir}/private/${keyName}\"\$@secret: $fakeKey@" -i $out/nsd.conf
|
||||
'')
|
||||
keys);
|
||||
|
||||
nsdEnv = pkgs.buildEnv {
|
||||
name = "nsd-env";
|
||||
|
||||
paths = [ configFile ]
|
||||
++ mapAttrsToList (name: zone: writeZoneData name zone.data) zoneConfigs;
|
||||
|
||||
postBuild = ''
|
||||
echo "checking zone files"
|
||||
cd $out/zones
|
||||
for zoneFile in *; do
|
||||
echo "|- checking zone '$out/zones/$zoneFile'"
|
||||
${nsdPkg}/sbin/nsd-checkzone "$zoneFile" "$zoneFile" || {
|
||||
if grep -q \\\\\\$ "$zoneFile"; then
|
||||
echo zone "$zoneFile" contains escaped dollar signs \\\$
|
||||
echo Escaping them is not needed any more. Please make sure \
|
||||
to unescape them where they prefix a variable name.
|
||||
fi
|
||||
exit 1
|
||||
}
|
||||
done
|
||||
echo "checking configuration file"
|
||||
# Save original config file including key references...
|
||||
cp $out/nsd.conf{,.orig}
|
||||
# ...inject mock keys into config
|
||||
${injectFakeKeys cfg.keys}
|
||||
# ...do the checkconf
|
||||
${nsdPkg}/sbin/nsd-checkconf $out/nsd.conf
|
||||
# ... and restore original config file.
|
||||
mv $out/nsd.conf{.orig,}
|
||||
'';
|
||||
};
|
||||
|
||||
writeZoneData = name: text: pkgs.writeTextFile {
|
||||
name = "nsd-zone-${mkZoneFileName name}";
|
||||
inherit text;
|
||||
destination = "/zones/${mkZoneFileName name}";
|
||||
};
|
||||
|
||||
|
||||
# options are ordered alphanumerically by the nixos option name
|
||||
configFile = pkgs.writeTextDir "nsd.conf" ''
|
||||
server:
|
||||
chroot: "${stateDir}"
|
||||
username: ${username}
|
||||
# The directory for zonefile: files. The daemon chdirs here.
|
||||
zonesdir: "${stateDir}"
|
||||
# the list of dynamically added zones.
|
||||
database: "${stateDir}/var/nsd.db"
|
||||
pidfile: "${pidFile}"
|
||||
xfrdfile: "${stateDir}/var/xfrd.state"
|
||||
xfrdir: "${stateDir}/tmp"
|
||||
zonelistfile: "${stateDir}/var/zone.list"
|
||||
# interfaces
|
||||
${forEach " ip-address: " cfg.interfaces}
|
||||
ip-freebind: ${yesOrNo cfg.ipFreebind}
|
||||
hide-version: ${yesOrNo cfg.hideVersion}
|
||||
identity: "${cfg.identity}"
|
||||
ip-transparent: ${yesOrNo cfg.ipTransparent}
|
||||
do-ip4: ${yesOrNo cfg.ipv4}
|
||||
ipv4-edns-size: ${toString cfg.ipv4EDNSSize}
|
||||
do-ip6: ${yesOrNo cfg.ipv6}
|
||||
ipv6-edns-size: ${toString cfg.ipv6EDNSSize}
|
||||
log-time-ascii: ${yesOrNo cfg.logTimeAscii}
|
||||
${maybeString "nsid: " cfg.nsid}
|
||||
port: ${toString cfg.port}
|
||||
reuseport: ${yesOrNo cfg.reuseport}
|
||||
round-robin: ${yesOrNo cfg.roundRobin}
|
||||
server-count: ${toString cfg.serverCount}
|
||||
${maybeToString "statistics: " cfg.statistics}
|
||||
tcp-count: ${toString cfg.tcpCount}
|
||||
tcp-query-count: ${toString cfg.tcpQueryCount}
|
||||
tcp-timeout: ${toString cfg.tcpTimeout}
|
||||
verbosity: ${toString cfg.verbosity}
|
||||
${maybeString "version: " cfg.version}
|
||||
xfrd-reload-timeout: ${toString cfg.xfrdReloadTimeout}
|
||||
zonefiles-check: ${yesOrNo cfg.zonefilesCheck}
|
||||
${maybeString "rrl-ipv4-prefix-length: " cfg.ratelimit.ipv4PrefixLength}
|
||||
${maybeString "rrl-ipv6-prefix-length: " cfg.ratelimit.ipv6PrefixLength}
|
||||
rrl-ratelimit: ${toString cfg.ratelimit.ratelimit}
|
||||
${maybeString "rrl-slip: " cfg.ratelimit.slip}
|
||||
rrl-size: ${toString cfg.ratelimit.size}
|
||||
rrl-whitelist-ratelimit: ${toString cfg.ratelimit.whitelistRatelimit}
|
||||
${keyConfigFile}
|
||||
remote-control:
|
||||
control-enable: ${yesOrNo cfg.remoteControl.enable}
|
||||
control-key-file: "${cfg.remoteControl.controlKeyFile}"
|
||||
control-cert-file: "${cfg.remoteControl.controlCertFile}"
|
||||
${forEach " control-interface: " cfg.remoteControl.interfaces}
|
||||
control-port: ${toString cfg.remoteControl.port}
|
||||
server-key-file: "${cfg.remoteControl.serverKeyFile}"
|
||||
server-cert-file: "${cfg.remoteControl.serverCertFile}"
|
||||
${concatStrings (mapAttrsToList zoneConfigFile zoneConfigs)}
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
yesOrNo = b: if b then "yes" else "no";
|
||||
maybeString = prefix: x: if x == null then "" else ''${prefix} "${x}"'';
|
||||
maybeToString = prefix: x: if x == null then "" else ''${prefix} ${toString x}'';
|
||||
forEach = pre: l: concatMapStrings (x: pre + x + "\n") l;
|
||||
|
||||
|
||||
keyConfigFile = concatStrings (mapAttrsToList (keyName: keyOptions: ''
|
||||
key:
|
||||
name: "${keyName}"
|
||||
algorithm: "${keyOptions.algorithm}"
|
||||
include: "${stateDir}/private/${keyName}"
|
||||
'') cfg.keys);
|
||||
|
||||
copyKeys = concatStrings (mapAttrsToList (keyName: keyOptions: ''
|
||||
secret=$(cat "${keyOptions.keyFile}")
|
||||
dest="${stateDir}/private/${keyName}"
|
||||
echo " secret: \"$secret\"" > "$dest"
|
||||
chown ${username}:${username} "$dest"
|
||||
chmod 0400 "$dest"
|
||||
'') cfg.keys);
|
||||
|
||||
|
||||
# options are ordered alphanumerically by the nixos option name
|
||||
zoneConfigFile = name: zone: ''
|
||||
zone:
|
||||
name: "${name}"
|
||||
zonefile: "${stateDir}/zones/${mkZoneFileName name}"
|
||||
${maybeString "outgoing-interface: " zone.outgoingInterface}
|
||||
${forEach " rrl-whitelist: " zone.rrlWhitelist}
|
||||
${maybeString "zonestats: " zone.zoneStats}
|
||||
${maybeToString "max-refresh-time: " zone.maxRefreshSecs}
|
||||
${maybeToString "min-refresh-time: " zone.minRefreshSecs}
|
||||
${maybeToString "max-retry-time: " zone.maxRetrySecs}
|
||||
${maybeToString "min-retry-time: " zone.minRetrySecs}
|
||||
allow-axfr-fallback: ${yesOrNo zone.allowAXFRFallback}
|
||||
${forEach " allow-notify: " zone.allowNotify}
|
||||
${forEach " request-xfr: " zone.requestXFR}
|
||||
${forEach " notify: " zone.notify}
|
||||
notify-retry: ${toString zone.notifyRetry}
|
||||
${forEach " provide-xfr: " zone.provideXFR}
|
||||
'';
|
||||
|
||||
zoneConfigs = zoneConfigs' {} "" { children = cfg.zones; };
|
||||
|
||||
zoneConfigs' = parent: name: zone:
|
||||
if !(zone ? children) || zone.children == null || zone.children == { }
|
||||
# leaf -> actual zone
|
||||
then listToAttrs [ (nameValuePair name (parent // zone)) ]
|
||||
|
||||
# fork -> pattern
|
||||
else zipAttrsWith (name: head) (
|
||||
mapAttrsToList (name: child: zoneConfigs' (parent // zone // { children = {}; }) name child)
|
||||
zone.children
|
||||
);
|
||||
|
||||
# fighting infinite recursion
|
||||
zoneOptions = zoneOptionsRaw // childConfig zoneOptions1 true;
|
||||
zoneOptions1 = zoneOptionsRaw // childConfig zoneOptions2 false;
|
||||
zoneOptions2 = zoneOptionsRaw // childConfig zoneOptions3 false;
|
||||
zoneOptions3 = zoneOptionsRaw // childConfig zoneOptions4 false;
|
||||
zoneOptions4 = zoneOptionsRaw // childConfig zoneOptions5 false;
|
||||
zoneOptions5 = zoneOptionsRaw // childConfig zoneOptions6 false;
|
||||
zoneOptions6 = zoneOptionsRaw // childConfig null false;
|
||||
|
||||
childConfig = x: v: { options.children = { type = types.attrsOf x; visible = v; }; };
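  # Illustrative sketch (not in the upstream copy): a nested definition such as
  #   zones."group1" = {
  #     provideXFR = [ "10.1.2.3 NOKEY" ];
  #     children."example.com." = { data = "..."; };
  #   };
  # is flattened by zoneConfigs' into
  #   { "example.com." = { provideXFR = [ "10.1.2.3 NOKEY" ]; data = "..."; }; }
  # i.e. leaf zones inherit every attribute of their ancestors, and only leaves
  # are rendered into zone: stanzas by zoneConfigFile above.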
|
||||
|
||||
# options are ordered alphanumerically
|
||||
zoneOptionsRaw = types.submodule {
|
||||
options = {
|
||||
|
||||
allowAXFRFallback = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
If NSD as secondary server should be allowed to AXFR if the primary
|
||||
server does not allow IXFR.
|
||||
'';
|
||||
};
|
||||
|
||||
allowNotify = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
example = [ "192.0.2.0/24 NOKEY" "10.0.0.1-10.0.0.5 my_tsig_key_name"
|
||||
"10.0.3.4&255.255.0.0 BLOCKED"
|
||||
];
|
||||
description = ''
|
||||
Listed primary servers are allowed to notify this secondary server.
|
||||
<screen><![CDATA[
|
||||
Format: <ip> <key-name | NOKEY | BLOCKED>
|
||||
<ip> either a plain IPv4/IPv6 address or range. Valid patterns for ranges:
|
||||
* 10.0.0.0/24 # via subnet size
|
||||
* 10.0.0.0&255.255.255.0 # via subnet mask
|
||||
* 10.0.0.1-10.0.0.254 # via range
|
||||
An optional port number can be added with a '@':
|
||||
* 2001:1234::1@1234
|
||||
<key-name | NOKEY | BLOCKED>
|
||||
* <key-name> will use the specified TSIG key
|
||||
* NOKEY no TSIG signature is required
|
||||
* BLOCKED notifies from non-listed or blocked IPs will be ignored
|
||||
]]></screen>
|
||||
'';
|
||||
};
|
||||
|
||||
children = mkOption {
|
||||
default = {};
|
||||
description = ''
|
||||
Children zones inherit all options of their parents. Attributes
|
||||
defined in a child will overwrite the ones of its parent. Only
|
||||
leaf zones will be actually served. This way it's possible to
|
||||
define many zones which share most attributes without
|
||||
duplicating everything. This mechanism replaces nsd's patterns
|
||||
in a safe and functional way.
|
||||
'';
|
||||
};
|
||||
|
||||
data = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
example = "";
|
||||
description = ''
|
||||
The actual zone data. This is the content of your zone file.
|
||||
Use imports or pkgs.lib.readFile if you don't want this data in your config file.
|
||||
'';
|
||||
};
|
||||
|
||||
dnssec = mkEnableOption "DNSSEC";
|
||||
|
||||
dnssecPolicy = {
|
||||
algorithm = mkOption {
|
||||
type = types.str;
|
||||
default = "RSASHA256";
|
||||
description = "Which algorithm to use for DNSSEC";
|
||||
};
|
||||
keyttl = mkOption {
|
||||
type = types.str;
|
||||
default = "1h";
|
||||
description = "TTL for dnssec records";
|
||||
};
|
||||
coverage = mkOption {
|
||||
type = types.str;
|
||||
default = "1y";
|
||||
description = ''
|
||||
The length of time to ensure that keys will be correct; no action will be taken to create new keys to be activated after this time.
|
||||
'';
|
||||
};
|
||||
zsk = mkOption {
|
||||
type = keyPolicy;
|
||||
default = { keySize = 2048;
|
||||
prePublish = "1w";
|
||||
postPublish = "1w";
|
||||
rollPeriod = "1mo";
|
||||
};
|
||||
description = "Key policy for zone signing keys";
|
||||
};
|
||||
ksk = mkOption {
|
||||
type = keyPolicy;
|
||||
default = { keySize = 4096;
|
||||
prePublish = "1mo";
|
||||
postPublish = "1mo";
|
||||
rollPeriod = "0";
|
||||
};
|
||||
description = "Key policy for key signing keys";
|
||||
};
|
||||
};
|
||||
|
||||
maxRefreshSecs = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
Limit refresh time for secondary zones. This is the timer which
|
||||
checks to see if the zone has to be refetched when it expires.
|
||||
Normally the value from the SOA record is used, but this option
|
||||
restricts that value.
|
||||
'';
|
||||
};
|
||||
|
||||
minRefreshSecs = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
Limit refresh time for secondary zones.
|
||||
'';
|
||||
};
|
||||
|
||||
maxRetrySecs = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
Limit retry time for secondary zones. This is the timeout after
|
||||
a failed fetch attempt for the zone. Normally the value from
|
||||
the SOA record is used, but this option restricts that value.
|
||||
'';
|
||||
};
|
||||
|
||||
minRetrySecs = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
Limit retry time for secondary zones.
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
notify = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [ "10.0.0.1@3721 my_key" "::5 NOKEY" ];
|
||||
description = ''
|
||||
This primary server will notify all given secondary servers about
|
||||
zone changes.
|
||||
<screen><![CDATA[
|
||||
Format: <ip> <key-name | NOKEY>
|
||||
<ip> a plain IPv4/IPv6 address with an optional port number (ip@port)
|
||||
<key-name | NOKEY>
|
||||
* <key-name> sign notifies with the specified key
|
||||
* NOKEY don't sign notifies
|
||||
]]></screen>
|
||||
'';
|
||||
};
|
||||
|
||||
notifyRetry = mkOption {
|
||||
type = types.int;
|
||||
default = 5;
|
||||
description = ''
|
||||
Specifies the number of retries for failed notifies. Set this along with notify.
|
||||
'';
|
||||
};
|
||||
|
||||
outgoingInterface = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "2000::1@1234";
|
||||
description = ''
|
||||
This address will be used for zone-transfer requests if configured
|
||||
as a secondary server or notifications in case of a primary server.
|
||||
Supply either a plain IPv4 or IPv6 address with an optional port
|
||||
number (ip@port).
|
||||
'';
|
||||
};
|
||||
|
||||
provideXFR = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [ "192.0.2.0/24 NOKEY" "192.0.2.0/24 my_tsig_key_name" ];
|
||||
description = ''
|
||||
Allow these IPs and TSIG to transfer zones, addr TSIG|NOKEY|BLOCKED
|
||||
address range 192.0.2.0/24, 1.2.3.4&255.255.0.0, 3.0.2.20-3.0.2.40
|
||||
'';
|
||||
};
|
||||
|
||||
requestXFR = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [];
|
||||
description = ''
|
||||
Format: <code>[AXFR|UDP] <ip-address> <key-name | NOKEY></code>
|
||||
'';
|
||||
};
|
||||
|
||||
rrlWhitelist = mkOption {
|
||||
type = with types; listOf (enum [ "nxdomain" "error" "referral" "any" "rrsig" "wildcard" "nodata" "dnskey" "positive" "all" ]);
|
||||
default = [];
|
||||
description = ''
|
||||
Whitelists the given rrl-types.
|
||||
'';
|
||||
};
|
||||
|
||||
zoneStats = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "%s";
|
||||
description = ''
|
||||
When set to something other than null, NSD is able to collect
|
||||
statistics per zone. All statistics of this zone(s) will be added
|
||||
to the group specified by the given name. Use "%s" to use the zone's
|
||||
name as the group. The groups are output from nsd-control stats
|
||||
and stats_noreset.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
keyPolicy = types.submodule {
|
||||
options = {
|
||||
keySize = mkOption {
|
||||
type = types.int;
|
||||
description = "Key size in bits";
|
||||
};
|
||||
prePublish = mkOption {
|
||||
type = types.str;
|
||||
description = "How long in advance to publish new keys";
|
||||
};
|
||||
postPublish = mkOption {
|
||||
type = types.str;
|
||||
description = "How long after deactivation to keep a key in the zone";
|
||||
};
|
||||
rollPeriod = mkOption {
|
||||
type = types.str;
|
||||
description = "How frequently to change keys";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dnssecZones = (filterAttrs (n: v: if v ? dnssec then v.dnssec else false) zoneConfigs);
|
||||
|
||||
dnssec = dnssecZones != {};
|
||||
|
||||
dnssecTools = pkgs.bind.override { enablePython = true; };
|
||||
|
||||
signZones = optionalString dnssec ''
|
||||
mkdir -p ${stateDir}/dnssec
|
||||
chown ${username}:${username} ${stateDir}/dnssec
|
||||
chmod 0600 ${stateDir}/dnssec
|
||||
${concatStrings (mapAttrsToList signZone dnssecZones)}
|
||||
'';
|
||||
signZone = name: zone: ''
|
||||
${dnssecTools}/bin/dnssec-keymgr -g ${dnssecTools}/bin/dnssec-keygen -s ${dnssecTools}/bin/dnssec-settime -K ${stateDir}/dnssec -c ${policyFile name zone.dnssecPolicy} ${name}
|
||||
${dnssecTools}/bin/dnssec-signzone -S -K ${stateDir}/dnssec -o ${name} -O full -N date ${stateDir}/zones/${name}
|
||||
${nsdPkg}/sbin/nsd-checkzone ${name} ${stateDir}/zones/${name}.signed && mv -v ${stateDir}/zones/${name}.signed ${stateDir}/zones/${name}
|
||||
'';
|
||||
policyFile = name: policy: pkgs.writeText "${name}.policy" ''
|
||||
zone ${name} {
|
||||
algorithm ${policy.algorithm};
|
||||
key-size zsk ${toString policy.zsk.keySize};
|
||||
key-size ksk ${toString policy.ksk.keySize};
|
||||
keyttl ${policy.keyttl};
|
||||
pre-publish zsk ${policy.zsk.prePublish};
|
||||
pre-publish ksk ${policy.ksk.prePublish};
|
||||
post-publish zsk ${policy.zsk.postPublish};
|
||||
post-publish ksk ${policy.ksk.postPublish};
|
||||
roll-period zsk ${policy.zsk.rollPeriod};
|
||||
roll-period ksk ${policy.ksk.rollPeriod};
|
||||
coverage ${policy.coverage};
|
||||
};
|
||||
'';
|
||||
in
|
||||
{
|
||||
# options are ordered alphanumerically
|
||||
options.fudo.nsd = {
|
||||
|
||||
enable = mkEnableOption "NSD authoritative DNS server";
|
||||
|
||||
bind8Stats = mkEnableOption "BIND8 like statistics";
|
||||
|
||||
dnssecInterval = mkOption {
|
||||
type = types.str;
|
||||
default = "1h";
|
||||
description = ''
|
||||
How often to check whether dnssec key rollover is required
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
Extra nsd config.
|
||||
'';
|
||||
};
|
||||
|
||||
hideVersion = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether NSD should answer VERSION.BIND and VERSION.SERVER CHAOS class queries.
|
||||
'';
|
||||
};
|
||||
|
||||
identity = mkOption {
|
||||
type = types.str;
|
||||
default = "unidentified server";
|
||||
description = ''
|
||||
Identify the server (CH TXT ID.SERVER entry).
|
||||
'';
|
||||
};
|
||||
|
||||
interfaces = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "127.0.0.0" "::1" ];
|
||||
description = ''
|
||||
What addresses the server should listen to.
|
||||
'';
|
||||
};
|
||||
|
||||
ipFreebind = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to bind to nonlocal addresses and interfaces that are down.
|
||||
Similar to ip-transparent.
|
||||
'';
|
||||
};
|
||||
|
||||
ipTransparent = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Allow binding to non local addresses.
|
||||
'';
|
||||
};
|
||||
|
||||
ipv4 = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to listen on IPv4 connections.
|
||||
'';
|
||||
};
|
||||
|
||||
ipv4EDNSSize = mkOption {
|
||||
type = types.int;
|
||||
default = 4096;
|
||||
description = ''
|
||||
Preferred EDNS buffer size for IPv4.
|
||||
'';
|
||||
};
|
||||
|
||||
ipv6 = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to listen on IPv6 connections.
|
||||
'';
|
||||
};
|
||||
|
||||
ipv6EDNSSize = mkOption {
|
||||
type = types.int;
|
||||
default = 4096;
|
||||
description = ''
|
||||
Preferred EDNS buffer size for IPv6.
|
||||
'';
|
||||
};
|
||||
|
||||
logTimeAscii = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Log time in ascii, if false then in unix epoch seconds.
|
||||
'';
|
||||
};
|
||||
|
||||
nsid = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
NSID identity (hex string, or "ascii_somestring").
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 53;
|
||||
description = ''
|
||||
Port the service should bind to.
|
||||
'';
|
||||
};
|
||||
|
||||
reuseport = mkOption {
|
||||
type = types.bool;
|
||||
default = pkgs.stdenv.isLinux;
|
||||
description = ''
|
||||
Whether to enable SO_REUSEPORT on all used sockets. This lets multiple
|
||||
processes bind to the same port. This speeds up operation especially
|
||||
if the server count is greater than one and makes fast restarts less
|
||||
prone to fail.
|
||||
'';
|
||||
};
|
||||
|
||||
rootServer = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether this server will be a root server (a DNS root server, you
|
||||
usually don't want that).
|
||||
'';
|
||||
};
|
||||
|
||||
roundRobin = mkEnableOption "round robin rotation of records";
|
||||
|
||||
serverCount = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
description = ''
|
||||
Number of NSD servers to fork. Put the number of CPUs to use here.
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
stateDir = mkOption {
|
||||
type = types.str;
|
||||
description = "Directory at which to store NSD state data.";
|
||||
default = "/var/lib/nsd";
|
||||
};
|
||||
|
||||
statistics = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
Statistics are produced every given number of seconds and printed to the log.
|
||||
If null, no statistics are logged.
|
||||
'';
|
||||
};
|
||||
|
||||
tcpCount = mkOption {
|
||||
type = types.int;
|
||||
default = 100;
|
||||
description = ''
|
||||
Maximum number of concurrent TCP connections per server.
|
||||
'';
|
||||
};
|
||||
|
||||
tcpQueryCount = mkOption {
|
||||
type = types.int;
|
||||
default = 0;
|
||||
description = ''
|
||||
Maximum number of queries served on a single TCP connection.
|
||||
0 means no maximum.
|
||||
'';
|
||||
};
|
||||
|
||||
tcpTimeout = mkOption {
|
||||
type = types.int;
|
||||
default = 120;
|
||||
description = ''
|
||||
TCP timeout in seconds.
|
||||
'';
|
||||
};
|
||||
|
||||
verbosity = mkOption {
|
||||
type = types.int;
|
||||
default = 0;
|
||||
description = ''
|
||||
Verbosity level.
|
||||
'';
|
||||
};
|
||||
|
||||
version = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
The version string replied for CH TXT version.server and version.bind
|
||||
queries. Will use the compiled package version on null.
|
||||
See hideVersion for enabling/disabling these responses.
|
||||
'';
|
||||
};
|
||||
|
||||
xfrdReloadTimeout = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
description = ''
|
||||
Number of seconds between reloads triggered by xfrd.
|
||||
'';
|
||||
};
|
||||
|
||||
zonefilesCheck = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to check mtime of all zone files on start and sighup.
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
keys = mkOption {
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
|
||||
algorithm = mkOption {
|
||||
type = types.str;
|
||||
default = "hmac-sha256";
|
||||
description = ''
|
||||
Authentication algorithm for this key.
|
||||
'';
|
||||
};
|
||||
|
||||
keyFile = mkOption {
|
||||
type = types.path;
|
||||
description = ''
|
||||
Path to the file which contains the actual base64 encoded
|
||||
key. The key will be copied into "${stateDir}/private" before
|
||||
NSD starts. The copied file is only accessible by the NSD
|
||||
user.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
});
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{ "tsig.example.org" = {
|
||||
algorithm = "hmac-md5";
|
||||
keyFile = "/path/to/my/key";
|
||||
};
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
Define your TSIG keys here.
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
ratelimit = {
|
||||
|
||||
enable = mkEnableOption "ratelimit capabilities";
|
||||
|
||||
ipv4PrefixLength = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
IPv4 prefix length. Addresses are grouped by netblock.
|
||||
'';
|
||||
};
|
||||
|
||||
ipv6PrefixLength = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
IPv6 prefix length. Addresses are grouped by netblock.
|
||||
'';
|
||||
};
|
||||
|
||||
ratelimit = mkOption {
|
||||
type = types.int;
|
||||
default = 200;
|
||||
description = ''
|
||||
Max qps allowed from any query source.
|
||||
0 means unlimited. With a verbosity of 2, blocked and
|
||||
unblocked subnets will be logged.
|
||||
'';
|
||||
};
|
||||
|
||||
slip = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
Number of packets that get discarded before replying a SLIP response.
|
||||
0 disables SLIP responses. 1 will make every response a SLIP response.
|
||||
'';
|
||||
};
|
||||
|
||||
size = mkOption {
|
||||
type = types.int;
|
||||
default = 1000000;
|
||||
description = ''
|
||||
Size of the hashtable. More buckets use more memory but lower
|
||||
the chance of hash collisions.
|
||||
'';
|
||||
};
|
||||
|
||||
whitelistRatelimit = mkOption {
|
||||
type = types.int;
|
||||
default = 2000;
|
||||
description = ''
|
||||
Max qps allowed from whitelisted sources.
|
||||
0 means unlimited. Set the rrl-whitelist option for specific
|
||||
queries to apply this limit instead of the default to them.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
remoteControl = {
|
||||
|
||||
enable = mkEnableOption "remote control via nsd-control";
|
||||
|
||||
controlCertFile = mkOption {
|
||||
type = types.path;
|
||||
default = "/etc/nsd/nsd_control.pem";
|
||||
description = ''
|
||||
Path to the client certificate signed with the server certificate.
|
||||
This file is used by nsd-control and generated by nsd-control-setup.
|
||||
'';
|
||||
};
|
||||
|
||||
controlKeyFile = mkOption {
|
||||
type = types.path;
|
||||
default = "/etc/nsd/nsd_control.key";
|
||||
description = ''
|
||||
Path to the client private key, which is used by nsd-control
|
||||
but not by the server. This file is generated by nsd-control-setup.
|
||||
'';
|
||||
};
|
||||
|
||||
interfaces = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "127.0.0.1" "::1" ];
|
||||
description = ''
|
||||
Which interfaces NSD should bind to for remote control.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 8952;
|
||||
description = ''
|
||||
Port number for remote control operations (uses TLS over TCP).
|
||||
'';
|
||||
};
|
||||
|
||||
serverCertFile = mkOption {
|
||||
type = types.path;
|
||||
default = "/etc/nsd/nsd_server.pem";
|
||||
description = ''
|
||||
Path to the server self signed certificate, which is used by the server
|
||||
and also by nsd-control. This file is generated by nsd-control-setup.
|
||||
'';
|
||||
};
|
||||
|
||||
serverKeyFile = mkOption {
|
||||
type = types.path;
|
||||
default = "/etc/nsd/nsd_server.key";
|
||||
description = ''
|
||||
Path to the server private key, which is used by the server
|
||||
but not by nsd-control. This file is generated by nsd-control-setup.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
zones = mkOption {
|
||||
type = types.attrsOf zoneOptions;
|
||||
default = {};
|
||||
example = literalExample ''
|
||||
{ "serverGroup1" = {
|
||||
provideXFR = [ "10.1.2.3 NOKEY" ];
|
||||
children = {
|
||||
"example.com." = {
|
||||
data = '''
|
||||
$ORIGIN example.com.
|
||||
$TTL 86400
|
||||
@ IN SOA a.ns.example.com. admin.example.com. (
|
||||
...
|
||||
''';
|
||||
};
|
||||
"example.org." = {
|
||||
data = '''
|
||||
$ORIGIN example.org.
|
||||
$TTL 86400
|
||||
@ IN SOA a.ns.example.com. admin.example.com. (
|
||||
...
|
||||
''';
|
||||
};
|
||||
};
|
||||
};
|
||||
"example.net." = {
|
||||
provideXFR = [ "10.3.2.1 NOKEY" ];
|
||||
data = '''
|
||||
...
|
||||
''';
|
||||
};
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
Define your zones here. Zones can cascade other zones and therefore
|
||||
inherit settings from parent zones. Look at the definition of
|
||||
children to learn about inheritance and child zones.
|
||||
The given example will define 3 zones (example.(com|org|net).). Both
|
||||
example.com. and example.org. inherit their configuration from
|
||||
serverGroup1.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
assertions = singleton {
|
||||
assertion = zoneConfigs ? "." -> cfg.rootServer;
|
||||
message = "You have a root zone configured. If this is really what you "
|
||||
+ "want, please enable 'services.nsd.rootServer'.";
|
||||
};
|
||||
|
||||
environment = {
|
||||
systemPackages = [ nsdPkg ];
|
||||
etc."nsd/nsd.conf".source = "${configFile}/nsd.conf";
|
||||
};
|
||||
|
||||
users.groups.${username}.gid = config.ids.gids.nsd;
|
||||
|
||||
users.users.${username} = {
|
||||
description = "NSD service user";
|
||||
home = stateDir;
|
||||
createHome = true;
|
||||
uid = config.ids.uids.nsd;
|
||||
group = username;
|
||||
};
|
||||
|
||||
systemd.services.nsd = {
|
||||
description = "NSD authoritative only domain name service";
|
||||
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
startLimitBurst = 4;
|
||||
startLimitIntervalSec = 5 * 60; # 5 mins
|
||||
serviceConfig = {
|
||||
ExecStart = "${nsdPkg}/sbin/nsd -d -c ${nsdEnv}/nsd.conf";
|
||||
StandardError = "null";
|
||||
PIDFile = pidFile;
|
||||
Restart = "always";
|
||||
RestartSec = "4s";
|
||||
};
|
||||
|
||||
preStart = ''
|
||||
rm -Rf "${stateDir}/private/"
|
||||
rm -Rf "${stateDir}/tmp/"
|
||||
mkdir -m 0700 -p "${stateDir}/private"
|
||||
mkdir -m 0700 -p "${stateDir}/tmp"
|
||||
mkdir -m 0700 -p "${stateDir}/var"
|
||||
cat > "${stateDir}/don't touch anything in here" << EOF
|
||||
Everything in this directory except NSD's state in var and dnssec
|
||||
is automatically generated and will be purged and redeployed by
|
||||
the nsd.service pre-start script.
|
||||
EOF
|
||||
chown ${username}:${username} -R "${stateDir}/private"
|
||||
chown ${username}:${username} -R "${stateDir}/tmp"
|
||||
chown ${username}:${username} -R "${stateDir}/var"
|
||||
rm -rf "${stateDir}/zones"
|
||||
cp -rL "${nsdEnv}/zones" "${stateDir}/zones"
|
||||
${copyKeys}
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.timers.nsd-dnssec = mkIf dnssec {
|
||||
description = "Automatic DNSSEC key rollover";
|
||||
|
||||
wantedBy = [ "nsd.service" ];
|
||||
|
||||
timerConfig = {
|
||||
OnActiveSec = cfg.dnssecInterval;
|
||||
OnUnitActiveSec = cfg.dnssecInterval;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.nsd-dnssec = mkIf dnssec {
|
||||
description = "DNSSEC key rollover";
|
||||
|
||||
wantedBy = [ "nsd.service" ];
|
||||
before = [ "nsd.service" ];
|
||||
|
||||
script = signZones;
|
||||
|
||||
postStop = ''
|
||||
/run/current-system/systemd/bin/systemctl kill -s SIGHUP nsd.service
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
}
|
|
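# Illustrative sketch (not part of this commit): serving a single zone with the
# fudo.nsd module above. Addresses and zone data are made-up placeholders.
{ ... }: {
  fudo.nsd = {
    enable = true;
    interfaces = [ "10.0.0.1" ];
    zones."example.com." = {
      data = ''
        $ORIGIN example.com.
        $TTL 86400
        @   IN SOA ns1.example.com. admin.example.com. ( 1 7200 3600 1209600 3600 )
        @   IN NS  ns1.example.com.
        ns1 IN A   10.0.0.1
      '';
    };
  };
}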
@@ -0,0 +1,116 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.password;
|
||||
|
||||
genOpts = {
|
||||
options = {
|
||||
file = mkOption {
|
||||
type = types.str;
|
||||
description = "Password file in which to store a generated password.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
description = "User to which the file should belong.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = with types; nullOr str;
|
||||
description = "Group to which the file should belong.";
|
||||
default = "nogroup";
|
||||
};
|
||||
|
||||
restart-services = mkOption {
|
||||
type = with types; listOf str;
|
||||
description = "List of services to restart when the password file is generated.";
|
||||
default = [];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
generate-passwd-file = file: user: group: pkgs.writeShellScriptBin "generate-passwd-file.sh" ''
|
||||
mkdir -p $(dirname ${file})
|
||||
|
||||
if touch ${file}; then
|
||||
chown ${user}${optionalString (group != null) ":${group}"} ${file}
|
||||
if [ $? -ne 0 ]; then
|
||||
rm ${file}
|
||||
echo "failed to set permissions on ${file}"
|
||||
exit 4
|
||||
fi
|
||||
${pkgs.pwgen}/bin/pwgen 30 1 > ${file}
|
||||
else
|
||||
echo "cannot write to ${file}"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
if [ ! -f ${file} ]; then
|
||||
echo "Failed to create file ${file}"
|
||||
exit 3
|
||||
fi
|
||||
|
||||
${if (group != null) then
|
||||
"chmod 640 ${file}"
|
||||
else
|
||||
"chmod 600 ${file}"}
|
||||
|
||||
echo "created password file ${file}"
|
||||
exit 0
|
||||
'';
|
||||
|
||||
restart-script = service-name: ''
|
||||
SYSCTL=${pkgs.systemd}/bin/systemctl
|
||||
JOBTYPE=$(${pkgs.systemd}/bin/systemctl show ${service-name} -p Type)
|
||||
if $SYSCTL is-active --quiet ${service-name} ||
|
||||
[ $JOBTYPE == "Type=simple" ] ||
|
||||
[ $JOBTYPE == "Type=oneshot" ] ; then
|
||||
echo "restarting service ${service-name} because password has changed."
|
||||
$SYSCTL restart ${service-name}
|
||||
fi
|
||||
'';
|
||||
|
||||
filterForRestarts = filterAttrs (name: opts: opts.restart-services != []);
|
||||
|
||||
in {
|
||||
options.fudo.password = {
|
||||
file-generator = mkOption {
|
||||
type = with types; attrsOf (submodule genOpts);
|
||||
description = "List of password files to generate.";
|
||||
default = {};
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
systemd.targets.fudo-passwords = {
|
||||
description = "Target indicating that all Fudo passwords have been generated.";
|
||||
wantedBy = [ "default.target" ];
|
||||
};
|
||||
|
||||
systemd.services = fold (a: b: a // b) {} (mapAttrsToList (name: opts: {
|
||||
"file-generator-${name}" = {
|
||||
enable = true;
|
||||
partOf = [ "fudo-passwords.target" ];
|
||||
serviceConfig.Type = "oneshot";
|
||||
description = "Generate password file for ${name}.";
|
||||
script = "${generate-passwd-file opts.file opts.user opts.group}/bin/generate-passwd-file.sh";
|
||||
reloadIfChanged = true;
|
||||
};
|
||||
|
||||
"file-generator-watcher-${name}" = mkIf (! (opts.restart-services == [])) {
|
||||
description = "Restart services upon regenerating password for ${name}";
|
||||
after = [ "file-generator-${name}.service" ];
|
||||
partOf = [ "fudo-passwords.target" ];
|
||||
serviceConfig.Type = "oneshot";
|
||||
script = concatStringsSep "\n" (map restart-script opts.restart-services);
|
||||
};
|
||||
}) cfg.file-generator);
|
||||
|
||||
systemd.paths = mapAttrs' (name: opts:
|
||||
nameValuePair "file-generator-watcher-${name}" {
|
||||
partOf = [ "fudo-passwords.target"];
|
||||
pathConfig.PathChanged = opts.file;
|
||||
}) (filterForRestarts cfg.file-generator);
|
||||
};
|
||||
}
|
|
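# Illustrative sketch (not part of this commit): generating a password file for
# a hypothetical service and restarting that service whenever the file changes.
{ ... }: {
  fudo.password.file-generator.my-service = {
    file = "/run/passwords/my-service.passwd"; # assumed path
    user = "my-service";
    group = "my-service";
    restart-services = [ "my-service.service" ];
  };
}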
@@ -0,0 +1,370 @@
|
|||
{ config, lib, pkgs, environment, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.postgresql;
|
||||
|
||||
hostname = config.instance.hostname;
|
||||
domain-name = config.instance.local-domain;
|
||||
|
||||
gssapi-realm = config.fudo.domains.${domain-name}.gssapi-realm;
|
||||
|
||||
join-lines = lib.concatStringsSep "\n";
|
||||
|
||||
strip-ext = filename:
|
||||
head (builtins.match "^(.+)[.][^.]+$" filename);
|
||||
|
||||
userDatabaseOpts = { database, ... }: {
|
||||
options = {
|
||||
access = mkOption {
|
||||
type = types.str;
|
||||
description = "Privileges for user on this database.";
|
||||
default = "CONNECT";
|
||||
};
|
||||
|
||||
entity-access = mkOption {
|
||||
type = with types; attrsOf str;
|
||||
description =
|
||||
"A list of entities mapped to the access this user should have.";
|
||||
default = { };
|
||||
example = {
|
||||
"TABLE users" = "SELECT,DELETE";
|
||||
"ALL SEQUENCES IN public" = "SELECT";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
userOpts = { username, ... }: {
|
||||
options = with types; {
|
||||
password-file = mkOption {
|
||||
type = nullOr str;
|
||||
description = "A file containing the user's (plaintext) password.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
databases = mkOption {
|
||||
type = attrsOf (submodule userDatabaseOpts);
|
||||
description = "Map of databases to required database/table perms.";
|
||||
default = { };
|
||||
example = {
|
||||
my_database = {
|
||||
access = "ALL PRIVILEGES";
|
||||
entity-access = { "ALL TABLES" = "SELECT"; };
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
databaseOpts = { dbname, ... }: {
|
||||
options = with types; {
|
||||
users = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of users who should have full access to this database.";
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
filterPasswordedUsers = filterAttrs (user: opts: opts.password-file != null);
|
||||
|
||||
password-setter-script = user: password-file: sql-file: ''
|
||||
unset PASSWORD
|
||||
if [ ! -f ${password-file} ]; then
|
||||
echo "file does not exist: ${password-file}"
|
||||
exit 1
|
||||
fi
|
||||
PASSWORD=$(cat ${password-file})
|
||||
echo "setting password for user ${user}"
|
||||
echo "ALTER USER ${user} ENCRYPTED PASSWORD '$PASSWORD';" >> ${sql-file}
|
||||
'';
|
||||
|
||||
passwords-setter-script = users:
|
||||
pkgs.writeScript "postgres-set-passwords.sh" ''
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "usage: $0 output-file.sql"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
OUTPUT_FILE=$1
|
||||
|
||||
if [ ! -f $OUTPUT_FILE ]; then
|
||||
echo "file doesn't exist: $OUTPUT_FILE"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
${join-lines (mapAttrsToList (user: opts:
|
||||
password-setter-script user opts.password-file "$OUTPUT_FILE")
|
||||
(filterPasswordedUsers users))}
|
||||
'';
|
||||
|
||||
userDatabaseAccess = user: databases:
|
||||
mapAttrs' (database: databaseOpts:
|
||||
nameValuePair "DATABASE ${database}" databaseOpts.access) databases;
|
||||
|
||||
makeEntry = nw:
|
||||
"host all all ${nw} gss include_realm=0 krb_realm=${gssapi-realm}";
|
||||
|
||||
makeNetworksEntry = networks: join-lines (map makeEntry networks);
|
||||
|
||||
makeLocalUserPasswordEntries = users:
|
||||
join-lines (mapAttrsToList (user: opts:
|
||||
join-lines (map (db: ''
|
||||
local ${db} ${user} md5
|
||||
host ${db} ${user} 127.0.0.1/16 md5
|
||||
host ${db} ${user} ::1/128 md5
|
||||
'') (attrNames opts.databases))) (filterPasswordedUsers users));
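  # Illustrative sketch (not in this commit): for a user "sample_user" that has a
  # password-file and access to a database "my_database", the function above
  # renders the following pg_hba.conf entries (md5 auth for local and loopback
  # connections, exactly as written in the template):
  #   local my_database sample_user md5
  #   host my_database sample_user 127.0.0.1/16 md5
  #   host my_database sample_user ::1/128 md5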
|
||||
|
||||
userTableAccessSql = user: entity: access:
|
||||
"GRANT ${access} ON ${entity} TO ${user};";
|
||||
userDatabaseAccessSql = user: database: dbOpts: ''
|
||||
\c ${database}
|
||||
${join-lines
|
||||
(mapAttrsToList (userTableAccessSql user) dbOpts.entity-access)}
|
||||
'';
|
||||
userAccessSql = user: userOpts:
|
||||
join-lines (mapAttrsToList (userDatabaseAccessSql user) userOpts.databases);
|
||||
usersAccessSql = users: join-lines (mapAttrsToList userAccessSql users);
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.postgresql = with types; {
|
||||
enable = mkEnableOption "Fudo PostgreSQL Server";
|
||||
|
||||
ssl-private-key = mkOption {
|
||||
type = str;
|
||||
description = "Location of the server SSL private key.";
|
||||
};
|
||||
|
||||
ssl-certificate = mkOption {
|
||||
type = str;
|
||||
description = "Location of the server SSL certificate.";
|
||||
};
|
||||
|
||||
keytab = mkOption {
|
||||
type = str;
|
||||
description = "Location of the server Kerberos keytab.";
|
||||
};
|
||||
|
||||
local-networks = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of networks from which to accept connections.";
|
||||
example = [ "10.0.0.1/16" ];
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
users = mkOption {
|
||||
type = attrsOf (submodule userOpts);
|
||||
description = "A map of users to user attributes.";
|
||||
example = {
|
||||
sampleUser = {
|
||||
password-file = "/path/to/password/file";
|
||||
databases = {
|
||||
some_database = {
|
||||
access = "CONNECT";
|
||||
entity-access = { "TABLE some_table" = "SELECT,UPDATE"; };
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
default = { };
|
||||
};
|
||||
|
||||
databases = mkOption {
|
||||
type = attrsOf (submodule databaseOpts);
|
||||
description = "A map of databases to database options.";
|
||||
default = { };
|
||||
};
|
||||
|
||||
socket-directory = mkOption {
|
||||
type = str;
|
||||
description = "Directory in which to place unix sockets.";
|
||||
default = "/run/postgresql";
|
||||
};
|
||||
|
||||
socket-group = mkOption {
|
||||
type = str;
|
||||
description = "Group for accessing sockets.";
|
||||
default = "postgres_local";
|
||||
};
|
||||
|
||||
local-users = mkOption {
|
||||
type = listOf str;
|
||||
description = "Users able to access the server via local socket.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
required-services = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of services that should run before postgresql.";
|
||||
default = [ ];
|
||||
example = [ "password-generator.service" ];
|
||||
};
|
||||
|
||||
state-directory = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Path at which to store database state data.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
cleanup-tasks = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of actions to take during shutdown of the service.";
|
||||
default = [];
|
||||
};
|
||||
|
||||
systemd-target = mkOption {
|
||||
type = str;
|
||||
description = "Name of the systemd target for postgresql";
|
||||
default = "postgresql.target";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
environment = {
|
||||
systemPackages = with pkgs; [ postgresql_11_gssapi ];
|
||||
|
||||
# etc = {
|
||||
# "postgresql/private/privkey.pem" = {
|
||||
# mode = "0400";
|
||||
# user = "postgres";
|
||||
# group = "postgres";
|
||||
# source = cfg.ssl-private-key;
|
||||
# };
|
||||
|
||||
# "postgresql/cert.pem" = {
|
||||
# mode = "0444";
|
||||
# user = "postgres";
|
||||
# group = "postgres";
|
||||
# source = cfg.ssl-certificate;
|
||||
# };
|
||||
|
||||
# "postgresql/private/postgres.keytab" = {
|
||||
# mode = "0400";
|
||||
# user = "postgres";
|
||||
# group = "postgres";
|
||||
# source = cfg.keytab;
|
||||
# };
|
||||
# };
|
||||
};
|
||||
|
||||
users.groups = {
|
||||
${cfg.socket-group} = { members = [ "postgres" ] ++ cfg.local-users; };
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
package = pkgs.postgresql_11_gssapi;
|
||||
enableTCPIP = true;
|
||||
ensureDatabases = mapAttrsToList (name: value: name) cfg.databases;
|
||||
ensureUsers = ((mapAttrsToList (username: attrs: {
|
||||
name = username;
|
||||
ensurePermissions = userDatabaseAccess username attrs.databases;
|
||||
}) cfg.users) ++ (flatten (mapAttrsToList (database: opts:
|
||||
(map (username: {
|
||||
name = username;
|
||||
ensurePermissions = { "DATABASE ${database}" = "ALL PRIVILEGES"; };
|
||||
}) opts.users)) cfg.databases)));
|
||||
|
||||
settings = {
|
||||
krb_server_keyfile = cfg.keytab;
|
||||
|
||||
ssl = true;
|
||||
ssl_cert_file = cfg.ssl-certificate;
|
||||
ssl_key_file = cfg.ssl-private-key;
|
||||
|
||||
unix_socket_directories = cfg.socket-directory;
|
||||
unix_socket_group = cfg.socket-group;
|
||||
unix_socket_permissions = "0777";
|
||||
};
|
||||
|
||||
authentication = lib.mkForce ''
|
||||
${makeLocalUserPasswordEntries cfg.users}
|
||||
|
||||
local all all ident
|
||||
|
||||
# host-local
|
||||
host all all 127.0.0.1/32 gss include_realm=0 krb_realm=${gssapi-realm}
|
||||
host all all ::1/128 gss include_realm=0 krb_realm=${gssapi-realm}
|
||||
|
||||
# local networks
|
||||
${makeNetworksEntry cfg.local-networks}
|
||||
'';
|
||||
|
||||
dataDir = mkIf (cfg.state-directory != null) cfg.state-directory;
|
||||
};
|
||||
|
||||
systemd = {
|
||||
|
||||
tmpfiles.rules = optional (cfg.state-directory != null) (let
|
||||
user = config.systemd.services.postgresql.serviceConfig.User;
|
||||
in "d ${cfg.state-directory} 0700 ${user} - - -");
|
||||
|
||||
targets.${strip-ext cfg.systemd-target} = {
|
||||
description = "Postgresql and associated systemd services.";
|
||||
};
|
||||
|
||||
services = {
|
||||
postgresql-password-setter = let
|
||||
passwords-script = passwords-setter-script cfg.users;
|
||||
password-wrapper-script =
|
||||
pkgs.writeScript "password-script-wrapper.sh" ''
|
||||
TMPDIR=$(${pkgs.coreutils}/bin/mktemp -d -t postgres-XXXXXXXXXX)
|
||||
echo "using temp dir $TMPDIR"
|
||||
PASSWORD_SQL_FILE=$TMPDIR/user-passwords.sql
|
||||
echo "password file $PASSWORD_SQL_FILE"
|
||||
touch $PASSWORD_SQL_FILE
|
||||
chown ${config.services.postgresql.superUser} $PASSWORD_SQL_FILE
|
||||
chmod go-rwx $PASSWORD_SQL_FILE
|
||||
${passwords-script} $PASSWORD_SQL_FILE
|
||||
echo "executing $PASSWORD_SQL_FILE"
|
||||
${pkgs.postgresql}/bin/psql --port ${
|
||||
toString config.services.postgresql.port
|
||||
} -d postgres -f $PASSWORD_SQL_FILE
|
||||
echo rm $PASSWORD_SQL_FILE
|
||||
echo "Postgresql user passwords set.";
|
||||
exit 0
|
||||
'';
|
||||
|
||||
in {
|
||||
description =
|
||||
"A service to set postgresql user passwords after the server has started.";
|
||||
after = [ "postgresql.service" ] ++ cfg.required-services;
|
||||
requires = [ "postgresql.service" ] ++ cfg.required-services;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = config.services.postgresql.superUser;
|
||||
};
|
||||
partOf = [ cfg.systemd-target ];
|
||||
script = "${password-wrapper-script}";
|
||||
};
|
||||
|
||||
postgresql = {
|
||||
requires = cfg.required-services;
|
||||
after = cfg.required-services;
|
||||
partOf = [ cfg.systemd-target ];
|
||||
|
||||
postStart = let
|
||||
allow-user-login = user: "ALTER ROLE ${user} WITH LOGIN;";
|
||||
|
||||
extra-settings-sql = pkgs.writeText "settings.sql" ''
|
||||
${concatStringsSep "\n"
|
||||
(map allow-user-login (mapAttrsToList (key: val: key) cfg.users))}
|
||||
${usersAccessSql cfg.users}
|
||||
'';
|
||||
in ''
|
||||
${pkgs.postgresql}/bin/psql --port ${
|
||||
toString config.services.postgresql.port
|
||||
} -d postgres -f ${extra-settings-sql}
|
||||
${pkgs.coreutils}/bin/chgrp ${cfg.socket-group} ${cfg.socket-directory}/.s.PGSQL*
|
||||
'';
|
||||
|
||||
postStop = concatStringsSep "\n" cfg.cleanup-tasks;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
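# Illustrative sketch (not part of this commit): wiring up the fudo.postgresql
# module above. All paths, network ranges, and names are assumed placeholders.
{ ... }: {
  fudo.postgresql = {
    enable = true;
    keytab = "/path/to/postgres.keytab";
    ssl-certificate = "/path/to/cert.pem";
    ssl-private-key = "/path/to/key.pem";
    local-networks = [ "10.0.0.0/16" ];
    databases.my_database.users = [ "sample_user" ];
    users.sample_user = {
      password-file = "/run/passwords/sample_user.passwd";
      databases.my_database = {
        access = "CONNECT";
        entity-access."ALL TABLES IN SCHEMA public" = "SELECT";
      };
    };
  };
}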
@@ -0,0 +1,207 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
inherit (lib.strings) concatStringsSep;
|
||||
cfg = config.fudo.prometheus;
|
||||
|
||||
in {
|
||||
|
||||
options.fudo.prometheus = {
|
||||
enable = mkEnableOption "Fudo Prometheus Data-Gathering Server";
|
||||
|
||||
service-discovery-dns = mkOption {
|
||||
type = with types; attrsOf (listOf str);
|
||||
description = ''
|
||||
A map of exporter type to a list of domains to use for service discovery.
|
||||
'';
|
||||
example = {
|
||||
node = [ "node._metrics._tcp.my-domain.com" ];
|
||||
postfix = [ "postfix._metrics._tcp.my-domain.com" ];
|
||||
};
|
||||
default = {
|
||||
dovecot = [];
|
||||
node = [];
|
||||
postfix = [];
|
||||
rspamd = [];
|
||||
};
|
||||
};
|
||||
|
||||
static-targets = mkOption {
|
||||
type = with types; attrsOf (listOf str);
|
||||
description = ''
|
||||
A map of exporter type to a list of host:ports from which to collect metrics.
|
||||
'';
|
||||
example = {
|
||||
node = [ "my-host.my-domain:1111" ];
|
||||
};
|
||||
default = {
|
||||
dovecot = [];
|
||||
node = [];
|
||||
postfix = [];
|
||||
rspamd = [];
|
||||
};
|
||||
};
|
||||
|
||||
docker-hosts = mkOption {
|
||||
type = with types; listOf str;
|
||||
description = ''
|
||||
A list of explicit <host:port> docker targets from which to gather node data.
|
||||
'';
|
||||
default = [];
|
||||
};
|
||||
|
||||
push-url = mkOption {
|
||||
type = with types; nullOr str;
|
||||
description = ''
|
||||
The <host:port> that services can use to manually push data.
|
||||
'';
|
||||
default = null;
|
||||
};
|
||||
|
||||
push-address = mkOption {
|
||||
type = with types; nullOr str;
|
||||
description = ''
|
||||
The <host:port> address on which to listen for incoming data.
|
||||
'';
|
||||
default = null;
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
type = with types; str;
|
||||
description = "The hostname upon which Prometheus will serve.";
|
||||
example = "my-metrics-server.fudo.org";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
|
||||
virtualHosts = {
|
||||
"${cfg.hostname}" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
locations."/" = {
|
||||
proxyPass = "http://127.0.0.1:9090";
|
||||
|
||||
extraConfig = let
|
||||
local-networks = config.instance.local-networks;
|
||||
in ''
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-By $server_addr:$server_port;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
${optionalString ((length local-networks) > 0)
|
||||
(concatStringsSep "\n" (map (network: "allow ${network};") local-networks)) + "\ndeny all;"}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.prometheus = {
|
||||
|
||||
enable = true;
|
||||
|
||||
webExternalUrl = "https://${cfg.hostname}";
|
||||
|
||||
listenAddress = "127.0.0.1";
|
||||
port = 9090;
|
||||
|
||||
scrapeConfigs = [
|
||||
{
|
||||
job_name = "docker";
|
||||
honor_labels = false;
|
||||
static_configs = [
|
||||
{
|
||||
targets = cfg.docker-hosts;
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
{
|
||||
job_name = "node";
|
||||
scheme = "https";
|
||||
metrics_path = "/metrics/node";
|
||||
honor_labels = false;
|
||||
dns_sd_configs = [
|
||||
{
|
||||
names = cfg.service-discovery-dns.node;
|
||||
}
|
||||
];
|
||||
static_configs = [
|
||||
{
|
||||
targets = cfg.static-targets.node;
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
{
|
||||
job_name = "dovecot";
|
||||
scheme = "https";
|
||||
metrics_path = "/metrics/dovecot";
|
||||
honor_labels = false;
|
||||
dns_sd_configs = [
|
||||
{
|
||||
names = cfg.service-discovery-dns.dovecot;
|
||||
}
|
||||
];
|
||||
static_configs = [
|
||||
{
|
||||
targets = cfg.static-targets.dovecot;
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
{
|
||||
job_name = "postfix";
|
||||
scheme = "https";
|
||||
metrics_path = "/metrics/postfix";
|
||||
honor_labels = false;
|
||||
dns_sd_configs = [
|
||||
{
|
||||
names = cfg.service-discovery-dns.postfix;
|
||||
}
|
||||
];
|
||||
static_configs = [
|
||||
{
|
||||
targets = cfg.static-targets.postfix;
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
{
|
||||
job_name = "rspamd";
|
||||
scheme = "https";
|
||||
metrics_path = "/metrics/rspamd";
|
||||
honor_labels = false;
|
||||
dns_sd_configs = [
|
||||
{
|
||||
names = cfg.service-discovery-dns.rspamd;
|
||||
}
|
||||
];
|
||||
static_configs = [
|
||||
{
|
||||
targets = cfg.static-targets.rspamd;
|
||||
}
|
||||
];
|
||||
}
|
||||
];
|
||||
|
||||
pushgateway = {
|
||||
enable = if (cfg.push-url != null) then true else false;
|
||||
web = {
|
||||
external-url = if cfg.push-url == null then
|
||||
cfg.push-address
|
||||
else
|
||||
cfg.push-url;
|
||||
listen-address = cfg.push-address;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
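# Illustrative sketch (not part of this commit): a minimal fudo.prometheus
# configuration. The hostname and targets are assumed values; note that every
# scrape job above reads all four keys, so each attribute set supplies them.
{ ... }: {
  fudo.prometheus = {
    enable = true;
    hostname = "metrics.example.fudo.org";
    service-discovery-dns = {
      node = [ "node._metrics._tcp.example.fudo.org" ];
      dovecot = [ ];
      postfix = [ ];
      rspamd = [ ];
    };
    static-targets = {
      node = [ "some-host.example.fudo.org:443" ];
      dovecot = [ ];
      postfix = [ ];
      rspamd = [ ];
    };
  };
}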
@@ -0,0 +1,221 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.secrets;
|
||||
|
||||
encrypt-on-disk = { secret-name, target-host, target-pubkey, source-file }:
|
||||
pkgs.stdenv.mkDerivation {
|
||||
name = "${target-host}-${secret-name}-secret";
|
||||
phases = "installPhase";
|
||||
buildInputs = [ pkgs.age ];
|
||||
installPhase = ''
|
||||
age -a -r "${target-pubkey}" -o $out ${source-file}
|
||||
'';
|
||||
};
|
||||
|
||||
decrypt-script = { secret-name, source-file, target-host, target-file
|
||||
, host-master-key, user, group, permissions }:
|
||||
pkgs.writeShellScript "decrypt-fudo-secret-${target-host}-${secret-name}.sh" ''
|
||||
rm -f ${target-file}
|
||||
touch ${target-file}
|
||||
chown ${user}:${group} ${target-file}
|
||||
chmod ${permissions} ${target-file}
|
||||
# NOTE: silly hack because sometimes age leaves a blank line
|
||||
# Only include lines with at least one non-space character
|
||||
SRC=$(mktemp fudo-secret-${target-host}-${secret-name}.XXXXXXXX)
|
||||
cat ${encrypt-on-disk {
|
||||
inherit secret-name source-file target-host;
|
||||
target-pubkey = host-master-key.public-key;
|
||||
}} | grep "[^ ]" > $SRC
|
||||
age -d -i ${host-master-key.key-path} -o ${target-file} $SRC
|
||||
rm -f $SRC
|
||||
'';
|
||||
|
||||
secret-service = target-host: secret-name:
|
||||
{ source-file, target-file, user, group, permissions, ... }: {
|
||||
description = "decrypt secret ${secret-name} for ${target-host}.";
|
||||
wantedBy = [ "default.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = let
|
||||
host-master-key = config.fudo.hosts.${target-host}.master-key;
|
||||
in decrypt-script {
|
||||
inherit secret-name source-file target-host target-file host-master-key
|
||||
user group permissions;
|
||||
};
|
||||
};
|
||||
path = [ pkgs.age ];
|
||||
};
|
||||
|
||||
secretOpts = { name, ... }: {
|
||||
options = with types; {
|
||||
source-file = mkOption {
|
||||
type = path; # CAREFUL: this will copy the file to nixstore...keep on deploy host
|
||||
description = "File from which to load the secret. If unspecified, a random new password will be generated.";
|
||||
default = "${generate-secret name}/passwd";
|
||||
};
|
||||
|
||||
target-file = mkOption {
|
||||
type = str;
|
||||
description =
|
||||
"Target file on the host; the secret will be decrypted to this file.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User (on target host) to which the file will belong.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group (on target host) to which the file will belong.";
|
||||
default = "nogroup";
|
||||
};
|
||||
|
||||
permissions = mkOption {
|
||||
type = str;
|
||||
description = "Permissions to set on the target file.";
|
||||
default = "0400";
|
||||
};
|
||||
|
||||
metadata = mkOption {
|
||||
type = attrsOf anything;
|
||||
description = "Arbitrary metadata associated with this secret.";
|
||||
default = {};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
nix-build-users = let usernames = attrNames config.users.users;
|
||||
in filter (user: (builtins.match "^nixbld[0-9]{1,2}$" user) != null)
|
||||
usernames;
|
||||
|
||||
generate-secret = name: pkgs.stdenv.mkDerivation {
|
||||
name = "${name}-generated-passwd";
|
||||
|
||||
phases = [ "installPhase" ];
|
||||
|
||||
buildInputs = with pkgs; [ pwgen ];
|
||||
|
||||
buildPhase = ''
|
||||
echo "${name}-${config.instance.build-timestamp}" >> file.txt
|
||||
pwgen --secure --symbols --num-passwords=1 --sha1=file.txt 40 > passwd
|
||||
rm -f file.txt
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
mv passwd $out/passwd
|
||||
'';
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.secrets = with types; {
|
||||
enable = mkOption {
|
||||
type = bool;
|
||||
description = "Include secrets in the build (disable when secrets are unavailable)";
|
||||
default = true;
|
||||
};
|
||||
|
||||
host-secrets = mkOption {
|
||||
type = attrsOf (attrsOf (submodule secretOpts));
|
||||
description = "Map of hosts to host secrets";
|
||||
default = { };
|
||||
};
|
||||
|
||||
host-deep-secrets = mkOption {
|
||||
type = attrsOf (attrsOf (submodule secretOpts));
|
||||
description = ''
|
||||
Secrets that are only passed during deployment.
|
||||
|
||||
These secrets will be passed as nixops deployment secrets,
|
||||
_unlike_ regular secrets that are passed to hosts as part of
|
||||
the nixops store, but encrypted with the host SSH key. Regular
|
||||
secrets are kept secret from normal users. These secrets will
|
||||
be kept secret from _everybody_. However, they won't be
|
||||
available on the host at boot until a new deployment occurs.
|
||||
'';
|
||||
default = { };
|
||||
};
|
||||
|
||||
secret-users = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of users with read-access to secrets.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
secret-group = mkOption {
|
||||
type = str;
|
||||
description = "Group to which secrets will belong.";
|
||||
default = "nixops-secrets";
|
||||
};
|
||||
|
||||
secret-paths = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"Paths which contain (only) secrets. The contents will be reabable by the secret-group.";
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
users.groups = {
|
||||
${cfg.secret-group} = {
|
||||
members = cfg.secret-users ++ nix-build-users;
|
||||
};
|
||||
};
|
||||
|
||||
systemd = let
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
host-secrets = if (hasAttr hostname cfg.host-secrets) then
|
||||
cfg.host-secrets.${hostname}
|
||||
else
|
||||
{ };
|
||||
|
||||
host-secret-services = mapAttrs' (secret: secretOpts:
|
||||
(nameValuePair "fudo-secret-${hostname}-${secret}"
|
||||
(secret-service hostname secret secretOpts))) host-secrets;
|
||||
|
||||
trace-all = obj: builtins.trace obj obj;
|
||||
|
||||
host-secret-paths = mapAttrsToList
|
||||
(secret: secretOpts:
|
||||
let perms = if secretOpts.group != "nogroup" then "550" else "500"; # "nogroup" is the default group above
|
||||
in "d ${dirOf secretOpts.target-file} ${perms} ${secretOpts.user} ${secretOpts.group} - -")
|
||||
host-secrets;
|
||||
|
||||
build-secret-paths =
|
||||
map (path: "d '${path}' - root ${cfg.secret-group} - -")
|
||||
cfg.secret-paths;
|
||||
|
||||
in {
|
||||
tmpfiles.rules = host-secret-paths ++ build-secret-paths;
|
||||
|
||||
services = host-secret-services // {
|
||||
fudo-secrets-watcher = mkIf (length cfg.secret-paths > 0) {
|
||||
wantedBy = [ "default.target" ];
|
||||
description =
|
||||
"Ensure access for group ${cfg.secret-group} to fudo secret paths.";
|
||||
serviceConfig = {
|
||||
ExecStart = pkgs.writeShellScript "fudo-secrets-watcher.sh"
|
||||
(concatStringsSep "\n" (map (path: ''
|
||||
chown -R root:${cfg.secret-group} ${path}
|
||||
chmod -R u=rwX,g=rX,o= ${path}
|
||||
'') cfg.secret-paths));
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
paths.fudo-secrets-watcher = mkIf (length cfg.secret-paths > 0) {
|
||||
wantedBy = [ "default.target" ];
|
||||
description = "Watch fudo secret paths, and correct perms on changes.";
|
||||
pathConfig = {
|
||||
PathChanged = cfg.secret-paths;
|
||||
Unit = "fudo-secrets-watcher.service";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
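
A minimal sketch of how the secrets options above might be used from another module; the hostname, paths, and user below are hypothetical:

{ ... }: {
  fudo.secrets.enable = true;

  # One secret for the (hypothetical) host "web01"; it is encrypted with the
  # host master key and decrypted to target-file by the generated service.
  fudo.secrets.host-secrets.web01.example-api-key = {
    source-file = ./secrets/web01/example-api-key;  # stays on the deploy host
    target-file = "/run/keys/example-api-key";
    user = "example-service";
    permissions = "0400";
  };
}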
|
@@ -0,0 +1,103 @@
|
|||
{ lib, pkgs, config, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.secure-dns-proxy;
|
||||
|
||||
fudo-lib = import ../fudo-lib.nix { inherit lib; };
|
||||
|
||||
in {
|
||||
options.fudo.secure-dns-proxy = with types; {
|
||||
enable =
|
||||
mkEnableOption "Enable a DNS server using an encrypted upstream source.";
|
||||
|
||||
listen-port = mkOption {
|
||||
type = port;
|
||||
description = "Port on which to listen for DNS queries.";
|
||||
default = 53;
|
||||
};
|
||||
|
||||
upstream-dns = mkOption {
|
||||
type = listOf str;
|
||||
description = ''
|
||||
The upstream DNS services to use, in a format useable by dnsproxy.
|
||||
|
||||
See: https://github.com/AdguardTeam/dnsproxy
|
||||
'';
|
||||
default = [ "https://cloudflare-dns.com/dns-query" ];
|
||||
};
|
||||
|
||||
bootstrap-dns = mkOption {
|
||||
type = str;
|
||||
description =
|
||||
"A simple DNS server from which HTTPS DNS can be bootstrapped, if necessary.";
|
||||
default = "1.1.1.1";
|
||||
};
|
||||
|
||||
listen-ips = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of local IP addresses on which to listen.";
|
||||
default = [ "0.0.0.0" ];
|
||||
};
|
||||
|
||||
allowed-networks = mkOption {
|
||||
type = nullOr (listOf str);
|
||||
description =
|
||||
"List of networks with which this job is allowed to communicate.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User as which to run secure DNS proxy.";
|
||||
default = "secure-dns-proxy";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group as which to run secure DNS proxy.";
|
||||
default = "secure-dns-proxy";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable (let
|
||||
upgrade-perms = cfg.listen-port <= 1024;
|
||||
in {
|
||||
users = mkIf upgrade-perms {
|
||||
users = {
|
||||
${cfg.user} = {
|
||||
isSystemUser = true;
|
||||
group = cfg.group;
|
||||
};
|
||||
};
|
||||
|
||||
groups = {
|
||||
${cfg.group} = {
|
||||
members = [ cfg.user ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
fudo.system.services.secure-dns-proxy = {
|
||||
description = "DNS Proxy for secure DNS-over-HTTPS lookups.";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
privateNetwork = false;
|
||||
requiredCapabilities = mkIf upgrade-perms [ "CAP_NET_BIND_SERVICE" ];
|
||||
restartWhen = "always";
|
||||
addressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
networkWhitelist = cfg.allowed-networks;
|
||||
user = mkIf upgrade-perms cfg.user;
|
||||
group = mkIf upgrade-perms cfg.group;
|
||||
|
||||
execStart = let
|
||||
upstreams = map (upstream: "-u ${upstream}") cfg.upstream-dns;
|
||||
upstream-line = concatStringsSep " " upstreams;
|
||||
listen-line =
|
||||
concatStringsSep " " (map (listen: "-l ${listen}") cfg.listen-ips);
|
||||
in "${pkgs.dnsproxy}/bin/dnsproxy -p ${
|
||||
toString cfg.listen-port
|
||||
} ${upstream-line} ${listen-line} -b ${cfg.bootstrap-dns}";
|
||||
};
|
||||
});
|
||||
}
|
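
A minimal usage sketch for the secure-dns-proxy module; the port, addresses, and network below are illustrative only:

{ ... }: {
  fudo.secure-dns-proxy = {
    enable = true;
    listen-port = 5353;  # unprivileged port, so no extra capability is needed
    upstream-dns = [ "https://cloudflare-dns.com/dns-query" ];
    listen-ips = [ "127.0.0.1" ];
    allowed-networks = [ "10.0.0.0/8" ];  # hypothetical local network
  };
}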
|
@@ -0,0 +1,240 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
site-name = config.fudo.hosts.${hostname}.site;
|
||||
site-cfg = config.fudo.sites.${site-name};
|
||||
|
||||
site-hosts = filterAttrs (hostname: hostOpts: hostOpts.site == site-name)
|
||||
config.fudo.hosts;
|
||||
|
||||
siteOpts = { site, ... }: {
|
||||
options = with types; {
|
||||
site = mkOption {
|
||||
type = str;
|
||||
description = "Site name.";
|
||||
default = site;
|
||||
};
|
||||
|
||||
network = mkOption {
|
||||
type = str;
|
||||
description = "Network to be treated as local.";
|
||||
};
|
||||
|
||||
dynamic-network = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Network to be allocated by DHCP.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
gateway-v4 = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Gateway to use for public ipv4 internet access.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
gateway-v6 = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Gateway to use for public ipv6 internet access.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
local-groups = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of groups which should exist at this site.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
local-users = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"List of users which should exist on all hosts at this site.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
local-admins = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"List of admin users which should exist on all hosts at this site.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
enable-monitoring =
|
||||
mkEnableOption "Enable site-wide monitoring with prometheus.";
|
||||
|
||||
nameservers = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of nameservers to be used by hosts at this site.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
timezone = mkOption {
|
||||
type = str;
|
||||
description = "Timezone of the site.";
|
||||
example = "America/Winnipeg";
|
||||
};
|
||||
|
||||
deploy-pubkeys = mkOption {
|
||||
type = nullOr (listOf str);
|
||||
description = "SSH pubkey of site deploy key. Used by dropbear daemon.";
|
||||
default = null;
|
||||
};
|
||||
|
||||
enable-ssh-backdoor = mkOption {
|
||||
type = bool;
|
||||
description =
|
||||
"Enable a backup SSH server in case of failures of the primary.";
|
||||
default = true;
|
||||
};
|
||||
|
||||
dropbear-rsa-key-path = mkOption {
|
||||
type = str;
|
||||
description = "Location of Dropbear RSA key.";
|
||||
default = "/etc/dropbear/host_rsa_key";
|
||||
};
|
||||
|
||||
dropbear-ecdsa-key-path = mkOption {
|
||||
type = str;
|
||||
description = "Location of Dropbear ECDSA key.";
|
||||
default = "/etc/dropbear/host_ecdsa_key";
|
||||
};
|
||||
|
||||
dropbear-ssh-port = mkOption {
|
||||
type = port;
|
||||
description = "Port to be used for the backup SSH server.";
|
||||
default = 2112;
|
||||
};
|
||||
|
||||
enable-distributed-builds =
|
||||
mkEnableOption "Enable distributed builds for the site.";
|
||||
|
||||
build-servers = mkOption {
|
||||
type = attrsOf (submodule buildServerOpts);
|
||||
description =
|
||||
"List of hosts to be used as build servers for the local site.";
|
||||
default = { };
|
||||
example = {
|
||||
my-build-host = {
|
||||
port = 22;
|
||||
systems = [ "i686-linux" "x86_64-linux" ];
|
||||
build-user = "my-builder";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
local-networks = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of networks to consider local at this site.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
mail-server = mkOption {
|
||||
type = str;
|
||||
description = "Hostname of the mail server to use for this site.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
buildServerOpts = { hostname, ... }: {
|
||||
options = with types; {
|
||||
port = mkOption {
|
||||
type = port;
|
||||
description = "SSH port at which to contact the server.";
|
||||
default = 22;
|
||||
};
|
||||
|
||||
systems = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of systems for which this build server can build.";
|
||||
default = [ "i686-linux" "x86_64-linux" ];
|
||||
};
|
||||
|
||||
max-jobs = mkOption {
|
||||
type = int;
|
||||
description = "Max build allowed per-system.";
|
||||
default = 1;
|
||||
};
|
||||
|
||||
speed-factor = mkOption {
|
||||
type = int;
|
||||
description = "Weight to give this server, i.e. it's relative speed.";
|
||||
default = 1;
|
||||
};
|
||||
|
||||
supported-features = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of features supported by this server.";
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
build-user = mkOption {
|
||||
type = str;
|
||||
description = "User as which to run distributed builds.";
|
||||
default = "nix-site-builder";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.sites = mkOption {
|
||||
type = with types; attrsOf (submodule siteOpts);
|
||||
description = "Site configurations for all sites known to the system.";
|
||||
default = { };
|
||||
};
|
||||
|
||||
config = {
|
||||
networking.firewall.allowedTCPPorts =
|
||||
mkIf site-cfg.enable-ssh-backdoor [ site-cfg.dropbear-ssh-port ];
|
||||
|
||||
systemd = mkIf site-cfg.enable-ssh-backdoor {
|
||||
sockets = {
|
||||
dropbear-deploy = {
|
||||
wantedBy = [ "sockets.target" ];
|
||||
socketConfig = {
|
||||
ListenStream = "0.0.0.0:${toString site-cfg.dropbear-ssh-port}";
|
||||
Accept = true;
|
||||
};
|
||||
unitConfig = { restartIfChanged = true; };
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
dropbear-deploy-init = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
script = ''
|
||||
if [ ! -d /etc/dropbear ]; then
|
||||
mkdir /etc/dropbear
|
||||
chmod 700 /etc/dropbear
|
||||
fi
|
||||
|
||||
if [ ! -f ${site-cfg.dropbear-rsa-key-path} ]; then
|
||||
${pkgs.dropbear}/bin/dropbearkey -t rsa -f ${site-cfg.dropbear-rsa-key-path}
|
||||
${pkgs.coreutils}/bin/chmod 0400 ${site-cfg.dropbear-rsa-key-path}
|
||||
fi
|
||||
|
||||
if [ ! -f ${site-cfg.dropbear-ecdsa-key-path} ]; then
|
||||
${pkgs.dropbear}/bin/dropbearkey -t ecdsa -f ${site-cfg.dropbear-ecdsa-key-path}
|
||||
${pkgs.coreutils}/bin/chmod 0400 ${site-cfg.dropbear-ecdsa-key-path}
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
"dropbear-deploy@" = {
|
||||
description =
|
||||
"Per-connection service for deployment, using dropbear.";
|
||||
requires = [ "dropbear-deploy-init.service" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart =
|
||||
"${pkgs.dropbear}/bin/dropbear -F -i -w -m -j -k -r ${site-cfg.dropbear-rsa-key-path} -r ${site-cfg.dropbear-ecdsa-key-path}";
|
||||
ExecReload = "${pkgs.utillinux}/bin/kill -HUP $MAINPID";
|
||||
StandardInput = "socket";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
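
As a sketch, a site definition built from the options above might look like the following; every name and address is hypothetical:

{ ... }: {
  fudo.sites.example-site = {
    network = "10.0.0.0/24";
    gateway-v4 = "10.0.0.1";
    timezone = "America/Winnipeg";
    nameservers = [ "10.0.0.1" ];
    local-admins = [ "alice" ];
    mail-server = "mail.example.com";
    enable-distributed-builds = true;
    build-servers.builder01 = {
      port = 22;
      systems = [ "x86_64-linux" ];
      build-user = "nix-site-builder";
    };
  };
}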
|
@@ -0,0 +1,70 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.slynk;
|
||||
|
||||
initScript = port: load-paths: let
|
||||
load-path-string =
|
||||
concatStringsSep " " (map (path: "\"${path}\"") load-paths);
|
||||
in pkgs.writeText "slynk.lisp" ''
|
||||
(load (merge-pathnames "quicklisp/setup.lisp" (user-homedir-pathname)))
|
||||
(ql:quickload :slynk)
|
||||
(setf asdf:*central-registry*
|
||||
(append asdf:*central-registry*
|
||||
(list ${load-path-string})))
|
||||
(slynk:create-server :port ${toString port} :dont-close t)
|
||||
(dolist (var '("LD_LIBRARY_PATH"))
|
||||
(format t "~S: ~S~%" var (sb-unix::posix-getenv var)))
|
||||
|
||||
(loop (sleep 60))
|
||||
'';
|
||||
|
||||
lisp-libs = with pkgs.lispPackages; [
|
||||
alexandria
|
||||
asdf-package-system
|
||||
asdf-system-connections
|
||||
cl_plus_ssl
|
||||
cl-ppcre
|
||||
quicklisp
|
||||
quri
|
||||
uiop
|
||||
usocket
|
||||
];
|
||||
|
||||
in {
|
||||
options.fudo.slynk = {
|
||||
enable = mkEnableOption "Enable Slynk emacs common lisp server.";
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
description = "Port on which to open a Slynk server.";
|
||||
default = 4005;
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.user.services.slynk = {
|
||||
description = "Slynk Common Lisp server.";
|
||||
|
||||
serviceConfig = let
|
||||
load-paths = (map (pkg: "${pkg}/lib/common-lisp/") lisp-libs);
|
||||
in {
|
||||
ExecStartPre = "${pkgs.lispPackages.quicklisp}/bin/quicklisp init";
|
||||
ExecStart = "${pkgs.sbcl}/bin/sbcl --load ${initScript cfg.port load-paths}";
|
||||
Restart = "on-failure";
|
||||
PIDFile = "/run/slynk.$USERNAME.pid";
|
||||
};
|
||||
|
||||
path = with pkgs; [
|
||||
gcc
|
||||
glibc # for getent
|
||||
file
|
||||
];
|
||||
|
||||
environment = {
|
||||
LD_LIBRARY_PATH = "${pkgs.openssl_1_1.out}/lib";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
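
A minimal sketch of enabling the Slynk module above; the port shown is the module default:

{ ... }: {
  fudo.slynk = {
    enable = true;
    port = 4005;  # connect from Emacs with, e.g., M-x sly-connect to localhost:4005
  };
}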
|
@@ -0,0 +1,25 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
{
|
||||
config = {
|
||||
programs.ssh.knownHosts = let
|
||||
keyed-hosts =
|
||||
filterAttrs (h: o: o.ssh-pubkeys != [])
|
||||
config.fudo.hosts;
|
||||
|
||||
crossProduct = f: list0: list1:
|
||||
concatMap (el0: map (el1: f el0 el1) list1) list0;
|
||||
|
||||
all-hostnames = hostname: opts:
|
||||
[ hostname ] ++
|
||||
(crossProduct (host: domain: "${host}.${domain}")
|
||||
([ hostname ] ++ opts.aliases)
|
||||
([ opts.domain ] ++ opts.extra-domains));
|
||||
|
||||
in mapAttrs (hostname: hostOpts: {
|
||||
publicKeyFile = builtins.head hostOpts.ssh-pubkeys;
|
||||
hostNames = all-hostnames hostname hostOpts;
|
||||
}) keyed-hosts;
|
||||
};
|
||||
}
|
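
To illustrate the mapping above, here is a hypothetical host entry and the knownHosts entry it would produce, assuming the aliases, domain, extra-domains, and ssh-pubkeys host options referenced in the code:

# Hypothetical input:
#   fudo.hosts.web01 = {
#     domain = "example.com";
#     aliases = [ "www" ];
#     extra-domains = [ "example.org" ];
#     ssh-pubkeys = [ ./keys/web01_ed25519.pub ];
#   };
#
# Resulting entry (hostNames is the cross product of names and domains):
#   programs.ssh.knownHosts.web01 = {
#     publicKeyFile = ./keys/web01_ed25519.pub;
#     hostNames = [ "web01" "web01.example.com" "web01.example.org"
#                   "www.example.com" "www.example.org" ];
#   };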
|
@@ -0,0 +1,168 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.system;
|
||||
|
||||
portMappingOpts = { name, ... }: {
|
||||
options = with types; {
|
||||
internal-port = mkOption {
|
||||
type = port;
|
||||
description = "Port on localhost to recieve traffic";
|
||||
};
|
||||
external-port = mkOption {
|
||||
type = port;
|
||||
description = "External port on which to listen for traffic.";
|
||||
};
|
||||
protocols = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"Protocols for which to forward ports. Default is tcp-only.";
|
||||
default = [ "tcp" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.system = with types; {
|
||||
internal-port-map = mkOption {
|
||||
type = attrsOf (submodule portMappingOpts);
|
||||
description =
|
||||
"Sets of external ports to internal (i.e. localhost) ports to forward.";
|
||||
default = { };
|
||||
example = {
|
||||
sshmap = {
|
||||
internal-port = 2222;
|
||||
external-port = 22;
|
||||
protocol = "udp";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Managed manually, since the /etc/hosts generated by NixOS is not sufficient.
|
||||
hostfile-entries = mkOption {
|
||||
type = attrsOf (listOf str);
|
||||
description = "Map of extra IP addresses to hostnames for /etc/hosts";
|
||||
default = {};
|
||||
example = {
|
||||
"10.0.0.3" = [ "my-host" "my-host.my.domain" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf (cfg.internal-port-map != { }) {
|
||||
# FIXME: forwarding external ports to localhost is much harder than it should be;
# the attempts below are kept for reference but disabled.
|
||||
# boot.kernel.sysctl = mkIf (cfg.internal-port-map != { }) {
|
||||
# "net.ipv4.conf.all.route_localnet" = "1";
|
||||
# };
|
||||
|
||||
# fudo.system.services.forward-internal-ports = let
|
||||
# ip-line = op: src-port: target-port: protocol: ''
|
||||
# ${ipt} -t nat -${op} PREROUTING -p ${protocol} --dport ${
|
||||
# toString src-port
|
||||
# } -j REDIRECT --to-ports ${toString target-port}
|
||||
# ${ipt} -t nat -${op} OUTPUT -p ${protocol} -s lo --dport ${
|
||||
# toString src-port
|
||||
# } -j REDIRECT --to-ports ${toString target-port}
|
||||
# '';
|
||||
|
||||
# ip-forward-line = ip-line "I";
|
||||
|
||||
# ip-unforward-line = ip-line "D";
|
||||
|
||||
# traceOut = obj: builtins.trace obj obj;
|
||||
|
||||
# concatMapAttrsToList = f: attrs: concatLists (mapAttrsToList f attrs);
|
||||
|
||||
# portmap-entries = concatMapAttrsToList (name: opts:
|
||||
# map (protocol: {
|
||||
# src = opts.external-port;
|
||||
# target = opts.internal-port;
|
||||
# protocol = protocol;
|
||||
# }) opts.protocols) cfg.internal-port-map;
|
||||
|
||||
# make-entries = f: { src, target, protocol, ... }: f src target protocol;
|
||||
|
||||
# forward-entries = map (make-entries ip-forward-line) portmap-entries;
|
||||
|
||||
# unforward-entries = map (make-entries ip-unforward-line) portmap-entries;
|
||||
|
||||
# forward-ports-script = pkgs.writeShellScript "forward-internal-ports.sh"
|
||||
# (concatStringsSep "\n" forward-entries);
|
||||
|
||||
# unforward-ports-script =
|
||||
# pkgs.writeShellScript "unforward-internal-ports.sh"
|
||||
# (concatStringsSep "\n"
|
||||
# (map (make-entries ip-unforward-line) portmap-entries));
|
||||
# in {
|
||||
# wantedBy = [ "multi-user.target" ];
|
||||
# after = [ "firewall.service" "nat.service" ];
|
||||
# type = "oneshot";
|
||||
# description = "Rules for forwarding external ports to local ports.";
|
||||
# execStart = "${forward-ports-script}";
|
||||
# execStop = "${unforward-ports-script}";
|
||||
# requiredCapabilities =
|
||||
# [ "CAP_DAC_READ_SEARCH" "CAP_NET_ADMIN" "CAP_NET_RAW" ];
|
||||
# };
|
||||
|
||||
# networking.firewall = let
|
||||
# iptables = "ip46tables";
|
||||
# ip-forward-line = protocols: internal: external:
|
||||
# concatStringsSep "\n" (map (protocol: ''
|
||||
# ${iptables} -t nat -I PREROUTING -p ${protocol} --dport ${
|
||||
# toString external
|
||||
# } -j REDIRECT --to-ports ${toString internal}
|
||||
# ${iptables} -t nat -I OUTPUT -s lo -p ${protocol} --dport ${
|
||||
# toString external
|
||||
# } -j REDIRECT --to-ports ${toString internal}
|
||||
# '') protocols);
|
||||
|
||||
# ip-unforward-line = protocols: internal: external:
|
||||
# concatStringsSep "\n" (map (protocol: ''
|
||||
# ${iptables} -t nat -D PREROUTING -p ${protocol} --dport ${
|
||||
# toString external
|
||||
# } -j REDIRECT --to-ports ${toString internal}
|
||||
# ${iptables} -t nat -D OUTPUT -s lo -p ${protocol} --dport ${
|
||||
# toString external
|
||||
# } -j REDIRECT --to-ports ${toString internal}
|
||||
# '') protocols);
|
||||
# in {
|
||||
# enable = true;
|
||||
|
||||
# extraCommands = concatStringsSep "\n" (mapAttrsToList (name: opts:
|
||||
# ip-forward-line opts.protocols opts.internal-port opts.external-port)
|
||||
# cfg.internal-port-map);
|
||||
|
||||
# extraStopCommands = concatStringsSep "\n" (mapAttrsToList (name: opts:
|
||||
# ip-unforward-line opts.protocols opts.internal-port opts.external-port)
|
||||
# cfg.internal-port-map);
|
||||
# };
|
||||
|
||||
# networking.nat.forwardPorts =
|
||||
# let portmaps = (attrValues opts.external-port);
|
||||
# in concatMap (opts:
|
||||
# map (protocol: {
|
||||
# destination = "127.0.0.1:${toString opts.internal-port}";
|
||||
# sourcePort = opts.external-port;
|
||||
# proto = protocol;
|
||||
# }) opts.protocols) (attrValues cfg.internal-port-map);
|
||||
|
||||
# services.xinetd = mkIf ((length (attrNames cfg.internal-port-map)) > 0) {
|
||||
# enable = true;
|
||||
# services = let
|
||||
# svcs = mapAttrsToList (name: opts: opts // { name = name; })
|
||||
# cfg.internal-port-map;
|
||||
# svcs-protocols = concatMap
|
||||
# (svc: map (protocol: svc // { protocol = protocol; }) svc.protocols)
|
||||
# svcs;
|
||||
# in map (opts: {
|
||||
# name = opts.name;
|
||||
# unlisted = true;
|
||||
# port = opts.external-port;
|
||||
# server = "${pkgs.coreutils}/bin/false";
|
||||
# extraConfig = "redirect = localhost ${toString opts.internal-port}";
|
||||
# protocol = opts.protocol;
|
||||
# }) svcs-protocols;
|
||||
# };
|
||||
};
|
||||
}
|
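
A sketch of how these options might be set; note that the actual forwarding implementations above are currently commented out, so the mapping is recorded but not yet applied. Ports and addresses are illustrative:

{ ... }: {
  # Redirect external port 443 to a local service listening on 8443.
  fudo.system.internal-port-map.https-proxy = {
    external-port = 443;
    internal-port = 8443;
    protocols = [ "tcp" ];
  };

  fudo.system.hostfile-entries."10.0.0.3" = [ "my-host" "my-host.my.domain" ];
}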
|
@@ -0,0 +1,500 @@
|
|||
{ pkgs, lib, config, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.system;
|
||||
|
||||
mkDisableOption = description:
|
||||
mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = description;
|
||||
};
|
||||
|
||||
isEmpty = lst: 0 == (length lst);
|
||||
|
||||
serviceOpts = { name, ... }:
|
||||
with types; {
|
||||
options = {
|
||||
after = mkOption {
|
||||
type = listOf str;
|
||||
description = "List of services to start before this one.";
|
||||
default = [ ];
|
||||
};
|
||||
script = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Simple shell script for the service to run.";
|
||||
default = null;
|
||||
};
|
||||
reloadScript = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Script to run whenever the service is restarted.";
|
||||
default = null;
|
||||
};
|
||||
before = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"List of services before which this service should be started.";
|
||||
default = [ ];
|
||||
};
|
||||
requires = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"List of services on which this service depends. If they fail to start, this service won't start.";
|
||||
default = [ ];
|
||||
};
|
||||
preStart = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Script to run prior to starting this service.";
|
||||
default = null;
|
||||
};
|
||||
postStart = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Script to run after starting this service.";
|
||||
default = null;
|
||||
};
|
||||
preStop = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Script to run prior to stopping this service.";
|
||||
default = null;
|
||||
};
|
||||
postStop = mkOption {
|
||||
type = nullOr str;
|
||||
description = "Script to run after stopping this service.";
|
||||
default = null;
|
||||
};
|
||||
requiredBy = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"List of services which require this service, and should fail without it.";
|
||||
default = [ ];
|
||||
};
|
||||
wantedBy = mkOption {
|
||||
type = listOf str;
|
||||
default = [ ];
|
||||
description =
|
||||
"List of services before which this service should be started.";
|
||||
};
|
||||
environment = mkOption {
|
||||
type = attrsOf str;
|
||||
description = "Environment variables supplied to this service.";
|
||||
default = { };
|
||||
};
|
||||
environment-file = mkOption {
|
||||
type = nullOr str;
|
||||
description =
|
||||
"File containing environment variables supplied to this service.";
|
||||
default = null;
|
||||
};
|
||||
description = mkOption {
|
||||
type = str;
|
||||
description = "Description of the service.";
|
||||
};
|
||||
path = mkOption {
|
||||
type = listOf package;
|
||||
description =
|
||||
"A list of packages which should be in the service PATH.";
|
||||
default = [ ];
|
||||
};
|
||||
restartIfChanged =
|
||||
mkDisableOption "Restart the service if the definition changes.";
|
||||
dynamicUser = mkDisableOption "Create a new user for this service.";
|
||||
privateNetwork = mkDisableOption "Only allow access to localhost.";
|
||||
privateUsers =
|
||||
mkDisableOption "Don't allow access to system user list.";
|
||||
privateDevices = mkDisableOption
|
||||
"Restrict access to system devices other than basics.";
|
||||
privateTmp = mkDisableOption "Limit service to a private tmp dir.";
|
||||
protectControlGroups =
|
||||
mkDisableOption "Don't allow service to modify control groups.";
|
||||
protectClock =
|
||||
mkDisableOption "Don't allow service to modify system clock.";
|
||||
restrictSuidSgid =
|
||||
mkDisableOption "Don't allow service to suid or sgid binaries.";
|
||||
protectKernelTunables =
|
||||
mkDisableOption "Don't allow service to modify kernel tunables.";
|
||||
privateMounts =
|
||||
mkDisableOption "Don't allow service to access mounted devices.";
|
||||
protectKernelModules = mkDisableOption
|
||||
"Don't allow service to load or evict kernel modules.";
|
||||
protectHome = mkDisableOption "Limit access to home directories.";
|
||||
protectHostname =
|
||||
mkDisableOption "Don't allow service to modify hostname.";
|
||||
protectKernelLogs =
|
||||
mkDisableOption "Don't allow access to kernel logs.";
|
||||
lockPersonality = mkDisableOption "Lock service 'personality'.";
|
||||
restrictRealtime =
|
||||
mkDisableOption "Restrict service from using realtime functionality.";
|
||||
restrictNamespaces =
|
||||
mkDisableOption "Restrict service from using namespaces.";
|
||||
memoryDenyWriteExecute = mkDisableOption
|
||||
"Restrict process from executing from writable memory.";
|
||||
keyringMode = mkOption {
|
||||
type = str;
|
||||
default = "private";
|
||||
description = "Sharing state of process keyring.";
|
||||
};
|
||||
requiredCapabilities = mkOption {
|
||||
type = listOf (enum capabilities);
|
||||
default = [ ];
|
||||
description = "List of capabilities granted to the service.";
|
||||
};
|
||||
restartWhen = mkOption {
|
||||
type = str;
|
||||
default = "on-failure";
|
||||
description = "Conditions under which process should be restarted.";
|
||||
};
|
||||
restartSec = mkOption {
|
||||
type = int;
|
||||
default = 10;
|
||||
description = "Number of seconds to wait before restarting service.";
|
||||
};
|
||||
execStart = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Command to run to launch the service.";
|
||||
};
|
||||
execStop = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Command to run to launch the service.";
|
||||
};
|
||||
protectSystem = mkOption {
|
||||
type = enum [ "true" "false" "full" "strict" true false ];
|
||||
default = "full";
|
||||
description =
|
||||
"Level of protection to apply to the system for this service.";
|
||||
};
|
||||
addressFamilies = mkOption {
|
||||
type = listOf (enum address-families);
|
||||
default = [ ];
|
||||
description = "List of address families which the service can use.";
|
||||
};
|
||||
workingDirectory = mkOption {
|
||||
type = nullOr path;
|
||||
default = null;
|
||||
description = "Directory in which to launch the service.";
|
||||
};
|
||||
user = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "User as which to launch this service.";
|
||||
};
|
||||
group = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Primary group as which to launch this service.";
|
||||
};
|
||||
type = mkOption {
|
||||
type =
|
||||
enum [ "simple" "exec" "forking" "oneshot" "dbus" "notify" "idle" ];
|
||||
default = "simple";
|
||||
description = "Systemd service type of this service.";
|
||||
};
|
||||
partOf = mkOption {
|
||||
type = listOf str;
|
||||
default = [ ];
|
||||
description =
|
||||
"List of targets to which this service belongs (and with which it should be restarted).";
|
||||
};
|
||||
standardOutput = mkOption {
|
||||
type = str;
|
||||
default = "journal";
|
||||
description = "Destination of standard output for this service.";
|
||||
};
|
||||
standardError = mkOption {
|
||||
type = str;
|
||||
default = "journal";
|
||||
description = "Destination of standard error for this service.";
|
||||
};
|
||||
pidFile = mkOption {
|
||||
type = nullOr str;
|
||||
default = null;
|
||||
description = "Service PID file.";
|
||||
};
|
||||
networkWhitelist = mkOption {
|
||||
type = nullOr (listOf str);
|
||||
default = null;
|
||||
description =
|
||||
"A list of networks with which this process may communicate.";
|
||||
};
|
||||
allowedSyscalls = mkOption {
|
||||
type = listOf (enum syscalls);
|
||||
default = [ ];
|
||||
description = "System calls which the service is permitted to make.";
|
||||
};
|
||||
maximumUmask = mkOption {
|
||||
type = str;
|
||||
default = "0077";
|
||||
description = "Umask to apply to files created by the service.";
|
||||
};
|
||||
startOnlyPerms = mkDisableOption "Disable perms after startup.";
|
||||
onCalendar = mkOption {
|
||||
type = nullOr str;
|
||||
description =
|
||||
"Schedule on which the job should be invoked. See: man systemd.time(7).";
|
||||
default = null;
|
||||
};
|
||||
runtimeDirectory = mkOption {
|
||||
type = nullOr str;
|
||||
description =
|
||||
"Directory created at runtime with perms for the service to read/write.";
|
||||
default = null;
|
||||
};
|
||||
readWritePaths = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of paths to which the service will be allowed normal access, even if ProtectSystem=strict.";
|
||||
default = [ ];
|
||||
};
|
||||
stateDirectory = mkOption {
|
||||
type = nullOr str;
|
||||
description =
|
||||
"State directory for the service, available via STATE_DIRECTORY.";
|
||||
default = null;
|
||||
};
|
||||
cacheDirectory = mkOption {
|
||||
type = nullOr str;
|
||||
description =
|
||||
"Cache directory for the service, available via CACHE_DIRECTORY.";
|
||||
default = null;
|
||||
};
|
||||
inaccessiblePaths = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of paths which should be inaccessible to the service.";
|
||||
default = [ "/home" "/root" ];
|
||||
};
|
||||
# noExecPaths = mkOption {
|
||||
# type = listOf str;
|
||||
# description =
|
||||
# "A list of paths where the service will not be allowed to run executables.";
|
||||
# default = [ "/home" "/root" "/tmp" "/var" ];
|
||||
# };
|
||||
readOnlyPaths = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of paths to which will be read-only for the service.";
|
||||
default = [ ];
|
||||
};
|
||||
execPaths = mkOption {
|
||||
type = listOf str;
|
||||
description =
|
||||
"A list of paths where the service WILL be allowed to run executables.";
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# See: man capabilities(7)
|
||||
capabilities = [
|
||||
"CAP_AUDIT_CONTROL"
|
||||
"CAP_AUDIT_READ"
|
||||
"CAP_AUDIT_WRITE"
|
||||
"CAP_BLOCK_SUSPEND"
|
||||
"CAP_BPF"
|
||||
"CAP_CHECKPOINT_RESTORE"
|
||||
"CAP_CHOWN"
|
||||
"CAP_DAC_OVERRIDE"
|
||||
"CAP_DAC_READ_SEARCH"
|
||||
"CAP_FOWNER"
|
||||
"CAP_FSETID"
|
||||
"CAP_IPC_LOCK"
|
||||
"CAP_IPC_OWNER"
|
||||
"CAP_KILL"
|
||||
"CAP_LEASE"
|
||||
"CAP_LINUX_IMMUTABLE"
|
||||
"CAP_MAC_ADMIN"
|
||||
"CAP_MAC_OVERRIDE"
|
||||
"CAP_MKNOD"
|
||||
"CAP_NET_ADMIN"
|
||||
"CAP_NET_BIND_SERVICE"
|
||||
"CAP_NET_BROADCAST"
|
||||
"CAP_NET_RAW"
|
||||
"CAP_PERFMON"
|
||||
"CAP_SETGID"
|
||||
"CAP_SETFCAP"
|
||||
"CAP_SETPCAP"
|
||||
"CAP_SETUID"
|
||||
"CAP_SYS_ADMIN"
|
||||
"CAP_SYS_BOOT"
|
||||
"CAP_SYS_CHROOT"
|
||||
"CAP_SYS_MODULE"
|
||||
"CAP_SYS_NICE"
|
||||
"CAP_SYS_PACCT"
|
||||
"CAP_SYS_PTRACE"
|
||||
"CAP_SYS_RAWIO"
|
||||
"CAP_SYS_RESOURCE"
|
||||
"CAP_SYS_TIME"
|
||||
"CAP_SYS_TTY_CONFIG"
|
||||
"CAP_SYSLOG"
|
||||
"CAP_WAKE_ALARM"
|
||||
];
|
||||
|
||||
syscalls = [
|
||||
"@clock"
|
||||
"@debug"
|
||||
"@module"
|
||||
"@mount"
|
||||
"@raw-io"
|
||||
"@reboot"
|
||||
"@swap"
|
||||
"@privileged"
|
||||
"@resources"
|
||||
"@cpu-emulation"
|
||||
"@obsolete"
|
||||
];
|
||||
|
||||
address-families = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
|
||||
|
||||
restrict-capabilities = allowed:
|
||||
if (allowed == [ ]) then
|
||||
"~${concatStringsSep " " capabilities}"
|
||||
else
|
||||
concatStringsSep " " allowed;
|
||||
|
||||
restrict-syscalls = allowed:
|
||||
if (allowed == [ ]) then
|
||||
"~${concatStringsSep " " syscalls}"
|
||||
else
|
||||
concatStringsSep " " allowed;
|
||||
|
||||
restrict-address-families = allowed:
|
||||
if (allowed == [ ]) then [ "~AF_INET" "~AF_INET6" ] else allowed;
|
||||
|
||||
dirOpts = { path, ... }: {
|
||||
options = with types; {
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User by whom the directory will be owned.";
|
||||
default = "nobody";
|
||||
};
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group by which the directory will be owned.";
|
||||
default = "nogroup";
|
||||
};
|
||||
perms = mkOption {
|
||||
type = str;
|
||||
description = "Permission bits to apply to the directory.";
|
||||
default = "0770";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.system = with types; {
|
||||
services = mkOption {
|
||||
type = attrsOf (submodule serviceOpts);
|
||||
description = "Fudo system service definitions, with secure defaults.";
|
||||
default = { };
|
||||
};
|
||||
|
||||
tmpOnTmpfs = mkOption {
|
||||
type = bool;
|
||||
description = "Put tmp filesystem on tmpfs (needs enough RAM).";
|
||||
default = true;
|
||||
};
|
||||
|
||||
ensure-directories = mkOption {
|
||||
type = attrsOf (submodule dirOpts);
|
||||
description = "A map of required directories to directory properties.";
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
|
||||
systemd.timers = mapAttrs (name: opts: {
|
||||
enable = true;
|
||||
description = opts.description;
|
||||
partOf = [ "${name}.timer" ];
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = { OnCalendar = opts.onCalendar; };
|
||||
}) (filterAttrs (name: opts: opts.onCalendar != null) cfg.services);
|
||||
|
||||
systemd.tmpfiles.rules = mapAttrsToList
|
||||
(path: opts: "d ${path} ${opts.perms} ${opts.user} ${opts.group} - -")
|
||||
cfg.ensure-directories;
|
||||
|
||||
systemd.targets.fudo-init = { wantedBy = [ "multi-user.target" ]; };
|
||||
|
||||
systemd.services = mapAttrs (name: opts: {
|
||||
enable = true;
|
||||
script = mkIf (opts.script != null) opts.script;
|
||||
reload = mkIf (opts.reloadScript != null) opts.reloadScript;
|
||||
after = opts.after ++ [ "fudo-init.target" ];
|
||||
before = opts.before;
|
||||
requires = opts.requires;
|
||||
wantedBy = opts.wantedBy;
|
||||
preStart = mkIf (opts.preStart != null) opts.preStart;
|
||||
postStart = mkIf (opts.postStart != null) opts.postStart;
|
||||
postStop = mkIf (opts.postStop != null) opts.postStop;
|
||||
preStop = mkIf (opts.preStop != null) opts.preStop;
|
||||
partOf = opts.partOf;
|
||||
requiredBy = opts.requiredBy;
|
||||
environment = opts.environment;
|
||||
description = opts.description;
|
||||
restartIfChanged = opts.restartIfChanged;
|
||||
path = opts.path;
|
||||
serviceConfig = {
|
||||
PrivateNetwork = opts.privateNetwork;
|
||||
PrivateUsers = mkIf (opts.user == null) opts.privateUsers;
|
||||
PrivateDevices = opts.privateDevices;
|
||||
PrivateTmp = opts.privateTmp;
|
||||
PrivateMounts = opts.privateMounts;
|
||||
ProtectControlGroups = opts.protectControlGroups;
|
||||
ProtectKernelTunables = opts.protectKernelTunables;
|
||||
ProtectKernelModules = opts.protectKernelModules;
|
||||
ProtectSystem = opts.protectSystem;
|
||||
ProtectHostname = opts.protectHostname;
|
||||
ProtectHome = opts.protectHome;
|
||||
ProtectClock = opts.protectClock;
|
||||
ProtectKernelLogs = opts.protectKernelLogs;
|
||||
KeyringMode = opts.keyringMode;
|
||||
EnvironmentFile =
|
||||
mkIf (opts.environment-file != null) opts.environment-file;
|
||||
|
||||
# This is more complicated than it looks...
|
||||
# CapabilityBoundingSet = restrict-capabilities opts.requiredCapabilities;
|
||||
AmbientCapabilities = concatStringsSep " " opts.requiredCapabilities;
|
||||
SecureBits = mkIf ((length opts.requiredCapabilities) > 0) "keep-caps";
|
||||
|
||||
DynamicUser = mkIf (opts.user == null) opts.dynamicUser;
|
||||
Restart = opts.restartWhen;
|
||||
WorkingDirectory =
|
||||
mkIf (opts.workingDirectory != null) opts.workingDirectory;
|
||||
RestrictAddressFamilies =
|
||||
restrict-address-families opts.addressFamilies;
|
||||
RestrictNamespaces = opts.restrictNamespaces;
|
||||
User = mkIf (opts.user != null) opts.user;
|
||||
Group = mkIf (opts.group != null) opts.group;
|
||||
Type = opts.type;
|
||||
StandardOutput = opts.standardOutput;
|
||||
PIDFile = mkIf (opts.pidFile != null) opts.pidFile;
|
||||
LockPersonality = opts.lockPersonality;
|
||||
RestrictRealtime = opts.restrictRealtime;
|
||||
ExecStart = mkIf (opts.execStart != null) opts.execStart;
|
||||
ExecStop = mkIf (opts.execStop != null) opts.execStop;
|
||||
MemoryDenyWriteExecute = opts.memoryDenyWriteExecute;
|
||||
SystemCallFilter = restrict-syscalls opts.allowedSyscalls;
|
||||
UMask = opts.maximumUmask;
|
||||
IpAddressAllow =
|
||||
mkIf (opts.networkWhitelist != null) opts.networkWhitelist;
|
||||
IpAddressDeny = mkIf (opts.networkWhitelist != null) "any";
|
||||
LimitNOFILE = "49152";
|
||||
PermissionsStartOnly = opts.startOnlyPerms;
|
||||
RuntimeDirectory =
|
||||
mkIf (opts.runtimeDirectory != null) opts.runtimeDirectory;
|
||||
CacheDirectory = mkIf (opts.cacheDirectory != null) opts.cacheDirectory;
|
||||
StateDirectory = mkIf (opts.stateDirectory != null) opts.stateDirectory;
|
||||
ReadWritePaths = opts.readWritePaths;
|
||||
ReadOnlyPaths = opts.readOnlyPaths;
|
||||
InaccessiblePaths = opts.inaccessiblePaths;
|
||||
# Apparently not supported yet?
|
||||
# NoExecPaths = opts.noExecPaths;
|
||||
ExecPaths = opts.execPaths;
|
||||
};
|
||||
}) config.fudo.system.services;
|
||||
};
|
||||
}
|
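
A sketch of a service defined against the options above, relying on the hardened defaults; the package, user, and workload are placeholders:

{ pkgs, ... }: {
  fudo.system.services.hello-daemon = {
    description = "Example job run with the hardened defaults above.";
    wantedBy = [ "multi-user.target" ];
    type = "oneshot";
    execStart = "${pkgs.hello}/bin/hello";  # hypothetical workload
    user = "nobody";
    addressFamilies = [ ];                  # no network access needed
  };
}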
|
@@ -0,0 +1,126 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
|
||||
user = import ../types/user.nix { inherit lib; };
|
||||
|
||||
list-includes = list: el: elem el list; # true when el is present in the list
|
||||
|
||||
filterExistingUsers = users: group-members:
|
||||
let user-list = attrNames users;
|
||||
in filter (username: list-includes user-list username) group-members;
|
||||
|
||||
hostname = config.instance.hostname;
|
||||
host-cfg = config.fudo.hosts.${hostname};
|
||||
|
||||
in {
|
||||
options = with types; {
|
||||
fudo = {
|
||||
users = mkOption {
|
||||
type = attrsOf (submodule user.userOpts);
|
||||
description = "Users";
|
||||
default = { };
|
||||
};
|
||||
|
||||
groups = mkOption {
|
||||
type = attrsOf (submodule user.groupOpts);
|
||||
description = "Groups";
|
||||
default = { };
|
||||
};
|
||||
|
||||
system-users = mkOption {
|
||||
type = attrsOf (submodule user.systemUserOpts);
|
||||
description = "System users (probably not what you're looking for!)";
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = let
|
||||
sys = config.instance;
|
||||
in {
|
||||
fudo.auth.ldap-server = {
|
||||
users = filterAttrs
|
||||
(username: userOpts: userOpts.ldap-hashed-passwd != null)
|
||||
config.fudo.users;
|
||||
|
||||
groups = config.fudo.groups;
|
||||
|
||||
system-users = config.fudo.system-users;
|
||||
};
|
||||
|
||||
programs.ssh.extraConfig = mkAfter ''
|
||||
IdentityFile %h/.ssh/id_rsa
|
||||
IdentityFile /etc/ssh/private_keys.d/%u.key
|
||||
'';
|
||||
|
||||
environment.etc = mapAttrs' (username: userOpts:
|
||||
nameValuePair
|
||||
"ssh/private_keys.d/${username}"
|
||||
{
|
||||
text = concatStringsSep "\n"
|
||||
(map (keypair: readFile keypair.public-key)
|
||||
userOpts.ssh-keys);
|
||||
})
|
||||
sys.local-users;
|
||||
|
||||
users = {
|
||||
users = mapAttrs (username: userOpts: {
|
||||
isNormalUser = true;
|
||||
uid = userOpts.uid;
|
||||
createHome = true;
|
||||
description = userOpts.common-name;
|
||||
group = userOpts.primary-group;
|
||||
home = if (userOpts.home-directory != null) then
|
||||
userOpts.home-directory
|
||||
else
|
||||
"/home/${userOpts.primary-group}/${username}";
|
||||
hashedPassword = userOpts.login-hashed-passwd;
|
||||
openssh.authorizedKeys.keys = userOpts.ssh-authorized-keys;
|
||||
}) sys.local-users;
|
||||
|
||||
groups = (mapAttrs (groupname: groupOpts: {
|
||||
gid = groupOpts.gid;
|
||||
members = filterExistingUsers sys.local-users groupOpts.members;
|
||||
}) sys.local-groups) // {
|
||||
wheel = { members = sys.local-admins; };
|
||||
docker = mkIf (host-cfg.docker-server) { members = sys.local-admins; };
|
||||
};
|
||||
};
|
||||
|
||||
services.nfs.idmapd.settings = let
|
||||
local-domain = config.instance.local-domain;
|
||||
local-admins = config.instance.local-admins;
|
||||
local-users = config.instance.local-users;
|
||||
local-realm = config.fudo.domains.${local-domain}.gssapi-realm;
|
||||
in {
|
||||
General = {
|
||||
Verbosity = 10;
|
||||
# Domain = local-domain;
|
||||
"Local-Realms" = local-realm;
|
||||
};
|
||||
Translation = {
|
||||
GSS-Methods = "static";
|
||||
};
|
||||
Static = let
|
||||
generate-admin-entry = admin: userOpts:
|
||||
nameValuePair "${admin}/root@${local-realm}" "root";
|
||||
generate-user-entry = user: userOpts:
|
||||
nameValuePair "${user}@${local-realm}" user;
|
||||
|
||||
admin-entries =
|
||||
mapAttrs' generate-admin-entry (getAttrs local-admins local-users);
|
||||
user-entries =
|
||||
mapAttrs' generate-user-entry local-users;
|
||||
in admin-entries // user-entries;
|
||||
};
|
||||
|
||||
# Group home directories have to exist, otherwise users can't log in
|
||||
systemd.tmpfiles.rules = let
|
||||
groups-with-members = attrNames
|
||||
(filterAttrs (group: groupOpts: (length groupOpts.members) > 0)
|
||||
sys.local-groups);
|
||||
in map (group: "d /home/${group} 550 root ${group} - -") groups-with-members;
|
||||
};
|
||||
}
|
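
A sketch of user and group declarations against the options above; the field names follow the userOpts and groupOpts submodules in ../types/user.nix (as used in this file), and all values are hypothetical:

{ ... }: {
  fudo.users.alice = {
    common-name = "Alice Example";
    uid = 1001;
    primary-group = "staff";
    login-hashed-passwd = "$6$...";  # e.g. output of mkpasswd -m sha-512
    ssh-authorized-keys = [ "ssh-ed25519 AAAA... alice@laptop" ];
  };

  fudo.groups.staff = {
    gid = 2001;
    members = [ "alice" ];
  };
}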
|
@@ -0,0 +1,126 @@
|
|||
{ pkgs, lib, config, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.fudo.vpn;
|
||||
|
||||
generate-pubkey-pkg = name: privkey:
|
||||
pkgs.runCommand "wireguard-${name}-pubkey" {
|
||||
WIREGUARD_PRIVATE_KEY = privkey;
|
||||
} ''
|
||||
mkdir $out
|
||||
PUBKEY=$(echo $WIREGUARD_PRIVATE_KEY | ${pkgs.wireguard-tools}/bin/wg pubkey)
|
||||
echo $PUBKEY > $out/pubkey.key
|
||||
'';
|
||||
|
||||
generate-client-config = privkey-file: server-pubkey: network: server-ip: listen-port: dns-servers: ''
|
||||
[Interface]
|
||||
Address = ${ip.networkMinIp network}
|
||||
PrivateKey = ${fileContents privkey-file}
|
||||
ListenPort = ${toString listen-port}
|
||||
DNS = ${concatStringsSep ", " dns-servers}
|
||||
|
||||
[Peer]
|
||||
PublicKey = ${server-pubkey}
|
||||
Endpoint = ${server-ip}:${toString listen-port}
|
||||
AllowedIps = 0.0.0.0/0, ::/0
|
||||
PersistentKeepalive = 25
|
||||
'';
|
||||
|
||||
generate-peer-entry = peer-name: peer-privkey-path: peer-allowed-ips: let
|
||||
peer-pkg = generate-pubkey-pkg "client-${peer-name}" (fileContents peer-privkey-path);
|
||||
pubkey-path = "${peer-pkg}/pubkey.key";
|
||||
in {
|
||||
publicKey = fileContents pubkey-path;
|
||||
allowedIPs = peer-allowed-ips;
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.vpn = with types; {
|
||||
enable = mkEnableOption "Enable Fudo VPN";
|
||||
|
||||
network = mkOption {
|
||||
type = str;
|
||||
description = "Network range to assign this interface.";
|
||||
default = "10.100.0.0/16";
|
||||
};
|
||||
|
||||
private-key-file = mkOption {
|
||||
type = str;
|
||||
description = "Path to the secret key (generated with wg [genkey/pubkey]).";
|
||||
example = "/path/to/secret.key";
|
||||
};
|
||||
|
||||
listen-port = mkOption {
|
||||
type = port;
|
||||
description = "Port on which to listen for incoming connections.";
|
||||
default = 51820;
|
||||
};
|
||||
|
||||
dns-servers = mkOption {
|
||||
type = listOf str;
|
||||
description = "A list of dns servers to pass to clients.";
|
||||
default = ["1.1.1.1" "8.8.8.8"];
|
||||
};
|
||||
|
||||
server-ip = mkOption {
|
||||
type = str;
|
||||
description = "IP of this WireGuard server.";
|
||||
};
|
||||
|
||||
peers = mkOption {
|
||||
type = attrsOf str;
|
||||
description = "A map of peers to shared private keys.";
|
||||
default = {};
|
||||
example = {
|
||||
peer0 = "/path/to/priv.key";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.etc = let
|
||||
peer-data = imap1 (i: peer:{
|
||||
name = peer.name;
|
||||
privkey-path = peer.privkey-path;
|
||||
network-range = let
|
||||
base = ip.intToIpv4
|
||||
((ip.ipv4ToInt (ip.getNetworkBase cfg.network)) + (i * 256));
|
||||
in "${base}/24";
|
||||
}) (mapAttrsToList (name: privkey-path: {
|
||||
name = name;
|
||||
privkey-path = privkey-path;
|
||||
}) cfg.peers);
|
||||
|
||||
server-pubkey-pkg = generate-pubkey-pkg "server-pubkey" (fileContents cfg.private-key-file);
|
||||
|
||||
server-pubkey = fileContents "${server-pubkey-pkg}/pubkey.key";
|
||||
|
||||
in listToAttrs
|
||||
(map (peer: nameValuePair "wireguard/clients/${peer.name}.conf" {
|
||||
mode = "0400";
|
||||
user = "root";
|
||||
group = "root";
|
||||
text = generate-client-config
|
||||
peer.privkey-path
|
||||
server-pubkey
|
||||
peer.network-range
|
||||
cfg.server-ip
|
||||
cfg.listen-port
|
||||
cfg.dns-servers;
|
||||
}) peer-data);
|
||||
|
||||
networking.wireguard = {
|
||||
enable = true;
|
||||
interfaces.wgtun0 = {
|
||||
generatePrivateKeyFile = false;
|
||||
ips = [ cfg.network ];
|
||||
listenPort = cfg.listen-port;
|
||||
peers = mapAttrsToList
|
||||
(name: private-key: generate-peer-entry name private-key ["0.0.0.0/0" "::/0"])
|
||||
cfg.peers;
|
||||
privateKeyFile = cfg.private-key-file;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
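
A minimal sketch of the VPN module in use; the public IP and key paths are hypothetical, and each peer maps to a private key kept outside the nix store:

{ ... }: {
  fudo.vpn = {
    enable = true;
    server-ip = "203.0.113.10";
    private-key-file = "/var/lib/wireguard/server.key";
    peers = {
      laptop = "/var/lib/wireguard/clients/laptop.key";
      phone = "/var/lib/wireguard/clients/phone.key";
    };
  };
  # Client configs are then rendered to /etc/wireguard/clients/<peer>.conf.
}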
|
@@ -0,0 +1,385 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
hostname = config.instance.hostname;
|
||||
|
||||
cfg = config.fudo.webmail;
|
||||
|
||||
webmail-user = cfg.user;
|
||||
webmail-group = cfg.group;
|
||||
|
||||
base-data-path = "/run/rainloop";
|
||||
|
||||
concatMapAttrs = f: attrs:
|
||||
foldr (a: b: a // b) {} (mapAttrsToList f attrs);
|
||||
|
||||
fastcgi-conf = builtins.toFile "fastcgi.conf" ''
|
||||
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
|
||||
fastcgi_param QUERY_STRING $query_string;
|
||||
fastcgi_param REQUEST_METHOD $request_method;
|
||||
fastcgi_param CONTENT_TYPE $content_type;
|
||||
fastcgi_param CONTENT_LENGTH $content_length;
|
||||
|
||||
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
||||
fastcgi_param REQUEST_URI $request_uri;
|
||||
fastcgi_param DOCUMENT_URI $document_uri;
|
||||
fastcgi_param DOCUMENT_ROOT $document_root;
|
||||
fastcgi_param SERVER_PROTOCOL $server_protocol;
|
||||
fastcgi_param REQUEST_SCHEME $scheme;
|
||||
fastcgi_param HTTPS $https if_not_empty;
|
||||
|
||||
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
|
||||
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
|
||||
|
||||
fastcgi_param REMOTE_ADDR $remote_addr;
|
||||
fastcgi_param REMOTE_PORT $remote_port;
|
||||
fastcgi_param SERVER_ADDR $server_addr;
|
||||
fastcgi_param SERVER_PORT $server_port;
|
||||
fastcgi_param SERVER_NAME $server_name;
|
||||
|
||||
# PHP only, required if PHP was built with --enable-force-cgi-redirect
|
||||
fastcgi_param REDIRECT_STATUS 200;
|
||||
'';
|
||||
|
||||
site-packages = mapAttrs (site: site-cfg:
|
||||
pkgs.rainloop-community.overrideAttrs (oldAttrs: {
|
||||
# Not sure how to correctly specify this arg...
|
||||
#dataPath = "${base-data-path}/${site}";
|
||||
|
||||
# Overwriting, to correctly create data dir
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
cp -r rainloop/* $out
|
||||
rm -rf $out/data
|
||||
ln -s ${base-data-path}/${site} $out/data
|
||||
ln -s ${site-cfg.favicon} $out/favicon.ico
|
||||
'';
|
||||
})) cfg.sites;
|
||||
|
||||
siteOpts = { site-host, ... }: with types; {
|
||||
options = {
|
||||
title = mkOption {
|
||||
type = str;
|
||||
description = "Webmail site title";
|
||||
example = "My Webmail";
|
||||
};
|
||||
|
||||
debug = mkOption {
|
||||
type = bool;
|
||||
description = "Turn debug logs on.";
|
||||
default = false;
|
||||
};
|
||||
|
||||
mail-server = mkOption {
|
||||
type = str;
|
||||
description = "Mail server from which to send & recieve email.";
|
||||
default = "mail.fudo.org";
|
||||
};
|
||||
|
||||
favicon = mkOption {
|
||||
type = str;
|
||||
description = "URL of the site favicon";
|
||||
example = "https://www.somepage.com/fav.ico";
|
||||
};
|
||||
|
||||
messages-per-page = mkOption {
|
||||
type = int;
|
||||
description = "Default number of messages to show per page";
|
||||
default = 30;
|
||||
};
|
||||
|
||||
max-upload-size = mkOption {
|
||||
type = int;
|
||||
description = "Size limit in MB for uploaded files";
|
||||
default = 30;
|
||||
};
|
||||
|
||||
theme = mkOption {
|
||||
type = str;
|
||||
description = "Default theme to use for this webmail site.";
|
||||
default = "Default";
|
||||
};
|
||||
|
||||
domain = mkOption {
|
||||
type = str;
|
||||
description = "Domain for which the server acts as webmail server";
|
||||
};
|
||||
|
||||
edit-mode = mkOption {
|
||||
type = enum [ "Plain" "Html" "PlainForced" "HtmlForced" ];
|
||||
description = "Default text editing mode for email";
|
||||
default = "Html";
|
||||
};
|
||||
|
||||
layout-mode = mkOption {
|
||||
type = enum [ "side" "bottom" ];
|
||||
description = "Layout mode to use for email preview.";
|
||||
default = "side";
|
||||
};
|
||||
|
||||
enable-threading = mkOption {
|
||||
type = bool;
|
||||
description = "Whether to enable threading for email.";
|
||||
default = true;
|
||||
};
|
||||
|
||||
enable-mobile = mkOption {
|
||||
type = bool;
|
||||
description = "Whether to enable a mobile site view.";
|
||||
default = true;
|
||||
};
|
||||
|
||||
database = mkOption {
|
||||
type = nullOr (submodule databaseOpts);
|
||||
description = "Database configuration for storing contact data.";
|
||||
example = {
|
||||
name = "my_db";
|
||||
host = "db.domain.com";
|
||||
user = "my_user";
|
||||
password-file = /path/to/some/file.pw;
|
||||
};
|
||||
default = null;
|
||||
};
|
||||
|
||||
admin-email = mkOption {
|
||||
type = str;
|
||||
description = "Email of administrator of this site.";
|
||||
default = "admin@fudo.org";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
databaseOpts = { ... }: with types; {
|
||||
options = {
|
||||
type = mkOption {
|
||||
type = enum [ "pgsql" "mysql" ];
|
||||
description = "Driver to use when connecting to the database.";
|
||||
default = "pgsql";
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
type = str;
|
||||
description = "Name of host running the database.";
|
||||
example = "my-db.domain.com";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = int;
|
||||
description = "Port on which the database server is listening.";
|
||||
default = 5432;
|
||||
};
|
||||
|
||||
name = mkOption {
|
||||
type = str;
|
||||
description =
|
||||
"Name of the database containing contact info. <user> must have access.";
|
||||
default = "rainloop_webmail";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User as which to connect to the database.";
|
||||
default = "webmail";
|
||||
};
|
||||
|
||||
password-file = mkOption {
|
||||
type = nullOr str;
|
||||
description = ''
|
||||
Password to use when connecting to the database.
|
||||
|
||||
If unset, a random password will be generated.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
in {
|
||||
options.fudo.webmail = with types; {
|
||||
enable = mkEnableOption "Enable a RainLoop webmail server.";
|
||||
|
||||
sites = mkOption {
|
||||
type = attrsOf (submodule siteOpts);
|
||||
description = "A map of webmail sites to site configurations.";
|
||||
example = {
|
||||
"webmail.domain.com" = {
|
||||
title = "My Awesome Webmail";
|
||||
layout-mode = "side";
|
||||
favicon = "/path/to/favicon.ico";
|
||||
admin-email = "admin@domain.com";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = str;
|
||||
description = "User as which webmail will run.";
|
||||
default = "webmail-php";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = str;
|
||||
description = "Group as which webmail will run.";
|
||||
default = "webmail-php";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
users = {
|
||||
users = {
|
||||
${webmail-user} = {
|
||||
isSystemUser = true;
|
||||
description = "Webmail PHP FPM user";
|
||||
group = webmail-group;
|
||||
};
|
||||
};
|
||||
groups = {
|
||||
${webmail-group} = {
|
||||
members = [ webmail-user config.services.nginx.user ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
security.acme.certs = mapAttrs
|
||||
(site: site-cfg: { email = site-cfg.admin-email; })
|
||||
cfg.sites;
|
||||
|
||||
services = {
|
||||
phpfpm = {
|
||||
pools.webmail = {
|
||||
settings = {
|
||||
"pm" = "dynamic";
|
||||
"pm.max_children" = 50;
|
||||
"pm.start_servers" = 5;
|
||||
"pm.min_spare_servers" = 1;
|
||||
"pm.max_spare_servers" = 8;
|
||||
};
|
||||
|
||||
phpOptions = ''
|
||||
            memory_limit = 500M
          '';

          # Not working... see chmod below
          user = webmail-user;
          group = webmail-group;
        };
      };

      nginx = {
        enable = true;

        virtualHosts = mapAttrs (site: site-cfg: {
          enableACME = true;
          forceSSL = true;

          root = "${site-packages.${site}}";

          locations = {
            "/" = { index = "index.php"; };

            "/data" = {
              extraConfig = ''
                deny all;
                return 403;
              '';
            };
          };

          extraConfig = ''
            location ~ \.php$ {
              expires -1;

              include ${fastcgi-conf};
              fastcgi_index index.php;
              fastcgi_pass unix:${config.services.phpfpm.pools.webmail.socket};
            }
          '';
        }) cfg.sites;
      };
    };

    fudo.secrets.host-secrets.${hostname} = concatMapAttrs (site: site-cfg:
      let
        site-config-file = builtins.toFile "${site}-rainloop.cfg"
          (import ./include/rainloop.nix lib site site-cfg site-packages.${site}.version);

        domain-config-file = builtins.toFile "${site}-domain.cfg" ''
          imap_host = "${site-cfg.mail-server}"
          imap_port = 143
          imap_secure = "TLS"
          imap_short_login = On
          sieve_use = Off
          sieve_allow_raw = Off
          sieve_host = ""
          sieve_port = 4190
          sieve_secure = "None"
          smtp_host = "${site-cfg.mail-server}"
          smtp_port = 587
          smtp_secure = "TLS"
          smtp_short_login = On
          smtp_auth = On
          smtp_php_mail = Off
          white_list = ""
        '';
      in {
        "${site}-site-config" = {
          source-file = site-config-file;
          target-file = "/var/run/webmail/rainloop/site-${site}-rainloop.cfg";
          user = cfg.user;
        };

        "${site}-domain-config" = {
          source-file = domain-config-file;
          target-file = "/var/run/webmail/rainloop/domain-${site}-rainloop.cfg";
          user = cfg.user;
        };
      }) cfg.sites;

    # TODO: make this a fudo service
    systemd.services = {
      webmail-init = let
        link-configs = concatStringsSep "\n" (mapAttrsToList (site: site-cfg:
          let
            cfg-file = config.fudo.secrets.host-secrets.${hostname}."${site}-site-config".target-file;
            domain-cfg-file = config.fudo.secrets.host-secrets.${hostname}."${site}-domain-config".target-file;
          in ''
            ${pkgs.coreutils}/bin/mkdir -p ${base-data-path}/${site}/_data_/_default_/configs
            ${pkgs.coreutils}/bin/cp ${cfg-file} ${base-data-path}/${site}/_data_/_default_/configs/application.ini

            ${pkgs.coreutils}/bin/mkdir -p ${base-data-path}/${site}/_data_/_default_/domains/
            ${pkgs.coreutils}/bin/cp ${domain-cfg-file} ${base-data-path}/${site}/_data_/_default_/domains/${site-cfg.domain}.ini
          '') cfg.sites);

        scriptPkg = pkgs.writeScriptBin "webmail-init.sh" ''
          #!${pkgs.bash}/bin/bash -e
          ${link-configs}
          ${pkgs.coreutils}/bin/chown -R ${webmail-user}:${webmail-group} ${base-data-path}
          ${pkgs.coreutils}/bin/chmod -R u+w ${base-data-path}
        '';
      in {
        requiredBy = [ "nginx.service" ];
        description =
          "Initialize webmail service directories prior to starting nginx.";
        script = "${scriptPkg}/bin/webmail-init.sh";
      };

      phpfpm-webmail-socket-perm = {
        wantedBy = [ "multi-user.target" ];
        description =
          "Change ownership of the phpfpm socket for webmail once it's started.";
        requires = [ "phpfpm-webmail.service" ];
        after = [ "phpfpm.target" ];
        serviceConfig = {
          ExecStart = ''
            ${pkgs.coreutils}/bin/chown ${webmail-user}:${webmail-group} ${config.services.phpfpm.pools.webmail.socket}
          '';
        };
      };

      nginx = {
        requires =
          [ "webmail-init.service" "phpfpm-webmail-socket-perm.service" ];
      };
    };
  };
}
|
@ -0,0 +1,32 @@
{ config, lib, pkgs, ... }:

with lib;
let
  networkOpts = { name, ... }: let
    network = name;
  in {
    options = {
      network = mkOption {
        type = types.str;
        description = "Name of the wireless network.";
        default = network;
      };

      key = mkOption {
        type = types.str;
        description = "Secret key for the wireless network.";
      };
    };
  };

in {
  options.fudo.wireless-networks = mkOption {
    type = with types; attrsOf (submodule networkOpts);
    description = "A map of wireless networks to attributes (including key).";
    default = { };
  };

  config = {
    networking.wireless.networks =
      mapAttrs (network: opts: { psk = opts.key; })
        config.fudo.wireless-networks;
  };
}
|
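# Hedged usage sketch (the network name and key below are hypothetical), showing
# how a network declared via fudo.wireless-networks above flows into
# networking.wireless.networks with its PSK:
#
#   fudo.wireless-networks."home-wifi" = {
#     key = "correct horse battery staple";
#   };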
@ -0,0 +1,177 @@
{ config, lib, pkgs, ... }:

with lib;
let
  cfg = config.informis.cl-gemini;

  feedOpts = { ... }: with types; {
    options = {
      url = mkOption {
        type = str;
        description = "Base URI of the feed, i.e. the URI corresponding to the feed path.";
        example = "gemini://my.server/path/to/feedfiles";
      };

      title = mkOption {
        type = str;
        description = "Title of the given feed.";
        example = "My Fancy Feed";
      };

      path = mkOption {
        type = str;
        description = "Path to the Gemini files making up the feed.";
        example = "/path/to/feed";
      };
    };
  };

  ensure-certificates = hostname: user: key: cert:
    pkgs.writeShellScript "ensure-gemini-certificates.sh" ''
      if [[ ! -e ${key} ]]; then
        TARGET_CERT_DIR=$(${pkgs.coreutils}/bin/dirname ${cert})
        TARGET_KEY_DIR=$(${pkgs.coreutils}/bin/dirname ${key})
        if [[ ! -d $TARGET_CERT_DIR ]]; then mkdir -p $TARGET_CERT_DIR; fi
        if [[ ! -d $TARGET_KEY_DIR ]]; then mkdir -p $TARGET_KEY_DIR; fi
        ${pkgs.openssl}/bin/openssl req -new -subj "/CN=.${hostname}" -addext "subjectAltName = DNS:${hostname}, DNS:.${hostname}" -x509 -newkey ec -pkeyopt ec_paramgen_curve:prime256v1 -days 3650 -nodes -out ${cert} -keyout ${key}
        ${pkgs.coreutils}/bin/chown -R ${user}:nogroup ${cert}
        ${pkgs.coreutils}/bin/chown -R ${user}:nogroup ${key}
        ${pkgs.coreutils}/bin/chmod 0444 ${cert}
        ${pkgs.coreutils}/bin/chmod 0400 ${key}
      fi
    '';

  generate-feeds = feeds:
    let
      feed-strings = mapAttrsToList (feed-name: opts:
        "(cl-gemini:register-feed :name \"${feed-name}\" :title \"${opts.title}\" :path \"${opts.path}\" :base-uri \"${opts.url}\")") feeds;
    in pkgs.writeText "gemini-local-feeds.lisp" (concatStringsSep "\n" feed-strings);

in {
  options.informis.cl-gemini = with types; {
    enable = mkEnableOption "the cl-gemini server";

    port = mkOption {
      type = port;
      description = "Port on which to serve Gemini traffic.";
      default = 1965;
    };

    hostname = mkOption {
      type = str;
      description = "Hostname at which the server is available (for generating the SSL certificate).";
      example = "my.hostname.com";
    };

    user = mkOption {
      type = str;
      description = "User as which to run the cl-gemini server.";
      default = "cl-gemini";
    };

    server-ip = mkOption {
      type = str;
      description = "IP on which to serve Gemini traffic.";
      example = "1.2.3.4";
    };

    document-root = mkOption {
      type = str;
      description = "Root at which to look for gemini files.";
      example = "/my/gemini/root";
    };

    user-public = mkOption {
      type = str;
      description = "Subdirectory of user homes to check for gemini files.";
      default = "gemini-public";
    };

    ssl-private-key = mkOption {
      type = str;
      description = "Path to the pem-encoded server private key.";
      example = "/path/to/secret/key.pem";
      default = "${config.users.users.cl-gemini.home}/private/server-key.pem";
    };

    ssl-certificate = mkOption {
      type = str;
      description = "Path to the pem-encoded server public certificate.";
      example = "/path/to/cert.pem";
      default = "${config.users.users.cl-gemini.home}/private/server-cert.pem";
    };

    slynk-port = mkOption {
      type = nullOr port;
      description = "Port on which to open a slynk server, if any.";
      default = null;
    };

    feeds = mkOption {
      type = attrsOf (submodule feedOpts);
      description = "Feeds to generate and make available (e.g. as /feed/name.xml).";
      example = {
        diary = {
          title = "My Diary";
          path = "/path/to/my/gemfiles/";
          url = "gemini://my.host/blog-path/";
        };
      };
      default = { };
    };

    textfiles-archive = mkOption {
      type = str;
      description = "A path containing only gemini & text files.";
      example = "/path/to/textfiles/";
    };
  };

  config = mkIf cfg.enable {

    networking.firewall.allowedTCPPorts = [ cfg.port ];

    users.users = {
      ${cfg.user} = {
        isSystemUser = true;
        group = "nogroup";
        createHome = true;
        home = "/var/lib/${cfg.user}";
      };
    };

    systemd.services = {
      cl-gemini = {
        description = "cl-gemini Gemini server (https://gemini.circumlunar.space/)";

        serviceConfig = {
          ExecStartPre = "${ensure-certificates cfg.hostname cfg.user cfg.ssl-private-key cfg.ssl-certificate}";
          ExecStart = "${pkgs.cl-gemini}/bin/launch-server.sh";
          Restart = "on-failure";
          PIDFile = "/run/cl-gemini.${cfg.user}.uid";
          User = cfg.user;
        };

        environment = {
          GEMINI_SLYNK_PORT = mkIf (cfg.slynk-port != null) (toString cfg.slynk-port);
          GEMINI_LISTEN_IP = cfg.server-ip;
          GEMINI_PRIVATE_KEY = cfg.ssl-private-key;
          GEMINI_CERTIFICATE = cfg.ssl-certificate;
          GEMINI_LISTEN_PORT = toString cfg.port;
          GEMINI_DOCUMENT_ROOT = cfg.document-root;
          GEMINI_TEXTFILES_ROOT = cfg.textfiles-archive;
          GEMINI_FEEDS = "${generate-feeds cfg.feeds}";

          CL_SOURCE_REGISTRY = "${pkgs.lib.fudo.lisp.lisp-source-registry pkgs.cl-gemini}";
        };

        path = with pkgs; [
          gcc
          file
          getent
        ];

        wantedBy = [ "multi-user.target" ];
      };
    };
  };
}
|
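# Hedged usage sketch for the options defined above (hostname, IP, and paths are
# hypothetical values):
#
#   informis.cl-gemini = {
#     enable = true;
#     hostname = "gemini.example.com";
#     server-ip = "203.0.113.10";
#     document-root = "/srv/gemini";
#     textfiles-archive = "/srv/textfiles";
#     feeds.diary = {
#       title = "My Diary";
#       path = "/srv/gemini/diary";
#       url = "gemini://gemini.example.com/diary/";
#     };
#   };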
@ -0,0 +1,7 @@
{ config, lib, pkgs, ... }:

{
  imports = [
    ./cl-gemini.nix
  ];
}
|
@ -0,0 +1,122 @@
{ config, lib, pkgs, ... }:

with lib;
let
  user = import ./types/user.nix { inherit lib; };
  host = import ./types/host.nix { inherit lib; };

in {
  options.instance = with types; {
    hostname = mkOption {
      type = str;
      description = "Hostname of this specific host (without domain).";
    };

    host-fqdn = mkOption {
      type = str;
      description = "Fully-qualified name of this host.";
    };

    build-timestamp = mkOption {
      type = int;
      description = "Timestamp associated with the build. Used for e.g. DNS serials.";
    };

    local-domain = mkOption {
      type = str;
      description = "Domain name of the current local host.";
    };

    local-profile = mkOption {
      type = str;
      description = "Profile name of the current local host.";
    };

    local-site = mkOption {
      type = str;
      description = "Site name of the current local host.";
    };

    local-admins = mkOption {
      type = listOf str;
      description = "List of users who should have admin access to the local host.";
    };

    local-groups = mkOption {
      type = attrsOf (submodule user.groupOpts);
      description = "Map of groups which should be created on the local host.";
    };

    local-hosts = mkOption {
      type = attrsOf (submodule host.hostOpts);
      description = "Map of hosts that should be considered local to the current host.";
    };

    local-users = mkOption {
      type = attrsOf (submodule user.userOpts);
      description = "Map of users who should have access to the local host.";
    };

    local-networks = mkOption {
      type = listOf str;
      description = "Networks which are considered local to this host, site, or domain.";
    };

    build-seed = mkOption {
      type = str;
      description = "Seed used to generate configuration.";
    };
  };

  config = let
    local-host = config.instance.hostname;
    local-domain = config.fudo.hosts.${local-host}.domain;
    local-site = config.fudo.hosts.${local-host}.site;

    host = config.fudo.hosts.${local-host};

    host-user-list = host.local-users;
    domain-user-list = config.fudo.domains."${local-domain}".local-users;
    site-user-list = config.fudo.sites."${local-site}".local-users;
    local-users =
      getAttrs (host-user-list ++ domain-user-list ++ site-user-list) config.fudo.users;

    host-admin-list = host.local-admins;
    domain-admin-list = config.fudo.domains."${local-domain}".local-admins;
    site-admin-list = config.fudo.sites."${local-site}".local-admins;
    local-admins = host-admin-list ++ domain-admin-list ++ site-admin-list;

    host-group-list = host.local-groups;
    domain-group-list = config.fudo.domains."${local-domain}".local-groups;
    site-group-list = config.fudo.sites."${local-site}".local-groups;
    local-groups =
      getAttrs (host-group-list ++ domain-group-list ++ site-group-list)
        config.fudo.groups;

    local-hosts =
      filterAttrs (hostname: hostOpts: hostOpts.site == local-site) config.fudo.hosts;

    local-networks =
      host.local-networks ++
      config.fudo.domains.${local-domain}.local-networks ++
      config.fudo.sites.${local-site}.local-networks;

    local-profile = host.profile;

    host-fqdn = "${config.instance.hostname}.${local-domain}";

  in {
    instance = {
      inherit
        host-fqdn
        local-domain
        local-site
        local-users
        local-admins
        local-groups
        local-hosts
        local-profile
        local-networks;
    };
  };
}
|
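# Note on the merge above (illustrative, hypothetical names): getAttrs picks the
# named entries out of config.fudo.users, so if the host lists [ "alice" ] and
# its domain and site each list [ "bob" ], instance.local-users becomes
# { alice = <user>; bob = <user>; }.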
@ -0,0 +1,305 @@
{ lib, ... }:

with lib;
let
  passwd = import ../passwd.nix { inherit lib; };

in rec {
  encryptedFSOpts = { ... }: let
    mountpoint = { name, ... }: {
      options = with types; {
        mountpoint = mkOption {
          type = str;
          description = "Path at which to mount the filesystem.";
          default = name;
        };

        options = mkOption {
          type = listOf str;
          description = "List of filesystem options specific to this mountpoint (e.g. subvol).";
        };

        group = mkOption {
          type = nullOr str;
          description = "Group to which the mountpoint should belong.";
          default = null;
        };

        users = mkOption {
          type = listOf str;
          description = ''
            List of users who should have access to the filesystem.

            Requires a group to be set.
          '';
          default = [ ];
        };

        world-readable = mkOption {
          type = bool;
          description = "Whether to leave the top level world-readable.";
          default = true;
        };
      };
    };
  in {
    options = with types; {
      encrypted-device = mkOption {
        type = str;
        description = "Path to the encrypted device.";
      };

      key-path = mkOption {
        type = str;
        description = ''
          Path at which to locate the key file.

          The filesystem will be decrypted and mounted once available.
        '';
      };

      filesystem-type = mkOption {
        type = str;
        description = "Filesystem type of the decrypted filesystem.";
      };

      options = mkOption {
        type = listOf str;
        description = "List of filesystem options with which to mount.";
      };

      mountpoints = mkOption {
        type = attrsOf (submodule mountpoint);
        description = "A map of mountpoints for this filesystem to fs options. Multiple to support btrfs.";
        default = { };
      };
    };
  };

  masterKeyOpts = { ... }: {
    options = with types; {
      key-path = mkOption {
        type = str;
        description = "Path of the host master key file, used to decrypt secrets.";
      };

      public-key = mkOption {
        type = str;
        description = "Public key used during deployment to decrypt secrets for the host.";
      };
    };
  };

  hostOpts = { name, ... }: let
    hostname = name;
  in {
    options = with types; {
      master-key = mkOption {
        type = nullOr (submodule masterKeyOpts);
        description = "Master key configuration, used by the host to decrypt secrets.";
      };

      domain = mkOption {
        type = str;
        description =
          "Primary domain to which the host belongs, in the form of a domain name.";
        default = "fudo.org";
      };

      extra-domains = mkOption {
        type = listOf str;
        description = "Extra domains in which this host is reachable.";
        default = [ ];
      };

      aliases = mkOption {
        type = listOf str;
        description =
          "Host aliases used by the current host. Note this will be multiplied with extra-domains.";
        default = [ ];
      };

      site = mkOption {
        type = str;
        description = "Site at which the host is located.";
        default = "unsited";
      };

      local-networks = mkOption {
        type = listOf str;
        description =
          "A list of networks to be considered trusted by this host.";
        default = [ "127.0.0.0/8" ];
      };

      profile = mkOption {
        type = enum [ "desktop" "server" "laptop" ];
        description =
          "The profile to be applied to the host, determining what software is included.";
      };

      admin-email = mkOption {
        type = nullOr str;
        description = "Email for the administrator of this host.";
        default = null;
      };

      local-users = mkOption {
        type = listOf str;
        description =
          "List of users who should have local (i.e. login) access to the host.";
        default = [ ];
      };

      description = mkOption {
        type = str;
        description = "Description of this host.";
        default = "Another Fudo Host.";
      };

      local-admins = mkOption {
        type = listOf str;
        description =
          "A list of users who should have admin access to this host.";
        default = [ ];
      };

      local-groups = mkOption {
        type = listOf str;
        description = "List of groups which should exist on this host.";
        default = [ ];
      };

      ssh-fingerprints = mkOption {
        type = listOf str;
        description = ''
          A list of DNS SSHFP records for this host. Get with `ssh-keygen -r <hostname>`.
        '';
        default = [ ];
      };

      rp = mkOption {
        type = nullOr str;
        description = "Responsible person.";
        default = null;
      };

      tmp-on-tmpfs = mkOption {
        type = bool;
        description =
          "Use tmpfs for /tmp. Great if you've got enough (>16G) RAM.";
        default = true;
      };

      enable-gui = mkEnableOption "desktop GUI software on the host";

      docker-server = mkEnableOption "Docker on the current host";

      kerberos-services = mkOption {
        type = listOf str;
        description =
          "List of services which should exist for this host, if it belongs to a realm.";
        default = [ "ssh" "host" ];
      };

      ssh-pubkeys = mkOption {
        type = listOf path;
        description = "SSH public key files of the host.";
        default = [ ];
      };

      build-pubkeys = mkOption {
        type = listOf str;
        description = "SSH public keys used to access the build server.";
        default = [ ];
      };

      external-interfaces = mkOption {
        type = listOf str;
        description = "A list of interfaces on which to enable the firewall.";
        default = [ ];
      };

      keytab-secret-file = mkOption {
        type = nullOr str;
        description = "Keytab from which to create a keytab secret.";
        default = null;
      };

      keep-cool = mkOption {
        type = bool;
        description = "A host that tends to overheat. Try to keep it cooler.";
        default = false;
      };

      nixos-system = mkOption {
        type = bool;
        description = "Whether the host is a NixOS system.";
        default = true;
      };

      arch = mkOption {
        type = str;
        description = "System architecture of the host.";
      };

      machine-id = mkOption {
        type = nullOr str;
        description = "Machine id of the system. See: man machine-id.";
        default = null;
      };

      android-dev = mkEnableOption "ADB on the host";

      encrypted-filesystems = mkOption {
        type = attrsOf (submodule encryptedFSOpts);
        description = "Map of encrypted filesystems to mount on the local host when the key is available.";
        default = { };
      };

      initrd-network = let
        keypair-type = { ... }: {
          options = {
            public-key = mkOption {
              type = str;
              description = "SSH public key.";
            };

            private-key-file = mkOption {
              type = str;
              description = "Path to the SSH private key (on the local host!).";
            };
          };
        };

        initrd-network-config = { ... }: {
          options = {
            ip = mkOption {
              type = str;
              description = "IP to assign to the initrd image, allowing access to the host during bootup.";
            };

            keypair = mkOption {
              type = submodule keypair-type;
              description = "SSH host key pair to use for initrd.";
            };

            interface = mkOption {
              type = str;
              description = "Name of the interface on which to listen for connections.";
            };
          };
        };

      in mkOption {
        type = nullOr (submodule initrd-network-config);
        description = "Configuration parameters to set up the initrd SSH network.";
        default = null;
      };

      backplane-password-file = mkOption {
        type = path;
        description = "File containing the password used by this host to connect to the backplane.";
      };
    };
  };
}
|
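# Hedged example of a host entry built from hostOpts above (all values are
# hypothetical); instance.nix earlier in this commit reads these fields from
# config.fudo.hosts, so that appears to be where such entries live:
#
#   fudo.hosts.my-server = {
#     domain = "fudo.org";
#     site = "unsited";
#     profile = "server";
#     arch = "x86_64-linux";
#     local-admins = [ "alice" ];
#     encrypted-filesystems.storage = {
#       encrypted-device = "/dev/disk/by-label/storage-crypt";
#       key-path = "/run/keys/storage.key";
#       filesystem-type = "btrfs";
#       options = [ "compress=zstd" ];
#       mountpoints."/srv/storage".options = [ "subvol=storage" ];
#     };
#   };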
@ -0,0 +1,108 @@
{ lib, ... }:

with lib;
let
  srvRecordOpts = { ... }: {
    options = with types; {
      priority = mkOption {
        type = int;
        description = "Priority to give to this record.";
        default = 0;
      };

      weight = mkOption {
        type = int;
        description =
          "Weight to give this record, among records of equivalent priority.";
        default = 5;
      };

      port = mkOption {
        type = port;
        description = "Port for the service on this host.";
        example = 88;
      };

      host = mkOption {
        type = str;
        description = "Host providing the service.";
        example = "my-host.my-domain.com";
      };
    };
  };

  networkHostOpts = import ./network-host.nix { inherit lib; };

in {
  options = with types; {
    hosts = mkOption {
      type = attrsOf (submodule networkHostOpts);
      description = "Hosts on the local network, with relevant settings.";
      example = {
        my-host = {
          ipv4-address = "192.168.0.1";
          mac-address = "aa:aa:aa:aa:aa:aa";
        };
      };
      default = { };
    };

    srv-records = mkOption {
      type = attrsOf (attrsOf (listOf (submodule srvRecordOpts)));
      description = "SRV records for the network.";
      example = {
        tcp = {
          kerberos = [
            {
              port = 88;
              host = "krb-host.my-domain.com";
            }
          ];
        };
      };
      default = { };
    };

    aliases = mkOption {
      type = attrsOf str;
      default = { };
      description =
        "A mapping of host-alias -> hostnames to add to the domain record.";
      example = {
        mail = "my-mail-host";
        music = "musicall-host.other-domain.com.";
      };
    };

    verbatim-dns-records = mkOption {
      type = listOf str;
      description = "Records to be inserted verbatim into the DNS zone.";
      example = [ "some-host IN CNAME base-host" ];
      default = [ ];
    };

    dmarc-report-address = mkOption {
      type = nullOr str;
      description = "The email address used to receive DMARC reports, if any.";
      example = "admin-user@domain.com";
      default = null;
    };

    default-host = mkOption {
      type = nullOr str;
      description =
        "IP of the host which will act as the default server for this domain, if any.";
      default = null;
    };

    mx = mkOption {
      type = listOf str;
      description = "A list of mail servers serving this domain.";
      default = [ ];
    };

    gssapi-realm = mkOption {
      type = nullOr str;
      description = "Kerberos GSSAPI realm of the network.";
      default = null;
    };
  };
}
|
@ -0,0 +1,32 @@
{ lib, ... }:

{ name, ... }:
with lib;
{
  options = with types; {
    ipv4-address = mkOption {
      type = nullOr str;
      description = "The IPv4 address of a given host, if any.";
      default = null;
    };

    ipv6-address = mkOption {
      type = nullOr str;
      description = "The IPv6 address of a given host, if any.";
      default = null;
    };

    mac-address = mkOption {
      type = nullOr str;
      description =
        "The MAC address of a given host, if desired for IP reservation.";
      default = null;
    };

    description = mkOption {
      type = nullOr str;
      description = "Description of the host.";
      default = null;
    };
  };
}
|
@ -0,0 +1,157 @@
{ lib, ... }:

with lib;
rec {
  systemUserOpts = { name, ... }: {
    options = with lib.types; {
      username = mkOption {
        type = str;
        description = "The system user's login name.";
        default = name;
      };

      description = mkOption {
        type = str;
        description = "Description of this system user's purpose or role.";
      };

      ldap-hashed-password = mkOption {
        type = str;
        description =
          "LDAP-formatted hashed password for this user. Generate with slappasswd.";
      };
    };
  };

  userOpts = { name, ... }: let
    username = name;
  in {
    options = with lib.types; {
      username = mkOption {
        type = str;
        description = "The user's login name.";
        default = username;
      };

      uid = mkOption {
        type = int;
        description = "Unique UID number for the user.";
      };

      common-name = mkOption {
        type = str;
        description = "The user's common or given name.";
      };

      primary-group = mkOption {
        type = str;
        description = "Primary group to which the user belongs.";
      };

      login-shell = mkOption {
        type = nullOr shellPackage;
        description = "The user's preferred shell.";
      };

      description = mkOption {
        type = str;
        default = "Fudo Member";
        description = "A description of this user's role.";
      };

      ldap-hashed-passwd = mkOption {
        type = nullOr str;
        description =
          "LDAP-formatted hashed password, used for email and other services. Use slappasswd to generate the properly-formatted password.";
        default = null;
      };

      login-hashed-passwd = mkOption {
        type = nullOr str;
        description =
          "Hashed password used for shell access to hosts. Use mkpasswd to generate the properly-formatted password.";
        default = null;
      };

      ssh-authorized-keys = mkOption {
        type = listOf str;
        description = "SSH public keys this user can use to log in.";
        default = [ ];
      };

      home-directory = mkOption {
        type = nullOr str;
        description = "Default home directory for the given user.";
        default = null;
      };

      k5login = mkOption {
        type = listOf str;
        description = "List of Kerberos principals that map to this user.";
        default = [ ];
      };

      ssh-keys = mkOption {
        type = listOf (submodule sshKeyOpts);
        description = "Paths to the user's public and private key files.";
        default = [ ];
      };

      email = mkOption {
        type = nullOr str;
        description = "User's primary email address.";
        default = null;
      };

      email-aliases = mkOption {
        type = listOf str;
        description = "Email aliases that should map to this user.";
        default = [ ];
      };
    };
  };

  groupOpts = { name, ... }: {
    options = with lib.types; {
      group-name = mkOption {
        type = str;
        description = "Group name.";
        default = name;
      };

      description = mkOption {
        type = str;
        description = "Description of the group or its purpose.";
      };

      members = mkOption {
        type = listOf str;
        default = [ ];
        description = "A list of users who are members of the current group.";
      };

      gid = mkOption {
        type = int;
        description = "GID number of the group.";
      };
    };
  };

  sshKeyOpts = { ... }: {
    options = with lib.types; {
      private-key = mkOption {
        type = str;
        description = "Path to the user's private key.";
      };

      public-key = mkOption {
        type = str;
        description = "Path to the user's public key.";
      };

      key-type = mkOption {
        type = enum [ "rsa" "ecdsa" "ed25519" ];
        description = "Type of the user's public key.";
      };
    };
  };
}
|
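# Hedged example of a user entry built from userOpts above (all values are
# hypothetical); instance.nix earlier in this commit looks these entries up in
# config.fudo.users:
#
#   fudo.users.alice = {
#     uid = 10001;
#     common-name = "Alice Example";
#     primary-group = "users";
#     login-shell = pkgs.zsh;
#     ssh-authorized-keys = [ "ssh-ed25519 AAAA... alice@laptop" ];
#   };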
@ -0,0 +1,7 @@
{ config, lib, pkgs, ... }:

{
  imports = [
    ./lib
  ];
}