nixos/kubernetes: let flannel use kubernetes as storage backend

+ isolate etcd on the master node by letting it listen only on loopback
+ enable the kubelet on the master and taint the master with NoSchedule

The reason for the latter is that flannel requires all nodes to be "registered"
in the cluster in order to set up the cluster network. This means that the
kubelet is needed even on nodes on which we don't plan to schedule anything.
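
For illustration, a minimal sketch of a master-only machine after this change (the hostname and the easyCerts setting are assumptions, not part of this commit):

  services.kubernetes = {
    roles = [ "master" ];          # the kubelet now runs here too, but the node is tainted NoSchedule
    masterAddress = "kube-master"; # hypothetical hostname
    easyCerts = true;              # assumed; uses the module's built-in PKI
  };

With roles = [ "master" "node" ] the taint is not applied, so workloads can still be scheduled on that machine.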
Johan Thomsen 2019-02-12 16:48:23 +01:00 committed by Franz Pletz
parent 1f49c2160a
commit 466beb0214
5 changed files with 78 additions and 9 deletions

View File

@@ -411,6 +411,7 @@ in
         name = "etcd";
         CN = top.masterAddress;
         hosts = [
+          "etcd.local"
           "etcd.${top.addons.dns.clusterDomain}"
           top.masterAddress
           cfg.advertiseAddress

View File

@@ -189,6 +189,16 @@ in {
         services.kubernetes.addonManager.enable = mkDefault true;
         services.kubernetes.proxy.enable = mkDefault true;
         services.etcd.enable = true; # Cannot mkDefault because of flannel default options
+        services.kubernetes.kubelet = {
+          enable = mkDefault true;
+          taints = mkIf (!(elem "node" cfg.roles)) {
+            master = {
+              key = "node-role.kubernetes.io/master";
+              value = "true";
+              effect = "NoSchedule";
+            };
+          };
+        };
       })

View File

@@ -6,6 +6,9 @@ let
   top = config.services.kubernetes;
   cfg = top.flannel;

+  # we want flannel to use kubernetes itself as configuration backend, not direct etcd
+  storageBackend = "kubernetes";
+
   # needed for flannel to pass options to docker
   mkDockerOpts = pkgs.runCommand "mk-docker-opts" {
     buildInputs = [ pkgs.makeWrapper ];
@@ -29,6 +32,8 @@ in
       enable = mkDefault true;
       network = mkDefault top.clusterCidr;
+      inherit storageBackend;
+      nodeName = config.services.kubernetes.kubelet.hostname;
     };

     services.kubernetes.kubelet = {
@@ -69,11 +74,52 @@ in
     };

     services.kubernetes.pki.certs = {
-      flannelEtcdClient = top.lib.mkCert {
-        name = "flannel-etcd-client";
-        CN = "flannel-etcd-client";
+      flannelClient = top.lib.mkCert {
+        name = "flannel-client";
+        CN = "flannel-client";
         action = "systemctl restart flannel.service";
       };
     };
+
+    # give flannel some kubernetes rbac permissions if applicable
+    services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+      flannel-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRole";
+        metadata = { name = "flannel"; };
+        rules = [{
+          apiGroups = [ "" ];
+          resources = [ "pods" ];
+          verbs = [ "get" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes" ];
+          verbs = [ "list" "watch" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes/status" ];
+          verbs = [ "patch" ];
+        }];
+      };
+
+      flannel-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1beta1";
+        kind = "ClusterRoleBinding";
+        metadata = { name = "flannel"; };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "flannel";
+        };
+        subjects = [{
+          kind = "User";
+          name = "flannel-client";
+        }];
+      };
+    };
   };
 }

View File

@@ -305,7 +305,7 @@ in
           ''}
           ${optionalString top.flannel.enable ''
-            while [ ! -f ${cfg.certs.flannelEtcdClient.cert} ]; do sleep 1; done
+            while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
             echo "Restarting flannel..." >&1
             systemctl restart flannel
           ''}
@@ -313,22 +313,35 @@ in
         echo "Node joined succesfully"
       '')];

+    # isolate etcd on loopback at the master node
+    # easyCerts doesn't support multimaster clusters anyway atm.
     services.etcd = with cfg.certs.etcd; {
+      listenClientUrls = ["https://127.0.0.1:2379"];
+      listenPeerUrls = ["https://127.0.0.1:2380"];
+      advertiseClientUrls = ["https://etcd.local:2379"];
+      initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
+      initialAdvertisePeerUrls = ["https://etcd.local:2380"];
       certFile = mkDefault cert;
       keyFile = mkDefault key;
       trustedCaFile = mkDefault caCert;
     };

+    networking.extraHosts = mkIf (config.services.etcd.enable) ''
+      127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
+    '';
+
-    services.flannel.etcd = with cfg.certs.flannelEtcdClient; {
-      certFile = mkDefault cert;
-      keyFile = mkDefault key;
-      caFile = mkDefault caCert;
-    };
+    services.flannel = with cfg.certs.flannelClient; {
+      kubeconfig = top.lib.mkKubeConfig "flannel" {
+        server = top.apiserverAddress;
+        certFile = cert;
+        keyFile = key;
+      };
+    };

     services.kubernetes = {
       apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
         etcd = with cfg.certs.apiserverEtcdClient; {
+          servers = ["https://etcd.local:2379"];
           certFile = mkDefault cert;
           keyFile = mkDefault key;
           caFile = mkDefault caCert;

View File

@@ -65,7 +65,6 @@ let
       }
       (optionalAttrs (any (role: role == "master") machine.roles) {
         networking.firewall.allowedTCPPorts = [
-          2379 2380 # etcd
           443 # kubernetes apiserver
         ];
       })