Kubernetes DNS addon 1.14.4 -> 1.14.10
As shipped with k8s 1.10.3. Also: - updated the definition JSONs as they are distributed in k8s. - updated the image URIs as they are renamed in k8s. - added imageDigest param as per 736848723e5aefa5d24396c58dc6de603399efde
This commit is contained in:
parent
3e3917bb9e
commit
55fa98dd76
@ -3,27 +3,27 @@
|
|||||||
with lib;
|
with lib;
|
||||||
|
|
||||||
let
|
let
|
||||||
version = "1.14.4";
|
version = "1.14.10";
|
||||||
|
|
||||||
k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
|
k8s-dns-kube-dns = pkgs.dockerTools.pullImage {
|
||||||
imageName = "gcr.io/google_containers/k8s-dns-kube-dns-amd64";
|
imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
|
||||||
|
imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
|
||||||
finalImageTag = version;
|
finalImageTag = version;
|
||||||
sha256 = "0q97xfqrigrfjl2a9cxl5in619py0zv44gch09jm8gqjkxl80imp";
|
sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
|
||||||
imageDigest = "sha256:40790881bbe9ef4ae4ff7fe8b892498eecb7fe6dcc22661402f271e03f7de344";
|
|
||||||
};
|
};
|
||||||
|
|
||||||
k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
|
k8s-dns-dnsmasq-nanny = pkgs.dockerTools.pullImage {
|
||||||
imageName = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64";
|
imageName = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64";
|
||||||
|
imageDigest = "sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8";
|
||||||
finalImageTag = version;
|
finalImageTag = version;
|
||||||
sha256 = "051w5ca4qb88mwva4hbnh9xzlsvv7k1mbk3wz50lmig2mqrqqx6c";
|
sha256 = "1fihml7s2mfwgac51cbqpylkwbivc8nyhgi4vb820s83zvl8a6y1";
|
||||||
imageDigest = "sha256:aeeb994acbc505eabc7415187cd9edb38cbb5364dc1c2fc748154576464b3dc2";
|
|
||||||
};
|
};
|
||||||
|
|
||||||
k8s-dns-sidecar = pkgs.dockerTools.pullImage {
|
k8s-dns-sidecar = pkgs.dockerTools.pullImage {
|
||||||
imageName = "gcr.io/google_containers/k8s-dns-sidecar-amd64";
|
imageName = "k8s.gcr.io/k8s-dns-sidecar-amd64";
|
||||||
|
imageDigest = "sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4";
|
||||||
finalImageTag = version;
|
finalImageTag = version;
|
||||||
sha256 = "1z0d129bcm8i2cqq36x5jhnrv9hirj8c6kjrmdav8vgf7py78vsm";
|
sha256 = "08l1bv5jgrhvjzpqpbinrkgvv52snc4fzyd8ya9v18ns2klyz7m0";
|
||||||
imageDigest = "sha256:97074c951046e37d3cbb98b82ae85ed15704a290cce66a8314e7f846404edde9";
|
|
||||||
};
|
};
|
||||||
|
|
||||||
cfg = config.services.kubernetes.addons.dns;
|
cfg = config.services.kubernetes.addons.dns;
|
||||||
@ -59,7 +59,7 @@ in {
|
|||||||
|
|
||||||
services.kubernetes.addonManager.addons = {
|
services.kubernetes.addonManager.addons = {
|
||||||
kubedns-deployment = {
|
kubedns-deployment = {
|
||||||
apiVersion = "apps/v1beta1";
|
apiVersion = "extensions/v1beta1";
|
||||||
kind = "Deployment";
|
kind = "Deployment";
|
||||||
metadata = {
|
metadata = {
|
||||||
labels = {
|
labels = {
|
||||||
@ -84,9 +84,38 @@ in {
|
|||||||
labels.k8s-app = "kube-dns";
|
labels.k8s-app = "kube-dns";
|
||||||
};
|
};
|
||||||
spec = {
|
spec = {
|
||||||
|
priorityClassName = "system-cluster-critical";
|
||||||
containers = [
|
containers = [
|
||||||
{
|
{
|
||||||
name = "kubedns";
|
name = "kubedns";
|
||||||
|
image = "k8s.gcr.io/k8s-dns-kube-dns-amd64:${version}";
|
||||||
|
resources = {
|
||||||
|
limits.memory = "170Mi";
|
||||||
|
requests = {
|
||||||
|
cpu = "100m";
|
||||||
|
memory = "70Mi";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
livenessProbe = {
|
||||||
|
failureThreshold = 5;
|
||||||
|
httpGet = {
|
||||||
|
path = "/healthcheck/kubedns";
|
||||||
|
port = 10054;
|
||||||
|
scheme = "HTTP";
|
||||||
|
};
|
||||||
|
initialDelaySeconds = 60;
|
||||||
|
successThreshold = 1;
|
||||||
|
timeoutSeconds = 5;
|
||||||
|
};
|
||||||
|
readinessProbe = {
|
||||||
|
httpGet = {
|
||||||
|
path = "/readiness";
|
||||||
|
port = 8081;
|
||||||
|
scheme = "HTTP";
|
||||||
|
};
|
||||||
|
initialDelaySeconds = 3;
|
||||||
|
timeoutSeconds = 5;
|
||||||
|
};
|
||||||
args = [
|
args = [
|
||||||
"--domain=${cfg.clusterDomain}"
|
"--domain=${cfg.clusterDomain}"
|
||||||
"--dns-port=10053"
|
"--dns-port=10053"
|
||||||
@ -99,18 +128,6 @@ in {
|
|||||||
value = "10055";
|
value = "10055";
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
image = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:${version}";
|
|
||||||
livenessProbe = {
|
|
||||||
failureThreshold = 5;
|
|
||||||
httpGet = {
|
|
||||||
path = "/healthcheck/kubedns";
|
|
||||||
port = 10054;
|
|
||||||
scheme = "HTTP";
|
|
||||||
};
|
|
||||||
initialDelaySeconds = 60;
|
|
||||||
successThreshold = 1;
|
|
||||||
timeoutSeconds = 5;
|
|
||||||
};
|
|
||||||
ports = [
|
ports = [
|
||||||
{
|
{
|
||||||
containerPort = 10053;
|
containerPort = 10053;
|
||||||
@ -128,22 +145,6 @@ in {
|
|||||||
protocol = "TCP";
|
protocol = "TCP";
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
readinessProbe = {
|
|
||||||
httpGet = {
|
|
||||||
path = "/readiness";
|
|
||||||
port = 8081;
|
|
||||||
scheme = "HTTP";
|
|
||||||
};
|
|
||||||
initialDelaySeconds = 3;
|
|
||||||
timeoutSeconds = 5;
|
|
||||||
};
|
|
||||||
resources = {
|
|
||||||
limits.memory = "170Mi";
|
|
||||||
requests = {
|
|
||||||
cpu = "100m";
|
|
||||||
memory = "70Mi";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
volumeMounts = [
|
volumeMounts = [
|
||||||
{
|
{
|
||||||
mountPath = "/kube-dns-config";
|
mountPath = "/kube-dns-config";
|
||||||
@ -152,6 +153,19 @@ in {
|
|||||||
];
|
];
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
|
name = "dnsmasq";
|
||||||
|
image = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:${version}";
|
||||||
|
livenessProbe = {
|
||||||
|
httpGet = {
|
||||||
|
path = "/healthcheck/dnsmasq";
|
||||||
|
port = 10054;
|
||||||
|
scheme = "HTTP";
|
||||||
|
};
|
||||||
|
initialDelaySeconds = 60;
|
||||||
|
timeoutSeconds = 5;
|
||||||
|
successThreshold = 1;
|
||||||
|
failureThreshold = 5;
|
||||||
|
};
|
||||||
args = [
|
args = [
|
||||||
"-v=2"
|
"-v=2"
|
||||||
"-logtostderr"
|
"-logtostderr"
|
||||||
@ -165,19 +179,6 @@ in {
|
|||||||
"--server=/in-addr.arpa/127.0.0.1#10053"
|
"--server=/in-addr.arpa/127.0.0.1#10053"
|
||||||
"--server=/ip6.arpa/127.0.0.1#10053"
|
"--server=/ip6.arpa/127.0.0.1#10053"
|
||||||
];
|
];
|
||||||
image = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:${version}";
|
|
||||||
livenessProbe = {
|
|
||||||
failureThreshold = 5;
|
|
||||||
httpGet = {
|
|
||||||
path = "/healthcheck/dnsmasq";
|
|
||||||
port = 10054;
|
|
||||||
scheme = "HTTP";
|
|
||||||
};
|
|
||||||
initialDelaySeconds = 60;
|
|
||||||
successThreshold = 1;
|
|
||||||
timeoutSeconds = 5;
|
|
||||||
};
|
|
||||||
name = "dnsmasq";
|
|
||||||
ports = [
|
ports = [
|
||||||
{
|
{
|
||||||
containerPort = 53;
|
containerPort = 53;
|
||||||
@ -205,24 +206,24 @@ in {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
name = "sidecar";
|
name = "sidecar";
|
||||||
image = "gcr.io/google_containers/k8s-dns-sidecar-amd64:${version}";
|
image = "k8s.gcr.io/k8s-dns-sidecar-amd64:${version}";
|
||||||
args = [
|
|
||||||
"--v=2"
|
|
||||||
"--logtostderr"
|
|
||||||
"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
|
|
||||||
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
|
|
||||||
];
|
|
||||||
livenessProbe = {
|
livenessProbe = {
|
||||||
failureThreshold = 5;
|
|
||||||
httpGet = {
|
httpGet = {
|
||||||
path = "/metrics";
|
path = "/metrics";
|
||||||
port = 10054;
|
port = 10054;
|
||||||
scheme = "HTTP";
|
scheme = "HTTP";
|
||||||
};
|
};
|
||||||
initialDelaySeconds = 60;
|
initialDelaySeconds = 60;
|
||||||
successThreshold = 1;
|
|
||||||
timeoutSeconds = 5;
|
timeoutSeconds = 5;
|
||||||
|
successThreshold = 1;
|
||||||
|
failureThreshold = 5;
|
||||||
};
|
};
|
||||||
|
args = [
|
||||||
|
"--v=2"
|
||||||
|
"--logtostderr"
|
||||||
|
"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
|
||||||
|
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
|
||||||
|
];
|
||||||
ports = [
|
ports = [
|
||||||
{
|
{
|
||||||
containerPort = 10054;
|
containerPort = 10054;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user