fix: use evictionHard to reserve node memory to avoid systemd mess

This commit is contained in:
Stefan Reimer 2021-04-04 16:52:18 +02:00
parent 2dc912cf9a
commit 9a362607c1

View File

@@ -20,13 +20,16 @@ tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES
 featureGates: {{ include "kubeadm.featuregates" ( dict "return" "map" "platform" .Values.platform ) }}
 # Minimal unit is 50m per pod
 podsPerCore: 20
+# cpuCFSQuotaPeriod: 10ms
 # Basic OS on Ubuntu 20.04 incl. crio
-systemReserved:
-  memory: 256Mi
+#systemReserved:
+#  memory: 256Mi
 # This should be dynamic based on number of maxpods and available cores
 # https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture#memory_cpu
 # Below are default minimal for 2 cores and minimum kubelet
 kubeReserved:
   cpu: 70m
-  memory: 128Mi
-# cpuCFSQuotaPeriod: 10ms
+#  memory: 128Mi
+# Lets use below to reserve memory for system processes as kubeReserved/sytemReserved doesnt go well with systemd it seems
+evictionHard:
+  memory.available: "484Mi"