diff --git a/admin/libhelm.sh b/admin/libhelm.sh
index 7b982f32..81d5b76d 100644
--- a/admin/libhelm.sh
+++ b/admin/libhelm.sh
@@ -3,6 +3,9 @@
 # Simulate well-known CRDs being available
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget"
 
+#VERSION="latest"
+VERSION="v1.27"
+
 # Waits for max 300s and retries
 function wait_for() {
   local TRIES=0
@@ -182,3 +185,126 @@ function _helm() {
 
   return 0
 }
+
+function all_nodes_upgrade() {
+  CMD="$1"
+
+  echo "Deploy all node upgrade daemonSet(busybox)"
+  cat </dev/null
+  while true; do
+    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
+    sleep 3
+  done
+  kubectl delete pod kubezero-upgrade -n kube-system
+}
diff --git a/admin/upgrade_cluster.sh b/admin/upgrade_cluster.sh
index bdf0d30b..34f1d0d2 100755
--- a/admin/upgrade_cluster.sh
+++ b/admin/upgrade_cluster.sh
@@ -2,139 +2,13 @@
 set -eE
 set -o pipefail
 
-#VERSION="latest"
-VERSION="v1.27"
 ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
 
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 # shellcheck disable=SC1091
-. "$SCRIPT_DIR"/libhelm.sh
-
 [ -n "$DEBUG" ] && set -x
 
-all_nodes_upgrade() {
-  CMD="$1"
-
-  echo "Deploy all node upgrade daemonSet(busybox)"
-  cat </dev/null
-  while true; do
-    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
-    sleep 3
-  done
-  kubectl delete pod kubezero-upgrade -n kube-system
-}
-
+. "$SCRIPT_DIR"/libhelm.sh
 
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
@@ -154,7 +28,7 @@ control_plane_upgrade kubeadm_upgrade
 control_plane_upgrade apply_network
 
 echo "Wait for all CNI agents to be running ..."
-kubectl rollout status ds/cilium -n kube-system --timeout=120s
+kubectl rollout status ds/cilium -n kube-system --timeout=300s
 
 all_nodes_upgrade "cd /host/etc/cni/net.d && ln -s 05-cilium.conflist 05-cilium.conf || true"
 # v1.27
diff --git a/charts/kubeadm/templates/patches/etcd0.yaml b/charts/kubeadm/templates/patches/etcd0.yaml
index 8bebff49..5eec13ec 100644
--- a/charts/kubeadm/templates/patches/etcd0.yaml
+++ b/charts/kubeadm/templates/patches/etcd0.yaml
@@ -3,6 +3,6 @@ spec:
   - name: etcd
     resources:
       requests:
-        cpu: 200m
-        memory: 192Mi
+        cpu: 50m
+        memory: 256Mi
       #ephemeral-storage: 1Gi
diff --git a/charts/kubeadm/templates/patches/kube-apiserver0.yaml b/charts/kubeadm/templates/patches/kube-apiserver0.yaml
index 37a592f0..3a7d6ac4 100644
--- a/charts/kubeadm/templates/patches/kube-apiserver0.yaml
+++ b/charts/kubeadm/templates/patches/kube-apiserver0.yaml
@@ -4,5 +4,5 @@ spec:
   - name: kube-apiserver
     resources:
       requests:
-        cpu: 200m
-        memory: 1Gi
+        cpu: 250m
+        memory: 1268Mi
diff --git a/charts/kubeadm/templates/patches/kube-controller-manager0.yaml b/charts/kubeadm/templates/patches/kube-controller-manager0.yaml
index 5771a750..d8c25490 100644
--- a/charts/kubeadm/templates/patches/kube-controller-manager0.yaml
+++ b/charts/kubeadm/templates/patches/kube-controller-manager0.yaml
@@ -3,5 +3,5 @@ spec:
   - name: kube-controller-manager
     resources:
       requests:
-        cpu: 100m
-        memory: 128Mi
+        cpu: 50m
+        memory: 192Mi
diff --git a/charts/kubeadm/templates/patches/kube-scheduler0.yaml b/charts/kubeadm/templates/patches/kube-scheduler0.yaml
index 837fd637..a15eecea 100644
--- a/charts/kubeadm/templates/patches/kube-scheduler0.yaml
+++ b/charts/kubeadm/templates/patches/kube-scheduler0.yaml
@@ -3,5 +3,5 @@ spec:
   - name: kube-scheduler
     resources:
       requests:
-        cpu: 100m
-        memory: 64Mi
+        cpu: 50m
+        memory: 96Mi
diff --git a/charts/kubezero-logging/values.yaml b/charts/kubezero-logging/values.yaml
index ae5c8c25..06c0a419 100644
--- a/charts/kubezero-logging/values.yaml
+++ b/charts/kubezero-logging/values.yaml
@@ -248,7 +248,7 @@ fluent-bit:
   resources:
     requests:
       cpu: 20m
-      memory: 32Mi
+      memory: 48Mi
     limits:
       memory: 128Mi
 
diff --git a/charts/kubezero-storage/values.yaml b/charts/kubezero-storage/values.yaml
index d8b86999..d1ce33d6 100644
--- a/charts/kubezero-storage/values.yaml
+++ b/charts/kubezero-storage/values.yaml
@@ -222,7 +222,7 @@ aws-efs-csi-driver:
   resources:
     requests:
       cpu: 20m
-      memory: 64Mi
+      memory: 96Mi
     limits:
       memory: 128Mi
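
Usage sketch (not part of the diff): with VERSION and the all_nodes_upgrade helper moved into admin/libhelm.sh, a caller only needs to source the library and invoke the helper. The snippet below mirrors how admin/upgrade_cluster.sh consumes it after this change; the standalone script itself is hypothetical and assumes libhelm.sh sits next to the calling script.

#!/bin/bash
# Hypothetical caller, e.g. a one-off admin script next to admin/libhelm.sh.
set -eE
set -o pipefail

# Locate and source the shared helpers (VERSION, wait_for, all_nodes_upgrade, ...).
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh

# VERSION now comes from libhelm.sh ("v1.27"), so callers no longer define it themselves.
echo "Running per-node task for KubeZero $VERSION"

# Run a one-off shell command on every node via the busybox upgrade DaemonSet,
# e.g. the CNI config symlink fix used by the upgrade script above.
all_nodes_upgrade "cd /host/etc/cni/net.d && ln -s 05-cilium.conflist 05-cilium.conf || true"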