diff --git a/admin/upgrade_cluster.sh b/admin/upgrade_cluster.sh
index 4352591..7159142 100755
--- a/admin/upgrade_cluster.sh
+++ b/admin/upgrade_cluster.sh
@@ -150,6 +150,7 @@ echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
 argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
 
+# v1.27
 # We need to restore the network ready file as cilium decided to rename it
 control_plane_upgrade apply_network
 
@@ -157,23 +158,37 @@ echo "Wait for all CNI agents to be running ..."
 kubectl rollout status ds/cilium -n kube-system --timeout=60s
 
 all_nodes_upgrade "cd /host/etc/cni/net.d && ln -s 05-cilium.conflist 05-cilium.conf || true"
+# v1.27
 
 # now the rest
 control_plane_upgrade "apply_addons, apply_storage, apply_operators"
+# v1.27
 
 # Remove legacy eck-operator as part of logging if running
 kubectl delete statefulset elastic-operator -n logging || true
+# v1.27
 
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
 
 echo "Applying remaining KubeZero modules..."
+# v1.27
 
 ### Cleanup of some deprecated Istio Crds
 for crd in clusterrbacconfigs.rbac.istio.io rbacconfigs.rbac.istio.io servicerolebindings.rbac.istio.io serviceroles.rbac.istio.io; do
   kubectl delete crds $crd || true
 done
 
+# Cleanup of some legacy node labels and annotations
+controllers=$(kubectl get nodes -l node-role.kubernetes.io/control-plane -o json | jq .items[].metadata.name -r)
+for c in $controllers; do
+  for l in projectcalico.org/IPv4VXLANTunnelAddr projectcalico.org/IPv4Address kubeadm.alpha.kubernetes.io/cri-socket; do
+    kubectl annotate node $c ${l}-
+  done
+  kubectl label node $c topology.ebs.csi.aws.com/zone-
+done
+# v1.27
+
 control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argocd"
 
 # Trigger backup of upgraded cluster state
diff --git a/charts/kubezero-network/templates/multus/daemonset.yaml b/charts/kubezero-network/templates/multus/daemonset.yaml
index b480cec..18be034 100644
--- a/charts/kubezero-network/templates/multus/daemonset.yaml
+++ b/charts/kubezero-network/templates/multus/daemonset.yaml
@@ -37,10 +37,10 @@ spec:
         resources:
           requests:
             cpu: "100m"
-            memory: "32Mi"
+            memory: "64Mi"
           limits:
             #cpu: "100m"
-            memory: "64Mi"
+            memory: "256Mi"
         securityContext:
           privileged: true
           capabilities:
diff --git a/charts/kubezero-storage/templates/snapshot-controller/controller.yaml b/charts/kubezero-storage/templates/snapshot-controller/controller.yaml
index 7cbfd3c..fa5a41b 100644
--- a/charts/kubezero-storage/templates/snapshot-controller/controller.yaml
+++ b/charts/kubezero-storage/templates/snapshot-controller/controller.yaml
@@ -46,7 +46,7 @@ spec:
       serviceAccountName: snapshot-controller
       containers:
         - name: snapshot-controller
-          image: registry.k8s.io/sig-storage/snapshot-controller:v6.2.2
+          image: {{ .Values.snapshotController.image.name }}:{{ .Values.snapshotController.image.tag }}
           args:
             - "--v={{ .Values.snapshotController.logLevel }}"
 {{- if gt (int .Values.snapshotController.replicas) 1 }}
diff --git a/charts/kubezero-storage/values.yaml b/charts/kubezero-storage/values.yaml
index 4797c22..2e65dc9 100644
--- a/charts/kubezero-storage/values.yaml
+++ b/charts/kubezero-storage/values.yaml
@@ -3,6 +3,11 @@ crd:
 
 snapshotController:
   enabled: false
+
+  image:
+    name: registry.k8s.io/sig-storage/snapshot-controller
+    tag: v6.3.0
+
   replicas: 1
   logLevel: 2
 
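
As a sanity check after the node cleanup loop added above, something along these lines (an illustrative sketch, not part of the upgrade script) should print nothing once the legacy Calico annotations and the deprecated EBS CSI zone label have been removed from all control-plane nodes:

# Illustrative check: list any control-plane nodes that still carry
# legacy projectcalico.org/* annotations or the old EBS CSI zone label
kubectl get nodes -l node-role.kubernetes.io/control-plane -o json \
  | jq -r '.items[]
      | select(((.metadata.annotations // {}) | keys | any(startswith("projectcalico.org/")))
          or ((.metadata.labels // {}) | has("topology.ebs.csi.aws.com/zone")))
      | .metadata.name'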
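
Similarly, since the snapshot-controller image is now templated from chart values instead of hard-coded, the rendered image can be confirmed after apply_storage has run. The Deployment name and the kube-system namespace are assumptions here, not spelled out in the diff:

# Illustrative check: show which image the templated manifest resolved to;
# with the new defaults this should be
# registry.k8s.io/sig-storage/snapshot-controller:v6.3.0
kubectl -n kube-system get deployment snapshot-controller \
  -o jsonpath='{.spec.template.spec.containers[0].image}'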