From cb0b167437ff6f8a869749d4da0052eb822de76e Mon Sep 17 00:00:00 2001
From: Stefan Reimer
Date: Wed, 22 Nov 2023 11:55:34 +0000
Subject: [PATCH] First v1.27 docs, upgrade tweaks

---
 admin/upgrade_cluster.sh | 11 ++++++---
 docs/v1.27.md            | 50 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+), 3 deletions(-)
 create mode 100644 docs/v1.27.md

diff --git a/admin/upgrade_cluster.sh b/admin/upgrade_cluster.sh
index 3cacaec..4352591 100755
--- a/admin/upgrade_cluster.sh
+++ b/admin/upgrade_cluster.sh
@@ -152,12 +152,17 @@ argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-val
 # We need to restore the network ready file as cilium decided to rename it
 control_plane_upgrade apply_network
+
 echo "Wait for all CNI agents to be running ..."
-waitSystemPodsRunning
+kubectl rollout status ds/cilium -n kube-system --timeout=60s
+
 all_nodes_upgrade "cd /host/etc/cni/net.d && ln -s 05-cilium.conflist 05-cilium.conf || true"
 
 # now the rest
-control_plane_upgrade "apply_addons, apply_storage"
+control_plane_upgrade "apply_addons, apply_storage, apply_operators"
+
+# Remove the legacy eck-operator (previously part of the logging module) if still running
+kubectl delete statefulset elastic-operator -n logging || true
 
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
@@ -169,7 +174,7 @@ for crd in clusterrbacconfigs.rbac.istio.io rbacconfigs.rbac.istio.io servicerol
   kubectl delete crds $crd || true
 done
 
-control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"
+control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argocd"
 
 # Trigger backup of upgraded cluster state
 kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$VERSION -n kube-system
diff --git a/docs/v1.27.md b/docs/v1.27.md
new file mode 100644
index 0000000..f1ab2f8
--- /dev/null
+++ b/docs/v1.27.md
@@ -0,0 +1,50 @@
+# KubeZero 1.27
+
+## TODO
+
+## What's new - Major themes
+- all KubeZero and support AMIs based on the latest Alpine 3.18.4
+- updated and improved hardening of Istio Ingress Gateways
+- moved the ECK operator into the new kubezero-operators module
+- new, optional OpenSearch operator
+
+## Version upgrades
+- cilium 1.14.4
+- istio 1.19.4
+
+### FeatureGates
+- CustomCPUCFSQuotaPeriod
+- MemoryQoS
+
+# Upgrade
+`(No, really, you MUST read this before you upgrade)`
+
+Ensure your Kube context points to the correct cluster! (A quick check is sketched below.)
+
+1. Review the CFN config for controllers and workers; no changes are mandatory for this release though.
+
+2. Upgrade the CFN stacks for the control plane *ONLY*!
+   Updating the worker CFN stacks would trigger rolling updates right away!
+
+3. Trigger the cluster upgrade:
+   `./admin/upgrade_cluster.sh `
+
+4. Review the kubezero-config and, if all looks good, commit the ArgoApp resource for KubeZero via regular git:
+   git add / commit / push ``
+
+5. Reboot controller(s) one by one.
+   Wait each time for the controller to rejoin and for all pods to be running (see the readiness sketch below).
+   Might take a while ...
+
+6. Upgrade the CFN stacks for the workers.
+   This in turn triggers automated worker updates: pods are evicted and new workers are launched in a rolling fashion.
+   Grab a coffee and keep an eye on the cluster to be safe ...
+   Depending on your cluster size it might take a while to roll over all workers!
+
+7. Re-enable ArgoCD by hitting `<return>` on the still waiting upgrade script.
+
+8. Head over to ArgoCD and sync the KubeZero main module as soon as possible, to reduce potential back and forth in case ArgoCD still holds legacy state (a CLI-based sync is sketched below).
+
+
+## Known issues
+So far so good.
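The "Ensure your Kube context points to the correct cluster!" step can be verified with stock kubectl before anything else is touched. This is only a convenience sketch; it is not part of the patch or of `upgrade_cluster.sh`.

```bash
# Sanity check: confirm which cluster kubectl is currently pointed at
kubectl config current-context
kubectl cluster-info
```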
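For step 5 (waiting after each controller reboot), the following is a minimal sketch of the kind of check the script's `waitSystemPodsRunning` helper name suggests, built only from standard kubectl commands. The 600s timeout, the 10s poll interval and the exact pod phases filtered are assumptions for illustration, not values taken from the upgrade script.

```bash
#!/bin/bash
# Hedged sketch of a post-reboot readiness check (step 5); not part of the patch.

# Wait until every node reports Ready again after the controller reboot
kubectl wait --for=condition=Ready node --all --timeout=600s

# Loop until no pod in kube-system sits outside the Running/Succeeded phases
while kubectl get pods -n kube-system --no-headers \
    --field-selector=status.phase!=Running,status.phase!=Succeeded | grep -q .; do
  echo "Waiting for all kube-system pods to be running ..."
  sleep 10
done
```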
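Step 8 recommends syncing the KubeZero main module through the ArgoCD UI. If you prefer the CLI, something along these lines should work as well. It assumes the `argocd` CLI is installed and logged in, and that the root application is named `kubezero`, matching the `kubectl edit app kubezero -n argocd` call in `upgrade_cluster.sh`; adjust the name if your setup differs.

```bash
# CLI alternative to syncing via the ArgoCD UI (step 8).
# The app name "kubezero" is an assumption taken from the upgrade script.
argocd app sync kubezero
argocd app wait kubezero --health --timeout 600
```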