diff --git a/containers/admin/v1.21/README.md b/containers/admin/v1.21/README.md
new file mode 100644
index 00000000..cb55405d
--- /dev/null
+++ b/containers/admin/v1.21/README.md
@@ -0,0 +1,25 @@
+! Ensure your Kube context points to the correct cluster !
+
+# Trigger the cluster upgrade
+`./upgrade_121.sh`
+
+# Upgrade CFN stacks for the control plane and all worker groups
+
+# Reboot controller(s) one by one
+
+# Patch current deployments that would otherwise block ArgoCD
+`./kubezero_121.sh`
+
+# Migrate ArgoCD config for the cluster
+`./migrate_argo.sh "cluster/env/kubezero/application.yaml"`
+
+Adjust as needed, e.g. ensure eck-operator is enabled if required.
+git add / commit / push
+
+Watch ArgoCD do its work.
+
+# Replace worker nodes
+
+## Known issues
+- Pods can appear stuck, e.g. fluent-bit on workers shows NotReady *after* the control nodes have been upgraded
+  -> restart `kube-proxy` on the affected workers
diff --git a/containers/admin/v1.21/kubezero_121.sh b/containers/admin/v1.21/kubezero_121.sh
index 737a8593..aef2d7a3 100755
--- a/containers/admin/v1.21/kubezero_121.sh
+++ b/containers/admin/v1.21/kubezero_121.sh
@@ -6,8 +6,9 @@ set -x
 kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
 kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
 
-# Migrate ZeroDownTime helm repo
+# Migrate ZeroDownTime helm repo and fix project permissions
 kubectl patch appproject kubezero -n argocd --type=json -p='[{"op": "replace", "path": "/spec/sourceRepos/0", "value": "https://cdn.zero-downtime.net/charts" }]'
+kubectl patch appproject kubezero -n argocd --type=json -p='[{"op": "replace", "path": "/spec/destinations", "value": [{"namespace": "*", "server": "https://kubernetes.default.svc"}] }]'
 
 # Delete EBS and EFS Deployments and Daemonsets as we cannot change the lables while moving them to storage.
 # This will NOT affect provisioned volumes
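The "Replace worker nodes" step in the README above is not scripted in this change; a minimal sketch of one common pattern (an assumption, not necessarily this repo's exact procedure) is to cordon and drain each worker, then let the CFN/ASG worker group bring up a replacement running the new version:

```sh
# Hypothetical example of cycling one worker node at a time.
NODE="ip-10-0-2-45.ec2.internal"   # hypothetical node name of the worker being replaced
kubectl cordon "${NODE}"
kubectl drain "${NODE}" --ignore-daemonsets --delete-emptydir-data
# ...then terminate the instance so the worker group replaces it with a v1.21 node.
```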
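For the known-issue workaround in the README above, a sketch of restarting `kube-proxy` on a single affected worker by deleting its pod; the node name is hypothetical and this assumes the kube-proxy pods carry the usual `k8s-app=kube-proxy` label:

```sh
# Delete the kube-proxy pod on the NotReady worker; the DaemonSet recreates it immediately.
NODE="ip-10-0-1-23.ec2.internal"   # hypothetical node name - substitute the affected worker
kubectl -n kube-system delete pod -l k8s-app=kube-proxy \
  --field-selector spec.nodeName="${NODE}"
```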
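The new `appproject` patch in `kubezero_121.sh` replaces the project's destinations wholesale; a quick sanity check after running the script could look like the sketch below (the expected values come straight from the patch, the jsonpath query itself is just an example):

```sh
# Confirm the kubezero AppProject now pulls charts from the CDN repo and allows
# deploying to any namespace on the in-cluster API server.
kubectl get appproject kubezero -n argocd \
  -o jsonpath='{.spec.sourceRepos}{"\n"}{.spec.destinations}{"\n"}'
```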