From 23d87073bb05e7312907c4836473d15bd13daae4 Mon Sep 17 00:00:00 2001
From: Stefan Reimer
Date: Tue, 12 Jul 2022 15:00:37 +0000
Subject: [PATCH] feat: First stab at 1.23

---
 Dockerfile                                    |  6 +--
 {releases/v1.22 => admin}/kubezero.sh         | 42 +++++++------------
 {releases/v1.22 => admin}/migrate_argo.py     |  0
 {releases/v1.22 => admin}/upgrade_cluster.sh  |  0
 .../51-aws-iam-authenticator-deployment.yaml  |  2 +-
 releases/v1.19/Upgrade.md => docs/v1.19.md    |  0
 releases/v1.20/Upgrade.md => docs/v1.20.md    |  0
 releases/v1.22/README.md => docs/v1.22.md     |  0
 8 files changed, 19 insertions(+), 31 deletions(-)
 rename {releases/v1.22 => admin}/kubezero.sh (89%)
 rename {releases/v1.22 => admin}/migrate_argo.py (100%)
 rename {releases/v1.22 => admin}/upgrade_cluster.sh (100%)
 rename releases/v1.19/Upgrade.md => docs/v1.19.md (100%)
 rename releases/v1.20/Upgrade.md => docs/v1.20.md (100%)
 rename releases/v1.22/README.md => docs/v1.22.md (100%)

diff --git a/Dockerfile b/Dockerfile
index 6cdbef06..f356e4db 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,9 +1,9 @@
-ARG ALPINE_VERSION=3.15
+ARG ALPINE_VERSION=3.16
 
 FROM alpine:${ALPINE_VERSION}
 
 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.22
+ARG KUBE_VERSION=1.23
 
 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@@ -21,7 +21,7 @@ RUN cd /etc/apk/keys && \
     restic@testing \
     helm@testing
 
-ADD releases/v${KUBE_VERSION}/kubezero.sh /usr/bin
+ADD admin/kubezero.sh /usr/bin
 ADD charts/kubeadm /charts/kubeadm
 ADD charts/kubezero-addons /charts/kubezero-addons
 ADD charts/kubezero-network /charts/kubezero-network
diff --git a/releases/v1.22/kubezero.sh b/admin/kubezero.sh
similarity index 89%
rename from releases/v1.22/kubezero.sh
rename to admin/kubezero.sh
index dc9a9313..31359fea 100755
--- a/releases/v1.22/kubezero.sh
+++ b/admin/kubezero.sh
@@ -9,7 +9,7 @@ fi
 export WORKDIR=/tmp/kubezero
 export HOSTFS=/host
 export CHARTS=/charts
-export VERSION=v1.22
+export VERSION=v1.23
 
 export KUBECONFIG="${HOSTFS}/root/.kube/config"
 
@@ -118,11 +116,9 @@ post_kubeadm() {
 parse_kubezero
 
 if [ "$1" == 'upgrade' ]; then
-  ### PRE 1.22 specific
+  ### PRE 1.23 specific
   #####################
 
-  kubectl delete runtimeclass crio
-
   #####################
 
   render_kubeadm
@@ -139,14 +137,11 @@ if [ "$1" == 'upgrade' ]; then
     cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
   fi
 
-  ### POST 1.22 specific
-
-  # Remove all remaining kiam
-  helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/
-  helm repo update
-  helm template uswitch/kiam --name-template kiam --set server.deployment.enabled=true --set server.prometheus.servicemonitor.enabled=true --set agent.prometheus.servicemonitor.enabled=true | kubectl delete --namespace kube-system -f - || true
+  ### POST 1.23 specific
+  #####################
 
   ######################
+
   # network
   yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
   helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
@@ -157,14 +152,6 @@ if [ "$1" == 'upgrade' ]; then
   helm template $CHARTS/kubezero-addons --namespace kube-system --include-crds --name-template addons \
     -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
 
-  ######################
-
-  # Could be removed with 1.23 as we now have persistent etcd
-  # Execute cluster backup to allow new controllers to join
-  kubectl create job backup-cluster-now --from=cronjob/kubezero-backup -n kube-system
-  # That might take a while as the backup pod needs the CNIs to come online etc.
-  retry 10 30 40 kubectl wait --for=condition=complete job/backup-cluster-now -n kube-system && kubectl delete job backup-cluster-now -n kube-system
-
   # Cleanup after kubeadm on the host
   rm -rf ${HOSTFS}/etc/kubernetes/tmp
 
@@ -191,10 +178,8 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
     rm -rf ${HOSTFS}/var/lib/etcd/member
 
   else
-    # Todo: 1.23
-    # Workaround for 1.22 as the final backup is still tagged with the previous verion from the cronjob
-    #retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
-    retry 10 60 30 restic restore latest --no-lock -t /
+    # Restore latest backup
+    retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
 
     # Make last etcd snapshot available
     cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
@@ -205,9 +190,8 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
   # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
   cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
 
-  # etcd needs to resync during join
+  # Only restore etcd data during "restore", and only if none exists already
   if [[ "$1" =~ "^(restore)$" ]]; then
-    # Only restore etcd data set if none exists already
     if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
       etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
         --name $ETCD_NODENAME \
@@ -353,7 +337,11 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
 elif [ "$1" == 'backup' ]; then
   restic snapshots || restic init || exit 1
 
-  CLUSTER_VERSION="v1.$(kubectl version --short=true -o json | jq .serverVersion.minor -r)"
+  CV=$(kubectl version --short=true -o json | jq .serverVersion.minor -r)
+  let PCV=$CV-1
+
+  CLUSTER_VERSION="v1.$CV"
+  PREVIOUS_VERSION="v1.$PCV"
 
   etcdctl --endpoints=https://${ETCD_NODENAME}:2379 snapshot save ${WORKDIR}/etcd_snapshot
 
@@ -367,8 +355,8 @@ elif [ "$1" == 'backup' ]; then
   echo "Backup complete."
 
-  # Remove backups from previous versions
-  restic forget --keep-tag $CLUSTER_VERSION --prune
+  # Remove backups from versions older than the previous one
+  restic forget --keep-tag $CLUSTER_VERSION --keep-tag $PREVIOUS_VERSION --prune
 
   # Regular retention
   restic forget --keep-hourly 24 --keep-daily ${RESTIC_RETENTION:-7} --prune
diff --git a/releases/v1.22/migrate_argo.py b/admin/migrate_argo.py
similarity index 100%
rename from releases/v1.22/migrate_argo.py
rename to admin/migrate_argo.py
diff --git a/releases/v1.22/upgrade_cluster.sh b/admin/upgrade_cluster.sh
similarity index 100%
rename from releases/v1.22/upgrade_cluster.sh
rename to admin/upgrade_cluster.sh
diff --git a/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml b/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
index 569ee792..d14a109c 100644
--- a/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
+++ b/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
@@ -115,7 +115,7 @@ spec:
       containers:
         - name: aws-iam-authenticator
-          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.7
+          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.9
           args:
             - server
             - --backend-mode=CRD,MountedFile
diff --git a/releases/v1.19/Upgrade.md b/docs/v1.19.md
similarity index 100%
rename from releases/v1.19/Upgrade.md
rename to docs/v1.19.md
diff --git a/releases/v1.20/Upgrade.md b/docs/v1.20.md
similarity index 100%
rename from releases/v1.20/Upgrade.md
rename to docs/v1.20.md
diff --git a/releases/v1.22/README.md b/docs/v1.22.md
similarity index 100%
rename from releases/v1.22/README.md
rename to docs/v1.22.md