feat: First stab at 1.23

parent 17bd4c8ae0
commit 23d87073bb
@@ -1,9 +1,9 @@
-ARG ALPINE_VERSION=3.15
+ARG ALPINE_VERSION=3.16
 
 FROM alpine:${ALPINE_VERSION}
 
 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.22
+ARG KUBE_VERSION=1.23
 
 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@@ -21,7 +21,7 @@ RUN cd /etc/apk/keys && \
     restic@testing \
     helm@testing
 
-ADD releases/v${KUBE_VERSION}/kubezero.sh /usr/bin
+ADD admin/kubezero.sh /usr/bin
 ADD charts/kubeadm /charts/kubeadm
 ADD charts/kubezero-addons /charts/kubezero-addons
 ADD charts/kubezero-network /charts/kubezero-network
@@ -9,7 +9,7 @@ fi
 export WORKDIR=/tmp/kubezero
 export HOSTFS=/host
 export CHARTS=/charts
-export VERSION=v1.22
+export VERSION=v1.23
 
 export KUBECONFIG="${HOSTFS}/root/.kube/config"
 
@@ -118,11 +118,9 @@ post_kubeadm() {
 parse_kubezero
 
 if [ "$1" == 'upgrade' ]; then
-  ### PRE 1.22 specific
+  ### PRE 1.23 specific
   #####################
 
-  kubectl delete runtimeclass crio
-
   #####################
 
   render_kubeadm
@ -139,14 +137,11 @@ if [ "$1" == 'upgrade' ]; then
|
|||||||
cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
|
cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
|
||||||
fi
|
fi
|
||||||
|
|
||||||
### POST 1.22 specific
|
### POST 1.23 specific
|
||||||
|
#####################
|
||||||
# Remove all remaining kiam
|
|
||||||
helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/
|
|
||||||
helm repo update
|
|
||||||
helm template uswitch/kiam --name-template kiam --set server.deployment.enabled=true --set server.prometheus.servicemonitor.enabled=true --set agent.prometheus.servicemonitor.enabled=true | kubectl delete --namespace kube-system -f - || true
|
|
||||||
|
|
||||||
######################
|
######################
|
||||||
|
|
||||||
# network
|
# network
|
||||||
yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
|
yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
|
||||||
helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
|
helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
|
||||||
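
Note on the dropped kiam cleanup: it relied on rendering the upstream chart locally and piping the result into kubectl delete, which removes everything the chart created without needing a Helm release. A sketch of that pattern, using the same values as the removed lines:

    helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/
    helm repo update
    # Render with the values kiam was installed with, then delete every
    # object produced; `|| true` tolerates already-missing resources.
    helm template uswitch/kiam --name-template kiam \
      --set server.deployment.enabled=true \
      --set server.prometheus.servicemonitor.enabled=true \
      --set agent.prometheus.servicemonitor.enabled=true \
      | kubectl delete --namespace kube-system -f - || true
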
@ -157,14 +152,6 @@ if [ "$1" == 'upgrade' ]; then
|
|||||||
helm template $CHARTS/kubezero-addons --namespace kube-system --include-crds --name-template addons \
|
helm template $CHARTS/kubezero-addons --namespace kube-system --include-crds --name-template addons \
|
||||||
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
|
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
|
||||||
|
|
||||||
######################
|
|
||||||
|
|
||||||
# Could be removed with 1.23 as we now have persistent etcd
|
|
||||||
# Execute cluster backup to allow new controllers to join
|
|
||||||
kubectl create job backup-cluster-now --from=cronjob/kubezero-backup -n kube-system
|
|
||||||
# That might take a while as the backup pod needs the CNIs to come online etc.
|
|
||||||
retry 10 30 40 kubectl wait --for=condition=complete job/backup-cluster-now -n kube-system && kubectl delete job backup-cluster-now -n kube-system
|
|
||||||
|
|
||||||
# Cleanup after kubeadm on the host
|
# Cleanup after kubeadm on the host
|
||||||
rm -rf ${HOSTFS}/etc/kubernetes/tmp
|
rm -rf ${HOSTFS}/etc/kubernetes/tmp
|
||||||
|
|
||||||
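
The block removed above was a pre-1.23 workaround: a cluster backup was forced so that new controllers could join, which persistent etcd now makes unnecessary. For reference, it used the one-off-Job-from-CronJob pattern:

    # Spawn a Job from the existing backup CronJob and block until complete;
    # retry is this script's own wrapper for slow or flaky commands.
    kubectl create job backup-cluster-now --from=cronjob/kubezero-backup -n kube-system
    retry 10 30 40 kubectl wait --for=condition=complete job/backup-cluster-now -n kube-system \
      && kubectl delete job backup-cluster-now -n kube-system
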
@ -191,10 +178,8 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
|
|||||||
rm -rf ${HOSTFS}/var/lib/etcd/member
|
rm -rf ${HOSTFS}/var/lib/etcd/member
|
||||||
|
|
||||||
else
|
else
|
||||||
# Todo: 1.23
|
# restore latest backup
|
||||||
# Workaround for 1.22 as the final backup is still tagged with the previous verion from the cronjob
|
retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
|
||||||
#retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
|
|
||||||
retry 10 60 30 restic restore latest --no-lock -t /
|
|
||||||
|
|
||||||
# Make last etcd snapshot available
|
# Make last etcd snapshot available
|
||||||
cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
|
cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
|
||||||
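
The restore path can now filter on the version tag directly because the backup branch (further down) tags every snapshot with the running cluster version, so `--tag $VERSION` selects the snapshot written by the matching release. Roughly, with VERSION exported as v1.23 at the top of the script:

    # restic only considers snapshots carrying the given tag for `latest`.
    retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
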
@ -205,9 +190,8 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
|
|||||||
# Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
|
# Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
|
||||||
cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
|
cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
|
||||||
|
|
||||||
# etcd needs to resync during join
|
# Only restore etcd data during "restore" and none exists already
|
||||||
if [[ "$1" =~ "^(restore)$" ]]; then
|
if [[ "$1" =~ "^(restore)$" ]]; then
|
||||||
# Only restore etcd data set if none exists already
|
|
||||||
if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
|
if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
|
||||||
etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
|
etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
|
||||||
--name $ETCD_NODENAME \
|
--name $ETCD_NODENAME \
|
||||||
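
One note on the guard kept here: etcdctl snapshot restore materialises a fresh data directory and refuses to overwrite an existing one, so checking for the member directory keeps the restore idempotent across re-runs. A minimal sketch, assuming a --data-dir flag on the continuation the hunk cuts off:

    if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
      etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
        --name $ETCD_NODENAME \
        --data-dir ${HOSTFS}/var/lib/etcd   # illustrative; the actual flags continue past the hunk
    fi
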
@ -353,7 +337,11 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
|
|||||||
elif [ "$1" == 'backup' ]; then
|
elif [ "$1" == 'backup' ]; then
|
||||||
restic snapshots || restic init || exit 1
|
restic snapshots || restic init || exit 1
|
||||||
|
|
||||||
CLUSTER_VERSION="v1.$(kubectl version --short=true -o json | jq .serverVersion.minor -r)"
|
CV=$(kubectl version --short=true -o json | jq .serverVersion.minor -r)
|
||||||
|
let PCV=$CV-1
|
||||||
|
|
||||||
|
CLUSTER_VERSION="v1.$CV"
|
||||||
|
PREVIOUS_VERSION="v1.$PCV"
|
||||||
|
|
||||||
etcdctl --endpoints=https://${ETCD_NODENAME}:2379 snapshot save ${WORKDIR}/etcd_snapshot
|
etcdctl --endpoints=https://${ETCD_NODENAME}:2379 snapshot save ${WORKDIR}/etcd_snapshot
|
||||||
|
|
||||||
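
The backup branch now derives both the current and the previous minor version so the widened forget call below can keep both tags. `let` performs shell integer arithmetic on the minor number reported by the API server; worked through:

    CV=$(kubectl version --short=true -o json | jq .serverVersion.minor -r)  # e.g. "23"
    let PCV=$CV-1                  # 23 - 1 = 22
    CLUSTER_VERSION="v1.$CV"       # v1.23
    PREVIOUS_VERSION="v1.$PCV"     # v1.22

One caveat: on some managed distributions .serverVersion.minor carries a "+" suffix (e.g. "23+"), which would break the subtraction; on kubeadm-built clusters it is a plain number.
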
@ -367,8 +355,8 @@ elif [ "$1" == 'backup' ]; then
|
|||||||
|
|
||||||
echo "Backup complete."
|
echo "Backup complete."
|
||||||
|
|
||||||
# Remove backups from previous versions
|
# Remove backups from pre-previous versions
|
||||||
restic forget --keep-tag $CLUSTER_VERSION --prune
|
restic forget --keep-tag $CLUSTER_VERSION --keep-tag $PREVIOUS_VERSION --prune
|
||||||
|
|
||||||
# Regular retention
|
# Regular retention
|
||||||
restic forget --keep-hourly 24 --keep-daily ${RESTIC_RETENTION:-7} --prune
|
restic forget --keep-hourly 24 --keep-daily ${RESTIC_RETENTION:-7} --prune
|
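
restic's --keep-tag is repeatable, and a snapshot matching any of the given tags is kept, so this retains the current and the previous version's backups and prunes everything older, matching the updated comment:

    # Keeps v1.23- and v1.22-tagged snapshots; anything older is pruned.
    restic forget --keep-tag $CLUSTER_VERSION --keep-tag $PREVIOUS_VERSION --prune
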
@@ -115,7 +115,7 @@ spec:
 
       containers:
         - name: aws-iam-authenticator
-          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.7
+          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.9
           args:
             - server
             - --backend-mode=CRD,MountedFile