#!/bin/bash
set -eE
set -o pipefail
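
# Upgrades an existing KubeZero cluster to the release in VERSION below:
# control-plane tasks run inside the versioned kubezero-admin container,
# per-node fixes run via a temporary privileged DaemonSet, and the updated
# ArgoCD application manifest is written to ARGO_APP for the user to commit.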

#VERSION="latest"
VERSION="v1.27"
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh

[ -n "$DEBUG" ] && set -x
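
# Run a shell command once on every node: a short-lived DaemonSet executes $CMD in a
# privileged busybox initContainer with the host filesystem mounted at /host (and the
# host /proc at /hostproc), waits for the rollout to finish and then deletes the
# DaemonSet again.
# Hypothetical usage example: all_nodes_upgrade "touch /host/tmp/node-upgraded"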
all_nodes_upgrade() {
  CMD="$1"

  echo "Deploy all node upgrade daemonSet (busybox)"
  cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kubezero-all-nodes-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  selector:
    matchLabels:
      name: kubezero-all-nodes-upgrade
  template:
    metadata:
      labels:
        name: kubezero-all-nodes-upgrade
    spec:
      hostNetwork: true
      hostIPC: true
      hostPID: true
      tolerations:
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      initContainers:
      - name: node-upgrade
        image: busybox
        command: ["/bin/sh"]
        args: ["-x", "-c", "$CMD" ]
        volumeMounts:
        - name: host
          mountPath: /host
        - name: hostproc
          mountPath: /hostproc
        securityContext:
          privileged: true
          capabilities:
            add: ["SYS_ADMIN"]
      containers:
      - name: node-upgrade-wait
        image: busybox
        command: ["sleep", "3600"]
      volumes:
      - name: host
        hostPath:
          path: /
          type: Directory
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
EOF

  kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
  kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}
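
# Run one or more comma-separated kubezero.sh admin tasks on a control-plane node: the
# tasks execute inside the kubezero-admin container of a host-network pod, the pod's
# logs are streamed until the tasks finish, and the pod is deleted afterwards.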
control_plane_upgrade() {
  TASKS="$1"

  echo "Deploy cluster admin task: $TASKS"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubezero-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  hostNetwork: true
  hostIPC: true
  hostPID: true
  containers:
  - name: kubezero-admin
    image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
    imagePullPolicy: Always
    command: ["kubezero.sh"]
    args: [$TASKS]
    env:
    - name: DEBUG
      value: "$DEBUG"
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    volumeMounts:
    - name: host
      mountPath: /host
    - name: workdir
      mountPath: /tmp
    securityContext:
      capabilities:
        add: ["SYS_CHROOT"]
  volumes:
  - name: host
    hostPath:
      path: /
      type: Directory
  - name: workdir
    emptyDir: {}
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
  restartPolicy: Never
EOF

  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
  while true; do
    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
    sleep 3
  done
  kubectl delete pod kubezero-upgrade -n kube-system
}
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
argo_used && disable_argo
#all_nodes_upgrade ""
2022-09-15 09:37:21 +00:00
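
# upgrade the control plane itself (the kubeadm_upgrade admin task) first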
control_plane_upgrade kubeadm_upgrade
#echo "Adjust kubezero values as needed:"
2022-11-09 16:08:22 +00:00
# shellcheck disable=SC2015
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system

# v1.27
# We need to restore the network ready file as cilium decided to rename it
control_plane_upgrade apply_network

echo "Wait for all CNI agents to be running ..."
kubectl rollout status ds/cilium -n kube-system --timeout=120s
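
# re-create the legacy 05-cilium.conf name on every node as a symlink to the renamed 05-cilium.conflist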
all_nodes_upgrade "cd /host/etc/cni/net.d && ln -s 05-cilium.conflist 05-cilium.conf || true"

# v1.27
# now the rest
control_plane_upgrade "apply_addons, apply_storage, apply_operators"

# v1.27
# Remove legacy eck-operator as part of logging if running
kubectl delete statefulset elastic-operator -n logging || true

# v1.27
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning

echo "Applying remaining KubeZero modules..."

# v1.27
### Cleanup of some deprecated Istio CRDs
for crd in clusterrbacconfigs.rbac.istio.io rbacconfigs.rbac.istio.io servicerolebindings.rbac.istio.io serviceroles.rbac.istio.io; do
  kubectl delete crds $crd || true
done

# Cleanup of some legacy node labels and annotations
controllers=$(kubectl get nodes -l node-role.kubernetes.io/control-plane -o json | jq .items[].metadata.name -r)
for c in $controllers; do
  for l in projectcalico.org/IPv4VXLANTunnelAddr projectcalico.org/IPv4Address; do
    kubectl annotate node $c ${l}-
  done
  kubectl label node $c topology.ebs.csi.aws.com/zone-
done
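
# Drop the managedFields entry still owned by the legacy cainjector from each cert-manager CRD,
# presumably so stale field ownership from the old release does not conflict with the upgrade: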
# Fix for legacy cert-manager CRDs to be upgraded
for crd_name in certificaterequests.cert-manager.io certificates.cert-manager.io challenges.acme.cert-manager.io clusterissuers.cert-manager.io issuers.cert-manager.io orders.acme.cert-manager.io; do
  manager_index="$(kubectl get crd "${crd_name}" --show-managed-fields --output json | jq -r '.metadata.managedFields | map(.manager == "cainjector") | index(true)')"
  [ "$manager_index" != "null" ] && kubectl patch crd "${crd_name}" --type=json -p="[{\"op\": \"remove\", \"path\": \"/metadata/managedFields/${manager_index}\"}]"
done

# v1.27
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argocd"

# Trigger backup of upgraded cluster state
kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$VERSION -n kube-system
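
# wait for the backup job to complete, then clean it up again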
while true; do
  kubectl wait --for=condition=complete job/kubezero-backup-$VERSION -n kube-system 2>/dev/null && kubectl delete job kubezero-backup-$VERSION -n kube-system && break
  sleep 1
done

# Final step is to commit the new argocd kubezero app
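# (the export drops status, operation and the old metadata, re-sets name/namespace,
#  sorts all keys and normalizes the inline Helm values block)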
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP

echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."

echo "<Return> to continue and re-enable ArgoCD:"
read -r

argo_used && enable_argo