fix: various upgrade fixes

Stefan Reimer 2022-09-15 11:37:21 +02:00
parent 3da7157f5a
commit a382c10b71
7 changed files with 38 additions and 66 deletions

View File

@@ -120,7 +120,7 @@ post_kubeadm() {
}
upgrade_cluster() {
kubeadm_upgrade() {
# pre upgrade hook
[ -f /var/lib/kubezero/pre-upgrade.sh ] && . /var/lib/kubezero/pre-upgrade.sh
@@ -144,7 +144,7 @@ upgrade_cluster() {
# Cleanup after kubeadm on the host
rm -rf ${HOSTFS}/etc/kubernetes/tmp
echo "Successfully upgraded cluster."
echo "Successfully upgraded kubeadm control plane."
# TODO
# Send Notification currently done via CloudBender -> SNS -> Slack
@@ -155,13 +155,6 @@ upgrade_cluster() {
}
upgrade_node() {
echo "Starting node upgrade ..."
echo "All done."
}
control_plane_node() {
CMD=$1
@@ -385,8 +378,7 @@ parse_kubezero
# Execute tasks
for t in $@; do
case "$t" in
upgrade_cluster) upgrade_cluster;;
upgrade_node) upgrade_node;;
kubeadm_upgrade) kubeadm_upgrade;;
bootstrap) control_plane_node bootstrap;;
join) control_plane_node join;;
restore) control_plane_node restore;;
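The dispatcher hands each positional argument to the matching case branch, so callers only need the new task name; a minimal invocation sketch (the script name kubezero.sh is illustrative, not taken from this diff):
./kubezero.sh kubeadm_upgrade    # hypothetical call; replaces the former `upgrade_cluster` task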

View File

@@ -21,7 +21,7 @@ function chart_location() {
function argo_used() {
kubectl get application kubezero -n argocd && rc=$? || rc=$?
kubectl get application kubezero -n argocd >/dev/null && rc=$? || rc=$?
return $rc
}
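The `cmd && rc=$? || rc=$?` idiom records the probe's exit code without tripping `set -e`; a minimal standalone sketch, not part of this commit:
set -e
# rc ends up 0 when the Application exists and non-zero otherwise, and the script keeps running either way
kubectl get application kubezero -n argocd >/dev/null && rc=$? || rc=$?
echo "argo in use: rc=$rc"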
@@ -45,11 +45,13 @@ spec:
- '*'
EOF
kubectl patch appproject kubezero -n argocd --patch-file _argoapp_patch.yaml --type=merge && rm _argoapp_patch.yaml
echo "Enabled service window for ArgoCD project kubezero"
}
function enable_argo() {
kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/spec/syncWindows"}]' || true
echo "Removed service window for ArgoCD project kubezero"
}
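Both helpers toggle the same maintenance (sync) window on the AppProject; a quick hedged check of the current state with plain kubectl and yq:
# prints the configured window while Argo is disabled, and `null` once enable_argo has removed it
kubectl get appproject kubezero -n argocd -o yaml | yq '.spec.syncWindows'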
@@ -130,7 +132,7 @@ function _helm() {
yq eval '.spec.source.helm.values' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
echo "using values for $module: "
echo "using values to $action of module $module: "
cat $WORKDIR/values.yaml
if [ $action == "crds" ]; then

View File

@@ -4,7 +4,7 @@ import argparse
import io
import yaml
DEFAULT_VERSION = "1.23.10-2"
DEFAULT_VERSION = "1.23.10-3"
def migrate(values):

View File

@@ -1,43 +1,41 @@
#!/bin/bash -e
#VERSION="v1.23.10-3"
VERSION="latest"
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
. $SCRIPT_DIR/libhelm.sh
VERSION="v1.23.10-2"
[ -n "$DEBUG" ] && set -x
# unset any AWS_DEFAULT_PROFILE as it will break aws-iam-auth
unset AWS_DEFAULT_PROFILE
all_nodes_upgrade() {
CMD="$1"
echo "Deploying node upgrade daemonSet..."
echo "Deploy all node upgrade daemonSet(busybox)"
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kubezero-upgrade-${VERSION//.}
name: kubezero-all-nodes-upgrade
namespace: kube-system
labels:
app: kubezero-upgrade
spec:
selector:
matchLabels:
name: kubezero-upgrade-${VERSION//.}
name: kubezero-all-nodes-upgrade
template:
metadata:
labels:
name: kubezero-upgrade-${VERSION//.}
name: kubezero-all-nodes-upgrade
spec:
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
initContainers:
- name: kubezero-upgrade-${VERSION//.}
- name: node-upgrade
image: busybox
command: ["/bin/sh"]
args: ["-x", "-c", "$CMD" ]
@@ -48,7 +46,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
containers:
- name: kubezero-upgrade-${VERSION//.}-wait
- name: node-upgrade-wait
image: busybox
command: ["sleep", "3600"]
volumes:
@@ -58,20 +56,20 @@ spec:
type: Directory
EOF
kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
kubectl delete ds kubezero-upgrade-${VERSION//.} -n kube-system
kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}
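Because the heredoc delimiter above is unquoted, the shell expands `$CMD` into the manifest before kubectl ever sees it; a minimal standalone sketch of that pattern (the ConfigMap name and command are hypothetical):
CMD="uname -r"
cat <<EOF | kubectl apply --dry-run=client -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: heredoc-demo
  namespace: kube-system
data:
  cmd: "$CMD"
EOF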
control_plane_upgrade() {
TASKS="$1"
echo "Deploying cluster upgrade job ..."
echo "Deploy cluster admin task: $TASK"
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: kubezero-upgrade-${VERSION//.}
name: kubezero-upgrade
namespace: kube-system
labels:
app: kubezero-upgrade
@@ -115,29 +113,29 @@ spec:
restartPolicy: Never
EOF
kubectl wait pod kubezero-upgrade-${VERSION//.} -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
while true; do
kubectl logs kubezero-upgrade-${VERSION//.} -n kube-system -f 2>/dev/null && break
kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
sleep 3
done
kubectl delete pod kubezero-upgrade-${VERSION//.} -n kube-system
kubectl delete pod kubezero-upgrade -n kube-system
}
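Since the pod is now named plain `kubezero-upgrade` instead of carrying a version suffix, a leftover pod from an aborted run must be removed before retrying; a hedged cleanup one-liner:
kubectl delete pod kubezero-upgrade -n kube-system --ignore-not-found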
argo_used && disable_argo
#all_nodes_upgrade "mount --make-shared /host/sys/fs/cgroup; mount --make-shared /host/sys;"
all_nodes_upgrade "mount --make-shared /host/sys/fs/cgroup; mount --make-shared /host/sys;"
control_plane_upgrade upgrade_cluster
control_plane_upgrade kubeadm_upgrade
#echo "Adjust kubezero-values CM !!"
#read
#kubectl delete ds kube-multus-ds -n kube-system
kubectl delete ds kube-multus-ds -n kube-system
control_plane_upgrade "apply_network, apply_addons"
#kubectl rollout restart daemonset/calico-node -n kube-system
#kubectl rollout restart daemonset/cilium -n kube-system
#kubectl rollout restart daemonset/kube-multus-ds -n kube-system
kubectl rollout restart daemonset/calico-node -n kube-system
kubectl rollout restart daemonset/cilium -n kube-system
kubectl rollout restart daemonset/kube-multus-ds -n kube-system
argo_used && enable_argo

View File

@@ -31,7 +31,7 @@ yq e '.network |
{"network": .}' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml > $WORKDIR/network-values.yaml
# get current argo cd values
kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.values > ${WORKDIR}/argo-values.yaml
kubectl get application kubezero -n argocd -o yaml | yq '.spec.source.helm.values' > ${WORKDIR}/argo-values.yaml
# merge all into new CM
yq ea '. as $item ireduce ({}; . * $item ) |
@@ -45,28 +45,8 @@ kubectl get cm -n kube-system kubezero-values -o=yaml | \
kubectl replace -f -
kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml)
# update argo app, create new from scratch as Argo is really picky being patched
# autosync DISABLED !!!
cat > $WORKDIR/kube-argo.yaml <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kubezero
namespace: argocd
spec:
project: kubezero
source:
repoURL: https://cdn.zero-downtime.net/charts
chart: kubezero
targetRevision: $kubezero_chart_version
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy: {}
EOF
yq eval -i '.spec.source.helm.values |= load_str("/tmp/kubezero/kubezero-values.yaml")' $WORKDIR/kube-argo.yaml
kubectl apply -f $WORKDIR/kube-argo.yaml
# update argo app
kubectl get application kubezero -n argocd -o yaml | \
kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
yq '.spec.source.helm.values |= load_str("/tmp/kubezero/kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
kubectl apply -f -
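The replacement pipeline relies on two yq v4 builtins: `strenv()` reads an environment variable inside the expression and `load_str()` inlines a file's raw text as a string; a standalone sketch with illustrative file names:
chart_version=1.23.10-3 \
yq '.spec.source.targetRevision = strenv(chart_version) |
  .spec.source.helm.values |= load_str("values.yaml")' application.yaml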

View File

@@ -19,7 +19,7 @@ dependencies:
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: cilium
version: 1.12.1
version: 1.12.2
repository: https://helm.cilium.io/
condition: cilium.enabled
- name: metallb
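Bumping a pinned dependency in Chart.yaml also means regenerating the chart's lock file and vendored archives; a hedged sketch, assuming the chart directory is charts/kubezero-network:
helm repo add cilium https://helm.cilium.io/
helm dependency update charts/kubezero-network   # refreshes Chart.lock and charts/ with cilium 1.12.2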

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
version: 1.23.10-2
version: 1.23.10-3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: