fix: various upgrade fixes

Stefan Reimer 2022-09-15 11:37:21 +02:00
parent 3da7157f5a
commit a382c10b71
7 changed files with 38 additions and 66 deletions

View File

@@ -120,7 +120,7 @@ post_kubeadm() {
 }

-upgrade_cluster() {
+kubeadm_upgrade() {
   # pre upgrade hook
   [ -f /var/lib/kubezero/pre-upgrade.sh ] && . /var/lib/kubezero/pre-upgrade.sh

@@ -144,7 +144,7 @@ upgrade_cluster() {
   # Cleanup after kubeadm on the host
   rm -rf ${HOSTFS}/etc/kubernetes/tmp

-  echo "Successfully upgraded cluster."
+  echo "Successfully upgraded kubeadm control plane."

   # TODO
   # Send Notification currently done via CloudBender -> SNS -> Slack

@@ -155,13 +155,6 @@ upgrade_cluster() {
 }

-upgrade_node() {
-  echo "Starting node upgrade ..."
-
-  echo "All done."
-}
-
 control_plane_node() {
   CMD=$1

@@ -385,8 +378,7 @@ parse_kubezero
 # Execute tasks
 for t in $@; do
   case "$t" in
-    upgrade_cluster) upgrade_cluster;;
-    upgrade_node) upgrade_node;;
+    kubeadm_upgrade) kubeadm_upgrade;;
     bootstrap) control_plane_node bootstrap;;
     join) control_plane_node join;;
     restore) control_plane_node restore;;
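With the hunks above, the old upgrade_cluster and upgrade_node tasks are gone and the control-plane upgrade is dispatched under the single task name kubeadm_upgrade. A minimal invocation sketch of the task loop (the admin script name kubezero.sh is an assumption; the filename is not shown in this diff):

    # run the renamed control-plane upgrade task (script name assumed)
    ./kubezero.sh kubeadm_upgrade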

View File

@@ -21,7 +21,7 @@ function chart_location() {

 function argo_used() {
-  kubectl get application kubezero -n argocd && rc=$? || rc=$?
+  kubectl get application kubezero -n argocd >/dev/null && rc=$? || rc=$?
   return $rc
 }

@@ -45,11 +45,13 @@ spec:
     - '*'
 EOF

   kubectl patch appproject kubezero -n argocd --patch-file _argoapp_patch.yaml --type=merge && rm _argoapp_patch.yaml
+  echo "Enabled service window for ArgoCD project kubezero"
 }

 function enable_argo() {
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/spec/syncWindows"}]' || true
+  echo "Removed service window for ArgoCD project kubezero"
 }

@@ -130,7 +132,7 @@ function _helm() {
     yq eval '.spec.source.helm.values' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml

-    echo "using values for $module: "
+    echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml

     if [ $action == "crds" ]; then
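The argo_used, disable_argo, and enable_argo helpers touched above are meant to bracket cluster maintenance with an ArgoCD sync window, which is exactly how the upgrade script later in this commit calls them. A minimal usage sketch (the libhelm.sh path is assumed):

    # wrap cluster maintenance in an ArgoCD deny sync window
    . ./libhelm.sh                  # path assumed
    argo_used && disable_argo       # open the service window, suspend syncs
    # ... run upgrade tasks here ...
    argo_used && enable_argo        # remove the window again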

View File

@@ -4,7 +4,7 @@ import argparse
 import io
 import yaml

-DEFAULT_VERSION = "1.23.10-2"
+DEFAULT_VERSION = "1.23.10-3"

 def migrate(values):

View File

@@ -1,43 +1,41 @@
 #!/bin/bash -e
+#VERSION="v1.23.10-3"
+VERSION="latest"

 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 . $SCRIPT_DIR/libhelm.sh

-VERSION="v1.23.10-2"
-
 [ -n "$DEBUG" ] && set -x

+# unset any AWS_DEFAULT_PROFILE as it will break aws-iam-auth
+unset AWS_DEFAULT_PROFILE
+
 all_nodes_upgrade() {
   CMD="$1"

-  echo "Deploying node upgrade daemonSet..."
+  echo "Deploy all node upgrade daemonSet(busybox)"
   cat <<EOF | kubectl apply -f -
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: kubezero-upgrade-${VERSION//.}
+  name: kubezero-all-nodes-upgrade
   namespace: kube-system
   labels:
     app: kubezero-upgrade
 spec:
   selector:
     matchLabels:
-      name: kubezero-upgrade-${VERSION//.}
+      name: kubezero-all-nodes-upgrade
   template:
     metadata:
       labels:
-        name: kubezero-upgrade-${VERSION//.}
+        name: kubezero-all-nodes-upgrade
     spec:
       tolerations:
       - key: node-role.kubernetes.io/master
         operator: Exists
         effect: NoSchedule
       initContainers:
-      - name: kubezero-upgrade-${VERSION//.}
+      - name: node-upgrade
         image: busybox
         command: ["/bin/sh"]
         args: ["-x", "-c", "$CMD" ]
@@ -48,7 +46,7 @@ spec:
           capabilities:
             add: ["SYS_ADMIN"]
       containers:
-      - name: kubezero-upgrade-${VERSION//.}-wait
+      - name: node-upgrade-wait
         image: busybox
         command: ["sleep", "3600"]
       volumes:
@@ -58,20 +56,20 @@ spec:
           type: Directory
 EOF

-  kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
-  kubectl delete ds kubezero-upgrade-${VERSION//.} -n kube-system
+  kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
+  kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
 }

 control_plane_upgrade() {
   TASKS="$1"

-  echo "Deploying cluster upgrade job ..."
+  echo "Deploy cluster admin task: $TASK"
   cat <<EOF | kubectl apply -f -
 apiVersion: v1
 kind: Pod
 metadata:
-  name: kubezero-upgrade-${VERSION//.}
+  name: kubezero-upgrade
   namespace: kube-system
   labels:
     app: kubezero-upgrade
@@ -115,29 +113,29 @@ spec:
   restartPolicy: Never
 EOF

-  kubectl wait pod kubezero-upgrade-${VERSION//.} -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
+  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
   while true; do
-    kubectl logs kubezero-upgrade-${VERSION//.} -n kube-system -f 2>/dev/null && break
+    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
     sleep 3
   done
-  kubectl delete pod kubezero-upgrade-${VERSION//.} -n kube-system
+  kubectl delete pod kubezero-upgrade -n kube-system
 }

 argo_used && disable_argo

-#all_nodes_upgrade "mount --make-shared /host/sys/fs/cgroup; mount --make-shared /host/sys;"
+all_nodes_upgrade "mount --make-shared /host/sys/fs/cgroup; mount --make-shared /host/sys;"

-control_plane_upgrade upgrade_cluster
+control_plane_upgrade kubeadm_upgrade

 #echo "Adjust kubezero-values CM !!"
 #read

-#kubectl delete ds kube-multus-ds -n kube-system
+kubectl delete ds kube-multus-ds -n kube-system

 control_plane_upgrade "apply_network, apply_addons"

-#kubectl rollout restart daemonset/calico-node -n kube-system
-#kubectl rollout restart daemonset/cilium -n kube-system
-#kubectl rollout restart daemonset/kube-multus-ds -n kube-system
+kubectl rollout restart daemonset/calico-node -n kube-system
+kubectl rollout restart daemonset/cilium -n kube-system
+kubectl rollout restart daemonset/kube-multus-ds -n kube-system

 argo_used && enable_argo

View File

@@ -31,7 +31,7 @@ yq e '.network |
   {"network": .}' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml > $WORKDIR/network-values.yaml

 # get current argo cd values
-kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.values > ${WORKDIR}/argo-values.yaml
+kubectl get application kubezero -n argocd -o yaml | yq '.spec.source.helm.values' > ${WORKDIR}/argo-values.yaml

 # merge all into new CM
 yq ea '. as $item ireduce ({}; . * $item ) |
@@ -45,28 +45,8 @@ kubectl get cm -n kube-system kubezero-values -o=yaml | \
   kubectl replace -f -

-kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml)
-
-# update argo app, create new from scratch as Argo is really picky being patched
-# autosync DISABLED !!!
-
-cat > $WORKDIR/kube-argo.yaml <<EOF
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: kubezero
-  namespace: argocd
-spec:
-  project: kubezero
-  source:
-    repoURL: https://cdn.zero-downtime.net/charts
-    chart: kubezero
-    targetRevision: $kubezero_chart_version
-  destination:
-    server: https://kubernetes.default.svc
-    namespace: argocd
-  syncPolicy: {}
-EOF
-
-yq eval -i '.spec.source.helm.values |= load_str("/tmp/kubezero/kubezero-values.yaml")' $WORKDIR/kube-argo.yaml
-
-kubectl apply -f $WORKDIR/kube-argo.yaml
+# update argo app
+kubectl get application kubezero -n argocd -o yaml | \
+  kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
+  yq '.spec.source.helm.values |= load_str("/tmp/kubezero/kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
+  kubectl apply -f -
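The replacement hunk above leans on yq v4's load_str and strenv operators to patch the live Application in a single pipeline instead of regenerating it from scratch. A standalone sketch of the same pattern, with placeholder file names:

    # splice a values file and a version env var into a YAML document (yq v4)
    chart_version="1.23.10-3" \
    yq '.spec.source.helm.values |= load_str("kubezero-values.yaml") |
        .spec.source.targetRevision = strenv(chart_version)' application.yaml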

View File

@@ -19,7 +19,7 @@ dependencies:
     version: ">= 0.1.5"
     repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
-    version: 1.12.1
+    version: 1.12.2
     repository: https://helm.cilium.io/
     condition: cilium.enabled
   - name: metallb

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.23.10-2
+version: 1.23.10-3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords: