fix: cluster upgrade logic for 1.30

This commit is contained in:
Stefan Reimer 2024-10-22 13:54:19 +01:00
parent 8dde055cbf
commit 29c8fbab68
6 changed files with 59 additions and 32 deletions

View File

@ -7,6 +7,7 @@ ARTIFACTS=($(echo $1 | tr "," "\n"))
ACTION=$2
LOCAL_DEV=1
ARGOCD="False"
#VERSION="latest"
KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
@ -76,7 +77,7 @@ function metrics-pre() {
### Main
get_kubezero_values
get_kubezero_values $ARGOCD
# Always use embedded kubezero chart
helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $KUBE_VERSION --version ~$KUBE_VERSION --devel --output-dir $WORKDIR

View File

@ -126,8 +126,30 @@ post_kubeadm() {
kubeadm_upgrade() {
# pre upgrade hook
### Remove with 1.31
# migrate kubezero CM to kubezero NS
# migrate ArgoCD app from values to valuesObject
if [ "$ARGOCD" == "True" ]; then
kubectl get app kubezero -n argocd -o yaml > $WORKDIR/kubezero-argo-app.yaml
if [ "$(yq '(.spec.source.helm | has "values")' $WORKDIR/kubezero-argo-app.yaml)" == "true" ]; then
yq '.spec.source.helm.valuesObject = (.spec.source.helm.values | from_yaml)' \
$WORKDIR/kubezero-argo-app.yaml | kubectl apply --server-side --force-conflicts -f -
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/spec/source/helm/values"}]'
kubectl delete cm kubezero-values -n kube-system > /dev/null || true
fi
else
kubectl get cm kubezero-values -n kubezero > /dev/null || \
{ create_ns kubezero; kubectl get cm kubezero-values -n kube-system -o yaml | \
sed 's/^ namespace: kube-system/ namespace: kubezero/' | \
kubectl create -f - && \
kubectl delete cm kubezero-values -n kube-system ; }
fi
###
# get current values, argo app over cm
get_kubezero_values
get_kubezero_values $ARGOCD
# tumble new config through migrate.py
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
@ -140,8 +162,8 @@ kubeadm_upgrade() {
# update argo app
export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
kubectl get application kubezero -n argocd -o yaml | \
yq 'del(.spec.source.helm.values) | .spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
kubectl apply -f -
yq '.spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
kubectl apply --server-side --force-conflicts -f -
# finally remove annotation to allow argo to sync again
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
@ -323,7 +345,7 @@ control_plane_node() {
apply_module() {
MODULES=$1
get_kubezero_values
get_kubezero_values $ARGOCD
# Always use embedded kubezero chart
helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
@ -344,7 +366,7 @@ apply_module() {
delete_module() {
MODULES=$1
get_kubezero_values
get_kubezero_values $ARGOCD
# Always use embedded kubezero chart
helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
@ -406,14 +428,21 @@ parse_kubezero
# Execute tasks
for t in $@; do
case "$t" in
kubeadm_upgrade) kubeadm_upgrade;;
bootstrap) control_plane_node bootstrap;;
join) control_plane_node join;;
restore) control_plane_node restore;;
apply_*) apply_module "${t##apply_}";;
delete_*) delete_module "${t##delete_}";;
kubeadm_upgrade)
ARGOCD=$(argo_used)
kubeadm_upgrade;;
apply_*)
ARGOCD=$(argo_used)
apply_module "${t##apply_}";;
delete_*)
ARGOCD=$(argo_used)
delete_module "${t##delete_}";;
backup) backup;;
debug_shell) debug_shell;;
*) echo "Unknown command: '$t'";;
esac
done

View File

@ -27,25 +27,20 @@ function chart_location() {
function argo_used() {
kubectl get application kubezero -n argocd >/dev/null && rc=$? || rc=$?
return $rc
kubectl get application kubezero -n argocd >/dev/null \
&& echo "True" || echo "False"
}
# get kubezero-values from ArgoCD if available or use in-cluster CM without Argo
# get kubezero-values from ArgoCD if available or use in-cluster CM
function get_kubezero_values() {
### Remove with 1.31
### Migrate the kubezero CM from kube-system to kubezero NS during the 1.30 cycle
kubectl get cm kubezero-values -n kubezero > /dev/null || \
{ create_ns kubezero; kubectl get cm kubezero-values -n kube-system -o yaml | \
sed 's/^ namespace: kube-system/ namespace: kubezero/' | \
kubectl create -f - && \
kubectl delete cm kubezero-values -n kube-system ; }
###
local argo=${1:-"False"}
argo_used && \
{ kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml ; } || \
{ kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml ; }
if [ "$argo" == "True" ]; then
kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml
else
kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml
fi
}
@ -235,7 +230,6 @@ spec:
hostPID: true
tolerations:
- operator: Exists
effect: NoSchedule
initContainers:
- name: node-upgrade
image: busybox

View File

@ -12,12 +12,15 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
. "$SCRIPT_DIR"/libhelm.sh
ARGOCD=$(argo_used)
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
#waitSystemPodsRunning
argo_used && disable_argo
[ "$ARGOCD" == "True" ] && disable_argo
#all_nodes_upgrade ""
# Preload cilium images to running nodes
all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3"
control_plane_upgrade kubeadm_upgrade
@ -26,7 +29,7 @@ read -r
#echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
#[ "$ARGOCD" == "True" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
### v1.30
#
@ -57,4 +60,4 @@ echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to
echo "<Return> to continue and re-enable ArgoCD:"
read -r
argo_used && enable_argo
[ "$ARGOCD" == "True" ] && enable_argo

View File

@ -19,5 +19,6 @@ Installs Istio Ingress Gateways, requires kubezero-istio to be installed !
## Resources
### Envoy Listener Filter - TCP KeepAlive
- https://github.com/cilium/cilium/blob/main/operator/pkg/model/translation/envoy_listener.go#L134

View File

@ -19,12 +19,11 @@ Installs the Istio control plane
## Resources
- https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#IstioOperatorSpec
- https://github.com/istio/istio/blob/master/manifests/profiles/default.yaml
- https://istio.io/latest/docs/setup/install/standalone-operator/
### Grafana
- https://grafana.com/grafana/dashboards/7645
- https://grafana.com/grafana/dashboards/7639
- https://grafana.com/grafana/dashboards/7636
- https://grafana.com/grafana/dashboards/7630
- https://grafana.com/grafana/dashboards/11829