fix: various fixes to improve upgrade reliability
This commit is contained in:
parent
b8f1991095
commit
591cb0fc46
@ -61,6 +61,8 @@ function cert-manager-post() {
|
||||
# ArgoCD #
|
||||
###########
|
||||
function argocd-pre() {
|
||||
kubectl delete job argo-argocd-redis-secret-init -n argocd || true
|
||||
|
||||
for f in $CLUSTER/secrets/argocd-*.yaml; do
|
||||
kubectl apply -f $f
|
||||
done
|
||||
|
@ -129,6 +129,8 @@ kubeadm_upgrade() {
|
||||
### Remove with 1.31
|
||||
# migrate kubezero CM to kubezero NS
|
||||
# migrate ArgoCD app from values to valuesObject
|
||||
create_ns kubezero
|
||||
|
||||
if [ "$ARGOCD" == "True" ]; then
|
||||
kubectl get app kubezero -n argocd -o yaml > $WORKDIR/kubezero-argo-app.yaml
|
||||
if [ "$(yq '(.spec.source.helm | has "values")' $WORKDIR/kubezero-argo-app.yaml)" == "true" ]; then
|
||||
@ -137,11 +139,12 @@ kubeadm_upgrade() {
|
||||
|
||||
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/spec/source/helm/values"}]'
|
||||
kubectl delete cm kubezero-values -n kube-system > /dev/null || true
|
||||
kubectl create configmap -n kubezero kubezero-values || true
|
||||
fi
|
||||
|
||||
else
|
||||
kubectl get cm kubezero-values -n kubezero > /dev/null || \
|
||||
{ create_ns kubezero; kubectl get cm kubezero-values -n kube-system -o yaml | \
|
||||
{ kubectl get cm kubezero-values -n kube-system -o yaml | \
|
||||
sed 's/^ namespace: kube-system/ namespace: kubezero/' | \
|
||||
kubectl create -f - && \
|
||||
kubectl delete cm kubezero-values -n kube-system ; }
|
||||
@ -157,16 +160,18 @@ kubeadm_upgrade() {
|
||||
# Update kubezero-values CM
|
||||
kubectl get cm -n kubezero kubezero-values -o=yaml | \
|
||||
yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
|
||||
kubectl replace -f -
|
||||
|
||||
# update argo app
|
||||
export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
|
||||
kubectl get application kubezero -n argocd -o yaml | \
|
||||
yq '.spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
|
||||
kubectl apply --server-side --force-conflicts -f -
|
||||
|
||||
# finally remove annotation to allow argo to sync again
|
||||
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
|
||||
if [ "$ARGOCD" == "True" ]; then
|
||||
# update argo app
|
||||
export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
|
||||
kubectl get application kubezero -n argocd -o yaml | \
|
||||
yq '.spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
|
||||
kubectl apply --server-side --force-conflicts -f -
|
||||
|
||||
# finally remove annotation to allow argo to sync again
|
||||
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
|
||||
fi
|
||||
|
||||
# Local node upgrade
|
||||
render_kubeadm upgrade
|
||||
|
@ -267,6 +267,8 @@ EOF
|
||||
function control_plane_upgrade() {
|
||||
TASKS="$1"
|
||||
|
||||
[ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
|
||||
|
||||
echo "Deploy cluster admin task: $TASKS"
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
|
@ -19,9 +19,6 @@ echo "Checking that all pods in kube-system are running ..."
|
||||
|
||||
[ "$ARGOCD" == "True" ] && disable_argo
|
||||
|
||||
# Preload cilium images to running nodes
|
||||
all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3"
|
||||
|
||||
control_plane_upgrade kubeadm_upgrade
|
||||
|
||||
echo "Control plane upgraded, <Return> to continue"
|
||||
@ -35,6 +32,10 @@ read -r
|
||||
#
|
||||
|
||||
# upgrade modules
|
||||
#
|
||||
# Preload cilium images to running nodes
|
||||
all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3"
|
||||
|
||||
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
|
||||
|
||||
echo "Checking that all pods in kube-system are running ..."
|
||||
|
@ -0,0 +1,11 @@
|
||||
--- charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
|
||||
+++ charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
|
||||
@@ -117,7 +117,7 @@ spec:
|
||||
|
||||
containers:
|
||||
- name: aws-iam-authenticator
|
||||
- image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.22
|
||||
+ image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.27
|
||||
args:
|
||||
- server
|
||||
- --backend-mode=CRD,MountedFile
|
@ -43,7 +43,7 @@ network:
|
||||
cert-manager:
|
||||
enabled: false
|
||||
namespace: cert-manager
|
||||
targetRevision: 0.9.9
|
||||
targetRevision: 0.9.10
|
||||
|
||||
storage:
|
||||
enabled: false
|
||||
@ -96,7 +96,7 @@ telemetry:
|
||||
operators:
|
||||
enabled: false
|
||||
namespace: operators
|
||||
targetRevision: 0.1.4
|
||||
targetRevision: 0.1.5
|
||||
|
||||
metrics:
|
||||
enabled: false
|
||||
@ -114,7 +114,7 @@ metrics:
|
||||
logging:
|
||||
enabled: false
|
||||
namespace: logging
|
||||
targetRevision: 0.8.12
|
||||
targetRevision: 0.8.13
|
||||
|
||||
argo:
|
||||
enabled: false
|
||||
|
Loading…
Reference in New Issue
Block a user