More fixes and upgrade docs

This commit is contained in:
Stefan Reimer 2020-12-01 07:46:04 -08:00
parent 3497392c39
commit 8b048dd390
6 changed files with 83 additions and 24 deletions

View File

@ -1,14 +0,0 @@
#!/bin/bash
# One-off cleanup of the old operator-based Istio install; presumably run
# before the new helm-based istio / istio-ingress charts are deployed
# (see the upgrade docs in this commit) — confirm ordering with those docs.
# Remove operator first
kubectl delete deployment istio-operator -n istio-operator
kubectl delete ns istio-operator
# Remove policy
kubectl delete deployment istio-policy -n istio-system
# Install new istio and istio-ingress chart
# Remove old ingress
kubectl delete deployment istio-ingressgateway -n istio-system
kubectl delete deployment istio-private-ingressgateway -n istio-system

View File

@ -2,7 +2,7 @@
set -ex
ACTION=$1
ARTIFACTS=("$2")
ARTIFACTS=($(echo $2 | tr "," "\n"))
CLUSTER=$3
LOCATION=${4:-""}
@ -99,10 +99,9 @@ function _helm() {
if [ $action == "crds" ]; then
_crds
else
elif [ $action == "apply" ]; then
# namespace must exist prior to apply
[ $action == "apply" ] && create_ns $namespace
create_ns $namespace
# Optional pre hook
declare -F ${release}-pre && ${release}-pre
@ -112,8 +111,11 @@ function _helm() {
# Optional post hook
declare -F ${release}-post && ${release}-post
elif [ $action == "delete" ]; then
apply
# Delete dedicated namespace if not kube-system
[ $action == "delete" ] && delete_ns $namespace
delete_ns $namespace
fi
return 0

View File

@ -0,0 +1,19 @@
#!/bin/bash
# Tear down the operator-managed Istio pieces so the new helm-based
# istio / istio-ingress charts can take over.

# Operator custom resources go first, so the operator can reap what it owns.
for op in kubezero-istio kubezero-istio-private-ingress; do
  kubectl delete Istiooperators "$op" -n istio-system
done

# Then the operator deployment and its dedicated namespace.
kubectl delete deployment istio-operator -n istio-operator
kubectl delete ns istio-operator

# The policy pod is no longer deployed by default.
kubectl delete deployment istio-policy -n istio-system

# Legacy gateway objects, superseded by the istio-ingress chart.
for gw in ingressgateway private-ingressgateway; do
  kubectl delete gateways "$gw" -n istio-system
done

# The shared public cert leaves istio-system as well.
kubectl delete certificate public-ingress-cert -n istio-system

View File

@ -0,0 +1,19 @@
#!/bin/bash -x
# Re-point VirtualServices from the old istio-system gateways to the new
# gateways in the istio-ingress namespace, by replacing spec.gateways[0].

# patch_gateway <match> <new-gateway>
#   match       - string matched against 'kubectl get vs -A' output (the old
#                 gateway reference shown in the GATEWAYS column)
#   new-gateway - value written to /spec/gateways/0
# NOTE(review): like the original, this greps the whole row, so a hostname
# containing the match string would also be patched — acceptable here.
patch_gateway() {
  local match=$1 new=$2 ns name
  kubectl get vs -A | grep "$match" | awk '{print $1 " " $2}' |
    while read -r ns name; do
      kubectl patch virtualservice "$name" -n "$ns" --type=json \
        -p='[{"op": "replace", "path": "/spec/gateways/0", "value":"'"$new"'"}]'
    done
}

# All public vs
patch_gateway "istio-system/ingressgateway" "istio-ingress/ingressgateway"
# All private vs
patch_gateway "istio-system/private-ingressgateway" "istio-ingress/private-ingressgateway"

View File

@ -0,0 +1,7 @@
#!/bin/bash
# Remove the "argocd.argoproj.io/instance" label from every namespace that
# carries it, so ArgoCD pruning cannot delete those namespaces later on.
# The trailing '-' in the label argument is kubectl's delete-label syntax.
kubectl get ns -l argocd.argoproj.io/instance --no-headers | awk '{print $1}' |
  while read -r n; do
    kubectl label --overwrite namespace "$n" 'argocd.argoproj.io/instance-'
  done

View File

@ -1,18 +1,44 @@
# Upgrade to KubeZero V2(Argoless)
- disable all auto-sync in argo !! ( remove auto-sync from old values.yaml and run deploy one last time ) or disable manual via Argo UI starting with Kubezero app itself
## ArgoCD prep
- disable all auto-sync and the "prune" feature, to prevent namespaces defined in previous apps from being removed
- either remove auto-sync from old values.yaml and run deploy one last time, trigger kubezero sync !
- or disable it manually via the Argo UI, starting with the KubeZero app itself
- uninstall argo helm chart
`helm uninstall kubezero -n argocd`
- remove all "argocd.argoproj.io/instance" labels from namespaces to prevent namespace removal later on
`scripts/remove_argo_ns.sh`
- migrate values.yaml to new structure, adapt as needed
- update new central kubezero location in git and merge cluster configs
- update the new central kubezero location in git and merge cluster configs
- Upgrade control plane nodes / worker nodes
- upgrade all crds
- upgrade calico,cert-manager,kiam,csi drivers
- Istio:
`./bootstrap.sh crds all clusters/$CLUSTER ../../../kubezero/charts`
- upgrade base artifacts
`./bootstrap.sh deploy calico,cert-manager,kiam,aws-ebs-csi-driver,aws-efs-csi-driver clusters/$CLUSTER ../../../kubezero/charts`
- Istio, due to changes of the ingress namespace we need brief downtime
DOWNTIME STARTS !
- delete istio operators, to remove all pieces, remove operator itself
`./scripts/delete_istio_17.sh`
- deploy istio and istio-ingress via bootstrap.sh
`./bootstrap.sh deploy all clusters/$CLUSTER ../../../kubezero/charts`
- patch all VirtualServices via script to new namespace
`./scripts/patch_vs.sh`
DOWNTIME ENDS !
- upgrade all artifacts
`./bootstrap.sh deploy all clusters/$CLUSTER ../../../kubezero/charts`
- push kubezero cluster config
- verify argocd incl. kubezero app
- verify all argo apps
- verify all the things
## High level / Admin changes
- ArgoCD is now optional
@ -50,6 +76,6 @@
### Istio
- operator removed, deployment migrated to helm, cleanups
- version bump to 1.8
- no more policy by default
- no more policy pod by default
- all ingress in dedicated new namespace istio-ingress as well as dedicated helm chart
- set priority class