fix: minor version bumps, EBS fix, latest upgrade docs

Stefan Reimer 2022-05-05 14:37:52 +02:00
parent fa5c1f9b2a
commit d900f21944
12 changed files with 38 additions and 23 deletions

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.4.50
version: 0.4.51
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -26,7 +26,7 @@ dependencies:
repository: https://dl.gitea.io/charts/
condition: gitea.enabled
- name: jenkins
version: 3.12.0
version: 3.12.2
repository: https://charts.jenkins.io
condition: jenkins.enabled
- name: trivy

View File

@ -1,6 +1,6 @@
# kubezero-ci
![Version: 0.4.50](https://img.shields.io/badge/Version-0.4.50-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.4.51](https://img.shields.io/badge/Version-0.4.51-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI
@ -20,7 +20,7 @@ Kubernetes: `>= 1.20.0`
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.12 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.jenkins.io | jenkins | 3.12.0 |
| https://charts.jenkins.io | jenkins | 3.12.2 |
| https://dl.gitea.io/charts/ | gitea | 5.0.5 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 |
@ -111,7 +111,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
| jenkins.controller.resources.requests.cpu | string | `"250m"` | |
| jenkins.controller.resources.requests.memory | string | `"1280Mi"` | |
| jenkins.controller.tag | string | `"2.332.2-lts-jdk17-preview"` | |
| jenkins.controller.tag | string | `"2.332.3-lts-jdk17-preview"` | |
| jenkins.controller.testEnabled | bool | `false` | |
| jenkins.enabled | bool | `false` | |
| jenkins.istio.agent.enabled | bool | `false` | |

View File

@ -69,7 +69,7 @@ jenkins:
enabled: false
controller:
tag: 2.332.2-lts-jdk17-preview
tag: 2.332.3-lts-jdk17-preview
#tagLabel: alpine
disableRememberMe: true
prometheus:

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-storage
description: KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
type: application
version: 0.6.2
version: 0.6.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,6 +1,6 @@
# kubezero-storage
![Version: 0.6.2](https://img.shields.io/badge/Version-0.6.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.6.3](https://img.shields.io/badge/Version-0.6.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
@ -38,6 +38,7 @@ Kubernetes: `>= 1.20.0`
| aws-ebs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-ebs-csi-driver.enabled | bool | `false` | |
| aws-ebs-csi-driver.image.tag | string | `"v1.5.3"` | |
| aws-ebs-csi-driver.node.resources.limits.memory | string | `"32Mi"` | |
| aws-ebs-csi-driver.node.resources.requests.cpu | string | `"10m"` | |
| aws-ebs-csi-driver.node.resources.requests.memory | string | `"16Mi"` | |

View File

@ -63,6 +63,11 @@ gemini:
aws-ebs-csi-driver:
enabled: false
# Starting with 1.6 the ebs-plugin panics with "could not get number of attached ENIs",
# apparently related to metadata / volume attach limits on Nitro instances ... AWS as usual
image:
tag: v1.5.3
controller:
replicaCount: 1
logLevel: 2
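To confirm the pin actually takes effect after deployment, a check along these lines can help (a sketch; it assumes the driver runs in `kube-system` and keeps the upstream chart's `app.kubernetes.io/name` label):

```bash
# list every aws-ebs-csi-driver pod with its container images; one of them should be the v1.5.3 plugin
kubectl -n kube-system get pods -l app.kubernetes.io/name=aws-ebs-csi-driver \
  -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.containers[*].image}{"\n"}{end}'
```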

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
version: 1.22.8-5
version: 1.22.8-6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,6 +1,6 @@
# kubezero
![Version: 1.22.8-5](https://img.shields.io/badge/Version-1.22.8--5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 1.22.8-6](https://img.shields.io/badge/Version-1.22.8--6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart
@ -65,7 +65,7 @@ Kubernetes: `>= 1.20.0`
| storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.6.2"` | |
| storage.targetRevision | string | `"0.6.3"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.9.1](https://github.com/norwoodj/helm-docs/releases/v1.9.1)

View File

@ -22,7 +22,7 @@ cert-manager:
storage:
enabled: false
targetRevision: 0.6.2
targetRevision: 0.6.3
aws-ebs-csi-driver:
enabled: false
aws-efs-csi-driver:

View File

@ -81,14 +81,23 @@ Change Kubernetes version in controller config from `1.21.9` to `1.22.8`
Wait each time for the controller to join and for all pods to be running.
This might take a while ...
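Before moving on to the next controller, a quick sanity check (a sketch, assuming `kubectl` is already pointed at the cluster):

```bash
# controllers should be Ready and report the new kubelet version
kubectl get nodes -o wide
# nothing should be left in a non-Running / non-Completed state
kubectl get pods -A | grep -Ev 'Running|Completed' || true
```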
4. Migrate ArgoCD KubeZero config for your cluster:
```cat <cluster/env/kubezero/application.yaml> | ./release/v1.22/migrate_agro.py```
Adjust as needed...
4. Migrate ArgoCD KubeZero config:
`cat <cluster/env/kubezero/application.yaml> | ./release/v1.22/migrate_agro.py`, adjust as needed, then replace the original.
- git add / commit / push
- Watch ArgoCD do its work.
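For example (a sketch; the path is hypothetical and the script is assumed to print the migrated config to stdout):

```bash
# pipe the current config through the migration script, review the result, then replace the original
cat cluster/prod/kubezero/application.yaml | ./release/v1.22/migrate_agro.py > application.yaml.new
mv application.yaml.new cluster/prod/kubezero/application.yaml
```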
5. Upgrade via bootstrap.sh
As the changes around Istio are substantial in this release, we need to upgrade some parts step by step to prevent service outages, especially for private-ingress (see the combined sketch after the list below).
5. Replace worker nodes
- `./bootstrap.sh crds all <env>` to deploy all new CRDs first
- `./bootstrap.sh apply cert-manager <env>` to update cert-manager, required for Istio
- `./bootstrap.sh apply istio <env>` to update the Istio control plane
- `./bootstrap.sh apply istio-private-ingress <env>` to deploy the new private-ingress gateways first
- `./bootstrap.sh apply istio-ingress <env>` to update the public ingress and also remove the 1.21 private-ingress gateways
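Taken together, the sequence for step 5 looks roughly like this (a sketch; `prod` stands in for your `<env>`):

```bash
./bootstrap.sh crds all prod
./bootstrap.sh apply cert-manager prod
./bootstrap.sh apply istio prod
./bootstrap.sh apply istio-private-ingress prod
./bootstrap.sh apply istio-ingress prod
```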
6. Finalize via ArgoCD
git add / commit / push `<cluster/env/kubezero/application.yaml>` and watch ArgoCD do its work.
7. Replace worker nodes
E.g. by doubling `desired` for each worker ASG;
once all new workers have joined, drain the old workers one by one,
and finally reset `desired` for each worker ASG, which will terminate the old workers.
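One way this could look with the AWS CLI (a sketch; the ASG name and capacities are made up, and node draining follows standard kubectl usage):

```bash
# temporarily double the ASG so replacement workers can join
aws autoscaling set-desired-capacity --auto-scaling-group-name my-worker-asg --desired-capacity 6

# once the new workers are Ready, drain the old ones one by one
kubectl drain <old-node> --ignore-daemonsets --delete-emptydir-data

# reset to the original size; the ASG then terminates the drained old workers
aws autoscaling set-desired-capacity --auto-scaling-group-name my-worker-asg --desired-capacity 3
```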
@ -100,4 +109,5 @@ finally reset `desired` for each worker ASG which will terminate the old workers
on 1.21 nodes until the metrics module is upgraded, due to underlying OS changes
### Logging
- `logging-fluent-bit` will go into `CrashLoopBackoff` on 1.21 nodes, until logging module is upgraded, due to underlying OS changes
- `elastic-operator-0` might get stuck in `CrashLoopBackOff` until all of the controllers are updated, due to the CRD removals in 1.22
- `logging-fluent-bit` will go into `CrashLoopBackOff` or get stuck in `ContainerCreating` on 1.21 nodes until the logging module is upgraded, due to underlying OS changes
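To keep an eye on the affected pods during the transition (a sketch):

```bash
# list any fluent-bit or elastic-operator pods that are not (yet) healthy
kubectl get pods -A | grep -E 'logging-fluent-bit|elastic-operator' | grep -Ev 'Running|Completed' || true
```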

View File

@ -144,8 +144,7 @@ if [ "$1" == 'upgrade' ]; then
# Remove all remaining kiam
helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/
helm repo update
helm template uswitch/kiam --name-template kiam --set server.prometheus.servicemonitor.enabled=true --set agent.prometheus.servicemonitor.enabled=true |
kubectl delete --namespace kube-system -f - || true
helm template uswitch/kiam --name-template kiam --set server.deployment.enabled=true --set server.prometheus.servicemonitor.enabled=true --set agent.prometheus.servicemonitor.enabled=true | kubectl delete --namespace kube-system -f - || true
######################
# network
@ -318,7 +317,7 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
fi
# install / update network and addons
if [[ "$1" =~ "^(bootstrap)$" ]]; then
if [[ "$1" =~ "^(bootstrap|join)$" ]]; then
# network
yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \

View File

@ -15,7 +15,7 @@ parser = argparse.ArgumentParser(description="Update Route53 entries")
parser.add_argument(
"--version",
dest="version",
default="1.22.8-5",
default="1.22.8-6",
action="store",
required=False,
help="Update KubeZero version",