diff --git a/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml b/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml index 962df260..569ee792 100644 --- a/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml +++ b/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml @@ -134,7 +134,7 @@ spec: memory: 20Mi cpu: 10m limits: - memory: 20Mi + memory: 64Mi #cpu: 100m volumeMounts: diff --git a/releases/v1.22/README.md b/releases/v1.22/README.md index 1d96a312..2763049a 100644 --- a/releases/v1.22/README.md +++ b/releases/v1.22/README.md @@ -84,8 +84,6 @@ Might take a while ... 4. Migrate ArgoCD KubeZero config for your cluster: ```cat | ./release/v1.22/migrate_agro.py``` Adjust as needed... -If the ECK operator is running in your cluster make sure to replace the CRDs *BEFORE* committing the new kubezero config ! -```kubectl replace -f https://download.elastic.co/downloads/eck/2.1.0/crds.yaml``` - git add / commit / push - Watch ArgoCD do its work. @@ -95,4 +93,11 @@ Eg. by doubling `desired` for each worker ASG, once all new workers joined, drain old workers one by one, finally reset `desired` for each worker ASG which will terminate the old workers. 
-## Known issues \ No newline at end of file +## Known issues + +### Metrics +- `metrics-prometheus-node-exporter` will go into `CreateContainerError` +on 1.21 nodes until the metrics module is upgraded, due to underlying OS changes + +### Logging +- `logging-fluent-bit` will go into `CrashLoopBackOff` on 1.21 nodes, until the logging module is upgraded, due to underlying OS changes \ No newline at end of file diff --git a/releases/v1.22/upgrade_cluster.sh b/releases/v1.22/upgrade_cluster.sh index 43d75087..d07e4105 100755 --- a/releases/v1.22/upgrade_cluster.sh +++ b/releases/v1.22/upgrade_cluster.sh @@ -109,3 +109,12 @@ while true; do sleep 3 done kubectl delete pod kubezero-upgrade-${VERSION//.} -n kube-system + +# Now let's rolling-restart a bunch of DaemonSets to make sure they picked up the changes +for ds in calico-node kube-multus-ds kube-proxy ebs-csi-node; do + kubectl rollout restart daemonset/$ds -n kube-system + kubectl rollout status daemonset/$ds -n kube-system +done + +# Force replace the ECK CRDs +kubectl get crd elasticsearches.elasticsearch.k8s.elastic.co && kubectl replace -f https://download.elastic.co/downloads/eck/2.1.0/crds.yaml