From 03bc91d6d9e4f4db43e2fec41c9797f60688db6f Mon Sep 17 00:00:00 2001
From: Stefan Reimer
Date: Wed, 8 Dec 2021 17:29:53 +0100
Subject: [PATCH] chore: migrate all nodelabel selectors from master to
 control-plane

---
 charts/kubezero-cert-manager/values.yaml      |  6 +++---
 .../istio-discovery/templates/deployment.yaml |  2 +-
 charts/kubezero-istio/values.yaml             |  2 +-
 charts/kubezero-istio/zdt.patch               |  2 +-
 charts/kubezero-logging/values.yaml           |  2 +-
 charts/kubezero-metrics/values.yaml           |  8 ++++----
 .../charts/calico/calico-v3.16.5.patch        |  2 +-
 .../charts/calico/templates/calico.yaml       |  2 +-
 containers/admin/v1.21/kubezero_121.sh        | 16 ++++++++++++++++
 scripts/publish.sh                            | 16 +++++++++-------
 10 files changed, 38 insertions(+), 20 deletions(-)
 create mode 100755 containers/admin/v1.21/kubezero_121.sh

diff --git a/charts/kubezero-cert-manager/values.yaml b/charts/kubezero-cert-manager/values.yaml
index 2742fe5f..27783cff 100644
--- a/charts/kubezero-cert-manager/values.yaml
+++ b/charts/kubezero-cert-manager/values.yaml
@@ -50,7 +50,7 @@ cert-manager:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
 
   ingressShim:
     defaultIssuerName: letsencrypt-dns-prod
@@ -61,14 +61,14 @@ cert-manager:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
 
   cainjector:
     tolerations:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
 
   extraArgs:
   - "--dns01-recursive-nameservers-only"
diff --git a/charts/kubezero-istio/charts/istio-discovery/templates/deployment.yaml b/charts/kubezero-istio/charts/istio-discovery/templates/deployment.yaml
index 7ca98c58..6e0320e2 100644
--- a/charts/kubezero-istio/charts/istio-discovery/templates/deployment.yaml
+++ b/charts/kubezero-istio/charts/istio-discovery/templates/deployment.yaml
@@ -61,7 +61,7 @@ spec:
       securityContext:
         fsGroup: 1337
       nodeSelector:
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master
diff --git a/charts/kubezero-istio/values.yaml b/charts/kubezero-istio/values.yaml
index bb63a4b3..532d8c78 100644
--- a/charts/kubezero-istio/values.yaml
+++ b/charts/kubezero-istio/values.yaml
@@ -16,7 +16,7 @@ istio-discovery:
 
   # Not implemented, monkey patched in the chart itself
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
   tolerations:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master
diff --git a/charts/kubezero-istio/zdt.patch b/charts/kubezero-istio/zdt.patch
index c7dda64d..6fcf246b 100644
--- a/charts/kubezero-istio/zdt.patch
+++ b/charts/kubezero-istio/zdt.patch
@@ -73,7 +73,7 @@ diff -tubr istio/manifests/charts/istio-control/istio-discovery/templates/deploy
        securityContext:
          fsGroup: 1337
 +      nodeSelector:
-+        node-role.kubernetes.io/master: ""
++        node-role.kubernetes.io/control-plane: ""
 +      tolerations:
 +      - effect: NoSchedule
 +        key: node-role.kubernetes.io/master
diff --git a/charts/kubezero-logging/values.yaml b/charts/kubezero-logging/values.yaml
index f49a39a0..4f2acdac 100644
--- a/charts/kubezero-logging/values.yaml
+++ b/charts/kubezero-logging/values.yaml
@@ -8,7 +8,7 @@ eck-operator:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
 
 # Version for ElasticSearch and Kibana have to match so we define it at top-level
 version: 7.13.4
diff --git a/charts/kubezero-metrics/values.yaml b/charts/kubezero-metrics/values.yaml
index b50ed98b..64532c49 100644
--- a/charts/kubezero-metrics/values.yaml
+++ b/charts/kubezero-metrics/values.yaml
@@ -57,7 +57,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
     resources:
       requests:
         cpu: 20m
@@ -71,7 +71,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
      effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
 
   nodeExporter:
     enabled: true
@@ -191,7 +191,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
 
   # Todo
   alertmanager:
@@ -276,7 +276,7 @@ prometheus-adapter:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
   # Basic rules for HPA to work replacing heaptster, taken from kube-prometheus project
   # https://github.com/coreos/kube-prometheus/blob/master/manifests/prometheus-adapter-configMap.yaml
   rules:
diff --git a/charts/kubezero-network/charts/calico/calico-v3.16.5.patch b/charts/kubezero-network/charts/calico/calico-v3.16.5.patch
index 0db98ff9..77ff576b 100644
--- a/charts/kubezero-network/charts/calico/calico-v3.16.5.patch
+++ b/charts/kubezero-network/charts/calico/calico-v3.16.5.patch
@@ -3344,7 +3344,7 @@
      spec:
        nodeSelector:
          kubernetes.io/os: linux
-+        node-role.kubernetes.io/master: ""
++        node-role.kubernetes.io/control-plane: ""
        tolerations:
          # Mark the pod as a critical add-on for rescheduling.
          - key: CriticalAddonsOnly
diff --git a/charts/kubezero-network/charts/calico/templates/calico.yaml b/charts/kubezero-network/charts/calico/templates/calico.yaml
index f61b003f..17159983 100644
--- a/charts/kubezero-network/charts/calico/templates/calico.yaml
+++ b/charts/kubezero-network/charts/calico/templates/calico.yaml
@@ -596,7 +596,7 @@ spec:
     spec:
       nodeSelector:
         kubernetes.io/os: linux
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
         # Mark the pod as a critical add-on for rescheduling.
         - key: CriticalAddonsOnly
diff --git a/containers/admin/v1.21/kubezero_121.sh b/containers/admin/v1.21/kubezero_121.sh
new file mode 100755
index 00000000..47116c71
--- /dev/null
+++ b/containers/admin/v1.21/kubezero_121.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -x
+
+# Allow EFS and EBS Argo apps to be deleted without removing things like storageClasses etc.
+# to be replaced by kubezero-storage
+kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+
+# Delete EBS and EFS Deployments and DaemonSets as we cannot change the labels while moving them to storage.
+# This will NOT affect provisioned volumes
+kubectl delete deployment ebs-csi-controller -n kube-system
+kubectl delete daemonSet ebs-csi-node -n kube-system
+kubectl delete statefulset ebs-snapshot-controller -n kube-system
+
+kubectl delete deployment efs-csi-controller -n kube-system
+kubectl delete daemonSet efs-csi-node -n kube-system
diff --git a/scripts/publish.sh b/scripts/publish.sh
index 4371fc7f..46b7e47e 100755
--- a/scripts/publish.sh
+++ b/scripts/publish.sh
@@ -18,12 +18,12 @@ do
     name=$(basename $dir)
     [[ $name =~ $CHARTS ]] || continue
 
-    if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
-    then
-      echo "Processing chart dependencies"
-      rm -rf $dir/tmpcharts
-      helm dependency update --skip-refresh $dir
-    fi
+    #if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
+    #then
+    #  echo "Processing chart dependencies"
+    #  rm -rf $dir/tmpcharts
+    #  helm dependency update --skip-refresh $dir
+    #fi
 
     echo "Processing $dir"
     helm lint $dir
@@ -34,7 +34,9 @@
 curl -L -s -o $TMPDIR/index.yaml ${REPO_URL}/index.yaml
 
 helm repo index $TMPDIR --url $REPO_URL --merge $TMPDIR/index.yaml
 
-aws s3 cp $TMPDIR/*.tgz $REPO_URL_S3/
+for p in $TMPDIR/*.tgz; do
+  aws s3 cp $p $REPO_URL_S3/
+done
 aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1
 rm -rf $TMPDIR
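
The nodeSelectors updated above only match nodes that carry the
node-role.kubernetes.io/control-plane label, so on a cluster whose former
master nodes have not been relabeled yet, these workloads would be left
unschedulable. A pre-flight check along the following lines can catch that
before the charts are upgraded; this is an illustrative sketch under that
assumption, not part of the change set above:

#!/bin/bash
# Illustrative pre-flight check (not part of this patch): warn about nodes
# that still only carry the legacy master label.
set -euo pipefail

for node in $(kubectl get nodes -l node-role.kubernetes.io/master -o name); do
  # --show-labels appends each node's full label set to the get output
  if ! kubectl get "$node" --show-labels --no-headers | grep -q 'node-role.kubernetes.io/control-plane'; then
    echo "WARNING: $node lacks node-role.kubernetes.io/control-plane"
    echo "  fix with: kubectl label $node node-role.kubernetes.io/control-plane="
  fi
done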