chore: migrate all node label selectors from master to control-plane

Stefan Reimer 2021-12-08 17:29:53 +01:00
parent d33856f0d2
commit 03bc91d6d9
10 changed files with 38 additions and 20 deletions
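
Before rolling this out it is worth confirming that every control-plane node actually carries the new label, since the updated nodeSelectors below only match nodes labelled with node-role.kubernetes.io/control-plane. A minimal check, assuming nothing beyond a working kubeconfig:

    kubectl get nodes -l node-role.kubernetes.io/control-plane   # nodes the new selectors will match
    kubectl get nodes -l node-role.kubernetes.io/master           # nodes still only carrying the legacy label

Note that the tolerations in the values below still reference the node-role.kubernetes.io/master taint key; only the nodeSelector labels change in this commit.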

View File

@@ -50,7 +50,7 @@ cert-manager:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""

   ingressShim:
     defaultIssuerName: letsencrypt-dns-prod
@@ -61,14 +61,14 @@ cert-manager:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""

   cainjector:
     tolerations:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""

   extraArgs:
   - "--dns01-recursive-nameservers-only"

View File

@@ -61,7 +61,7 @@ spec:
       securityContext:
         fsGroup: 1337
       nodeSelector:
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master

View File

@@ -16,7 +16,7 @@ istio-discovery:
   # Not implemented, monkey patched in the chart itself
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
   tolerations:
   - effect: NoSchedule
     key: node-role.kubernetes.io/master

View File

@@ -73,7 +73,7 @@ diff -tubr istio/manifests/charts/istio-control/istio-discovery/templates/deploy
        securityContext:
          fsGroup: 1337
 +      nodeSelector:
-+        node-role.kubernetes.io/master: ""
++        node-role.kubernetes.io/control-plane: ""
 +      tolerations:
 +      - effect: NoSchedule
 +        key: node-role.kubernetes.io/master
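
This hunk updates the patch that injects nodeSelector and tolerations into the upstream istio-discovery deployment template (the "monkey patch" referenced in the values above, since the upstream chart does not expose these scheduling fields). As a rough sketch of how such a patch could be re-applied after a chart bump — the patch file name and chart directory here are assumptions, not taken from this commit:

    patch -p0 --dry-run -d charts/kubezero-istio -i istio-discovery.patch   # verify the hunks still apply
    patch -p0 -d charts/kubezero-istio -i istio-discovery.patch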

View File

@@ -8,7 +8,7 @@ eck-operator:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""

 # Version for ElasticSearch and Kibana have to match so we define it at top-level
 version: 7.13.4

View File

@@ -57,7 +57,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
     resources:
       requests:
         cpu: 20m
@@ -71,7 +71,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""

   nodeExporter:
     enabled: true
@@ -191,7 +191,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""

   # Todo
   alertmanager:
@@ -276,7 +276,7 @@ prometheus-adapter:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
   # Basic rules for HPA to work replacing heaptster, taken from kube-prometheus project
   # https://github.com/coreos/kube-prometheus/blob/master/manifests/prometheus-adapter-configMap.yaml
   rules:

View File

@@ -3344,7 +3344,7 @@
     spec:
       nodeSelector:
         kubernetes.io/os: linux
-+       node-role.kubernetes.io/master: ""
++       node-role.kubernetes.io/control-plane: ""
       tolerations:
         # Mark the pod as a critical add-on for rescheduling.
         - key: CriticalAddonsOnly

View File

@@ -596,7 +596,7 @@ spec:
     spec:
       nodeSelector:
         kubernetes.io/os: linux
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
         # Mark the pod as a critical add-on for rescheduling.
         - key: CriticalAddonsOnly

View File

@@ -0,0 +1,16 @@
+#!/bin/bash
+set -x
+
+# Allow EFS and EBS Argo apps to be deleted without removing things like storageClasses etc.
+# to be replaced by kubezero-storage
+kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+
+# Delete EBS and EFS Deployments and Daemonsets as we cannot change the labels while moving them to storage.
+# This will NOT affect provisioned volumes
+kubectl delete deployment ebs-csi-controller -n kube-system
+kubectl delete daemonSet ebs-csi-node -n kube-system
+kubectl delete statefulset ebs-snapshot-controller -n kube-system
+kubectl delete deployment efs-csi-controller -n kube-system
+kubectl delete daemonSet efs-csi-node -n kube-system
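
The two kubectl patch calls strip the finalizers from the Argo CD Application objects so the apps can be deleted without Argo CD cascading the deletion to the already-deployed resources (storage classes, CSI drivers, etc.). A small sanity check that the patch took effect — an empty result means no finalizer is left:

    kubectl get application aws-ebs-csi-driver -n argocd -o jsonpath='{.metadata.finalizers}'
    kubectl get application aws-efs-csi-driver -n argocd -o jsonpath='{.metadata.finalizers}'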

View File

@ -18,12 +18,12 @@ do
name=$(basename $dir) name=$(basename $dir)
[[ $name =~ $CHARTS ]] || continue [[ $name =~ $CHARTS ]] || continue
if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ] #if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
then #then
echo "Processing chart dependencies" # echo "Processing chart dependencies"
rm -rf $dir/tmpcharts # rm -rf $dir/tmpcharts
helm dependency update --skip-refresh $dir # helm dependency update --skip-refresh $dir
fi #fi
echo "Processing $dir" echo "Processing $dir"
helm lint $dir helm lint $dir
@@ -34,7 +34,9 @@ curl -L -s -o $TMPDIR/index.yaml ${REPO_URL}/index.yaml
 helm repo index $TMPDIR --url $REPO_URL --merge $TMPDIR/index.yaml

-aws s3 cp $TMPDIR/*.tgz $REPO_URL_S3/
+for p in $TMPDIR/*.tgz; do
+  aws s3 cp $p $REPO_URL_S3/
+done

 aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1

 rm -rf $TMPDIR
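
The new for loop uploads the packaged charts one at a time, presumably because aws s3 cp only accepts a single local source per invocation, so a shell glob expanding to more than one .tgz would fail. An equivalent single-call variant would use the CLI's own filtering instead of shell globbing (same $TMPDIR and $REPO_URL_S3 variables as in the script):

    aws s3 cp "$TMPDIR/" "$REPO_URL_S3/" --recursive --exclude "*" --include "*.tgz"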