chore: migrate all node label selectors from master to control-plane

Stefan Reimer 2021-12-08 17:29:53 +01:00
parent f1c29811ed
commit 4d9bb8e11d
10 changed files with 38 additions and 20 deletions
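
For context: Kubernetes deprecated the node-role.kubernetes.io/master label in favor of node-role.kubernetes.io/control-plane, and newer kubeadm releases apply only the control-plane variant, so selectors pinned to the old label stop matching. A minimal pre-flight sketch (illustrative, not part of this commit; assumes kubectl access to the target cluster):

kubectl get nodes -L node-role.kubernetes.io/master -L node-role.kubernetes.io/control-plane
# If a node still carries only the old label, the new one can be added by hand
# so pods keep scheduling while the updated charts are rolled out:
# kubectl label node <node-name> node-role.kubernetes.io/control-plane=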

View File

@@ -50,7 +50,7 @@ cert-manager:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
   ingressShim:
     defaultIssuerName: letsencrypt-dns-prod
@@ -61,14 +61,14 @@ cert-manager:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
   cainjector:
     tolerations:
     - key: node-role.kubernetes.io/master
      effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
   extraArgs:
   - "--dns01-recursive-nameservers-only"

View File

@@ -61,7 +61,7 @@ spec:
       securityContext:
         fsGroup: 1337
       nodeSelector:
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master

View File

@@ -16,7 +16,7 @@ istio-discovery:
     # Not implemented, monkey patched in the chart itself
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
     tolerations:
     - effect: NoSchedule
       key: node-role.kubernetes.io/master

View File

@@ -73,7 +73,7 @@ diff -tubr istio/manifests/charts/istio-control/istio-discovery/templates/deploy
        securityContext:
          fsGroup: 1337
+      nodeSelector:
-+        node-role.kubernetes.io/master: ""
++        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master

View File

@@ -8,7 +8,7 @@ eck-operator:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
 # The ElasticSearch and Kibana versions have to match, so we define it once at top-level
 version: 7.13.4

View File

@@ -57,7 +57,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
     resources:
       requests:
         cpu: 20m
@@ -71,7 +71,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
   nodeExporter:
     enabled: true
@@ -191,7 +191,7 @@ kube-prometheus-stack:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
     nodeSelector:
-      node-role.kubernetes.io/master: ""
+      node-role.kubernetes.io/control-plane: ""
   # Todo
   alertmanager:
@@ -276,7 +276,7 @@ prometheus-adapter:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
   nodeSelector:
-    node-role.kubernetes.io/master: ""
+    node-role.kubernetes.io/control-plane: ""
   # Basic rules for HPA to work replacing Heapster, taken from the kube-prometheus project
   # https://github.com/coreos/kube-prometheus/blob/master/manifests/prometheus-adapter-configMap.yaml
   rules:

View File

@@ -3344,7 +3344,7 @@
     spec:
       nodeSelector:
         kubernetes.io/os: linux
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
       # Mark the pod as a critical add-on for rescheduling.
       - key: CriticalAddonsOnly

View File

@@ -596,7 +596,7 @@ spec:
     spec:
       nodeSelector:
         kubernetes.io/os: linux
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
       # Mark the pod as a critical add-on for rescheduling.
       - key: CriticalAddonsOnly

View File

@@ -0,0 +1,16 @@
+#!/bin/bash
+set -x
+
+# Allow the EFS and EBS Argo apps to be deleted without removing things like storageClasses etc.,
+# as they are being replaced by kubezero-storage
+kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+
+# Delete the EBS and EFS Deployments and DaemonSets, as we cannot change their labels while moving them to storage.
+# This will NOT affect provisioned volumes.
+kubectl delete deployment ebs-csi-controller -n kube-system
+kubectl delete daemonset ebs-csi-node -n kube-system
+kubectl delete statefulset ebs-snapshot-controller -n kube-system
+kubectl delete deployment efs-csi-controller -n kube-system
+kubectl delete daemonset efs-csi-node -n kube-system
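
Argo CD's resources-finalizer.argocd.argoproj.io finalizer is what makes an Application delete cascade to its child resources, which is why the script strips the finalizers before anything is deleted. An illustrative check (not part of this commit) to confirm the patches took effect:

kubectl get application aws-ebs-csi-driver -n argocd -o jsonpath='{.metadata.finalizers}'
kubectl get application aws-efs-csi-driver -n argocd -o jsonpath='{.metadata.finalizers}'
# Both commands should print nothing once the finalizers are gone.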

View File

@@ -18,12 +18,12 @@ do
   name=$(basename $dir)
   [[ $name =~ $CHARTS ]] || continue
-  if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
-  then
-    echo "Processing chart dependencies"
-    rm -rf $dir/tmpcharts
-    helm dependency update --skip-refresh $dir
-  fi
+  #if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
+  #then
+  #  echo "Processing chart dependencies"
+  #  rm -rf $dir/tmpcharts
+  #  helm dependency update --skip-refresh $dir
+  #fi
   echo "Processing $dir"
   helm lint $dir
@@ -34,7 +34,9 @@ curl -L -s -o $TMPDIR/index.yaml ${REPO_URL}/index.yaml
 helm repo index $TMPDIR --url $REPO_URL --merge $TMPDIR/index.yaml
-aws s3 cp $TMPDIR/*.tgz $REPO_URL_S3/
+for p in $TMPDIR/*.tgz; do
+  aws s3 cp $p $REPO_URL_S3/
+done
 aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1
 rm -rf $TMPDIR
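
The loop likely replaces the single cp because aws s3 cp takes exactly one source argument, so a shell glob expanding to several .tgz packages breaks the old form. An equivalent untested one-liner that pushes the filtering into the AWS CLI instead:

aws s3 cp "$TMPDIR" "$REPO_URL_S3/" --recursive --exclude "*" --include "*.tgz"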