chore: migrate all nodelabel selectors from master to control-plane

pull/49/head
Stefan Reimer 8 months ago
parent f1c29811ed
commit 4d9bb8e11d
  1. 6
      charts/kubezero-cert-manager/values.yaml
  2. 2
      charts/kubezero-istio/charts/istio-discovery/templates/deployment.yaml
  3. 2
      charts/kubezero-istio/values.yaml
  4. 2
      charts/kubezero-istio/zdt.patch
  5. 2
      charts/kubezero-logging/values.yaml
  6. 8
      charts/kubezero-metrics/values.yaml
  7. 2
      charts/kubezero-network/charts/calico/calico-v3.16.5.patch
  8. 2
      charts/kubezero-network/charts/calico/templates/calico.yaml
  9. 16
      containers/admin/v1.21/kubezero_121.sh
  10. 16
      scripts/publish.sh

@@ -50,7 +50,7 @@ cert-manager:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
ingressShim:
defaultIssuerName: letsencrypt-dns-prod
@@ -61,14 +61,14 @@ cert-manager:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
cainjector:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
extraArgs:
- "--dns01-recursive-nameservers-only"

@@ -61,7 +61,7 @@ spec:
securityContext:
fsGroup: 1337
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master

@@ -16,7 +16,7 @@ istio-discovery:
# Not implemented, monkey patched in the chart itself
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master

@@ -73,7 +73,7 @@ diff -tubr istio/manifests/charts/istio-control/istio-discovery/templates/deploy
securityContext:
fsGroup: 1337
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ node-role.kubernetes.io/control-plane: ""
+ tolerations:
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/master

@@ -8,7 +8,7 @@ eck-operator:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
# Version for ElasticSearch and Kibana have to match so we define it at top-level
version: 7.13.4

@@ -57,7 +57,7 @@ kube-prometheus-stack:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
resources:
requests:
cpu: 20m
@@ -71,7 +71,7 @@ kube-prometheus-stack:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
nodeExporter:
enabled: true
@@ -191,7 +191,7 @@ kube-prometheus-stack:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
# Todo
alertmanager:
@@ -276,7 +276,7 @@ prometheus-adapter:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
# Basic rules for HPA to work replacing heapster, taken from kube-prometheus project
# https://github.com/coreos/kube-prometheus/blob/master/manifests/prometheus-adapter-configMap.yaml
rules:

@@ -3344,7 +3344,7 @@
spec:
nodeSelector:
kubernetes.io/os: linux
+ node-role.kubernetes.io/master: ""
+ node-role.kubernetes.io/control-plane: ""
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly

@@ -596,7 +596,7 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly

@@ -0,0 +1,16 @@
#!/bin/bash
set -x
# Allow EFS and EBS Argo apps to be deleted without removing things like storageClasses etc.
# to be replaced by kubezero-storage
kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
# Delete EBS and EFS Deployments and Daemonsets as we cannot change the labels while moving them to storage.
# This will NOT affect provisioned volumes
kubectl delete deployment ebs-csi-controller -n kube-system
kubectl delete daemonSet ebs-csi-node -n kube-system
kubectl delete statefulset ebs-snapshot-controller -n kube-system
kubectl delete deployment efs-csi-controller -n kube-system
kubectl delete daemonSet efs-csi-node -n kube-system

@@ -18,12 +18,12 @@ do
name=$(basename $dir)
[[ $name =~ $CHARTS ]] || continue
if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
then
echo "Processing chart dependencies"
rm -rf $dir/tmpcharts
helm dependency update --skip-refresh $dir
fi
#if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
#then
# echo "Processing chart dependencies"
# rm -rf $dir/tmpcharts
# helm dependency update --skip-refresh $dir
#fi
echo "Processing $dir"
helm lint $dir
@@ -34,7 +34,9 @@ curl -L -s -o $TMPDIR/index.yaml ${REPO_URL}/index.yaml
helm repo index $TMPDIR --url $REPO_URL --merge $TMPDIR/index.yaml
aws s3 cp $TMPDIR/*.tgz $REPO_URL_S3/
for p in $TMPDIR/*.tgz; do
aws s3 cp $p $REPO_URL_S3/
done
aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1
rm -rf $TMPDIR

Loading…
Cancel
Save