Bugfixes for control plane, proper calico cpu requests

Stefan Reimer 2021-03-17 17:29:44 +01:00
parent de2602c3d5
commit 3f204b5e04
9 changed files with 58 additions and 24 deletions


@@ -12,7 +12,7 @@ etcd:
extraArgs:
listen-metrics-urls: "http://{{ .Values.listenAddress }}:2381"
logger: "zap"
log-level: "warn"
# log-level: "warn"
{{- with .Values.etcdExtraArgs }}
{{- toYaml . | nindent 6 }}
{{- end }}
@@ -50,7 +50,7 @@ apiServer:
{{- end }}
enable-admission-plugins: NodeRestriction,EventRateLimit
{{- if .Values.clusterHighAvailable }}
goaway-chance: ".001"
# goaway-chance: ".001"
{{- end }}
logging-format: json
{{- with .Values.apiExtraArgs }}


@@ -3,25 +3,15 @@
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
name: kubezero-controllers
name: kubezero-worker-nodes
spec:
arn: {{ .Values.ControllerIamRole }}
username: kubezero-controller
arn: {{ .Values.WorkerNodeRole }}
username: system:node:{{ "{{" }}EC2PrivateDNSName{{ "}}" }}
groups:
# For now use masters, define properly with 1.20
- system:masters
---
# Worker role to eg. delete during terminate
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
name: kubezero-workers
spec:
arn: {{ .Values.WorkerIamRole }}
username: kubezero-worker
groups:
- system:masters
- system:nodes
- system:bootstrappers
---
# Admin Role for remote access
apiVersion: iamauthenticator.k8s.aws/v1alpha1


@@ -11,3 +11,7 @@ platform: "aws"
# Set to false for openrc, eg. on Gentoo or Alpine
systemd: true
protectKernelDefaults: true
WorkerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
WorkerIamRole: "arn:aws:iam::000000000000:role/KubernetesNode"
KubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"


@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-aws-node-termination-handler
description: Umbrella chart for all KubeZero AWS addons
type: application
version: 0.1.0
version: 0.1.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -18,6 +18,6 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: aws-node-termination-handler
version: ">= 0.13.3"
version: ">= 0.14.1"
repository: https://aws.github.io/eks-charts
kubeVersion: ">= 1.18.0"
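
The dependency bump to aws-node-termination-handler >= 0.14.1 only touches Chart.yaml; the lock file and vendored chart still need a refresh before packaging. A minimal sketch, assuming the umbrella chart lives under charts/kubezero-aws-node-termination-handler in this repo and the eks-charts repository is not yet registered locally:
# Register the upstream repo (skip if already added), then re-resolve the ">= 0.14.1" constraint
helm repo add eks https://aws.github.io/eks-charts
helm dependency update charts/kubezero-aws-node-termination-handler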


@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-calico
description: KubeZero Umbrella Chart for Calico
type: application
version: 0.2.1
version: 0.2.2
appVersion: v3.16.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png


@@ -482,7 +482,7 @@ spec:
privileged: true
resources:
requests:
cpu: 250m
cpu: 50m
livenessProbe:
exec:
command:
@@ -619,6 +619,9 @@ spec:
command:
- /usr/bin/check-status
- -r
resources:
requests:
cpu: 50m
---
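
The calico-node CPU request drops from 250m to 50m, and the container probed with /usr/bin/check-status -r (calico-kube-controllers in the upstream manifest) now gets an explicit 50m request as well. A quick way to sanity-check those numbers against live usage, assuming metrics-server is installed and Calico runs in kube-system with the usual k8s-app labels:
# Per-container CPU/memory usage for the Calico pods
kubectl top pod -n kube-system -l k8s-app=calico-node --containers
kubectl top pod -n kube-system -l k8s-app=calico-kube-controllers --containers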


@@ -15,6 +15,7 @@ helm_version=$(helm version --short)
echo $helm_version | grep -qe "^v3.[3-9]" || { echo "Helm version >= 3.3 required!"; exit 1; }
TMPDIR=$(mktemp -d kubezero.XXX)
[ -z "$DEBUG" ] && trap 'rm -rf $TMPDIR' ERR EXIT
# First let's generate kubezero.yaml
# Add all yaml files in $CLUSTER
@@ -226,5 +227,3 @@ elif [ $1 == "delete" ]; then
is_enabled ${ARTIFACTS[idx]} && _helm delete ${ARTIFACTS[idx]}
done
fi
[ "$DEBUG" == "" ] && rm -rf $TMPDIR
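
The install script now moves the temp-dir cleanup from a one-shot rm at the very end into an ERR/EXIT trap, so the directory is removed even when a run fails early; with DEBUG set, it is kept in both the old and new version. A standalone sketch of the pattern, not the actual script:
#!/bin/bash
TMPDIR=$(mktemp -d kubezero.XXX)
# Install the cleanup trap only when not debugging; ERR/EXIT also covers early failures
[ -z "$DEBUG" ] && trap 'rm -rf $TMPDIR' ERR EXIT
echo "rendering into $TMPDIR"
# Running with DEBUG=1 leaves the temp dir in place for inspection.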

scripts/setClusterVersion.sh (new executable file, 12 lines added)

@@ -0,0 +1,12 @@
#!/bin/bash
# Meant for testing only !!!
# This sets the Kubernetes Version in SSM
# Make sure your AWS profile and region point in the right direction ...
CONGLOMERATE=$1
VERSION=$2
aws ssm put-parameter --name /cloudbender/${CONGLOMERATE}/kubecontrol/meta/clusterversion --type SecureString --value "$(echo "$VERSION" | base64 -w0)" --overwrite
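
A usage sketch for the new script; the conglomerate name and version below are placeholders, while the parameter path comes from the script itself. Because the value is stored base64-encoded in a SecureString, reading it back needs --with-decryption plus a decode:
./scripts/setClusterVersion.sh myconglomerate v1.19.8
aws ssm get-parameter --name /cloudbender/myconglomerate/kubecontrol/meta/clusterversion \
  --with-decryption --query Parameter.Value --output text | base64 -d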

scripts/updateWorkerIamRole.sh (new executable file, 26 lines added)

@@ -0,0 +1,26 @@
#!/bin/bash -ex
# Meant for testing only !!!
# This updates the proxy IAM role with the actual worker ASGs' IAM roles
# Make sure your AWS profile points to the right account
REGION="$1"
CLUSTER="$2"
TMPDIR=$(mktemp -p /tmp -d kubezero.XXX)
trap 'rm -rf $TMPDIR' ERR EXIT
# Get orig policy
aws iam get-role --output json --role-name $REGION-$CLUSTER-kube-workers | jq -c .Role.AssumeRolePolicyDocument > $TMPDIR/orig
# Build the current and new lists of principals to include
cat $TMPDIR/orig | jq -c .Statement[].Principal.AWS[] | sort | uniq > $TMPDIR/current-roles
aws iam list-roles --output json --path-prefix /$REGION/$CLUSTER/nodes/ | jq -c .Roles[].Arn | sort | uniq > $TMPDIR/new-roles
# If no diff exit
diff -tub $TMPDIR/current-roles $TMPDIR/new-roles && exit 0
# Create new policy
jq -c '.Statement[].Principal.AWS = $roles' $TMPDIR/orig --slurpfile roles $TMPDIR/new-roles > $TMPDIR/new
aws iam update-assume-role-policy --role-name $REGION-$CLUSTER-kube-workers --policy-document "$(cat $TMPDIR/new)"
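
And a usage sketch for updateWorkerIamRole.sh; region and cluster are placeholders, while the role name pattern $REGION-$CLUSTER-kube-workers is taken from the script above:
./scripts/updateWorkerIamRole.sh eu-central-1 mycluster
# Inspect the refreshed trust policy afterwards
aws iam get-role --role-name eu-central-1-mycluster-kube-workers \
  --query Role.AssumeRolePolicyDocument --output json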