diff --git a/charts/kubeadm/templates/ClusterConfiguration.yaml b/charts/kubeadm/templates/ClusterConfiguration.yaml
index bde3b69c..0bfa0b84 100644
--- a/charts/kubeadm/templates/ClusterConfiguration.yaml
+++ b/charts/kubeadm/templates/ClusterConfiguration.yaml
@@ -12,7 +12,7 @@ etcd:
     extraArgs:
       listen-metrics-urls: "http://{{ .Values.listenAddress }}:2381"
       logger: "zap"
-      log-level: "warn"
+      # log-level: "warn"
 {{- with .Values.etcdExtraArgs }}
 {{- toYaml . | nindent 6 }}
 {{- end }}
@@ -50,7 +50,7 @@ apiServer:
 {{- end }}
     enable-admission-plugins: NodeRestriction,EventRateLimit
 {{- if .Values.clusterHighAvailable }}
-    goaway-chance: ".001"
+    # goaway-chance: ".001"
 {{- end }}
     logging-format: json
 {{- with .Values.apiExtraArgs }}
diff --git a/charts/kubeadm/templates/aws-iam-authenticator/mappings.yaml b/charts/kubeadm/templates/aws-iam-authenticator/mappings.yaml
index 42174443..70ba3341 100644
--- a/charts/kubeadm/templates/aws-iam-authenticator/mappings.yaml
+++ b/charts/kubeadm/templates/aws-iam-authenticator/mappings.yaml
@@ -3,25 +3,15 @@
 apiVersion: iamauthenticator.k8s.aws/v1alpha1
 kind: IAMIdentityMapping
 metadata:
-  name: kubezero-controllers
+  name: kubezero-worker-nodes
 spec:
-  arn: {{ .Values.ControllerIamRole }}
-  username: kubezero-controller
+  arn: {{ .Values.WorkerNodeRole }}
+  username: system:node:{{ "{{" }}EC2PrivateDNSName{{ "}}" }}
   groups:
+  # For now use masters, define properly with 1.20
   - system:masters
-
----
-# Worker role to eg. delete during terminate
-apiVersion: iamauthenticator.k8s.aws/v1alpha1
-kind: IAMIdentityMapping
-metadata:
-  name: kubezero-workers
-spec:
-  arn: {{ .Values.WorkerIamRole }}
-  username: kubezero-worker
-  groups:
-  - system:masters
-
+  - system:nodes
+  - system:bootstrappers
 ---
 # Admin Role for remote access
 apiVersion: iamauthenticator.k8s.aws/v1alpha1
diff --git a/charts/kubeadm/values.yaml b/charts/kubeadm/values.yaml
index fb274995..6798ff8b 100644
--- a/charts/kubeadm/values.yaml
+++ b/charts/kubeadm/values.yaml
@@ -11,3 +11,7 @@ platform: "aws"
 # Set to false for openrc, eg. on Gentoo or Alpine
 systemd: true
 protectKernelDefaults: true
+
+WorkerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
+WorkerIamRole: "arn:aws:iam::000000000000:role/KubernetesNode"
+KubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
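
A quick way to sanity-check the reworked worker-node mapping is to render the
kubeadm chart locally and inspect the generated IAMIdentityMapping. This is only
a sketch: the ARN is the dummy placeholder added to values.yaml above, the chart
path assumes you run it from the repository root, and additional --set overrides
may be needed depending on what other templates in the chart require.

    # Render only the aws-iam-authenticator mappings with a test WorkerNodeRole
    helm template charts/kubeadm \
      --set WorkerNodeRole="arn:aws:iam::000000000000:role/KubernetesNode" \
      -s templates/aws-iam-authenticator/mappings.yaml
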
diff --git a/charts/kubezero-aws-node-termination-handler/Chart.yaml b/charts/kubezero-aws-node-termination-handler/Chart.yaml
index 93f9bace..de337056 100644
--- a/charts/kubezero-aws-node-termination-handler/Chart.yaml
+++ b/charts/kubezero-aws-node-termination-handler/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-aws-node-termination-handler
 description: Umbrella chart for all KubeZero AWS addons
 type: application
-version: 0.1.0
+version: 0.1.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,6 +18,6 @@ dependencies:
     version: ">= 0.1.3"
     repository: https://zero-down-time.github.io/kubezero/
   - name: aws-node-termination-handler
-    version: ">= 0.13.3"
+    version: ">= 0.14.1"
     repository: https://aws.github.io/eks-charts
 kubeVersion: ">= 1.18.0"
diff --git a/charts/kubezero-calico/Chart.yaml b/charts/kubezero-calico/Chart.yaml
index f346a5f4..bb95e276 100644
--- a/charts/kubezero-calico/Chart.yaml
+++ b/charts/kubezero-calico/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-calico
 description: KubeZero Umbrella Chart for Calico
 type: application
-version: 0.2.1
+version: 0.2.2
 appVersion: v3.16.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
diff --git a/charts/kubezero-calico/templates/calico.yaml b/charts/kubezero-calico/templates/calico.yaml
index 9946764a..f61b003f 100644
--- a/charts/kubezero-calico/templates/calico.yaml
+++ b/charts/kubezero-calico/templates/calico.yaml
@@ -482,7 +482,7 @@ spec:
             privileged: true
           resources:
             requests:
-              cpu: 250m
+              cpu: 50m
           livenessProbe:
             exec:
               command:
@@ -619,6 +619,9 @@ spec:
               command:
               - /usr/bin/check-status
               - -r
+          resources:
+            requests:
+              cpu: 50m
 
 ---
 
diff --git a/charts/kubezero/bootstrap.sh b/charts/kubezero/bootstrap.sh
index f5384ec2..c1bf05eb 100755
--- a/charts/kubezero/bootstrap.sh
+++ b/charts/kubezero/bootstrap.sh
@@ -15,6 +15,7 @@ helm_version=$(helm version --short)
 echo $helm_version | grep -qe "^v3.[3-9]" || { echo "Helm version >= 3.3 required!"; exit 1; }
 
 TMPDIR=$(mktemp -d kubezero.XXX)
+[ -z "$DEBUG" ] && trap 'rm -rf $TMPDIR' ERR EXIT
 
 # First lets generate kubezero.yaml
 # Add all yaml files in $CLUSTER
@@ -226,5 +227,3 @@ elif [ $1 == "delete" ]; then
     is_enabled ${ARTIFACTS[idx]} && _helm delete ${ARTIFACTS[idx]}
   done
 fi
-
-[ "$DEBUG" == "" ] && rm -rf $TMPDIR
diff --git a/scripts/setClusterVersion.sh b/scripts/setClusterVersion.sh
new file mode 100755
index 00000000..f0252da4
--- /dev/null
+++ b/scripts/setClusterVersion.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# Meant for testing only !!!
+
+# This sets the Kubernetes version in SSM
+# Make sure your AWS profile and region point to the right account and region ...
+
+CONGLOMERATE=$1
+VERSION=$2
+
+aws ssm put-parameter --name /cloudbender/${CONGLOMERATE}/kubecontrol/meta/clusterversion --type SecureString --value "$(echo "$VERSION" | base64 -w0)" --overwrite
+
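
Because setClusterVersion.sh stores the version base64-encoded in a SecureString
parameter, reading it back requires decryption plus decoding. A minimal sketch,
assuming "mycluster" is the conglomerate name that was used when setting it:

    # Fetch and decode the cluster version written by setClusterVersion.sh
    aws ssm get-parameter --with-decryption \
      --name /cloudbender/mycluster/kubecontrol/meta/clusterversion \
      --query Parameter.Value --output text | base64 -d
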
diff --git a/scripts/updateWorkerIamRole.sh b/scripts/updateWorkerIamRole.sh
new file mode 100755
index 00000000..c15c11ea
--- /dev/null
+++ b/scripts/updateWorkerIamRole.sh
@@ -0,0 +1,26 @@
+#!/bin/bash -ex
+
+# Meant for testing only !!!
+
+# This updates the proxy IAM role with the actual worker ASGs' IAM roles
+# Make sure your AWS profile points to the right account
+
+REGION="$1"
+CLUSTER="$2"
+
+TMPDIR=$(mktemp -p /tmp -d kubezero.XXX)
+trap 'rm -rf $TMPDIR' ERR EXIT
+
+# Get the original assume-role policy
+aws iam get-role --output json --role-name $REGION-$CLUSTER-kube-workers | jq -c .Role.AssumeRolePolicyDocument > $TMPDIR/orig
+
+# Build the current and the new list of principals to include
+cat $TMPDIR/orig | jq -c .Statement[].Principal.AWS[] | sort | uniq > $TMPDIR/current-roles
+aws iam list-roles --output json --path-prefix /$REGION/$CLUSTER/nodes/ | jq -c .Roles[].Arn | sort | uniq > $TMPDIR/new-roles
+
+# Exit if there is no diff
+diff -tub $TMPDIR/current-roles $TMPDIR/new-roles && exit 0
+
+# Create the new policy
+jq -c '.Statement[].Principal.AWS = $roles' $TMPDIR/orig --slurpfile roles $TMPDIR/new-roles > $TMPDIR/new
+aws iam update-assume-role-policy --role-name $REGION-$CLUSTER-kube-workers --policy-document "$(cat $TMPDIR/new)"
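
For reference, a rough usage sketch of the script above; the region and cluster
name are placeholders, and the role name has to follow the
$REGION-$CLUSTER-kube-workers convention the script expects:

    # Sync the proxy role's trust policy with the current worker ASG roles,
    # then print the resulting policy for inspection
    ./scripts/updateWorkerIamRole.sh eu-central-1 test
    aws iam get-role --role-name eu-central-1-test-kube-workers \
      --query Role.AssumeRolePolicyDocument --output json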