Kubeadm chart for 1.19, improved tooling

This commit is contained in:
Stefan Reimer 2021-02-12 11:04:16 +00:00
parent 257d6f5e9c
commit 4fded1b668
11 changed files with 124 additions and 37 deletions

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubeadm
description: KubeZero Kubeadm golden config
type: application
version: 1.18.14
version: 1.19.7
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -17,6 +17,14 @@ Installs the Istio control plane
{{ template "chart.valuesSection" . }}
## Changes for 1.19
### Logging to json of control plane components
- https://github.com/kubernetes/website/blob/dev-1.19/content/en/docs/concepts/cluster-administration/system-logs.md
### PodTopologySpread
- https://kubernetes.io/blog/2020/05/introducing-podtopologyspread/#podtopologyspread-defaults
## Resources
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/

View File

@ -11,6 +11,8 @@ etcd:
local:
extraArgs:
listen-metrics-urls: "http://0.0.0.0:2381"
unsafe-no-fsync: "true"
logger: "zap"
{{- with .Values.etcdExtraArgs }}
{{- toYaml . | nindent 6 }}
{{- end }}
@ -20,20 +22,20 @@ controllerManager:
bind-address: 0.0.0.0
terminated-pod-gc-threshold: "300"
leader-elect: {{ .Values.clusterHighAvailable | quote }}
# Default anyways but make kube-bench happy
feature-gates: "RotateKubeletServerCertificate=true"
logging-format: json
scheduler:
extraArgs:
profiling: "false"
bind-address: 0.0.0.0
leader-elect: {{ .Values.clusterHighAvailable | quote }}
logging-format: json
apiServer:
certSANs:
- {{ regexSplit ":" .Values.apiEndpoint -1 | first }}
extraArgs:
etcd-servers: {{ .Values.allEtcdEndpoints }}
profiling: "false"
feature-gates: "CSIMigration=true,CSIMigrationAWS=true,CSIMigrationAWSComplete=true"
feature-gates: "CSIMigrationAWS=true,CSIMigrationAWSComplete=true,DefaultPodTopologySpread=true"
audit-log-path: "/var/log/kubernetes/audit.log"
audit-policy-file: /etc/kubernetes/apiserver/audit-policy.yaml
audit-log-maxage: "7"
@ -46,6 +48,7 @@ apiServer:
{{- if .Values.clusterHighAvailable }}
goaway-chance: ".001"
{{- end }}
logging-format: json
{{- with .Values.apiExtraArgs }}
{{- toYaml . | nindent 4 }}
{{- end }}

View File

@ -1,18 +1,15 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
metadata:
name: kubezero-joinconfiguration
discovery:
bootstrapToken:
apiServerEndpoint: {{ .Values.apiEndpoint }}
token: {{ .Values.joinToken }}
caCertHashes:
- "{{ .Values.caCertHash }}"
file:
kubeConfigPath: /root/.kube/config
controlPlane:
localAPIEndpoint:
advertiseAddress: IP_ADDRESS
bindPort: {{ regexSplit ":" .Values.apiEndpoint -1 | last }}
nodeRegistration:
ignorePreflightErrors:
- DirAvailable--var-lib-etcd
- Swap
controlPlane:
localAPIEndpoint:
advertiseAddress: {{ .Values.ipAddress }}
bindPort: {{ regexSplit ":" .Values.apiEndpoint -1 | last }}
kubeletExtraArgs:
node-labels: {{ .Values.nodeLabels | quote }}

View File

@ -3,6 +3,9 @@ kind: KubeletConfiguration
metadata:
name: kubezero-kubeletconfiguration
failSwapOn: false
cgroupDriver: cgroupfs
logging:
format: json
hairpinMode: hairpin-veth
resolvConf: /run/systemd/resolve/resolv.conf
protectKernelDefaults: true
@ -12,8 +15,5 @@ eventRecordQPS: 0
# tlsPrivateKeyFile: /var/lib/kubelet/pki/kubelet.key
tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256]
featureGates:
# Default anyways but make kube-bench happy
RotateKubeletServerCertificate: true
CSIMigration: true
CSIMigrationAWS: true
CSIMigrationAWSComplete: true

View File

@ -0,0 +1,25 @@
# Helm template for the cluster-admin kubeconfig.
# Authenticates as the well-known "kubernetes-admin" user via an
# aws-iam-authenticator exec credential plugin, assuming the admin IAM role.
apiVersion: v1
kind: Config
clusters:
  - cluster:
      # API endpoint is host:port (see .Values.apiEndpoint), scheme added here
      server: https://{{ .Values.apiEndpoint }}
    name: {{ .Values.clusterName }}
contexts:
  - context:
      cluster: {{ .Values.clusterName }}
      user: kubernetes-admin
    name: kubernetes-admin@{{ .Values.clusterName }}
current-context: kubernetes-admin@{{ .Values.clusterName }}
preferences: {}
users:
  - name: kubernetes-admin
    user:
      exec:
        # NOTE(review): v1alpha1 of the client auth API is deprecated in newer
        # Kubernetes releases — confirm the authenticator version in use still
        # requires it before bumping to v1beta1.
        apiVersion: client.authentication.k8s.io/v1alpha1
        command: aws-iam-authenticator
        args:
          - "token"
          - "-i"
          - "{{ .Values.clusterName }}"
          # Assume the admin role rather than the caller's own identity
          - "-r"
          - "{{ .Values.kubeAdminRole }}"

View File

@ -51,7 +51,7 @@ metadata:
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aws-iam-authenticator
namespace: kube-system
@ -151,14 +151,3 @@ spec:
- name: state
hostPath:
path: /var/aws-iam-authenticator/
---
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
name: kubernetes-admin
spec:
# Arn of the User or Role to be allowed to authenticate
arn: {{ .Values.kubeAdminRole }}
username: kubernetes-admin
groups:
- system:masters

View File

@ -0,0 +1,34 @@
# IAM-to-Kubernetes identity mappings consumed by aws-iam-authenticator.
# All three map into system:masters, i.e. full cluster-admin — the controller
# and worker node roles as well as the remote admin role.
# Controller role which is more or less cluster-admin once enrolled
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
  name: kubezero-controllers
spec:
  arn: {{ .Values.ControllerIamRole }}
  username: kubezero-controller
  groups:
    - system:masters
---
# Worker role to eg. delete former self etc.
# NOTE(review): system:masters for workers is very broad for that purpose —
# consider a narrower RBAC group; confirm before tightening.
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
  name: kubezero-workers
spec:
  arn: {{ .Values.WorkerIamRole }}
  username: kubezero-worker
  groups:
    - system:masters
---
# Admin Role for remote access (matches the -r role in the admin kubeconfig)
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
  name: kubernetes-admin
spec:
  arn: {{ .Values.kubeAdminRole }}
  username: kubernetes-admin
  groups:
    - system:masters

View File

@ -0,0 +1,30 @@
# DaemonSet running the FUSE device plugin on every node so pods can request
# /dev/fuse without privileged mode.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fuse-device-plugin
  namespace: kube-system
spec:
  selector:
    matchLabels:
      name: fuse-device-plugin
  template:
    metadata:
      labels:
        name: fuse-device-plugin
    spec:
      hostNetwork: true
      containers:
        - image: public.ecr.aws/zero-downtime/fuse-device-plugin:v1.0
          # imagePullPolicy: Always
          name: fuse-device-plugin
          # Hardened: no privilege escalation, all capabilities dropped
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop: ["ALL"]
          volumeMounts:
            # Device plugins register with the kubelet via a socket in this dir
            - name: device-plugin
              mountPath: /var/lib/kubelet/device-plugins
      volumes:
        - name: device-plugin
          hostPath:
            path: /var/lib/kubelet/device-plugins

View File

@ -1,11 +1,7 @@
clusterVersion: 1.18.0
clusterVersion: 1.19.0
clusterName: pleasechangeme
apiEndpoint: kube-api.changeme.org:6443
etcdExtraArgs: {}
# Enable for > 1.18
# unsafe-no-fsync: "true"
apiExtraArgs: {}
clusterHighAvailable: false
allEtcdEndpoints: ""
joinToken: ""
caCertHash: ""

View File

@ -2,12 +2,13 @@
set -eu
CHARTS=${1:-'.*'}
FORCE=${2:-''}
# all credits go to the argoproj Helm guys https://github.com/argoproj/argo-helm
SRCROOT="$(cd "$(dirname "$0")/.." && pwd)"
GIT_PUSH=${GIT_PUSH:-true}
[ "$(git branch --show-current)" == "stable" ] || { echo "Helm packages should only be built from stable branch !"; exit 1; }
[[ "$(git branch --show-current)" == "stable" || -n "$FORCE" ]] || { echo "Helm packages should only be built from stable branch !"; exit 1; }
TMPDIR=$(mktemp -d kubezero-repo.XXX)
mkdir -p $TMPDIR/stage
@ -38,7 +39,11 @@ do
done
# Do NOT overwrite existing charts
cp -n $TMPDIR/stage/*.tgz $TMPDIR/repo
if [ -n "$FORCE" ]; then
cp $TMPDIR/stage/*.tgz $TMPDIR/repo
else
cp -n $TMPDIR/stage/*.tgz $TMPDIR/repo
fi
cd $TMPDIR/repo