feat: various re-org for 1.21

Stefan Reimer 2021-12-01 13:33:11 +01:00
parent 167f67abfa
commit 26b66016ad
13 changed files with 92 additions and 104 deletions

View File

@@ -1,7 +1,7 @@
 apiVersion: batch/v1
 kind: Job
 metadata:
-  name: kubezero-upgrade
+  name: kubezero-upgrade-v1.21.7
   namespace: kube-system
 spec:
   backoffLimit: 1

View File

@@ -22,12 +22,14 @@ Kubernetes: `>= 1.20.0`
 |-----|------|---------|-------------|
 | api.allEtcdEndpoints | string | `""` | |
 | api.apiAudiences | string | `"istio-ca"` | |
-| api.awsIamAuth | string | `"false"` | |
 | api.endpoint | string | `"kube-api.changeme.org:6443"` | |
 | api.extraArgs | object | `{}` | |
 | api.listenPort | int | `6443` | |
 | api.oidcEndpoint | string | `""` | s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
 | api.serviceAccountIssuer | string | `""` | https://s3.${REGION}.amazonaws.com/${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
+| awsIamAuth.enabled | bool | `false` | |
+| awsIamAuth.kubeAdminRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
+| awsIamAuth.workerNodeRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
 | backup.passwordFile | string | `""` | /etc/cloudbender/clusterBackup.passphrase |
 | backup.repository | string | `""` | s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup |
 | clusterName | string | `"pleasechangeme"` | |
@@ -35,13 +37,12 @@ Kubernetes: `>= 1.20.0`
 | etcd.extraArgs | object | `{}` | |
 | etcd.nodeName | string | `"set_via_cmdline"` | |
 | highAvailable | bool | `false` | |
-| kubeAdminRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
 | listenAddress | string | `"0.0.0.0"` | Needs to be set to primary node IP |
+| network.multus.enabled | bool | `true` | |
+| network.multus.tag | string | `"v3.8"` | |
 | nodeName | string | `"localhost"` | set to $HOSTNAME |
-| platform | string | `"aws"` | supported values aws,bare-metal |
 | protectKernelDefaults | bool | `true` | |
 | systemd | bool | `true` | Set to false for openrc, eg. on Gentoo or Alpine |
-| workerNodeRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
 
 ## Resources

View File

@@ -36,13 +36,13 @@ controllerManager:
     terminated-pod-gc-threshold: "300"
     # leader-elect: {{ .Values.highAvailable | quote }}
     logging-format: json
-    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
+    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
 scheduler:
   extraArgs:
     profiling: "false"
     # leader-elect: {{ .Values.highAvailable | quote }}
     logging-format: json
-    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
+    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
 apiServer:
   certSANs:
     - {{ regexSplit ":" .Values.api.endpoint -1 | first }}
@@ -62,10 +62,10 @@ apiServer:
     service-account-issuer: "{{ .Values.api.serviceAccountIssuer }}"
     service-account-jwks-uri: "{{ .Values.api.serviceAccountIssuer }}/openid/v1/jwks"
 {{- end }}
-{{- if eq .Values.platform "aws" }}
+{{- if .Values.api.awsIamAuth.enabled }}
     authentication-token-webhook-config-file: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
 {{- end }}
-    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
+    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
     enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit
     # {{- if .Values.highAvailable }}
     # goaway-chance: ".001"

View File

@@ -16,7 +16,7 @@ eventRecordQPS: 0
 # tlsCertFile: /var/lib/kubelet/pki/kubelet.crt
 # tlsPrivateKeyFile: /var/lib/kubelet/pki/kubelet.key
 tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256]
-featureGates: {{ include "kubeadm.featuregates" ( dict "return" "map" "platform" .Values.platform ) }}
+featureGates: {{ include "kubeadm.featuregates" ( dict "return" "map" ) }}
 # Minimal unit is 50m per pod
 podsPerCore: 20
 # cpuCFSQuotaPeriod: 10ms

View File

@@ -1,4 +1,4 @@
-{{- if eq .Values.platform "aws" }}
+{{- if .Values.api.awsIamAuth.enabled }}
 apiVersion: v1
 kind: Config
 clusters:
@@ -23,5 +23,5 @@ users:
       - "-i"
       - "{{ .Values.clusterName }}"
       - "-r"
-      - "{{ .Values.kubeAdminRole }}"
+      - "{{ .Values.api.awsIamAuth.kubeAdminRole }}"
 {{- end }}

View File

@@ -1,4 +1,4 @@
-{{- if eq .Values.platform "aws" }}
+{{- if .Values.api.awsIamAuth.enabled }}
 # clusters refers to the remote service.
 clusters:
   - name: aws-iam-authenticator

View File

@@ -1,4 +1,4 @@
-{{- if eq .Values.platform "aws" }}
+{{- if .Values.api.awsIamAuth.enabled }}
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:

View File

@@ -1,4 +1,4 @@
-{{- if eq .Values.platform "aws" }}
+{{- if .Values.api.awsIamAuth.enabled }}
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
@@ -106,9 +106,9 @@ spec:
       # run on the host network (don't depend on CNI)
       hostNetwork: true
-      # run on each master node
+      # run on each controller
       nodeSelector:
-        node-role.kubernetes.io/master: ""
+        node-role.kubernetes.io/control-plane: ""
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master

View File

@@ -1,11 +1,11 @@
-{{- if eq .Values.platform "aws" }}
+{{- if .Values.api.awsIamAuth.enabled }}
 # Controller role for consistency, similar to kubeadm admin.conf
 apiVersion: iamauthenticator.k8s.aws/v1alpha1
 kind: IAMIdentityMapping
 metadata:
   name: kubezero-worker-nodes
 spec:
-  arn: {{ .Values.workerNodeRole }}
+  arn: {{ .Values.api.awsIamAuth.workerNodeRole }}
   username: system:node:{{ "{{" }}EC2PrivateDNSName{{ "}}" }}
   groups:
   # For now use masters, define properly with 1.20
@@ -19,7 +19,7 @@ kind: IAMIdentityMapping
 metadata:
   name: kubernetes-admin
 spec:
-  arn: {{ .Values.kubeAdminRole }}
+  arn: {{ .Values.api.awsIamAuth.kubeAdminRole }}
   username: kubernetes-admin
   groups:
   - system:masters

View File

@@ -1,9 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
-  name: kubezero-backup-restic
-  namespace: kube-system
-type: Opaque
-data:
-  repository: {{ default "" .Values.backup.repository | b64enc }}
-  password: {{ default "" .Values.backup.password | b64enc }}

View File

@@ -1,53 +0,0 @@
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: kubezero-backup
-  namespace: kube-system
-spec:
-  schedule: "0 * * * *"
-  jobTemplate:
-    spec:
-      backoffLimit: 1
-      template:
-        spec:
-          containers:
-          - name: kubezero-admin
-            image: public.ecr.aws/zero-downtime/kubezero-admin:v{{ .Chart.Version }}
-            imagePullPolicy: Always
-            command: ["kubezero.sh"]
-            args:
-            - backup
-            volumeMounts:
-            - name: host
-              mountPath: /host
-            - name: workdir
-              mountPath: /tmp
-            env:
-            - name: DEBUG
-              value: "1"
-            - name: RESTIC_REPOSITORY
-              valueFrom:
-                secretKeyRef:
-                  name: kubezero-backup-restic
-                  key: repository
-            - name: RESTIC_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: kubezero-backup-restic
-                  key: password
-            #securityContext:
-            #  readOnlyRootFilesystem: true
-          hostNetwork: true
-          volumes:
-          - name: host
-            hostPath:
-              path: /
-              type: Directory
-          - name: workdir
-            emptyDir: {}
-          nodeSelector:
-            node-role.kubernetes.io/master: ""
-          tolerations:
-          - key: node-role.kubernetes.io/master
-            effect: NoSchedule
-          restartPolicy: Never

View File

@@ -16,26 +16,40 @@ api:
   # -- s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME
   oidcEndpoint: ""
   apiAudiences: "istio-ca"
-  awsIamAuth: "false"
-etcd:
-  nodeName: set_via_cmdline
-  extraArgs: {}
-backup:
+  awsIamAuth:
+    enabled: false
+    workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
+    kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
+addons:
+  aws-node-termination-handler:
+    enabled: false
+    # -- arn:aws:sqs:${REGION}:${AWS_ACCOUNT_ID}:${CLUSTERNAME}_Nth
+    queueURL: ""
+clusterBackup:
+  enabled: false
   # -- s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup
   repository: ""
   # -- /etc/cloudbender/clusterBackup.passphrase
   passwordFile: ""
+network:
+  multus:
+    enabled: false
+    tag: "v3.8"
+  cilium:
+    enabled: false
+  calico:
+    enabled: false
 highAvailable: false
-# -- supported values aws,bare-metal
-platform: "aws"
+etcd:
+  nodeName: set_via_cmdline
+  extraArgs: {}
 # -- Set to false for openrc, eg. on Gentoo or Alpine
 systemd: true
 protectKernelDefaults: true
-workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
-kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"

View File

@@ -55,10 +55,12 @@ render_kubeadm() {
 parse_kubezero() {
   [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; exit 1; }
 
+  KUBE_VERSION=$(kubeadm version -o yaml | yq eval .clientVersion.gitVersion -)
   CLUSTERNAME=$(yq eval '.clusterName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
   NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth // "true"' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
+  AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
+  AWS_NTH=$(yq eval '.addons.aws-node-termination-handler.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
 }
@@ -89,7 +91,9 @@ pre_kubeadm() {
 # Shared steps after calling kubeadm
 post_kubeadm() {
   # KubeZero resources
-  cat ${WORKDIR}/kubeadm/templates/resources/*.yaml | kubectl apply -f - $LOG
+  for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
+    kubectl apply -f $f $LOG
+  done
 
   # Patch coreDNS addon, ideally we prevent kubeadm to reset coreDNS to its defaults
   kubectl patch deployment coredns -n kube-system --patch-file ${WORKDIR}/kubeadm/templates/patches/coredns0.yaml $LOG
@@ -132,6 +136,23 @@ if [ "$1" == 'upgrade' ]; then
   ### POST 1.21 specific
   ######################
+  helm repo add kubezero https://cdn.zero-downtime.net/charts/
+
+  # if Calico, install multus to prepare migration
+  kubectl get ds calico-node -n kube-system && \
+    helm template kubezero/kubezero-network --version 0.1.0 --include-crds --namespace kube-system --kube-version $KUBE_VERSION --name-template network \
+      --set multus.enabled=true \
+      | kubectl apply -f - $LOG
+
+  # migrate backup
+  if [ -f ${HOSTFS}/usr/local/sbin/backup_control_plane.sh ]; then
+    _repo=$(grep "export RESTIC_REPOSITORY" ${HOSTFS}/usr/local/sbin/backup_control_plane.sh)
+    helm template kubezero/kubezero-addons --version 0.2.0 --include-crds --namespace kube-system --kube-version $KUBE_VERSION --name-template addons \
+      --set clusterBackup.enabled=true \
+      --set clusterBackup.repository="${_repo##*=}" \
+      --set clusterBackup.password="$(cat ${HOSTFS}/etc/kubernetes/clusterBackup.passphrase)" \
+      | kubectl apply -f - $LOG
+  fi
 
   ######################
@@ -147,7 +168,6 @@ if [ "$1" == 'upgrade' ]; then
   # Removed:
   # - update oidc do we need that ?
-  # - backup right after upgrade ... not so sure about that one
 
 elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
@@ -203,6 +223,21 @@ elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
     yq eval -M ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/ca.crt | base64 -w0)\"" ${WORKDIR}/kubeadm/templates/admin-aws-iam.yaml > ${HOSTFS}/etc/kubernetes/admin-aws-iam.yaml
   fi
 
+  # Install some basics on bootstrap
+  if [[ "$1" =~ "^(bootstrap)$" ]]; then
+    helm repo add kubezero https://cdn.zero-downtime.net/charts/
+
+    # network
+    yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
+    helm template kubezero/kubezero-network --version 0.1.0 --include-crds --namespace kube-system --name-template network \
+      -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply -f - $LOG
+
+    # addons
+    yq eval '.addons // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
+    helm template kubezero/kubezero-addons --version 0.2.0 --include-crds --namespace kube-system --name-template addons \
+      -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply -f - $LOG
+  fi
+
   post_kubeadm
 
   echo "${1} cluster $CLUSTERNAME successfull."