feat: various re-org for 1.21

This commit is contained in:
Stefan Reimer 2021-12-01 13:33:11 +01:00
parent da9f722baa
commit a6d67d2b13
13 changed files with 92 additions and 104 deletions

View File

@ -1,7 +1,7 @@
apiVersion: batch/v1
kind: Job
metadata:
name: kubezero-upgrade
name: kubezero-upgrade-v1.21.7
namespace: kube-system
spec:
backoffLimit: 1

View File

@ -22,12 +22,14 @@ Kubernetes: `>= 1.20.0`
|-----|------|---------|-------------|
| api.allEtcdEndpoints | string | `""` | |
| api.apiAudiences | string | `"istio-ca"` | |
| api.awsIamAuth | string | `"false"` | |
| api.endpoint | string | `"kube-api.changeme.org:6443"` | |
| api.extraArgs | object | `{}` | |
| api.listenPort | int | `6443` | |
| api.oidcEndpoint | string | `""` | s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
| api.serviceAccountIssuer | string | `""` | https://s3.${REGION}.amazonaws.com/${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
| awsIamAuth.enabled | bool | `false` | |
| awsIamAuth.kubeAdminRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
| awsIamAuth.workerNodeRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
| backup.passwordFile | string | `""` | /etc/cloudbender/clusterBackup.passphrase |
| backup.repository | string | `""` | s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup |
| clusterName | string | `"pleasechangeme"` | |
@ -35,13 +37,12 @@ Kubernetes: `>= 1.20.0`
| etcd.extraArgs | object | `{}` | |
| etcd.nodeName | string | `"set_via_cmdline"` | |
| highAvailable | bool | `false` | |
| kubeAdminRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
| listenAddress | string | `"0.0.0.0"` | Needs to be set to primary node IP |
| network.multus.enabled | bool | `true` | |
| network.multus.tag | string | `"v3.8"` | |
| nodeName | string | `"localhost"` | set to $HOSTNAME |
| platform | string | `"aws"` | supported values aws,bare-metal |
| protectKernelDefaults | bool | `true` | |
| systemd | bool | `true` | Set to false for openrc, eg. on Gentoo or Alpine |
| workerNodeRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
## Resources

View File

@ -36,13 +36,13 @@ controllerManager:
terminated-pod-gc-threshold: "300"
# leader-elect: {{ .Values.highAvailable | quote }}
logging-format: json
feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
scheduler:
extraArgs:
profiling: "false"
# leader-elect: {{ .Values.highAvailable | quote }}
logging-format: json
feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
apiServer:
certSANs:
- {{ regexSplit ":" .Values.api.endpoint -1 | first }}
@ -62,10 +62,10 @@ apiServer:
service-account-issuer: "{{ .Values.api.serviceAccountIssuer }}"
service-account-jwks-uri: "{{ .Values.api.serviceAccountIssuer }}/openid/v1/jwks"
{{- end }}
{{- if eq .Values.platform "aws" }}
{{- if .Values.api.awsIamAuth.enabled }}
authentication-token-webhook-config-file: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
{{- end }}
feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit
# {{- if .Values.highAvailable }}
# goaway-chance: ".001"

View File

@ -16,7 +16,7 @@ eventRecordQPS: 0
# tlsCertFile: /var/lib/kubelet/pki/kubelet.crt
# tlsPrivateKeyFile: /var/lib/kubelet/pki/kubelet.key
tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256]
featureGates: {{ include "kubeadm.featuregates" ( dict "return" "map" "platform" .Values.platform ) }}
featureGates: {{ include "kubeadm.featuregates" ( dict "return" "map" ) }}
# Minimal unit is 50m per pod
podsPerCore: 20
# cpuCFSQuotaPeriod: 10ms

View File

@ -1,4 +1,4 @@
{{- if eq .Values.platform "aws" }}
{{- if .Values.api.awsIamAuth.enabled }}
apiVersion: v1
kind: Config
clusters:
@ -23,5 +23,5 @@ users:
- "-i"
- "{{ .Values.clusterName }}"
- "-r"
- "{{ .Values.kubeAdminRole }}"
- "{{ .Values.api.awsIamAuth.kubeAdminRole }}"
{{- end }}

View File

@ -1,4 +1,4 @@
{{- if eq .Values.platform "aws" }}
{{- if .Values.api.awsIamAuth.enabled }}
# clusters refers to the remote service.
clusters:
- name: aws-iam-authenticator

View File

@ -1,4 +1,4 @@
{{- if eq .Values.platform "aws" }}
{{- if .Values.api.awsIamAuth.enabled }}
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@ -1,4 +1,4 @@
{{- if eq .Values.platform "aws" }}
{{- if .Values.api.awsIamAuth.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -106,9 +106,9 @@ spec:
# run on the host network (don't depend on CNI)
hostNetwork: true
# run on each master node
# run on each controller
nodeSelector:
node-role.kubernetes.io/master: ""
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master

View File

@ -1,11 +1,11 @@
{{- if eq .Values.platform "aws" }}
{{- if .Values.api.awsIamAuth.enabled }}
# Controller role for consistency, similar to kubeadm admin.conf
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
name: kubezero-worker-nodes
spec:
arn: {{ .Values.workerNodeRole }}
arn: {{ .Values.api.awsIamAuth.workerNodeRole }}
username: system:node:{{ "{{" }}EC2PrivateDNSName{{ "}}" }}
groups:
# For now use masters, define properly with 1.20
@ -19,7 +19,7 @@ kind: IAMIdentityMapping
metadata:
name: kubernetes-admin
spec:
arn: {{ .Values.kubeAdminRole }}
arn: {{ .Values.api.awsIamAuth.kubeAdminRole }}
username: kubernetes-admin
groups:
- system:masters

View File

@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: kubezero-backup-restic
namespace: kube-system
type: Opaque
data:
repository: {{ default "" .Values.backup.repository | b64enc }}
password: {{ default "" .Values.backup.password | b64enc }}

View File

@ -1,53 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: kubezero-backup
namespace: kube-system
spec:
schedule: "0 * * * *"
jobTemplate:
spec:
backoffLimit: 1
template:
spec:
containers:
- name: kubezero-admin
image: public.ecr.aws/zero-downtime/kubezero-admin:v{{ .Chart.Version }}
imagePullPolicy: Always
command: ["kubezero.sh"]
args:
- backup
volumeMounts:
- name: host
mountPath: /host
- name: workdir
mountPath: /tmp
env:
- name: DEBUG
value: "1"
- name: RESTIC_REPOSITORY
valueFrom:
secretKeyRef:
name: kubezero-backup-restic
key: repository
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: kubezero-backup-restic
key: password
#securityContext:
# readOnlyRootFilesystem: true
hostNetwork: true
volumes:
- name: host
hostPath:
path: /
type: Directory
- name: workdir
emptyDir: {}
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
restartPolicy: Never

View File

@ -16,26 +16,40 @@ api:
# -- s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME
oidcEndpoint: ""
apiAudiences: "istio-ca"
awsIamAuth: "false"
awsIamAuth:
enabled: false
workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
addons:
aws-node-termination-handler:
enabled: false
# -- arn:aws:sqs:${REGION}:${AWS_ACCOUNT_ID}:${CLUSTERNAME}_Nth
queueURL: ""
clusterBackup:
enabled: false
# -- s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup
repository: ""
# -- /etc/cloudbender/clusterBackup.passphrase
passwordFile: ""
network:
multus:
enabled: false
tag: "v3.8"
cilium:
enabled: false
calico:
enabled: false
highAvailable: false
etcd:
nodeName: set_via_cmdline
extraArgs: {}
backup:
# -- s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup
repository: ""
# -- /etc/cloudbender/clusterBackup.passphrase
passwordFile: ""
highAvailable: false
# -- supported values aws,bare-metal
platform: "aws"
# -- Set to false for openrc, eg. on Gentoo or Alpine
systemd: true
protectKernelDefaults: true
workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"

View File

@ -41,7 +41,7 @@ render_kubeadm() {
yq eval 'del(.etcd.local.serverCertSANs) | del(.etcd.local.peerCertSANs)' \
${HOSTFS}/etc/kubernetes/kubeadm-etcd.yaml > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
# Copy JoinConfig
# Copy JoinConfig
cp ${WORKDIR}/kubeadm/templates/JoinConfiguration.yaml ${HOSTFS}/etc/kubernetes
# hack to "uncloak" the json patches after they got processed by helm
@ -55,10 +55,12 @@ render_kubeadm() {
parse_kubezero() {
[ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; exit 1; }
KUBE_VERSION=$(kubeadm version -o yaml | yq eval .clientVersion.gitVersion -)
CLUSTERNAME=$(yq eval '.clusterName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth // "true"' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
AWS_NTH=$(yq eval '.addons.aws-node-termination-handler.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
}
@ -66,7 +68,7 @@ parse_kubezero() {
pre_kubeadm() {
# update all apiserver addons first
cp -r ${WORKDIR}/kubeadm/templates/apiserver ${HOSTFS}/etc/kubernetes
# aws-iam-authenticator enabled ?
if [ "$AWS_IAM_AUTH" == "true" ]; then
@ -89,7 +91,9 @@ pre_kubeadm() {
# Shared steps after calling kubeadm
post_kubeadm() {
# KubeZero resources
cat ${WORKDIR}/kubeadm/templates/resources/*.yaml | kubectl apply -f - $LOG
for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
kubectl apply -f $f $LOG
done
# Patch coreDNS addon, ideally we prevent kubeadm to reset coreDNS to its defaults
kubectl patch deployment coredns -n kube-system --patch-file ${WORKDIR}/kubeadm/templates/patches/coredns0.yaml $LOG
@ -132,6 +136,23 @@ if [ "$1" == 'upgrade' ]; then
### POST 1.21 specific
######################
helm repo add kubezero https://cdn.zero-downtime.net/charts/
# if Calico, install multus to prepare migration
kubectl get ds calico-node -n kube-system && \
helm template kubezero/kubezero-network --version 0.1.0 --include-crds --namespace kube-system --kube-version $KUBE_VERSION --name-template network \
--set multus.enabled=true \
| kubectl apply -f - $LOG
# migrate backup
if [ -f ${HOSTFS}/usr/local/sbin/backup_control_plane.sh ]; then
_repo=$(grep "export RESTIC_REPOSITORY" ${HOSTFS}/usr/local/sbin/backup_control_plane.sh)
helm template kubezero/kubezero-addons --version 0.2.0 --include-crds --namespace kube-system --kube-version $KUBE_VERSION --name-template addons \
--set clusterBackup.enabled=true \
--set clusterBackup.repository="${_repo##*=}" \
--set clusterBackup.password="$(cat ${HOSTFS}/etc/kubernetes/clusterBackup.passphrase)" \
| kubectl apply -f - $LOG
fi
######################
@ -147,7 +168,6 @@ if [ "$1" == 'upgrade' ]; then
# Removed:
# - update oidc do we need that ?
# - backup right after upgrade ... not so sure about that one
elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
@ -203,8 +223,23 @@ elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
yq eval -M ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/ca.crt | base64 -w0)\"" ${WORKDIR}/kubeadm/templates/admin-aws-iam.yaml > ${HOSTFS}/etc/kubernetes/admin-aws-iam.yaml
fi
# Install some basics on bootstrap
if [[ "$1" =~ "^(bootstrap)$" ]]; then
helm repo add kubezero https://cdn.zero-downtime.net/charts/
# network
yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
helm template kubezero/kubezero-network --version 0.1.0 --include-crds --namespace kube-system --name-template network \
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply -f - $LOG
# addons
yq eval '.addons // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
helm template kubezero/kubezero-addons --version 0.2.0 --include-crds --namespace kube-system --name-template addons \
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply -f - $LOG
fi
post_kubeadm
echo "${1} cluster $CLUSTERNAME successful."
@ -225,7 +260,7 @@ elif [ "$1" == 'backup' ]; then
# pki & cluster-admin access
cp -r ${HOSTFS}/etc/kubernetes/pki ${WORKDIR}
cp -r ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}
# Backup via restic
restic snapshots || restic init
restic backup ${WORKDIR} -H $CLUSTERNAME