feat: new 1.21 kubeadm flow

Stefan Reimer 2021-11-27 14:02:23 +01:00
parent bae84c0731
commit 167f67abfa
43 changed files with 540 additions and 100 deletions


@ -65,6 +65,7 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custo
 ## Ingress
 - AWS Network Loadbalancer and Istio Ingress controllers
+- optional rate limiting support
 - No additional costs per exposed service
 - Automated SSL Certificate handling via cert-manager incl. renewal etc.
 - support for TCP services

charts/kubeadm Symbolic link

@ -0,0 +1 @@
../containers/admin/v1.21/kubeadm


@ -1,3 +0,0 @@
#!/bin/bash
echo "Minor uppgrade from 1.19!"


@ -1,3 +0,0 @@
#!/bin/bash
echo "Patch upgrade within 1.20"


@ -1,28 +0,0 @@
clusterName: pleasechangeme
domain: changeme.org
# Needs to be set to primary node IP
listenAddress: 0.0.0.0
api:
endpoint: kube-api.changeme.org:6443
listenPort: 6443
allEtcdEndpoints: ""
extraArgs: {}
serviceAccountIssuer: ""
apiAudiences: "istio-ca"
etcd:
nodeName: set_via_cmdline
extraArgs: {}
highAvailable: false
# supported values aws,bare-metal
platform: "aws"
# Set to false for openrc, eg. on Gentoo or Alpine
systemd: true
protectKernelDefaults: true
workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"


@ -0,0 +1,24 @@
ARG ALPINE_VERSION
FROM alpine:${ALPINE_VERSION}
ARG KUBE_VERSION
RUN cd /etc/apk/keys && \
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-6183d596.rsa.pub" && \
echo "@kubezero https://cdn.zero-downtime.net/alpine/v${KUBE_VERSION}/kubezero" >> /etc/apk/repositories && \
echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
apk upgrade -U -a && \
apk --no-cache add \
yq \
cri-tools@kubezero \
kubeadm@kubezero \
kubectl@kubezero \
etcd-ctl@testing \
restic@testing \
helm@testing
ADD v${KUBE_VERSION}/kubezero.sh /usr/bin
ADD v${KUBE_VERSION}/kubeadm /opt/kubeadm
ENTRYPOINT ["kubezero.sh"]

containers/admin/Makefile Normal file

@ -0,0 +1,25 @@
VERSION ?= 1.21.7
ALPINE_VERSION ?= 3.15
REGISTRY := public.ecr.aws/zero-downtime
REPOSITORY := kubezero-admin
TAG := $(REPOSITORY):v$(VERSION)
KUBE_VERSION := $(shell echo $(VERSION) | sed -e 's/\.[[:digit:]]*$$//')
.PHONY: build push clean scan
all: clean build push
build:
podman build --rm --squash-all --build-arg KUBE_VERSION=$(KUBE_VERSION) --build-arg ALPINE_VERSION=$(ALPINE_VERSION) -t $(TAG) .
push:
aws ecr-public get-login-password --region us-east-1 | podman login --username AWS --password-stdin $(REGISTRY)
podman tag $(TAG) $(REGISTRY)/$(TAG)
podman push $(REGISTRY)/$(TAG)
clean:
podman rmi -f $(TAG)
scan:
podman system service&
sleep 5; trivy $(TAG)
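A typical local invocation of these targets, as a rough sketch (the VERSION override shown here is only an example; pushing assumes the ECR login performed by the push target succeeds):
# build and push the admin image for a given Kubernetes patch release
make build push VERSION=1.21.7
# vulnerability scan of the freshly built image
make scan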


@ -0,0 +1,43 @@
apiVersion: batch/v1
kind: Job
metadata:
name: kubezero-upgrade
namespace: kube-system
spec:
backoffLimit: 1
template:
spec:
hostNetwork: true
#hostIPC: true
#hostPID: true
containers:
- name: kubezero-admin
image: public.ecr.aws/zero-downtime/kubezero-admin:v1.21.7
imagePullPolicy: Always
command: ["kubezero.sh"]
args:
- upgrade
env:
- name: DEBUG
value: "1"
volumeMounts:
- name: host
mountPath: /host
- name: workdir
mountPath: /tmp
securityContext:
capabilities:
add: ["SYS_CHROOT"]
volumes:
- name: host
hostPath:
path: /
type: Directory
- name: workdir
emptyDir: {}
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
restartPolicy: Never
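Assuming the manifest above is saved locally as kubezero-upgrade-job.yaml (the file name is only illustrative), the upgrade could be launched and followed like this:
kubectl apply -f kubezero-upgrade-job.yaml
kubectl -n kube-system logs -f job/kubezero-upgrade
# remove the finished Job afterwards; restartPolicy/backoffLimit keep it around otherwise
kubectl -n kube-system delete job kubezero-upgrade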


@ -1,13 +1,14 @@
 apiVersion: v2
 name: kubeadm
-description: KubeZero Kubeadm golden config
+description: KubeZero Kubeadm cluster config
 type: application
-version: 1.20.11
+version: 1.21.7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
 - kubezero
 - kubeadm
 maintainers:
-- name: Quarky9
+- name: Stefan Reimer
+  email: stefan@zero-downtime.net
-kubeVersion: ">= 1.18.0"
+kubeVersion: ">= 1.20.0"


@ -1,8 +1,8 @@
 # kubeadm
-![Version: 1.20.11](https://img.shields.io/badge/Version-1.20.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.21.7](https://img.shields.io/badge/Version-1.21.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
-KubeZero Kubeadm golden config
+KubeZero Kubeadm cluster config
 **Homepage:** <https://kubezero.com>
@ -10,11 +10,11 @@ KubeZero Kubeadm golden config
 | Name | Email | Url |
 | ---- | ------ | --- |
-| Quarky9 | | |
+| Stefan Reimer | stefan@zero-downtime.net | |
 ## Requirements
-Kubernetes: `>= 1.18.0`
+Kubernetes: `>= 1.20.0`
 ## Values
@ -22,20 +22,25 @@ Kubernetes: `>= 1.18.0`
 |-----|------|---------|-------------|
 | api.allEtcdEndpoints | string | `""` | |
 | api.apiAudiences | string | `"istio-ca"` | |
+| api.awsIamAuth | string | `"false"` | |
 | api.endpoint | string | `"kube-api.changeme.org:6443"` | |
 | api.extraArgs | object | `{}` | |
 | api.listenPort | int | `6443` | |
-| api.serviceAccountIssuer | string | `""` | |
+| api.oidcEndpoint | string | `""` | s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
+| api.serviceAccountIssuer | string | `""` | https://s3.${REGION}.amazonaws.com/${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
+| backup.passwordFile | string | `""` | /etc/cloudbender/clusterBackup.passphrase |
+| backup.repository | string | `""` | s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup |
 | clusterName | string | `"pleasechangeme"` | |
 | domain | string | `"changeme.org"` | |
 | etcd.extraArgs | object | `{}` | |
 | etcd.nodeName | string | `"set_via_cmdline"` | |
 | highAvailable | bool | `false` | |
 | kubeAdminRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
-| listenAddress | string | `"0.0.0.0"` | |
+| listenAddress | string | `"0.0.0.0"` | Needs to be set to primary node IP |
+| nodeName | string | `"localhost"` | set to $HOSTNAME |
-| platform | string | `"aws"` | |
+| platform | string | `"aws"` | supported values aws,bare-metal |
 | protectKernelDefaults | bool | `true` | |
-| systemd | bool | `true` | |
+| systemd | bool | `true` | Set to false for openrc, eg. on Gentoo or Alpine |
 | workerNodeRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
 ## Resources


@ -1,7 +1,5 @@
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: ClusterConfiguration
-metadata:
-  name: kubezero-clusterconfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.clusterName }}
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
@ -15,7 +13,7 @@ etcd:
 #discovery-srv-name: {{ .Values.clusterName }}
 #initial-cluster:
 initial-cluster-token: etcd-{{ .Values.clusterName }}
-listen-metrics-urls: "http://{{ .Values.listenAddress }}:2381"
+listen-metrics-urls: "http://0.0.0.0:2381"
 logger: "zap"
 # log-level: "warn"
 {{- with .Values.etcd.extraArgs }}
@ -35,7 +33,6 @@ etcd:
 controllerManager:
 extraArgs:
 profiling: "false"
-bind-address: {{ .Values.listenAddress }}
 terminated-pod-gc-threshold: "300"
 # leader-elect: {{ .Values.highAvailable | quote }}
 logging-format: json
@ -43,7 +40,6 @@ controllerManager:
 scheduler:
 extraArgs:
 profiling: "false"
-bind-address: {{ .Values.listenAddress }}
 # leader-elect: {{ .Values.highAvailable | quote }}
 logging-format: json
 feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
@ -59,7 +55,6 @@ apiServer:
 audit-log-maxsize: "100"
 audit-log-maxbackup: "3"
 audit-log-compress: "true"
-bind-address: {{ .Values.listenAddress }}
 tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
 admission-control-config-file: /etc/kubernetes/apiserver/admission-configuration.yaml
 api-audiences: {{ .Values.api.apiAudiences }}
@ -71,9 +66,7 @@ apiServer:
 authentication-token-webhook-config-file: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
 {{- end }}
 feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" "platform" .Values.platform ) | trimSuffix "," | quote }}
-# for 1.21
-# enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit
-enable-admission-plugins: NodeRestriction,EventRateLimit
+enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit
 # {{- if .Values.highAvailable }}
 # goaway-chance: ".001"
 # {{- end }}


@ -1,7 +1,5 @@
 apiVersion: kubeadm.k8s.io/v1beta2
 kind: InitConfiguration
-metadata:
-  name: kubezero-initconfiguration
 localAPIEndpoint:
 advertiseAddress: {{ .Values.listenAddress }}
 bindPort: {{ .Values.api.listenPort }}


@ -1,7 +1,5 @@
 apiVersion: kubeproxy.config.k8s.io/v1alpha1
 kind: KubeProxyConfiguration
-metadata:
-  name: kubezero-kubeproxyconfiguration
 # kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
 metricsBindAddress: "0.0.0.0:10249"
 mode: ""


@ -1,8 +1,6 @@
 # https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
-metadata:
-  name: kubezero-kubeletconfiguration
 failSwapOn: false
 cgroupDriver: cgroupfs
 logging:


@ -2,7 +2,7 @@
 Feature gates for all control plane components
 */ -}}
 {{- define "kubeadm.featuregates" -}}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "GenericEphemeralVolume" "CSIMigrationAWSComplete" "CSIMigrationAzureDiskComplete" "CSIMigrationAzureFileComplete" "CSIMigrationGCEComplete" "CSIMigrationOpenStackComplete" "CSIMigrationvSphereComplete" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "GenericEphemeralVolume" "InTreePluginAWSUnregister" "InTreePluginAzureDiskUnregister" "InTreePluginAzureFileUnregister" "InTreePluginGCEUnregister" "InTreePluginOpenStackUnregister" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
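To verify which gates actually end up on the control plane flags, the chart can be templated locally; a minimal sketch, assuming the charts/kubeadm symlink from above and a local values file named my-kubezero.yaml (any platform-specific filtering outside this hunk aside):
helm template charts/kubeadm -f my-kubezero.yaml | grep feature-gates
# expected to print lines roughly like:
#   feature-gates: "CustomCPUCFSQuotaPeriod=true,GenericEphemeralVolume=true,InTreePluginAWSUnregister=true,..."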


@ -0,0 +1,19 @@
{{- if eq .Values.platform "aws" }}
# clusters refers to the remote service.
clusters:
- name: aws-iam-authenticator
cluster:
certificate-authority-data: "replaced at runtime"
server: https://localhost:21362/authenticate
# users refers to the API Server's webhook configuration
# (we don't need to authenticate the API server).
users:
- name: apiserver
# kubeconfig files require a context. Provide one for the API Server.
current-context: webhook
contexts:
- name: webhook
context:
cluster: aws-iam-authenticator
user: apiserver
{{- end }}


@ -11,7 +11,7 @@ spec:
 limits:
 memory: 128Mi
 nodeSelector:
-node-role.kubernetes.io/master: ""
+node-role.kubernetes.io/control-plane: ""
 affinity:
 podAntiAffinity:
 requiredDuringSchedulingIgnoredDuringExecution:


@ -0,0 +1,10 @@
json:
- op: add
path: /spec/containers/0/command/-
value: --bind-address={{ .Values.listenAddress }}
- op: replace
path: /spec/containers/0/livenessProbe/httpGet/host
value: {{ .Values.listenAddress }}
- op: replace
path: /spec/containers/0/startupProbe/httpGet/host
value: {{ .Values.listenAddress }}


@ -0,0 +1,10 @@
json:
- op: add
path: /spec/containers/0/command/-
value: --bind-address={{ .Values.listenAddress }}
- op: replace
path: /spec/containers/0/livenessProbe/httpGet/host
value: {{ .Values.listenAddress }}
- op: replace
path: /spec/containers/0/startupProbe/httpGet/host
value: {{ .Values.listenAddress }}


@ -0,0 +1,10 @@
json:
- op: add
path: /spec/containers/0/command/-
value: --bind-address={{ .Values.listenAddress }}
- op: replace
path: /spec/containers/0/livenessProbe/httpGet/host
value: {{ .Values.listenAddress }}
- op: replace
path: /spec/containers/0/startupProbe/httpGet/host
value: {{ .Values.listenAddress }}
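These 1+json patch files are consumed by kubeadm's patch mechanism; kubezero.sh further below copies the rendered patches directory onto the host and points kubeadm at it, roughly like this sketch:
# files are named e.g. kube-apiserver1+json.yaml, kube-controller-manager1+json.yaml, kube-scheduler1+json.yaml
kubeadm init --config /etc/kubernetes/kubeadm.yaml --experimental-patches /tmp/patches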


@ -112,12 +112,10 @@ spec:
 tolerations:
 - effect: NoSchedule
 key: node-role.kubernetes.io/master
-- key: CriticalAddonsOnly
-  operator: Exists
 containers:
 - name: aws-iam-authenticator
-image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.2
+image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.3
 args:
 - server
 - --backend-mode=CRD,MountedFile
@ -150,6 +148,6 @@ spec:
 configMap:
 name: aws-iam-authenticator
 - name: state
-hostPath:
-path: /var/aws-iam-authenticator/
+secret:
+secretName: aws-iam-certs
 {{- end }}


@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: kubezero-backup-restic
namespace: kube-system
type: Opaque
data:
repository: {{ default "" .Values.backup.repository | b64enc }}
password: {{ default "" .Values.backup.password | b64enc }}


@ -0,0 +1,53 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: kubezero-backup
namespace: kube-system
spec:
schedule: "0 * * * *"
jobTemplate:
spec:
backoffLimit: 1
template:
spec:
containers:
- name: kubezero-admin
image: public.ecr.aws/zero-downtime/kubezero-admin:v{{ .Chart.Version }}
imagePullPolicy: Always
command: ["kubezero.sh"]
args:
- backup
volumeMounts:
- name: host
mountPath: /host
- name: workdir
mountPath: /tmp
env:
- name: DEBUG
value: "1"
- name: RESTIC_REPOSITORY
valueFrom:
secretKeyRef:
name: kubezero-backup-restic
key: repository
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: kubezero-backup-restic
key: password
#securityContext:
# readOnlyRootFilesystem: true
hostNetwork: true
volumes:
- name: host
hostPath:
path: /
type: Directory
- name: workdir
emptyDir: {}
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
restartPolicy: Never
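Besides the hourly schedule, a one-off backup can be triggered from this CronJob; a small sketch (the job name is arbitrary):
kubectl -n kube-system create job --from=cronjob/kubezero-backup kubezero-backup-manual
kubectl -n kube-system logs -f job/kubezero-backup-manual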


@ -0,0 +1,41 @@
clusterName: pleasechangeme
# -- set to $HOSTNAME
nodeName: localhost
domain: changeme.org
# -- Needs to be set to primary node IP
listenAddress: 0.0.0.0
api:
endpoint: kube-api.changeme.org:6443
listenPort: 6443
allEtcdEndpoints: ""
extraArgs: {}
# -- https://s3.${REGION}.amazonaws.com/${CFN[ConfigBucket]}/k8s/$CLUSTERNAME
serviceAccountIssuer: ""
# -- s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME
oidcEndpoint: ""
apiAudiences: "istio-ca"
awsIamAuth: "false"
etcd:
nodeName: set_via_cmdline
extraArgs: {}
backup:
# -- s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup
repository: ""
# -- /etc/cloudbender/clusterBackup.passphrase
passwordFile: ""
highAvailable: false
# -- supported values aws,bare-metal
platform: "aws"
# -- Set to false for openrc, eg. on Gentoo or Alpine
systemd: true
protectKernelDefaults: true
workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
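kubezero.sh below expects these values rendered to /etc/kubernetes/kubezero.yaml on the host; a minimal, non-exhaustive sketch with placeholder values:
cat > /etc/kubernetes/kubezero.yaml <<'EOF'
clusterName: pleasechangeme
nodeName: node1.changeme.org
domain: changeme.org
listenAddress: 10.0.1.10
api:
  endpoint: kube-api.changeme.org:6443
EOF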


@ -0,0 +1,253 @@
#!/bin/sh
set -e
WORKDIR=/tmp/kubezero
HOSTFS=/host
export KUBECONFIG="${HOSTFS}/root/.kube/config"
if [ -n "$DEBUG" ]; then
set -x
LOG="--v=5"
fi
# Generic retry utility
retry() {
local tries=$1
local waitfor=$2
local timeout=$3
shift 3
while true; do
type -tf $1 >/dev/null && { timeout $timeout $@ && return; } || { $@ && return; }
let tries=$tries-1
[ $tries -eq 0 ] && return 1
sleep $waitfor
done
}
# Render cluster config
render_kubeadm() {
helm template /opt/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubezero.yaml
# Assemble kubeadm config
cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm-etcd.yaml
for f in Cluster Init KubeProxy Kubelet; do
# echo "---" >> /etc/kubernetes/kubeadm-etcd.yaml
cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm-etcd.yaml
done
# Remove etcd custom cert entries from final kubeadm config
yq eval 'del(.etcd.local.serverCertSANs) | del(.etcd.local.peerCertSANs)' \
${HOSTFS}/etc/kubernetes/kubeadm-etcd.yaml > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
# Copy JoinConfig
cp ${WORKDIR}/kubeadm/templates/JoinConfiguration.yaml ${HOSTFS}/etc/kubernetes
# hack to "uncloack" the json patches after they go processed by helm
for s in apiserver controller-manager scheduler; do
yq eval '.json' ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml > /tmp/_tmp.yaml && \
mv /tmp/_tmp.yaml ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml
done
}
parse_kubezero() {
[ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; exit 1; }
CLUSTERNAME=$(yq eval '.clusterName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth // "true"' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
}
# Shared steps before calling kubeadm
pre_kubeadm() {
# update all apiserver addons first
cp -r ${WORKDIR}/kubeadm/templates/apiserver ${HOSTFS}/etc/kubernetes
# aws-iam-authenticator enabled ?
if [ "$AWS_IAM_AUTH" == "true" ]; then
# Initialize webhook
if [ ! -f ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt ]; then
aws-iam-authenticator init -i ${CLUSTERNAME}
mv key.pem ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key
mv cert.pem ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
fi
# Patch the aws-iam-authenticator config with the actual cert.pem
yq eval -Mi ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt| base64 -w0)\"" ${HOSTFS}/etc/kubernetes/apiserver/aws-iam-authenticator.yaml
fi
# copy patches to host to make --rootfs of kubeadm work
cp -r ${WORKDIR}/kubeadm/templates/patches /host/tmp/
}
# Shared steps after calling kubeadm
post_kubeadm() {
# KubeZero resources
cat ${WORKDIR}/kubeadm/templates/resources/*.yaml | kubectl apply -f - $LOG
# Patch coreDNS addon, ideally we prevent kubeadm from resetting coreDNS to its defaults
kubectl patch deployment coredns -n kube-system --patch-file ${WORKDIR}/kubeadm/templates/patches/coredns0.yaml $LOG
rm -rf /host/tmp/patches
}
# First parse kubezero.yaml
parse_kubezero
if [ "$1" == 'upgrade' ]; then
### PRE 1.21 specific
#####################
# Migrate aws-iam-authenticator from file certs to secret
if [ "$AWS_IAM_AUTH" == "true" ]; then
kubectl get secrets -n kube-system aws-iam-certs || \
kubectl create secret generic aws-iam-certs -n kube-system \
--from-file=key.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key \
--from-file=cert.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
fi
#####################
render_kubeadm
pre_kubeadm
# Upgrade
kubeadm upgrade apply --config /etc/kubernetes/kubeadm.yaml --rootfs ${HOSTFS} \
--experimental-patches /tmp/patches $LOG -y
post_kubeadm
# If we have a re-certed kubectl config, install it for root
if [ -f ${HOSTFS}/etc/kubernetes/admin.conf ]; then
cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
fi
### POST 1.21 specific
######################
######################
# Cleanup after kubeadm on the host
rm -rf /etc/kubernetes/tmp
echo "Successfully upgraded cluster."
# TODO
# Send Notification currently done via CloudBender -> SNS -> Slack
# Better deploy https://github.com/opsgenie/kubernetes-event-exporter and set proper routes and labels on this Job
# Removed:
# - update oidc do we need that ?
# - backup right after upgrade ... not so sure about that one
elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
render_kubeadm
if [[ "$1" =~ "^(recover|join)$" ]]; then
# Recert certificates for THIS node
rm -f ${HOSTFS}/etc/kubernetes/pki/etcd/peer.* ${HOSTFS}/etc/kubernetes/pki/etcd/server.* ${HOSTFS}/etc/kubernetes/pki/apiserver.*
kubeadm init phase certs etcd-server --config=/etc/kubernetes/kubeadm-etcd.yaml --rootfs ${HOSTFS}
kubeadm init phase certs etcd-peer --config=/etc/kubernetes/kubeadm-etcd.yaml --rootfs ${HOSTFS}
kubeadm init phase certs apiserver --config=/etc/kubernetes/kubeadm.yaml --rootfs ${HOSTFS}
# Restore only etcd for disaster recovery
if [[ "$1" =~ "^(recover)$" ]]; then
etcdctl snapshot restore ${HOSTFS}/etc/kubernetes \
--name $NODENAME \
--data-dir="${HOSTFS}/var/lib/etcd" \
--initial-cluster-token ${CLUSTERNAME} \
--initial-advertise-peer-urls https://${NODENAME}:2380 \
--initial-cluster $NODENAME=https://${NODENAME}:2380
fi
# Create all certs during bootstrap
else
kubeadm init phase certs all --config=/etc/kubernetes/kubeadm-etcd.yaml --rootfs ${HOSTFS}
fi
pre_kubeadm
if [[ "$1" =~ "^(join)$" ]]; then
kubeadm join --config /etc/kubernetes/JoinConfiguration.yaml --rootfs ${HOSTFS} \
--experimental-patches /tmp/patches $LOG
else
kubeadm init --config /etc/kubernetes/kubeadm.yaml --rootfs ${HOSTFS} \
--experimental-patches /tmp/patches --skip-token-print $LOG
fi
cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
# Wait for api to be online
retry 0 10 30 kubectl cluster-info --request-timeout 3
# Ensure aws-iam-authenticator secret is in place
if [ "$AWS_IAM_AUTH" == "true" ]; then
kubectl get secrets -n kube-system aws-iam-certs || \
kubectl create secret generic aws-iam-certs -n kube-system \
--from-file=key.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key \
--from-file=cert.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
# Store aws-iam-auth admin on SSM
yq eval -M ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/ca.crt | base64 -w0)\"" ${WORKDIR}/kubeadm/templates/admin-aws-iam.yaml > ${HOSTFS}/etc/kubernetes/admin-aws-iam.yaml
fi
post_kubeadm
echo "${1} cluster $CLUSTERNAME successfull."
# Since 1.21 we only need to backup etcd + /etc/kubernetes/pki !
elif [ "$1" == 'backup' ]; then
mkdir -p ${WORKDIR}
restic snapshots || restic init || exit 1
# etcd
export ETCDCTL_API=3
export ETCDCTL_CACERT=${HOSTFS}/etc/kubernetes/pki/etcd/ca.crt
export ETCDCTL_CERT=${HOSTFS}/etc/kubernetes/pki/apiserver-etcd-client.crt
export ETCDCTL_KEY=${HOSTFS}/etc/kubernetes/pki/apiserver-etcd-client.key
etcdctl --endpoints=https://localhost:2379 snapshot save ${WORKDIR}/etcd_snapshot
# pki & cluster-admin access
cp -r ${HOSTFS}/etc/kubernetes/pki ${WORKDIR}
cp -r ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}
# Backup via restic
restic snapshots || restic init
restic backup ${WORKDIR} -H $CLUSTERNAME
echo "Backup complete"
restic forget --keep-hourly 24 --keep-daily ${RESTIC_RETENTION:-7} --prune
elif [ "$1" == 'restore' ]; then
mkdir -p ${WORKDIR}
restic restore latest --no-lock -t /
# Make last etcd snapshot available
cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
# Put PKI in place
cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
# Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
else
echo "Unknown command!"
exit 1
fi
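Outside of the in-cluster Job, the same flow can be exercised directly on a controller node; a sketch mirroring the Job spec above, assuming podman is available on the host and /etc/kubernetes/kubezero.yaml is already in place:
podman run --rm --network host --cap-add SYS_CHROOT \
  -v /:/host -e DEBUG=1 \
  public.ecr.aws/zero-downtime/kubezero-admin:v1.21.7 bootstrap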


@ -1,29 +1,23 @@
 #!/bin/bash
-set -eux
+set -ex
+REPO_URL_S3="s3://zero-downtime-web/cdn/charts"
+REPO_URL="https://cdn.zero-downtime.net/charts"
 CHARTS=${1:-'.*'}
-FORCE=${2:-''}
-# all credits go to the argoproj Helm guys https://github.com/argoproj/argo-helm
 SRCROOT="$(cd "$(dirname "$0")/.." && pwd)"
-GIT_PUSH=${GIT_PUSH:-true}
 TMPDIR=$(mktemp -d kubezero-repo.XXX)
-mkdir -p $TMPDIR/stage && trap 'rm -rf $TMPDIR' ERR EXIT
-git clone -b gh-pages ssh://git@git.zero-downtime.net:22000/ZeroDownTime/KubeZero.git $TMPDIR/repo
-# Reset all
-# rm -rf $TMPDIR/repo/*tgz $TMPDIR/repo/index.yaml
-helm repo add argoproj https://argoproj.github.io/argo-helm
-helm repo add jetstack https://charts.jetstack.io
-helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/
-helm repo update
+mkdir -p $TMPDIR
+[ -z "$DEBUG" ] && trap 'rm -rf $TMPDIR' ERR EXIT
 for dir in $(find -L $SRCROOT/charts -mindepth 1 -maxdepth 1 -type d);
 do
 name=$(basename $dir)
 [[ $name =~ $CHARTS ]] || continue
 if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]
 then
 echo "Processing chart dependencies"
@ -32,28 +26,15 @@ do
 fi
 echo "Processing $dir"
-helm lint $dir || true
+helm lint $dir
-helm --debug package -d $TMPDIR/stage $dir
+helm package -d $TMPDIR $dir
 done
-# Do NOT overwrite existing charts
-if [ -n "$FORCE" ]; then
-cp $TMPDIR/stage/*.tgz $TMPDIR/repo
-else
-cp -n $TMPDIR/stage/*.tgz $TMPDIR/repo
-fi
-cd $TMPDIR/repo
-# read
-helm repo index .
-git status
-if [ "$GIT_PUSH" == "true" ]
-then
-git add . && git commit -m "ci: Publish charts" && git push ssh://git@git.zero-downtime.net:22000/ZeroDownTime/KubeZero.git gh-pages
-fi
-cd -
+curl -L -s -o $TMPDIR/index.yaml ${REPO_URL}/index.yaml
+helm repo index $TMPDIR --url $REPO_URL --merge $TMPDIR/index.yaml
+aws s3 cp $TMPDIR/*.tgz $REPO_URL_S3/
+aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1
 rm -rf $TMPDIR


@ -19,4 +19,7 @@ do
echo "Processing $dir" echo "Processing $dir"
helm lint $dir && helm --debug package $dir helm lint $dir && helm --debug package $dir
echo "Updating README"
helm-docs -c $dir
done done