ci: Reorg kubezero container build
parent abbb6bfed8
commit 7a247126ea
Dockerfile (new file, 28 lines)
@@ -0,0 +1,28 @@
+ARG ALPINE_VERSION
+
+FROM alpine:${ALPINE_VERSION}
+
+ARG ALPINE_VERSION
+ARG KUBE_VERSION
+
+RUN cd /etc/apk/keys && \
+    wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
+    echo "@kubezero https://cdn.zero-downtime.net/alpine/v${ALPINE_VERSION}/kubezero" >> /etc/apk/repositories && \
+    echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
+    apk upgrade -U -a --no-cache && \
+    apk --no-cache add \
+      jq \
+      yq \
+      cri-tools@kubezero \
+      kubeadm@kubezero~=${KUBE_VERSION} \
+      kubectl@kubezero~=${KUBE_VERSION} \
+      etcd-ctl@testing \
+      restic@testing \
+      helm@testing
+
+ADD releases/v${KUBE_VERSION}/kubezero.sh /usr/bin
+ADD charts/kubeadm /charts/kubeadm
+ADD charts/kubezero-addons /charts/kubezero-addons
+ADD charts/kubezero-network /charts/kubezero-network
+
+ENTRYPOINT ["kubezero.sh"]
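The new image bundles the admin tooling (kubeadm, kubectl, etcd-ctl, restic, helm) together with the charts it renders, so a control-plane node only needs to run this one container. A minimal sketch of how such an image might be invoked, assuming the host filesystem is mounted at the /host path that kubezero.sh expects; the exact mounts and tag are illustrative, not taken from this commit:

```sh
# Hypothetical invocation; kubezero.sh reads /host/etc/kubernetes/kubezero.yaml
# and writes back into /host, so the host root is mounted read-write.
podman run --rm -ti --net host \
  -v /:/host \
  public.ecr.aws/zero-downtime/kubezero-admin:v1.22.8 bootstrap
```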
Makefile (35 lines changed)
@@ -1,20 +1,39 @@
-BUCKET ?= zero-downtime
-BUCKET_PREFIX ?= /cloudbender/distfiles
-FILES ?= distfiles.txt
+VERSION ?= 1.22.8
+ALPINE_VERSION ?= 3.15
+REGISTRY := public.ecr.aws/zero-downtime
+REPOSITORY := kubezero-admin
+TAG := $(REPOSITORY):v$(VERSION)
+KUBE_VERSION := $(shell echo $(VERSION) | sed -e 's/\.[[:digit:]]*$$//')
 
-.PHONY: update docs
+.PHONY: build push clean scan
 
-all: update
+all: build push
 
-update:
+build:
+	podman build --rm --build-arg KUBE_VERSION=$(KUBE_VERSION) --build-arg ALPINE_VERSION=$(ALPINE_VERSION) -t $(TAG) .
+
+push:
+	aws ecr-public get-login-password --region us-east-1 | podman login --username AWS --password-stdin $(REGISTRY)
+	podman tag $(TAG) $(REGISTRY)/$(TAG)
+	podman push $(REGISTRY)/$(TAG)
+
+clean:
+	podman image prune -f
+
+scan:
+	podman system service&
+	sleep 5; trivy $(TAG)
+
+update-charts:
 	./scripts/update_helm.sh
 
-docs:
+update-chart-docs:
 	for c in charts/*; do \
 	  [[ $$c =~ "kubezero-lib" ]] && continue ; \
 	  [[ $$c =~ "kubeadm" ]] && continue ; \
 	  helm-docs -c $$c ; \
 	done
 
-publish:
+publish-charts:
 	./scripts/publish.sh
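The KUBE_VERSION variable simply drops the patch level from VERSION so it lines up with the apk pins (kubeadm@kubezero~=1.22) and the Alpine repository path in the Dockerfile. Outside of make, where `$$` is written as a single `$`, the expression behaves like this:

```sh
# Strip the trailing ".<patch>" component from the full version.
echo "1.22.8" | sed -e 's/\.[[:digit:]]*$//'
# -> 1.22
```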
charts/kubeadm/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.21.9
+version: 1.22.8
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
charts/kubeadm/README.md
@@ -1,6 +1,6 @@
 # kubeadm
 
-![Version: 1.21.9](https://img.shields.io/badge/Version-1.21.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.22.8](https://img.shields.io/badge/Version-1.22.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero Kubeadm cluster config
 
@@ -14,7 +14,7 @@ KubeZero Kubeadm cluster config
 
 ## Requirements
 
-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.22.0`
 
 ## Values
 
@@ -47,8 +47,8 @@ Kubernetes: `>= 1.20.0`
 | network.multus.enabled | bool | `false` | |
 | network.multus.tag | string | `"v3.8"` | |
 | nodeName | string | `"kubezero-node"` | set to $HOSTNAME |
-| protectKernelDefaults | bool | `true` | |
-| systemd | bool | `true` | Set to false for openrc, eg. on Gentoo or Alpine |
+| protectKernelDefaults | bool | `false` | |
+| systemd | bool | `false` | Set to false for openrc, eg. on Gentoo or Alpine |
 
 ## Resources
 
charts/kubeadm/templates/ClusterConfiguration.yaml
@@ -1,4 +1,4 @@
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.clusterName }}
@@ -7,6 +7,8 @@ networking:
   podSubnet: 10.244.0.0/16
 etcd:
   local:
+    # As 3.5 is not recommended stick with 3.4.13 till 1.23
+    imageTag: 3.4.13-0
     extraArgs:
       ### DNS discovery
       #discovery-srv: {{ .Values.domain }}
charts/kubeadm/templates/InitConfiguration.yaml
@@ -1,8 +1,10 @@
-apiVersion: kubeadm.k8s.io/v1beta2
+apiVersion: kubeadm.k8s.io/v1beta3
 kind: InitConfiguration
 localAPIEndpoint:
   advertiseAddress: {{ .Values.listenAddress }}
   bindPort: {{ .Values.api.listenPort }}
+patches:
+  directory: /tmp/patches
 nodeRegistration:
   ignorePreflightErrors:
   - DirAvailable--var-lib-etcd
charts/kubeadm/templates/JoinConfiguration.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: JoinConfiguration
+patches:
+  directory: /tmp/patches
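With the move to kubeadm.k8s.io/v1beta3, the --experimental-patches flag (dropped from kubezero.sh further down) is replaced by the patches.directory field above: kubeadm looks in /tmp/patches for files named after the component they patch. A hedged sketch of what such a patch file could look like; the target and resource values are illustrative and not taken from this commit:

```sh
# Hypothetical strategic-merge patch for the etcd static pod; kubeadm picks it
# up by filename (etcd.yaml) from the directory named in patches.directory.
mkdir -p /tmp/patches
cat > /tmp/patches/etcd.yaml <<'EOF'
spec:
  containers:
  - name: etcd
    resources:
      requests:
        cpu: 200m
        memory: 192Mi
EOF
```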
charts/kubeadm/templates/KubeletConfiguration.yaml
@@ -20,16 +20,16 @@ featureGates: {{ include "kubeadm.featuregates" ( dict "return" "map" ) }}
 # Minimal unit is 50m per pod
 podsPerCore: 20
 # cpuCFSQuotaPeriod: 10ms
-# Basic OS on Ubuntu 20.04 incl. crio
+# Basic OS incl. crio
 systemReserved:
-  memory: 256Mi
-  ephemeral-storage: "2Gi"
+  memory: 96Mi
+  #ephemeral-storage: "1Gi"
 # kubelet memory should be static as runc,conmon are added to each pod's cgroup
 kubeReserved:
   cpu: 70m
-  memory: 128Mi
+  memory: 96Mi
 # Lets use below to reserve memory for system processes as kubeReserved/sytemReserved doesnt go well with systemd it seems
 #evictionHard:
 #  memory.available: "484Mi"
 imageGCLowThresholdPercent: 70
-kernelMemcgNotification: true
+# kernelMemcgNotification: true
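The systemReserved/kubeReserved values above directly reduce what a node advertises as schedulable, so the lower reservations fit the slimmer Alpine base. A quick way to confirm the effect on a node after these changes (plain kubectl, not part of the commit):

```sh
# Allocatable = Capacity minus systemReserved/kubeReserved and eviction thresholds;
# <node-name> is a placeholder.
kubectl describe node <node-name> | grep -A 6 -E '^(Capacity|Allocatable):'
```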
@@ -5,4 +5,4 @@ spec:
   requests:
     cpu: 200m
     memory: 192Mi
-    ephemeral-storage: 1Gi
+    #ephemeral-storage: 1Gi
aws-iam-authenticator CRD template (new file, 46 lines)
@@ -0,0 +1,46 @@
+{{- if .Values.api.awsIamAuth.enabled }}
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: iamidentitymappings.iamauthenticator.k8s.aws
+spec:
+  group: iamauthenticator.k8s.aws
+  scope: Cluster
+  names:
+    plural: iamidentitymappings
+    singular: iamidentitymapping
+    kind: IAMIdentityMapping
+    categories:
+    - all
+  versions:
+  - name: v1alpha1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        properties:
+          spec:
+            type: object
+            required:
+            - arn
+            - username
+            properties:
+              arn:
+                type: string
+              username:
+                type: string
+              groups:
+                type: array
+                items:
+                  type: string
+          status:
+            type: object
+            properties:
+              canonicalARN:
+                type: string
+              userID:
+                type: string
+    subresources:
+      status: {}
+{{- end }}
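Once this CRD is installed, aws-iam-authenticator (running with --backend-mode=CRD,MountedFile, see the image bump below) reads IAMIdentityMapping objects for its IAM-to-Kubernetes mappings. A hedged example of such a mapping; the name, account ID and role are placeholders, only the spec fields (arn, username, groups) come from the schema above:

```sh
kubectl apply -f - <<'EOF'
apiVersion: iamauthenticator.k8s.aws/v1alpha1
kind: IAMIdentityMapping
metadata:
  name: kubezero-worker-nodes                             # hypothetical name
spec:
  arn: arn:aws:iam::123456789012:role/worker-node-role    # placeholder ARN
  username: system:node:{{EC2PrivateDNSName}}
  groups:
  - system:bootstrappers
  - system:nodes
EOF
```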
aws-iam-authenticator template
@@ -115,7 +115,7 @@ spec:
 
       containers:
       - name: aws-iam-authenticator
-        image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.3
+        image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.4
        args:
        - server
        - --backend-mode=CRD,MountedFile
charts/kubeadm/values.yaml
@@ -52,5 +52,5 @@ etcd:
   extraArgs: {}
 
 # -- Set to false for openrc, eg. on Gentoo or Alpine
-systemd: true
-protectKernelDefaults: true
+systemd: false
+protectKernelDefaults: false
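These defaults now match the Alpine/openrc hosts the admin image targets; systemd-based nodes can still opt back in when the chart is rendered. A purely illustrative override (values can equally live in /etc/kubernetes/kubezero.yaml):

```sh
# Hypothetical render with the old systemd-oriented defaults restored.
helm template charts/kubeadm -f kubezero.yaml \
  --set systemd=true --set protectKernelDefaults=true
```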
Previous Dockerfile (removed, 24 lines)
@@ -1,24 +0,0 @@
-ARG ALPINE_VERSION
-
-FROM alpine:${ALPINE_VERSION}
-
-ARG KUBE_VERSION
-
-RUN cd /etc/apk/keys && \
-    wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
-    echo "@kubezero https://cdn.zero-downtime.net/alpine/v${KUBE_VERSION}/kubezero" >> /etc/apk/repositories && \
-    echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
-    apk upgrade -U -a && \
-    apk --no-cache add \
-      yq \
-      cri-tools@kubezero \
-      kubeadm@kubezero \
-      kubectl@kubezero \
-      etcd-ctl@testing \
-      restic@testing \
-      helm@testing
-
-ADD v${KUBE_VERSION}/kubezero.sh /usr/bin
-ADD v${KUBE_VERSION}/kubeadm /opt/kubeadm
-
-ENTRYPOINT ["kubezero.sh"]
Previous Makefile (removed, 25 lines)
@@ -1,25 +0,0 @@
-VERSION ?= 1.21.9
-ALPINE_VERSION ?= 3.15
-REGISTRY := public.ecr.aws/zero-downtime
-REPOSITORY := kubezero-admin
-TAG := $(REPOSITORY):v$(VERSION)
-KUBE_VERSION := $(shell echo $(VERSION) | sed -e 's/\.[[:digit:]]*$$//')
-
-.PHONY: build push clean scan
-
-all: build push
-
-build:
-	podman build --rm --squash-all --build-arg KUBE_VERSION=$(KUBE_VERSION) --build-arg ALPINE_VERSION=$(ALPINE_VERSION) -t $(TAG) .
-
-push:
-	aws ecr-public get-login-password --region us-east-1 | podman login --username AWS --password-stdin $(REGISTRY)
-	podman tag $(TAG) $(REGISTRY)/$(TAG)
-	podman push $(REGISTRY)/$(TAG)
-
-clean:
-	podman image prune -f
-
-scan:
-	podman system service&
-	sleep 5; trivy $(TAG)
Previous aws-iam-authenticator CRD template (removed, 34 lines)
@@ -1,34 +0,0 @@
-{{- if .Values.api.awsIamAuth.enabled }}
-apiVersion: apiextensions.k8s.io/v1beta1
-kind: CustomResourceDefinition
-metadata:
-  name: iamidentitymappings.iamauthenticator.k8s.aws
-spec:
-  group: iamauthenticator.k8s.aws
-  version: v1alpha1
-  scope: Cluster
-  names:
-    plural: iamidentitymappings
-    singular: iamidentitymapping
-    kind: IAMIdentityMapping
-    categories:
-    - all
-  subresources:
-    status: {}
-  validation:
-    openAPIV3Schema:
-      properties:
-        spec:
-          required:
-          - arn
-          - username
-          properties:
-            arn:
-              type: string
-            username:
-              type: string
-            groups:
-              type: array
-              items:
-                type: string
-{{- end }}
kubezero.sh
@@ -1,5 +1,4 @@
 #!/bin/sh
-set -e
 
 if [ -n "$DEBUG" ]; then
   set -x
@@ -9,9 +8,8 @@ fi
 # Export vars to ease use in debug_shell etc
 export WORKDIR=/tmp/kubezero
 export HOSTFS=/host
-export VERSION=v1.21
-export NETWORK_VERSION=0.1.7
-export ADDONS_VERSION=0.4.3
+export CHARTS=/charts
+export VERSION=v1.22
 
 export KUBECONFIG="${HOSTFS}/root/.kube/config"
 
@@ -21,6 +19,8 @@ export ETCDCTL_CACERT=${HOSTFS}/etc/kubernetes/pki/etcd/ca.crt
 export ETCDCTL_CERT=${HOSTFS}/etc/kubernetes/pki/apiserver-etcd-client.crt
 export ETCDCTL_KEY=${HOSTFS}/etc/kubernetes/pki/apiserver-etcd-client.key
 
+mkdir -p ${WORKDIR}
+
 # Generic retry utility
 retry() {
   local tries=$1
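The retry() helper is only partially visible in this hunk; judging from its call sites (retry 12 5 5 etcdctl member remove ..., retry 0 5 30 kubectl cluster-info ...) it takes a try count, a delay, and a per-attempt timeout. A minimal sketch under that assumption, with 0 tries meaning retry indefinitely; the real function in the repo may differ:

```sh
retry() {
  # usage: retry <tries> <delay> <timeout> <command...>   (tries=0 -> no limit)
  local tries=$1 delay=$2 to=$3
  shift 3
  until timeout "$to" "$@"; do
    if [ "$tries" -ne 0 ]; then
      tries=$((tries - 1))
      [ "$tries" -eq 0 ] && return 1
    fi
    sleep "$delay"
  done
}
```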
@@ -43,11 +43,11 @@ _kubeadm() {
 
 # Render cluster config
 render_kubeadm() {
-  helm template /opt/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubezero.yaml
+  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubezero.yaml
 
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster Init KubeProxy Kubelet; do
+  for f in Cluster Init Join KubeProxy Kubelet; do
     # echo "---" >> /etc/kubernetes/kubeadm.yaml
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done
@@ -61,14 +61,18 @@ render_kubeadm() {
 
 
 parse_kubezero() {
-  [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; exit 1; }
+  [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; return 1; }
 
   export KUBE_VERSION=$(kubeadm version -o yaml | yq eval .clientVersion.gitVersion -)
   export CLUSTERNAME=$(yq eval '.clusterName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
   export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
+  export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
 
   export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
   export AWS_NTH=$(yq eval '.addons.aws-node-termination-handler.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
+
+  # From here on bail out, allows debug_shell even in error cases
+  set -e
 }
 
 
@@ -114,17 +118,9 @@ post_kubeadm() {
 parse_kubezero
 
 if [ "$1" == 'upgrade' ]; then
-  ### PRE 1.21 specific
+  ### PRE 1.22 specific
   #####################
 
-  # Migrate aws-iam-authenticator from file certs to secret
-  if [ "$AWS_IAM_AUTH" == "true" ]; then
-    kubectl get secrets -n kube-system aws-iam-certs || \
-      kubectl create secret generic aws-iam-certs -n kube-system \
-        --from-file=key.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key \
-        --from-file=cert.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
-  fi
-
   #####################
 
   render_kubeadm
@@ -132,7 +128,7 @@ if [ "$1" == 'upgrade' ]; then
   pre_kubeadm
 
   # Upgrade
-  _kubeadm upgrade apply -y --experimental-patches /tmp/patches
+  _kubeadm upgrade apply -y
 
   post_kubeadm
 
@@ -142,20 +138,16 @@ if [ "$1" == 'upgrade' ]; then
   fi
 
   ### POST 1.21 specific
-  # Delete any previous aws-node-term config as they changed the labels ;-(
-  kubectl delete deployment aws-node-termination-handler -n kube-system || true
-
   ######################
-  helm repo add kubezero https://cdn.zero-downtime.net/charts/
 
   # network
   yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
-  helm template kubezero/kubezero-network --version $NETWORK_VERSION --namespace kube-system --include-crds --name-template network \
+  helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
     -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
 
   # addons
   yq eval '.addons // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
-  helm template kubezero/kubezero-addons --version $ADDONS_VERSION --namespace kube-system --include-crds --name-template addons \
+  helm template $CHARTS/kubezero-addons --namespace kube-system --include-crds --name-template addons \
     -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
 
   ######################
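The network and addons charts are now rendered from the charts baked into the image ($CHARTS) instead of a remote helm repo, with their values pulled out of kubezero.yaml by the yq subtree expression. A small stand-alone illustration of what that extraction does, on a made-up kubezero.yaml fragment:

```sh
# '.network // ""' returns the .network subtree, or an empty document if absent.
cat <<'EOF' | yq eval '.network // ""' -
network:
  multus:
    enabled: false
addons:
  clusterBackup:
    enabled: true
EOF
# output:
#   multus:
#     enabled: false
```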
@@ -181,67 +173,58 @@ if [ "$1" == 'upgrade' ]; then
 elif [[ "$1" == 'node-upgrade' ]]; then
   echo "Starting node upgrade ..."
 
-  echo "Migrating kubezero.yaml"
-  yq -i eval '.api.etcdServers = .api.allEtcdEndpoints | .network.multus.enabled = "true"' ${HOSTFS}/etc/kubernetes/kubezero.yaml
-
-  # remove old aws-node-termination-handler config, first new controller will do the right thing
-  yq -i eval 'del(.addons.aws-node-termination-handler)' ${HOSTFS}/etc/kubernetes/kubezero.yaml
-
-  # AWS
-  if [ -f ${HOSTFS}/etc/cloudbender/clusterBackup.passphrase ]; then
-    if [ -f ${HOSTFS}/usr/local/sbin/backup_control_plane.sh ]; then
-      mv ${HOSTFS}/usr/local/sbin/backup_control_plane.sh ${HOSTFS}/usr/local/sbin/backup_control_plane.disabled
-      echo "Disabled old cluster backup OS cronjob"
-    fi
-
-    # enable backup and awsIamAuth & multus
-    yq -i eval '
-      .api.awsIamAuth.enabled = "true" | .api.awsIamAuth.workerNodeRole = .workerNodeRole | .api.awsIamAuth.kubeAdminRole = .kubeAdminRole
-      | .api.serviceAccountIssuer = .serviceAccountIssuer | .api.apiAudiences = "istio-ca,sts.amazonaws.com"
-    ' ${HOSTFS}/etc/kubernetes/kubezero.yaml
-
-    export restic_repo=$(grep "export RESTIC_REPOSITORY" ${HOSTFS}/usr/local/sbin/backup_control_plane.disabled | sed -e 's/.*=//' | sed -e 's/"//g')
-    export restic_pw="$(cat ${HOSTFS}/etc/cloudbender/clusterBackup.passphrase)"
-    export REGION=$(kubectl get node $NODE_NAME -o yaml | yq eval '.metadata.labels."topology.kubernetes.io/region"' -)
-
-    if [ -n "$restic_repo" ]; then
-      yq -i eval '
-        .addons.clusterBackup.enabled = "true" | .addons.clusterBackup.repository = strenv(restic_repo) | .addons.clusterBackup.password = strenv(restic_pw)
-        | .addons.clusterBackup.image.tag = strenv(KUBE_VERSION)
-        | .addons.clusterBackup.extraEnv[0].name = "AWS_DEFAULT_REGION" | .addons.clusterBackup.extraEnv[0].value = strenv(REGION)
-      ' ${HOSTFS}/etc/kubernetes/kubezero.yaml
-    fi
-  fi
-
   echo "All done."
 
-elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
+elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
 
   render_kubeadm
 
+  # Ensure clean slate if bootstrap, restore PKI otherwise
   if [[ "$1" =~ "^(bootstrap)$" ]]; then
-    # Create all certs during bootstrap
-    _kubeadm init phase certs all
-
+    rm -rf ${HOSTFS}/var/lib/etcd/member
   else
-    # Recert certificates for THIS node
-    rm -f ${HOSTFS}/etc/kubernetes/pki/etcd/peer.* ${HOSTFS}/etc/kubernetes/pki/etcd/server.* ${HOSTFS}/etc/kubernetes/pki/apiserver.*
-    _kubeadm init phase certs etcd-server
-    _kubeadm init phase certs etcd-peer
-    _kubeadm init phase certs apiserver
+    restic restore latest --no-lock -t / --tag $VERSION
+
+    # Make last etcd snapshot available
+    cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
+
+    # Put PKI in place
+    cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
+
+    # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
+    cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
+
+    # etcd needs to resync during join
+    if [[ "$1" =~ "^(restore)$" ]]; then
+      # Only restore etcd data set if none exists already
+      if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
+        etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
+          --name $ETCD_NODENAME \
+          --data-dir="${HOSTFS}/var/lib/etcd" \
+          --initial-cluster-token etcd-${CLUSTERNAME} \
+          --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
+          --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
+      fi
+    fi
   fi
 
+  # Delete old node certs in case they are around
+  rm -f ${HOSTFS}/etc/kubernetes/pki/etcd/peer.* ${HOSTFS}/etc/kubernetes/pki/etcd/server.* ${HOSTFS}/etc/kubernetes/pki/etcd/healthcheck-client.* \
+    ${HOSTFS}/etc/kubernetes/pki/apiserver* ${HOSTFS}/etc/kubernetes/pki/front-proxy-client.*
+
+  # Issue all certs first, needed for eg. aws-iam-authenticator setup
+  _kubeadm init phase certs all
+
   pre_kubeadm
 
-  if [[ "$1" =~ "^(join)$" ]]; then
+  # Pull all images
+  _kubeadm config images pull
+
   _kubeadm init phase preflight
   _kubeadm init phase kubeconfig all
-  _kubeadm init phase kubelet-start
 
-  # flush etcd data directory from restore
-  rm -rf ${HOSTFS}/var/lib/etcd/member
-
+  if [[ "$1" =~ "^(join)$" ]]; then
     # get current running etcd pods for etcdctl commands
     while true; do
       etcd_endpoints=$(kubectl get pods -n kube-system -l component=etcd -o yaml | \
@@ -250,14 +233,30 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
       sleep 3
     done
 
-    # is our $ETCD_NODENAME already in the etcd cluster ?
-    # Remove former self first
+    # if we are NOT member already, flush etcd to be able to join
     MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
-    [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
 
+    # Failsafe / etcd on ephemeral: we were a member but our dataset is missing
+    # -> remove former self so we can re-join
+    if [ -n "$MY_ID" -a ! -d ${HOSTFS}/var/lib/etcd/member ]; then
+      # Remove former self first
+      [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
+      MY_ID=""
+    fi
+
+    if [ -z "$MY_ID" ]; then
+      # flush etcd data directory from restore
+      rm -rf ${HOSTFS}/var/lib/etcd/member
+
       # Announce new etcd member and capture ETCD_INITIAL_CLUSTER, retry needed in case another node joining causes temp quorum loss
       ETCD_ENVS=$(retry 12 5 5 etcdctl member add $ETCD_NODENAME --peer-urls="https://${ETCD_NODENAME}:2380" --endpoints=$etcd_endpoints)
       export $(echo "$ETCD_ENVS" | grep ETCD_INITIAL_CLUSTER= | sed -e 's/"//g')
+    else
+      # build initial_cluster string from running cluster
+      _cluster=$(etcdctl member list --endpoints=$etcd_endpoints -w json | jq -r '.members[] | "\(.name)=\(.peerURLs[]),"')
+      export ETCD_INITIAL_CLUSTER=$(echo ${_cluster%%,} | sed -e 's/ //g')
+    fi
 
     # Patch kubezero.yaml and re-render to get etcd manifest patched
     yq eval -i '.etcd.state = "existing"
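The else branch rebuilds ETCD_INITIAL_CLUSTER from the live member list when this node is already registered. A worked example of that jq/sed construction against a hypothetical two-member cluster instead of a live etcdctl call:

```sh
# Stand-in for: etcdctl member list --endpoints=$etcd_endpoints -w json
_cluster=$(jq -r '.members[] | "\(.name)=\(.peerURLs[]),"' <<'EOF'
{"members":[
  {"name":"etcd0","peerURLs":["https://etcd0:2380"]},
  {"name":"etcd1","peerURLs":["https://etcd1:2380"]}
]}
EOF
)
echo ${_cluster%%,} | sed -e 's/ //g'
# -> etcd0=https://etcd0:2380,etcd1=https://etcd1:2380
```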
@@ -265,21 +264,36 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
     ' ${HOSTFS}/etc/kubernetes/kubezero.yaml
     render_kubeadm
 
-    # Generate our advanced etcd yaml
-    _kubeadm init phase etcd local --experimental-patches /tmp/patches
-
-    _kubeadm init phase control-plane all --experimental-patches /tmp/patches
-    _kubeadm init phase mark-control-plane
-    _kubeadm init phase kubelet-finalize all
-
-  else
-    _kubeadm init --experimental-patches /tmp/patches --skip-token-print
+    # Delete any former self in case forseti did not delete yet
+    kubectl delete node ${NODENAME} --wait=true || true
   fi
 
+  # Generate our custom etcd yaml
+  _kubeadm init phase etcd local
+  _kubeadm init phase control-plane all
+
+  _kubeadm init phase kubelet-start
+
   cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
 
   # Wait for api to be online
-  retry 0 10 30 kubectl cluster-info --request-timeout 3
+  echo "Waiting for Kubernetes API to be online ..."
+  retry 0 5 30 kubectl cluster-info --request-timeout 3 >/dev/null
+
+  if [[ ! "$1" =~ "^(join)$" ]]; then
+    _kubeadm init phase upload-config all
+    _kubeadm init phase upload-certs --skip-certificate-key-print
+
+    # This sets up the ClusterRoleBindings to allow bootstrap nodes to create CSRs etc.
+    _kubeadm init phase bootstrap-token --skip-token-print
+  fi
+
+  _kubeadm init phase mark-control-plane
+  _kubeadm init phase kubelet-finalize all
+
+  if [[ ! "$1" =~ "^(join)$" ]]; then
+    _kubeadm init phase addon all
+  fi
 
   # Ensure aws-iam-authenticator secret is in place
   if [ "$AWS_IAM_AUTH" == "true" ]; then
@@ -293,17 +307,15 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
   fi
 
   # Install some basics on bootstrap and join for 1.21 to get new modules in place
-  if [[ "$1" =~ "^(bootstrap|join|recover)$" ]]; then
-    helm repo add kubezero https://cdn.zero-downtime.net/charts/
-
+  if [[ "$1" =~ "^(bootstrap|join|restore)$" ]]; then
     # network
     yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
-    helm template kubezero/kubezero-network --version $NETWORK_VERSION --namespace kube-system --include-crds --name-template network \
+    helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
       -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
 
     # addons
     yq eval '.addons // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
-    helm template kubezero/kubezero-addons --version $ADDONS_VERSION --namespace kube-system --include-crds --name-template addons \
+    helm template $CHARTS/kubezero-addons --namespace kube-system --include-crds --name-template addons \
      -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
   fi
 
@@ -314,8 +326,6 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
 
 # Since 1.21 we only need to backup etcd + /etc/kubernetes/pki !
 elif [ "$1" == 'backup' ]; then
-  mkdir -p ${WORKDIR}
-
   restic snapshots || restic init || exit 1
 
   etcdctl --endpoints=https://${ETCD_NODENAME}:2379 snapshot save ${WORKDIR}/etcd_snapshot
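restic snapshots/init/backup only work once a repository and password are present in the environment; in KubeZero those come from the clusterBackup addon configuration rather than from this hunk. A purely illustrative setup with placeholder values:

```sh
# Hypothetical repository and passphrase; substitute the real clusterBackup settings.
export RESTIC_REPOSITORY="s3:https://s3.amazonaws.com/example-bucket/cluster-backup"
export RESTIC_PASSWORD="example-passphrase"
restic snapshots || restic init
```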
@@ -340,32 +350,11 @@ elif [ "$1" == 'backup' ]; then
   etcdctl --endpoints=https://${ETCD_NODENAME}:2379 defrag
 
 
-elif [ "$1" == 'restore' ]; then
-  mkdir -p ${WORKDIR}
-
-  restic restore latest --no-lock -t / --tag $VERSION
-
-  # Make last etcd snapshot available
-  cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
-
-  # Put PKI in place
-  cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
-
-  # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
-  cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
-
-  etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
-    --name $ETCD_NODENAME \
-    --data-dir="${HOSTFS}/var/lib/etcd" \
-    --initial-cluster-token etcd-${CLUSTERNAME} \
-    --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
-    --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
-
-  echo "Backup restored."
-
 
 elif [ "$1" == 'debug_shell' ]; then
   echo "Entering debug shell"
+
+  printf "For manual etcdctl commands use:\n # export ETCDCTL_ENDPOINTS=$ETCD_NODENAME:2379\n"
 
   /bin/sh
 
 else
@@ -8,6 +8,7 @@ VERSION="v1.21.9"
 unset AWS_DEFAULT_PROFILE
 
 nodes=$(kubectl get nodes -l node-role.kubernetes.io/control-plane -o json | jq .items[].metadata.name -r)
+nodes=""
 for node in $nodes; do
   echo "Deploying node upgrade job on $node..."