KubeZero/admin/kubezero.sh

#!/bin/bash
set -eu -o pipefail
DEBUG=${DEBUG:-""}
LOG=""
if [ -n "$DEBUG" ]; then
  set -x
  LOG="--v=5"
fi
# include helm lib
. /var/lib/kubezero/libhelm.sh
# Export vars to ease use in debug_shell etc
export WORKDIR=/tmp/kubezero
export HOSTFS=/host
export CHARTS=/charts
export KUBE_VERSION=$(kubeadm version -o json | jq -r .clientVersion.gitVersion)
export KUBE_VERSION_MINOR=$(echo $KUBE_VERSION | sed -e 's/\.[0-9]*$//')
export KUBECONFIG="${HOSTFS}/root/.kube/config"
# etcd
export ETCDCTL_API=3
export ETCDCTL_CACERT=${HOSTFS}/etc/kubernetes/pki/etcd/ca.crt
export ETCDCTL_CERT=${HOSTFS}/etc/kubernetes/pki/apiserver-etcd-client.crt
export ETCDCTL_KEY=${HOSTFS}/etc/kubernetes/pki/apiserver-etcd-client.key
mkdir -p ${WORKDIR}
# Generic retry utility
retry() {
  local tries=$1
  local waitfor=$2
  local timeout=$3
  shift 3

  while true; do
    type -tf $1 >/dev/null && { timeout $timeout $@ && return; } || { $@ && return; }
    let tries=$tries-1
    [ $tries -eq 0 ] && return 1
    sleep $waitfor
  done
}
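# Example (illustrative): retry 10 5 30 kubectl get nodes
#   -> up to 10 attempts, sleeping 5s between attempts; external commands are wrapped in
#      timeout(1) (here 30s), shell functions run without a timeout.
# Note: with tries=0 the counter never hits 0, so the command is retried indefinitely
# (used below for the API readiness check).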
_kubeadm() {
  kubeadm $@ --config /etc/kubernetes/kubeadm.yaml --rootfs ${HOSTFS} $LOG
}
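# Example: _kubeadm init phase certs all
#   expands to: kubeadm init phase certs all --config /etc/kubernetes/kubeadm.yaml --rootfs /host (plus --v=5 with DEBUG set)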
# Render cluster config
render_kubeadm() {
  local phase=$1

  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} \
    -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml \
    --set patches=/etc/kubernetes/patches

  # Assemble kubeadm config
  cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
  for f in Cluster KubeProxy Kubelet; do
    # echo "---" >> /etc/kubernetes/kubeadm.yaml
    cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
  done

  if [[ "$phase" =~ ^(bootstrap|join|restore)$ ]]; then
    cat ${WORKDIR}/kubeadm/templates/InitConfiguration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
  fi

  # "uncloak" the json patches after they got processed by helm
  for s in apiserver controller-manager scheduler; do
    yq eval '.json' ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml > /tmp/_tmp.yaml && \
      mv /tmp/_tmp.yaml ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml
  done
}
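# Illustration (hypothetical patch content): a "cloaked" patch rendered by helm looks roughly like
#   json: |
#     [{"op": "add", "path": "/spec/containers/0/command/-", "value": "--some-flag=true"}]
# and the yq '.json' step above unwraps it so kubeadm receives the raw JSON patch document.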
parse_kubezero() {
  export CLUSTERNAME=$(yq eval '.global.clusterName // .clusterName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
  export PLATFORM=$(yq eval '.global.platform // "nocloud"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
  export HIGHAVAILABLE=$(yq eval '.global.highAvailable // .highAvailable // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
  export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
  export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
  export PROVIDER_ID=$(yq eval '.providerID // ""' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
  export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
}
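# For reference, a minimal kubeadm-values.yaml sketch matching the keys parsed above
# (all values are made-up examples, not defaults):
#   global:
#     clusterName: example-cluster
#     platform: aws
#     highAvailable: "true"
#   etcd:
#     nodeName: etcd0.example.com
#   nodeName: node0.example.com
#   providerID: aws:///eu-central-1a/i-0123456789abcdef0
#   api:
#     awsIamAuth:
#       enabled: "true"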
# Shared steps before calling kubeadm
pre_kubeadm() {
  # update all apiserver addons first
  cp -r ${WORKDIR}/kubeadm/templates/apiserver ${HOSTFS}/etc/kubernetes

  # aws-iam-authenticator enabled ?
  if [ "$AWS_IAM_AUTH" == "true" ]; then
    # Initialize webhook
    if [ ! -f ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt ]; then
      ${HOSTFS}/usr/bin/aws-iam-authenticator init -i ${CLUSTERNAME}
      mv key.pem ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key
      mv cert.pem ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
    fi

    # Patch the aws-iam-authenticator config with the actual cert.pem
    yq eval -Mi ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt | base64 -w0)\"" ${HOSTFS}/etc/kubernetes/apiserver/aws-iam-authenticator.yaml
  fi

  # copy patches to host to make --rootfs of kubeadm work
  cp -r ${WORKDIR}/kubeadm/templates/patches ${HOSTFS}/etc/kubernetes
}
# Shared steps after calling kubeadm
post_kubeadm() {
  # KubeZero resources
  for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
    kubectl apply -f $f $LOG
  done

  # Patch coreDNS addon, ideally we would prevent kubeadm from resetting coreDNS to its defaults
  kubectl patch deployment coredns -n kube-system --patch-file ${WORKDIR}/kubeadm/templates/patches/coredns0.yaml $LOG
}
kubeadm_upgrade() {
  # pre upgrade hook

  ### Remove with 1.31
  # migrate kubezero CM to kubezero NS
  # migrate ArgoCD app from values to valuesObject
  create_ns kubezero

  if [ "$ARGOCD" == "True" ]; then
    kubectl get app kubezero -n argocd -o yaml > $WORKDIR/kubezero-argo-app.yaml
    if [ "$(yq '(.spec.source.helm | has "values")' $WORKDIR/kubezero-argo-app.yaml)" == "true" ]; then
      yq '.spec.source.helm.valuesObject = (.spec.source.helm.values | from_yaml)' \
        $WORKDIR/kubezero-argo-app.yaml | kubectl apply --server-side --force-conflicts -f -
      kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/spec/source/helm/values"}]'

      kubectl delete cm kubezero-values -n kube-system > /dev/null || true
      kubectl create configmap -n kubezero kubezero-values || true
    fi
  else
    kubectl get cm kubezero-values -n kubezero > /dev/null || \
      { kubectl get cm kubezero-values -n kube-system -o yaml | \
        sed 's/^  namespace: kube-system/  namespace: kubezero/' | \
        kubectl create -f - && \
        kubectl delete cm kubezero-values -n kube-system ; }
  fi
  ###
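  # For illustration only (hypothetical snippet): the yq transform above turns a plain-text
  #   spec.source.helm.values: |
  #     global:
  #       clusterName: example-cluster
  # into a structured
  #   spec.source.helm.valuesObject:
  #     global:
  #       clusterName: example-cluster
  # before the old "values" key is dropped via the JSON patch.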
  # get current values, argo app over cm
  get_kubezero_values $ARGOCD

  # tumble new config through migrate.py
  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml

  # Update kubezero-values CM
  kubectl get cm -n kubezero kubezero-values -o=yaml | \
    yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
    kubectl apply --server-side --force-conflicts -f -

  if [ "$ARGOCD" == "True" ]; then
    # update argo app
    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
    kubectl get application kubezero -n argocd -o yaml | \
      yq '.spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' \
      > $WORKDIR/new-argocd-app.yaml
    kubectl apply --server-side --force-conflicts -f $WORKDIR/new-argocd-app.yaml

    # finally remove annotation to allow argo to sync again
    kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
  fi

  # Local node upgrade
  render_kubeadm upgrade

  pre_kubeadm

  # Upgrade - we upload the new config first so we can use --patches during 1.30
  _kubeadm init phase upload-config kubeadm
  kubeadm upgrade apply --yes --patches /etc/kubernetes/patches $KUBE_VERSION --rootfs ${HOSTFS} $LOG

  post_kubeadm

  # install re-certed kubectl config for root
  cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config

  # post upgrade

  # Update the kubelet-config CM to point the kubelet at the containerd socket
  kubectl get cm -n kube-system kubelet-config -o=yaml | \
    yq e '.data.kubelet' | yq e '.containerRuntimeEndpoint = "unix:///run/containerd/containerd.sock"' > $WORKDIR/new-kubelet.cm

  kubectl get cm -n kube-system kubelet-config -o=yaml | \
    yq e '.data.kubelet |= load_str("/tmp/kubezero/new-kubelet.cm")' | \
    kubectl apply --server-side --force-conflicts -f -

  # Cleanup after kubeadm on the host
  rm -rf ${HOSTFS}/etc/kubernetes/tmp

  echo "Successfully upgraded kubeadm control plane."

  # TODO
  # Send Notification currently done via CloudBender -> SNS -> Slack
  # Better deploy https://github.com/opsgenie/kubernetes-event-exporter and set proper routes and labels on this Job

  # Removed:
  # - update oidc do we need that ?
}
control_plane_node() {
  CMD=$1

  render_kubeadm $CMD

  # Ensure clean slate if bootstrap, restore PKI otherwise
  if [[ "$CMD" =~ ^(bootstrap)$ ]]; then
    rm -rf ${HOSTFS}/var/lib/etcd/member

  else
    # restore latest backup
    retry 10 60 30 restic restore latest --no-lock -t / # --tag $KUBE_VERSION_MINOR

    # get timestamp from latest snap for debug / message
    # we need a way to surface this info to eg. Slack
    #snapTime="$(restic snapshots latest --json | jq -r '.[].time')"

    # Make last etcd snapshot available
    cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes

    # Put PKI in place
    cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes

    # Always use the kubeadm kubectl config to avoid a chicken-and-egg problem with custom auth hooks
    cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config

    # Only restore etcd data during "restore" and when none exists already
    if [[ "$CMD" =~ ^(restore)$ ]]; then
      if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
        etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
          --name $ETCD_NODENAME \
          --data-dir="${HOSTFS}/var/lib/etcd" \
          --initial-cluster-token etcd-${CLUSTERNAME} \
          --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
          --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
      fi
    fi
  fi
  # Delete old node certs in case they are around
  rm -f ${HOSTFS}/etc/kubernetes/pki/etcd/peer.* ${HOSTFS}/etc/kubernetes/pki/etcd/server.* ${HOSTFS}/etc/kubernetes/pki/etcd/healthcheck-client.* \
    ${HOSTFS}/etc/kubernetes/pki/apiserver* ${HOSTFS}/etc/kubernetes/pki/front-proxy-client.*

  # Issue all certs first, needed for eg. aws-iam-authenticator setup
  _kubeadm init phase certs all

  pre_kubeadm

  # Pull all images
  _kubeadm config images pull

  _kubeadm init phase preflight
  _kubeadm init phase kubeconfig all

  if [[ "$CMD" =~ ^(join)$ ]]; then
    # Delete any former self in case forseti has not deleted it yet
    kubectl delete node ${NODENAME} --wait=true || true

    # Wait for all pods to be deleted, otherwise we end up with stale pods, eg. kube-proxy
    kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}

    # get current running etcd pods for etcdctl commands
    while true; do
      etcd_endpoints=$(kubectl get pods -n kube-system -l component=etcd -o yaml | \
        yq eval '.items[].metadata.annotations."kubeadm.kubernetes.io/etcd.advertise-client-urls"' - | tr '\n' ',' | sed -e 's/,$//')
      [[ $etcd_endpoints =~ ^https:// ]] && break
      sleep 3
    done
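    # etcd_endpoints ends up as a comma-separated list, e.g. (hypothetical values):
    #   https://etcd0.example.com:2379,https://etcd1.example.com:2379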
    # see if we are a former member and remove our former self if so
    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
    [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints

    # flush the etcd data directory, as joining with previous storage seems flaky, especially during etcd version upgrades
    rm -rf ${HOSTFS}/var/lib/etcd/member

    # Announce the new etcd member and capture ETCD_INITIAL_CLUSTER; retry needed in case another node joining causes temporary quorum loss
    ETCD_ENVS=$(retry 12 5 5 etcdctl member add $ETCD_NODENAME --peer-urls="https://${ETCD_NODENAME}:2380" --endpoints=$etcd_endpoints)
    export $(echo "$ETCD_ENVS" | grep ETCD_INITIAL_CLUSTER= | sed -e 's/"//g')

    # Patch kubeadm-values.yaml and re-render to get the etcd manifest patched
    yq eval -i '.etcd.state = "existing"
      | .etcd.initialCluster = strenv(ETCD_INITIAL_CLUSTER)
      ' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
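    # "etcdctl member add" prints shell-style assignments; after stripping the quotes we export, e.g.
    # (hypothetical values):
    #   ETCD_INITIAL_CLUSTER=etcd0.example.com=https://etcd0.example.com:2380,etcd1.example.com=https://etcd1.example.com:2380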
    render_kubeadm $CMD
  fi

  # Generate our custom etcd yaml
  _kubeadm init phase etcd local
  _kubeadm init phase control-plane all

  _kubeadm init phase kubelet-start

  cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config

  # Wait for api to be online
  echo "Waiting for Kubernetes API to be online ..."
  retry 0 5 30 kubectl cluster-info --request-timeout 3 >/dev/null

  # Update providerID as underlying VM changed during restore
  if [[ "$CMD" =~ ^(restore)$ ]]; then
    if [ -n "$PROVIDER_ID" ]; then
      etcdhelper \
        -cacert ${HOSTFS}/etc/kubernetes/pki/etcd/ca.crt \
        -cert ${HOSTFS}/etc/kubernetes/pki/etcd/server.crt \
        -key ${HOSTFS}/etc/kubernetes/pki/etcd/server.key \
        -endpoint https://${ETCD_NODENAME}:2379 \
        change-provider-id ${NODENAME} $PROVIDER_ID
    fi
  fi

  _kubeadm init phase upload-config all

  if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
    # we share certs via the control plane backup
    #_kubeadm init phase upload-certs --skip-certificate-key-print

    # This sets up the ClusterRoleBindings to allow bootstrap nodes to create CSRs etc.
    _kubeadm init phase bootstrap-token --skip-token-print
  fi

  _kubeadm init phase mark-control-plane
  _kubeadm init phase kubelet-finalize all

  if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
    _kubeadm init phase addon all
  fi

  # Ensure aws-iam-authenticator secret is in place
  if [ "$AWS_IAM_AUTH" == "true" ]; then
    kubectl get secrets -n kube-system aws-iam-certs || \
      kubectl create secret generic aws-iam-certs -n kube-system \
        --from-file=key.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key \
        --from-file=cert.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt

    # Store aws-iam-auth admin on SSM
    yq eval -M ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/ca.crt | base64 -w0)\"" ${WORKDIR}/kubeadm/templates/admin-aws-iam.yaml > ${HOSTFS}/etc/kubernetes/admin-aws-iam.yaml
  fi

  post_kubeadm

  echo "${CMD}ed cluster $CLUSTERNAME successfully."
}
apply_module() {
  MODULES=$1

  get_kubezero_values $ARGOCD

  # Always use embedded kubezero chart
  helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR

  # CRDs first
  for t in $MODULES; do
    _helm crds $t
  done

  for t in $MODULES; do
    _helm apply $t
  done

  echo "Applied KubeZero modules: $MODULES"
}
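# Example invocation (illustrative module names):
#   apply_module "network addons"
# renders the embedded kubezero chart once, then runs "_helm crds" and "_helm apply" per module.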
delete_module() {
  MODULES=$1

  get_kubezero_values $ARGOCD

  # Always use embedded kubezero chart
  helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR

  for t in $MODULES; do
    _helm delete $t
  done

  echo "Deleted KubeZero modules: $MODULES. Potential CRDs must be removed manually."
}
# backup etcd + /etc/kubernetes/pki
backup() {
  # Display all ENVs, careful this exposes the password !
  [ -n "$DEBUG" ] && env

  restic snapshots || restic init || exit 1

  CV=$(kubectl version -o json | jq .serverVersion.minor -r)
  let PCV=$CV-1

  CLUSTER_VERSION="v1.$CV"
  PREVIOUS_VERSION="v1.$PCV"
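  # e.g. with a v1.30.x API server: CV=30, PCV=29 -> CLUSTER_VERSION="v1.30", PREVIOUS_VERSION="v1.29"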
  etcdctl --endpoints=https://${ETCD_NODENAME}:2379 snapshot save ${WORKDIR}/etcd_snapshot

  # pki & cluster-admin access
  cp -r ${HOSTFS}/etc/kubernetes/pki ${WORKDIR}
  cp ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}
  cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${WORKDIR}

  # Backup via restic
  restic backup ${WORKDIR} -H $CLUSTERNAME --tag $CLUSTER_VERSION

  echo "Backup complete."

  # Remove backups from pre-previous versions
  restic forget --keep-tag $CLUSTER_VERSION --keep-tag $PREVIOUS_VERSION --prune

  # Regular retention
  restic forget --keep-hourly 24 --keep-daily ${RESTIC_RETENTION:-7} --prune

  # Defrag etcd backend
  etcdctl --endpoints=https://${ETCD_NODENAME}:2379 --command-timeout=60s defrag
}
debug_shell() {
  echo "Entering debug shell"

  printf "For manual etcdctl commands use:\n # export ETCDCTL_ENDPOINTS=$ETCD_NODENAME:2379\n"

  bash
}
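# Typical invocations (a sketch based on the task dispatch below; one or more tasks may be passed,
# module names are illustrative):
#   kubezero.sh bootstrap                   # bring up the first control plane node
#   kubezero.sh join                        # join an additional control plane node
#   kubezero.sh backup                      # etcd snapshot + PKI via restic
#   kubezero.sh apply_network apply_addons  # apply selected KubeZero modules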
# First parse kubeadm-values.yaml
parse_kubezero

# Execute tasks
for t in $@; do
  case "$t" in
    bootstrap) control_plane_node bootstrap;;
    join) control_plane_node join;;
    restore) control_plane_node restore;;
    kubeadm_upgrade)
      ARGOCD=$(argo_used)
      kubeadm_upgrade;;
    apply_*)
      ARGOCD=$(argo_used)
      apply_module "${t##apply_}";;
    delete_*)
      ARGOCD=$(argo_used)
      delete_module "${t##delete_}";;
    backup) backup;;
    debug_shell) debug_shell;;
    *) echo "Unknown command: '$t'";;
  esac
done