#!/bin/bash

# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"

export HELM_SECRETS_BACKEND="vals"

LOCAL_DEV=${LOCAL_DEV:-""}

# Retries the given command every 3s, up to 100 times (max. 300s)
function wait_for() {
  local TRIES=0
  while true; do
    eval " $@" && break
    [ $TRIES -eq 100 ] && return 1
    let TRIES=$TRIES+1
    sleep 3
  done
}
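
# Usage sketch (hypothetical resource): the arguments are eval'ed, so
# redirections must be escaped to apply inside the retried command:
#   wait_for kubectl get crd applications.argoproj.io 2\>/dev/null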

# Resolve a chart reference: local chart tree during development,
# otherwise the chart name plus the public repo flag
function chart_location() {
  if [ -n "$LOCAL_DEV" ]; then
    echo $CHARTS/$1
  else
    echo "$1 --repo https://cdn.zero-downtime.net/charts"
  fi
}
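
# Example (hypothetical chart name); the output is meant to be spliced,
# unquoted, into a helm command line:
#   helm template $(chart_location kubezero-network) ...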

# Detect whether ArgoCD manages this cluster
function argo_used() {
  kubectl get application kubezero -n argocd >/dev/null \
    && echo "True" || echo "False"
}
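
# Usage sketch:
#   ARGOCD=$(argo_used)
#   [ "$ARGOCD" == "True" ] && echo "ArgoCD is managing this cluster"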

# Get kubezero-values from ArgoCD if available, otherwise from the in-cluster ConfigMap
function get_kubezero_values() {
  local argo=${1:-"False"}

  if [ "$argo" == "True" ]; then
    kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml
  else
    kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml
  fi
}
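
# Typical call chain (sketch):
#   argo=$(argo_used)
#   get_kubezero_values $argo   # -> ${WORKDIR}/kubezero-values.yaml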

# Block ArgoCD from syncing by installing a permanent deny sync window
function disable_argo() {
  cat > _argoapp_patch.yaml <<EOF
spec:
  syncWindows:
  - kind: deny
    schedule: '0 * * * *'
    duration: 24h
    namespaces:
    - '*'
EOF
  kubectl patch appproject kubezero -n argocd --patch-file _argoapp_patch.yaml --type=merge && rm _argoapp_patch.yaml
  echo "Enabled deny sync window for ArgoCD project kubezero"
}

# Remove the deny sync window again
function enable_argo() {
  kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/spec/syncWindows"}]' || true
  echo "Removed deny sync window for ArgoCD project kubezero"
}
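
# These two functions are meant to bracket maintenance work (sketch):
#   disable_argo
#   ... perform upgrade steps ...
#   enable_argo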

# Count pods in a namespace that are neither Running nor Succeeded
function cntFailedPods() {
  NS=$1

  NR=$(kubectl get pods -n $NS --field-selector="status.phase!=Succeeded,status.phase!=Running" -o json | jq '.items | length')
  echo $NR
}

# Block until all kube-system pods are Running or Succeeded
function waitSystemPodsRunning() {
  while true; do
    [ "$(cntFailedPods kube-system)" -eq 0 ] && break
    sleep 3
  done
}
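
# Note: this loops forever if a pod never recovers; a bounded variant could
# reuse wait_for (sketch):
#   wait_for '[ "$(cntFailedPods kube-system)" -eq 0 ]'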

# Check the sync status of an ArgoCD application
function argo_app_synced() {
  APP=$1

  # Ensure we are synced otherwise bail out
  status=$(kubectl get application $APP -n argocd -o yaml | yq .status.sync.status)
  if [ "$status" != "Synced" ]; then
    echo "ArgoCD Application $APP not 'Synced'!"
    return 1
  fi

  return 0
}
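
# Usage sketch: abort an upgrade early if the app has drifted:
#   argo_app_synced kubezero || exit 1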

# Make sure the namespace exists prior to calling helm as the create-namespace option doesn't work
function create_ns() {
  local namespace=$1
  if [ "$namespace" != "kube-system" ]; then
    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
  fi
}

# Delete a namespace, but never kube-system
function delete_ns() {
  local namespace=$1
  [ "$namespace" != "kube-system" ] && kubectl delete ns $namespace
}
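
# Both helpers deliberately treat kube-system as off-limits (hypothetical namespace):
#   create_ns istio-system   # created only if missing
#   delete_ns kube-system    # no-op by design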

# Extract CRDs via helm template and apply them server-side
function _crds() {
  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
#!/usr/bin/python3
import yaml
import sys

# map the legacy "=" value tag to None instead of failing on it
yaml.add_multi_constructor("tag:yaml.org,2002:value", lambda loader, suffix, node: None, Loader=yaml.SafeLoader)

for manifest in yaml.safe_load_all(sys.stdin):
  if manifest:
    if "kind" in manifest and manifest["kind"] == "CustomResourceDefinition":
      print("---")
      print(yaml.dump(manifest))' > $WORKDIR/crds.yaml

  # Only apply if there are actually any CRDs
  if [ -s $WORKDIR/crds.yaml ]; then
    [ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
  fi
}
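
# Note: _crds relies on its caller (_helm below) to have set $chart, $module,
# $namespace, $targetRevision and $WORKDIR/values.yaml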

# helm template | kubectl apply -f -
# confine to one namespace if possible
function render() {
  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
    | python3 -c '
#!/usr/bin/python3
import yaml
import sys

yaml.add_multi_constructor("tag:yaml.org,2002:value", lambda loader, suffix, node: None, Loader=yaml.SafeLoader)

for manifest in yaml.safe_load_all(sys.stdin):
  if manifest:
    if "kind" in manifest and manifest["kind"] == "CustomResourceDefinition":
      continue
    if "metadata" in manifest and "namespace" not in manifest["metadata"]:
      manifest["metadata"]["namespace"] = sys.argv[1]
    print("---")
    print(yaml.dump(manifest))' $namespace > $WORKDIR/helm.yaml
}
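
# The Python filter skips CRDs (handled by _crds) and defaults metadata.namespace
# for any manifest that does not set one, keeping the apply confined to $namespace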

# Render a KubeZero module and apply/delete it via kubectl
function _helm() {
  local action=$1
  local module=$2

  # check if module is even enabled and return if not
  [ ! -f $WORKDIR/kubezero/templates/${module}.yaml ] && { echo "Module $module disabled. No-op."; return 0; }

  local chart="$(yq eval '.spec.source.chart' $WORKDIR/kubezero/templates/${module}.yaml)"
  local namespace="$(yq eval '.spec.destination.namespace' $WORKDIR/kubezero/templates/${module}.yaml)"

  targetRevision=""
  if [ -z "$LOCAL_DEV" ]; then
    _version="$(yq eval '.spec.source.targetRevision' $WORKDIR/kubezero/templates/${module}.yaml)"
    [ -n "$_version" ] && targetRevision="--version $_version"
  fi

  yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml

  if [ $action == "crds" ]; then
    # Allow custom CRD handling
    declare -F ${module}-crds && ${module}-crds || _crds

  elif [ $action == "apply" ]; then
    echo "Using values for $action of module $module:"
    cat $WORKDIR/values.yaml

    # namespace must exist prior to apply
    create_ns $namespace

    # Optional pre hook
    declare -F ${module}-pre && ${module}-pre

    render
    kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?

    # Optional post hook
    declare -F ${module}-post && ${module}-post

  elif [ $action == "delete" ]; then
    render
    kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?

    # Delete dedicated namespace if not kube-system
    [ -n "$DELETE_NS" ] && delete_ns $namespace
  fi

  return 0
}
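
# Usage sketch (module name is illustrative):
#   _helm crds cert-manager     # apply CRDs only
#   _helm apply cert-manager    # render and server-side apply
#   _helm delete cert-manager   # delete rendered resources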

# Run a command once on every node via a privileged init container
function all_nodes_upgrade() {
  CMD="$1"

  echo "Deploying all-nodes upgrade DaemonSet (busybox)"
  cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kubezero-all-nodes-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  selector:
    matchLabels:
      name: kubezero-all-nodes-upgrade
  template:
    metadata:
      labels:
        name: kubezero-all-nodes-upgrade
    spec:
      hostNetwork: true
      hostIPC: true
      hostPID: true
      tolerations:
      - operator: Exists
      initContainers:
      - name: node-upgrade
        image: busybox
        command: ["/bin/sh"]
        args: ["-x", "-c", "$CMD" ]
        volumeMounts:
        - name: host
          mountPath: /host
        - name: hostproc
          mountPath: /hostproc
        securityContext:
          privileged: true
          capabilities:
            add: ["SYS_ADMIN"]
      containers:
      - name: node-upgrade-wait
        image: busybox
        command: ["sleep", "3600"]
      volumes:
      - name: host
        hostPath:
          path: /
          type: Directory
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
EOF

  kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
  kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}
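
# Usage sketch (hypothetical command; the host filesystem is mounted at /host):
#   all_nodes_upgrade "touch /host/etc/kubezero.upgraded"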

# Run the kubezero-admin container on a control-plane node to execute cluster admin tasks
function control_plane_upgrade() {
  TASKS="$1"

  [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"

  echo "Deploy cluster admin task: $TASKS"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubezero-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  hostNetwork: true
  hostIPC: true
  hostPID: true
  containers:
  - name: kubezero-admin
    image: public.ecr.aws/zero-downtime/kubezero-admin:${KUBE_VERSION}
    imagePullPolicy: Always
    command: ["kubezero.sh"]
    args: [$TASKS]
    env:
    - name: DEBUG
      value: "$DEBUG"
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    volumeMounts:
    - name: host
      mountPath: /host
    - name: workdir
      mountPath: /tmp
    securityContext:
      capabilities:
        add: ["SYS_CHROOT"]
  volumes:
  - name: host
    hostPath:
      path: /
      type: Directory
  - name: workdir
    emptyDir: {}
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
  restartPolicy: Never
EOF

  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
  while true; do
    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
    sleep 3
  done
  kubectl delete pod kubezero-upgrade -n kube-system
}
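
# Usage sketch (task names are illustrative):
#   control_plane_upgrade kubeadm_upgrade
#   control_plane_upgrade "apply_network, apply_addons"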