Compare commits


1 Commit

Author SHA1 Message Date
1f32d4547b chore(deps): update helm release falco to v4.21.3 2025-03-20 03:02:07 +00:00
47 changed files with 338 additions and 465 deletions

View File

@@ -14,7 +14,7 @@ include .ci/podman.mk
 Add subtree to your project:
 ```
-git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
+git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
 ```
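For reference, the full subtree workflow the two branches differ on, combining the add command from this README with the matching pull target from the Makefile below, assuming the master branch of the shared library:

```
# one-time: vendor the shared CI tooling under .ci
git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash

# afterwards: pull upstream changes as a single squashed merge commit
git subtree pull --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
```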

View File

@@ -41,8 +41,7 @@ for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
     _delete = True
     for tag in image["imageTags"]:
         # Look for at least one tag NOT being a SemVer dev tag
-        # untagged dev builds get tagged as <tag>-g<commit>
-        if "-g" not in tag and "dirty" not in tag:
+        if "-" not in tag:
             _delete = False
     if _delete:
         print("Deleting development image {}".format(image["imageTags"]))

View File

@@ -8,8 +8,8 @@ SHELL := bash
 .PHONY: all # All targets are accessible for user
 .DEFAULT: help # Running Make will run the help target
-# Parse version from latest git semver tag, use short commit otherwise
-GIT_TAG ?= $(shell git describe --tags --match v*.*.* --dirty 2>/dev/null || git describe --match="" --always --dirty 2>/dev/null)
+# Parse version from latest git semver tag
+GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 TAG ::= $(GIT_TAG)
@@ -85,7 +85,7 @@ rm-image:
 ## some useful tasks during development
 ci-pull-upstream: ## pull latest shared .ci subtree
-	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
+	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
 create-repo: ## create new AWS ECR public repository
 	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
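The two GIT_TAG variants differ only in their fallback: both prefer the latest semver tag, but the new one falls back to a bare short commit id instead of a git-describe pseudo-tag. Illustrative outputs, not taken from this repository:

```
# on or after a tag matching v*.*.*
git describe --tags --match 'v*.*.*'   # -> v1.2.3, or v1.2.3-4-g1f32d45 four commits later

# fallback when no matching tag exists
git rev-parse --short HEAD             # -> 1f32d45
```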

View File

@@ -5,9 +5,9 @@ FROM docker.io/alpine:${ALPINE_VERSION}
 ARG ALPINE_VERSION
 ARG KUBE_VERSION=1.31
-ARG SOPS_VERSION="3.10.1"
-ARG VALS_VERSION="0.40.1"
-ARG HELM_SECRETS_VERSION="4.6.3"
+ARG SOPS_VERSION="3.9.4"
+ARG VALS_VERSION="0.39.1"
+ARG HELM_SECRETS_VERSION="4.6.2"
 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@@ -24,7 +24,6 @@ RUN cd /etc/apk/keys && \
     py3-yaml \
     restic \
     helm \
-    apache2-utils \
     ytt@testing \
     etcd-ctl@edge-community \
     cri-tools@kubezero \

View File

@@ -19,7 +19,7 @@ KubeZero is a Kubernetes distribution providing an integrated container platform
 # Version / Support Matrix
 KubeZero releases track the same *minor* version of Kubernetes.
-Any 1.31.X-Y release of Kubezero supports any Kubernetes cluster 1.31.X.
+Any 1.30.X-Y release of Kubezero supports any Kubernetes cluster 1.30.X.
 KubeZero is distributed as a collection of versioned Helm charts, allowing custom upgrade schedules and module versions as needed.
@@ -28,15 +28,15 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custo
 gantt
     title KubeZero Support Timeline
     dateFormat YYYY-MM-DD
+    section 1.29
+    beta :129b, 2024-07-01, 2024-07-31
+    release :after 129b, 2024-11-30
     section 1.30
     beta :130b, 2024-09-01, 2024-10-31
-    release :after 130b, 2025-04-30
+    release :after 130b, 2025-02-28
     section 1.31
-    beta :131b, 2024-12-01, 2025-02-28
-    release :after 131b, 2025-06-30
-    section 1.32
-    beta :132b, 2025-04-01, 2025-05-19
-    release :after 132b, 2025-09-30
+    beta :131b, 2024-12-01, 2025-01-30
+    release :after 131b, 2025-04-30
 ```
 [Upstream release policy](https://kubernetes.io/releases/)
@@ -44,7 +44,7 @@ gantt
 # Components
 ## OS
-- all compute nodes are running on Alpine V3.21
+- all compute nodes are running on Alpine V3.20
 - 1 or 2 GB encrypted root file system
 - no external dependencies at boot time, apart from container registries
 - focused on security and minimal footprint

View File

@@ -1,44 +0,0 @@
-#!/bin/bash
-set -eEx
-set -o pipefail
-set -x
-
-VALUES=$1
-
-WORKDIR=$(mktemp -p /tmp -d kubezero.XXX)
-[ -z "$DEBUG" ] && trap 'rm -rf $WORKDIR' ERR EXIT
-
-SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-# shellcheck disable=SC1091
-. "$SCRIPT_DIR"/libhelm.sh
-CHARTS="$(dirname $SCRIPT_DIR)/charts"
-
-KUBE_VERSION="$(get_kube_version)"
-PLATFORM="$(get_kubezero_platform)"
-
-if [ -z "$KUBE_VERSION" ]; then
-  echo "Cannot contact cluster, cannot parse version!"
-  exit 1
-fi
-
-# Upload values into kubezero-values
-kubectl create ns kubezero || true
-kubectl create cm -n kubezero kubezero-values \
-  --from-file values.yaml=$VALUES || \
-  kubectl get cm -n kubezero kubezero-values -o=yaml | \
-    yq e ".data.\"values.yaml\" |= load_str($1)" | \
-    kubectl replace -f -
-
-### Main
-get_kubezero_values $ARGOCD
-
-# Always use embedded kubezero chart
-helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $KUBE_VERSION --name-template kubezero --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
-
-ARTIFACTS=(network addons cert-manager storage argo)
-for t in ${ARTIFACTS[@]}; do
-  _helm crds $t || true
-  _helm apply $t || true
-done
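The deleted script's ConfigMap upload is the usual create-or-replace idiom: try a plain create first, and if the object already exists, rewrite its values.yaml key and atomically replace it. A standalone sketch, with the values file path as an assumption:

```
VALUES=my-values.yaml

kubectl create ns kubezero || true
# create the ConfigMap, or rewrite the values.yaml key of the existing one
kubectl create cm -n kubezero kubezero-values --from-file values.yaml=$VALUES || \
  kubectl get cm -n kubezero kubezero-values -o=yaml | \
    yq e ".data.\"values.yaml\" |= load_str(\"$VALUES\")" | \
    kubectl replace -f -
```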

View File

@@ -9,23 +9,34 @@ ARGOCD="${3:-true}"
 LOCAL_DEV=1
+
+#VERSION="latest"
+KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
 WORKDIR=$(mktemp -p /tmp -d kubezero.XXX)
 [ -z "$DEBUG" ] && trap 'rm -rf $WORKDIR' ERR EXIT
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 # shellcheck disable=SC1091
 . "$SCRIPT_DIR"/libhelm.sh
 CHARTS="$(dirname $SCRIPT_DIR)/charts"
-KUBE_VERSION="$(get_kube_version)"
-PLATFORM="$(get_kubezero_platform)"
-
-if [ -z "$KUBE_VERSION" ]; then
-  echo "Cannot contact cluster, cannot parse version!"
-  exit 1
+# Guess platform from current context
+_auth_cmd=$(kubectl config view | yq .users[0].user.exec.command)
+if [ "$_auth_cmd" == "gke-gcloud-auth-plugin" ]; then
+  PLATFORM=gke
+elif [ "$_auth_cmd" == "aws-iam-authenticator" ]; then
+  PLATFORM=aws
+else
+  PLATFORM=nocloud
 fi
+
+parse_version() {
+  echo $([[ $1 =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]] && echo "${BASH_REMATCH[0]//v/}")
+}
+
+KUBE_VERSION=$(parse_version $KUBE_VERSION)
 ### Main
 get_kubezero_values $ARGOCD
@@ -34,7 +45,6 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $
 # Root KubeZero apply directly and exit
 if [ ${ARTIFACTS[0]} == "kubezero" ]; then
-  [ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
   kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
   exit $?
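The inlined parse_version keeps only the leading major.minor.patch of whatever kubectl reports, dropping the v prefix and any suffix. Illustrative inputs and outputs:

```
parse_version() {
  echo $([[ $1 =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]] && echo "${BASH_REMATCH[0]//v/}")
}

parse_version v1.30.4             # -> 1.30.4
parse_version v1.30.4-eks-ab12cd  # -> 1.30.4 (suffix dropped by the prefix-only match)
```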

View File

@@ -16,10 +16,7 @@ pre_control_plane_upgrade_cluster() {
 post_control_plane_upgrade_cluster() {
   # delete previous root app controlled by kubezero module
   kubectl delete application kubezero-git-sync -n argocd || true
-
-  # only patch appproject to keep SyncWindow in place
-  kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
-  kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
+  kubectl delete appproject kubezero -n argocd || true
 }

View File

@@ -111,42 +111,35 @@ post_kubeadm() {
 }
-# Migrate KubeZero Config to current version
-upgrade_kubezero_config() {
-  # get current values, argo app over cm
-  get_kubezero_values $ARGOCD
-
-  # tumble new config through migrate.py
-  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
-    && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
-
-  update_kubezero_cm
-
-  if [ "$ARGOCD" == "true" ]; then
-    # update argo app
-    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
-    kubectl get application kubezero -n argocd -o yaml | \
-      yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
-      > $WORKDIR/new-argocd-app.yaml
-    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
-  fi
-}
-
 # Control plane upgrade
-kubeadm_upgrade() {
+control_plane_upgrade() {
+  CMD=$1
+
   ARGOCD=$(argo_used)
   render_kubeadm upgrade
-  # Check if we already have all controllers on the current version
-  OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
-  # run control plane upgrade
-  if [ "$OLD_CONTROLLERS" != "0" ]; then
+  if [[ "$CMD" =~ ^(cluster)$ ]]; then
     pre_control_plane_upgrade_cluster
+    # get current values, argo app over cm
+    get_kubezero_values $ARGOCD
+
+    # tumble new config through migrate.py
+    migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
+      && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
+
+    update_kubezero_cm
+
+    if [ "$ARGOCD" == "true" ]; then
+      # update argo app
+      export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
+      kubectl get application kubezero -n argocd -o yaml | \
+        yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
+        > $WORKDIR/new-argocd-app.yaml
+      kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
+    fi
+
     pre_kubeadm
     _kubeadm init phase upload-config kubeadm
@@ -162,8 +155,7 @@ kubeadm_upgrade() {
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."
-  # All controllers already on current version
-  else
+  elif [[ "$CMD" =~ ^(final)$ ]]; then
     pre_cluster_upgrade_final
     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
@@ -328,7 +320,7 @@ apply_module() {
   get_kubezero_values $ARGOCD
   # Always use embedded kubezero chart
-  helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $KUBE_VERSION --name-template kubezero --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
+  helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
   # CRDs first
   for t in $MODULES; do
@@ -338,7 +330,6 @@ apply_module() {
   for t in $MODULES; do
     # apply/replace app of apps directly
     if [ $t == "kubezero" ]; then
-      [ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
       kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
     else
       #_helm apply $t
@@ -419,8 +410,12 @@ for t in $@; do
     bootstrap) control_plane_node bootstrap;;
     join) control_plane_node join;;
     restore) control_plane_node restore;;
-    upgrade_control_plane) kubeadm_upgrade;;
-    upgrade_kubezero) upgrade_kubezero_config;;
+    kubeadm_upgrade)
+      control_plane_upgrade cluster
+      ;;
+    finalize_cluster_upgrade)
+      control_plane_upgrade final
+      ;;
     apply_*)
       ARGOCD=$(argo_used)
       apply_module "${t##apply_}";;

View File

@@ -44,53 +44,10 @@ function field_manager() {
 }
-function get_kube_version() {
-  local git_version="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
-  echo $([[ $git_version =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]] && echo "${BASH_REMATCH[0]//v/}")
-}
-
-function get_kubezero_platform() {
-  _auth_cmd=$(kubectl config view | yq .users[0].user.exec.command)
-  if [ "$_auth_cmd" == "gke-gcloud-auth-plugin" ]; then
-    PLATFORM=gke
-  elif [ "$_auth_cmd" == "aws-iam-authenticator" ]; then
-    PLATFORM=aws
-  else
-    PLATFORM=nocloud
-  fi
-  echo $PLATFORM
-}
-
-function get_secret_val() {
-  local ns=$1
-  local secret=$2
-  local val=$(kubectl get secret -n $ns $secret -o yaml | yq ".data.\"$3\"")
-
-  if [ "$val" != "null" ]; then
-    echo -n $val | base64 -d -w0
-  else
-    echo ""
-  fi
-}
-
 function get_kubezero_secret() {
-  get_secret_val kubezero kubezero-secrets "$1"
-}
-
-function ensure_kubezero_secret_key() {
-  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
-  local key=""
-  local val=""
-
-  for key in $@; do
-    val=$(echo "$secret" | yq ".data.\"$key\"")
-    if [ "$val" == "null" ]; then
-      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
-    fi
-  done
+  export _key="$1"
+
+  kubectl get secrets -n kubezero kubezero-secrets -o yaml | yq '.data.[env(_key)]' | base64 -d -w0
 }
@@ -98,9 +55,7 @@ function set_kubezero_secret() {
   local key="$1"
   local val="$2"
-  if [ -n "$val" ]; then
-    kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n "$val" |base64 -w0)\" }}"
-  fi
+  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n $val |base64 -w0)\" }}"
 }
@@ -123,7 +78,6 @@ function update_kubezero_cm() {
   kubectl replace -f -
 }
-
 # sync kubezero-values CM from ArgoCD app
 function sync_kubezero_cm_from_argo() {
   get_kubezero_values true
@@ -186,7 +140,7 @@ function delete_ns() {
 # Extract crds via helm calls
 function crds() {
-  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
+  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
 #!/usr/bin/python3
 import yaml
 import sys
@@ -258,7 +212,7 @@ function _helm() {
   if [ $action == "crds" ]; then
     # Pre-crd hook
-    [ -f $WORKDIR/$chart/hooks.d/pre-crds.sh ] && . $WORKDIR/$chart/hooks.d/pre-crds.sh
+    [ -f $WORKDIR/$chart/hooks.d/pre-crds.sh ] && (cd $WORKDIR; bash ./$chart/hooks.d/pre-crds.sh)
     crds
@@ -270,7 +224,7 @@ function _helm() {
     create_ns $namespace
     # Optional pre hook
-    [ -f $WORKDIR/$chart/hooks.d/pre-install.sh ] && . $WORKDIR/$chart/hooks.d/pre-install.sh
+    [ -f $WORKDIR/$chart/hooks.d/pre-install.sh ] && (cd $WORKDIR; bash ./$chart/hooks.d/pre-install.sh)
     render
     [ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?
@@ -279,7 +233,7 @@ function _helm() {
     [ $action == "apply" -o $rc -ne 0 ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
     # Optional post hook
-    [ -f $WORKDIR/$chart/hooks.d/post-install.sh ] && . $WORKDIR/$chart/hooks.d/post-install.sh
+    [ -f $WORKDIR/$chart/hooks.d/post-install.sh ] && (cd $WORKDIR; bash ./$chart/hooks.d/post-install.sh)
   elif [ $action == "delete" ]; then
     render
@@ -292,7 +246,6 @@ function _helm() {
   return 0
 }
-
 function all_nodes_upgrade() {
   CMD="$1"
@@ -353,7 +306,7 @@ EOF
 }
-function admin_job() {
+function control_plane_upgrade() {
   TASKS="$1"
   [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
@@ -363,7 +316,7 @@ function admin_job() {
 apiVersion: v1
 kind: Pod
 metadata:
-  name: kubezero-admin-job
+  name: kubezero-upgrade
   namespace: kube-system
   labels:
     app: kubezero-upgrade
@@ -408,10 +361,10 @@ spec:
   restartPolicy: Never
 EOF
-  kubectl wait pod kubezero-admin-job -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
+  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
   while true; do
-    kubectl logs kubezero-admin-job -n kube-system -f 2>/dev/null && break
+    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
     sleep 3
   done
-  kubectl delete pod kubezero-admin-job -n kube-system
+  kubectl delete pod kubezero-upgrade -n kube-system
 }
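Together the slimmed-down helpers behave like a small key/value store on top of the kubezero-secrets Secret; a usage sketch, with a key name that appears elsewhere in this compare and an illustrative value:

```
# write: base64-encode the value and patch it into the Secret
set_kubezero_secret argo-cd.adminPassword 'S3cretValue'

# read it back: the new helper resolves the key via yq's env() lookup
get_kubezero_secret argo-cd.adminPassword
```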

View File

@@ -15,31 +15,37 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 ARGOCD=$(argo_used)
 echo "Checking that all pods in kube-system are running ..."
-waitSystemPodsRunning
+#waitSystemPodsRunning
 [ "$ARGOCD" == "true" ] && disable_argo
-admin_job "upgrade_control_plane, upgrade_kubezero"
+# Check if we already have all controllers on the current version
+OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
+
+# All controllers already on current version
+if [ "$OLD_CONTROLLERS" == "0" ]; then
+  control_plane_upgrade finalize_cluster_upgrade
+
+# Otherwise run control plane upgrade
+else
+  control_plane_upgrade kubeadm_upgrade
+fi
+
+echo "<Return> to continue"
+read -r
 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
 #[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
-#echo "<Return> to continue"
-#read -r
 # upgrade modules
-admin_job "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
+control_plane_upgrade "apply_kubezero apply_network, apply_addons, apply_storage, apply_operators"
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
 echo "Applying remaining KubeZero modules..."
-admin_job "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
-
-# we replace the project during v1.31 so disable again
-[ "$ARGOCD" == "true" ] && disable_argo
+control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
 # Final step is to commit the new argocd kubezero app
 kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP
@@ -51,12 +57,6 @@ while true; do
   sleep 1
 done
-echo "Once all controller nodes are running on $KUBE_VERSION, <return> to continue"
-read -r
-
-# Final control plane upgrades
-admin_job "upgrade_control_plane"
-
 echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
 echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."

View File

@@ -3,7 +3,7 @@ name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
 version: 0.8.13
-appVersion: v1.31
+appVersion: v1.30
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

View File

@@ -25,4 +25,3 @@
 README.md.gotmpl
 dashboards.yaml
 jsonnet
-update.sh

View File

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.3.2
+version: 0.3.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,15 +18,15 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: argo-events
-    version: 2.4.15
+    version: 2.4.13
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.8.23
+    version: 7.8.9
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-image-updater
-    version: 0.12.1
+    version: 0.12.0
     repository: https://argoproj.github.io/argo-helm
     condition: argocd-image-updater.enabled
 kubeVersion: ">= 1.30.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-argo
-![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square)
+![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square)
 KubeZero Argo - Events, Workflow, CD
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.30.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 7.8.23 |
-| https://argoproj.github.io/argo-helm | argo-events | 2.4.15 |
-| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.1 |
+| https://argoproj.github.io/argo-helm | argo-cd | 7.8.9 |
+| https://argoproj.github.io/argo-helm | argo-events | 2.4.13 |
+| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.0 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 ## Values
@@ -42,7 +42,6 @@ Kubernetes: `>= 1.30.0-0`
 | argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
 | argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
 | argo-cd.configs.params."server.insecure" | bool | `true` | |
-| argo-cd.configs.secret.argocdServerAdminPassword | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword"` | |
 | argo-cd.configs.secret.createSecret | bool | `false` | |
 | argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="` | |
 | argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
@@ -54,25 +53,30 @@ Kubernetes: `>= 1.30.0-0`
 | argo-cd.dex.enabled | bool | `false` | |
 | argo-cd.enabled | bool | `false` | |
 | argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
-| argo-cd.global.image.tag | string | `"v2.14.9-1"` | |
+| argo-cd.global.image.tag | string | `"v2.14.5"` | |
 | argo-cd.global.logging.format | string | `"json"` | |
 | argo-cd.global.networkPolicy.create | bool | `true` | |
 | argo-cd.istio.enabled | bool | `false` | |
 | argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
 | argo-cd.istio.ipBlocks | list | `[]` | |
-| argo-cd.kubezero.bootstrap | bool | `false` | deploy the KubeZero Project and GitSync Root App |
-| argo-cd.kubezero.password | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.password"` | |
+| argo-cd.kubezero.bootstrap | bool | `false` | |
 | argo-cd.kubezero.path | string | `"/"` | |
-| argo-cd.kubezero.repoUrl | string | `""` | |
-| argo-cd.kubezero.sshPrivateKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey"` | |
+| argo-cd.kubezero.repoUrl | string | `"https://git.my.org/thiscluster"` | |
 | argo-cd.kubezero.targetRevision | string | `"HEAD"` | |
-| argo-cd.kubezero.username | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.username"` | |
 | argo-cd.notifications.enabled | bool | `false` | |
 | argo-cd.redisSecretInit.enabled | bool | `false` | |
+| argo-cd.repoServer.clusterRoleRules.enabled | bool | `true` | |
+| argo-cd.repoServer.clusterRoleRules.rules[0].apiGroups[0] | string | `""` | |
+| argo-cd.repoServer.clusterRoleRules.rules[0].resources[0] | string | `"secrets"` | |
+| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[0] | string | `"get"` | |
+| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[1] | string | `"watch"` | |
+| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[2] | string | `"list"` | |
 | argo-cd.repoServer.metrics.enabled | bool | `false` | |
 | argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
+| argo-cd.repoServer.volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
+| argo-cd.repoServer.volumeMounts[0].name | string | `"kubeconfigs"` | |
 | argo-cd.repoServer.volumes[0].emptyDir | object | `{}` | |
-| argo-cd.repoServer.volumes[0].name | string | `"cmp-tmp"` | |
+| argo-cd.repoServer.volumes[0].name | string | `"kubeconfigs"` | |
 | argo-cd.server.metrics.enabled | bool | `false` | |
 | argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
 | argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |

charts/kubezero-argo/hooks.d/pre-install.sh Executable file → Normal file
View File

@@ -1,26 +1,6 @@
-# Bootstrap kubezero-git-sync app only if it doesn't exist yet
-kubectl get application kubezero-git-sync -n argocd || \
-  yq -i '.argo-cd.kubezero.bootstrap=true' $WORKDIR/values.yaml
-
-# Ensure we have an adminPassword or migrate existing one
-PW=$(get_kubezero_secret argo-cd.adminPassword)
-if [ -z "$PW" ]; then
-  # Check for existing password in actual secret
-  NEW_PW=$(get_secret_val argocd argocd-secret "admin.password")
-  if [ -z "$NEW_PW" ];then
-    ARGO_PWD=$(date +%s | sha256sum | base64 | head -c 12 ; echo)
-    NEW_PW=$(htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/')
-    set_kubezero_secret argo-cd.adminPasswordClear $ARGO_PWD
-  fi
-  set_kubezero_secret argo-cd.adminPassword "$NEW_PW"
-fi
-
-# Redis secret
-kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
-  --from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
-
-# required keys in kubezero-secrets, as --ignore-missing-values in helm-secrets doesn't work with vals ;-(
-ensure_kubezero_secret_key argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey
+#!/bin/sh
+
+# Bootstrap kubezero-git-sync app if it doesn't exist
+kubectl get application kubezero-git-sync -n argocd && rc=$? || rc=$?
+
+[ $rc != 0 ] && yq -i '.argo-cd.kubezero.bootstrap=true' values.yaml
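The removed password bootstrap relies on the htpasswd bcrypt recipe that also survives as a comment in the chart values below; as a standalone sketch:

```
# random 12-character clear-text password
ARGO_PWD=$(date +%s | sha256sum | base64 | head -c 12 ; echo)

# bcrypt hash in the $2a$ form ArgoCD expects (htpasswd emits $2y$, hence the sed)
NEW_PW=$(htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/')
```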

View File

@@ -0,0 +1,22 @@
+# KubeZero secrets
+#
+test: supergeheim
+secrets:
+  - name: argocd-secret
+    optional: false
+    data:
+      admin.password: test
+      admin.passwordMtime: now
+      server.secretkey: boohoo
+  - name: zero-downtime-gitea
+    optional: true
+    data:
+      name: zero-downtime-gitea
+      type: git
+      url: ssh://git@git.zero-downtime.net/quark/kube-grandnagus.git
+      sshPrivateKey: |
+        boohooKey
+    metadata:
+      labels:
+        argocd.argoproj.io/secret-type: repository

View File

@@ -1,13 +0,0 @@
-{{- if index .Values "argo-cd" "enabled" }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: argocd-secret
-  namespace: argocd
-  labels:
-    {{- include "kubezero-lib.labels" . | nindent 4 }}
-type: Opaque
-stringData:
-  admin.password: {{ index .Values "argo-cd" "configs" "secret" "argocdServerAdminPassword" }}
-  admin.passwordMtime: "2006-01-02T15:04:05Z"
-{{- end }}

View File

@@ -4,8 +4,6 @@ kind: Application
 metadata:
   name: kubezero-git-sync
   namespace: argocd
-  labels:
-    {{- include "kubezero-lib.labels" . | nindent 4 }}
   annotations:
     argocd.argoproj.io/sync-wave: "-20"
 spec:
@@ -19,15 +17,12 @@ spec:
     targetRevision: {{ .targetRevision }}
     path: {{ .path }}
 {{- end }}
-    plugin:
-      name: kubezero-git-sync
+    directory:
+      recurse: true
   syncPolicy:
     automated:
       prune: true
     syncOptions:
       - ServerSideApply=true
       - ApplyOutOfSyncOnly=true
-  info:
-    - name: "Source:"
-      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.31/"
 {{- end }}

View File

@@ -1,21 +0,0 @@
-{{- if index .Values "argo-cd" "kubezero" "repoUrl" }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: kubezero-git-sync
-  namespace: argocd
-  labels:
-    argocd.argoproj.io/secret-type: repository
-    {{- include "kubezero-lib.labels" . | nindent 4 }}
-type: Opaque
-stringData:
-  name: kubezero-git-sync
-  type: git
-  url: {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
-{{- if hasPrefix "https" (index .Values "argo-cd" "kubezero" "repoUrl") }}
-  username: {{ index .Values "argo-cd" "kubezero" "username" }}
-  password: {{ index .Values "argo-cd" "kubezero" "password" }}
-{{- else }}
-  sshPrivateKey: {{ index .Values "argo-cd" "kubezero" "sshPrivateKey" }}
-{{- end }}
-{{- end }}

View File

@@ -4,8 +4,6 @@ kind: AppProject
 metadata:
   name: kubezero
   namespace: argocd
-  labels:
-    {{- include "kubezero-lib.labels" . | nindent 4 }}
 spec:
   clusterResourceWhitelist:
     - group: '*'
@@ -17,10 +15,4 @@ spec:
   sourceRepos:
     - https://cdn.zero-downtime.net/charts
     - {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
-
-  syncWindows:
-    - kind: deny
-      schedule: '0 * * * *'
-      duration: 24h
-      namespaces:
-        - '*'
 {{- end }}

View File

@@ -25,7 +25,7 @@ argo-events:
     # do NOT use -alpine tag as the entrypoint differs
     versions:
       - version: 2.10.11
-        natsImage: nats:2.11.1-scratch
+        natsImage: nats:2.10.11-scratch
         metricsExporterImage: natsio/prometheus-nats-exporter:0.16.0
         configReloaderImage: natsio/nats-server-config-reloader:0.14.1
         startCommand: /nats-server
@@ -38,7 +38,7 @@ argo-cd:
       format: json
     image:
       repository: public.ecr.aws/zero-downtime/zdt-argocd
-      tag: v2.14.9-1
+      tag: v2.14.5
     networkPolicy:
       create: true
@@ -81,9 +81,10 @@ argo-cd:
     secret:
      createSecret: false
      # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/' | base64 -w0`
-      argocdServerAdminPassword: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword
+      # argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
+      # argocdServerAdminPassword: "ref+file://secrets.yaml#/test"
+      # argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
     ssh:
       extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
@@ -116,8 +117,14 @@ argo-cd:
       serviceMonitor:
         enabled: true
+    volumes:
+      - name: kubeconfigs
+        emptyDir: {}
+    volumeMounts:
+      - mountPath: /home/argocd/.kube
+        name: kubeconfigs
+
     # Allow vals to read internal secrets across all namespaces
-    # @ignored
     clusterRoleRules:
       enabled: true
       rules:
@@ -125,34 +132,6 @@ argo-cd:
         resources: ["secrets"]
         verbs: ["get", "watch", "list"]
-
-    # cmp kubezero-git-sync plugin
-    # @ignored
-    extraContainers:
-      - name: cmp-kubezero-git-sync
-        image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'
-        imagePullPolicy: '{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}'
-        command: ["/var/run/argocd/argocd-cmp-server"]
-        volumeMounts:
-          - mountPath: /var/run/argocd
-            name: var-files
-          - mountPath: /home/argocd/cmp-server/plugins
-            name: plugins
-          - mountPath: /tmp
-            name: cmp-tmp
-        securityContext:
-          runAsNonRoot: true
-          readOnlyRootFilesystem: true
-          runAsUser: 999
-          allowPrivilegeEscalation: false
-          seccompProfile:
-            type: RuntimeDefault
-          capabilities:
-            drop:
-              - ALL
-    volumes:
-      - name: cmp-tmp
-        emptyDir: {}
   server:
     # Rename former https port to grpc, works with istio + insecure
     service:
@@ -184,16 +163,12 @@ argo-cd:
     ipBlocks: []
   kubezero:
-    # -- deploy the KubeZero Project and GitSync Root App
+    # only set this once initially to prevent the circular dependency
     bootstrap: false
-    # valid git+ssh repository url
-    repoUrl: ""
+    repoUrl: "https://git.my.org/thiscluster"
     path: "/"
     targetRevision: HEAD
-    sshPrivateKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey
-    username: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.username
-    password: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.password
 argocd-image-updater:
   enabled: false
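The secretref+k8s:// URIs dropped above point at keys of the kubezero-secrets Secret and are resolved at render time by the vals/helm-secrets toolchain. The same value can be checked by hand, which helps when verifying such a migration:

```
# manual equivalent of resolving
# secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword
kubectl get secret -n kubezero kubezero-secrets -o yaml | \
  yq '.data."argo-cd.adminPassword"' | base64 -d
```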

View File

@@ -19,7 +19,7 @@ keycloak:
   resources:
     limits:
       #cpu: 750m
-      memory: 1024Mi
+      memory: 768Mi
     requests:
       cpu: 100m
       memory: 512Mi

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-cache
 description: KubeZero Cache module
 type: application
-version: 0.1.1
+version: 0.1.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,11 +17,11 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: redis
-    version: 20.11.5
+    version: 20.0.3
     repository: https://charts.bitnami.com/bitnami
     condition: redis.enabled
   - name: redis-cluster
-    version: 11.5.0
+    version: 11.0.2
     repository: https://charts.bitnami.com/bitnami
     condition: redis-cluster.enabled

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-falco
 description: Falco Container Security and Audit components
 type: application
-version: 0.1.2
+version: 0.1.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: falco
-    version: 4.2.5
+    version: 4.21.3
     repository: https://falcosecurity.github.io/charts
     condition: k8saudit.enabled
     alias: k8saudit

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-graph
 description: KubeZero GraphQL and GraphDB
 type: application
-version: 0.1.1
+version: 0.1.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: neo4j
-    version: 2025.3.0
+    version: 5.26.0
     repository: https://helm.neo4j.com/neo4j
     condition: neo4j.enabled

View File

@@ -1,6 +1,6 @@
 # kubezero-graph
-![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 KubeZero GraphQL and GraphDB
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.29.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://helm.neo4j.com/neo4j | neo4j | 2025.3.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.2.1 |
+| https://helm.neo4j.com/neo4j | neo4j | 5.26.0 |
 ## Values
@@ -28,8 +28,6 @@ Kubernetes: `>= 1.29.0-0`
 | neo4j.disableLookups | bool | `true` | |
 | neo4j.enabled | bool | `false` | |
 | neo4j.neo4j.name | string | `"test-db"` | |
-| neo4j.neo4j.password | string | `"secret"` | |
-| neo4j.neo4j.passwordFromSecret | string | `"neo4j-admin"` | |
 | neo4j.serviceMonitor.enabled | bool | `false` | |
 | neo4j.services.neo4j.enabled | bool | `false` | |
 | neo4j.volumes.data.mode | string | `"defaultStorageClass"` | |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-mq
 description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 type: application
-version: 0.3.11
+version: 0.3.10
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,11 +17,11 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
-    version: 1.3.3
+    version: 1.2.2
     repository: https://nats-io.github.io/k8s/helm/charts/
     condition: nats.enabled
   - name: rabbitmq
-    version: 14.7.0
+    version: 14.6.6
     repository: https://charts.bitnami.com/bitnami
     condition: rabbitmq.enabled
 kubeVersion: ">= 1.26.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-mq
-![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.3.10](https://img.shields.io/badge/Version-0.3.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://charts.bitnami.com/bitnami | rabbitmq | 14.6.6 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.2.2 |
 ## Values
@@ -34,6 +34,13 @@ Kubernetes: `>= 1.26.0`
 | nats.natsBox.enabled | bool | `false` | |
 | nats.promExporter.enabled | bool | `false` | |
 | nats.promExporter.podMonitor.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
+| rabbitmq-cluster-operator.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.msgTopologyOperator.metrics.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.msgTopologyOperator.metrics.serviceMonitor.enabled | bool | `true` | |
+| rabbitmq-cluster-operator.rabbitmqImage.tag | string | `"3.11.4-debian-11-r0"` | |
+| rabbitmq-cluster-operator.useCertManager | bool | `true` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.existingPasswordSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.tls.enabled | bool | `false` | |

View File

@@ -1,4 +1,4 @@
-{{- if .Values.nats.promExporter.podMonitor.enabled }}
+{{- if .Values.nats.exporter.serviceMonitor.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:

View File

@@ -6,12 +6,6 @@ nats:
   jetstream:
     enabled: true
-  podTemplate:
-    topologySpreadConstraints:
-      kubernetes.io/hostname:
-        maxSkew: 1
-        whenUnsatisfiable: DoNotSchedule
-
   natsBox:
     enabled: false

View File

@@ -21,8 +21,4 @@
 .idea/
 *.tmproj
 .vscode/
-Chart.lock
-README.md.gotmpl
-dashboards.yaml
-jsonnet
-update.sh

View File

@@ -35,10 +35,11 @@ Kubernetes: `>= 1.31.0-0`
 | addons.targetRevision | string | `"0.8.13"` | |
 | argo.argo-cd.enabled | bool | `false` | |
 | argo.argo-cd.istio.enabled | bool | `false` | |
+| argo.argocd-apps.enabled | bool | `false` | |
 | argo.argocd-image-updater.enabled | bool | `false` | |
 | argo.enabled | bool | `false` | |
 | argo.namespace | string | `"argocd"` | |
-| argo.targetRevision | string | `"0.3.1"` | |
+| argo.targetRevision | string | `"0.2.9"` | |
 | cert-manager.enabled | bool | `false` | |
 | cert-manager.namespace | string | `"cert-manager"` | |
 | cert-manager.targetRevision | string | `"0.9.12"` | |

View File

@@ -0,0 +1,41 @@
+kind: ApplicationSet
+metadata:
+  name: kubezero
+  namespace: argocd
+  labels:
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+spec:
+  generators:
+  - git:
+      repoURL: {{ .Values.kubezero.applicationSet.repoURL }}
+      revision: {{ .Values.kubezero.applicationSet.revision }}
+      files:
+      {{- toYaml .Values.kubezero.applicationSet.files | nindent 6 }}
+  template:
+    metadata:
+      name: kubezero
+    spec:
+      project: kubezero
+      source:
+        repoURL: https://cdn.zero-downtime.net/charts
+        chart: kubezero
+        targetRevision: '{{ "{{" }} kubezero.version {{ "}}" }}'
+        helm:
+          parameters:
+            # We use this to detect if we are called from ArgoCD
+            - name: argocdAppName
+              value: $ARGOCD_APP_NAME
+            # This breaks the recursion, otherwise we install another kubezero project and app
+            # To be removed once the applicationSet is working and the AppProject is moved back to the ArgoCD chart
+            - name: installKubeZero
+              value: "false"
+          valueFiles:
+            - '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/kubezero.yaml'
+            - '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/values.yaml'
+      destination:
+        server: https://kubernetes.default.svc
+        namespace: argocd
+      syncPolicy:
+        automated:
+          prune: true
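The quoted `{{ "{{" }} ... {{ "}}" }}` constructs escape Helm's own delimiters, so literal `{{ kubezero.version }}` placeholders survive chart rendering and reach the ApplicationSet's git generator intact. One way to verify, with the chart path and template filename as assumptions, not taken from this diff:

```
# render only the ApplicationSet template and check the generator placeholder
helm template charts/kubezero --show-only templates/applicationset.yaml | grep targetRevision
# expected output keeps the placeholder un-expanded:
#   targetRevision: '{{ kubezero.version }}'
```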

View File

@@ -1,6 +0,0 @@
-# ensure we have a basic kubezero secret for cluster bootstrap and defaults
-kubectl get secret kubezero-secrets -n kubezero && rc=$? || rc=$?
-if [ $rc != 0 ]; then
-  kubectl create secret generic kubezero-secrets -n kubezero
-fi

View File

@@ -0,0 +1,7 @@
+#!/bin/bash
+
+ns=$(kubectl get ns -l argocd.argoproj.io/instance | grep -v NAME | awk '{print $1}')
+
+for n in $ns; do
+  kubectl label --overwrite namespace $n 'argocd.argoproj.io/instance-'
+done
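The trailing dash is kubectl's label-removal syntax: the new script strips the ArgoCD instance label from every namespace that still carries it. A one-off equivalent for a single namespace, name illustrative:

```
# 'key-' removes the label; --overwrite also permits changing an existing value
kubectl label --overwrite namespace argocd 'argocd.argoproj.io/instance-'
```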

View File

@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+# or more contributor license agreements. Licensed under the Elastic License;
+# you may not use this file except in compliance with the Elastic License.
+
+# Script to migrate an existing ECK 1.2.1 installation to Helm.
+
+set -euo pipefail
+
+RELEASE_NAMESPACE=${RELEASE_NAMESPACE:-"elastic-system"}
+
+echo "Uninstalling ECK"
+kubectl delete -n "${RELEASE_NAMESPACE}" \
+  serviceaccount/elastic-operator \
+  secret/elastic-webhook-server-cert \
+  clusterrole.rbac.authorization.k8s.io/elastic-operator \
+  clusterrole.rbac.authorization.k8s.io/elastic-operator-view \
+  clusterrole.rbac.authorization.k8s.io/elastic-operator-edit \
+  clusterrolebinding.rbac.authorization.k8s.io/elastic-operator \
+  rolebinding.rbac.authorization.k8s.io/elastic-operator \
+  service/elastic-webhook-server \
+  statefulset.apps/elastic-operator \
+  validatingwebhookconfiguration.admissionregistration.k8s.io/elastic-webhook.k8s.elastic.co

View File

@@ -25,8 +25,7 @@ spec:
     repoURL: {{ default "https://cdn.zero-downtime.net/charts" (index .Values $name "repository") }}
     targetRevision: {{ default "HEAD" ( index .Values $name "targetRevision" ) | quote }}
     helm:
-      # add with 1.32
-      #skipTests: true
+      skipTests: true
       valuesObject:
         {{- include (print $name "-values") $ | nindent 8 }}
@@ -42,9 +41,6 @@ spec:
       - ServerSideApply=true
       - CreateNamespace=true
      - ApplyOutOfSyncOnly=true
-  info:
-    - name: "Source:"
-      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.31/charts/kubezero-{{ $name }}"
 {{- include (print $name "-argo") $ }}
 {{- end }}

View File

@@ -1,30 +0,0 @@
-{{- define "aws-iam-env" -}}
-- name: AWS_ROLE_ARN
-  value: "arn:aws:iam::{{ $.Values.global.aws.accountId }}:role/{{ $.Values.global.aws.region }}.{{ $.Values.global.clusterName }}.{{ .roleName }}"
-- name: AWS_WEB_IDENTITY_TOKEN_FILE
-  value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
-- name: AWS_STS_REGIONAL_ENDPOINTS
-  value: "regional"
-- name: METADATA_TRIES
-  value: "0"
-- name: AWS_REGION
-  value: {{ $.Values.global.aws.region }}
-{{- end }}
-
-{{- define "aws-iam-volumes" -}}
-- name: aws-token
-  projected:
-    sources:
-      - serviceAccountToken:
-          path: token
-          expirationSeconds: 86400
-          audience: "sts.amazonaws.com"
-{{- end }}
-
-{{- define "aws-iam-volumemounts" -}}
-- name: aws-token
-  mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
-  readOnly: true
-{{- end }}

View File

@@ -1,6 +1,6 @@
 {{- define "addons-values" }}
 clusterBackup:
-  enabled: {{ ternary "true" "false" (or (eq .Values.global.platform "aws") .Values.addons.clusterBackup.enabled) }}
+  enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") .Values.addons.clusterBackup.enabled) }}
 {{- with omit .Values.addons.clusterBackup "enabled" }}
   {{- toYaml . | nindent 2 }}
@@ -14,7 +14,7 @@ clusterBackup:
 {{- end }}
 forseti:
-  enabled: {{ ternary "true" "false" (or (eq .Values.global.platform "aws") .Values.addons.forseti.enabled) }}
+  enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") .Values.addons.forseti.enabled) }}
 {{- with omit .Values.addons.forseti "enabled" }}
   {{- toYaml . | nindent 2 }}
@@ -28,7 +28,7 @@ forseti:
 {{- end }}
 external-dns:
-  enabled: {{ ternary "true" "false" (or (eq .Values.global.platform "aws") (index .Values "addons" "external-dns" "enabled")) }}
+  enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") (index .Values "addons" "external-dns" "enabled")) }}
 {{- with omit (index .Values "addons" "external-dns") "enabled" }}
   {{- toYaml . | nindent 2 }}
@@ -42,15 +42,32 @@ external-dns:
     - "--aws-zone-type=public"
     - "--aws-zones-cache-duration=1h"
   env:
-    {{- include "aws-iam-env" (merge (dict "roleName" "externalDNS") .) | nindent 4 }}
+    - name: AWS_REGION
+      value: {{ .Values.global.aws.region }}
+    - name: AWS_ROLE_ARN
+      value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.externalDNS"
+    - name: AWS_WEB_IDENTITY_TOKEN_FILE
+      value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+    - name: AWS_STS_REGIONAL_ENDPOINTS
+      value: "regional"
+    - name: METADATA_TRIES
+      value: "0"
   extraVolumes:
-    {{- include "aws-iam-volumes" . | nindent 4 }}
+    - name: aws-token
+      projected:
+        sources:
+          - serviceAccountToken:
+              path: token
+              expirationSeconds: 86400
+              audience: "sts.amazonaws.com"
   extraVolumeMounts:
-    {{- include "aws-iam-volumemounts" . | nindent 4 }}
+    - name: aws-token
+      mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+      readOnly: true
 {{- end }}
 cluster-autoscaler:
-  enabled: {{ ternary "true" "false" (or (eq .Values.global.platform "aws") (index .Values "addons" "cluster-autoscaler" "enabled")) }}
+  enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") (index .Values "addons" "cluster-autoscaler" "enabled")) }}
   autoDiscovery:
     clusterName: {{ .Values.global.clusterName }}
@@ -81,9 +98,17 @@ cluster-autoscaler:
     AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
     AWS_STS_REGIONAL_ENDPOINTS: "regional"
   extraVolumes:
-    {{- include "aws-iam-volumes" . | nindent 4 }}
+    - name: aws-token
+      projected:
+        sources:
+          - serviceAccountToken:
+              path: token
+              expirationSeconds: 86400
+              audience: "sts.amazonaws.com"
   extraVolumeMounts:
-    {{- include "aws-iam-volumemounts" . | nindent 4 }}
+    - name: aws-token
+      mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+      readOnly: true
 {{- end }}
 {{- with .Values.addons.fuseDevicePlugin }}
@@ -130,7 +155,14 @@ aws-node-termination-handler:
   queueURL: "https://sqs.{{ .Values.global.aws.region }}.amazonaws.com/{{ .Values.global.aws.accountId }}/{{ .Values.global.clusterName }}_Nth"
   managedTag: "zdt:kubezero:nth:{{ .Values.global.clusterName }}"
   extraEnv:
-    {{- include "aws-iam-env" (merge (dict "roleName" "awsNth") .) | nindent 4 }}
+    - name: AWS_ROLE_ARN
+      value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.awsNth"
+    - name: AWS_WEB_IDENTITY_TOKEN_FILE
+      value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+    - name: AWS_STS_REGIONAL_ENDPOINTS
+      value: "regional"
+    - name: METADATA_TRIES
+      value: "0"
 aws-eks-asg-rolling-update-handler:
   enabled: {{ default "true" (index .Values "addons" "aws-eks-asg-rolling-update-handler" "enabled") }}
@@ -140,9 +172,10 @@ aws-eks-asg-rolling-update-handler:
 {{- end }}
   environmentVars:
-    {{- include "aws-iam-env" (merge (dict "roleName" "awsRuh") .) | nindent 4 }}
     - name: CLUSTER_NAME
       value: {{ .Values.global.clusterName }}
+    - name: AWS_REGION
+      value: {{ .Values.global.aws.region }}
     - name: EXECUTION_INTERVAL
       value: "60"
     - name: METRICS
@@ -151,6 +184,12 @@ aws-eks-asg-rolling-update-handler:
       value: "true"
     - name: SLOW_MODE
       value: "true"
+    - name: AWS_ROLE_ARN
+      value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.awsRuh"
+    - name: AWS_WEB_IDENTITY_TOKEN_FILE
+      value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+    - name: AWS_STS_REGIONAL_ENDPOINTS
+      value: "regional"
 {{- with (index .Values "addons" "neuron-helm-chart") }}
 neuron-helm-chart:
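The recurring edit in this file swaps the platform check `eq .Values.global.platform "aws"` for `hasKey .Values.global.aws "region"`, gating the AWS-only addons on whether a region is actually configured. A minimal sketch of the two gates side by side, against an assumed values fragment (keys below are illustrative):

```
# Assumed values fragment (illustrative)
# global:
#   platform: aws
#   aws:
#     region: eu-central-1

# Platform-flag gate
enabled: {{ ternary "true" "false" (eq .Values.global.platform "aws") }}
# Key-presence gate; evaluates to false against the empty default aws: {}
enabled: {{ ternary "true" "false" (hasKey .Values.global.aws "region") }}
```

With the `aws: {}` default introduced further down in `values.yaml`, the `hasKey` variant keeps these addons off until a region is supplied.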

View File

@@ -23,51 +23,11 @@ argo-cd:
   metrics:
     enabled: {{ .Values.metrics.enabled }}
   repoServer:
+    metrics:
+      enabled: {{ .Values.metrics.enabled }}
     {{- with index .Values "argo" "argo-cd" "repoServer" }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
-    metrics:
-      enabled: {{ .Values.metrics.enabled }}
-    volumes:
-      - name: cmp-tmp
-        emptyDir: {}
-    {{- if eq .Values.global.platform "aws" }}
-      {{- include "aws-iam-volumes" . | nindent 6 }}
-    env:
-      {{- include "aws-iam-env" (merge (dict "roleName" "argocd-repo-server") .) | nindent 6 }}
-    volumeMounts:
-      {{- include "aws-iam-volumemounts" . | nindent 6 }}
-    extraContainers:
-      - name: cmp-kubezero-git-sync
-        image: '{{ "{{" }} default .Values.global.image.repository .Values.repoServer.image.repository {{ "}}" }}:{{ "{{" }} default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag {{ "}}" }}'
-        imagePullPolicy: '{{ "{{" }} default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy {{ "}}" }}'
-        command: ["/var/run/argocd/argocd-cmp-server"]
-        env:
-          {{- include "aws-iam-env" (merge (dict "roleName" "argocd-repo-server") .) | nindent 10 }}
-        volumeMounts:
-          - mountPath: /var/run/argocd
-            name: var-files
-          - mountPath: /home/argocd/cmp-server/plugins
-            name: plugins
-          - mountPath: /tmp
-            name: cmp-tmp
-          {{- include "aws-iam-volumemounts" . | nindent 10 }}
-        securityContext:
-          runAsNonRoot: true
-          readOnlyRootFilesystem: true
-          runAsUser: 999
-          allowPrivilegeEscalation: false
-          seccompProfile:
-            type: RuntimeDefault
-          capabilities:
-            drop:
-              - ALL
-    {{- end }}
   server:
     metrics:
       enabled: {{ .Values.metrics.enabled }}
@@ -91,13 +51,30 @@ argocd-image-updater:
   {{- toYaml . | nindent 2 }}
 {{- end }}
-{{- if eq .Values.global.platform "aws" }}
+{{- if .Values.global.aws }}
 extraEnv:
-  {{- include "aws-iam-env" (merge (dict "roleName" "argocd-image-updater") .) | nindent 4 }}
+  - name: AWS_ROLE_ARN
+    value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.argocd-image-updater"
+  - name: AWS_WEB_IDENTITY_TOKEN_FILE
+    value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+  - name: AWS_STS_REGIONAL_ENDPOINTS
+    value: "regional"
+  - name: METADATA_TRIES
+    value: "0"
+  - name: AWS_REGION
+    value: {{ .Values.global.aws.region }}
 volumes:
-  {{- include "aws-iam-volumes" . | nindent 4 }}
+  - name: aws-token
+    projected:
+      sources:
+        - serviceAccountToken:
+            path: token
+            expirationSeconds: 86400
+            audience: "sts.amazonaws.com"
 volumeMounts:
-  {{- include "aws-iam-volumemounts" . | nindent 4 }}
+  - name: aws-token
+    mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+    readOnly: true
 {{- end }}
 metrics:
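The removed `cmp-kubezero-git-sync` sidecar follows Argo CD's Config Management Plugin pattern: the container runs `argocd-cmp-server` and reads a plugin definition from its config directory. A minimal sketch of such a definition, with a hypothetical generate command (the actual plugin config is not part of this compare):

```
# Hypothetical plugin.yaml, mounted at /home/argocd/cmp-server/config/plugin.yaml
apiVersion: argoproj.io/v1alpha1
kind: ConfigManagementPlugin
metadata:
  name: kubezero-git-sync
spec:
  generate:
    command: ["sh", "-c", "helm template ."]
```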

View File

@@ -1,6 +1,6 @@
 {{- define "_kube-prometheus-stack" }}
-{{- if eq .global.platform "aws" }}
+{{- if .global.aws.region }}
 alertmanager:
   alertmanagerSpec:
     podMetadata:

View File

@@ -6,9 +6,7 @@ global:
   highAvailable: false

-  aws:
-    accountId: "123456789012"
-    region: the-moon
+  aws: {}

   gcp: {}

 addons:
@@ -117,7 +115,7 @@ logging:
 argo:
   enabled: false
   namespace: argocd
-  targetRevision: 0.3.2
+  targetRevision: 0.3.0
   argo-cd:
     enabled: false
 istio:

View File

@@ -1,11 +0,0 @@
-# KubeZero Helm hooks
-
-## Abstract
-Scripts within the `hooks.d` folder of each chart are executed at the respective times when the charts are applied via libhelm.
-
-*These hooks do NOT work via ArgoCD*
-
-## Flow
-- hooks are execute as part of the libhelm tasks like `apply`
-- are running with the current kubectl context
-- executed at root working directory, eg. set a value for helm the scripts can edit the `./values.yaml` file.

View File

@ -3,7 +3,6 @@
## What's new - Major themes ## What's new - Major themes
- all KubeZero and support AMIs based on [Alpine 3.21](https://alpinelinux.org/posts/Alpine-3.21.0-released.html) - all KubeZero and support AMIs based on [Alpine 3.21](https://alpinelinux.org/posts/Alpine-3.21.0-released.html)
- network policies for ArgoCD - network policies for ArgoCD
- Nvidia worker nodes are labeled with detected GPU product code
- Prometheus upgraded to V3, reducing CPU and memory requirements, see [upstream blog](https://prometheus.io/blog/2024/11/14/prometheus-3-0/) - Prometheus upgraded to V3, reducing CPU and memory requirements, see [upstream blog](https://prometheus.io/blog/2024/11/14/prometheus-3-0/)
## Features and fixes ## Features and fixes
@ -11,10 +10,10 @@
## Version upgrades ## Version upgrades
- cilium 1.16.6 - cilium 1.16.6
- istio 1.24.3 - istio 1.24.2
- ArgoCD 2.14.5 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd) - ArgoCD 2.14.3 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- Prometheus 3.1.0 / Grafana 11.5.1 - Prometheus 3.1.0 / Grafana 11.5.1
- Nvidia container toolkit 1.17.4, drivers 570.86.15, Cuda 12.8 - Nvidia container toolkit 1.17, drivers 565.57.01, Cuda 12.7
## Resources ## Resources
- [Kubernetes v1.31 upstream release blog](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/) - [Kubernetes v1.31 upstream release blog](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/)