Compare commits


28 Commits

Author SHA1 Message Date
5740cbce9e chore(deps): update helm release mariadb-galera to v14.2.2 2025-04-02 03:02:36 +00:00
daf70c9bfb fix: argocd bootstrap fix 2025-03-26 16:47:24 +00:00
eb059883c1 fix: ensure pre-install hook is run for kubezero 2025-03-25 11:17:30 +01:00
bca7f5fd45 fix: another argo migration fix 2025-03-24 22:10:38 +01:00
68997b535d fix: type in hook 2025-03-24 18:18:37 +00:00
ca69b55492 fix: allow multi-line secret val 2025-03-24 19:02:19 +01:00
01832f2e41 fix: improve argocd secret handling 2025-03-24 18:54:56 +01:00
94dd2f395e fix: kubezero root module fixes 2025-03-24 18:11:26 +01:00
6a7c0b6085 feat: more cluster bootstrap work 2025-03-24 16:44:11 +00:00
10de3a1047 Merge pull request 'chore(deps): update kubezero-argo-dependencies' (#63) from renovate/kubezero-argo-kubezero-argo-dependencies into main
Reviewed-on: #63
2025-03-21 13:51:46 +00:00
5a47b6be43 chore(deps): update kubezero-argo-dependencies 2025-03-21 13:51:46 +00:00
63eb787599 Merge pull request 'chore(deps): update public.ecr.aws/zero-downtime/zdt-argocd docker tag to v2.14.7' (#67) from renovate/public.ecr.aws-zero-downtime-zdt-argocd-2.x into main
Reviewed-on: #67
2025-03-21 13:51:29 +00:00
120072a34b chore(deps): update public.ecr.aws/zero-downtime/zdt-argocd docker tag to v2.14.7 2025-03-21 03:02:01 +00:00
63f96e58ba fix: ensure root app is re-created 2025-03-19 12:39:06 +01:00
ab744494e6 fix: apply kubezero module first, fix hooks 2025-03-18 16:18:20 +00:00
af29836a27 feat: new custom helm hooks 2025-03-18 14:47:55 +00:00
30bc95408a feat: improved ArgoCD bootstrap, tool cleanups 2025-03-17 20:30:34 +00:00
545a7fd8b1 feat: latest CI tools, improved Gitea API endpoint protection 2025-03-13 21:02:53 +00:00
56a2926917 Merge pull request 'chore(deps): update helm release gitea to v11' (#59) from renovate/kubezero-ci-major-kubezero-ci-dependencies into main
Reviewed-on: #59
2025-03-13 12:41:06 +00:00
b8114bd053 chore(deps): update helm release gitea to v11 2025-03-13 12:41:06 +00:00
53f940a54c Merge pull request 'chore(deps): update kubezero-ci-dependencies' (#57) from renovate/kubezero-ci-kubezero-ci-dependencies into main
Reviewed-on: #57
2025-03-13 12:40:57 +00:00
58780f1e0e chore(deps): update kubezero-ci-dependencies 2025-03-13 03:01:47 +00:00
4c10271ec6 Merge pull request 'chore(deps): update helm release argo-cd to v7.8.9' (#54) from renovate/kubezero-argo-kubezero-argo-dependencies into main
Reviewed-on: #54
2025-03-11 18:17:08 +00:00
5246f57329 chore(deps): update helm release argo-cd to v7.8.9 2025-03-11 18:17:08 +00:00
5bc6e6e435 fix: reduce load on api-server on single node control planes, more argo related fixes 2025-03-11 16:37:27 +00:00
cbcaec807a fix: replace apps during 1.31 2025-03-11 14:07:40 +01:00
bfafccaf32 feat: tooling tweaks, Istio ingress option to preserver external request Ids 2025-03-10 17:49:24 +00:00
3304363986 Fix: fix for minimal ES version in logging, clustered control plane upgrade fix, tooling cleanup 2025-03-04 11:47:19 +00:00
55 changed files with 469 additions and 403 deletions

View File

@ -6,8 +6,8 @@ ARG ALPINE_VERSION
ARG KUBE_VERSION=1.31
ARG SOPS_VERSION="3.9.4"
ARG VALS_VERSION="0.39.1"
ARG HELM_SECRETS_VERSION="4.6.2"
ARG VALS_VERSION="0.39.4"
ARG HELM_SECRETS_VERSION="4.6.3"
RUN cd /etc/apk/keys && \
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@ -24,6 +24,7 @@ RUN cd /etc/apk/keys && \
py3-yaml \
restic \
helm \
apache2-utils \
ytt@testing \
etcd-ctl@edge-community \
cri-tools@kubezero \

View File

@ -4,10 +4,10 @@
set -x
ARTIFACTS=($(echo $1 | tr "," "\n"))
ACTION=$2
ACTION="${2:-apply}"
ARGOCD="${3:-true}"
LOCAL_DEV=1
ARGOCD="False"
#VERSION="latest"
KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
@ -36,46 +36,6 @@ parse_version() {
KUBE_VERSION=$(parse_version $KUBE_VERSION)
### Various hooks for modules
################
# cert-manager #
################
function cert-manager-post() {
# If any error occurs, wait for initial webhook deployment and try again
# see: https://cert-manager.io/docs/concepts/webhook/#webhook-connection-problems-shortly-after-cert-manager-installation
if [ $rc -ne 0 ]; then
wait_for "kubectl get deployment -n $namespace cert-manager-webhook"
kubectl rollout status deployment -n $namespace cert-manager-webhook
wait_for 'kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0"'
fi
wait_for "kubectl get ClusterIssuer -n $namespace kubezero-local-ca-issuer"
kubectl wait --timeout=180s --for=condition=Ready -n $namespace ClusterIssuer/kubezero-local-ca-issuer
}
###########
# ArgoCD #
###########
function argocd-pre() {
kubectl delete job argo-argocd-redis-secret-init -n argocd || true
for f in $CLUSTER/secrets/argocd-*.yaml; do
kubectl apply -f $f
done
}
###########
# Metrics #
###########
# Cleanup patch jobs from previous runs , ArgoCD does this automatically
function metrics-pre() {
kubectl delete jobs --field-selector status.successful=1 -n monitoring
}
### Main
get_kubezero_values $ARGOCD
@ -85,7 +45,8 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $
# Root KubeZero apply directly and exit
if [ ${ARTIFACTS[0]} == "kubezero" ]; then
kubectl replace -f $WORKDIR/kubezero/templates
[ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
exit $?
# "catch all" apply all enabled modules
@ -100,12 +61,12 @@ if [ "$ACTION" == "delete" ]; then
_helm delete ${ARTIFACTS[idx]} || true
done
else
if [ "$ACTION" == "" -o "$ACTION" == "crds" ]; then
if [ "$ACTION" == "apply" -o "$ACTION" == "crds" ]; then
for t in ${ARTIFACTS[@]}; do
_helm crds $t || true
done
fi
for t in ${ARTIFACTS[@]}; do
_helm apply $t || true
_helm $ACTION $t || true
done
fi

View File

@ -14,7 +14,12 @@ pre_control_plane_upgrade_cluster() {
# All things after the first controller / control plane upgrade
post_control_plane_upgrade_cluster() {
echo
# delete previous root app controlled by kubezero module
kubectl delete application kubezero-git-sync -n argocd || true
# Patch appproject to keep SyncWindow in place
kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
}

View File

@ -104,9 +104,9 @@ pre_kubeadm() {
# Shared steps after calling kubeadm
post_kubeadm() {
# KubeZero resources
# KubeZero resources - will never be applied by ArgoCD
for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
kubectl apply -f $f $LOG
kubectl apply -f $f --server-side --force-conflicts $LOG
done
}
@ -115,9 +115,13 @@ post_kubeadm() {
control_plane_upgrade() {
CMD=$1
ARGOCD=$(argo_used)
render_kubeadm upgrade
if [[ "$CMD" =~ ^(cluster)$ ]]; then
pre_control_plane_upgrade_cluster
# get current values, argo app over cm
get_kubezero_values $ARGOCD
@ -127,13 +131,13 @@ control_plane_upgrade() {
update_kubezero_cm
if [ "$ARGOCD" == "True" ]; then
if [ "$ARGOCD" == "true" ]; then
# update argo app
export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
kubectl get application kubezero -n argocd -o yaml | \
yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
> $WORKDIR/new-argocd-app.yaml
kubectl replace -f $WORKDIR/new-argocd-app.yaml
kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
fi
pre_kubeadm
@ -147,13 +151,19 @@ control_plane_upgrade() {
# install re-certed kubectl config for root
cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
post_control_plane_upgrade_cluster
echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."
elif [[ "$CMD" =~ ^(final)$ ]]; then
pre_cluster_upgrade_final
# Finally upgrade addons last, with 1.32 we can ONLY call addon phase
#_kubeadm upgrade apply phase addon all $KUBE_VERSION
_kubeadm upgrade apply $KUBE_VERSION
post_cluster_upgrade_final
echo "Upgraded kubeadm addons."
fi
@ -318,7 +328,15 @@ apply_module() {
done
for t in $MODULES; do
_helm apply $t
# apply/replace app of apps directly
if [ $t == "kubezero" ]; then
[ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
else
#_helm apply $t
# During 1.31 we change the ArgoCD tracking so replace
_helm replace $t
fi
done
echo "Applied KubeZero modules: $MODULES"
@ -394,17 +412,10 @@ for t in $@; do
join) control_plane_node join;;
restore) control_plane_node restore;;
kubeadm_upgrade)
ARGOCD=$(argo_used)
# call hooks
pre_control_plane_upgrade_cluster
control_plane_upgrade cluster
post_control_plane_upgrade_cluster
;;
finalize_cluster_upgrade)
ARGOCD=$(argo_used)
pre_cluster_upgrade_final
control_plane_upgrade final
post_cluster_upgrade_final
;;
apply_*)
ARGOCD=$(argo_used)

View File

@ -2,11 +2,10 @@
# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
LOCAL_DEV=${LOCAL_DEV:-""}
export HELM_SECRETS_BACKEND="vals"
LOCAL_DEV=${LOCAL_DEV:-""}
# Waits for max 300s and retries
function wait_for() {
local TRIES=0
@ -30,22 +29,60 @@ function chart_location() {
function argo_used() {
kubectl get application kubezero -n argocd >/dev/null \
&& echo "True" || echo "False"
&& echo "true" || echo "false"
}
function field_manager() {
local argo=${1:-"false"}
if [ "$argo" == "true" ]; then
echo "--field-manager argo-controller"
else
echo ""
fi
}
function get_secret_val() {
local ns=$1
local secret=$2
local val=$(kubectl get secret -n $ns $secret -o yaml | yq ".data.\"$3\"")
if [ "$val" != "null" ]; then
echo -n $val | base64 -d -w0
else
echo ""
fi
}
function get_kubezero_secret() {
get_secret_val kubezero kubezero-secrets "$1"
}
function set_kubezero_secret() {
local key="$1"
local val="$2"
if [ -n "$val" ]; then
kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n "$val" |base64 -w0)\" }}"
fi
}
# get kubezero-values from ArgoCD if available or use in-cluster CM
function get_kubezero_values() {
local argo=${1:-"False"}
local argo=${1:-"false"}
if [ "$argo" == "True" ]; then
if [ "$argo" == "true" ]; then
kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml
else
kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml
fi
}
# Overwrite kubezero-values CM with file
function update_kubezero_cm() {
kubectl get cm -n kubezero kubezero-values -o=yaml | \
@ -55,7 +92,7 @@ function update_kubezero_cm() {
# sync kubezero-values CM from ArgoCD app
function sync_kubezero_cm_from_argo() {
get_kubezero_values True
get_kubezero_values true
update_kubezero_cm
}
@ -96,25 +133,12 @@ function waitSystemPodsRunning() {
done
}
function argo_app_synced() {
APP=$1
# Ensure we are synced otherwise bail out
status=$(kubectl get application $APP -n argocd -o yaml | yq .status.sync.status)
if [ "$status" != "Synced" ]; then
echo "ArgoCD Application $APP not 'Synced'!"
return 1
fi
return 0
}
# make sure namespace exists prior to calling helm as the create-namespace option doesn't work
function create_ns() {
local namespace=$1
if [ "$namespace" != "kube-system" ]; then
kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
kubectl get ns $namespace > /dev/null || kubectl create ns $namespace $(field_manager $ARGOCD)
fi
}
@ -127,8 +151,8 @@ function delete_ns() {
# Extract crds via helm calls
function _crds() {
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
function crds() {
helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
#!/usr/bin/python3
import yaml
import sys
@ -144,7 +168,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
# Only apply if there are actually any crds
if [ -s $WORKDIR/crds.yaml ]; then
[ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts $(field_manager $ARGOCD)
fi
}
@ -189,11 +213,22 @@ function _helm() {
yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
if [ $action == "crds" ]; then
# Allow custom CRD handling
declare -F ${module}-crds && ${module}-crds || _crds
# extract remote chart or copy local to access hooks
rm -rf $WORKDIR/$chart $WORKDIR/${chart}*.tgz
elif [ $action == "apply" ]; then
if [ -z "$LOCAL_DEV" ]; then
helm pull $(chart_location $chart) --untar -d $WORKDIR
else
cp -r $(chart_location $chart) $WORKDIR
fi
if [ $action == "crds" ]; then
# Pre-crd hook
[ -f $WORKDIR/$chart/hooks.d/pre-crds.sh ] && . $WORKDIR/$chart/hooks.d/pre-crds.sh
crds
elif [ $action == "apply" -o $action == "replace" ]; then
echo "using values to $action of module $module: "
cat $WORKDIR/values.yaml
@ -201,13 +236,16 @@ function _helm() {
create_ns $namespace
# Optional pre hook
declare -F ${module}-pre && ${module}-pre
[ -f $WORKDIR/$chart/hooks.d/pre-install.sh ] && . $WORKDIR/$chart/hooks.d/pre-install.sh
render
kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
[ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?
# If replace failed try apply at least
[ $action == "apply" -o $rc -ne 0 ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
# Optional post hook
declare -F ${module}-post && ${module}-post
[ -f $WORKDIR/$chart/hooks.d/post-install.sh ] && . $WORKDIR/$chart/hooks.d/post-install.sh
elif [ $action == "delete" ]; then
render

View File

@ -8,10 +8,18 @@ import yaml
def migrate(values):
"""Actual changes here"""
# remove syncOptions from root app
# migrate kubezero root app of apps to Argo chart
try:
if values["kubezero"]["syncPolicy"]:
values["kubezero"].pop("syncPolicy")
if values["kubezero"]:
try:
values["kubezero"].pop("syncPolicy")
except KeyError:
pass
values["kubezero"]["gitSync"]["repoUrl"] = values["kubezero"]["gitSync"].pop("repoURL")
values["argo"]["argo-cd"]["kubezero"] = values["kubezero"]["gitSync"]
values.pop("kubezero")
except KeyError:
pass

View File

@ -17,16 +17,16 @@ ARGOCD=$(argo_used)
echo "Checking that all pods in kube-system are running ..."
#waitSystemPodsRunning
[ "$ARGOCD" == "True" ] && disable_argo
[ "$ARGOCD" == "true" ] && disable_argo
# Check if we already have all controllers on the current version
OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
# All controllers already on current version
if [ "$OLD_CONTROLLERS" == "0" ]; then
# All controllers already on current version
control_plane_upgrade finalize_cluster_upgrade
# Otherwise run control plane upgrade
else
# Otherwise run control plane upgrade
control_plane_upgrade kubeadm_upgrade
fi
@ -35,10 +35,10 @@ read -r
#echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
#[ "$ARGOCD" == "True" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
#[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
# upgrade modules
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
@ -47,6 +47,9 @@ echo "Applying remaining KubeZero modules..."
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
# we replace the project during v1.31 so disable again
[ "$ARGOCD" == "true" ] && disable_argo
# Final step is to commit the new argocd kubezero app
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP
@ -63,4 +66,4 @@ echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to
echo "<Return> to continue and re-enable ArgoCD:"
read -r
[ "$ARGOCD" == "True" ] && enable_argo
[ "$ARGOCD" == "true" ] && enable_argo

View File

@ -17,22 +17,36 @@ failureModeDeny: false
# - slow: 1 req/s over a minute per sourceIP
descriptors:
ingress:
- key: speed
value: slow
- key: sourceIp
value: sixtyPerMinute
descriptors:
- key: remote_address
rate_limit:
unit: minute
requests_per_unit: 60
- key: sourceIp
value: tenPerSecond
descriptors:
- key: remote_address
rate_limit:
unit: second
requests_per_unit: 10
privateIngress:
- key: speed
value: slow
- key: sourceIp
value: sixtyPerMinute
descriptors:
- key: remote_address
rate_limit:
unit: minute
requests_per_unit: 60
- key: sourceIp
value: tenPerSecond
descriptors:
- key: remote_address
rate_limit:
unit: second
requests_per_unit: 10
metrics:
enabled: false

charts/kubeadm/TODO (new file)
View File

@ -0,0 +1,6 @@
"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_256_GCM_SHA384"}
"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_128_GCM_SHA256"}
"RuntimeConfig from runtime service failed","err":"rpc error: code = Unimplemented desc = unknown method RuntimeConfig for service runtime.v1.RuntimeService"}
"CRI implementation should be updated to support RuntimeConfig when KubeletCgroupDriverFromCRI feature gate has been enabled. Falling back to using cgroupDriver from kubelet config."

View File

@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
kubernetesVersion: {{ .Chart.Version }}
clusterName: {{ .Values.global.clusterName }}
#featureGates:
# NonGracefulFailover: true
featureGates:
ControlPlaneKubeletLocalMode: true
controlPlaneEndpoint: {{ .Values.api.endpoint }}
networking:
podSubnet: 10.244.0.0/16

View File

@ -3,7 +3,7 @@
{{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
{{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
{{- define "kubeadm.featuregates" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList"}}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList" }}
{{- if eq .return "csv" }}
{{- range $key := $gates }}
{{- $key }}=true,

View File

@ -0,0 +1,28 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
README.md.gotmpl
dashboards.yaml
jsonnet
update.sh

View File

@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.2.8
version: 0.3.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -18,19 +18,15 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: argo-events
version: 2.4.13
version: 2.4.14
repository: https://argoproj.github.io/argo-helm
condition: argo-events.enabled
- name: argo-cd
version: 7.8.2
repository: https://argoproj.github.io/argo-helm
condition: argo-cd.enabled
- name: argocd-apps
version: 2.0.2
version: 7.8.13
repository: https://argoproj.github.io/argo-helm
condition: argo-cd.enabled
- name: argocd-image-updater
version: 0.12.0
repository: https://argoproj.github.io/argo-helm
condition: argocd-image-updater.enabled
kubeVersion: ">= 1.26.0-0"
kubeVersion: ">= 1.30.0-0"

View File

@ -1,6 +1,6 @@
# kubezero-argo
![Version: 0.2.8](https://img.shields.io/badge/Version-0.2.8-informational?style=flat-square)
![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square)
KubeZero Argo - Events, Workflow, CD
@ -14,15 +14,14 @@ KubeZero Argo - Events, Workflow, CD
## Requirements
Kubernetes: `>= 1.26.0-0`
Kubernetes: `>= 1.30.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 7.8.2 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.13 |
| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.2 |
| https://argoproj.github.io/argo-helm | argo-cd | 7.8.13 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.14 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
## Values
@ -30,7 +29,7 @@ Kubernetes: `>= 1.26.0-0`
|-----|------|---------|-------------|
| argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
| argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n health.lua: |\n hs = {}\n hs.status = \"Progressing\"\n hs.message = \"\"\n if obj.status ~= nil then\n if obj.status.health ~= nil then\n hs.status = obj.status.health.status\n if obj.status.health.message ~= nil then\n hs.message = obj.status.health.message\n end\n end\n end\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
@ -39,10 +38,11 @@ Kubernetes: `>= 1.26.0-0`
| argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
| argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.secret.argocdServerAdminPassword | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword"` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
| argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
@ -54,31 +54,19 @@ Kubernetes: `>= 1.26.0-0`
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
| argo-cd.global.image.tag | string | `"v2.13.1"` | |
| argo-cd.global.image.tag | string | `"v2.14.7"` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.global.networkPolicy.create | bool | `true` | |
| argo-cd.istio.enabled | bool | `false` | |
| argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.kubezero.bootstrap | bool | `false` | deploy the KubeZero Project and GitSync Root App |
| argo-cd.kubezero.path | string | `"/"` | |
| argo-cd.kubezero.repoUrl | string | `""` | |
| argo-cd.kubezero.sshPrivateKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey"` | |
| argo-cd.kubezero.targetRevision | string | `"HEAD"` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.repoServer.clusterRoleRules.enabled | bool | `true` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].apiGroups[0] | string | `""` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].resources[0] | string | `"secrets"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[0] | string | `"get"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[1] | string | `"watch"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[2] | string | `"list"` | |
| argo-cd.repoServer.initContainers[0].command[0] | string | `"/usr/local/bin/sa2kubeconfig.sh"` | |
| argo-cd.repoServer.initContainers[0].command[1] | string | `"/home/argocd/.kube/config"` | |
| argo-cd.repoServer.initContainers[0].image | string | `"{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include \"argo-cd.defaultTag\" .) .Values.repoServer.image.tag }}"` | |
| argo-cd.repoServer.initContainers[0].imagePullPolicy | string | `"{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}"` | |
| argo-cd.repoServer.initContainers[0].name | string | `"create-kubeconfig"` | |
| argo-cd.repoServer.initContainers[0].securityContext.allowPrivilegeEscalation | bool | `false` | |
| argo-cd.repoServer.initContainers[0].securityContext.capabilities.drop[0] | string | `"ALL"` | |
| argo-cd.repoServer.initContainers[0].securityContext.readOnlyRootFilesystem | bool | `true` | |
| argo-cd.repoServer.initContainers[0].securityContext.runAsNonRoot | bool | `true` | |
| argo-cd.repoServer.initContainers[0].securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
| argo-cd.repoServer.initContainers[0].volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
| argo-cd.repoServer.initContainers[0].volumeMounts[0].name | string | `"kubeconfigs"` | |
| argo-cd.redisSecretInit.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.repoServer.volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
@ -101,9 +89,6 @@ Kubernetes: `>= 1.26.0-0`
| argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
| argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
| argo-events.enabled | bool | `false` | |
| argocd-apps.applications | object | `{}` | |
| argocd-apps.enabled | bool | `false` | |
| argocd-apps.projects | object | `{}` | |
| argocd-image-updater.authScripts.enabled | bool | `true` | |
| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |

View File

@ -0,0 +1,29 @@
# Bootstrap kubezero-git-sync app only if it doesn't exist yet
kubectl get application kubezero-git-sync -n argocd || \
yq -i '.argo-cd.kubezero.bootstrap=true' $WORKDIR/values.yaml
# Ensure we have an adminPassword or migrate existing one
PW=$(get_kubezero_secret argo-cd.adminPassword)
if [ -z "$PW" ]; then
# Check for existing password in actual secret
NEW_PW=$(get_secret_val argocd argocd-secret "admin.password")
if [ -z "$NEW_PW" ];then
ARGO_PWD=$(date +%s | sha256sum | base64 | head -c 12 ; echo)
NEW_PW=$(htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/')
set_kubezero_secret argo-cd.adminPasswordClear $ARGO_PWD
fi
set_kubezero_secret argo-cd.adminPassword "$NEW_PW"
fi
# GitSync privateKey
GITKEY=$(get_kubezero_secret argo-cd.kubezero.sshPrivateKey)
if [ -z "$GITKEY" ]; then
set_kubezero_secret argo-cd.kubezero.sshPrivateKey "Insert ssh Private Key from your git server"
fi
# Redis secret
kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
--from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)

View File

@ -1,22 +0,0 @@
# KubeZero secrets
#
test: supergeheim
secrets:
- name: argocd-secret
optional: false
data:
admin.password: test
admin.passwordMtime: now
server.secretkey: boohoo
- name: zero-downtime-gitea
optional: true
data:
name: zero-downtime-gitea
type: git
url: ssh://git@git.zero-downtime.net/quark/kube-grandnagus.git
sshPrivateKey: |
boohooKey
metadata:
labels:
argocd.argoproj.io/secret-type: repository

View File

@ -0,0 +1,13 @@
{{- if index .Values "argo-cd" "enabled" }}
apiVersion: v1
kind: Secret
metadata:
name: argocd-secret
namespace: argocd
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
type: Opaque
stringData:
admin.password: {{ index .Values "argo-cd" "configs" "secret" "argocdServerAdminPassword" }}
admin.passwordMtime: {{ default (dateInZone "2006-01-02T15:04:05Z" (now) "UTC") }}
{{- end }}

View File

@ -0,0 +1,30 @@
{{- if and (index .Values "argo-cd" "kubezero" "bootstrap") (index .Values "argo-cd" "kubezero" "repoUrl") }}
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kubezero-git-sync
namespace: argocd
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
annotations:
argocd.argoproj.io/sync-wave: "-20"
spec:
destination:
namespace: argocd
server: https://kubernetes.default.svc
project: kubezero
source:
{{- with index .Values "argo-cd" "kubezero" }}
repoURL: {{ .repoUrl }}
targetRevision: {{ .targetRevision }}
path: {{ .path }}
{{- end }}
directory:
recurse: true
syncPolicy:
automated:
prune: true
syncOptions:
- ServerSideApply=true
- ApplyOutOfSyncOnly=true
{{- end }}

View File

@ -0,0 +1,16 @@
{{- if and (index .Values "argo-cd" "kubezero" "sshPrivateKey") (index .Values "argo-cd" "kubezero" "repoUrl") }}
apiVersion: v1
kind: Secret
metadata:
name: kubezero-git-sync
namespace: argocd
labels:
argocd.argoproj.io/secret-type: repository
{{- include "kubezero-lib.labels" . | nindent 4 }}
type: Opaque
stringData:
name: kubezero-git-sync
type: git
url: {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
sshPrivateKey: {{ index .Values "argo-cd" "kubezero" "sshPrivateKey" }}
{{- end }}

View File

@ -0,0 +1,26 @@
{{- if and (index .Values "argo-cd" "kubezero" "bootstrap") (index .Values "argo-cd" "kubezero" "repoUrl") }}
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: kubezero
namespace: argocd
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
clusterResourceWhitelist:
- group: '*'
kind: '*'
description: KubeZero - ZeroDownTime Kubernetes Platform
destinations:
- namespace: '*'
server: https://kubernetes.default.svc
sourceRepos:
- https://cdn.zero-downtime.net/charts
- {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
syncWindows:
- kind: deny
schedule: '0 * * * *'
duration: 24h
namespaces:
- '*'
{{- end }}

View File

@ -30,13 +30,6 @@ argo-events:
configReloaderImage: natsio/nats-server-config-reloader:0.14.1
startCommand: /nats-server
argocd-apps:
enabled: false
projects: {}
applications: {}
argo-cd:
enabled: false
@ -45,7 +38,7 @@ argo-cd:
format: json
image:
repository: public.ecr.aws/zero-downtime/zdt-argocd
tag: v2.14.2
tag: v2.14.7
networkPolicy:
create: true
@ -71,44 +64,31 @@ argo-cd:
application.instanceLabelKey: Null
resource.customizations: |
cert-manager.io/Certificate:
# Lua script for customizing the health status assessment
argoproj.io/Application:
health.lua: |
hs = {}
hs.status = "Progressing"
hs.message = ""
if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
if condition.type == "Ready" and condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
end
if obj.status.health ~= nil then
hs.status = obj.status.health.status
if obj.status.health.message ~= nil then
hs.message = obj.status.health.message
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for certificate"
return hs
secret:
createSecret: false
# `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/' | base64 -w0`
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPassword: "ref+file://secrets.yaml#/test"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
argocdServerAdminPassword: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword
ssh:
extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
params:
controller.status.processors: 8
controller.operation.processors: 4
controller.kubectl.parallelism.limit: 8
controller.resource.health.persist: "false"
controller.diff.server.side: "true"
controller.sync.timeout.seconds: 1800
@ -144,6 +124,7 @@ argo-cd:
name: kubeconfigs
# Allow vals to read internal secrets across all namespaces
# @ignored
clusterRoleRules:
enabled: true
rules:
@ -151,6 +132,7 @@ argo-cd:
resources: ["secrets"]
verbs: ["get", "watch", "list"]
# @ignored
initContainers:
- name: create-kubeconfig
image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'
@ -201,6 +183,16 @@ argo-cd:
gateway: istio-ingress/ingressgateway
ipBlocks: []
kubezero:
# -- deploy the KubeZero Project and GitSync Root App
bootstrap: false
# valid git+ssh repository url
repoUrl: ""
path: "/"
targetRevision: HEAD
sshPrivateKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey
argocd-image-updater:
enabled: false

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.8.20
version: 0.8.21
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -18,19 +18,19 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: gitea
version: 10.6.0
version: 11.0.0
repository: https://dl.gitea.io/charts/
condition: gitea.enabled
- name: jenkins
version: 5.8.16
version: 5.8.18
repository: https://charts.jenkins.io
condition: jenkins.enabled
- name: trivy
version: 0.11.1
version: 0.12.0
repository: https://aquasecurity.github.io/helm-charts/
condition: trivy.enabled
- name: renovate
version: 39.180.2
version: 39.200.0
repository: https://docs.renovatebot.com/helm-charts
condition: renovate.enabled
kubeVersion: ">= 1.25.0"

View File

@ -1,6 +1,6 @@
# kubezero-ci
![Version: 0.8.20](https://img.shields.io/badge/Version-0.8.20-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.21](https://img.shields.io/badge/Version-0.8.21-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI
@ -18,11 +18,11 @@ Kubernetes: `>= 1.25.0`
| Repository | Name | Version |
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.11.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.1.6 |
| https://charts.jenkins.io | jenkins | 5.8.16 |
| https://dl.gitea.io/charts/ | gitea | 10.6.0 |
| https://docs.renovatebot.com/helm-charts | renovate | 39.180.2 |
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.12.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://charts.jenkins.io | jenkins | 5.8.18 |
| https://dl.gitea.io/charts/ | gitea | 11.0.0 |
| https://docs.renovatebot.com/helm-charts | renovate | 39.200.0 |
# Jenkins
- default build retention 10 builds, 32days
@ -68,7 +68,8 @@ Kubernetes: `>= 1.25.0`
| gitea.gitea.metrics.enabled | bool | `false` | |
| gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
| gitea.image.rootless | bool | `true` | |
| gitea.image.tag | string | `"1.23.4"` | |
| gitea.image.tag | string | `"1.23.5"` | |
| gitea.istio.blockApi | bool | `false` | |
| gitea.istio.enabled | bool | `false` | |
| gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| gitea.istio.url | string | `"git.example.com"` | |
@ -83,6 +84,7 @@ Kubernetes: `>= 1.25.0`
| gitea.resources.requests.memory | string | `"320Mi"` | |
| gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
| gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
| gitea.service.http.port | int | `80` | |
| gitea.strategy.type | string | `"Recreate"` | |
| gitea.test.enabled | bool | `false` | |
| jenkins.agent.annotations."cluster-autoscaler.kubernetes.io/safe-to-evict" | string | `"false"` | |
@ -156,7 +158,7 @@ Kubernetes: `>= 1.25.0`
| jenkins.serviceAccountAgent.create | bool | `true` | |
| jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
| renovate.cronjob.concurrencyPolicy | string | `"Forbid"` | |
| renovate.cronjob.jobBackoffLimit | int | `3` | |
| renovate.cronjob.jobBackoffLimit | int | `2` | |
| renovate.cronjob.schedule | string | `"0 3 * * *"` | |
| renovate.cronjob.successfulJobsHistoryLimit | int | `1` | |
| renovate.enabled | bool | `false` | |

View File

@ -12,6 +12,14 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
The changelog until v1.5.7 was auto-generated based on git commits.
Those entries include a reference to the git commit to be able to get more details.
## 5.8.18
Update `jenkins/jenkins` to version `2.492.2-jdk17`
## 5.8.17
Update `kubernetes` to version `4314.v5b_846cf499eb_`
## 5.8.16
Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`

View File

@ -1,10 +1,10 @@
annotations:
artifacthub.io/category: integration-delivery
artifacthub.io/changes: |
- Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`
- Update `jenkins/jenkins` to version `2.492.2-jdk17`
artifacthub.io/images: |
- name: jenkins
image: docker.io/jenkins/jenkins:2.492.1-jdk17
image: docker.io/jenkins/jenkins:2.492.2-jdk17
- name: k8s-sidecar
image: docker.io/kiwigrid/k8s-sidecar:1.30.1
- name: inbound-agent
@ -18,7 +18,7 @@ annotations:
- name: support
url: https://github.com/jenkinsci/helm-charts/issues
apiVersion: v2
appVersion: 2.492.1
appVersion: 2.492.2
description: 'Jenkins - Build great things at any scale! As the leading open source
automation server, Jenkins provides over 2000 plugins to support building, deploying
and automating any project. '
@ -46,4 +46,4 @@ sources:
- https://github.com/maorfr/kube-tasks
- https://github.com/jenkinsci/configuration-as-code-plugin
type: application
version: 5.8.16
version: 5.8.18

View File

@ -165,7 +165,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
| [controller.initializeOnce](./values.yaml#L424) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
| [controller.installLatestPlugins](./values.yaml#L413) | bool | Download the minimum required version or latest version of all dependencies | `true` |
| [controller.installLatestSpecifiedPlugins](./values.yaml#L416) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4313.va_9b_4fe2a_0e34","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4314.v5b_846cf499eb_","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
| [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` |
| [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
| [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |

View File

@ -403,7 +403,7 @@ controller:
# Plugins will be installed during Jenkins controller start
# -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
installPlugins:
- kubernetes:4313.va_9b_4fe2a_0e34
- kubernetes:4314.v5b_846cf499eb_
- workflow-aggregator:600.vb_57cdd26fdd7
- git:5.7.0
- configuration-as-code:1932.v75cb_b_f1b_698d

View File

@ -1,4 +1,5 @@
{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks }}
{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks .Values.gitea.istio.blockApi }}
# Limit access to /api
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
@ -19,6 +20,7 @@ spec:
to:
- operation:
hosts: ["{{ .Values.gitea.istio.url }}"]
paths: [ "/api/*" ]
when:
- key: connection.sni
values:

View File

@ -12,16 +12,15 @@ spec:
hosts:
- {{ .Values.gitea.istio.url }}
http:
{{- if .Values.gitea.istio.authProvider }}
# https://github.com/go-gitea/gitea/issues/13606
- match:
- name: api
match:
- uri:
regex: ^/user/login.*
redirect:
uri: /user/oauth2/{{ .Values.gitea.istio.authProvider }}
redirectCode: 302
{{- end }}
- route:
prefix: /api/
route:
- destination:
host: gitea-http
- name: notApi
route:
- destination:
host: gitea-http
tcp:

View File

@ -2,7 +2,7 @@ gitea:
enabled: false
image:
tag: 1.23.4
tag: 1.23.5
rootless: true
repliaCount: 1
@ -87,6 +87,7 @@ gitea:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: git.example.com
blockApi: false
jenkins:

View File

@ -41,6 +41,7 @@ Kubernetes: `>= 1.30.0-0`
| gateway.service.externalTrafficPolicy | string | `"Local"` | |
| gateway.service.type | string | `"NodePort"` | |
| gateway.terminationGracePeriodSeconds | int | `120` | |
| hardening.preserveExternalRequestId | bool | `false` | |
| hardening.rejectUnderscoresHeaders | bool | `true` | |
| hardening.unescapeSlashes | bool | `true` | |
| proxyProtocol | bool | `true` | |

View File

@ -32,6 +32,7 @@ spec:
use_remote_address: true
normalize_path: true
merge_slashes: true
preserve_external_request_id: {{ .Values.hardening.preserveExternalRequestId }}
{{- if .Values.hardening.unescapeSlashes }}
path_with_escaped_slashes_action: UNESCAPE_AND_REDIRECT
{{- end }}

View File

@ -43,3 +43,4 @@ proxyProtocol: true
hardening:
rejectUnderscoresHeaders: true
unescapeSlashes: true
preserveExternalRequestId: false

View File

@ -30,17 +30,7 @@ Kubernetes: `>= 1.30.0-0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| envoy-ratelimit.descriptors.ingress[0].key | string | `"remote_address"` | |
| envoy-ratelimit.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
| envoy-ratelimit.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
| envoy-ratelimit.descriptors.privateIngress[0].key | string | `"remote_address"` | |
| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
| envoy-ratelimit.enabled | bool | `false` | |
| envoy-ratelimit.failureModeDeny | bool | `false` | |
| envoy-ratelimit.localCacheSize | int | `1048576` | |
| envoy-ratelimit.log.format | string | `"json"` | |
| envoy-ratelimit.log.level | string | `"warn"` | |
| global.defaultPodDisruptionBudget.enabled | bool | `false` | |
| global.logAsJson | bool | `true` | |
| global.variant | string | `"distroless"` | |

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.8.13
version: 0.8.14
appVersion: 1.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

View File

@ -1,6 +1,6 @@
# kubezero-logging
![Version: 0.8.13](https://img.shields.io/badge/Version-0.8.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
![Version: 0.8.14](https://img.shields.io/badge/Version-0.8.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
@ -135,7 +135,7 @@ Kubernetes: `>= 1.26.0`
| kibana.istio.enabled | bool | `false` | |
| kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
| kibana.istio.url | string | `""` | |
| version | string | `"7.17.3"` | |
| version | string | `"7.17.7"` | |
## Resources:

View File

@ -2,7 +2,7 @@
# fullnameOverride: ""
# Version for ElasticSearch and Kibana have to match so we define it at top-level
version: 7.17.3
version: 7.17.7
elastic_password: "" # super_secret_elastic_password

View File

@ -62,12 +62,8 @@ kube-prometheus-stack:
memory: 128Mi
admissionWebhooks:
patch:
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
certManager:
enabled: true
nodeExporter:
enabled: true

View File

@ -17,7 +17,7 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: mariadb-galera
version: 14.2.1
version: 14.2.2
repository: https://charts.bitnami.com/bitnami
condition: mariadb-galera.enabled
kubeVersion: ">= 1.26.0"

View File

@ -274,7 +274,7 @@ fluentd:
#- fluent-plugin-s3
source:
sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey # "cloudbender"
sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey?inCluster # "cloudbender"
output:
# Defaults to OpenSearch in same namespace

View File

@ -21,4 +21,8 @@
.idea/
*.tmproj
.vscode/
Chart.lock
README.md.gotmpl
dashboards.yaml
jsonnet
update.sh

View File

@ -1,6 +1,6 @@
# kubezero
![Version: 1.31.3](https://img.shields.io/badge/Version-1.31.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 1.31.6](https://img.shields.io/badge/Version-1.31.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart
@ -14,11 +14,11 @@ KubeZero - Root App of Apps chart
## Requirements
Kubernetes: `>= 1.26.0-0`
Kubernetes: `>= 1.31.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.2.1 |
| https://cdn.zero-downtime.net/charts | kubezero-lib | 0.2.1 |
## Values
@ -32,16 +32,16 @@ Kubernetes: `>= 1.26.0-0`
| addons.external-dns.enabled | bool | `false` | |
| addons.forseti.enabled | bool | `false` | |
| addons.sealed-secrets.enabled | bool | `false` | |
| addons.targetRevision | string | `"0.8.11"` | |
| addons.targetRevision | string | `"0.8.13"` | |
| argo.argo-cd.enabled | bool | `false` | |
| argo.argo-cd.istio.enabled | bool | `false` | |
| argo.argocd-image-updater.enabled | bool | `false` | |
| argo.enabled | bool | `false` | |
| argo.namespace | string | `"argocd"` | |
| argo.targetRevision | string | `"0.2.6"` | |
| argo.targetRevision | string | `"0.3.1"` | |
| cert-manager.enabled | bool | `false` | |
| cert-manager.namespace | string | `"cert-manager"` | |
| cert-manager.targetRevision | string | `"0.9.10"` | |
| cert-manager.targetRevision | string | `"0.9.12"` | |
| falco.enabled | bool | `false` | |
| falco.k8saudit.enabled | bool | `false` | |
| falco.targetRevision | string | `"0.1.2"` | |
@ -54,35 +54,32 @@ Kubernetes: `>= 1.26.0-0`
| istio-ingress.enabled | bool | `false` | |
| istio-ingress.gateway.service | object | `{}` | |
| istio-ingress.namespace | string | `"istio-ingress"` | |
| istio-ingress.targetRevision | string | `"0.23.2"` | |
| istio-ingress.targetRevision | string | `"0.24.3"` | |
| istio-private-ingress.chart | string | `"kubezero-istio-gateway"` | |
| istio-private-ingress.enabled | bool | `false` | |
| istio-private-ingress.gateway.service | object | `{}` | |
| istio-private-ingress.namespace | string | `"istio-ingress"` | |
| istio-private-ingress.targetRevision | string | `"0.23.2"` | |
| istio-private-ingress.targetRevision | string | `"0.24.3"` | |
| istio.enabled | bool | `false` | |
| istio.namespace | string | `"istio-system"` | |
| istio.targetRevision | string | `"0.23.2"` | |
| kubezero.defaultTargetRevision | string | `"*"` | |
| kubezero.gitSync | object | `{}` | |
| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
| kubezero.server | string | `"https://kubernetes.default.svc"` | |
| istio.targetRevision | string | `"0.24.3"` | |
| logging.annotations."argocd.argoproj.io/compare-options" | string | `"ServerSideDiff=false"` | |
| logging.enabled | bool | `false` | |
| logging.namespace | string | `"logging"` | |
| logging.targetRevision | string | `"0.8.13"` | |
| logging.targetRevision | string | `"0.8.14"` | |
| metrics.enabled | bool | `false` | |
| metrics.istio.grafana | object | `{}` | |
| metrics.istio.prometheus | object | `{}` | |
| metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | |
| metrics.namespace | string | `"monitoring"` | |
| metrics.targetRevision | string | `"0.10.2"` | |
| metrics.targetRevision | string | `"0.11.0"` | |
| network.cilium.cluster | object | `{}` | |
| network.enabled | bool | `true` | |
| network.retain | bool | `true` | |
| network.targetRevision | string | `"0.5.5"` | |
| network.targetRevision | string | `"0.5.7"` | |
| operators.enabled | bool | `false` | |
| operators.namespace | string | `"operators"` | |
| operators.targetRevision | string | `"0.1.6"` | |
| operators.targetRevision | string | `"0.2.0"` | |
| storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | |
@ -90,7 +87,7 @@ Kubernetes: `>= 1.26.0-0`
| storage.k8up.enabled | bool | `false` | |
| storage.lvm-localpv.enabled | bool | `false` | |
| storage.snapshotController.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.8.9"` | |
| storage.targetRevision | string | `"0.8.10"` | |
| telemetry.enabled | bool | `false` | |
| telemetry.namespace | string | `"telemetry"` | |
| telemetry.targetRevision | string | `"0.4.1"` | |

View File

@ -1,41 +0,0 @@
kind: ApplicationSet
metadata:
name: kubezero
namespace: argocd
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
generators:
- git:
repoURL: {{ .Values.kubezero.applicationSet.repoURL }}
revision: {{ .Values.kubezero.applicationSet.revision }}
files:
{{- toYaml .Values.kubezero.applicationSet.files | nindent 6 }}
template:
metadata:
name: kubezero
spec:
project: kubezero
source:
repoURL: https://cdn.zero-downtime.net/charts
chart: kubezero
targetRevision: '{{ "{{" }} kubezero.version {{ "}}" }}'
helm:
parameters:
# We use this to detect if we are called from ArgoCD
- name: argocdAppName
value: $ARGOCD_APP_NAME
# This breaks the recursion, otherwise we install another kubezero project and app
# To be removed once we applicationSet is working and AppProject is moved back to ArgoCD chart
- name: installKubeZero
value: "false"
valueFiles:
- '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/kubezero.yaml'
- '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/values.yaml'
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true

View File

@ -0,0 +1,6 @@
# ensure we have a basic kubezero secret for cluster bootstrap and defaults
kubectl get secret kubezero-secrets -n kubezero && rc=$? || rc=$?
if [ $rc != 0 ]; then
kubectl create secret generic kubezero-secrets -n kubezero
fi

View File

@ -1,7 +0,0 @@
#!/bin/bash
ns=$(kubectl get ns -l argocd.argoproj.io/instance | grep -v NAME | awk '{print $1}')
for n in $ns; do
kubectl label --overwrite namespace $n 'argocd.argoproj.io/instance-'
done

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
# Script to migrate an existing ECK 1.2.1 installation to Helm.
set -euo pipefail
RELEASE_NAMESPACE=${RELEASE_NAMESPACE:-"elastic-system"}
echo "Uninstalling ECK"
kubectl delete -n "${RELEASE_NAMESPACE}" \
serviceaccount/elastic-operator \
secret/elastic-webhook-server-cert \
clusterrole.rbac.authorization.k8s.io/elastic-operator \
clusterrole.rbac.authorization.k8s.io/elastic-operator-view \
clusterrole.rbac.authorization.k8s.io/elastic-operator-edit \
clusterrolebinding.rbac.authorization.k8s.io/elastic-operator \
rolebinding.rbac.authorization.k8s.io/elastic-operator \
service/elastic-webhook-server \
statefulset.apps/elastic-operator \
validatingwebhookconfiguration.admissionregistration.k8s.io/elastic-webhook.k8s.elastic.co

View File

@ -9,6 +9,10 @@ metadata:
namespace: argocd
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
{{- with ( index .Values $name "annotations" ) }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if not ( index .Values $name "retain" ) }}
finalizers:
- resources-finalizer.argocd.argoproj.io
@ -17,20 +21,17 @@ spec:
project: kubezero
source:
{{- if index .Values $name "chart" }}
chart: {{ index .Values $name "chart" }}
{{- else }}
chart: kubezero-{{ $name }}
{{- end }}
repoURL: {{ .Values.kubezero.repoURL }}
targetRevision: {{ default .Values.kubezero.targetRevision ( index .Values $name "targetRevision" ) | quote }}
chart: {{ default (print "kubezero-" $name) (index .Values $name "chart") }}
repoURL: {{ default "https://cdn.zero-downtime.net/charts" (index .Values $name "repository") }}
targetRevision: {{ default "HEAD" ( index .Values $name "targetRevision" ) | quote }}
helm:
skipTests: true
# add with 1.32
#skipTests: true
valuesObject:
{{- include (print $name "-values") $ | nindent 8 }}
destination:
server: {{ .Values.kubezero.server }}
server: "https://kubernetes.default.svc"
namespace: {{ default "kube-system" ( index .Values $name "namespace" ) }}
revisionHistoryLimit: 2

View File

@ -42,6 +42,8 @@ external-dns:
- "--aws-zone-type=public"
- "--aws-zones-cache-duration=1h"
env:
- name: AWS_REGION
value: {{ .Values.global.aws.region }}
- name: AWS_ROLE_ARN
value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.externalDNS"
- name: AWS_WEB_IDENTITY_TOKEN_FILE

View File

@ -2,10 +2,22 @@
argo-cd:
enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}
{{- with index .Values "argo" "argo-cd" "configs" }}
configs:
{{- with index .Values "argo" "argo-cd" "configs" }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
params:
{{- if not $.Values.global.highAvailable }}
# Reduce load on API server on single node control plane
controller.status.processors: 2
controller.operation.processors: 1
controller.kubectl.parallelism.limit: 1
{{- else }}
controller.status.processors: 8
controller.operation.processors: 4
controller.kubectl.parallelism.limit: 4
{{- end }}
controller:
metrics:
@ -27,42 +39,10 @@ argo-cd:
{{- end }}
{{- end }}
argocd-apps:
enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}
projects:
kubezero:
namespace: argocd
description: KubeZero - ZeroDownTime Kubernetes Platform
sourceRepos:
- {{ .Values.kubezero.repoURL }}
{{- with .Values.kubezero.gitSync.repoURL }}
- {{ . }}
{{- end }}
destinations:
- namespace: '*'
server: https://kubernetes.default.svc
clusterResourceWhitelist:
- group: '*'
kind: '*'
applications:
kubezero-git-sync:
namespace: argocd
project: kubezero
source:
repoURL: {{ .Values.kubezero.gitSync.repoURL }}
targetRevision: {{ .Values.kubezero.gitSync.targetRevision }}
path: {{ .Values.kubezero.gitSync.path }}
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
{{- with index .Values "argo" "argo-cd" "kubezero" }}
kubezero:
{{- toYaml . | nindent 4 }}
{{- end }}
argocd-image-updater:
enabled: {{ default "false" (index .Values "argo" "argocd-image-updater" "enabled") }}

View File

@ -1,9 +1,3 @@
kubezero:
server: https://kubernetes.default.svc
repoURL: https://cdn.zero-downtime.net/charts
defaultTargetRevision: '*'
gitSync: {}
global:
clusterName: zdt-trial-cluster
@ -114,12 +108,14 @@ metrics:
logging:
enabled: false
namespace: logging
targetRevision: 0.8.13
targetRevision: 0.8.14
annotations:
argocd.argoproj.io/compare-options: ServerSideDiff=false
argo:
enabled: false
namespace: argocd
targetRevision: 0.2.8
targetRevision: 0.3.1
argo-cd:
enabled: false
istio:

docs/hooks.md (new file)
View File

@ -0,0 +1,11 @@
# KubeZero Helm hooks
## Abstract
Scripts in the `hooks.d` folder of each chart are executed at the corresponding stages (`pre-crds`, `pre-install`, `post-install`) when the chart is applied via libhelm.
*These hooks do NOT work via ArgoCD*
## Flow
- hooks are executed as part of the libhelm tasks such as `apply`
- they run with the current kubectl context
- they are executed from the root working directory; e.g. to set a Helm value, a script can edit the `./values.yaml` file (see the sketch below)
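A minimal sketch of such a hook, assuming a chart-local `hooks.d/pre-install.sh`; the secret name and the value key below are illustrative only, not taken from the repo:

```bash
# Sourced by libhelm before the chart is applied; runs against the current kubectl context.

# Example: make sure a secret the chart relies on exists (name is hypothetical)
kubectl get secret example-secret -n kubezero >/dev/null 2>&1 || \
  kubectl create secret generic example-secret -n kubezero

# Example: adjust a value consumed by the subsequent helm template/apply step
# (key is hypothetical; libhelm renders the chart with the edited values file afterwards)
yq -i '.example.enabled = true' ./values.yaml
```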

View File

@ -3,6 +3,7 @@
## What's new - Major themes
- all KubeZero and support AMIs based on [Alpine 3.21](https://alpinelinux.org/posts/Alpine-3.21.0-released.html)
- network policies for ArgoCD
- Nvidia worker nodes are labeled with detected GPU product code
- Prometheus upgraded to V3, reducing CPU and memory requirements, see [upstream blog](https://prometheus.io/blog/2024/11/14/prometheus-3-0/)
## Features and fixes
@ -10,10 +11,10 @@
## Version upgrades
- cilium 1.16.6
- istio 1.24.2
- ArgoCD 2.14.3 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- istio 1.24.3
- ArgoCD 2.14.5 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- Prometheus 3.1.0 / Grafana 11.5.1
- Nvidia container toolkit 1.17, drivers 565.57.01, Cuda 12.7
- Nvidia container toolkit 1.17.4, drivers 570.86.15, Cuda 12.8
## Resources
- [Kubernetes v1.31 upstream release blog](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/)

View File

@ -18,7 +18,7 @@ update_jsonnet() {
update_helm() {
#helm repo update
helm dep update
helm dep build
}
# AWS public ECR