Compare commits


16 Commits

SHA1 Message Date
85c67ad9dc chore(deps): update helm release rabbitmq to v15 2025-03-20 03:02:27 +00:00
63f96e58ba fix: ensure root app is re-created 2025-03-19 12:39:06 +01:00
ab744494e6 fix: apply kubezero module first, fix hooks 2025-03-18 16:18:20 +00:00
af29836a27 feat: new custom helm hooks 2025-03-18 14:47:55 +00:00
30bc95408a feat: improved ArgoCD bootstrap, tool cleanups 2025-03-17 20:30:34 +00:00
545a7fd8b1 feat: latest CI tools, improved Gitea API endpoint protection 2025-03-13 21:02:53 +00:00
56a2926917 Merge pull request 'chore(deps): update helm release gitea to v11' (#59) from renovate/kubezero-ci-major-kubezero-ci-dependencies into main
Reviewed-on: #59
2025-03-13 12:41:06 +00:00
b8114bd053 chore(deps): update helm release gitea to v11 2025-03-13 12:41:06 +00:00
53f940a54c Merge pull request 'chore(deps): update kubezero-ci-dependencies' (#57) from renovate/kubezero-ci-kubezero-ci-dependencies into main
Reviewed-on: #57
2025-03-13 12:40:57 +00:00
58780f1e0e chore(deps): update kubezero-ci-dependencies 2025-03-13 03:01:47 +00:00
4c10271ec6 Merge pull request 'chore(deps): update helm release argo-cd to v7.8.9' (#54) from renovate/kubezero-argo-kubezero-argo-dependencies into main
Reviewed-on: #54
2025-03-11 18:17:08 +00:00
5246f57329 chore(deps): update helm release argo-cd to v7.8.9 2025-03-11 18:17:08 +00:00
5bc6e6e435 fix: reduce load on api-server on single node control planes, more argo related fixes 2025-03-11 16:37:27 +00:00
cbcaec807a fix: replace apps during 1.31 2025-03-11 14:07:40 +01:00
bfafccaf32 feat: tooling tweaks, Istio ingress option to preserver external request Ids 2025-03-10 17:49:24 +00:00
3304363986 Fix: fix for minimal ES version in logging, clustered control plane upgrade fix, tooling cleanup 2025-03-04 11:47:19 +00:00
41 changed files with 340 additions and 304 deletions

View File

@@ -4,10 +4,10 @@
 set -x

 ARTIFACTS=($(echo $1 | tr "," "\n"))
-ACTION=$2
+ACTION="${2:-apply}"
+ARGOCD="${3:-true}"

 LOCAL_DEV=1
-ARGOCD="False"

 #VERSION="latest"
 KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
@@ -36,46 +36,6 @@ parse_version() {
 KUBE_VERSION=$(parse_version $KUBE_VERSION)

-### Various hooks for modules
-
-################
-# cert-manager #
-################
-function cert-manager-post() {
-  # If any error occurs, wait for initial webhook deployment and try again
-  # see: https://cert-manager.io/docs/concepts/webhook/#webhook-connection-problems-shortly-after-cert-manager-installation
-  if [ $rc -ne 0 ]; then
-    wait_for "kubectl get deployment -n $namespace cert-manager-webhook"
-    kubectl rollout status deployment -n $namespace cert-manager-webhook
-    wait_for 'kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0"'
-  fi
-
-  wait_for "kubectl get ClusterIssuer -n $namespace kubezero-local-ca-issuer"
-  kubectl wait --timeout=180s --for=condition=Ready -n $namespace ClusterIssuer/kubezero-local-ca-issuer
-}
-
-###########
-# ArgoCD #
-###########
-function argocd-pre() {
-  kubectl delete job argo-argocd-redis-secret-init -n argocd || true
-  for f in $CLUSTER/secrets/argocd-*.yaml; do
-    kubectl apply -f $f
-  done
-}
-
-###########
-# Metrics #
-###########
-# Cleanup patch jobs from previous runs , ArgoCD does this automatically
-function metrics-pre() {
-  kubectl delete jobs --field-selector status.successful=1 -n monitoring
-}
-
 ### Main
 get_kubezero_values $ARGOCD
@@ -85,7 +45,7 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $
 # Root KubeZero apply directly and exit
 if [ ${ARTIFACTS[0]} == "kubezero" ]; then
-  kubectl replace -f $WORKDIR/kubezero/templates
+  kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
   exit $?

 # "catch all" apply all enabled modules
@@ -100,12 +60,12 @@ if [ "$ACTION" == "delete" ]; then
     _helm delete ${ARTIFACTS[idx]} || true
   done
 else
-  if [ "$ACTION" == "" -o "$ACTION" == "crds" ]; then
+  if [ "$ACTION" == "apply" -o "$ACTION" == "crds" ]; then
     for t in ${ARTIFACTS[@]}; do
       _helm crds $t || true
     done
   fi

   for t in ${ARTIFACTS[@]}; do
-    _helm apply $t || true
+    _helm $ACTION $t || true
   done
 fi
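
With the refactored argument handling, both ACTION and the ArgoCD flag are now optional. A few hypothetical invocations, assuming the script above is saved as apply-module.sh (the actual file path is not shown in this compare view):

./apply-module.sh kubezero                  # replace the root app-of-apps directly and exit
./apply-module.sh istio,cert-manager        # ACTION defaults to "apply", ARGOCD to "true"
./apply-module.sh logging crds              # extract and apply CRDs only
./apply-module.sh metrics delete false      # delete the module without the ArgoCD field manager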

View File

@@ -14,7 +14,9 @@ pre_control_plane_upgrade_cluster() {
 # All things after the first controller / control plane upgrade
 post_control_plane_upgrade_cluster() {
-  echo
+  # delete previous root app controlled by kubezero module
+  kubectl delete application kubezero-git-sync -n argocd || true
+  kubectl delete appproject kubezero -n argocd || true
 }

View File

@@ -104,9 +104,9 @@ pre_kubeadm() {
 # Shared steps after calling kubeadm
 post_kubeadm() {
-  # KubeZero resources
+  # KubeZero resources - will never be applied by ArgoCD
   for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
-    kubectl apply -f $f $LOG
+    kubectl apply -f $f --server-side --force-conflicts $LOG
   done
 }
@@ -115,9 +115,13 @@ post_kubeadm() {
 control_plane_upgrade() {
   CMD=$1
+  ARGOCD=$(argo_used)
   render_kubeadm upgrade

   if [[ "$CMD" =~ ^(cluster)$ ]]; then
+    pre_control_plane_upgrade_cluster
+
     # get current values, argo app over cm
     get_kubezero_values $ARGOCD
@@ -127,13 +131,13 @@ control_plane_upgrade() {
     update_kubezero_cm

-    if [ "$ARGOCD" == "True" ]; then
+    if [ "$ARGOCD" == "true" ]; then
       # update argo app
       export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
       kubectl get application kubezero -n argocd -o yaml | \
         yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
         > $WORKDIR/new-argocd-app.yaml
-      kubectl replace -f $WORKDIR/new-argocd-app.yaml
+      kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
     fi

     pre_kubeadm
@@ -147,13 +151,19 @@ control_plane_upgrade() {
     # install re-certed kubectl config for root
     cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config

+    post_control_plane_upgrade_cluster
+
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."

   elif [[ "$CMD" =~ ^(final)$ ]]; then
+    pre_cluster_upgrade_final
+
     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
     #_kubeadm upgrade apply phase addon all $KUBE_VERSION
     _kubeadm upgrade apply $KUBE_VERSION

+    post_cluster_upgrade_final
+
     echo "Upgraded kubeadm addons."
   fi
@@ -318,7 +328,14 @@ apply_module() {
   done

   for t in $MODULES; do
-    _helm apply $t
+    # apply/replace app of apps directly
+    if [ $t == "kubezero" ]; then
+      kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
+    else
+      #_helm apply $t
+      # During 1.31 we change the ArgoCD tracking so replace
+      _helm replace $t
+    fi
   done

   echo "Applied KubeZero modules: $MODULES"
@@ -394,17 +411,10 @@ for t in $@; do
   join) control_plane_node join;;
   restore) control_plane_node restore;;
   kubeadm_upgrade)
-    ARGOCD=$(argo_used)
-
-    # call hooks
-    pre_control_plane_upgrade_cluster
     control_plane_upgrade cluster
-    post_control_plane_upgrade_cluster
     ;;
   finalize_cluster_upgrade)
-    ARGOCD=$(argo_used)
-    pre_cluster_upgrade_final
     control_plane_upgrade final
-    post_cluster_upgrade_final
     ;;
   apply_*)
     ARGOCD=$(argo_used)

View File

@@ -2,11 +2,10 @@
 # Simulate well-known CRDs being available
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
-LOCAL_DEV=${LOCAL_DEV:-""}
-
 export HELM_SECRETS_BACKEND="vals"
+LOCAL_DEV=${LOCAL_DEV:-""}

 # Waits for max 300s and retries
 function wait_for() {
   local TRIES=0
@@ -30,15 +29,41 @@ function chart_location() {
 function argo_used() {
   kubectl get application kubezero -n argocd >/dev/null \
-    && echo "True" || echo "False"
+    && echo "true" || echo "false"
 }

+function field_manager() {
+  local argo=${1:-"false"}
+
+  if [ "$argo" == "true" ]; then
+    echo "--field-manager argo-controller"
+  else
+    echo ""
+  fi
+}
+
+function get_kubezero_secret() {
+  export _key="$1"
+
+  kubectl get secrets -n kubezero kubezero-secrets -o yaml | yq '.data.[env(_key)]' | base64 -d -w0
+}
+
+function set_kubezero_secret() {
+  local key="$1"
+  local val="$2"
+
+  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n $val |base64 -w0)\" }}"
+}

 # get kubezero-values from ArgoCD if available or use in-cluster CM
 function get_kubezero_values() {
-  local argo=${1:-"False"}
+  local argo=${1:-"false"}

-  if [ "$argo" == "True" ]; then
+  if [ "$argo" == "true" ]; then
     kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml
   else
     kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml
@@ -55,7 +80,7 @@ function update_kubezero_cm() {
 # sync kubezero-values CM from ArgoCD app
 function sync_kubezero_cm_from_argo() {
-  get_kubezero_values True
+  get_kubezero_values true
   update_kubezero_cm
 }
@@ -96,25 +121,12 @@ function waitSystemPodsRunning() {
   done
 }

-function argo_app_synced() {
-  APP=$1
-
-  # Ensure we are synced otherwise bail out
-  status=$(kubectl get application $APP -n argocd -o yaml | yq .status.sync.status)
-  if [ "$status" != "Synced" ]; then
-    echo "ArgoCD Application $APP not 'Synced'!"
-    return 1
-  fi
-
-  return 0
-}
-
 # make sure namespace exists prior to calling helm as the create-namespace options doesn't work
 function create_ns() {
   local namespace=$1
   if [ "$namespace" != "kube-system" ]; then
-    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
+    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace $(field_manager $ARGOCD)
   fi
 }
@@ -127,7 +139,7 @@ function delete_ns() {
 # Extract crds via helm calls
-function _crds() {
+function crds() {
   helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
 #!/usr/bin/python3
 import yaml
@@ -144,7 +156,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
   # Only apply if there are actually any crds
   if [ -s $WORKDIR/crds.yaml ]; then
     [ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
-    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
+    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts $(field_manager $ARGOCD)
   fi
 }
@@ -189,11 +201,22 @@ function _helm() {
     yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml

+  # extract remote chart or copy local to access hooks
+  rm -rf $WORKDIR/$chart $WORKDIR/${chart}*.tgz
+
+  if [ -z "$LOCAL_DEV" ]; then
+    helm pull $(chart_location $chart) --untar -d $WORKDIR
+  else
+    cp -r $(chart_location $chart) $WORKDIR
+  fi
+
   if [ $action == "crds" ]; then
-    # Allow custom CRD handling
-    declare -F ${module}-crds && ${module}-crds || _crds
+    # Pre-crd hook
+    [ -f $WORKDIR/$chart/hooks.d/pre-crds.sh ] && (cd $WORKDIR; bash ./$chart/hooks.d/pre-crds.sh)

-  elif [ $action == "apply" ]; then
+    crds
+
+  elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml
@@ -201,13 +224,16 @@ function _helm() {
     create_ns $namespace

     # Optional pre hook
-    declare -F ${module}-pre && ${module}-pre
+    [ -f $WORKDIR/$chart/hooks.d/pre-install.sh ] && (cd $WORKDIR; bash ./$chart/hooks.d/pre-install.sh)

     render
-    kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
+    [ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?
+
+    # If replace failed try apply at least
+    [ $action == "apply" -o $rc -ne 0 ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?

     # Optional post hook
-    declare -F ${module}-post && ${module}-post
+    [ -f $WORKDIR/$chart/hooks.d/post-install.sh ] && (cd $WORKDIR; bash ./$chart/hooks.d/post-install.sh)

   elif [ $action == "delete" ]; then
     render
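
A short usage sketch of the new libhelm helpers. The secret key myModule.password is hypothetical, the kubezero-secrets Secret is assumed to already exist, and the $(field_manager ...) expansion is deliberately left unquoted so it contributes either the two flag tokens or nothing:

ARGOCD=$(argo_used)   # now returns lowercase "true" / "false"

# adds "--field-manager argo-controller" only when ArgoCD owns the cluster
kubectl apply -f some-manifest.yaml --server-side --force-conflicts $(field_manager $ARGOCD)

set_kubezero_secret myModule.password "s3cr3t"   # myModule.password is a hypothetical key
get_kubezero_secret myModule.password            # prints: s3cr3t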

View File

@@ -8,12 +8,20 @@ import yaml
 def migrate(values):
     """Actual changes here"""

-    # remove syncOptions from root app
-    try:
-        values["kubezero"].pop("syncPolicy")
-    except KeyError:
-        pass
+    # migrate kubezero root app of apps to Argo chart
+    try:
+        if values["kubezero"]:
+            try:
+                if values["kubezero"]["syncPolicy"]:
+                    values["kubezero"].pop("syncPolicy")
+            except KeyError:
+                pass
+
+            values["kubezero"]["gitSync"]["repoUrl"] = values["kubezero"]["gitSync"].pop("repoURL")
+            values["argo"]["argo-cd"]["kubezero"] = values["kubezero"]["gitSync"]
+            values.pop("kubezero")
+    except KeyError:
+        pass

     return values
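
The same values reshaping can be sketched as yq v4 edits against a local values.yaml; this mirrors the Python migration above but is illustrative only and lacks its existence guards:

# illustrative yq equivalent of the migration above (no guards for a missing .kubezero)
yq -i '
  del(.kubezero.syncPolicy) |
  .kubezero.gitSync.repoUrl = .kubezero.gitSync.repoURL |
  del(.kubezero.gitSync.repoURL) |
  .argo."argo-cd".kubezero = .kubezero.gitSync |
  del(.kubezero)
' values.yaml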

View File

@@ -17,7 +17,7 @@ ARGOCD=$(argo_used)
 echo "Checking that all pods in kube-system are running ..."
 #waitSystemPodsRunning

-[ "$ARGOCD" == "True" ] && disable_argo
+[ "$ARGOCD" == "true" ] && disable_argo

 # Check if we already have all controllers on the current version
 OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
@@ -35,10 +35,10 @@ read -r
 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
-#[ "$ARGOCD" == "True" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
+#[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero

 # upgrade modules
-control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
+control_plane_upgrade "apply_kubezero apply_network, apply_addons, apply_storage, apply_operators"

 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
@@ -63,4 +63,4 @@ echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to
 echo "<Return> to continue and re-enable ArgoCD:"
 read -r

-[ "$ARGOCD" == "True" ] && enable_argo
+[ "$ARGOCD" == "true" ] && enable_argo

View File

@@ -17,22 +17,36 @@ failureModeDeny: false
 # - slow: 1 req/s over a minute per sourceIP
 descriptors:
   ingress:
-  - key: speed
-    value: slow
+  - key: sourceIp
+    value: sixtyPerMinute
     descriptors:
     - key: remote_address
       rate_limit:
         unit: minute
         requests_per_unit: 60
+  - key: sourceIp
+    value: tenPerSecond
+    descriptors:
+    - key: remote_address
+      rate_limit:
+        unit: second
+        requests_per_unit: 10

   privateIngress:
-  - key: speed
-    value: slow
+  - key: sourceIp
+    value: sixtyPerMinute
     descriptors:
     - key: remote_address
       rate_limit:
         unit: minute
         requests_per_unit: 60
+  - key: sourceIp
+    value: tenPerSecond
+    descriptors:
+    - key: remote_address
+      rate_limit:
+        unit: second
+        requests_per_unit: 10

 metrics:
   enabled: false
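
With two descriptors per listener, a client is throttled by whichever bucket empties first. A hypothetical smoke test against a rate-limited ingress (example.my.org is a placeholder host):

# expect a mix of 200s and 429s once the tenPerSecond bucket is exhausted
for i in $(seq 1 30); do
  curl -s -o /dev/null -w "%{http_code}\n" https://example.my.org/
done | sort | uniq -c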

charts/kubeadm/TODO Normal file

View File

@@ -0,0 +1,6 @@
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_256_GCM_SHA384"}
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_128_GCM_SHA256"}
+
+"RuntimeConfig from runtime service failed","err":"rpc error: code = Unimplemented desc = unknown method RuntimeConfig for service runtime.v1.RuntimeService"}
+
+"CRI implementation should be updated to support RuntimeConfig when KubeletCgroupDriverFromCRI feature gate has been enabled. Falling back to using cgroupDriver from kubelet config."

View File

@@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta4
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
-#  NonGracefulFailover: true
+featureGates:
+  ControlPlaneKubeletLocalMode: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16

View File

@@ -3,7 +3,7 @@
 {{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
 {{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList"}}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
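
For context, the csv branch of this helper joins every gate as `<name>=true,`; an illustrative rendering derived by reading the template above, not taken from actual chart output:

# kubeadm.featuregates rendered with .return == "csv" (illustrative):
# CustomCPUCFSQuotaPeriod=true,AuthorizeWithSelectors=true,AuthorizeNodeWithSelectors=true,ConsistentListFromCache=true,VolumeAttributesClass=true,WatchList=true,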

View File

@@ -0,0 +1,27 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+
+README.md.gotmpl
+dashboards.yaml
+jsonnet

View File

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.2.8
+version: 0.3.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -22,15 +22,11 @@ dependencies:
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.8.2
+    version: 7.8.9
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
-  - name: argocd-apps
-    version: 2.0.2
-    repository: https://argoproj.github.io/argo-helm
-    condition: argo-cd.enabled
   - name: argocd-image-updater
     version: 0.12.0
     repository: https://argoproj.github.io/argo-helm
     condition: argocd-image-updater.enabled
-kubeVersion: ">= 1.26.0-0"
+kubeVersion: ">= 1.30.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-argo

-![Version: 0.2.8](https://img.shields.io/badge/Version-0.2.8-informational?style=flat-square)
+![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square)

 KubeZero Argo - Events, Workflow, CD
@@ -14,15 +14,14 @@ KubeZero Argo - Events, Workflow, CD
 ## Requirements

-Kubernetes: `>= 1.26.0-0`
+Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 7.8.2 |
+| https://argoproj.github.io/argo-helm | argo-cd | 7.8.9 |
 | https://argoproj.github.io/argo-helm | argo-events | 2.4.13 |
-| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.2 |
 | https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.0 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |

 ## Values
@@ -30,7 +29,7 @@ Kubernetes: `>= 1.26.0-0`
 |-----|------|---------|-------------|
 | argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
 | argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
-| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n  # Lua script for customizing the health status assessment\n  health.lua: |\n    hs = {}\n    if obj.status ~= nil then\n      if obj.status.conditions ~= nil then\n        for i, condition in ipairs(obj.status.conditions) do\n          if condition.type == \"Ready\" and condition.status == \"False\" then\n            hs.status = \"Degraded\"\n            hs.message = condition.message\n            return hs\n          end\n          if condition.type == \"Ready\" and condition.status == \"True\" then\n            hs.status = \"Healthy\"\n            hs.message = condition.message\n            return hs\n          end\n        end\n      end\n    end\n    hs.status = \"Progressing\"\n    hs.message = \"Waiting for certificate\"\n    return hs\n"` | |
+| argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n  health.lua: |\n    hs = {}\n    hs.status = \"Progressing\"\n    hs.message = \"\"\n    if obj.status ~= nil then\n      if obj.status.health ~= nil then\n        hs.status = obj.status.health.status\n        if obj.status.health.message ~= nil then\n          hs.message = obj.status.health.message\n        end\n      end\n    end\n    return hs\n"` | |
 | argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
 | argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
 | argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
@@ -39,8 +38,8 @@ Kubernetes: `>= 1.26.0-0`
 | argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
 | argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
 | argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
-| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
-| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
+| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
+| argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
 | argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
 | argo-cd.configs.params."server.insecure" | bool | `true` | |
 | argo-cd.configs.secret.createSecret | bool | `false` | |
@@ -54,31 +53,24 @@ Kubernetes: `>= 1.26.0-0`
 | argo-cd.dex.enabled | bool | `false` | |
 | argo-cd.enabled | bool | `false` | |
 | argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
-| argo-cd.global.image.tag | string | `"v2.13.1"` | |
+| argo-cd.global.image.tag | string | `"v2.14.5"` | |
 | argo-cd.global.logging.format | string | `"json"` | |
 | argo-cd.global.networkPolicy.create | bool | `true` | |
 | argo-cd.istio.enabled | bool | `false` | |
 | argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
 | argo-cd.istio.ipBlocks | list | `[]` | |
+| argo-cd.kubezero.bootstrap | bool | `false` | |
+| argo-cd.kubezero.path | string | `"/"` | |
+| argo-cd.kubezero.repoUrl | string | `"https://git.my.org/thiscluster"` | |
+| argo-cd.kubezero.targetRevision | string | `"HEAD"` | |
 | argo-cd.notifications.enabled | bool | `false` | |
-| argo-cd.redisSecretInit.enabled | bool | `false` | |
 | argo-cd.repoServer.clusterRoleRules.enabled | bool | `true` | |
 | argo-cd.repoServer.clusterRoleRules.rules[0].apiGroups[0] | string | `""` | |
 | argo-cd.repoServer.clusterRoleRules.rules[0].resources[0] | string | `"secrets"` | |
 | argo-cd.repoServer.clusterRoleRules.rules[0].verbs[0] | string | `"get"` | |
 | argo-cd.repoServer.clusterRoleRules.rules[0].verbs[1] | string | `"watch"` | |
 | argo-cd.repoServer.clusterRoleRules.rules[0].verbs[2] | string | `"list"` | |
-| argo-cd.repoServer.initContainers[0].command[0] | string | `"/usr/local/bin/sa2kubeconfig.sh"` | |
-| argo-cd.repoServer.initContainers[0].command[1] | string | `"/home/argocd/.kube/config"` | |
-| argo-cd.repoServer.initContainers[0].image | string | `"{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include \"argo-cd.defaultTag\" .) .Values.repoServer.image.tag }}"` | |
-| argo-cd.repoServer.initContainers[0].imagePullPolicy | string | `"{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}"` | |
-| argo-cd.repoServer.initContainers[0].name | string | `"create-kubeconfig"` | |
-| argo-cd.repoServer.initContainers[0].securityContext.allowPrivilegeEscalation | bool | `false` | |
-| argo-cd.repoServer.initContainers[0].securityContext.capabilities.drop[0] | string | `"ALL"` | |
-| argo-cd.repoServer.initContainers[0].securityContext.readOnlyRootFilesystem | bool | `true` | |
-| argo-cd.repoServer.initContainers[0].securityContext.runAsNonRoot | bool | `true` | |
-| argo-cd.repoServer.initContainers[0].securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
-| argo-cd.repoServer.initContainers[0].volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
-| argo-cd.repoServer.initContainers[0].volumeMounts[0].name | string | `"kubeconfigs"` | |
 | argo-cd.repoServer.metrics.enabled | bool | `false` | |
 | argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
 | argo-cd.repoServer.volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
@@ -101,9 +93,6 @@ Kubernetes: `>= 1.26.0-0`
 | argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
 | argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
 | argo-events.enabled | bool | `false` | |
-| argocd-apps.applications | object | `{}` | |
-| argocd-apps.enabled | bool | `false` | |
-| argocd-apps.projects | object | `{}` | |
 | argocd-image-updater.authScripts.enabled | bool | `true` | |
 | argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
 | argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |

View File

@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# Bootstrap kubezero-git-sync app if it doenst exist
+kubectl get application kubezero-git-sync -n argocd && rc=$? || rc=$?
+
+[ $rc != 0 ] && yq -i '.argo-cd.kubezero.bootstrap=true' values.yaml
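
An equivalent, more conventional form of the hook's check, shown only for readability (not part of the chart):

if ! kubectl get application kubezero-git-sync -n argocd >/dev/null 2>&1; then
  yq -i '.argo-cd.kubezero.bootstrap=true' values.yaml
fi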

View File

@@ -0,0 +1,28 @@
+{{- if index .Values "argo-cd" "kubezero" "bootstrap" }}
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: kubezero-git-sync
+  namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "-20"
+spec:
+  destination:
+    namespace: argocd
+    server: https://kubernetes.default.svc
+  project: kubezero
+  source:
+    {{- with index .Values "argo-cd" "kubezero" }}
+    repoURL: {{ .repoUrl }}
+    targetRevision: {{ .targetRevision }}
+    path: {{ .path }}
+    {{- end }}
+    directory:
+      recurse: true
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+    - ServerSideApply=true
+    - ApplyOutOfSyncOnly=true
+{{- end }}
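
To render just this bootstrap Application, something like the following should work; the chart path and the template file name are assumptions based on this diff, not confirmed by it:

helm template kubezero-argo ./charts/kubezero-argo \
  --set argo-cd.kubezero.bootstrap=true \
  --set argo-cd.kubezero.repoUrl=https://git.my.org/thiscluster \
  --show-only templates/argo-cd/kubezero-git-sync-app.yaml   # template path is an assumption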

View File

@@ -0,0 +1,18 @@
+{{- if index .Values "argo-cd" "kubezero" "bootstrap" }}
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+  name: kubezero
+  namespace: argocd
+spec:
+  clusterResourceWhitelist:
+  - group: '*'
+    kind: '*'
+  description: KubeZero - ZeroDownTime Kubernetes Platform
+  destinations:
+  - namespace: '*'
+    server: https://kubernetes.default.svc
+  sourceRepos:
+  - https://cdn.zero-downtime.net/charts
+  - {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
+{{- end }}

View File

@@ -30,13 +30,6 @@ argo-events:
         configReloaderImage: natsio/nats-server-config-reloader:0.14.1
         startCommand: /nats-server

-argocd-apps:
-  enabled: false
-  projects: {}
-  applications: {}
-
 argo-cd:
   enabled: false
@@ -45,7 +38,7 @@ argo-cd:
       format: json
     image:
       repository: public.ecr.aws/zero-downtime/zdt-argocd
-      tag: v2.14.2
+      tag: v2.14.5
     networkPolicy:
       create: true
@@ -71,28 +64,19 @@ argo-cd:
       application.instanceLabelKey: Null

       resource.customizations: |
-        cert-manager.io/Certificate:
-          # Lua script for customizing the health status assessment
+        argoproj.io/Application:
           health.lua: |
             hs = {}
+            hs.status = "Progressing"
+            hs.message = ""
             if obj.status ~= nil then
-              if obj.status.conditions ~= nil then
-                for i, condition in ipairs(obj.status.conditions) do
-                  if condition.type == "Ready" and condition.status == "False" then
-                    hs.status = "Degraded"
-                    hs.message = condition.message
-                    return hs
-                  end
-                  if condition.type == "Ready" and condition.status == "True" then
-                    hs.status = "Healthy"
-                    hs.message = condition.message
-                    return hs
-                  end
-                end
+              if obj.status.health ~= nil then
+                hs.status = obj.status.health.status
+                if obj.status.health.message ~= nil then
+                  hs.message = obj.status.health.message
+                end
               end
             end
-            hs.status = "Progressing"
-            hs.message = "Waiting for certificate"
             return hs

     secret:
@@ -106,9 +90,6 @@ argo-cd:
      extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
     params:
-      controller.status.processors: 8
-      controller.operation.processors: 4
-      controller.kubectl.parallelism.limit: 8
-
       controller.resource.health.persist: "false"
       controller.diff.server.side: "true"
       controller.sync.timeout.seconds: 1800
@@ -151,26 +132,6 @@ argo-cd:
         resources: ["secrets"]
         verbs: ["get", "watch", "list"]

-    initContainers:
-      - name: create-kubeconfig
-        image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'
-        imagePullPolicy: '{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}'
-        command:
-          - /usr/local/bin/sa2kubeconfig.sh
-          - /home/argocd/.kube/config
-        volumeMounts:
-          - mountPath: /home/argocd/.kube
-            name: kubeconfigs
-        securityContext:
-          runAsNonRoot: true
-          readOnlyRootFilesystem: true
-          allowPrivilegeEscalation: false
-          seccompProfile:
-            type: RuntimeDefault
-          capabilities:
-            drop:
-              - ALL
-
   server:
     # Rename former https port to grpc, works with istio + insecure
     service:
@@ -201,6 +162,14 @@ argo-cd:
     gateway: istio-ingress/ingressgateway
     ipBlocks: []

+  kubezero:
+    # only set this once initially to prevent the circular dependency
+    bootstrap: false
+
+    repoUrl: "https://git.my.org/thiscluster"
+    path: "/"
+    targetRevision: HEAD
+
 argocd-image-updater:
   enabled: false

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-ci
 description: KubeZero umbrella chart for all things CI
 type: application
-version: 0.8.20
+version: 0.8.21
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,19 +18,19 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: gitea
-    version: 10.6.0
+    version: 11.0.0
     repository: https://dl.gitea.io/charts/
     condition: gitea.enabled
   - name: jenkins
-    version: 5.8.16
+    version: 5.8.18
     repository: https://charts.jenkins.io
     condition: jenkins.enabled
   - name: trivy
-    version: 0.11.1
+    version: 0.12.0
     repository: https://aquasecurity.github.io/helm-charts/
     condition: trivy.enabled
   - name: renovate
-    version: 39.180.2
+    version: 39.200.0
     repository: https://docs.renovatebot.com/helm-charts
     condition: renovate.enabled
 kubeVersion: ">= 1.25.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-ci

-![Version: 0.8.20](https://img.shields.io/badge/Version-0.8.20-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.8.21](https://img.shields.io/badge/Version-0.8.21-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for all things CI
@@ -18,11 +18,11 @@ Kubernetes: `>= 1.25.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://aquasecurity.github.io/helm-charts/ | trivy | 0.11.1 |
+| https://aquasecurity.github.io/helm-charts/ | trivy | 0.12.0 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://charts.jenkins.io | jenkins | 5.8.16 |
+| https://charts.jenkins.io | jenkins | 5.8.18 |
-| https://dl.gitea.io/charts/ | gitea | 10.6.0 |
+| https://dl.gitea.io/charts/ | gitea | 11.0.0 |
-| https://docs.renovatebot.com/helm-charts | renovate | 39.180.2 |
+| https://docs.renovatebot.com/helm-charts | renovate | 39.200.0 |

 # Jenkins
 - default build retention 10 builds, 32days
@@ -68,7 +68,8 @@ Kubernetes: `>= 1.25.0`
 | gitea.gitea.metrics.enabled | bool | `false` | |
 | gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
 | gitea.image.rootless | bool | `true` | |
-| gitea.image.tag | string | `"1.23.4"` | |
+| gitea.image.tag | string | `"1.23.5"` | |
+| gitea.istio.blockApi | bool | `false` | |
 | gitea.istio.enabled | bool | `false` | |
 | gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | gitea.istio.url | string | `"git.example.com"` | |
@@ -83,6 +84,7 @@ Kubernetes: `>= 1.25.0`
 | gitea.resources.requests.memory | string | `"320Mi"` | |
 | gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
 | gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| gitea.service.http.port | int | `80` | |
 | gitea.strategy.type | string | `"Recreate"` | |
 | gitea.test.enabled | bool | `false` | |
 | jenkins.agent.annotations."cluster-autoscaler.kubernetes.io/safe-to-evict" | string | `"false"` | |
@@ -156,7 +158,7 @@ Kubernetes: `>= 1.25.0`
 | jenkins.serviceAccountAgent.create | bool | `true` | |
 | jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
 | renovate.cronjob.concurrencyPolicy | string | `"Forbid"` | |
-| renovate.cronjob.jobBackoffLimit | int | `3` | |
+| renovate.cronjob.jobBackoffLimit | int | `2` | |
 | renovate.cronjob.schedule | string | `"0 3 * * *"` | |
 | renovate.cronjob.successfulJobsHistoryLimit | int | `1` | |
 | renovate.enabled | bool | `false` | |

View File

@@ -12,6 +12,14 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
 The changelog until v1.5.7 was auto-generated based on git commits.
 Those entries include a reference to the git commit to be able to get more details.

+## 5.8.18
+
+Update `jenkins/jenkins` to version `2.492.2-jdk17`
+
+## 5.8.17
+
+Update `kubernetes` to version `4314.v5b_846cf499eb_`
+
 ## 5.8.16

 Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`

View File

@@ -1,10 +1,10 @@
 annotations:
   artifacthub.io/category: integration-delivery
   artifacthub.io/changes: |
-    - Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`
+    - Update `jenkins/jenkins` to version `2.492.2-jdk17`
   artifacthub.io/images: |
     - name: jenkins
-      image: docker.io/jenkins/jenkins:2.492.1-jdk17
+      image: docker.io/jenkins/jenkins:2.492.2-jdk17
     - name: k8s-sidecar
       image: docker.io/kiwigrid/k8s-sidecar:1.30.1
     - name: inbound-agent
@@ -18,7 +18,7 @@ annotations:
     - name: support
       url: https://github.com/jenkinsci/helm-charts/issues
 apiVersion: v2
-appVersion: 2.492.1
+appVersion: 2.492.2
 description: 'Jenkins - Build great things at any scale! As the leading open source
   automation server, Jenkins provides over 2000 plugins to support building, deploying
   and automating any project. '
@@ -46,4 +46,4 @@ sources:
 - https://github.com/maorfr/kube-tasks
 - https://github.com/jenkinsci/configuration-as-code-plugin
 type: application
-version: 5.8.16
+version: 5.8.18

View File

@@ -165,7 +165,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
 | [controller.initializeOnce](./values.yaml#L424) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
 | [controller.installLatestPlugins](./values.yaml#L413) | bool | Download the minimum required version or latest version of all dependencies | `true` |
 | [controller.installLatestSpecifiedPlugins](./values.yaml#L416) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
-| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4313.va_9b_4fe2a_0e34","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
+| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4314.v5b_846cf499eb_","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
 | [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` |
 | [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
 | [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |

View File

@@ -403,7 +403,7 @@ controller:
   # Plugins will be installed during Jenkins controller start
   # -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
   installPlugins:
-    - kubernetes:4313.va_9b_4fe2a_0e34
+    - kubernetes:4314.v5b_846cf499eb_
     - workflow-aggregator:600.vb_57cdd26fdd7
     - git:5.7.0
     - configuration-as-code:1932.v75cb_b_f1b_698d

View File

@@ -1,4 +1,5 @@
-{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks }}
+{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks .Values.gitea.istio.blockApi }}
+# Limit access to /api
 apiVersion: security.istio.io/v1beta1
 kind: AuthorizationPolicy
 metadata:
@@ -19,6 +20,7 @@ spec:
     to:
     - operation:
         hosts: ["{{ .Values.gitea.istio.url }}"]
+        paths: [ "/api/*" ]
     when:
     - key: connection.sni
       values:
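
All four conditions in the template guard must hold for this policy to render. A sketch of the matching values edits, expressed as yq commands; the CIDR is a placeholder:

# hypothetical values edits enabling the /api lockdown; 10.0.0.0/8 is a placeholder CIDR
yq -i '
  .gitea.enabled = true |
  .gitea.istio.enabled = true |
  .gitea.istio.blockApi = true |
  .gitea.istio.ipBlocks = ["10.0.0.0/8"]
' values.yaml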

View File

@@ -12,16 +12,15 @@ spec:
   hosts:
   - {{ .Values.gitea.istio.url }}
   http:
-    {{- if .Values.gitea.istio.authProvider }}
-    # https://github.com/go-gitea/gitea/issues/13606
-    - match:
+    - name: api
+      match:
       - uri:
-          regex: ^/user/login.*
-      redirect:
-        uri: /user/oauth2/{{ .Values.gitea.istio.authProvider }}
-        redirectCode: 302
-    {{- end }}
-    - route:
+          prefix: /api/
+      route:
+      - destination:
+          host: gitea-http
+    - name: notApi
+      route:
       - destination:
           host: gitea-http
   tcp:

View File

@@ -2,7 +2,7 @@ gitea:
   enabled: false

   image:
-    tag: 1.23.4
+    tag: 1.23.5
     rootless: true

   repliaCount: 1
@@ -87,6 +87,7 @@ gitea:
     enabled: false
     gateway: istio-ingress/private-ingressgateway
     url: git.example.com
+    blockApi: false

 jenkins:
jenkins: jenkins:

View File

@@ -41,6 +41,7 @@ Kubernetes: `>= 1.30.0-0`
 | gateway.service.externalTrafficPolicy | string | `"Local"` | |
 | gateway.service.type | string | `"NodePort"` | |
 | gateway.terminationGracePeriodSeconds | int | `120` | |
+| hardening.preserveExternalRequestId | bool | `false` | |
 | hardening.rejectUnderscoresHeaders | bool | `true` | |
 | hardening.unescapeSlashes | bool | `true` | |
 | proxyProtocol | bool | `true` | |

View File

@@ -32,6 +32,7 @@ spec:
             use_remote_address: true
             normalize_path: true
             merge_slashes: true
+            preserve_external_request_id: {{ .Values.hardening.preserveExternalRequestId }}
             {{- if .Values.hardening.unescapeSlashes }}
             path_with_escaped_slashes_action: UNESCAPE_AND_REDIRECT
             {{- end }}
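
With preserveExternalRequestId enabled, Envoy keeps a caller-supplied x-request-id instead of generating a fresh one. A hypothetical check against an echo service (the host is a placeholder):

curl -s -H 'x-request-id: my-trace-id' https://echo.example.my.org/headers | grep -i x-request-id
# expect "my-trace-id" to survive the gateway when the option is true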

View File

@@ -43,3 +43,4 @@ proxyProtocol: true
 hardening:
   rejectUnderscoresHeaders: true
   unescapeSlashes: true
+  preserveExternalRequestId: false

View File

@@ -30,17 +30,7 @@ Kubernetes: `>= 1.30.0-0`
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| envoy-ratelimit.descriptors.ingress[0].key | string | `"remote_address"` | |
-| envoy-ratelimit.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
-| envoy-ratelimit.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
-| envoy-ratelimit.descriptors.privateIngress[0].key | string | `"remote_address"` | |
-| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
-| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
 | envoy-ratelimit.enabled | bool | `false` | |
-| envoy-ratelimit.failureModeDeny | bool | `false` | |
-| envoy-ratelimit.localCacheSize | int | `1048576` | |
-| envoy-ratelimit.log.format | string | `"json"` | |
-| envoy-ratelimit.log.level | string | `"warn"` | |
 | global.defaultPodDisruptionBudget.enabled | bool | `false` | |
 | global.logAsJson | bool | `true` | |
 | global.variant | string | `"distroless"` | |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-logging
 description: KubeZero Umbrella Chart for complete EFK stack
 type: application
-version: 0.8.13
+version: 0.8.14
 appVersion: 1.6.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

View File

@@ -1,6 +1,6 @@
 # kubezero-logging

-![Version: 0.8.13](https://img.shields.io/badge/Version-0.8.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
+![Version: 0.8.14](https://img.shields.io/badge/Version-0.8.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)

 KubeZero Umbrella Chart for complete EFK stack
@@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
 | https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
@@ -135,7 +135,7 @@ Kubernetes: `>= 1.26.0`
 | kibana.istio.enabled | bool | `false` | |
 | kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
 | kibana.istio.url | string | `""` | |
-| version | string | `"7.17.3"` | |
+| version | string | `"7.17.7"` | |

 ## Resources:
## Resources: ## Resources:

View File

@ -2,7 +2,7 @@
# fullnameOverride: "" # fullnameOverride: ""
# Version for ElasticSearch and Kibana have to match so we define it at top-level # Version for ElasticSearch and Kibana have to match so we define it at top-level
version: 7.17.3 version: 7.17.7
elastic_password: "" # super_secret_elastic_password elastic_password: "" # super_secret_elastic_password

View File

@@ -62,12 +62,8 @@ kube-prometheus-stack:
         memory: 128Mi

     admissionWebhooks:
-      patch:
-        tolerations:
-        - key: node-role.kubernetes.io/control-plane
-          effect: NoSchedule
-        nodeSelector:
-          node-role.kubernetes.io/control-plane: ""
+      certManager:
+        enabled: true

   nodeExporter:
     enabled: true

View File

@@ -21,7 +21,7 @@ dependencies:
     repository: https://nats-io.github.io/k8s/helm/charts/
     condition: nats.enabled
   - name: rabbitmq
-    version: 15.3.3
+    version: 15.4.0
    repository: https://charts.bitnami.com/bitnami
     condition: rabbitmq.enabled
 kubeVersion: ">= 1.26.0"

View File

@@ -274,7 +274,7 @@ fluentd:
   #- fluent-plugin-s3
   source:
-    sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey # "cloudbender"
+    sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey?inCluster # "cloudbender"
   output:
     # Defaults to OpenSearch in same namespace

View File

@@ -1,6 +1,6 @@
 # kubezero
-![Version: 1.31.3](https://img.shields.io/badge/Version-1.31.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.31.6](https://img.shields.io/badge/Version-1.31.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 KubeZero - Root App of Apps chart
@@ -14,11 +14,11 @@ KubeZero - Root App of Apps chart
 ## Requirements
-Kubernetes: `>= 1.26.0-0`
+Kubernetes: `>= 1.31.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.2.1 |
+| https://cdn.zero-downtime.net/charts | kubezero-lib | 0.2.1 |
 ## Values
@@ -32,16 +32,17 @@ Kubernetes: `>= 1.26.0-0`
 | addons.external-dns.enabled | bool | `false` | |
 | addons.forseti.enabled | bool | `false` | |
 | addons.sealed-secrets.enabled | bool | `false` | |
-| addons.targetRevision | string | `"0.8.11"` | |
+| addons.targetRevision | string | `"0.8.13"` | |
 | argo.argo-cd.enabled | bool | `false` | |
 | argo.argo-cd.istio.enabled | bool | `false` | |
+| argo.argocd-apps.enabled | bool | `false` | |
 | argo.argocd-image-updater.enabled | bool | `false` | |
 | argo.enabled | bool | `false` | |
 | argo.namespace | string | `"argocd"` | |
-| argo.targetRevision | string | `"0.2.6"` | |
+| argo.targetRevision | string | `"0.2.9"` | |
 | cert-manager.enabled | bool | `false` | |
 | cert-manager.namespace | string | `"cert-manager"` | |
-| cert-manager.targetRevision | string | `"0.9.10"` | |
+| cert-manager.targetRevision | string | `"0.9.12"` | |
 | falco.enabled | bool | `false` | |
 | falco.k8saudit.enabled | bool | `false` | |
 | falco.targetRevision | string | `"0.1.2"` | |
@@ -54,35 +55,32 @@ Kubernetes: `>= 1.26.0-0`
 | istio-ingress.enabled | bool | `false` | |
 | istio-ingress.gateway.service | object | `{}` | |
 | istio-ingress.namespace | string | `"istio-ingress"` | |
-| istio-ingress.targetRevision | string | `"0.23.2"` | |
+| istio-ingress.targetRevision | string | `"0.24.3"` | |
 | istio-private-ingress.chart | string | `"kubezero-istio-gateway"` | |
 | istio-private-ingress.enabled | bool | `false` | |
 | istio-private-ingress.gateway.service | object | `{}` | |
 | istio-private-ingress.namespace | string | `"istio-ingress"` | |
-| istio-private-ingress.targetRevision | string | `"0.23.2"` | |
+| istio-private-ingress.targetRevision | string | `"0.24.3"` | |
 | istio.enabled | bool | `false` | |
 | istio.namespace | string | `"istio-system"` | |
-| istio.targetRevision | string | `"0.23.2"` | |
+| istio.targetRevision | string | `"0.24.3"` | |
-| kubezero.defaultTargetRevision | string | `"*"` | |
-| kubezero.gitSync | object | `{}` | |
-| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
-| kubezero.server | string | `"https://kubernetes.default.svc"` | |
+| logging.annotations."argocd.argoproj.io/compare-options" | string | `"ServerSideDiff=false"` | |
 | logging.enabled | bool | `false` | |
 | logging.namespace | string | `"logging"` | |
-| logging.targetRevision | string | `"0.8.13"` | |
+| logging.targetRevision | string | `"0.8.14"` | |
 | metrics.enabled | bool | `false` | |
 | metrics.istio.grafana | object | `{}` | |
 | metrics.istio.prometheus | object | `{}` | |
 | metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | |
 | metrics.namespace | string | `"monitoring"` | |
-| metrics.targetRevision | string | `"0.10.2"` | |
+| metrics.targetRevision | string | `"0.11.0"` | |
 | network.cilium.cluster | object | `{}` | |
 | network.enabled | bool | `true` | |
 | network.retain | bool | `true` | |
-| network.targetRevision | string | `"0.5.5"` | |
+| network.targetRevision | string | `"0.5.7"` | |
 | operators.enabled | bool | `false` | |
 | operators.namespace | string | `"operators"` | |
-| operators.targetRevision | string | `"0.1.6"` | |
+| operators.targetRevision | string | `"0.2.0"` | |
 | storage.aws-ebs-csi-driver.enabled | bool | `false` | |
 | storage.aws-efs-csi-driver.enabled | bool | `false` | |
 | storage.enabled | bool | `false` | |
@@ -90,7 +88,7 @@ Kubernetes: `>= 1.26.0-0`
 | storage.k8up.enabled | bool | `false` | |
 | storage.lvm-localpv.enabled | bool | `false` | |
 | storage.snapshotController.enabled | bool | `false` | |
-| storage.targetRevision | string | `"0.8.9"` | |
+| storage.targetRevision | string | `"0.8.10"` | |
 | telemetry.enabled | bool | `false` | |
 | telemetry.namespace | string | `"telemetry"` | |
 | telemetry.targetRevision | string | `"0.4.1"` | |

View File

@@ -9,6 +9,10 @@ metadata:
   namespace: argocd
   labels:
     {{- include "kubezero-lib.labels" . | nindent 4 }}
+  {{- with ( index .Values $name "annotations" ) }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
   {{- if not ( index .Values $name "retain" ) }}
   finalizers:
     - resources-finalizer.argocd.argoproj.io
@@ -17,20 +21,16 @@ spec:
   project: kubezero
   source:
-    {{- if index .Values $name "chart" }}
-    chart: {{ index .Values $name "chart" }}
-    {{- else }}
-    chart: kubezero-{{ $name }}
-    {{- end }}
-    repoURL: {{ .Values.kubezero.repoURL }}
-    targetRevision: {{ default .Values.kubezero.targetRevision ( index .Values $name "targetRevision" ) | quote }}
+    chart: {{ default (print "kubezero-" $name) (index .Values $name "chart") }}
+    repoURL: {{ default "https://cdn.zero-downtime.net/charts" (index .Values $name "repository") }}
+    targetRevision: {{ default "HEAD" ( index .Values $name "targetRevision" ) | quote }}
     helm:
       skipTests: true
       valuesObject:
         {{- include (print $name "-values") $ | nindent 8 }}
   destination:
-    server: {{ .Values.kubezero.server }}
+    server: "https://kubernetes.default.svc"
     namespace: {{ default "kube-system" ( index .Values $name "namespace" ) }}
   revisionHistoryLimit: 2
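With the global `kubezero` block gone, each module now resolves its own chart source via `default` fallbacks. A sketch of the rendered `source:` for a hypothetical module `$name = "metrics"` that sets none of the overrides:

    source:
      chart: kubezero-metrics                        # default (print "kubezero-" $name)
      repoURL: https://cdn.zero-downtime.net/charts  # default repository
      targetRevision: "HEAD"                         # default when <module>.targetRevision is unset

Per-module `chart`, `repository`, and `targetRevision` values override each field individually.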

View File

@@ -42,6 +42,8 @@ external-dns:
   - "--aws-zone-type=public"
   - "--aws-zones-cache-duration=1h"
   env:
+    - name: AWS_REGION
+      value: {{ .Values.global.aws.region }}
     - name: AWS_ROLE_ARN
      value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.externalDNS"
     - name: AWS_WEB_IDENTITY_TOKEN_FILE
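With illustrative globals (`global.aws.region: eu-central-1`, `global.aws.accountId: "123456789012"`, `global.clusterName: demo`), the block renders roughly as:

    env:
      - name: AWS_REGION
        value: eu-central-1
      - name: AWS_ROLE_ARN
        value: "arn:aws:iam::123456789012:role/eu-central-1.demo.externalDNS"

Pinning AWS_REGION explicitly presumably saves the AWS SDK a round-trip to the instance metadata service for region discovery.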

View File

@@ -2,10 +2,22 @@
 argo-cd:
   enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}

-  {{- with index .Values "argo" "argo-cd" "configs" }}
   configs:
+    {{- with index .Values "argo" "argo-cd" "configs" }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
+
+    params:
+      {{- if not $.Values.global.highAvailable }}
+      # Reduce load on API server on single node control plane
+      controller.status.processors: 2
+      controller.operation.processors: 1
+      controller.kubectl.parallelism.limit: 1
+      {{- else }}
+      controller.status.processors: 8
+      controller.operation.processors: 4
+      controller.kubectl.parallelism.limit: 4
+      {{- end }}

   controller:
     metrics:
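On a single-node control plane (`global.highAvailable: false`) the template above renders, as a sketch:

    argo-cd:
      configs:
        params:
          controller.status.processors: 2
          controller.operation.processors: 1
          controller.kubectl.parallelism.limit: 1

which throttles the Argo CD application controller so reconciliation cannot swamp a lone API server; HA clusters get the larger 8/4/4 budget from the else branch.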
@@ -27,42 +39,10 @@ argo-cd:
     {{- end }}
   {{- end }}

-argocd-apps:
-  enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}
-  projects:
+  {{- with index .Values "argo" "argo-cd" "kubezero" }}
   kubezero:
-    namespace: argocd
-    description: KubeZero - ZeroDownTime Kubernetes Platform
-    sourceRepos:
-    - {{ .Values.kubezero.repoURL }}
-    {{- with .Values.kubezero.gitSync.repoURL }}
-    - {{ . }}
+    {{- toYaml . | nindent 4 }}
   {{- end }}
-    destinations:
-    - namespace: '*'
-      server: https://kubernetes.default.svc
-    clusterResourceWhitelist:
-    - group: '*'
-      kind: '*'
-  applications:
-    kubezero-git-sync:
-      namespace: argocd
-      project: kubezero
-      source:
-        repoURL: {{ .Values.kubezero.gitSync.repoURL }}
-        targetRevision: {{ .Values.kubezero.gitSync.targetRevision }}
-        path: {{ .Values.kubezero.gitSync.path }}
-        directory:
-          recurse: true
-      destination:
-        server: https://kubernetes.default.svc
-        namespace: argocd
-      syncPolicy:
-        automated:
-          prune: true

 argocd-image-updater:
   enabled: {{ default "false" (index .Values "argo" "argocd-image-updater" "enabled") }}
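The hard-coded argocd-apps project and kubezero-git-sync Application are gone; whatever a cluster provides under `argo.argo-cd.kubezero` is now passed through verbatim as the `kubezero:` section of the argo-cd umbrella chart. A hypothetical values snippet, with key names borrowed from the removed gitSync block (illustrative only, not confirmed against the new chart):

    argo:
      argo-cd:
        kubezero:
          repoURL: https://git.example.com/myorg/my-cluster.git
          targetRevision: main
          path: "/"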

View File

@@ -1,9 +1,3 @@
-kubezero:
-  server: https://kubernetes.default.svc
-  repoURL: https://cdn.zero-downtime.net/charts
-  defaultTargetRevision: '*'
-  gitSync: {}

 global:
   clusterName: zdt-trial-cluster
@@ -114,12 +108,14 @@ metrics:
 logging:
   enabled: false
   namespace: logging
-  targetRevision: 0.8.13
+  targetRevision: 0.8.14
+  annotations:
+    argocd.argoproj.io/compare-options: ServerSideDiff=false

 argo:
   enabled: false
   namespace: argocd
-  targetRevision: 0.2.8
+  targetRevision: 0.3.0
   argo-cd:
     enabled: false
     istio:
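Together with the new annotations block in the Application template, the logging entry above should yield an Application whose metadata carries the compare option, roughly:

    apiVersion: argoproj.io/v1alpha1
    kind: Application
    metadata:
      name: logging        # named after the module (sketch)
      namespace: argocd
      annotations:
        argocd.argoproj.io/compare-options: ServerSideDiff=false

i.e. server-side diffing is presumably opted out per module where it misbehaves, while other Applications keep the cluster-wide setting.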