Compare commits


22 Commits

SHA1 Message Date
06e6d61cd8 chore(deps): update helm release neo4j to v5.26.5 2025-04-08 03:02:29 +00:00
daf70c9bfb fix: argocd bootstrap fix 2025-03-26 16:47:24 +00:00
eb059883c1 fix: ensure pre-install hook is run for kubezero 2025-03-25 11:17:30 +01:00
bca7f5fd45 fix: another argo migration fix 2025-03-24 22:10:38 +01:00
68997b535d fix: typo in hook 2025-03-24 18:18:37 +00:00
ca69b55492 fix: allow multi-line secret val 2025-03-24 19:02:19 +01:00
01832f2e41 fix: improve argocd secret handling 2025-03-24 18:54:56 +01:00
94dd2f395e fix: kubezero root module fixes 2025-03-24 18:11:26 +01:00
6a7c0b6085 feat: more cluster bootstrap work 2025-03-24 16:44:11 +00:00
10de3a1047 Merge pull request 'chore(deps): update kubezero-argo-dependencies' (#63) from renovate/kubezero-argo-kubezero-argo-dependencies into main
Reviewed-on: #63
2025-03-21 13:51:46 +00:00
5a47b6be43 chore(deps): update kubezero-argo-dependencies 2025-03-21 13:51:46 +00:00
63eb787599 Merge pull request 'chore(deps): update public.ecr.aws/zero-downtime/zdt-argocd docker tag to v2.14.7' (#67) from renovate/public.ecr.aws-zero-downtime-zdt-argocd-2.x into main
Reviewed-on: #67
2025-03-21 13:51:29 +00:00
120072a34b chore(deps): update public.ecr.aws/zero-downtime/zdt-argocd docker tag to v2.14.7 2025-03-21 03:02:01 +00:00
63f96e58ba fix: ensure root app is re-created 2025-03-19 12:39:06 +01:00
ab744494e6 fix: apply kubezero module first, fix hooks 2025-03-18 16:18:20 +00:00
af29836a27 feat: new custom helm hooks 2025-03-18 14:47:55 +00:00
30bc95408a feat: improved ArgoCD bootstrap, tool cleanups 2025-03-17 20:30:34 +00:00
545a7fd8b1 feat: latest CI tools, improved Gitea API endpoint protection 2025-03-13 21:02:53 +00:00
56a2926917 Merge pull request 'chore(deps): update helm release gitea to v11' (#59) from renovate/kubezero-ci-major-kubezero-ci-dependencies into main
Reviewed-on: #59
2025-03-13 12:41:06 +00:00
b8114bd053 chore(deps): update helm release gitea to v11 2025-03-13 12:41:06 +00:00
53f940a54c Merge pull request 'chore(deps): update kubezero-ci-dependencies' (#57) from renovate/kubezero-ci-kubezero-ci-dependencies into main
Reviewed-on: #57
2025-03-13 12:40:57 +00:00
58780f1e0e chore(deps): update kubezero-ci-dependencies 2025-03-13 03:01:47 +00:00
45 changed files with 384 additions and 357 deletions

View File

@@ -6,8 +6,8 @@ ARG ALPINE_VERSION
 ARG KUBE_VERSION=1.31
 ARG SOPS_VERSION="3.9.4"
-ARG VALS_VERSION="0.39.1"
-ARG HELM_SECRETS_VERSION="4.6.2"
+ARG VALS_VERSION="0.39.4"
+ARG HELM_SECRETS_VERSION="4.6.3"

 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@@ -24,6 +24,7 @@ RUN cd /etc/apk/keys && \
     py3-yaml \
     restic \
     helm \
+    apache2-utils \
     ytt@testing \
     etcd-ctl@edge-community \
     cri-tools@kubezero \

View File

@@ -5,7 +5,7 @@ set -x

 ARTIFACTS=($(echo $1 | tr "," "\n"))
 ACTION="${2:-apply}"
-ARGOCD="${3:-False}"
+ARGOCD="${3:-true}"

 LOCAL_DEV=1

@@ -36,46 +36,6 @@ parse_version() {

 KUBE_VERSION=$(parse_version $KUBE_VERSION)

-### Various hooks for modules
-
-################
-# cert-manager #
-################
-function cert-manager-post() {
-  # If any error occurs, wait for initial webhook deployment and try again
-  # see: https://cert-manager.io/docs/concepts/webhook/#webhook-connection-problems-shortly-after-cert-manager-installation
-  if [ $rc -ne 0 ]; then
-    wait_for "kubectl get deployment -n $namespace cert-manager-webhook"
-    kubectl rollout status deployment -n $namespace cert-manager-webhook
-    wait_for 'kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0"'
-  fi
-
-  wait_for "kubectl get ClusterIssuer -n $namespace kubezero-local-ca-issuer"
-  kubectl wait --timeout=180s --for=condition=Ready -n $namespace ClusterIssuer/kubezero-local-ca-issuer
-}
-
-###########
-# ArgoCD #
-###########
-function argocd-pre() {
-  kubectl delete job argo-argocd-redis-secret-init -n argocd || true
-
-  for f in $CLUSTER/secrets/argocd-*.yaml; do
-    kubectl apply -f $f
-  done
-}
-
-###########
-# Metrics #
-###########
-# Cleanup patch jobs from previous runs, ArgoCD does this automatically
-function metrics-pre() {
-  kubectl delete jobs --field-selector status.successful=1 -n monitoring
-}

 ### Main
 get_kubezero_values $ARGOCD

@@ -85,6 +45,7 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $

 # Root KubeZero apply directly and exit
 if [ ${ARTIFACTS[0]} == "kubezero" ]; then
+  [ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
   kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
   exit $?

View File

@@ -14,7 +14,12 @@ pre_control_plane_upgrade_cluster() {

 # All things after the first controller / control plane upgrade
 post_control_plane_upgrade_cluster() {
-  echo
+  # delete previous root app controlled by kubezero module
+  kubectl delete application kubezero-git-sync -n argocd || true
+
+  # Patch appproject to keep SyncWindow in place
+  kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
+  kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
 }

View File

@@ -131,7 +131,7 @@ control_plane_upgrade() {

   update_kubezero_cm

-  if [ "$ARGOCD" == "True" ]; then
+  if [ "$ARGOCD" == "true" ]; then
     # update argo app
     export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
     kubectl get application kubezero -n argocd -o yaml | \
@@ -328,10 +328,15 @@ apply_module() {
   done

   for t in $MODULES; do
-    #_helm apply $t
-
-    # During 1.31 we change the ArgoCD tracking so replace
-    _helm replace $t
+    # apply/replace app of apps directly
+    if [ $t == "kubezero" ]; then
+      [ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
+      kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
+    else
+      #_helm apply $t
+      # During 1.31 we change the ArgoCD tracking so replace
+      _helm replace $t
+    fi
   done

   echo "Applied KubeZero modules: $MODULES"

View File

@@ -29,14 +29,14 @@ function chart_location() {

 function argo_used() {
   kubectl get application kubezero -n argocd >/dev/null \
-    && echo "True" || echo "False"
+    && echo "true" || echo "false"
 }

 function field_manager() {
-  local argo=${1:-"False"}
+  local argo=${1:-"false"}

-  if [ "$argo" == "True" ]; then
+  if [ "$argo" == "true" ]; then
     echo "--field-manager argo-controller"
   else
     echo ""
@@ -44,10 +44,21 @@ function field_manager() {
 }

-function get_kubezero_secret() {
-  export _key="$1"
-
-  kubectl get secrets -n kubezero kubezero-secrets -o yaml | yq '.data.[env(_key)]' | base64 -d -w0
+function get_secret_val() {
+  local ns=$1
+  local secret=$2
+  local val=$(kubectl get secret -n $ns $secret -o yaml | yq ".data.\"$3\"")
+
+  if [ "$val" != "null" ]; then
+    echo -n $val | base64 -d -w0
+  else
+    echo ""
+  fi
+}
+
+function get_kubezero_secret() {
+  get_secret_val kubezero kubezero-secrets "$1"
 }

@@ -55,22 +66,23 @@ function set_kubezero_secret() {
   local key="$1"
   local val="$2"

-  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n $val |base64 -w0)\" }}"
+  if [ -n "$val" ]; then
+    kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n "$val" |base64 -w0)\" }}"
+  fi
 }

 # get kubezero-values from ArgoCD if available or use in-cluster CM
 function get_kubezero_values() {
-  local argo=${1:-"False"}
+  local argo=${1:-"false"}

-  if [ "$argo" == "True" ]; then
+  if [ "$argo" == "true" ]; then
     kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml
   else
     kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml
   fi
 }

 # Overwrite kubezero-values CM with file
 function update_kubezero_cm() {
   kubectl get cm -n kubezero kubezero-values -o=yaml | \
@@ -80,7 +92,7 @@ function update_kubezero_cm() {

 # sync kubezero-values CM from ArgoCD app
 function sync_kubezero_cm_from_argo() {
-  get_kubezero_values True
+  get_kubezero_values true
   update_kubezero_cm
 }

@@ -139,8 +151,8 @@ function delete_ns() {

 # Extract crds via helm calls
-function _crds() {
-  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
+function crds() {
+  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
 #!/usr/bin/python3
 import yaml
 import sys
@@ -201,9 +213,20 @@ function _helm() {
     yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml

+  # extract remote chart or copy local to access hooks
+  rm -rf $WORKDIR/$chart $WORKDIR/${chart}*.tgz
+  if [ -z "$LOCAL_DEV" ]; then
+    helm pull $(chart_location $chart) --untar -d $WORKDIR
+  else
+    cp -r $(chart_location $chart) $WORKDIR
+  fi
+
   if [ $action == "crds" ]; then
-    # Allow custom CRD handling
-    declare -F ${module}-crds && ${module}-crds || _crds
+    # Pre-crd hook
+    [ -f $WORKDIR/$chart/hooks.d/pre-crds.sh ] && . $WORKDIR/$chart/hooks.d/pre-crds.sh
+
+    crds

   elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
@@ -213,14 +236,16 @@ function _helm() {
     create_ns $namespace

     # Optional pre hook
-    declare -F ${module}-pre && ${module}-pre
+    [ -f $WORKDIR/$chart/hooks.d/pre-install.sh ] && . $WORKDIR/$chart/hooks.d/pre-install.sh

     render
-    [ $action == "apply" ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
     [ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?
+
+    # If replace failed try apply at least
+    [ $action == "apply" -o $rc -ne 0 ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?

     # Optional post hook
-    declare -F ${module}-post && ${module}-post
+    [ -f $WORKDIR/$chart/hooks.d/post-install.sh ] && . $WORKDIR/$chart/hooks.d/post-install.sh

   elif [ $action == "delete" ]; then
     render

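The split into a generic get_secret_val and thin get_kubezero_secret/set_kubezero_secret wrappers is the core of this refactor. A minimal usage sketch, assuming the library above is sourced (the path is hypothetical) and the kubezero-secrets Secret exists; the key name follows the bootstrap hook later in this diff:

```bash
#!/bin/bash
# Hypothetical path; source the helper library shown above
. ./admin/libhelm.sh

# Returns the decoded value, or an empty string when the key is unset
PW=$(get_kubezero_secret argo-cd.adminPassword)

# The new guard in set_kubezero_secret skips the patch for empty values,
# so this call is a no-op when $PW is empty
set_kubezero_secret argo-cd.adminPassword "$PW"
```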
View File

@@ -8,10 +8,18 @@ import yaml
 def migrate(values):
     """Actual changes here"""

-    # remove syncOptions from root app
+    # migrate kubezero root app of apps to Argo chart
     try:
-        if values["kubezero"]["syncPolicy"]:
-            values["kubezero"].pop("syncPolicy")
+        if values["kubezero"]:
+            try:
+                values["kubezero"].pop("syncPolicy")
+            except KeyError:
+                pass
+
+            values["kubezero"]["gitSync"]["repoUrl"] = values["kubezero"]["gitSync"].pop("repoURL")
+            values["argo"]["argo-cd"]["kubezero"] = values["kubezero"]["gitSync"]
+            values.pop("kubezero")
     except KeyError:
         pass

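The new migrate() relocates the root app's gitSync block under the Argo chart and renames repoURL to repoUrl. A before/after sketch with assumed example values (repository URL and path are placeholders):

```bash
# Input values (assumed example):
cat <<'EOF'
kubezero:
  syncPolicy: {}
  gitSync:
    repoURL: ssh://git@git.example.com/org/cluster.git
    path: /
EOF

# After migrate(): syncPolicy and the root kubezero key are gone, and
# gitSync lives under argo.argo-cd.kubezero with repoUrl renamed:
cat <<'EOF'
argo:
  argo-cd:
    kubezero:
      repoUrl: ssh://git@git.example.com/org/cluster.git
      path: /
EOF
```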
View File

@@ -17,16 +17,16 @@ ARGOCD=$(argo_used)
 echo "Checking that all pods in kube-system are running ..."
 #waitSystemPodsRunning

-[ "$ARGOCD" == "True" ] && disable_argo
+[ "$ARGOCD" == "true" ] && disable_argo

 # Check if we already have all controllers on the current version
-OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
+#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)

+# All controllers already on current version
 if [ "$OLD_CONTROLLERS" == "0" ]; then
-  # All controllers already on current version
   control_plane_upgrade finalize_cluster_upgrade
+# Otherwise run control plane upgrade
 else
-  # Otherwise run control plane upgrade
   control_plane_upgrade kubeadm_upgrade
 fi

@@ -35,10 +35,10 @@ read -r

 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
-#[ "$ARGOCD" == "True" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
+#[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero

 # upgrade modules
-control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
+control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"

 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning

@@ -47,6 +47,9 @@ echo "Applying remaining KubeZero modules..."

 control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"

+# we replace the project during v1.31 so disable again
+[ "$ARGOCD" == "true" ] && disable_argo
+
 # Final step is to commit the new argocd kubezero app
 kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP

@@ -63,4 +66,4 @@ echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to

 echo "<Return> to continue and re-enable ArgoCD:"
 read -r
-[ "$ARGOCD" == "True" ] && enable_argo
+[ "$ARGOCD" == "true" ] && enable_argo

View File

@@ -17,22 +17,36 @@ failureModeDeny: false
 # - slow: 1 req/s over a minute per sourceIP
 descriptors:
   ingress:
-    - key: speed
-      value: slow
+    - key: sourceIp
+      value: sixtyPerMinute
       descriptors:
         - key: remote_address
           rate_limit:
             unit: minute
             requests_per_unit: 60
+    - key: sourceIp
+      value: tenPerSecond
+      descriptors:
+        - key: remote_address
+          rate_limit:
+            unit: second
+            requests_per_unit: 10

   privateIngress:
-    - key: speed
-      value: slow
+    - key: sourceIp
+      value: sixtyPerMinute
       descriptors:
         - key: remote_address
           rate_limit:
             unit: minute
             requests_per_unit: 60
+    - key: sourceIp
+      value: tenPerSecond
+      descriptors:
+        - key: remote_address
+          rate_limit:
+            unit: second
+            requests_per_unit: 10

 metrics:
   enabled: false

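Each source IP now carries two descriptors per listener: 60 requests/minute and 10 requests/second. A rough smoke test, assuming the ratelimit filter is actually enabled on an ingress host you control (hostname is an assumption):

```bash
# Fire ~15 requests in quick succession from one IP; once the
# tenPerSecond descriptor is exceeded, Envoy should start answering
# 429 Too Many Requests
for i in $(seq 1 15); do
  curl -s -o /dev/null -w '%{http_code}\n' https://www.example.com/
done
```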
View File

@@ -0,0 +1,28 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+
+README.md.gotmpl
+dashboards.yaml
+jsonnet
+update.sh

View File

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.2.9
+version: 0.3.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,19 +18,15 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: argo-events
-    version: 2.4.13
+    version: 2.4.14
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.8.9
-    repository: https://argoproj.github.io/argo-helm
-    condition: argo-cd.enabled
-  - name: argocd-apps
-    version: 2.0.2
+    version: 7.8.13
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-image-updater
     version: 0.12.0
     repository: https://argoproj.github.io/argo-helm
     condition: argocd-image-updater.enabled
-kubeVersion: ">= 1.26.0-0"
+kubeVersion: ">= 1.30.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-argo

-![Version: 0.2.8](https://img.shields.io/badge/Version-0.2.8-informational?style=flat-square)
+![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square)

 KubeZero Argo - Events, Workflow, CD

@@ -14,15 +14,14 @@ KubeZero Argo - Events, Workflow, CD

 ## Requirements

-Kubernetes: `>= 1.26.0-0`
+Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 7.8.2 |
-| https://argoproj.github.io/argo-helm | argo-events | 2.4.13 |
-| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.2 |
+| https://argoproj.github.io/argo-helm | argo-cd | 7.8.13 |
+| https://argoproj.github.io/argo-helm | argo-events | 2.4.14 |
 | https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.0 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |

 ## Values

@@ -30,7 +29,7 @@ Kubernetes: `>= 1.26.0-0`
 |-----|------|---------|-------------|
 | argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
 | argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
-| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
+| argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n health.lua: |\n hs = {}\n hs.status = \"Progressing\"\n hs.message = \"\"\n if obj.status ~= nil then\n if obj.status.health ~= nil then\n hs.status = obj.status.health.status\n if obj.status.health.message ~= nil then\n hs.message = obj.status.health.message\n end\n end\n end\n return hs\n"` | |
 | argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
 | argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
 | argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
@@ -39,10 +38,11 @@ Kubernetes: `>= 1.26.0-0`
 | argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
 | argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
 | argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
-| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
-| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
+| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
+| argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
 | argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
 | argo-cd.configs.params."server.insecure" | bool | `true` | |
+| argo-cd.configs.secret.argocdServerAdminPassword | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword"` | |
 | argo-cd.configs.secret.createSecret | bool | `false` | |
 | argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="` | |
 | argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
@@ -54,31 +54,19 @@ Kubernetes: `>= 1.26.0-0`
 | argo-cd.dex.enabled | bool | `false` | |
 | argo-cd.enabled | bool | `false` | |
 | argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
-| argo-cd.global.image.tag | string | `"v2.13.1"` | |
+| argo-cd.global.image.tag | string | `"v2.14.7"` | |
 | argo-cd.global.logging.format | string | `"json"` | |
 | argo-cd.global.networkPolicy.create | bool | `true` | |
 | argo-cd.istio.enabled | bool | `false` | |
 | argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
 | argo-cd.istio.ipBlocks | list | `[]` | |
+| argo-cd.kubezero.bootstrap | bool | `false` | deploy the KubeZero Project and GitSync Root App |
+| argo-cd.kubezero.path | string | `"/"` | |
+| argo-cd.kubezero.repoUrl | string | `""` | |
+| argo-cd.kubezero.sshPrivateKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey"` | |
+| argo-cd.kubezero.targetRevision | string | `"HEAD"` | |
 | argo-cd.notifications.enabled | bool | `false` | |
-| argo-cd.repoServer.clusterRoleRules.enabled | bool | `true` | |
-| argo-cd.repoServer.clusterRoleRules.rules[0].apiGroups[0] | string | `""` | |
-| argo-cd.repoServer.clusterRoleRules.rules[0].resources[0] | string | `"secrets"` | |
-| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[0] | string | `"get"` | |
-| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[1] | string | `"watch"` | |
-| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[2] | string | `"list"` | |
-| argo-cd.repoServer.initContainers[0].command[0] | string | `"/usr/local/bin/sa2kubeconfig.sh"` | |
-| argo-cd.repoServer.initContainers[0].command[1] | string | `"/home/argocd/.kube/config"` | |
-| argo-cd.repoServer.initContainers[0].image | string | `"{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include \"argo-cd.defaultTag\" .) .Values.repoServer.image.tag }}"` | |
-| argo-cd.repoServer.initContainers[0].imagePullPolicy | string | `"{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}"` | |
-| argo-cd.repoServer.initContainers[0].name | string | `"create-kubeconfig"` | |
-| argo-cd.repoServer.initContainers[0].securityContext.allowPrivilegeEscalation | bool | `false` | |
-| argo-cd.repoServer.initContainers[0].securityContext.capabilities.drop[0] | string | `"ALL"` | |
-| argo-cd.repoServer.initContainers[0].securityContext.readOnlyRootFilesystem | bool | `true` | |
-| argo-cd.repoServer.initContainers[0].securityContext.runAsNonRoot | bool | `true` | |
-| argo-cd.repoServer.initContainers[0].securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
-| argo-cd.repoServer.initContainers[0].volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
-| argo-cd.repoServer.initContainers[0].volumeMounts[0].name | string | `"kubeconfigs"` | |
+| argo-cd.redisSecretInit.enabled | bool | `false` | |
 | argo-cd.repoServer.metrics.enabled | bool | `false` | |
 | argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
 | argo-cd.repoServer.volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
@@ -101,9 +89,6 @@ Kubernetes: `>= 1.26.0-0`
 | argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
 | argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
 | argo-events.enabled | bool | `false` | |
-| argocd-apps.applications | object | `{}` | |
-| argocd-apps.enabled | bool | `false` | |
-| argocd-apps.projects | object | `{}` | |
 | argocd-image-updater.authScripts.enabled | bool | `true` | |
 | argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
 | argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |

View File

@@ -0,0 +1,29 @@
+# Bootstrap kubezero-git-sync app only if it doesnt exist yet
+kubectl get application kubezero-git-sync -n argocd || \
+  yq -i '.argo-cd.kubezero.bootstrap=true' $WORKDIR/values.yaml
+
+# Ensure we have an adminPassword or migrate existing one
+PW=$(get_kubezero_secret argo-cd.adminPassword)
+if [ -z "$PW" ]; then
+  # Check for existing password in actual secret
+  NEW_PW=$(get_secret_val argocd argocd-secret "admin.password")
+
+  if [ -z "$NEW_PW" ];then
+    ARGO_PWD=$(date +%s | sha256sum | base64 | head -c 12 ; echo)
+    NEW_PW=$(htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/')
+
+    set_kubezero_secret argo-cd.adminPasswordClear $ARGO_PWD
+  fi
+
+  set_kubezero_secret argo-cd.adminPassword "$NEW_PW"
+fi
+
+# GitSync privateKey
+GITKEY=$(get_kubezero_secret argo-cd.kubezero.sshPrivateKey)
+if [ -z "$GITKEY" ]; then
+  set_kubezero_secret argo-cd.kubezero.sshPrivateKey "Insert ssh Private Key from your git server"
+fi
+
+# Redis secret
+kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
+  --from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)

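When this hook generates a fresh admin password, the clear-text copy lands in kubezero-secrets under argo-cd.adminPasswordClear. A sketch of reading it back for a first login, assuming the helper functions above are sourced; the ArgoCD hostname is an assumption:

```bash
# Read the generated clear-text password back (only present when the
# hook created one) and log in; argocd.example.com is a placeholder
ADMIN_PWD=$(get_kubezero_secret argo-cd.adminPasswordClear)
argocd login argocd.example.com --grpc-web --username admin --password "$ADMIN_PWD"
```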
View File

@@ -1,22 +0,0 @@
-# KubeZero secrets
-#
-test: supergeheim
-
-secrets:
-  - name: argocd-secret
-    optional: false
-    data:
-      admin.password: test
-      admin.passwordMtime: now
-      server.secretkey: boohoo
-  - name: zero-downtime-gitea
-    optional: true
-    data:
-      name: zero-downtime-gitea
-      type: git
-      url: ssh://git@git.zero-downtime.net/quark/kube-grandnagus.git
-      sshPrivateKey: |
-        boohooKey
-    metadata:
-      labels:
-        argocd.argoproj.io/secret-type: repository

View File

@@ -0,0 +1,13 @@
+{{- if index .Values "argo-cd" "enabled" }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: argocd-secret
+  namespace: argocd
+  labels:
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+type: Opaque
+stringData:
+  admin.password: {{ index .Values "argo-cd" "configs" "secret" "argocdServerAdminPassword" }}
+  admin.passwordMtime: {{ default (dateInZone "2006-01-02T15:04:05Z" (now) "UTC") }}
+{{- end }}

View File

@@ -0,0 +1,30 @@
+{{- if and (index .Values "argo-cd" "kubezero" "bootstrap") (index .Values "argo-cd" "kubezero" "repoUrl") }}
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: kubezero-git-sync
+  namespace: argocd
+  labels:
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+  annotations:
+    argocd.argoproj.io/sync-wave: "-20"
+spec:
+  destination:
+    namespace: argocd
+    server: https://kubernetes.default.svc
+  project: kubezero
+  source:
+    {{- with index .Values "argo-cd" "kubezero" }}
+    repoURL: {{ .repoUrl }}
+    targetRevision: {{ .targetRevision }}
+    path: {{ .path }}
+    {{- end }}
+    directory:
+      recurse: true
+  syncPolicy:
+    automated:
+      prune: true
+    syncOptions:
+      - ServerSideApply=true
+      - ApplyOutOfSyncOnly=true
+{{- end }}

View File

@@ -0,0 +1,16 @@
+{{- if and (index .Values "argo-cd" "kubezero" "sshPrivateKey") (index .Values "argo-cd" "kubezero" "repoUrl") }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: kubezero-git-sync
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: repository
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+type: Opaque
+stringData:
+  name: kubezero-git-sync
+  type: git
+  url: {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
+  sshPrivateKey: {{ index .Values "argo-cd" "kubezero" "sshPrivateKey" }}
+{{- end }}

View File

@@ -0,0 +1,26 @@
+{{- if and (index .Values "argo-cd" "kubezero" "bootstrap") (index .Values "argo-cd" "kubezero" "repoUrl") }}
+apiVersion: argoproj.io/v1alpha1
+kind: AppProject
+metadata:
+  name: kubezero
+  namespace: argocd
+  labels:
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+spec:
+  clusterResourceWhitelist:
+    - group: '*'
+      kind: '*'
+  description: KubeZero - ZeroDownTime Kubernetes Platform
+  destinations:
+    - namespace: '*'
+      server: https://kubernetes.default.svc
+  sourceRepos:
+    - https://cdn.zero-downtime.net/charts
+    - {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
+  syncWindows:
+    - kind: deny
+      schedule: '0 * * * *'
+      duration: 24h
+      namespaces:
+        - '*'
+{{- end }}

View File

@@ -30,13 +30,6 @@ argo-events:
           configReloaderImage: natsio/nats-server-config-reloader:0.14.1
           startCommand: /nats-server

-argocd-apps:
-  enabled: false
-  projects: {}
-  applications: {}
-
 argo-cd:
   enabled: false
@@ -45,7 +38,7 @@ argo-cd:
       format: json
     image:
       repository: public.ecr.aws/zero-downtime/zdt-argocd
-      tag: v2.14.2
+      tag: v2.14.7
     networkPolicy:
       create: true
@@ -71,36 +64,26 @@ argo-cd:
       application.instanceLabelKey: Null

       resource.customizations: |
-        cert-manager.io/Certificate:
-          # Lua script for customizing the health status assessment
+        argoproj.io/Application:
           health.lua: |
             hs = {}
+            hs.status = "Progressing"
+            hs.message = ""
             if obj.status ~= nil then
-              if obj.status.conditions ~= nil then
-                for i, condition in ipairs(obj.status.conditions) do
-                  if condition.type == "Ready" and condition.status == "False" then
-                    hs.status = "Degraded"
-                    hs.message = condition.message
-                    return hs
-                  end
-                  if condition.type == "Ready" and condition.status == "True" then
-                    hs.status = "Healthy"
-                    hs.message = condition.message
-                    return hs
-                  end
-                end
+              if obj.status.health ~= nil then
+                hs.status = obj.status.health.status
+                if obj.status.health.message ~= nil then
+                  hs.message = obj.status.health.message
+                end
               end
             end
-            hs.status = "Progressing"
-            hs.message = "Waiting for certificate"
             return hs

     secret:
       createSecret: false
       # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/' | base64 -w0`
-      # argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
-      # argocdServerAdminPassword: "ref+file://secrets.yaml#/test"
-      # argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
+      argocdServerAdminPassword: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword

     ssh:
       extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
@@ -141,6 +124,7 @@ argo-cd:
           name: kubeconfigs

     # Allow vals to read internal secrets across all namespaces
+    # @ignored
     clusterRoleRules:
       enabled: true
       rules:
@@ -148,6 +132,7 @@ argo-cd:
         resources: ["secrets"]
         verbs: ["get", "watch", "list"]

+    # @ignored
     initContainers:
       - name: create-kubeconfig
         image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'
@@ -198,6 +183,16 @@ argo-cd:
     gateway: istio-ingress/ingressgateway
     ipBlocks: []

+  kubezero:
+    # -- deploy the KubeZero Project and GitSync Root App
+    bootstrap: false
+
+    # valid git+ssh repository url
+    repoUrl: ""
+    path: "/"
+    targetRevision: HEAD
+
+    sshPrivateKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey

 argocd-image-updater:
   enabled: false

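The replacement Lua script simply mirrors an Application's own .status.health back to ArgoCD, so health of nested app-of-apps children propagates upward. The same fields can be inspected directly; the application name here is an assumption:

```bash
# Print the health fields the Lua script reads for one Application
kubectl get application kubezero -n argocd \
  -o jsonpath='{.status.health.status}{"\t"}{.status.health.message}{"\n"}'
```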
View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-ci
 description: KubeZero umbrella chart for all things CI
 type: application
-version: 0.8.20
+version: 0.8.21
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,19 +18,19 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: gitea
-    version: 10.6.0
+    version: 11.0.0
    repository: https://dl.gitea.io/charts/
     condition: gitea.enabled
   - name: jenkins
-    version: 5.8.16
+    version: 5.8.18
     repository: https://charts.jenkins.io
     condition: jenkins.enabled
   - name: trivy
-    version: 0.11.1
+    version: 0.12.0
     repository: https://aquasecurity.github.io/helm-charts/
     condition: trivy.enabled
   - name: renovate
-    version: 39.180.2
+    version: 39.200.0
     repository: https://docs.renovatebot.com/helm-charts
     condition: renovate.enabled
 kubeVersion: ">= 1.25.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-ci

-![Version: 0.8.20](https://img.shields.io/badge/Version-0.8.20-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.8.21](https://img.shields.io/badge/Version-0.8.21-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for all things CI

@@ -18,11 +18,11 @@ Kubernetes: `>= 1.25.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://aquasecurity.github.io/helm-charts/ | trivy | 0.11.1 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.1.6 |
-| https://charts.jenkins.io | jenkins | 5.8.16 |
-| https://dl.gitea.io/charts/ | gitea | 10.6.0 |
-| https://docs.renovatebot.com/helm-charts | renovate | 39.180.2 |
+| https://aquasecurity.github.io/helm-charts/ | trivy | 0.12.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://charts.jenkins.io | jenkins | 5.8.18 |
+| https://dl.gitea.io/charts/ | gitea | 11.0.0 |
+| https://docs.renovatebot.com/helm-charts | renovate | 39.200.0 |

 # Jenkins
 - default build retention 10 builds, 32days
@@ -68,7 +68,8 @@ Kubernetes: `>= 1.25.0`
 | gitea.gitea.metrics.enabled | bool | `false` | |
 | gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
 | gitea.image.rootless | bool | `true` | |
-| gitea.image.tag | string | `"1.23.4"` | |
+| gitea.image.tag | string | `"1.23.5"` | |
+| gitea.istio.blockApi | bool | `false` | |
 | gitea.istio.enabled | bool | `false` | |
 | gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | gitea.istio.url | string | `"git.example.com"` | |
@@ -83,6 +84,7 @@ Kubernetes: `>= 1.25.0`
 | gitea.resources.requests.memory | string | `"320Mi"` | |
 | gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
 | gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| gitea.service.http.port | int | `80` | |
 | gitea.strategy.type | string | `"Recreate"` | |
 | gitea.test.enabled | bool | `false` | |
 | jenkins.agent.annotations."cluster-autoscaler.kubernetes.io/safe-to-evict" | string | `"false"` | |
@@ -156,7 +158,7 @@ Kubernetes: `>= 1.25.0`
 | jenkins.serviceAccountAgent.create | bool | `true` | |
 | jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
 | renovate.cronjob.concurrencyPolicy | string | `"Forbid"` | |
-| renovate.cronjob.jobBackoffLimit | int | `3` | |
+| renovate.cronjob.jobBackoffLimit | int | `2` | |
 | renovate.cronjob.schedule | string | `"0 3 * * *"` | |
 | renovate.cronjob.successfulJobsHistoryLimit | int | `1` | |
 | renovate.enabled | bool | `false` | |

View File

@@ -12,6 +12,14 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.

 The changelog until v1.5.7 was auto-generated based on git commits.
 Those entries include a reference to the git commit to be able to get more details.

+## 5.8.18
+
+Update `jenkins/jenkins` to version `2.492.2-jdk17`
+
+## 5.8.17
+
+Update `kubernetes` to version `4314.v5b_846cf499eb_`
+
 ## 5.8.16

 Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`

View File

@@ -1,10 +1,10 @@
 annotations:
   artifacthub.io/category: integration-delivery
   artifacthub.io/changes: |
-    - Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`
+    - Update `jenkins/jenkins` to version `2.492.2-jdk17`
   artifacthub.io/images: |
     - name: jenkins
-      image: docker.io/jenkins/jenkins:2.492.1-jdk17
+      image: docker.io/jenkins/jenkins:2.492.2-jdk17
     - name: k8s-sidecar
       image: docker.io/kiwigrid/k8s-sidecar:1.30.1
     - name: inbound-agent
@@ -18,7 +18,7 @@ annotations:
   - name: support
     url: https://github.com/jenkinsci/helm-charts/issues
 apiVersion: v2
-appVersion: 2.492.1
+appVersion: 2.492.2
 description: 'Jenkins - Build great things at any scale! As the leading open source
   automation server, Jenkins provides over 2000 plugins to support building, deploying
   and automating any project. '
@@ -46,4 +46,4 @@ sources:
   - https://github.com/maorfr/kube-tasks
   - https://github.com/jenkinsci/configuration-as-code-plugin
 type: application
-version: 5.8.16
+version: 5.8.18

View File

@@ -165,7 +165,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
 | [controller.initializeOnce](./values.yaml#L424) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
 | [controller.installLatestPlugins](./values.yaml#L413) | bool | Download the minimum required version or latest version of all dependencies | `true` |
 | [controller.installLatestSpecifiedPlugins](./values.yaml#L416) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
-| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4313.va_9b_4fe2a_0e34","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
+| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4314.v5b_846cf499eb_","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
 | [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` |
 | [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
 | [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |

View File

@@ -403,7 +403,7 @@ controller:
   # Plugins will be installed during Jenkins controller start
   # -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
   installPlugins:
-    - kubernetes:4313.va_9b_4fe2a_0e34
+    - kubernetes:4314.v5b_846cf499eb_
     - workflow-aggregator:600.vb_57cdd26fdd7
     - git:5.7.0
     - configuration-as-code:1932.v75cb_b_f1b_698d

View File

@@ -1,4 +1,5 @@
-{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks }}
+{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks .Values.gitea.istio.blockApi }}
+# Limit access to /api
 apiVersion: security.istio.io/v1beta1
 kind: AuthorizationPolicy
 metadata:
@@ -19,6 +20,7 @@ spec:
     to:
     - operation:
         hosts: ["{{ .Values.gitea.istio.url }}"]
+        paths: [ "/api/*" ]
     when:
     - key: connection.sni
       values:

View File

@@ -12,14 +12,15 @@ spec:
   hosts:
   - {{ .Values.gitea.istio.url }}
   http:
-  {{- if .Values.gitea.istio.blockApi }}
-  - match:
-    - uri:
-        prefix: /api
-    directResponse:
-      status: 401
-  {{- end }}
-  - route:
+  - name: api
+    match:
+    - uri:
+        prefix: /api/
+    route:
+    - destination:
+        host: gitea-http
+  - name: notApi
+    route:
     - destination:
         host: gitea-http
   tcp:

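With this change, /api/ traffic is no longer answered with a blanket 401 by the VirtualService; it is routed to gitea-http like everything else, and when blockApi is set the AuthorizationPolicy above restricts /api/* to the configured ipBlocks. A quick check from outside the allow-list, with the hostname as an assumption:

```bash
# Expect 403 (RBAC: access denied) from a non-allowlisted source IP
curl -s -o /dev/null -w '%{http_code}\n' https://git.example.com/api/v1/version
```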
View File

@@ -2,7 +2,7 @@ gitea:
   enabled: false

   image:
-    tag: 1.23.4
+    tag: 1.23.5
     rootless: true
   repliaCount: 1

View File

@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: neo4j
-    version: 5.26.4
+    version: 5.26.5
     repository: https://helm.neo4j.com/neo4j
     condition: neo4j.enabled

View File

@@ -41,6 +41,7 @@ Kubernetes: `>= 1.30.0-0`
 | gateway.service.externalTrafficPolicy | string | `"Local"` | |
 | gateway.service.type | string | `"NodePort"` | |
 | gateway.terminationGracePeriodSeconds | int | `120` | |
+| hardening.preserveExternalRequestId | bool | `false` | |
 | hardening.rejectUnderscoresHeaders | bool | `true` | |
 | hardening.unescapeSlashes | bool | `true` | |
 | proxyProtocol | bool | `true` | |

View File

@@ -30,17 +30,7 @@ Kubernetes: `>= 1.30.0-0`

 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| envoy-ratelimit.descriptors.ingress[0].key | string | `"remote_address"` | |
-| envoy-ratelimit.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
-| envoy-ratelimit.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
-| envoy-ratelimit.descriptors.privateIngress[0].key | string | `"remote_address"` | |
-| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
-| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
 | envoy-ratelimit.enabled | bool | `false` | |
-| envoy-ratelimit.failureModeDeny | bool | `false` | |
-| envoy-ratelimit.localCacheSize | int | `1048576` | |
-| envoy-ratelimit.log.format | string | `"json"` | |
-| envoy-ratelimit.log.level | string | `"warn"` | |
 | global.defaultPodDisruptionBudget.enabled | bool | `false` | |
 | global.logAsJson | bool | `true` | |
 | global.variant | string | `"distroless"` | |

View File

@@ -274,7 +274,7 @@ fluentd:
       #- fluent-plugin-s3

   source:
-    sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey # "cloudbender"
+    sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey?inCluster # "cloudbender"

   output:
     # Defaults to OpenSearch in same namespace

View File

@@ -21,4 +21,8 @@
 .idea/
 *.tmproj
 .vscode/
+Chart.lock
+README.md.gotmpl
+dashboards.yaml
+jsonnet
+update.sh

View File

@ -1,6 +1,6 @@
# kubezero # kubezero
![Version: 1.31.3](https://img.shields.io/badge/Version-1.31.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 1.31.6](https://img.shields.io/badge/Version-1.31.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart KubeZero - Root App of Apps chart
@ -14,11 +14,11 @@ KubeZero - Root App of Apps chart
## Requirements ## Requirements
Kubernetes: `>= 1.26.0-0` Kubernetes: `>= 1.31.0-0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.2.1 | | https://cdn.zero-downtime.net/charts | kubezero-lib | 0.2.1 |
## Values ## Values
@ -32,16 +32,16 @@ Kubernetes: `>= 1.26.0-0`
| addons.external-dns.enabled | bool | `false` | | | addons.external-dns.enabled | bool | `false` | |
| addons.forseti.enabled | bool | `false` | | | addons.forseti.enabled | bool | `false` | |
| addons.sealed-secrets.enabled | bool | `false` | | | addons.sealed-secrets.enabled | bool | `false` | |
| addons.targetRevision | string | `"0.8.11"` | | | addons.targetRevision | string | `"0.8.13"` | |
| argo.argo-cd.enabled | bool | `false` | | | argo.argo-cd.enabled | bool | `false` | |
| argo.argo-cd.istio.enabled | bool | `false` | | | argo.argo-cd.istio.enabled | bool | `false` | |
| argo.argocd-image-updater.enabled | bool | `false` | | | argo.argocd-image-updater.enabled | bool | `false` | |
| argo.enabled | bool | `false` | | | argo.enabled | bool | `false` | |
| argo.namespace | string | `"argocd"` | | | argo.namespace | string | `"argocd"` | |
| argo.targetRevision | string | `"0.2.6"` | | | argo.targetRevision | string | `"0.3.1"` | |
| cert-manager.enabled | bool | `false` | |
| cert-manager.namespace | string | `"cert-manager"` | |
| cert-manager.targetRevision | string | `"0.9.10"` | | | cert-manager.targetRevision | string | `"0.9.12"` | |
| falco.enabled | bool | `false` | |
| falco.k8saudit.enabled | bool | `false` | |
| falco.targetRevision | string | `"0.1.2"` | |
@ -54,35 +54,32 @@ Kubernetes: `>= 1.26.0-0`
| istio-ingress.enabled | bool | `false` | |
| istio-ingress.gateway.service | object | `{}` | |
| istio-ingress.namespace | string | `"istio-ingress"` | |
| istio-ingress.targetRevision | string | `"0.23.2"` | | | istio-ingress.targetRevision | string | `"0.24.3"` | |
| istio-private-ingress.chart | string | `"kubezero-istio-gateway"` | |
| istio-private-ingress.enabled | bool | `false` | |
| istio-private-ingress.gateway.service | object | `{}` | |
| istio-private-ingress.namespace | string | `"istio-ingress"` | |
| istio-private-ingress.targetRevision | string | `"0.23.2"` | | | istio-private-ingress.targetRevision | string | `"0.24.3"` | |
| istio.enabled | bool | `false` | |
| istio.namespace | string | `"istio-system"` | |
| istio.targetRevision | string | `"0.23.2"` | | | istio.targetRevision | string | `"0.24.3"` | |
| kubezero.defaultTargetRevision | string | `"*"` | | | logging.annotations."argocd.argoproj.io/compare-options" | string | `"ServerSideDiff=false"` | |
| kubezero.gitSync | object | `{}` | |
| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
| kubezero.server | string | `"https://kubernetes.default.svc"` | |
| logging.enabled | bool | `false` | |
| logging.namespace | string | `"logging"` | |
| logging.targetRevision | string | `"0.8.13"` | | | logging.targetRevision | string | `"0.8.14"` | |
| metrics.enabled | bool | `false` | |
| metrics.istio.grafana | object | `{}` | |
| metrics.istio.prometheus | object | `{}` | |
| metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | |
| metrics.namespace | string | `"monitoring"` | |
| metrics.targetRevision | string | `"0.10.2"` | | | metrics.targetRevision | string | `"0.11.0"` | |
| network.cilium.cluster | object | `{}` | |
| network.enabled | bool | `true` | |
| network.retain | bool | `true` | |
| network.targetRevision | string | `"0.5.5"` | | | network.targetRevision | string | `"0.5.7"` | |
| operators.enabled | bool | `false` | |
| operators.namespace | string | `"operators"` | |
| operators.targetRevision | string | `"0.1.6"` | | | operators.targetRevision | string | `"0.2.0"` | |
| storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | |
@ -90,7 +87,7 @@ Kubernetes: `>= 1.26.0-0`
| storage.k8up.enabled | bool | `false` | |
| storage.lvm-localpv.enabled | bool | `false` | |
| storage.snapshotController.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.8.9"` | | | storage.targetRevision | string | `"0.8.10"` | |
| telemetry.enabled | bool | `false` | |
| telemetry.namespace | string | `"telemetry"` | |
| telemetry.targetRevision | string | `"0.4.1"` | |

View File

@ -1,41 +0,0 @@
kind: ApplicationSet
metadata:
name: kubezero
namespace: argocd
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
generators:
- git:
repoURL: {{ .Values.kubezero.applicationSet.repoURL }}
revision: {{ .Values.kubezero.applicationSet.revision }}
files:
{{- toYaml .Values.kubezero.applicationSet.files | nindent 6 }}
template:
metadata:
name: kubezero
spec:
project: kubezero
source:
repoURL: https://cdn.zero-downtime.net/charts
chart: kubezero
targetRevision: '{{ "{{" }} kubezero.version {{ "}}" }}'
helm:
parameters:
# We use this to detect if we are called from ArgoCD
- name: argocdAppName
value: $ARGOCD_APP_NAME
# This breaks the recursion, otherwise we install another kubezero project and app
# To be removed once the applicationSet is working and the AppProject is moved back to the ArgoCD chart
- name: installKubeZero
value: "false"
valueFiles:
- '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/kubezero.yaml'
- '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/values.yaml'
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true

View File

@ -0,0 +1,6 @@
# ensure we have a basic kubezero secret for cluster bootstrap and defaults
kubectl get secret kubezero-secrets -n kubezero && rc=$? || rc=$?
if [ $rc != 0 ]; then
kubectl create secret generic kubezero-secrets -n kubezero
fi
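An equivalent create-if-missing idiom, sketched here only as an alternative — `apply` performs a three-way merge, so existing keys in the secret are preserved:

```sh
kubectl create secret generic kubezero-secrets -n kubezero \
  --dry-run=client -o yaml | kubectl apply -f -
```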

View File

@ -1,7 +0,0 @@
#!/bin/bash
ns=$(kubectl get ns -l argocd.argoproj.io/instance | grep -v NAME | awk '{print $1}')
for n in $ns; do
kubectl label --overwrite namespace $n 'argocd.argoproj.io/instance-'
done

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
# Script to migrate an existing ECK 1.2.1 installation to Helm.
set -euo pipefail
RELEASE_NAMESPACE=${RELEASE_NAMESPACE:-"elastic-system"}
echo "Uninstalling ECK"
kubectl delete -n "${RELEASE_NAMESPACE}" \
serviceaccount/elastic-operator \
secret/elastic-webhook-server-cert \
clusterrole.rbac.authorization.k8s.io/elastic-operator \
clusterrole.rbac.authorization.k8s.io/elastic-operator-view \
clusterrole.rbac.authorization.k8s.io/elastic-operator-edit \
clusterrolebinding.rbac.authorization.k8s.io/elastic-operator \
rolebinding.rbac.authorization.k8s.io/elastic-operator \
service/elastic-webhook-server \
statefulset.apps/elastic-operator \
validatingwebhookconfiguration.admissionregistration.k8s.io/elastic-webhook.k8s.elastic.co

View File

@ -21,20 +21,17 @@ spec:
project: kubezero
source:
{{- if index .Values $name "chart" }} chart: {{ default (print "kubezero-" $name) (index .Values $name "chart") }}
chart: {{ index .Values $name "chart" }} repoURL: {{ default "https://cdn.zero-downtime.net/charts" (index .Values $name "repository") }}
{{- else }} targetRevision: {{ default "HEAD" ( index .Values $name "targetRevision" ) | quote }}
chart: kubezero-{{ $name }}
{{- end }}
repoURL: {{ .Values.kubezero.repoURL }}
targetRevision: {{ default .Values.kubezero.targetRevision ( index .Values $name "targetRevision" ) | quote }}
helm:
skipTests: true # add with 1.32
#skipTests: true
valuesObject:
{{- include (print $name "-values") $ | nindent 8 }}
destination:
server: {{ .Values.kubezero.server }} server: "https://kubernetes.default.svc"
namespace: {{ default "kube-system" ( index .Values $name "namespace" ) }}
revisionHistoryLimit: 2
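With these defaults in place, a module entry in the values file only needs to set what differs; sketching it with the istio-private-ingress module from the values table above:

```yaml
istio-private-ingress:
  enabled: true
  chart: kubezero-istio-gateway   # overrides the default "kubezero-<name>"
  targetRevision: "0.24.3"        # falls back to HEAD if unset
  namespace: istio-ingress        # falls back to kube-system
  # repository falls back to https://cdn.zero-downtime.net/charts
```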

View File

@ -39,42 +39,10 @@ argo-cd:
{{- end }}
{{- end }}
argocd-apps: {{- with index .Values "argo" "argo-cd" "kubezero" }}
enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }} kubezero:
projects: {{- toYaml . | nindent 4 }}
kubezero: {{- end }}
namespace: argocd
description: KubeZero - ZeroDownTime Kubernetes Platform
sourceRepos:
- {{ .Values.kubezero.repoURL }}
{{- with .Values.kubezero.gitSync.repoURL }}
- {{ . }}
{{- end }}
destinations:
- namespace: '*'
server: https://kubernetes.default.svc
clusterResourceWhitelist:
- group: '*'
kind: '*'
applications:
kubezero-git-sync:
namespace: argocd
project: kubezero
source:
repoURL: {{ .Values.kubezero.gitSync.repoURL }}
targetRevision: {{ .Values.kubezero.gitSync.targetRevision }}
path: {{ .Values.kubezero.gitSync.path }}
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: argocd
syncPolicy:
automated:
prune: true
argocd-image-updater:
enabled: {{ default "false" (index .Values "argo" "argocd-image-updater" "enabled") }}

View File

@ -1,9 +1,3 @@
kubezero:
server: https://kubernetes.default.svc
repoURL: https://cdn.zero-downtime.net/charts
defaultTargetRevision: '*'
gitSync: {}
global:
clusterName: zdt-trial-cluster
@ -121,7 +115,7 @@ logging:
argo:
enabled: false
namespace: argocd
targetRevision: 0.2.8 targetRevision: 0.3.1
argo-cd:
enabled: false
istio:

docs/hooks.md Normal file
View File

@ -0,0 +1,11 @@
# KubeZero Helm hooks
## Abstract
Scripts in the `hooks.d` folder of each chart are executed at the corresponding stage when the chart is applied via libhelm.
*These hooks do NOT work via ArgoCD*
## Flow
- hooks are executed as part of libhelm tasks like `apply`
- they run with the current kubectl context
- they are executed from the root working directory; e.g. to set a Helm value, a script can edit the `./values.yaml` file
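A minimal example of what such a hook could look like — the chart, filename, and values key are hypothetical:

```sh
#!/bin/bash
# hypothetical: charts/kubezero-metrics/hooks.d/pre-install.sh
# Executed by libhelm during `apply`, with the current kubectl context,
# from the root working directory.
set -euo pipefail

# Persist a generated admin password into ./values.yaml once
if ! grep -q adminPassword ./values.yaml; then
  PW=$(openssl rand -hex 16)
  export PW
  yq -i '.grafana.adminPassword = strenv(PW)' ./values.yaml
fi
```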

View File

@ -3,6 +3,7 @@
## What's new - Major themes
- all KubeZero and support AMIs based on [Alpine 3.21](https://alpinelinux.org/posts/Alpine-3.21.0-released.html)
- network policies for ArgoCD
- Nvidia worker nodes are labeled with detected GPU product code
- Prometheus upgraded to V3, reducing CPU and memory requirements, see [upstream blog](https://prometheus.io/blog/2024/11/14/prometheus-3-0/)
## Features and fixes
@ -10,10 +11,10 @@
## Version upgrades
- cilium 1.16.6
- istio 1.24.2 - istio 1.24.3
- ArgoCD 2.14.3 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd) - ArgoCD 2.14.5 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- Prometheus 3.1.0 / Grafana 11.5.1
- Nvidia container toolkit 1.17, drivers 565.57.01, Cuda 12.7 - Nvidia container toolkit 1.17.4, drivers 570.86.15, Cuda 12.8
## Resources
- [Kubernetes v1.31 upstream release blog](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/)

View File

@ -18,7 +18,7 @@ update_jsonnet() {
update_helm() {
#helm repo update
helm dep update helm dep build
}
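The switch from `helm dep update` to `helm dep build` makes dependency resolution reproducible: `build` installs exactly what `Chart.lock` pins, while `update` re-resolves version ranges and rewrites the lock file. Roughly (chart path illustrative):

```sh
helm dep build  ./charts/kubezero   # honor Chart.lock as committed
helm dep update ./charts/kubezero   # re-resolve ranges, rewrite Chart.lock
```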
# AWS public ECR