Compare commits
1 commit
main ... renovate/k

Commit SHA1: eeef3d405d
@@ -6,8 +6,8 @@ ARG ALPINE_VERSION
ARG KUBE_VERSION=1.31

ARG SOPS_VERSION="3.9.4"
ARG VALS_VERSION="0.39.4"
ARG HELM_SECRETS_VERSION="4.6.3"
ARG VALS_VERSION="0.39.1"
ARG HELM_SECRETS_VERSION="4.6.2"

RUN cd /etc/apk/keys && \
    wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \

@@ -24,7 +24,6 @@ RUN cd /etc/apk/keys && \
    py3-yaml \
    restic \
    helm \
    apache2-utils \
    ytt@testing \
    etcd-ctl@edge-community \
    cri-tools@kubezero \
@@ -5,7 +5,7 @@ set -x

ARTIFACTS=($(echo $1 | tr "," "\n"))
ACTION="${2:-apply}"
ARGOCD="${3:-true}"
ARGOCD="${3:-False}"

LOCAL_DEV=1

@@ -36,6 +36,46 @@ parse_version() {

KUBE_VERSION=$(parse_version $KUBE_VERSION)

### Various hooks for modules

################
# cert-manager #
################
function cert-manager-post() {
  # If any error occurs, wait for initial webhook deployment and try again
  # see: https://cert-manager.io/docs/concepts/webhook/#webhook-connection-problems-shortly-after-cert-manager-installation

  if [ $rc -ne 0 ]; then
    wait_for "kubectl get deployment -n $namespace cert-manager-webhook"
    kubectl rollout status deployment -n $namespace cert-manager-webhook
    wait_for 'kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0"'
  fi

  wait_for "kubectl get ClusterIssuer -n $namespace kubezero-local-ca-issuer"
  kubectl wait --timeout=180s --for=condition=Ready -n $namespace ClusterIssuer/kubezero-local-ca-issuer
}


###########
# ArgoCD #
###########
function argocd-pre() {
  kubectl delete job argo-argocd-redis-secret-init -n argocd || true

  for f in $CLUSTER/secrets/argocd-*.yaml; do
    kubectl apply -f $f
  done
}


###########
# Metrics #
###########
# Cleanup patch jobs from previous runs, ArgoCD does this automatically
function metrics-pre() {
  kubectl delete jobs --field-selector status.successful=1 -n monitoring
}


### Main
get_kubezero_values $ARGOCD
@@ -45,7 +85,6 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $

# Root KubeZero apply directly and exit
if [ ${ARTIFACTS[0]} == "kubezero" ]; then
  [ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
  kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
  exit $?
@@ -14,12 +14,7 @@ pre_control_plane_upgrade_cluster() {

# All things after the first controller / control plane upgrade
post_control_plane_upgrade_cluster() {
  # delete previous root app controlled by kubezero module
  kubectl delete application kubezero-git-sync -n argocd || true

  # Patch appproject to keep SyncWindow in place
  kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
  kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
  echo
}


@@ -131,7 +131,7 @@ control_plane_upgrade() {

  update_kubezero_cm

  if [ "$ARGOCD" == "true" ]; then
  if [ "$ARGOCD" == "True" ]; then
    # update argo app
    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
    kubectl get application kubezero -n argocd -o yaml | \

@@ -328,15 +328,10 @@ apply_module() {
  done

  for t in $MODULES; do
    # apply/replace app of apps directly
    if [ $t == "kubezero" ]; then
      [ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
      kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
    else
      #_helm apply $t
      # During 1.31 we change the ArgoCD tracking so replace
      _helm replace $t
    fi
    #_helm apply $t

    # During 1.31 we change the ArgoCD tracking so replace
    _helm replace $t
  done

  echo "Applied KubeZero modules: $MODULES"
@@ -29,14 +29,14 @@ function chart_location() {

function argo_used() {
  kubectl get application kubezero -n argocd >/dev/null \
    && echo "true" || echo "false"
    && echo "True" || echo "False"
}


function field_manager() {
  local argo=${1:-"false"}
  local argo=${1:-"False"}

  if [ "$argo" == "true" ]; then
  if [ "$argo" == "True" ]; then
    echo "--field-manager argo-controller"
  else
    echo ""

@@ -44,21 +44,10 @@ function field_manager() {
}


function get_secret_val() {
  local ns=$1
  local secret=$2
  local val=$(kubectl get secret -n $ns $secret -o yaml | yq ".data.\"$3\"")

  if [ "$val" != "null" ]; then
    echo -n $val | base64 -d -w0
  else
    echo ""
  fi
}


function get_kubezero_secret() {
  get_secret_val kubezero kubezero-secrets "$1"
  export _key="$1"

  kubectl get secrets -n kubezero kubezero-secrets -o yaml | yq '.data.[env(_key)]' | base64 -d -w0
}


@@ -66,23 +55,22 @@ function set_kubezero_secret() {
  local key="$1"
  local val="$2"

  if [ -n "$val" ]; then
    kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n "$val" |base64 -w0)\" }}"
  fi
  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n $val |base64 -w0)\" }}"
}
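A brief usage sketch for the two secret helpers above (illustrative only; the key name mirrors calls that appear later in this diff):

```bash
# store a value under a dotted key in the kubezero-secrets Secret (namespace kubezero)
set_kubezero_secret argo-cd.adminPassword "$NEW_PW"

# read it back, base64-decoded; empty output if the key is absent
get_kubezero_secret argo-cd.adminPassword
```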

# get kubezero-values from ArgoCD if available or use in-cluster CM
function get_kubezero_values() {
  local argo=${1:-"false"}
  local argo=${1:-"False"}

  if [ "$argo" == "true" ]; then
  if [ "$argo" == "True" ]; then
    kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml
  else
    kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml
  fi
}


# Overwrite kubezero-values CM with file
function update_kubezero_cm() {
  kubectl get cm -n kubezero kubezero-values -o=yaml | \

@@ -92,7 +80,7 @@ function update_kubezero_cm() {

# sync kubezero-values CM from ArgoCD app
function sync_kubezero_cm_from_argo() {
  get_kubezero_values true
  get_kubezero_values True
  update_kubezero_cm
}

@@ -151,8 +139,8 @@ function delete_ns() {


# Extract crds via helm calls
function crds() {
  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
function _crds() {
  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
#!/usr/bin/python3
import yaml
import sys
@@ -213,20 +201,9 @@ function _helm() {

  yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml

  # extract remote chart or copy local to access hooks
  rm -rf $WORKDIR/$chart $WORKDIR/${chart}*.tgz

  if [ -z "$LOCAL_DEV" ]; then
    helm pull $(chart_location $chart) --untar -d $WORKDIR
  else
    cp -r $(chart_location $chart) $WORKDIR
  fi

  if [ $action == "crds" ]; then
    # Pre-crd hook
    [ -f $WORKDIR/$chart/hooks.d/pre-crds.sh ] && . $WORKDIR/$chart/hooks.d/pre-crds.sh

    crds
    # Allow custom CRD handling
    declare -F ${module}-crds && ${module}-crds || _crds

  elif [ $action == "apply" -o $action == "replace" ]; then
    echo "using values to $action of module $module: "

@@ -236,16 +213,14 @@ function _helm() {
    create_ns $namespace

    # Optional pre hook
    [ -f $WORKDIR/$chart/hooks.d/pre-install.sh ] && . $WORKDIR/$chart/hooks.d/pre-install.sh
    declare -F ${module}-pre && ${module}-pre

    render
    [ $action == "apply" ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
    [ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?

    # If replace failed try apply at least
    [ $action == "apply" -o $rc -ne 0 ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?

    # Optional post hook
    [ -f $WORKDIR/$chart/hooks.d/post-install.sh ] && . $WORKDIR/$chart/hooks.d/post-install.sh
    declare -F ${module}-post && ${module}-post

  elif [ $action == "delete" ]; then
    render
@@ -8,18 +8,10 @@ import yaml

def migrate(values):
    """Actual changes here"""

    # migrate kubezero root app of apps to Argo chart
    # remove syncOptions from root app
    try:
        if values["kubezero"]:
            try:
                values["kubezero"].pop("syncPolicy")
            except KeyError:
                pass
            values["kubezero"]["gitSync"]["repoUrl"] = values["kubezero"]["gitSync"].pop("repoURL")

            values["argo"]["argo-cd"]["kubezero"] = values["kubezero"]["gitSync"]

            values.pop("kubezero")
        if values["kubezero"]["syncPolicy"]:
            values["kubezero"].pop("syncPolicy")
    except KeyError:
        pass
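The fuller variant of migrate() above effectively relocates the root app's gitSync settings under the Argo chart; a hypothetical before/after of the values dict (illustrative repo URL, not from this diff):

```python
# before: {"kubezero": {"syncPolicy": {...},
#                       "gitSync": {"repoURL": "ssh://git@host/repo.git"}}}
# after:  {"argo": {"argo-cd": {"kubezero": {"repoUrl": "ssh://git@host/repo.git"}}}}
```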

@@ -17,16 +17,16 @@ ARGOCD=$(argo_used)
echo "Checking that all pods in kube-system are running ..."
#waitSystemPodsRunning

[ "$ARGOCD" == "true" ] && disable_argo
[ "$ARGOCD" == "True" ] && disable_argo

# Check if we already have all controllers on the current version
#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)

# All controllers already on current version
if [ "$OLD_CONTROLLERS" == "0" ]; then
  # All controllers already on current version
  control_plane_upgrade finalize_cluster_upgrade
# Otherwise run control plane upgrade
else
  # Otherwise run control plane upgrade
  control_plane_upgrade kubeadm_upgrade
fi

@@ -35,10 +35,10 @@ read -r

#echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
#[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
#[ "$ARGOCD" == "True" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero

# upgrade modules
control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"

echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning

@@ -47,9 +47,6 @@ echo "Applying remaining KubeZero modules..."

control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"

# we replace the project during v1.31 so disable again
[ "$ARGOCD" == "true" ] && disable_argo

# Final step is to commit the new argocd kubezero app
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP

@@ -66,4 +63,4 @@ echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to

echo "<Return> to continue and re-enable ArgoCD:"
read -r

[ "$ARGOCD" == "true" ] && enable_argo
[ "$ARGOCD" == "True" ] && enable_argo
@@ -1,28 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

README.md.gotmpl
dashboards.yaml
jsonnet
update.sh
@@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.3.1
version: 0.2.9
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -18,15 +18,19 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: argo-events
    version: 2.4.14
    version: 2.4.13
    repository: https://argoproj.github.io/argo-helm
    condition: argo-events.enabled
  - name: argo-cd
    version: 7.8.13
    version: 7.8.9
    repository: https://argoproj.github.io/argo-helm
    condition: argo-cd.enabled
  - name: argocd-apps
    version: 2.0.2
    repository: https://argoproj.github.io/argo-helm
    condition: argo-cd.enabled
  - name: argocd-image-updater
    version: 0.12.0
    repository: https://argoproj.github.io/argo-helm
    condition: argocd-image-updater.enabled
kubeVersion: ">= 1.30.0-0"
kubeVersion: ">= 1.26.0-0"
@@ -1,6 +1,6 @@
# kubezero-argo




KubeZero Argo - Events, Workflow, CD

@@ -14,14 +14,15 @@ KubeZero Argo - Events, Workflow, CD

## Requirements

Kubernetes: `>= 1.30.0-0`
Kubernetes: `>= 1.26.0-0`

| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 7.8.13 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.14 |
| https://argoproj.github.io/argo-helm | argo-cd | 7.8.2 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.13 |
| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.2 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |

## Values

@@ -29,7 +30,7 @@ Kubernetes: `>= 1.30.0-0`
|-----|------|---------|-------------|
| argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
| argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
| argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n health.lua: |\n hs = {}\n hs.status = \"Progressing\"\n hs.message = \"\"\n if obj.status ~= nil then\n if obj.status.health ~= nil then\n hs.status = obj.status.health.status\n if obj.status.health.message ~= nil then\n hs.message = obj.status.health.message\n end\n end\n end\n return hs\n"` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |

@@ -38,11 +39,10 @@ Kubernetes: `>= 1.30.0-0`
| argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
| argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.secret.argocdServerAdminPassword | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword"` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
| argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |

@@ -54,19 +54,31 @@ Kubernetes: `>= 1.30.0-0`
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
| argo-cd.global.image.tag | string | `"v2.14.7"` | |
| argo-cd.global.image.tag | string | `"v2.13.1"` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.global.networkPolicy.create | bool | `true` | |
| argo-cd.istio.enabled | bool | `false` | |
| argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.kubezero.bootstrap | bool | `false` | deploy the KubeZero Project and GitSync Root App |
| argo-cd.kubezero.path | string | `"/"` | |
| argo-cd.kubezero.repoUrl | string | `""` | |
| argo-cd.kubezero.sshPrivateKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey"` | |
| argo-cd.kubezero.targetRevision | string | `"HEAD"` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.redisSecretInit.enabled | bool | `false` | |
| argo-cd.repoServer.clusterRoleRules.enabled | bool | `true` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].apiGroups[0] | string | `""` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].resources[0] | string | `"secrets"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[0] | string | `"get"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[1] | string | `"watch"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[2] | string | `"list"` | |
| argo-cd.repoServer.initContainers[0].command[0] | string | `"/usr/local/bin/sa2kubeconfig.sh"` | |
| argo-cd.repoServer.initContainers[0].command[1] | string | `"/home/argocd/.kube/config"` | |
| argo-cd.repoServer.initContainers[0].image | string | `"{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include \"argo-cd.defaultTag\" .) .Values.repoServer.image.tag }}"` | |
| argo-cd.repoServer.initContainers[0].imagePullPolicy | string | `"{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}"` | |
| argo-cd.repoServer.initContainers[0].name | string | `"create-kubeconfig"` | |
| argo-cd.repoServer.initContainers[0].securityContext.allowPrivilegeEscalation | bool | `false` | |
| argo-cd.repoServer.initContainers[0].securityContext.capabilities.drop[0] | string | `"ALL"` | |
| argo-cd.repoServer.initContainers[0].securityContext.readOnlyRootFilesystem | bool | `true` | |
| argo-cd.repoServer.initContainers[0].securityContext.runAsNonRoot | bool | `true` | |
| argo-cd.repoServer.initContainers[0].securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
| argo-cd.repoServer.initContainers[0].volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
| argo-cd.repoServer.initContainers[0].volumeMounts[0].name | string | `"kubeconfigs"` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.repoServer.volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |

@@ -89,6 +101,9 @@ Kubernetes: `>= 1.30.0-0`
| argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
| argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
| argo-events.enabled | bool | `false` | |
| argocd-apps.applications | object | `{}` | |
| argocd-apps.enabled | bool | `false` | |
| argocd-apps.projects | object | `{}` | |
| argocd-image-updater.authScripts.enabled | bool | `true` | |
| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |
@@ -1,29 +0,0 @@
# Bootstrap kubezero-git-sync app only if it doesn't exist yet
kubectl get application kubezero-git-sync -n argocd || \
  yq -i '.argo-cd.kubezero.bootstrap=true' $WORKDIR/values.yaml

# Ensure we have an adminPassword or migrate existing one
PW=$(get_kubezero_secret argo-cd.adminPassword)
if [ -z "$PW" ]; then
  # Check for existing password in actual secret
  NEW_PW=$(get_secret_val argocd argocd-secret "admin.password")

  if [ -z "$NEW_PW" ]; then
    ARGO_PWD=$(date +%s | sha256sum | base64 | head -c 12 ; echo)
    NEW_PW=$(htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/')

    set_kubezero_secret argo-cd.adminPasswordClear $ARGO_PWD
  fi

  set_kubezero_secret argo-cd.adminPassword "$NEW_PW"
fi

# GitSync privateKey
GITKEY=$(get_kubezero_secret argo-cd.kubezero.sshPrivateKey)
if [ -z "$GITKEY" ]; then
  set_kubezero_secret argo-cd.kubezero.sshPrivateKey "Insert ssh Private Key from your git server"
fi

# Redis secret
kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
  --from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
charts/kubezero-argo/secrets.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
# KubeZero secrets
#
test: supergeheim
secrets:
  - name: argocd-secret
    optional: false
    data:
      admin.password: test
      admin.passwordMtime: now
      server.secretkey: boohoo
  - name: zero-downtime-gitea
    optional: true
    data:
      name: zero-downtime-gitea
      type: git
      url: ssh://git@git.zero-downtime.net/quark/kube-grandnagus.git
      sshPrivateKey: |
        boohooKey
    metadata:
      labels:
        argocd.argoproj.io/secret-type: repository
@@ -1,13 +0,0 @@
{{- if index .Values "argo-cd" "enabled" }}
apiVersion: v1
kind: Secret
metadata:
  name: argocd-secret
  namespace: argocd
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
type: Opaque
stringData:
  admin.password: {{ index .Values "argo-cd" "configs" "secret" "argocdServerAdminPassword" }}
  admin.passwordMtime: {{ default (dateInZone "2006-01-02T15:04:05Z" (now) "UTC") }}
{{- end }}

@@ -1,30 +0,0 @@
{{- if and (index .Values "argo-cd" "kubezero" "bootstrap") (index .Values "argo-cd" "kubezero" "repoUrl") }}
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kubezero-git-sync
  namespace: argocd
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
  annotations:
    argocd.argoproj.io/sync-wave: "-20"
spec:
  destination:
    namespace: argocd
    server: https://kubernetes.default.svc
  project: kubezero
  source:
    {{- with index .Values "argo-cd" "kubezero" }}
    repoURL: {{ .repoUrl }}
    targetRevision: {{ .targetRevision }}
    path: {{ .path }}
    {{- end }}
    directory:
      recurse: true
  syncPolicy:
    automated:
      prune: true
    syncOptions:
      - ServerSideApply=true
      - ApplyOutOfSyncOnly=true
{{- end }}

@@ -1,16 +0,0 @@
{{- if and (index .Values "argo-cd" "kubezero" "sshPrivateKey") (index .Values "argo-cd" "kubezero" "repoUrl") }}
apiVersion: v1
kind: Secret
metadata:
  name: kubezero-git-sync
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: repository
    {{- include "kubezero-lib.labels" . | nindent 4 }}
type: Opaque
stringData:
  name: kubezero-git-sync
  type: git
  url: {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
  sshPrivateKey: {{ index .Values "argo-cd" "kubezero" "sshPrivateKey" }}
{{- end }}

@@ -1,26 +0,0 @@
{{- if and (index .Values "argo-cd" "kubezero" "bootstrap") (index .Values "argo-cd" "kubezero" "repoUrl") }}
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  name: kubezero
  namespace: argocd
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
  clusterResourceWhitelist:
    - group: '*'
      kind: '*'
  description: KubeZero - ZeroDownTime Kubernetes Platform
  destinations:
    - namespace: '*'
      server: https://kubernetes.default.svc
  sourceRepos:
    - https://cdn.zero-downtime.net/charts
    - {{ index .Values "argo-cd" "kubezero" "repoUrl" }}
  syncWindows:
    - kind: deny
      schedule: '0 * * * *'
      duration: 24h
      namespaces:
        - '*'
{{- end }}
@@ -30,6 +30,13 @@ argo-events:
        configReloaderImage: natsio/nats-server-config-reloader:0.14.1
        startCommand: /nats-server


argocd-apps:
  enabled: false
  projects: {}
  applications: {}


argo-cd:
  enabled: false

@@ -38,7 +45,7 @@ argo-cd:
      format: json
    image:
      repository: public.ecr.aws/zero-downtime/zdt-argocd
      tag: v2.14.7
      tag: v2.14.2
    networkPolicy:
      create: true

@@ -64,26 +71,36 @@ argo-cd:
      application.instanceLabelKey: Null

      resource.customizations: |
        argoproj.io/Application:
        cert-manager.io/Certificate:
          # Lua script for customizing the health status assessment
          health.lua: |
            hs = {}
            hs.status = "Progressing"
            hs.message = ""
            if obj.status ~= nil then
              if obj.status.health ~= nil then
                hs.status = obj.status.health.status
                if obj.status.health.message ~= nil then
                  hs.message = obj.status.health.message
              if obj.status.conditions ~= nil then
                for i, condition in ipairs(obj.status.conditions) do
                  if condition.type == "Ready" and condition.status == "False" then
                    hs.status = "Degraded"
                    hs.message = condition.message
                    return hs
                  end
                  if condition.type == "Ready" and condition.status == "True" then
                    hs.status = "Healthy"
                    hs.message = condition.message
                    return hs
                  end
                end
              end
            end
            hs.status = "Progressing"
            hs.message = "Waiting for certificate"
            return hs

    secret:
      createSecret: false

      # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/' | base64 -w0`
      argocdServerAdminPassword: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.adminPassword
      # argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
      # argocdServerAdminPassword: "ref+file://secrets.yaml#/test"
      # argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"

    ssh:
      extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="

@@ -124,7 +141,6 @@ argo-cd:
        name: kubeconfigs

    # Allow vals to read internal secrets across all namespaces
    # @ignored
    clusterRoleRules:
      enabled: true
      rules:

@@ -132,7 +148,6 @@ argo-cd:
        resources: ["secrets"]
        verbs: ["get", "watch", "list"]

    # @ignored
    initContainers:
      - name: create-kubeconfig
        image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'

@@ -183,16 +198,6 @@ argo-cd:
    gateway: istio-ingress/ingressgateway
    ipBlocks: []

  kubezero:
    # -- deploy the KubeZero Project and GitSync Root App
    bootstrap: false

    # valid git+ssh repository url
    repoUrl: ""
    path: "/"
    targetRevision: HEAD
    sshPrivateKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/argo-cd.kubezero.sshPrivateKey


argocd-image-updater:
  enabled: false
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network
description: KubeZero umbrella chart for all things network
type: application
version: 0.5.7
version: 0.5.8
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -19,7 +19,7 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: cilium
    version: 1.16.6
    version: 1.17.2
    repository: https://helm.cilium.io/
    condition: cilium.enabled
  - name: metallb

@@ -27,7 +27,7 @@ dependencies:
    repository: https://metallb.github.io/metallb
    condition: metallb.enabled
  - name: haproxy
    version: 1.23.0
    version: 1.24.0
    repository: https://haproxytech.github.io/helm-charts
    condition: haproxy.enabled
kubeVersion: ">= 1.29.0-0"
@@ -274,7 +274,7 @@ fluentd:
  #- fluent-plugin-s3

  source:
    sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey?inCluster # "cloudbender"
    sharedKey: secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey # "cloudbender"

  output:
    # Defaults to OpenSearch in same namespace
@@ -21,8 +21,4 @@
.idea/
*.tmproj
.vscode/

README.md.gotmpl
dashboards.yaml
jsonnet
update.sh
Chart.lock
@@ -1,6 +1,6 @@
# kubezero

 
 

KubeZero - Root App of Apps chart

@@ -14,11 +14,11 @@ KubeZero - Root App of Apps chart

## Requirements

Kubernetes: `>= 1.31.0-0`
Kubernetes: `>= 1.26.0-0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts | kubezero-lib | 0.2.1 |
| https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.2.1 |

## Values

@@ -32,16 +32,16 @@ Kubernetes: `>= 1.31.0-0`
| addons.external-dns.enabled | bool | `false` | |
| addons.forseti.enabled | bool | `false` | |
| addons.sealed-secrets.enabled | bool | `false` | |
| addons.targetRevision | string | `"0.8.13"` | |
| addons.targetRevision | string | `"0.8.11"` | |
| argo.argo-cd.enabled | bool | `false` | |
| argo.argo-cd.istio.enabled | bool | `false` | |
| argo.argocd-image-updater.enabled | bool | `false` | |
| argo.enabled | bool | `false` | |
| argo.namespace | string | `"argocd"` | |
| argo.targetRevision | string | `"0.3.1"` | |
| argo.targetRevision | string | `"0.2.6"` | |
| cert-manager.enabled | bool | `false` | |
| cert-manager.namespace | string | `"cert-manager"` | |
| cert-manager.targetRevision | string | `"0.9.12"` | |
| cert-manager.targetRevision | string | `"0.9.10"` | |
| falco.enabled | bool | `false` | |
| falco.k8saudit.enabled | bool | `false` | |
| falco.targetRevision | string | `"0.1.2"` | |

@@ -54,32 +54,35 @@ Kubernetes: `>= 1.31.0-0`
| istio-ingress.enabled | bool | `false` | |
| istio-ingress.gateway.service | object | `{}` | |
| istio-ingress.namespace | string | `"istio-ingress"` | |
| istio-ingress.targetRevision | string | `"0.24.3"` | |
| istio-ingress.targetRevision | string | `"0.23.2"` | |
| istio-private-ingress.chart | string | `"kubezero-istio-gateway"` | |
| istio-private-ingress.enabled | bool | `false` | |
| istio-private-ingress.gateway.service | object | `{}` | |
| istio-private-ingress.namespace | string | `"istio-ingress"` | |
| istio-private-ingress.targetRevision | string | `"0.24.3"` | |
| istio-private-ingress.targetRevision | string | `"0.23.2"` | |
| istio.enabled | bool | `false` | |
| istio.namespace | string | `"istio-system"` | |
| istio.targetRevision | string | `"0.24.3"` | |
| logging.annotations."argocd.argoproj.io/compare-options" | string | `"ServerSideDiff=false"` | |
| istio.targetRevision | string | `"0.23.2"` | |
| kubezero.defaultTargetRevision | string | `"*"` | |
| kubezero.gitSync | object | `{}` | |
| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
| kubezero.server | string | `"https://kubernetes.default.svc"` | |
| logging.enabled | bool | `false` | |
| logging.namespace | string | `"logging"` | |
| logging.targetRevision | string | `"0.8.14"` | |
| logging.targetRevision | string | `"0.8.13"` | |
| metrics.enabled | bool | `false` | |
| metrics.istio.grafana | object | `{}` | |
| metrics.istio.prometheus | object | `{}` | |
| metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | |
| metrics.namespace | string | `"monitoring"` | |
| metrics.targetRevision | string | `"0.11.0"` | |
| metrics.targetRevision | string | `"0.10.2"` | |
| network.cilium.cluster | object | `{}` | |
| network.enabled | bool | `true` | |
| network.retain | bool | `true` | |
| network.targetRevision | string | `"0.5.7"` | |
| network.targetRevision | string | `"0.5.5"` | |
| operators.enabled | bool | `false` | |
| operators.namespace | string | `"operators"` | |
| operators.targetRevision | string | `"0.2.0"` | |
| operators.targetRevision | string | `"0.1.6"` | |
| storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | |

@@ -87,7 +90,7 @@ Kubernetes: `>= 1.31.0-0`
| storage.k8up.enabled | bool | `false` | |
| storage.lvm-localpv.enabled | bool | `false` | |
| storage.snapshotController.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.8.10"` | |
| storage.targetRevision | string | `"0.8.9"` | |
| telemetry.enabled | bool | `false` | |
| telemetry.namespace | string | `"telemetry"` | |
| telemetry.targetRevision | string | `"0.4.1"` | |
charts/kubezero/docs/applicationSet.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
kind: ApplicationSet
metadata:
  name: kubezero
  namespace: argocd
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
  generators:
    - git:
        repoURL: {{ .Values.kubezero.applicationSet.repoURL }}
        revision: {{ .Values.kubezero.applicationSet.revision }}
        files:
          {{- toYaml .Values.kubezero.applicationSet.files | nindent 6 }}
  template:
    metadata:
      name: kubezero
    spec:
      project: kubezero
      source:
        repoURL: https://cdn.zero-downtime.net/charts
        chart: kubezero
        targetRevision: '{{ "{{" }} kubezero.version {{ "}}" }}'
        helm:
          parameters:
            # We use this to detect if we are called from ArgoCD
            - name: argocdAppName
              value: $ARGOCD_APP_NAME
            # This breaks the recursion, otherwise we install another kubezero project and app
            # To be removed once the applicationSet is working and AppProject is moved back to the ArgoCD chart
            - name: installKubeZero
              value: "false"
          valueFiles:
            - '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/kubezero.yaml'
            - '{{ "{{" }} kubezero.valuesPath {{ "}}" }}/values.yaml'

      destination:
        server: https://kubernetes.default.svc
        namespace: argocd
      syncPolicy:
        automated:
          prune: true
@@ -1,6 +0,0 @@
# ensure we have a basic kubezero secret for cluster bootstrap and defaults
kubectl get secret kubezero-secrets -n kubezero && rc=$? || rc=$?

if [ $rc != 0 ]; then
  kubectl create secret generic kubezero-secrets -n kubezero
fi
charts/kubezero/scripts/remove_argo_ns.sh (new executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash

ns=$(kubectl get ns -l argocd.argoproj.io/instance | grep -v NAME | awk '{print $1}')

for n in $ns; do
  kubectl label --overwrite namespace $n 'argocd.argoproj.io/instance-'
done
charts/kubezero/scripts/remove_old_eck.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.

# Script to migrate an existing ECK 1.2.1 installation to Helm.

set -euo pipefail

RELEASE_NAMESPACE=${RELEASE_NAMESPACE:-"elastic-system"}

echo "Uninstalling ECK"
kubectl delete -n "${RELEASE_NAMESPACE}" \
  serviceaccount/elastic-operator \
  secret/elastic-webhook-server-cert \
  clusterrole.rbac.authorization.k8s.io/elastic-operator \
  clusterrole.rbac.authorization.k8s.io/elastic-operator-view \
  clusterrole.rbac.authorization.k8s.io/elastic-operator-edit \
  clusterrolebinding.rbac.authorization.k8s.io/elastic-operator \
  rolebinding.rbac.authorization.k8s.io/elastic-operator \
  service/elastic-webhook-server \
  statefulset.apps/elastic-operator \
  validatingwebhookconfiguration.admissionregistration.k8s.io/elastic-webhook.k8s.elastic.co
@@ -21,17 +21,20 @@ spec:
  project: kubezero

  source:
    chart: {{ default (print "kubezero-" $name) (index .Values $name "chart") }}
    repoURL: {{ default "https://cdn.zero-downtime.net/charts" (index .Values $name "repository") }}
    targetRevision: {{ default "HEAD" ( index .Values $name "targetRevision" ) | quote }}
    {{- if index .Values $name "chart" }}
    chart: {{ index .Values $name "chart" }}
    {{- else }}
    chart: kubezero-{{ $name }}
    {{- end }}
    repoURL: {{ .Values.kubezero.repoURL }}
    targetRevision: {{ default .Values.kubezero.targetRevision ( index .Values $name "targetRevision" ) | quote }}
    helm:
      # add with 1.32
      #skipTests: true
      skipTests: true
      valuesObject:
        {{- include (print $name "-values") $ | nindent 8 }}

  destination:
    server: "https://kubernetes.default.svc"
    server: {{ .Values.kubezero.server }}
    namespace: {{ default "kube-system" ( index .Values $name "namespace" ) }}

  revisionHistoryLimit: 2

@@ -39,10 +39,42 @@ argo-cd:
{{- end }}
{{- end }}

{{- with index .Values "argo" "argo-cd" "kubezero" }}
kubezero:
  {{- toYaml . | nindent 4 }}
{{- end }}
argocd-apps:
  enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}
  projects:
    kubezero:
      namespace: argocd
      description: KubeZero - ZeroDownTime Kubernetes Platform
      sourceRepos:
        - {{ .Values.kubezero.repoURL }}
        {{- with .Values.kubezero.gitSync.repoURL }}
        - {{ . }}
        {{- end }}
      destinations:
        - namespace: '*'
          server: https://kubernetes.default.svc
      clusterResourceWhitelist:
        - group: '*'
          kind: '*'
  applications:
    kubezero-git-sync:
      namespace: argocd
      project: kubezero
      source:
        repoURL: {{ .Values.kubezero.gitSync.repoURL }}
        targetRevision: {{ .Values.kubezero.gitSync.targetRevision }}
        path: {{ .Values.kubezero.gitSync.path }}

        directory:
          recurse: true

      destination:
        server: https://kubernetes.default.svc
        namespace: argocd

      syncPolicy:
        automated:
          prune: true

argocd-image-updater:
  enabled: {{ default "false" (index .Values "argo" "argocd-image-updater" "enabled") }}
@@ -1,3 +1,9 @@
kubezero:
  server: https://kubernetes.default.svc
  repoURL: https://cdn.zero-downtime.net/charts
  defaultTargetRevision: '*'
  gitSync: {}

global:
  clusterName: zdt-trial-cluster

@@ -115,7 +121,7 @@ logging:
argo:
  enabled: false
  namespace: argocd
  targetRevision: 0.3.1
  targetRevision: 0.2.8
  argo-cd:
    enabled: false
    istio:
@@ -1,11 +0,0 @@
# KubeZero Helm hooks

## Abstract
Scripts within the `hooks.d` folder of each chart are executed at the respective times when the charts are applied via libhelm.

*These hooks do NOT work via ArgoCD*

## Flow
- hooks are executed as part of the libhelm tasks like `apply`
- they run with the current kubectl context
- they are executed in the root working directory; e.g. to set a helm value, a script can edit the `./values.yaml` file (see the sketch below)
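As a concrete illustration, a hypothetical chart-local hook (not part of this diff) could pin a value before libhelm renders the chart:

```bash
#!/bin/bash
# Hypothetical hooks.d/pre-install.sh sketch: sourced by libhelm during `apply`,
# running with the current kubectl context from the root working directory.
# Adjust a made-up value in the chart's ./values.yaml before rendering:
yq -i '.someComponent.replicas = 2' ./values.yaml
```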

@@ -3,7 +3,6 @@
## What's new - Major themes
- all KubeZero and support AMIs based on [Alpine 3.21](https://alpinelinux.org/posts/Alpine-3.21.0-released.html)
- network policies for ArgoCD
- Nvidia worker nodes are labeled with detected GPU product code
- Prometheus upgraded to V3, reducing CPU and memory requirements, see [upstream blog](https://prometheus.io/blog/2024/11/14/prometheus-3-0/)

## Features and fixes

@@ -11,10 +10,10 @@

## Version upgrades
- cilium 1.16.6
- istio 1.24.3
- ArgoCD 2.14.5 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- istio 1.24.2
- ArgoCD 2.14.3 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- Prometheus 3.1.0 / Grafana 11.5.1
- Nvidia container toolkit 1.17.4, drivers 570.86.15, Cuda 12.8
- Nvidia container toolkit 1.17, drivers 565.57.01, Cuda 12.7

## Resources
- [Kubernetes v1.31 upstream release blog](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/)
@@ -18,7 +18,7 @@ update_jsonnet() {

update_helm() {
  #helm repo update
  helm dep build
  helm dep update
}

# AWS public ECR