Compare commits
1 commit: main...renovate/k (fd125b4fa4)
@@ -2,13 +2,7 @@

# All things BEFORE the first controller / control plane upgrade
pre_control_plane_upgrade_cluster() {
  if [ "$PLATFORM" != "gke" ];then
    # patch multus DS to ONLY run pods on 1.31 controllers
    kubectl patch ds kube-multus-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true

    # patch kube-proxy DS to ONLY run pods on 1.31 controllers
    kubectl patch ds kube-proxy -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
  fi
  echo
}
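Side note on the patch above: a DaemonSet only schedules pods onto nodes whose labels match its nodeSelector, so pinning both DaemonSets to the v1.31.6 version label keeps their pods off controllers that have already moved on. A minimal sketch to verify the pin took effect (DS name and label key taken from the hunk; the jsonpath query is illustrative):

    # inspect the nodeSelector the patch sets on the pod template
    kubectl get ds kube-multus-ds -n kube-system -o jsonpath='{.spec.template.spec.nodeSelector}{"\n"}'
    # list the nodes that still match it, i.e. where the pods keep running
    kubectl get nodes -l node.kubernetes.io/kubezero.version=v1.31.6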
@@ -22,20 +16,7 @@ post_control_plane_upgrade_cluster() {
pre_cluster_upgrade_final() {
  set +e

  if [ "$PLATFORM" != "gke" ];then
    # cleanup multus
    kubectl delete clusterrolebinding multus
    kubectl delete clusterrole multus
    kubectl delete serviceaccount multus -n kube-system
    kubectl delete cm multus-cni-config -n kube-system
    kubectl delete ds kube-multus-ds -n kube-system
    kubectl delete NetworkAttachmentDefinition cilium
    kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io

    # remove kube-proxy
    kubectl -n kube-system delete ds kube-proxy
    kubectl -n kube-system delete cm kube-proxy
  fi
  echo

  set -e
}

@@ -63,7 +63,7 @@ render_kubeadm() {

  # Assemble kubeadm config
  cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
  for f in Cluster Kubelet; do
  for f in Cluster KubeProxy Kubelet; do
    # echo "---" >> /etc/kubernetes/kubeadm.yaml
    cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
  done

@@ -169,7 +169,7 @@ kubeadm_upgrade() {
  else
    pre_cluster_upgrade_final

    _kubeadm upgrade apply phase addon coredns $KUBE_VERSION
    _kubeadm upgrade apply phase addon all $KUBE_VERSION

    post_cluster_upgrade_final

@@ -239,7 +239,7 @@ control_plane_node() {
  if [[ "$CMD" =~ ^(join)$ ]]; then
    # Delete any former self in case forseti did not delete yet
    kubectl delete node ${NODENAME} --wait=true || true
    # Wait for all pods to be deleted otherwise we end up with stale pods
    # Wait for all pods to be deleted otherwise we end up with stale pods eg. kube-proxy and all goes to ....
    kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}

    # get current running etcd pods for etcdctl commands

@@ -251,7 +251,7 @@ control_plane_node() {
    done

    # see if we are a former member and remove our former self if so
    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
    [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints

    # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
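The `|| true` variant above guards the case where this node was never an etcd member: assuming the script runs with errexit and pipefail (not shown in this hunk), a non-matching grep would otherwise abort the join. With the guard, MY_ID simply ends up empty and the `-n` test skips the removal. A standalone sketch of the pattern (the `retry` wrapper is repo-local, so plain etcdctl is shown here):

    set -eo pipefail
    # grep exits 1 on no match; '|| true' keeps an empty result from killing the script
    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
    [ -n "$MY_ID" ] && etcdctl member remove $MY_ID --endpoints=$etcd_endpoints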
@@ -309,9 +309,8 @@ control_plane_node() {
    _kubeadm init phase mark-control-plane
    _kubeadm init phase kubelet-finalize all

    # we skip kube-proxy
    if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
      _kubeadm init phase addon coredns
      _kubeadm init phase addon all
    fi

    post_kubeadm

@@ -3,7 +3,6 @@
# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
LOCAL_DEV=${LOCAL_DEV:-""}
ENV_VALUES=""

export HELM_SECRETS_BACKEND="vals"

@@ -81,19 +80,15 @@ function get_kubezero_secret() {
  get_secret_val kubezero kubezero-secrets "$1"
}


function ensure_kubezero_secret_key() {
  local ns=$1
  local secret=$2
  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
  local key=""
  local val=""

  local secret="$(kubectl get secret -n $ns $secret -o yaml)"
  local key
  local val

  for key in $1; do
    val=$(echo $secret | yq ".data.\"$key\"")
  for key in $@; do
    val=$(echo "$secret" | yq ".data.\"$key\"")
    if [ "$val" == "null" ]; then
      set_kubezero_secret $key ""
      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
    fi
  done
}
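Worth noting on the rewrite above: yq prints the literal string null when a map key is absent, which is exactly what the function tests for, and quoting "$secret" preserves the newlines yq needs to parse the document. A minimal standalone sketch of the per-key check (secret name from the hunk; the key is hypothetical):

    ns=kubezero
    secret="$(kubectl get secret -n $ns kubezero-secrets -o yaml)"
    key="telemetry.password"   # hypothetical key name
    val=$(echo "$secret" | yq ".data.\"$key\"")
    # seed the key with an empty value only when it is missing
    [ "$val" == "null" ] && kubectl patch secret -n $ns kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"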
@@ -215,7 +210,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
# helm template | kubectl apply -f -
# confine to one namespace if possible
function render() {
  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $ENV_VALUES \
  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
    | python3 -c '
#!/usr/bin/python3
import yaml
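Replacing the fixed $ENV_VALUES with $@ lets each caller forward ad-hoc helm flags straight through render. A hedged usage sketch (the flag values are illustrative, not from this repo):

    # extra arguments land verbatim on the helm command line
    render --set global.clusterName=test -f /tmp/extra-values.yaml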
@@ -267,11 +262,6 @@ function _helm() {

    crds

  elif [ $action == "dryrun" ]; then
    cat $WORKDIR/values.yaml
    render
    cat $WORKDIR/helm.yaml

  elif [ $action == "apply" -o $action == "replace" ]; then
    echo "using values to $action of module $module: "
    cat $WORKDIR/values.yaml

@@ -10,14 +10,7 @@ def migrate(values):

    # 1.32
    try:
        values["istio-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-ingress"]["gateway"]["service"]["ports"]
        values["istio-ingress"]["gateway"]["service"].pop("ports")
    except KeyError:
        pass

    try:
        values["istio-private-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-private-ingress"]["gateway"]["service"]["ports"]
        values["istio-private-ingress"]["gateway"]["service"].pop("ports")
    except KeyError:
        pass
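For reference, the same ports -> extraPorts rename can be expressed directly against a values file with yq v4; a rough shell equivalent of the Python migration above (file name hypothetical, and unlike the try/except it would write a null extraPorts when ports is absent, so treat it as a sketch only):

    yq -i '.["istio-ingress"].gateway.service.extraPorts = .["istio-ingress"].gateway.service.ports | del(.["istio-ingress"].gateway.service.ports)' values.yaml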
@@ -47,6 +47,7 @@ Kubernetes: `>= 1.32.0-0`
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration

- https://github.com/awslabs/amazon-eks-ami

@@ -22,6 +22,7 @@
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration

- https://github.com/awslabs/amazon-eks-ami

@@ -29,6 +29,12 @@ kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.

@@ -6,8 +6,6 @@ featureGates:
  ControlPlaneKubeletLocalMode: true
  NodeLocalCRISocket: true
controlPlaneEndpoint: {{ .Values.api.endpoint }}
proxy:
  disabled: true
networking:
  podSubnet: 10.244.0.0/16
etcd:

@@ -7,8 +7,6 @@ localAPIEndpoint:
patches:
  directory: {{ . }}
{{- end }}
skipPhases:
  - addon/kube-proxy
nodeRegistration:
  criSocket: "unix:///run/containerd/containerd.sock"
  ignorePreflightErrors:

charts/kubeadm/templates/KubeProxyConfiguration.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# kube-proxy doesn't really support setting dynamic bind-address via config, replaced by cilium long-term anyways
metricsBindAddress: "0.0.0.0:10249"
mode: "iptables"
logging:
  format: json
iptables:
  localhostNodePorts: false
#nodePortAddresses: primary
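This new template is what the extended render_kubeadm() loop earlier in the diff appends to the combined kubeadm.yaml, so kubeadm consumes it as one document of the multi-document config. A quick sketch to sanity-check the assembled file (path from the diff; assumes yq v4 with multi-document eval):

    # expect: iptables
    yq eval-all 'select(.kind == "KubeProxyConfiguration") | .mode' ${HOSTFS}/etc/kubernetes/kubeadm.yaml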
@@ -3,6 +3,12 @@ kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.

@@ -108,7 +114,7 @@ rules:
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources:
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"

@@ -131,7 +137,7 @@ rules:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources:
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.8.15
version: 0.8.14
appVersion: v1.31
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

@@ -34,7 +34,7 @@ dependencies:
    repository: https://nvidia.github.io/k8s-device-plugin
    condition: nvidia-device-plugin.enabled
  - name: neuron-helm-chart
    version: 1.1.2
    version: 1.1.1
    # https://github.com/aws-neuron/neuron-helm-charts/tree/main/charts/neuron-helm-chart
    repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
    condition: neuron-helm-chart.enabled

@@ -43,7 +43,7 @@ dependencies:
    repository: https://bitnami-labs.github.io/sealed-secrets
    condition: sealed-secrets.enabled
  - name: aws-node-termination-handler
    version: 0.27.1
    version: 0.27.0
    repository: "oci://public.ecr.aws/aws-ec2/helm"
    condition: aws-node-termination-handler.enabled
  - name: aws-eks-asg-rolling-update-handler

@@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.4.0
version: 0.3.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -1,6 +1,6 @@
# kubezero-argo




KubeZero Argo - Events, Workflow, CD

@@ -18,7 +18,7 @@ Kubernetes: `>= 1.30.0-0`

| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 8.0.9 |
| https://argoproj.github.io/argo-helm | argo-cd | 7.9.0 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.15 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |

@@ -28,16 +28,17 @@ Kubernetes: `>= 1.30.0-0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
| argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
| argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n health.lua: |\n hs = {}\n hs.status = \"Progressing\"\n hs.message = \"\"\n if obj.status ~= nil then\n if obj.status.health ~= nil then\n hs.status = obj.status.health.status\n if obj.status.health.message ~= nil then\n hs.message = obj.status.health.message\n end\n end\n end\n return hs\n"` | |
| argo-cd.configs.cm."server.rbac.log.enforce.enable" | string | `nil` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.32 - Release notes"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.32"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.31"` | |
| argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
| argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |

@@ -53,7 +54,7 @@ Kubernetes: `>= 1.30.0-0`
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
| argo-cd.global.image.tag | string | `"v3.0.3"` | |
| argo-cd.global.image.tag | string | `"v2.14.9-1"` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.global.networkPolicy.create | bool | `true` | |
| argo-cd.istio.enabled | bool | `false` | |

@@ -4,6 +4,6 @@ condition: 'index .Values "argo-cd" "controller" "metrics" "enabled"'
folder: KubeZero
dashboards:
  - name: ArgoCD
    url: https://raw.githubusercontent.com/argoproj/argo-cd/refs/heads/master/examples/dashboard.json
    url: https://grafana.com/api/dashboards/14584/revisions/1/download
    tags:
      - ArgoCD

File diff suppressed because one or more lines are too long

@@ -25,8 +25,9 @@ spec:
    automated:
      prune: true
    syncOptions:
      - ServerSideApply=true
      - ApplyOutOfSyncOnly=true
  info:
    - name: "Source:"
      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.32/"
      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.31/"
{{- end }}

@@ -26,7 +26,7 @@ argo-events:
  versions:
    - version: 2.10.11
      natsImage: nats:2.11.1-scratch
      metricsExporterImage: natsio/prometheus-nats-exporter:0.17.3
      metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
      configReloaderImage: natsio/nats-server-config-reloader:0.14.1
      startCommand: /nats-server

@@ -38,7 +38,7 @@ argo-cd:
      format: json
  image:
    repository: public.ecr.aws/zero-downtime/zdt-argocd
    tag: v3.0.3
    tag: v2.14.9-1
  networkPolicy:
    create: true

@@ -49,8 +49,8 @@ argo-cd:
      .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }

    cm:
      ui.bannercontent: "KubeZero v1.32 - Release notes"
      ui.bannerurl: "https://kubezero.com/releases/v1.32"
      ui.bannercontent: "KubeZero v1.31 - Release notes"
      ui.bannerurl: "https://kubezero.com/releases/v1.31"
      ui.bannerpermanent: "true"
      ui.bannerposition: "bottom"

@@ -59,9 +59,9 @@ argo-cd:

      timeout.reconciliation: 300s

      application.resourceTrackingMethod: annotation
      installationID: "KubeZero-ArgoCD"
      application.instanceLabelKey: Null
      server.rbac.log.enforce.enable: Null

      resource.customizations: |
        argoproj.io/Application:

@@ -89,6 +89,7 @@ argo-cd:
      extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="

    params:
      controller.resource.health.persist: "false"
      controller.diff.server.side: "true"
      controller.sync.timeout.seconds: 1800

@@ -289,7 +289,7 @@ trivy:
    #tag: 0.57.0
  persistence:
    enabled: true
    size: 2Gi
    size: 1Gi
  rbac:
    create: false

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-falco
description: Falco Container Security and Audit components
type: application
version: 0.1.3
version: 0.1.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -16,7 +16,7 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: falco
    version: 5.0.0
    version: 4.2.5
    repository: https://falcosecurity.github.io/charts
    condition: k8saudit.enabled
    alias: k8saudit

@@ -42,3 +42,17 @@ helm.sh/chart: {{ include "kubezero-lib.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}

{{- /*
kubezero-lib.util.merge will merge two YAML templates and output the result.
This takes an array of three values:
- the top context
- the template name of the overrides (destination)
- the template name of the base (source)
*/ -}}
{{- define "kubezero-lib.util.merge" -}}
{{- $top := first . -}}
{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
{{- toYaml (merge $overrides $tpl) -}}
{{- end -}}
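A hedged usage sketch of the new helper (chart and template names hypothetical): it renders both named templates, parses them back into maps, and deep-merges them, with the overrides template (second list element) winning on conflicting keys, per Sprig's merge semantics:

    {{- /* both templates must emit valid YAML maps; overrides win over base */}}
    {{- include "kubezero-lib.util.merge" (list . "mychart.overrides" "mychart.base") }}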
@@ -12,7 +12,7 @@ kube-prometheus-stack:
  kubeStateMetrics:
    enabled: true
  kubeProxy:
    enabled: false
    enabled: true

  kubeEtcd:
    enabled: true

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.3.12
version: 0.3.11
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -17,7 +17,7 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: nats
    version: 1.3.7
    version: 1.3.3
    repository: https://nats-io.github.io/k8s/helm/charts/
    condition: nats.enabled
  - name: rabbitmq

@@ -1,6 +1,6 @@
# kubezero-mq

 
 

KubeZero umbrella chart for MQ systems like NATS, RabbitMQ

@@ -20,7 +20,7 @@ Kubernetes: `>= 1.26.0`
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.7 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |

## Values

@@ -32,8 +32,6 @@ Kubernetes: `>= 1.26.0`
| nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| nats.mqtt.enabled | bool | `false` | |
| nats.natsBox.enabled | bool | `false` | |
| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".maxSkew | int | `1` | |
| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".whenUnsatisfiable | string | `"DoNotSchedule"` | |
| nats.promExporter.enabled | bool | `false` | |
| nats.promExporter.podMonitor.enabled | bool | `false` | |
| rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |

@@ -1,7 +1,7 @@
configmap: grafana-dashboards-nats
condition: '.Values.nats.promExporter.podMonitor.enabled'
condition: '.Values.nats.exporter.serviceMonitor.enabled'
gzip: true
# folder:
# folder:
dashboards:
  - name: nats
    url: https://grafana.com/api/dashboards/13707/revisions/1/download

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network
description: KubeZero umbrella chart for all things network
type: application
version: 0.5.9
version: 0.5.8
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -10,6 +10,7 @@ keywords:
  - multus
  - cilium
  - aws-cni
  - metallb
maintainers:
  - name: Stefan Reimer
    email: stefan@zero-downtime.net

@@ -18,9 +19,13 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: cilium
    version: 1.17.4
    version: 1.17.3
    repository: https://helm.cilium.io/
    condition: cilium.enabled
  - name: metallb
    version: 0.14.9
    repository: https://metallb.github.io/metallb
    condition: metallb.enabled
  - name: haproxy
    version: 1.24.0
    repository: https://haproxytech.github.io/helm-charts

@@ -1,6 +1,6 @@
# kubezero-network

 
 

KubeZero umbrella chart for all things network

@@ -20,22 +20,20 @@ Kubernetes: `>= 1.30.0-0`
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
| https://helm.cilium.io/ | cilium | 1.17.4 |
| https://helm.cilium.io/ | cilium | 1.17.3 |
| https://metallb.github.io/metallb | metallb | 0.14.9 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| cilium.bpf.preallocateMaps | bool | `true` | |
| cilium.cgroup.autoMount.enabled | bool | `false` | |
| cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` | |
| cilium.cluster.id | int | `240` | |
| cilium.cluster.name | string | `"default"` | |
| cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
| cilium.cni.exclusive | bool | `true` | |
| cilium.cni.iptablesRemoveAWSRules | bool | `false` | |
| cilium.cni.exclusive | bool | `false` | |
| cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
| cilium.dnsProxy.enableTransparentMode | bool | `true` | |
| cilium.enabled | bool | `false` | |
| cilium.envoy.enabled | bool | `false` | |
| cilium.hubble.enabled | bool | `false` | |

@@ -47,9 +45,6 @@ Kubernetes: `>= 1.30.0-0`
| cilium.hubble.ui.enabled | bool | `false` | |
| cilium.image.useDigest | bool | `false` | |
| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
| cilium.k8sServiceHost | string | `""` | |
| cilium.k8sServicePort | int | `6443` | |
| cilium.kubeProxyReplacement | bool | `true` | |
| cilium.l7Proxy | bool | `false` | |
| cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cilium.operator.prometheus.enabled | bool | `false` | |

@@ -59,13 +54,12 @@ Kubernetes: `>= 1.30.0-0`
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
| cilium.operator.tolerations[2].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[2].key | string | `"node.kubernetes.io/not-ready"` | |
| cilium.prometheus.enabled | bool | `false` | |
| cilium.prometheus.port | int | `9091` | |
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
| cilium.resources.requests.cpu | string | `"50m"` | |
| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.resources.limits.memory | string | `"1Gi"` | |
| cilium.resources.requests.cpu | string | `"10m"` | |
| cilium.resources.requests.memory | string | `"160Mi"` | |
| cilium.routingMode | string | `"tunnel"` | |
| cilium.sysctlfix.enabled | bool | `false` | |
| cilium.tunnelProtocol | string | `"geneve"` | |

@@ -113,6 +107,11 @@ Kubernetes: `>= 1.30.0-0`
| haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
| haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
| haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| metallb.enabled | bool | `false` | |
| metallb.ipAddressPools | list | `[]` | |
| multus.clusterNetwork | string | `"cilium"` | |
| multus.defaultNetworks | list | `[]` | |
| multus.enabled | bool | `false` | |

charts/kubezero-network/templates/metallb/config.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
{{- if .Values.metallb.enabled }}
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2advertisement1
  namespace: kube-system
spec:
  ipAddressPools:
{{- range $key, $val := .Values.metallb.ipAddressPools }}
{{- if eq $val.protocol "layer2" }}
    - {{ $val.name }}
{{- end }}
{{- end }}
---

{{- range $key, $val := .Values.metallb.ipAddressPools }}
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: {{ $val.name }}
  namespace: kube-system
spec:
  addresses:
{{- $val.addresses | toYaml | nindent 4 }}
{{- end }}
---
{{- end }}
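Given the values shape this new template expects, a layer2 pool renders into one L2Advertisement plus one IPAddressPool per list entry. A sketch of values that would exercise it, mirroring the commented example in the values file below (addresses illustrative):

    metallb:
      enabled: true
      ipAddressPools:
        - name: my-ip-space
          protocol: layer2
          addresses:
            - 192.168.42.0/24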
@@ -1,3 +1,19 @@
metallb:
  enabled: false

  controller:
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""

  ipAddressPools: []
  #- name: my-ip-space
  # protocol: layer2
  # addresses:
  # - 192.168.42.0/24

multus:
  enabled: false
  image:

@@ -17,18 +33,17 @@ cilium:

  resources:
    requests:
      cpu: 50m
      memory: 256Mi
    # limits:
    #   memory: 1Gi
    #   cpu: 4000m
      cpu: 10m
      memory: 160Mi
    limits:
      memory: 1Gi
      # cpu: 4000m

  cni:
    binPath: "/usr/libexec/cni"
    logFile: /var/log/cilium-cni.log
    #-- Ensure this is false if multus is enabled
    exclusive: true
    iptablesRemoveAWSRules: false
    exclusive: false

  cluster:
    # This should match the second octet of clusterPoolIPv4PodCIDRList

@@ -48,32 +63,13 @@ cilium:
    enabled: false
    #rollOutCiliumPods: true

  kubeProxyReplacement: true
  dnsProxy:
    enableTransparentMode: true

  # For LB support via L2announcement or BGP - on-prem only
  # l2announcements:
  #   enabled: true
  # Not needed normally
  # externalIPs:
  #   enabled: true

  k8sServiceHost: ""
  k8sServicePort: 6443
  # k8s:
  #   # This has to be set to the DNS name of all API servers
  #   # For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
  #   apiServerURLs: ""

  cgroup:
    autoMount:
      enabled: false
    hostRoot: "/sys/fs/cgroup"

  bpf:
    preallocateMaps: true
  # we need biDirectional so use helm init-container
  #bpf:
  #  autoMount:
  #    enabled: false

@@ -95,11 +91,9 @@ cilium:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
      # the operator removes the taints,
      # so we need to break chicken egg
      # so we need to break chicken egg on single controller
      - key: node.cilium.io/agent-not-ready
        effect: NoSchedule
      - key: node.kubernetes.io/not-ready
        effect: NoSchedule

    nodeSelector:
      node-role.kubernetes.io/control-plane: ""

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-sql
description: KubeZero umbrella chart for SQL databases, mariadb-galera
type: application
version: 0.4.0
version: 0.4.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -17,7 +17,7 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: mariadb-galera
    version: 14.0.10
    version: 14.2.6
    repository: https://charts.bitnami.com/bitnami
    condition: mariadb-galera.enabled
kubeVersion: ">= 1.26.0"

@@ -9,11 +9,10 @@ metadata:
  namespace: argocd
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
  {{- with ( index .Values $name "annotations" ) }}
  annotations:
    argocd.argoproj.io/sync-options: Replace=true
    {{- with ( index .Values $name "annotations" ) }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- end }}
  {{- if not ( index .Values $name "retain" ) }}
  finalizers:
    - resources-finalizer.argocd.argoproj.io

@@ -28,7 +27,7 @@ spec:
    helm:
      skipTests: true
      valuesObject:
        {{- toYaml (merge (omit (index .Values $name) "enabled" "namespace" "retain" "targetRevision") (fromYaml (include (print $name "-values") $ ))) | nindent 8 }}
        {{- include (print $name "-values") $ | nindent 8 }}

  destination:
    server: "https://kubernetes.default.svc"

@@ -39,6 +38,7 @@ spec:
    automated:
      prune: true
    syncOptions:
      - ServerSideApply=true
      - CreateNamespace=true
      - ApplyOutOfSyncOnly=true
  info:

@@ -67,7 +67,7 @@ gateway:
        gatewayProtocol: HTTPS
        tls:
          mode: SIMPLE
    {{- with index .Values "istio-ingress" "gateway" "service" "extraPorts" }}
    {{- with index .Values "istio-ingress" "gateway" "service" "ports" }}
    {{- toYaml . | nindent 4 }}
    {{- end }}

@@ -93,6 +93,7 @@ certificates:
    {{- toYaml $cert.dnsNames | nindent 4 }}
  {{- end }}
{{- end }}
proxyProtocol: {{ default true (index .Values "istio-ingress" "proxyProtocol") }}
{{- with (index .Values "istio-ingress" "hardening") }}
hardening:
  {{- toYaml . | nindent 2 }}
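The gateway template now reads service.extraPorts, matching the values migration earlier in this diff. A sketch of the renamed values shape (the port entry is illustrative, not from this repo):

    istio-ingress:
      gateway:
        service:
          extraPorts:
            - name: smtp
              port: 25
              targetPort: 25
              protocol: TCP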
@@ -64,7 +64,7 @@ gateway:
        gatewayProtocol: HTTPS
        tls:
          mode: SIMPLE
    {{- with index .Values "istio-private-ingress" "gateway" "service" "extraPorts" }}
    {{- with index .Values "istio-private-ingress" "gateway" "service" "ports" }}
    {{- toYaml . | nindent 4 }}
    {{- end }}

@@ -88,6 +88,7 @@ certificates:
  dnsNames:
    {{- toYaml $cert.dnsNames | nindent 4 }}
{{- end }}
proxyProtocol: {{ default true (index .Values "istio-private-ingress" "proxyProtocol") }}
{{- with (index .Values "istio-private-ingress" "hardening") }}
hardening:
  {{- toYaml . | nindent 2 }}

@@ -1,22 +1,30 @@
{{- define "network-values" }}
cilium:
  k8sServiceHost: {{ .Values.global.apiServerUrl }}
multus:
  enabled: true
  clusterNetwork: "cilium"

# {{- if eq .Values.global.platform "aws" }}
# image:
#   pullPolicy: Never
# {{- end }}

cilium:
  enabled: true

# {{- if eq .Values.global.platform "aws" }}
# image:
#   pullPolicy: Never
# {{- end }}
# k8s:
#   apiServerURLs: "https://{{ .Values.global.apiServerUrl }}"

  cluster:
    name: {{ .Values.global.clusterName }}
    {{- with ((.Values.network.cilium).cluster).id }}
    {{- with .Values.network.cilium.cluster.id }}
    id: {{ . }}
  ipam:
    operator:
      clusterPoolIPv4PodCIDRList:
        - 10.{{ . }}.0.0/16
    {{- end }}
    {{- end }}
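The added parentheses make the lookup nil-safe: (.Values.network.cilium) evaluates to nil instead of failing when network.cilium is unset, and chaining .cluster and .id off nil stays nil, so the whole with block is simply skipped. A minimal illustration (values shown as comments):

    {{- /* network: {}                            -> block skipped, no render error        */}}
    {{- /* network: {cilium: {cluster: {id: 42}}} -> id: 42 and pod CIDR 10.42.0.0/16      */}}
    {{- with ((.Values.network.cilium).cluster).id }}
    id: {{ . }}
    {{- end }}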
  prometheus:
    enabled: {{ .Values.metrics.enabled }}

@@ -32,6 +40,11 @@ cilium:
    serviceMonitor:
      enabled: {{ .Values.metrics.enabled }}

{{- with .Values.network.metallb }}
metallb:
  {{- toYaml . | nindent 2 }}
{{- end }}

{{- with .Values.network.haproxy }}
haproxy:
  {{- toYaml . | nindent 2 }}

@@ -61,10 +61,8 @@ opensearch:
  {{- toYaml . | nindent 2 }}
  {{- end }}

  # Disabled until upstream made up their mind
  # https://github.com/opensearch-project/technical-steering/issues/35
  # serviceMonitor:
  #   enabled: {{ .Values.metrics.enabled }}
  serviceMonitor:
    enabled: {{ .Values.metrics.enabled }}
{{- end }}

{{- if index .Values "telemetry" "opensearch-dashboards" }}

@@ -73,10 +71,8 @@ opensearch-dashboards:
  {{- toYaml . | nindent 2 }}
  {{- end }}

  # Disabled until upstream made up their mind
  # https://github.com/opensearch-project/technical-steering/issues/35
  # serviceMonitor:
  #   enabled: {{ .Values.metrics.enabled }}
  serviceMonitor:
    enabled: {{ .Values.metrics.enabled }}
{{- end }}

{{- end }}

@@ -1,6 +1,5 @@
global:
  clusterName: zdt-trial-cluster
  apiServerUrl: localhost:6443

  # platform: aws (kubeadm, default), gke, or nocloud
  platform: "aws"

@@ -33,9 +32,9 @@ addons:
network:
  enabled: true
  retain: true
  targetRevision: 0.5.9
  targetRevision: 0.5.8
  cilium:
    enabled: true
    cluster: {}

cert-manager:
  enabled: false

@@ -61,13 +60,13 @@ storage:
istio:
  enabled: false
  namespace: istio-system
  targetRevision: 0.24.6
  targetRevision: 0.24.3

istio-ingress:
  enabled: false
  chart: kubezero-istio-gateway
  namespace: istio-ingress
  targetRevision: 0.24.6
  targetRevision: 0.24.3
  gateway:
    service: {}

@@ -75,7 +74,7 @@ istio-private-ingress:
  enabled: false
  chart: kubezero-istio-gateway
  namespace: istio-ingress
  targetRevision: 0.24.6
  targetRevision: 0.24.3
  gateway:
    service: {}

@@ -88,7 +87,7 @@ falco:
telemetry:
  enabled: false
  namespace: telemetry
  targetRevision: 0.5.0
  targetRevision: 0.4.1

operators:
  enabled: false

@@ -118,7 +117,7 @@ logging:
argo:
  enabled: false
  namespace: argocd
  targetRevision: 0.4.0
  targetRevision: 0.3.2
  argo-cd:
    enabled: false
    istio: