Compare commits


27 Commits

Author SHA1 Message Date
b062116c54 chore(deps): update kubezero-operators-dependencies 2025-06-03 03:01:32 +00:00
5a16422f75 Merge pull request 'chore(deps): update helm release falco to v5' (#120) from renovate/kubezero-falco-major-kubezero-falco-dependencies into main
Reviewed-on: #120
2025-06-02 17:48:45 +00:00
84081514c6 chore(deps): update helm release falco to v5 2025-06-02 17:48:45 +00:00
12fd6df3d8 Merge pull request 'chore(deps): update kubezero-addons-dependencies' (#87) from renovate/kubezero-addons-kubezero-addons-dependencies into main
Reviewed-on: #87
2025-06-02 17:48:39 +00:00
71d8919cc5 chore(deps): update kubezero-addons-dependencies 2025-06-02 17:48:39 +00:00
58986e1d5b feat: remove metallb, minor control-plane fix 2025-06-02 17:47:10 +00:00
257bedf284 chore: mq version bump, minor helm tweaks 2025-06-02 14:07:53 +00:00
6a002155a7 Merge pull request 'chore(deps): update natsio/prometheus-nats-exporter docker tag to v0.17.3' (#82) from renovate/natsio-prometheus-nats-exporter-0.x into main
Reviewed-on: #82
2025-06-02 13:49:47 +00:00
fb9865ef2c chore(deps): update natsio/prometheus-nats-exporter docker tag to v0.17.3 2025-06-02 13:49:47 +00:00
f01df7954d Merge pull request 'chore(deps): update helm release nats to v1.3.7' (#90) from renovate/kubezero-mq-kubezero-mq-dependencies into main
Reviewed-on: #90
2025-06-02 13:49:22 +00:00
e6493e9961 chore(deps): update helm release nats to v1.3.7 2025-06-02 13:49:22 +00:00
9e87f92d45 feat: merge kubezero and module values by default 2025-06-01 17:27:13 +00:00
49fa7b3c42 feat: ensure kube-proxy gets phased out during 1.32 upgrade 2025-05-30 17:55:14 +00:00
1538ea0d45 Merge pull request 'remove-kube-proxy' (#118) from remove-kube-proxy into main
Reviewed-on: #118
2025-05-30 12:36:56 +00:00
f72ef007f2 fix: more upgrade fixes 2025-05-30 12:36:56 +00:00
87e7f5fe20 fix: ensure dnsproxy is set, clean default values 2025-05-30 12:36:56 +00:00
7527e085ea feat: remove kube-proxy, enable cilium 2025-05-30 12:36:56 +00:00
7612d257aa fix: various typos, remove unnecessary LifeCycle for cilium-agent DS 2025-05-30 11:25:01 +00:00
c16a233864 ci: remove duplicated func 2025-05-30 11:14:07 +00:00
33307fccce Merge pull request 'disable-multus' (#117) from disable-multus into main
Reviewed-on: #117
2025-05-30 11:09:36 +00:00
09a2ead705 feat: add post-upgrade multus cleanup 2025-05-30 11:06:51 +00:00
7863202ca7 feat: disable multus by default 2025-05-29 23:52:18 +00:00
d3036ad1ac fix: minor tweaks 2025-05-29 17:19:51 +00:00
04ca35c676 Merge pull request 'chore(deps): update helm release cilium to v1.17.4' (#111) from renovate/kubezero-network-kubezero-network-dependencies into main
Reviewed-on: #111
2025-05-28 17:20:56 +00:00
55e22e7f6d chore(deps): update helm release cilium to v1.17.4 2025-05-28 17:20:56 +00:00
9e9ae3e8b8 Feat: tune ArgoCD sync options 2025-05-24 14:10:26 +00:00
ccdf4652cf feat: upgrade ArgoCD to V3 2025-05-23 17:46:34 +00:00
36 changed files with 161 additions and 194 deletions


@ -2,7 +2,13 @@
# All things BEFORE the first controller / control plane upgrade
pre_control_plane_upgrade_cluster() {
echo
if [ "$PLATFORM" != "gke" ];then
# patch multus DS to ONLY run pods on 1.31 controllers
kubectl patch ds kube-multus-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
# patch kube-proxy DS to ONLY run pods on 1.31 controllers
kubectl patch ds kube-proxy -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
fi
}
@ -16,7 +22,20 @@ post_control_plane_upgrade_cluster() {
pre_cluster_upgrade_final() {
set +e
echo
if [ "$PLATFORM" != "gke" ];then
# cleanup multus
kubectl delete clusterrolebinding multus
kubectl delete clusterrole multus
kubectl delete serviceaccount multus -n kube-system
kubectl delete cm multus-cni-config -n kube-system
kubectl delete ds kube-multus-ds -n kube-system
kubectl delete NetworkAttachmentDefinition cilium
kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io
# remove kube-proxy
kubectl -n kube-system delete ds kube-proxy
kubectl -n kube-system delete cm kube-proxy
fi
set -e
}
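A minimal verification sketch (assuming kubectl access to the upgraded cluster): once pre_cluster_upgrade_final has run, the objects deleted above should simply no longer exist, so both lookups below are expected to return NotFound.

# multus and kube-proxy DaemonSets should be gone after the final upgrade step
kubectl -n kube-system get ds kube-multus-ds kube-proxy
# the multus CRD removed above should be gone as well
kubectl get crd network-attachment-definitions.k8s.cni.cncf.io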


@ -63,7 +63,7 @@ render_kubeadm() {
# Assemble kubeadm config
cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
for f in Cluster KubeProxy Kubelet; do
for f in Cluster Kubelet; do
# echo "---" >> /etc/kubernetes/kubeadm.yaml
cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
done
@ -169,7 +169,7 @@ kubeadm_upgrade() {
else
pre_cluster_upgrade_final
_kubeadm upgrade apply phase addon all $KUBE_VERSION
_kubeadm upgrade apply phase addon coredns $KUBE_VERSION
post_cluster_upgrade_final
@ -239,7 +239,7 @@ control_plane_node() {
if [[ "$CMD" =~ ^(join)$ ]]; then
# Delete any former self in case forseti did not delete yet
kubectl delete node ${NODENAME} --wait=true || true
# Wait for all pods to be deleted otherwise we end up with stale pods eg. kube-proxy and all goes to ....
# Wait for all pods to be deleted otherwise we end up with stale pods
kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}
# get current running etcd pods for etcdctl commands
@ -251,7 +251,7 @@ control_plane_node() {
done
# see if we are a former member and remove our former self if so
MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
[ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
# flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
@ -309,8 +309,9 @@ control_plane_node() {
_kubeadm init phase mark-control-plane
_kubeadm init phase kubelet-finalize all
# we skip kube-proxy
if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
_kubeadm init phase addon all
_kubeadm init phase addon coredns
fi
post_kubeadm


@ -3,6 +3,7 @@
# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
LOCAL_DEV=${LOCAL_DEV:-""}
ENV_VALUES=""
export HELM_SECRETS_BACKEND="vals"
@ -80,15 +81,19 @@ function get_kubezero_secret() {
get_secret_val kubezero kubezero-secrets "$1"
}
function ensure_kubezero_secret_key() {
local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
local key=""
local val=""
for key in $@; do
val=$(echo "$secret" | yq ".data.\"$key\"")
function ensure_kubezero_secret_key() {
local ns=$1
local secret=$2
local secret="$(kubectl get secret -n $ns $secret -o yaml)"
local key
local val
for key in $1; do
val=$(echo $secret | yq ".data.\"$key\"")
if [ "$val" == "null" ]; then
kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
set_kubezero_secret $key ""
fi
done
}
@ -210,7 +215,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
# helm template | kubectl apply -f -
# confine to one namespace if possible
function render() {
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $ENV_VALUES \
| python3 -c '
#!/usr/bin/python3
import yaml
@ -262,6 +267,11 @@ function _helm() {
crds
elif [ $action == "dryrun" ]; then
cat $WORKDIR/values.yaml
render
cat $WORKDIR/helm.yaml
elif [ $action == "apply" -o $action == "replace" ]; then
echo "using values to $action of module $module: "
cat $WORKDIR/values.yaml


@ -10,7 +10,14 @@ def migrate(values):
# 1.32
try:
values["istio-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-ingress"]["gateway"]["service"]["ports"]
values["istio-ingress"]["gateway"]["service"].pop("ports")
except KeyError:
pass
try:
values["istio-private-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-private-ingress"]["gateway"]["service"]["ports"]
values["istio-private-ingress"]["gateway"]["service"].pop("ports")
except KeyError:
pass


@ -47,7 +47,6 @@ Kubernetes: `>= 1.32.0-0`
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
- https://github.com/awslabs/amazon-eks-ami


@ -22,7 +22,6 @@
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
- https://github.com/awslabs/amazon-eks-ami


@ -29,12 +29,6 @@ kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
# Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.


@ -6,6 +6,8 @@ featureGates:
ControlPlaneKubeletLocalMode: true
NodeLocalCRISocket: true
controlPlaneEndpoint: {{ .Values.api.endpoint }}
proxy:
disabled: true
networking:
podSubnet: 10.244.0.0/16
etcd:


@ -7,6 +7,8 @@ localAPIEndpoint:
patches:
directory: {{ . }}
{{- end }}
skipPhases:
- addon/kube-proxy
nodeRegistration:
criSocket: "unix:///run/containerd/containerd.sock"
ignorePreflightErrors:
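For context, the two kubeadm template additions above (proxy.disabled in the ClusterConfiguration and the addon/kube-proxy entry under skipPhases) have a command-line counterpart; a sketch of the equivalent calls, not part of this change set:

# skip the kube-proxy addon when bootstrapping a control plane
kubeadm init --config /etc/kubernetes/kubeadm.yaml --skip-phases=addon/kube-proxy
# during an upgrade, only re-apply the CoreDNS addon, mirroring kubeadm.sh above
kubeadm upgrade apply phase addon coredns $KUBE_VERSION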


@ -1,10 +0,0 @@
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
metricsBindAddress: "0.0.0.0:10249"
mode: "iptables"
logging:
format: json
iptables:
localhostNodePorts: false
#nodePortAddresses: primary


@ -3,12 +3,6 @@ kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
# Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.
@ -114,7 +108,7 @@ rules:
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources:
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
@ -137,7 +131,7 @@ rules:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources:
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.8.14
version: 0.8.15
appVersion: v1.31
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -34,7 +34,7 @@ dependencies:
repository: https://nvidia.github.io/k8s-device-plugin
condition: nvidia-device-plugin.enabled
- name: neuron-helm-chart
version: 1.1.1
version: 1.1.2
# https://github.com/aws-neuron/neuron-helm-charts/tree/main/charts/neuron-helm-chart
repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
condition: neuron-helm-chart.enabled
@ -43,7 +43,7 @@ dependencies:
repository: https://bitnami-labs.github.io/sealed-secrets
condition: sealed-secrets.enabled
- name: aws-node-termination-handler
version: 0.27.0
version: 0.27.1
repository: "oci://public.ecr.aws/aws-ec2/helm"
condition: aws-node-termination-handler.enabled
- name: aws-eks-asg-rolling-update-handler


@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.3.4
version: 0.4.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:


@ -1,6 +1,6 @@
# kubezero-argo
![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square)
![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square)
KubeZero Argo - Events, Workflow, CD
@ -18,7 +18,7 @@ Kubernetes: `>= 1.30.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 7.9.0 |
| https://argoproj.github.io/argo-helm | argo-cd | 8.0.9 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.15 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
@ -28,17 +28,16 @@ Kubernetes: `>= 1.30.0-0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
| argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
| argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n health.lua: |\n hs = {}\n hs.status = \"Progressing\"\n hs.message = \"\"\n if obj.status ~= nil then\n if obj.status.health ~= nil then\n hs.status = obj.status.health.status\n if obj.status.health.message ~= nil then\n hs.message = obj.status.health.message\n end\n end\n end\n return hs\n"` | |
| argo-cd.configs.cm."server.rbac.log.enforce.enable" | string | `nil` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.32 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.31"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.32"` | |
| argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
| argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
@ -54,7 +53,7 @@ Kubernetes: `>= 1.30.0-0`
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
| argo-cd.global.image.tag | string | `"v2.14.9-1"` | |
| argo-cd.global.image.tag | string | `"v3.0.3"` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.global.networkPolicy.create | bool | `true` | |
| argo-cd.istio.enabled | bool | `false` | |


@ -4,6 +4,6 @@ condition: 'index .Values "argo-cd" "controller" "metrics" "enabled"'
folder: KubeZero
dashboards:
- name: ArgoCD
url: https://grafana.com/api/dashboards/14584/revisions/1/download
url: https://raw.githubusercontent.com/argoproj/argo-cd/refs/heads/master/examples/dashboard.json
tags:
- ArgoCD

File diff suppressed because one or more lines are too long


@ -25,9 +25,8 @@ spec:
automated:
prune: true
syncOptions:
- ServerSideApply=true
- ApplyOutOfSyncOnly=true
info:
- name: "Source:"
value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.31/"
value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.32/"
{{- end }}


@ -26,7 +26,7 @@ argo-events:
versions:
- version: 2.10.11
natsImage: nats:2.11.1-scratch
metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
metricsExporterImage: natsio/prometheus-nats-exporter:0.17.3
configReloaderImage: natsio/nats-server-config-reloader:0.14.1
startCommand: /nats-server
@ -38,7 +38,7 @@ argo-cd:
format: json
image:
repository: public.ecr.aws/zero-downtime/zdt-argocd
tag: v2.14.9-1
tag: v3.0.3
networkPolicy:
create: true
@ -49,8 +49,8 @@ argo-cd:
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
cm:
ui.bannercontent: "KubeZero v1.31 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.31"
ui.bannercontent: "KubeZero v1.32 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.32"
ui.bannerpermanent: "true"
ui.bannerposition: "bottom"
@ -59,9 +59,9 @@ argo-cd:
timeout.reconciliation: 300s
application.resourceTrackingMethod: annotation
installationID: "KubeZero-ArgoCD"
application.instanceLabelKey: Null
server.rbac.log.enforce.enable: Null
resource.customizations: |
argoproj.io/Application:
@ -89,7 +89,6 @@ argo-cd:
extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
params:
controller.resource.health.persist: "false"
controller.diff.server.side: "true"
controller.sync.timeout.seconds: 1800


@ -289,7 +289,7 @@ trivy:
#tag: 0.57.0
persistence:
enabled: true
size: 1Gi
size: 2Gi
rbac:
create: false


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-falco
description: Falco Container Security and Audit components
type: application
version: 0.1.2
version: 0.1.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,7 +16,7 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: falco
version: 4.2.5
version: 5.0.0
repository: https://falcosecurity.github.io/charts
condition: k8saudit.enabled
alias: k8saudit


@ -42,17 +42,3 @@ helm.sh/chart: {{ include "kubezero-lib.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}
{{- /*
kubezero-lib.util.merge will merge two YAML templates and output the result.
This takes an array of three values:
- the top context
- the template name of the overrides (destination)
- the template name of the base (source)
*/ -}}
{{- define "kubezero-lib.util.merge" -}}
{{- $top := first . -}}
{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
{{- toYaml (merge $overrides $tpl) -}}
{{- end -}}


@ -12,7 +12,7 @@ kube-prometheus-stack:
kubeStateMetrics:
enabled: true
kubeProxy:
enabled: true
enabled: false
kubeEtcd:
enabled: true


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.3.11
version: 0.3.12
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -17,7 +17,7 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: nats
version: 1.3.3
version: 1.3.7
repository: https://nats-io.github.io/k8s/helm/charts/
condition: nats.enabled
- name: rabbitmq


@ -1,6 +1,6 @@
# kubezero-mq
![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.3.12](https://img.shields.io/badge/Version-0.3.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@ -20,7 +20,7 @@ Kubernetes: `>= 1.26.0`
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.7 |
## Values
@ -32,6 +32,8 @@ Kubernetes: `>= 1.26.0`
| nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| nats.mqtt.enabled | bool | `false` | |
| nats.natsBox.enabled | bool | `false` | |
| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".maxSkew | int | `1` | |
| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".whenUnsatisfiable | string | `"DoNotSchedule"` | |
| nats.promExporter.enabled | bool | `false` | |
| nats.promExporter.podMonitor.enabled | bool | `false` | |
| rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |


@ -1,7 +1,7 @@
configmap: grafana-dashboards-nats
condition: '.Values.nats.exporter.serviceMonitor.enabled'
condition: '.Values.nats.promExporter.podMonitor.enabled'
gzip: true
# folder:
# folder:
dashboards:
- name: nats
url: https://grafana.com/api/dashboards/13707/revisions/1/download


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network
description: KubeZero umbrella chart for all things network
type: application
version: 0.5.8
version: 0.5.9
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -10,7 +10,6 @@ keywords:
- multus
- cilium
- aws-cni
- metallb
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@ -19,13 +18,9 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: cilium
version: 1.17.3
version: 1.17.4
repository: https://helm.cilium.io/
condition: cilium.enabled
- name: metallb
version: 0.14.9
repository: https://metallb.github.io/metallb
condition: metallb.enabled
- name: haproxy
version: 1.24.0
repository: https://haproxytech.github.io/helm-charts


@ -1,6 +1,6 @@
# kubezero-network
![Version: 0.5.8](https://img.shields.io/badge/Version-0.5.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.5.9](https://img.shields.io/badge/Version-0.5.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things network
@ -20,20 +20,22 @@ Kubernetes: `>= 1.30.0-0`
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
| https://helm.cilium.io/ | cilium | 1.17.3 |
| https://metallb.github.io/metallb | metallb | 0.14.9 |
| https://helm.cilium.io/ | cilium | 1.17.4 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| cilium.bpf.preallocateMaps | bool | `true` | |
| cilium.cgroup.autoMount.enabled | bool | `false` | |
| cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` | |
| cilium.cluster.id | int | `240` | |
| cilium.cluster.name | string | `"default"` | |
| cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
| cilium.cni.exclusive | bool | `false` | |
| cilium.cni.exclusive | bool | `true` | |
| cilium.cni.iptablesRemoveAWSRules | bool | `false` | |
| cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
| cilium.dnsProxy.enableTransparentMode | bool | `true` | |
| cilium.enabled | bool | `false` | |
| cilium.envoy.enabled | bool | `false` | |
| cilium.hubble.enabled | bool | `false` | |
@ -45,6 +47,9 @@ Kubernetes: `>= 1.30.0-0`
| cilium.hubble.ui.enabled | bool | `false` | |
| cilium.image.useDigest | bool | `false` | |
| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
| cilium.k8sServiceHost | string | `""` | |
| cilium.k8sServicePort | int | `6443` | |
| cilium.kubeProxyReplacement | bool | `true` | |
| cilium.l7Proxy | bool | `false` | |
| cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cilium.operator.prometheus.enabled | bool | `false` | |
@ -54,12 +59,13 @@ Kubernetes: `>= 1.30.0-0`
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
| cilium.operator.tolerations[2].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[2].key | string | `"node.kubernetes.io/not-ready"` | |
| cilium.prometheus.enabled | bool | `false` | |
| cilium.prometheus.port | int | `9091` | |
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
| cilium.resources.limits.memory | string | `"1Gi"` | |
| cilium.resources.requests.cpu | string | `"10m"` | |
| cilium.resources.requests.memory | string | `"160Mi"` | |
| cilium.resources.requests.cpu | string | `"50m"` | |
| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.routingMode | string | `"tunnel"` | |
| cilium.sysctlfix.enabled | bool | `false` | |
| cilium.tunnelProtocol | string | `"geneve"` | |
@ -107,11 +113,6 @@ Kubernetes: `>= 1.30.0-0`
| haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
| haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
| haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| metallb.enabled | bool | `false` | |
| metallb.ipAddressPools | list | `[]` | |
| multus.clusterNetwork | string | `"cilium"` | |
| multus.defaultNetworks | list | `[]` | |
| multus.enabled | bool | `false` | |


@ -1,27 +0,0 @@
{{- if .Values.metallb.enabled }}
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: l2advertisement1
namespace: kube-system
spec:
ipAddressPools:
{{- range $key, $val := .Values.metallb.ipAddressPools }}
{{- if eq $val.protocol "layer2" }}
- {{ $val.name }}
{{- end }}
{{- end }}
---
{{- range $key, $val := .Values.metallb.ipAddressPools }}
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: {{ $val.name }}
namespace: kube-system
spec:
addresses:
{{- $val.addresses | toYaml | nindent 4 }}
{{- end }}
---
{{- end }}


@ -1,19 +1,3 @@
metallb:
enabled: false
controller:
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
ipAddressPools: []
#- name: my-ip-space
# protocol: layer2
# addresses:
# - 192.168.42.0/24
multus:
enabled: false
image:
@ -33,17 +17,18 @@ cilium:
resources:
requests:
cpu: 10m
memory: 160Mi
limits:
memory: 1Gi
# cpu: 4000m
cpu: 50m
memory: 256Mi
# limits:
# memory: 1Gi
# cpu: 4000m
cni:
binPath: "/usr/libexec/cni"
logFile: /var/log/cilium-cni.log
#-- Ensure this is false if multus is enabled
exclusive: false
exclusive: true
iptablesRemoveAWSRules: false
cluster:
# This should match the second octet of clusterPoolIPv4PodCIDRList
@ -63,13 +48,32 @@ cilium:
enabled: false
#rollOutCiliumPods: true
kubeProxyReplacement: true
dnsProxy:
enableTransparentMode: true
# For LB support via L2announcement or BGP - on-prem only
# l2announcements:
# enabled: true
# Not needed normally
# externalIPs:
# enabled: true
k8sServiceHost: ""
k8sServicePort: 6443
# k8s:
# # This has to be set to the DNS name of all API servers
# # For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
# apiServerURLs: ""
cgroup:
autoMount:
enabled: false
hostRoot: "/sys/fs/cgroup"
bpf:
preallocateMaps: true
# we need biDirectional so use helm init-container
#bpf:
# autoMount:
# enabled: false
@ -91,9 +95,11 @@ cilium:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
# the operator removes the taints,
# so we need to break chicken egg on single controller
# so we need to break chicken egg
- key: node.cilium.io/agent-not-ready
effect: NoSchedule
- key: node.kubernetes.io/not-ready
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
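With kube-proxy removed, service handling relies on Cilium's kube-proxy replacement being active on every node; a quick check, assuming the chart's default DaemonSet name cilium in kube-system (the in-pod binary is cilium-dbg on current images, plain cilium on older ones):

# expect "KubeProxyReplacement: True" in the agent status output
kubectl -n kube-system exec ds/cilium -- cilium-dbg status | grep -i kubeproxyreplacement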


@ -31,7 +31,7 @@ dependencies:
repository: "oci://quay.io/strimzi-helm"
condition: strimzi-kafka-operator.enabled
- name: rabbitmq-cluster-operator
version: 4.4.11
version: 4.4.15
repository: https://charts.bitnami.com/bitnami
condition: rabbitmq-cluster-operator.enabled
kubeVersion: ">= 1.30.0-0"


@ -9,10 +9,11 @@ metadata:
namespace: argocd
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
{{- with ( index .Values $name "annotations" ) }}
annotations:
argocd.argoproj.io/sync-options: Replace=true
{{- with ( index .Values $name "annotations" ) }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
{{- if not ( index .Values $name "retain" ) }}
finalizers:
- resources-finalizer.argocd.argoproj.io
@ -27,7 +28,7 @@ spec:
helm:
skipTests: true
valuesObject:
{{- include (print $name "-values") $ | nindent 8 }}
{{- toYaml (merge (omit (index .Values $name) "enabled" "namespace" "retain" "targetRevision") (fromYaml (include (print $name "-values") $ ))) | nindent 8 }}
destination:
server: "https://kubernetes.default.svc"
@ -38,7 +39,6 @@ spec:
automated:
prune: true
syncOptions:
- ServerSideApply=true
- CreateNamespace=true
- ApplyOutOfSyncOnly=true
info:


@ -67,7 +67,7 @@ gateway:
gatewayProtocol: HTTPS
tls:
mode: SIMPLE
{{- with index .Values "istio-ingress" "gateway" "service" "ports" }}
{{- with index .Values "istio-ingress" "gateway" "service" "extraPorts" }}
{{- toYaml . | nindent 4 }}
{{- end }}
@ -93,7 +93,6 @@ certificates:
{{- toYaml $cert.dnsNames | nindent 4 }}
{{- end }}
{{- end }}
proxyProtocol: {{ default true (index .Values "istio-ingress" "proxyProtocol") }}
{{- with (index .Values "istio-ingress" "hardening") }}
hardening:
{{- toYaml . | nindent 2 }}


@ -64,7 +64,7 @@ gateway:
gatewayProtocol: HTTPS
tls:
mode: SIMPLE
{{- with index .Values "istio-private-ingress" "gateway" "service" "ports" }}
{{- with index .Values "istio-private-ingress" "gateway" "service" "extraPorts" }}
{{- toYaml . | nindent 4 }}
{{- end }}
@ -88,7 +88,6 @@ certificates:
dnsNames:
{{- toYaml $cert.dnsNames | nindent 4 }}
{{- end }}
proxyProtocol: {{ default true (index .Values "istio-private-ingress" "proxyProtocol") }}
{{- with (index .Values "istio-private-ingress" "hardening") }}
hardening:
{{- toYaml . | nindent 2 }}


@ -1,30 +1,22 @@
{{- define "network-values" }}
multus:
enabled: true
clusterNetwork: "cilium"
# {{- if eq .Values.global.platform "aws" }}
# image:
# pullPolicy: Never
# {{- end }}
cilium:
enabled: true
k8sServiceHost: {{ .Values.global.apiServerUrl }}
# {{- if eq .Values.global.platform "aws" }}
# image:
# pullPolicy: Never
# {{- end }}
# k8s:
# apiServerURLs: "https://{{ .Values.global.apiServerUrl }}"
cluster:
name: {{ .Values.global.clusterName }}
{{- with .Values.network.cilium.cluster.id }}
id: {{ . }}
{{- with ((.Values.network.cilium).cluster).id }}
ipam:
operator:
clusterPoolIPv4PodCIDRList:
- 10.{{ . }}.0.0/16
{{- end }}
{{- end }}
prometheus:
enabled: {{ .Values.metrics.enabled }}
@ -40,11 +32,6 @@ cilium:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
{{- with .Values.network.metallb }}
metallb:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.network.haproxy }}
haproxy:
{{- toYaml . | nindent 2 }}


@ -61,8 +61,10 @@ opensearch:
{{- toYaml . | nindent 2 }}
{{- end }}
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
# Disabled until upstream made up their mind
# https://github.com/opensearch-project/technical-steering/issues/35
# serviceMonitor:
# enabled: {{ .Values.metrics.enabled }}
{{- end }}
{{- if index .Values "telemetry" "opensearch-dashboards" }}
@ -71,8 +73,10 @@ opensearch-dashboards:
{{- toYaml . | nindent 2 }}
{{- end }}
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
# Disabled until upstream made up their mind
# https://github.com/opensearch-project/technical-steering/issues/35
# serviceMonitor:
# enabled: {{ .Values.metrics.enabled }}
{{- end }}
{{- end }}


@ -1,5 +1,6 @@
global:
clusterName: zdt-trial-cluster
apiServerUrl: localhost:6443
# platform: aws (kubeadm, default), gke, or nocloud
platform: "aws"
@ -32,9 +33,9 @@ addons:
network:
enabled: true
retain: true
targetRevision: 0.5.8
targetRevision: 0.5.9
cilium:
cluster: {}
enabled: true
cert-manager:
enabled: false
@ -60,13 +61,13 @@ storage:
istio:
enabled: false
namespace: istio-system
targetRevision: 0.24.3
targetRevision: 0.24.6
istio-ingress:
enabled: false
chart: kubezero-istio-gateway
namespace: istio-ingress
targetRevision: 0.24.3
targetRevision: 0.24.6
gateway:
service: {}
@ -74,7 +75,7 @@ istio-private-ingress:
enabled: false
chart: kubezero-istio-gateway
namespace: istio-ingress
targetRevision: 0.24.3
targetRevision: 0.24.6
gateway:
service: {}
@ -87,7 +88,7 @@ falco:
telemetry:
enabled: false
namespace: telemetry
targetRevision: 0.4.1
targetRevision: 0.5.0
operators:
enabled: false
@ -117,7 +118,7 @@ logging:
argo:
enabled: false
namespace: argocd
targetRevision: 0.3.2
targetRevision: 0.4.0
argo-cd:
enabled: false
istio: