Compare commits


1 Commit

Author SHA1 Message Date
3075ae8a3a chore(deps): update kubezero-cache-dependencies 2025-05-31 03:03:24 +00:00
21 changed files with 125 additions and 69 deletions

View File

@@ -251,7 +251,7 @@ control_plane_node() {
done
# see if we are a former member and remove our former self if so
MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
[ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
# flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
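For context, a sketch of what that extraction pipeline does, assuming the default comma-separated `etcdctl member list` output with the member ID in the first field (node name and addresses below are illustrative only):

# etcdctl member list --endpoints=$etcd_endpoints   (hypothetical output)
#   8e9e05c52164694d, started, etcd-node-1, https://10.0.1.5:2380, https://10.0.1.5:2379, false
# grep $ETCD_NODENAME  -> keeps only this node's row
# awk '{print $1}'     -> "8e9e05c52164694d,"  (first field, trailing comma still attached)
# sed -e 's/,$//'      -> "8e9e05c52164694d"   (the ID passed to `etcdctl member remove`)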

View File

@@ -83,9 +83,6 @@ function get_kubezero_secret() {
function ensure_kubezero_secret_key() {
local ns=$1
local secret=$2
local secret="$(kubectl get secret -n $ns $secret -o yaml)"
local key
local val
@@ -267,11 +264,6 @@ function _helm() {
crds
elif [ $action == "dryrun" ]; then
cat $WORKDIR/values.yaml
render
cat $WORKDIR/helm.yaml
elif [ $action == "apply" -o $action == "replace" ]; then
echo "using values to $action of module $module: "
cat $WORKDIR/values.yaml

View File

@@ -10,14 +10,7 @@ def migrate(values):
# 1.32
try:
values["istio-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-ingress"]["gateway"]["service"]["ports"]
values["istio-ingress"]["gateway"]["service"].pop("ports")
except KeyError:
pass
try:
values["istio-private-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-private-ingress"]["gateway"]["service"]["ports"]
values["istio-private-ingress"]["gateway"]["service"].pop("ports")
except KeyError:
pass
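The intent of that 1.32 migration step, shown as a hypothetical values fragment (key names taken from the code above; the port entry is invented for illustration, and the same rename applies to istio-private-ingress):

istio-ingress:
  gateway:
    service:
      ports:          # renamed to extraPorts by migrate(); the list contents are left untouched
      - name: mqtt    # illustrative entry
        port: 1883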

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.8.15
version: 0.8.14
appVersion: v1.31
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -34,7 +34,7 @@ dependencies:
repository: https://nvidia.github.io/k8s-device-plugin
condition: nvidia-device-plugin.enabled
- name: neuron-helm-chart
version: 1.1.2
version: 1.1.1
# https://github.com/aws-neuron/neuron-helm-charts/tree/main/charts/neuron-helm-chart
repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
condition: neuron-helm-chart.enabled
@@ -43,7 +43,7 @@ dependencies:
repository: https://bitnami-labs.github.io/sealed-secrets
condition: sealed-secrets.enabled
- name: aws-node-termination-handler
version: 0.27.1
version: 0.27.0
repository: "oci://public.ecr.aws/aws-ec2/helm"
condition: aws-node-termination-handler.enabled
- name: aws-eks-asg-rolling-update-handler

View File

@@ -26,7 +26,7 @@ argo-events:
versions:
- version: 2.10.11
natsImage: nats:2.11.1-scratch
metricsExporterImage: natsio/prometheus-nats-exporter:0.17.3
metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
configReloaderImage: natsio/nats-server-config-reloader:0.14.1
startCommand: /nats-server

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cache
description: KubeZero Cache module
type: application
version: 0.1.1
version: 0.1.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -17,11 +17,11 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: redis
version: 20.11.5
version: 21.1.11
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
- name: redis-cluster
version: 11.5.0
version: 12.0.7
repository: https://charts.bitnami.com/bitnami
condition: redis-cluster.enabled

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-falco
description: Falco Container Security and Audit components
type: application
version: 0.1.3
version: 0.1.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -16,7 +16,7 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: falco
version: 5.0.0
version: 4.2.5
repository: https://falcosecurity.github.io/charts
condition: k8saudit.enabled
alias: k8saudit

View File

@@ -42,3 +42,17 @@ helm.sh/chart: {{ include "kubezero-lib.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}
{{- /*
kubezero-lib.util.merge will merge two YAML templates and output the result.
This takes an array of three values:
- the top context
- the template name of the overrides (destination)
- the template name of the base (source)
*/ -}}
{{- define "kubezero-lib.util.merge" -}}
{{- $top := first . -}}
{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
{{- toYaml (merge $overrides $tpl) -}}
{{- end -}}
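A minimal usage sketch for this helper (the two template names here are hypothetical, not defined in this chart): pass the current context plus the override and base template names as a three-element list; keys from the overrides template take precedence in the merged YAML output:

{{- include "kubezero-lib.util.merge" (list . "mychart.deployment.overrides" "kubezero-lib.deployment.base") }}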

View File

@@ -12,7 +12,7 @@ kube-prometheus-stack:
kubeStateMetrics:
enabled: true
kubeProxy:
enabled: false
enabled: true
kubeEtcd:
enabled: true

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.3.12
version: 0.3.11
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -17,7 +17,7 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: nats
version: 1.3.7
version: 1.3.3
repository: https://nats-io.github.io/k8s/helm/charts/
condition: nats.enabled
- name: rabbitmq

View File

@@ -1,6 +1,6 @@
# kubezero-mq
![Version: 0.3.12](https://img.shields.io/badge/Version-0.3.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@@ -20,7 +20,7 @@ Kubernetes: `>= 1.26.0`
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.7 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
## Values
@@ -32,8 +32,6 @@ Kubernetes: `>= 1.26.0`
| nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| nats.mqtt.enabled | bool | `false` | |
| nats.natsBox.enabled | bool | `false` | |
| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".maxSkew | int | `1` | |
| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".whenUnsatisfiable | string | `"DoNotSchedule"` | |
| nats.promExporter.enabled | bool | `false` | |
| nats.promExporter.podMonitor.enabled | bool | `false` | |
| rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |

View File

@@ -1,5 +1,5 @@
configmap: grafana-dashboards-nats
condition: '.Values.nats.promExporter.podMonitor.enabled'
condition: '.Values.nats.exporter.serviceMonitor.enabled'
gzip: true
# folder:
dashboards:

View File

@@ -10,6 +10,7 @@ keywords:
- multus
- cilium
- aws-cni
- metallb
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@@ -21,6 +22,10 @@ dependencies:
version: 1.17.4
repository: https://helm.cilium.io/
condition: cilium.enabled
- name: metallb
version: 0.14.9
repository: https://metallb.github.io/metallb
condition: metallb.enabled
- name: haproxy
version: 1.24.0
repository: https://haproxytech.github.io/helm-charts

View File

@@ -21,21 +21,19 @@ Kubernetes: `>= 1.30.0-0`
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
| https://helm.cilium.io/ | cilium | 1.17.4 |
| https://metallb.github.io/metallb | metallb | 0.14.9 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| cilium.bpf.preallocateMaps | bool | `true` | |
| cilium.cgroup.autoMount.enabled | bool | `false` | |
| cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` | |
| cilium.cluster.id | int | `240` | |
| cilium.cluster.name | string | `"default"` | |
| cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
| cilium.cni.exclusive | bool | `true` | |
| cilium.cni.iptablesRemoveAWSRules | bool | `false` | |
| cilium.cni.exclusive | bool | `false` | |
| cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
| cilium.dnsProxy.enableTransparentMode | bool | `true` | |
| cilium.enabled | bool | `false` | |
| cilium.envoy.enabled | bool | `false` | |
| cilium.hubble.enabled | bool | `false` | |
@@ -47,8 +45,7 @@ Kubernetes: `>= 1.30.0-0`
| cilium.hubble.ui.enabled | bool | `false` | |
| cilium.image.useDigest | bool | `false` | |
| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
| cilium.k8sServiceHost | string | `""` | |
| cilium.k8sServicePort | int | `6443` | |
| cilium.k8s.apiServerURLs | string | `""` | |
| cilium.kubeProxyReplacement | bool | `true` | |
| cilium.l7Proxy | bool | `false` | |
| cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
@@ -59,13 +56,12 @@ Kubernetes: `>= 1.30.0-0`
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
| cilium.operator.tolerations[2].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[2].key | string | `"node.kubernetes.io/not-ready"` | |
| cilium.prometheus.enabled | bool | `false` | |
| cilium.prometheus.port | int | `9091` | |
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
| cilium.resources.requests.cpu | string | `"50m"` | |
| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.resources.limits.memory | string | `"1Gi"` | |
| cilium.resources.requests.cpu | string | `"10m"` | |
| cilium.resources.requests.memory | string | `"160Mi"` | |
| cilium.routingMode | string | `"tunnel"` | |
| cilium.sysctlfix.enabled | bool | `false` | |
| cilium.tunnelProtocol | string | `"geneve"` | |
@@ -113,6 +109,11 @@ Kubernetes: `>= 1.30.0-0`
| haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
| haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
| haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| metallb.enabled | bool | `false` | |
| metallb.ipAddressPools | list | `[]` | |
| multus.clusterNetwork | string | `"cilium"` | |
| multus.defaultNetworks | list | `[]` | |
| multus.enabled | bool | `false` | |

View File

@@ -0,0 +1,27 @@
{{- if .Values.metallb.enabled }}
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: l2advertisement1
namespace: kube-system
spec:
ipAddressPools:
{{- range $key, $val := .Values.metallb.ipAddressPools }}
{{- if eq $val.protocol "layer2" }}
- {{ $val.name }}
{{- end }}
{{- end }}
---
{{- range $key, $val := .Values.metallb.ipAddressPools }}
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: {{ $val.name }}
namespace: kube-system
spec:
addresses:
{{- $val.addresses | toYaml | nindent 4 }}
{{- end }}
---
{{- end }}
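A hypothetical values fragment for this template, mirroring the commented example in the chart's values.yaml further down; each entry under ipAddressPools is rendered as an IPAddressPool in kube-system, and layer2 pools are additionally listed in the L2Advertisement above:

metallb:
  enabled: true
  ipAddressPools:
  - name: my-ip-space
    protocol: layer2
    addresses:
    - 192.168.42.0/24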

View File

@@ -1,3 +1,19 @@
metallb:
enabled: false
controller:
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
ipAddressPools: []
#- name: my-ip-space
# protocol: layer2
# addresses:
# - 192.168.42.0/24
multus:
enabled: false
image:
@@ -17,11 +33,11 @@ cilium:
resources:
requests:
cpu: 50m
memory: 256Mi
# limits:
# memory: 1Gi
# cpu: 4000m
cpu: 10m
memory: 160Mi
limits:
memory: 1Gi
# cpu: 4000m
cni:
binPath: "/usr/libexec/cni"
@@ -52,13 +68,6 @@ cilium:
dnsProxy:
enableTransparentMode: true
# For LB support via L2announcement or BGP - on-prem only
# l2announcements:
# enabled: true
# Not needed normally
# externalIPs:
# enabled: true
k8sServiceHost: ""
k8sServicePort: 6443
# k8s:
@@ -71,9 +80,8 @@ cilium:
enabled: false
hostRoot: "/sys/fs/cgroup"
bpf:
preallocateMaps: true
# we need biDirectional so use helm init-container
#bpf:
# autoMount:
# enabled: false

View File

@@ -28,7 +28,7 @@ spec:
helm:
skipTests: true
valuesObject:
{{- toYaml (merge (omit (index .Values $name) "enabled" "namespace" "retain" "targetRevision") (fromYaml (include (print $name "-values") $ ))) | nindent 8 }}
{{- include (print $name "-values") $ | nindent 8 }}
destination:
server: "https://kubernetes.default.svc"

View File

@@ -67,7 +67,7 @@ gateway:
gatewayProtocol: HTTPS
tls:
mode: SIMPLE
{{- with index .Values "istio-ingress" "gateway" "service" "extraPorts" }}
{{- with index .Values "istio-ingress" "gateway" "service" "ports" }}
{{- toYaml . | nindent 4 }}
{{- end }}
@@ -93,6 +93,7 @@ certificates:
{{- toYaml $cert.dnsNames | nindent 4 }}
{{- end }}
{{- end }}
proxyProtocol: {{ default true (index .Values "istio-ingress" "proxyProtocol") }}
{{- with (index .Values "istio-ingress" "hardening") }}
hardening:
{{- toYaml . | nindent 2 }}

View File

@@ -64,7 +64,7 @@ gateway:
gatewayProtocol: HTTPS
tls:
mode: SIMPLE
{{- with index .Values "istio-private-ingress" "gateway" "service" "extraPorts" }}
{{- with index .Values "istio-private-ingress" "gateway" "service" "ports" }}
{{- toYaml . | nindent 4 }}
{{- end }}
@@ -88,6 +88,7 @@ certificates:
dnsNames:
{{- toYaml $cert.dnsNames | nindent 4 }}
{{- end }}
proxyProtocol: {{ default true (index .Values "istio-private-ingress" "proxyProtocol") }}
{{- with (index .Values "istio-private-ingress" "hardening") }}
hardening:
{{- toYaml . | nindent 2 }}

View File

@@ -1,17 +1,28 @@
{{- define "network-values" }}
cilium:
k8sServiceHost: {{ .Values.global.apiServerUrl }}
multus:
enabled: false
clusterNetwork: "cilium"
# {{- if eq .Values.global.platform "aws" }}
# image:
# pullPolicy: Never
# {{- end }}
cilium:
enabled: true
# {{- if eq .Values.global.platform "aws" }}
# image:
# pullPolicy: Never
# {{- end }}
k8sServiceHost: {{ .Values.global.apiServerUrl }}
# k8s:
# apiServerURLs: "https://{{ .Values.global.apiServerUrl }}"
cluster:
name: {{ .Values.global.clusterName }}
{{- with ((.Values.network.cilium).cluster).id }}
{{- with .Values.network.cilium.cluster.id }}
id: {{ . }}
ipam:
operator:
clusterPoolIPv4PodCIDRList:
@@ -32,6 +43,11 @@ cilium:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
{{- with .Values.network.metallb }}
metallb:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.network.haproxy }}
haproxy:
{{- toYaml . | nindent 2 }}

View File

@@ -35,7 +35,7 @@ network:
retain: true
targetRevision: 0.5.9
cilium:
enabled: true
cluster: {}
cert-manager:
enabled: false
@@ -61,13 +61,13 @@ storage:
istio:
enabled: false
namespace: istio-system
targetRevision: 0.24.6
targetRevision: 0.24.3
istio-ingress:
enabled: false
chart: kubezero-istio-gateway
namespace: istio-ingress
targetRevision: 0.24.6
targetRevision: 0.24.3
gateway:
service: {}
@@ -75,7 +75,7 @@ istio-private-ingress:
enabled: false
chart: kubezero-istio-gateway
namespace: istio-ingress
targetRevision: 0.24.6
targetRevision: 0.24.3
gateway:
service: {}