Compare commits

1 commit

a9833b1d0f  chore(deps): update helm release gateway to v1.26.1  (2025-05-30 03:02:21 +00:00)

50 changed files with 197 additions and 604 deletions

View File

@@ -2,13 +2,7 @@
 # All things BEFORE the first controller / control plane upgrade
 pre_control_plane_upgrade_cluster() {
-  if [ "$PLATFORM" != "gke" ];then
-    # patch multus DS to ONLY run pods on 1.31 controllers
-    kubectl patch ds kube-multus-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
-    # patch kube-proxy DS to ONLY run pods on 1.31 controllers
-    kubectl patch ds kube-proxy -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
-  fi
+  echo
 }
@@ -22,20 +16,7 @@ post_control_plane_upgrade_cluster() {
 pre_cluster_upgrade_final() {
   set +e
-  if [ "$PLATFORM" != "gke" ];then
-    # cleanup multus
-    kubectl delete clusterrolebinding multus
-    kubectl delete clusterrole multus
-    kubectl delete serviceaccount multus -n kube-system
-    kubectl delete cm multus-cni-config -n kube-system
-    kubectl delete ds kube-multus-ds -n kube-system
-    kubectl delete NetworkAttachmentDefinition cilium
-    kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io
-    # remove kube-proxy
-    kubectl -n kube-system delete ds kube-proxy
-    kubectl -n kube-system delete cm kube-proxy
-  fi
+  echo
   set -e
 }

View File

@@ -63,7 +63,7 @@ render_kubeadm() {
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster Kubelet; do
+  for f in Cluster KubeProxy Kubelet; do
     # echo "---" >> /etc/kubernetes/kubeadm.yaml
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done
@@ -169,7 +169,7 @@ kubeadm_upgrade() {
   else
     pre_cluster_upgrade_final
-    _kubeadm upgrade apply phase addon coredns $KUBE_VERSION
+    _kubeadm upgrade apply phase addon all $KUBE_VERSION
     post_cluster_upgrade_final
@@ -239,7 +239,7 @@ control_plane_node() {
   if [[ "$CMD" =~ ^(join)$ ]]; then
     # Delete any former self in case forseti did not delete yet
     kubectl delete node ${NODENAME} --wait=true || true
-    # Wait for all pods to be deleted otherwise we end up with stale pods
+    # Wait for all pods to be deleted otherwise we end up with stale pods eg. kube-proxy and all goes to ....
     kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}
     # get current running etcd pods for etcdctl commands
@@ -251,7 +251,7 @@ control_plane_node() {
   done
   # see if we are a former member and remove our former self if so
-  MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
+  MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
   [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
   # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
@@ -309,9 +309,8 @@ control_plane_node() {
   _kubeadm init phase mark-control-plane
   _kubeadm init phase kubelet-finalize all
-  # we skip kube-proxy
   if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
-    _kubeadm init phase addon coredns
+    _kubeadm init phase addon all
   fi
   post_kubeadm

View File

@@ -81,19 +81,15 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }

 function ensure_kubezero_secret_key() {
-  local ns=$1
-  local secret=$2
-
-  local secret="$(kubectl get secret -n $ns $secret -o yaml)"
-  local key
-  local val
+  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
+  local key=""
+  local val=""

-  for key in $1; do
-    val=$(echo $secret | yq ".data.\"$key\"")
+  for key in $@; do
+    val=$(echo "$secret" | yq ".data.\"$key\"")
     if [ "$val" == "null" ]; then
-      set_kubezero_secret $key ""
+      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
     fi
   done
 }
@@ -267,11 +263,6 @@ function _helm() {
     crds

-  elif [ $action == "dryrun" ]; then
-    cat $WORKDIR/values.yaml
-    render
-    cat $WORKDIR/helm.yaml
-
   elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml

View File

@@ -10,14 +10,7 @@ def migrate(values):
     # 1.32
     try:
-        values["istio-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-ingress"]["gateway"]["service"]["ports"]
-        values["istio-ingress"]["gateway"]["service"].pop("ports")
-    except KeyError:
         pass
-
-    try:
-        values["istio-private-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-private-ingress"]["gateway"]["service"]["ports"]
-        values["istio-private-ingress"]["gateway"]["service"].pop("ports")
     except KeyError:
         pass
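The removed migration steps rename the gateway service `ports` list to `extraPorts` for both ingress charts. A minimal sketch of the effect on a values fragment, assuming a hypothetical port entry (only the key names come from the diff):

```yaml
# Before migrate() runs (port entry is invented for illustration):
istio-ingress:
  gateway:
    service:
      ports:
        - name: http2
          port: 80

# After migrate() runs, the same list has simply moved to extraPorts:
istio-ingress:
  gateway:
    service:
      extraPorts:
        - name: http2
          port: 80
```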

View File

@@ -47,6 +47,7 @@ Kubernetes: `>= 1.32.0-0`
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
+- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 - https://github.com/awslabs/amazon-eks-ami

View File

@@ -22,6 +22,7 @@
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
+- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 - https://github.com/awslabs/amazon-eks-ami

View File

@@ -29,6 +29,12 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.

View File

@@ -6,8 +6,6 @@ featureGates:
   ControlPlaneKubeletLocalMode: true
   NodeLocalCRISocket: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
-proxy:
-  disabled: true
 networking:
   podSubnet: 10.244.0.0/16
 etcd:

View File

@@ -7,8 +7,6 @@ localAPIEndpoint:
 patches:
   directory: {{ . }}
 {{- end }}
-skipPhases:
-  - addon/kube-proxy
 nodeRegistration:
   criSocket: "unix:///run/containerd/containerd.sock"
   ignorePreflightErrors:

View File

@ -0,0 +1,10 @@
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
metricsBindAddress: "0.0.0.0:10249"
mode: "iptables"
logging:
format: json
iptables:
localhostNodePorts: false
#nodePortAddresses: primary
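This KubeProxyConfiguration is one of the documents that the `for f in Cluster KubeProxy Kubelet` loop above concatenates into `/etc/kubernetes/kubeadm.yaml`. A rough sketch of the assembled multi-document file, with the other documents abbreviated (their apiVersions and contents are not shown in this diff):

```yaml
# Sketch of the assembled /etc/kubernetes/kubeadm.yaml
# ... rendered ClusterConfiguration document ...
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: "0.0.0.0:10249"   # bind metrics on all interfaces so Prometheus can scrape port 10249
mode: "iptables"
logging:
  format: json
iptables:
  localhostNodePorts: false
---
# ... rendered KubeletConfiguration document ...
```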

View File

@@ -3,6 +3,12 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.
@@ -108,7 +114,7 @@ rules:
   # Get responses can be large; skip them.
   - level: Request
     verbs: ["get", "list", "watch"]
-    resources:
+    resources:
       - group: "" # core
       - group: "admissionregistration.k8s.io"
       - group: "apiextensions.k8s.io"
@@ -131,7 +137,7 @@ rules:
       - "RequestReceived"
   # Default level for known APIs
   - level: RequestResponse
-    resources:
+    resources:
       - group: "" # core
       - group: "admissionregistration.k8s.io"
       - group: "apiextensions.k8s.io"

View File

@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.8.15
-appVersion: v1.32
+version: 0.8.14
+appVersion: v1.31
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -34,7 +34,7 @@ dependencies:
     repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
   - name: neuron-helm-chart
-    version: 1.1.2
+    version: 1.1.1
     # https://github.com/aws-neuron/neuron-helm-charts/tree/main/charts/neuron-helm-chart
     repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
     condition: neuron-helm-chart.enabled
@@ -43,7 +43,7 @@ dependencies:
     repository: https://bitnami-labs.github.io/sealed-secrets
     condition: sealed-secrets.enabled
   - name: aws-node-termination-handler
-    version: 0.27.1
+    version: 0.27.0
     repository: "oci://public.ecr.aws/aws-ec2/helm"
     condition: aws-node-termination-handler.enabled
   - name: aws-eks-asg-rolling-update-handler
@@ -54,4 +54,4 @@ dependencies:
     version: 0.3.2
     repository: https://caas-team.github.io/helm-charts/
     condition: py-kube-downscaler.enabled
-kubeVersion: ">= 1.31.0-0"
+kubeVersion: ">= 1.30.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-addons
-![Version: 0.8.15](https://img.shields.io/badge/Version-0.8.15-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.32](https://img.shields.io/badge/AppVersion-v1.32-informational?style=flat-square)
+![Version: 0.8.14](https://img.shields.io/badge/Version-0.8.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.31](https://img.shields.io/badge/AppVersion-v1.31-informational?style=flat-square)

 KubeZero umbrella chart for various optional cluster addons
@@ -14,7 +14,7 @@ KubeZero umbrella chart for various optional cluster addons
 ## Requirements

-Kubernetes: `>= 1.31.0-0`
+Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
@@ -24,8 +24,8 @@ Kubernetes: `>= 1.31.0-0`
 | https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.46.6 |
 | https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.17.1 |
 | https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
-| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.27.1 |
-| oci://public.ecr.aws/neuron | neuron-helm-chart | 1.1.2 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.27.0 |
+| oci://public.ecr.aws/neuron | neuron-helm-chart | 1.1.1 |

 # MetalLB

View File

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.25.1
+appVersion: 1.25.0
 description: A Helm chart for the AWS Node Termination Handler.
 home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -21,4 +21,4 @@ name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
 type: application
-version: 0.27.1
+version: 0.27.0

View File

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.4.1
+version: 0.4.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -22,11 +22,11 @@ dependencies:
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 8.0.14
+    version: 8.0.9
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-image-updater
-    version: 0.12.2
+    version: 0.12.1
     repository: https://argoproj.github.io/argo-helm
     condition: argocd-image-updater.enabled
 kubeVersion: ">= 1.30.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-argo
-![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square)
+![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square)

 KubeZero Argo - Events, Workflow, CD
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.30.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 8.0.14 |
+| https://argoproj.github.io/argo-helm | argo-cd | 8.0.9 |
 | https://argoproj.github.io/argo-helm | argo-events | 2.4.15 |
-| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.2 |
+| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.1 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |

 ## Values
@@ -53,7 +53,7 @@ Kubernetes: `>= 1.30.0-0`
 | argo-cd.dex.enabled | bool | `false` | |
 | argo-cd.enabled | bool | `false` | |
 | argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
-| argo-cd.global.image.tag | string | `"v3.0.5"` | |
+| argo-cd.global.image.tag | string | `"v3.0.3"` | |
 | argo-cd.global.logging.format | string | `"json"` | |
 | argo-cd.global.networkPolicy.create | bool | `true` | |
 | argo-cd.istio.enabled | bool | `false` | |
@@ -83,8 +83,8 @@ Kubernetes: `>= 1.30.0-0`
 | argo-events.configs.jetstream.streamConfig.maxMsgs | int | `1000000` | Maximum number of messages before expiring oldest message |
 | argo-events.configs.jetstream.streamConfig.replicas | int | `1` | Number of replicas, defaults to 3 and requires minimal 3 |
 | argo-events.configs.jetstream.versions[0].configReloaderImage | string | `"natsio/nats-server-config-reloader:0.14.1"` | |
-| argo-events.configs.jetstream.versions[0].metricsExporterImage | string | `"natsio/prometheus-nats-exporter:0.17.3"` | |
-| argo-events.configs.jetstream.versions[0].natsImage | string | `"nats:2.11.4-scratch"` | |
+| argo-events.configs.jetstream.versions[0].metricsExporterImage | string | `"natsio/prometheus-nats-exporter:0.17.2"` | |
+| argo-events.configs.jetstream.versions[0].natsImage | string | `"nats:2.11.1-scratch"` | |
 | argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
 | argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
 | argo-events.enabled | bool | `false` | |

View File

@@ -26,7 +26,6 @@ spec:
       prune: true
     syncOptions:
       - ApplyOutOfSyncOnly=true
-      - ServerSideApply=true
   info:
     - name: "Source:"
       value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.32/"

View File

@@ -25,9 +25,9 @@ argo-events:
     # do NOT use -alpine tag as the entrypoint differs
     versions:
       - version: 2.10.11
-        natsImage: nats:2.11.4-scratch
-        metricsExporterImage: natsio/prometheus-nats-exporter:0.17.3
-        configReloaderImage: natsio/nats-server-config-reloader:0.18.0
+        natsImage: nats:2.11.1-scratch
+        metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
+        configReloaderImage: natsio/nats-server-config-reloader:0.14.1
         startCommand: /nats-server

 argo-cd:
@@ -38,7 +38,7 @@ argo-cd:
       format: json
     image:
       repository: public.ecr.aws/zero-downtime/zdt-argocd
-      tag: v3.0.5
+      tag: v3.0.3
     networkPolicy:
       create: true
@@ -63,10 +63,6 @@ argo-cd:
       application.instanceLabelKey: Null
       server.rbac.log.enforce.enable: Null
-      resource.compareoptions: |
-        # disables status field diffing in specified resource types
-        ignoreAggregatedRoles: true
       resource.customizations: |
         argoproj.io/Application:
           health.lua: |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-falco
 description: Falco Container Security and Audit components
 type: application
-version: 0.1.3
+version: 0.1.2
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: falco
-    version: 5.0.0
+    version: 4.2.5
     repository: https://falcosecurity.github.io/charts
     condition: k8saudit.enabled
     alias: k8saudit

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-istio-gateway
 description: KubeZero Umbrella Chart for Istio gateways
 type: application
-version: 0.24.6
+version: 0.24.7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,6 +17,6 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: gateway
-    version: 1.24.6
+    version: 1.26.1
     repository: https://istio-release.storage.googleapis.com/charts
 kubeVersion: ">= 1.30.0-0"

View File

@@ -42,3 +42,17 @@ helm.sh/chart: {{ include "kubezero-lib.chart" . }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/part-of: kubezero
 {{- end -}}
+
+{{- /*
+kubezero-lib.util.merge will merge two YAML templates and output the result.
+This takes an array of three values:
+- the top context
+- the template name of the overrides (destination)
+- the template name of the base (source)
+*/ -}}
+{{- define "kubezero-lib.util.merge" -}}
+{{- $top := first . -}}
+{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
+{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
+{{- toYaml (merge $overrides $tpl) -}}
+{{- end -}}
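A minimal sketch of how such a merge helper is typically invoked from another template. The template names "mychart.deployment" and "mychart.deployment.overrides" are made up for illustration and are not part of this commit; only "kubezero-lib.util.merge" comes from the diff above:

```yaml
# Hypothetical base and override templates:
{{- define "mychart.deployment" -}}
metadata:
  labels:
    app: base
spec:
  replicas: 1
{{- end -}}

{{- define "mychart.deployment.overrides" -}}
spec:
  replicas: 3
{{- end -}}

# Renders the base template, then lays the overrides on top
# (keys from the overrides template win, so replicas becomes 3):
{{ include "kubezero-lib.util.merge" (list . "mychart.deployment.overrides" "mychart.deployment") }}
```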

View File

@@ -12,7 +12,7 @@ kube-prometheus-stack:
   kubeStateMetrics:
     enabled: true
   kubeProxy:
-    enabled: false
+    enabled: true
   kubeEtcd:
     enabled: true

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-mq
 description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 type: application
-version: 0.3.12
+version: 0.3.11
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,7 +17,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
-    version: 1.3.7
+    version: 1.3.3
     repository: https://nats-io.github.io/k8s/helm/charts/
     condition: nats.enabled
   - name: rabbitmq

View File

@@ -1,6 +1,6 @@
 # kubezero-mq
-![Version: 0.3.12](https://img.shields.io/badge/Version-0.3.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@@ -20,7 +20,7 @@ Kubernetes: `>= 1.26.0`
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.7 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |

 ## Values
@@ -32,8 +32,6 @@ Kubernetes: `>= 1.26.0`
 | nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | nats.mqtt.enabled | bool | `false` | |
 | nats.natsBox.enabled | bool | `false` | |
-| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".maxSkew | int | `1` | |
-| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".whenUnsatisfiable | string | `"DoNotSchedule"` | |
 | nats.promExporter.enabled | bool | `false` | |
 | nats.promExporter.podMonitor.enabled | bool | `false` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |

View File

@@ -1,7 +1,7 @@
 configmap: grafana-dashboards-nats
-condition: '.Values.nats.promExporter.podMonitor.enabled'
+condition: '.Values.nats.exporter.serviceMonitor.enabled'
 gzip: true
 # folder:
 dashboards:
   - name: nats
     url: https://grafana.com/api/dashboards/13707/revisions/1/download

View File

@@ -3,10 +3,6 @@ nats:
   enabled: false

   config:
-    cluster:
-      routeURLs:
-        useFQDN: true
-
     jetstream:
       enabled: true

View File

@@ -10,6 +10,7 @@ keywords:
   - multus
   - cilium
   - aws-cni
+  - metallb
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
@@ -21,6 +22,10 @@ dependencies:
     version: 1.17.4
     repository: https://helm.cilium.io/
     condition: cilium.enabled
+  - name: metallb
+    version: 0.14.9
+    repository: https://metallb.github.io/metallb
+    condition: metallb.enabled
   - name: haproxy
     version: 1.24.0
     repository: https://haproxytech.github.io/helm-charts

View File

@@ -1,6 +1,6 @@
 # kubezero-network
-![Version: 0.5.9](https://img.shields.io/badge/Version-0.5.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.8](https://img.shields.io/badge/Version-0.5.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for all things network
@@ -20,22 +20,20 @@ Kubernetes: `>= 1.30.0-0`
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
-| https://helm.cilium.io/ | cilium | 1.17.4 |
+| https://helm.cilium.io/ | cilium | 1.17.3 |
+| https://metallb.github.io/metallb | metallb | 0.14.9 |

 ## Values

 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| cilium.bpf.preallocateMaps | bool | `true` | |
 | cilium.cgroup.autoMount.enabled | bool | `false` | |
 | cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` | |
 | cilium.cluster.id | int | `240` | |
 | cilium.cluster.name | string | `"default"` | |
 | cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
-| cilium.cni.exclusive | bool | `true` | |
-| cilium.cni.iptablesRemoveAWSRules | bool | `false` | |
+| cilium.cni.exclusive | bool | `false` | |
 | cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
-| cilium.dnsProxy.enableTransparentMode | bool | `true` | |
 | cilium.enabled | bool | `false` | |
 | cilium.envoy.enabled | bool | `false` | |
 | cilium.hubble.enabled | bool | `false` | |
@@ -47,9 +45,6 @@ Kubernetes: `>= 1.30.0-0`
 | cilium.hubble.ui.enabled | bool | `false` | |
 | cilium.image.useDigest | bool | `false` | |
 | cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
-| cilium.k8sServiceHost | string | `""` | |
-| cilium.k8sServicePort | int | `6443` | |
-| cilium.kubeProxyReplacement | bool | `true` | |
 | cilium.l7Proxy | bool | `false` | |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cilium.operator.prometheus.enabled | bool | `false` | |
@@ -59,13 +54,12 @@ Kubernetes: `>= 1.30.0-0`
 | cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
 | cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
-| cilium.operator.tolerations[2].effect | string | `"NoSchedule"` | |
-| cilium.operator.tolerations[2].key | string | `"node.kubernetes.io/not-ready"` | |
 | cilium.prometheus.enabled | bool | `false` | |
 | cilium.prometheus.port | int | `9091` | |
 | cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
-| cilium.resources.requests.cpu | string | `"50m"` | |
-| cilium.resources.requests.memory | string | `"256Mi"` | |
+| cilium.resources.limits.memory | string | `"1Gi"` | |
+| cilium.resources.requests.cpu | string | `"10m"` | |
+| cilium.resources.requests.memory | string | `"160Mi"` | |
 | cilium.routingMode | string | `"tunnel"` | |
 | cilium.sysctlfix.enabled | bool | `false` | |
 | cilium.tunnelProtocol | string | `"geneve"` | |
@@ -113,6 +107,11 @@ Kubernetes: `>= 1.30.0-0`
 | haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
 | haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
 | haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
+| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
+| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
+| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| metallb.enabled | bool | `false` | |
+| metallb.ipAddressPools | list | `[]` | |
 | multus.clusterNetwork | string | `"cilium"` | |
 | multus.defaultNetworks | list | `[]` | |
 | multus.enabled | bool | `false` | |

View File

@ -0,0 +1,27 @@
{{- if .Values.metallb.enabled }}
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: l2advertisement1
namespace: kube-system
spec:
ipAddressPools:
{{- range $key, $val := .Values.metallb.ipAddressPools }}
{{- if eq $val.protocol "layer2" }}
- {{ $val.name }}
{{- end }}
{{- end }}
---
{{- range $key, $val := .Values.metallb.ipAddressPools }}
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: {{ $val.name }}
namespace: kube-system
spec:
addresses:
{{- $val.addresses | toYaml | nindent 4 }}
{{- end }}
---
{{- end }}
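As a rough illustration of how this template consumes values, here is a hypothetical pool mirroring the commented example in values.yaml (pool name and CIDR are invented), followed by what the template above would roughly render:

```yaml
# Hypothetical values:
metallb:
  enabled: true
  ipAddressPools:
    - name: my-ip-space
      protocol: layer2
      addresses:
        - 192.168.42.0/24

# ...would render roughly into:
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2advertisement1
  namespace: kube-system
spec:
  ipAddressPools:
    - my-ip-space        # only pools with protocol "layer2" are listed
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: my-ip-space
  namespace: kube-system
spec:
  addresses:
    - 192.168.42.0/24
```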

View File

@@ -1,3 +1,19 @@
+metallb:
+  enabled: false
+
+  controller:
+    tolerations:
+      - key: node-role.kubernetes.io/control-plane
+        effect: NoSchedule
+    nodeSelector:
+      node-role.kubernetes.io/control-plane: ""
+
+  ipAddressPools: []
+  #- name: my-ip-space
+  #  protocol: layer2
+  #  addresses:
+  #    - 192.168.42.0/24
+
 multus:
   enabled: false
   image:
@@ -17,18 +33,17 @@ cilium:
   resources:
     requests:
-      cpu: 50m
-      memory: 256Mi
-    # limits:
-    #   memory: 1Gi
+      cpu: 10m
+      memory: 160Mi
+    limits:
+      memory: 1Gi
       # cpu: 4000m

   cni:
     binPath: "/usr/libexec/cni"
     logFile: /var/log/cilium-cni.log
     #-- Ensure this is false if multus is enabled
-    exclusive: true
-    iptablesRemoveAWSRules: false
+    exclusive: false

   cluster:
     # This should match the second octet of clusterPoolIPv4PodCIDRList
@@ -48,32 +63,13 @@ cilium:
     enabled: false
     #rollOutCiliumPods: true

-  kubeProxyReplacement: true
-
-  dnsProxy:
-    enableTransparentMode: true
-
-  # For LB support via L2announcement or BGP - on-prem only
-  # l2announcements:
-  #   enabled: true
-  # Not needed normally
-  # externalIPs:
-  #   enabled: true
-
-  k8sServiceHost: ""
-  k8sServicePort: 6443
-  # k8s:
-  #   # This has to be set to the DNS name of all API servers
-  #   # For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
-  #   apiServerURLs: ""
-
   cgroup:
     autoMount:
       enabled: false
     hostRoot: "/sys/fs/cgroup"
-  bpf:
-    preallocateMaps: true

   # we need biDirectional so use helm init-container
+  #bpf:
   #  autoMount:
   #    enabled: false
@@ -95,11 +91,9 @@ cilium:
       - key: node-role.kubernetes.io/control-plane
         effect: NoSchedule
       # the operator removes the taints,
-      # so we need to break chicken egg
+      # so we need to break chicken egg on single controller
       - key: node.cilium.io/agent-not-ready
         effect: NoSchedule
-      - key: node.kubernetes.io/not-ready
-        effect: NoSchedule
     nodeSelector:
       node-role.kubernetes.io/control-plane: ""

View File

@ -1,26 +0,0 @@
apiVersion: v2
name: kubezero-policy
description: KubeZero umbrella chart for Kyverno
type: application
version: 0.1.0
appVersion: v1.14
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- kyverno
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: kyverno
version: 3.4.2
repository: https://kyverno.github.io/kyverno/
condition: kyverno.enabled
- name: policies
version: 0.1.0
condition: policies.enabled
kubeVersion: ">= 1.30.0-0"

View File

@ -1,49 +0,0 @@
# kubezero-policy
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.14](https://img.shields.io/badge/AppVersion-v1.14-informational?style=flat-square)
KubeZero umbrella chart for Kyverno
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |
## Requirements
Kubernetes: `>= 1.30.0-0`
| Repository | Name | Version |
|------------|------|---------|
| | policies | 0.1.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://kyverno.github.io/kyverno/ | kyverno | 3.4.2 |
# Kyverno
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| kyverno.admissionController.revisionHistoryLimit | int | `2` | |
| kyverno.backgroundController.revisionHistoryLimit | int | `2` | |
| kyverno.cleanupController.rbac.clusterRole.extraResources[0].apiGroups[0] | string | `"postgresql.cnpg.io"` | |
| kyverno.cleanupController.rbac.clusterRole.extraResources[0].resources[0] | string | `"backups"` | |
| kyverno.cleanupController.rbac.clusterRole.extraResources[0].verbs[0] | string | `"delete"` | |
| kyverno.cleanupController.rbac.clusterRole.extraResources[0].verbs[1] | string | `"list"` | |
| kyverno.cleanupController.rbac.clusterRole.extraResources[0].verbs[2] | string | `"watch"` | |
| kyverno.cleanupController.revisionHistoryLimit | int | `2` | |
| kyverno.config.preserve | bool | `false` | |
| kyverno.config.webhookAnnotations."argocd.argoproj.io/installation-id" | string | `"KubeZero-ArgoCD"` | |
| kyverno.crds.migration.enabled | bool | `false` | |
| kyverno.enabled | bool | `false` | |
| kyverno.features.logging.format | string | `"json"` | |
| kyverno.grafana.enabled | bool | `false` | |
| kyverno.policyReportsCleanup.enabled | bool | `false` | |
| kyverno.reportsController.enabled | bool | `false` | |
| kyverno.reportsController.revisionHistoryLimit | int | `2` | |
| kyverno.webhooksCleanup.autoDeleteWebhooks.enabled | bool | `true` | |
| kyverno.webhooksCleanup.enabled | bool | `true` | |

View File

@ -1,18 +0,0 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
{{ template "chart.maintainersSection" . }}
{{ template "chart.sourcesSection" . }}
{{ template "chart.requirementsSection" . }}
# Kyverno
{{ template "chart.valuesSection" . }}

View File

@ -1,18 +0,0 @@
apiVersion: v2
name: policies
description: KubeZero collection of Kyverno policies
type: application
version: 0.1.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- kyverno
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
kubeVersion: ">= 1.30.0-0"

View File

@ -1,18 +0,0 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
{{ template "chart.maintainersSection" . }}
{{ template "chart.sourcesSection" . }}
{{ template "chart.requirementsSection" . }}
# Kyverno
{{ template "chart.valuesSection" . }}

View File

@ -1,70 +0,0 @@
{{- if .Values.aws.enabled }}
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-aws-iam-pod-identity
annotations:
policies.kyverno.io/title: AWS Pod Identity
policies.kyverno.io/category: aws
kyverno.io/kyverno-version: 1.14.0
kyverno.io/kubernetes-version: "1.31"
policies.kyverno.io/subject: Pod
policies.kyverno.io/description: >-
This provides the EKS Pod Identity Webhook functionality for KubeZero.
Pods having a service account annotated with \"kubezero.com/aws-iam-role-arn\"
will get the required environment variables as well as volumes injected
to make the SDKs automatically find and use the IAM role.
spec:
useServerSideApply: true
background: false
rules:
- name: add-aws-iam-oidc-mapping
context:
- name: saAnnotations
apiCall:
urlPath: "/api/v1/namespaces/{{`{{request.namespace}}`}}/serviceaccounts/{{`{{request.object.spec.serviceAccountName}}`}}"
jmesPath: "metadata.annotations || ''"
match:
any:
- resources:
kinds:
- Pod
operations:
- CREATE
preconditions:
all:
- key: "{{`{{request.object.spec.serviceAccountName || '' }}`}}"
operator: NotEquals
value: ""
- key: "{{`{{ saAnnotations.\"kubezero.com/aws-iam-role-arn\" || '' }}`}}"
operator: NotEquals
value: ""
mutate:
foreach:
- list: "request.object.spec.containers"
patchStrategicMerge:
spec:
containers:
- (name): "{{`{{ element.name }}`}}"
env:
- name: AWS_REGION
value: {{ .Values.aws.region }}
- name: AWS_ROLE_ARN
value: "{{`{{ saAnnotations.\"kubezero.com/aws-iam-role-arn\" }}`}}"
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
- name: AWS_STS_REGIONAL_ENDPOINTS
value: regional
volumeMounts:
- name: aws-token
mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
readOnly: true
volumes:
- name: aws-token
projected:
sources:
- serviceAccountToken:
path: token
expirationSeconds: 86400
audience: "sts.amazonaws.com"
{{- end }}
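For context, a hedged sketch of what a workload needs in order to be matched by this policy: a ServiceAccount carrying the annotation that the precondition checks for. The names, namespace and role ARN below are invented; only the annotation key and the injected-variable behaviour come from the policy above:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: my-app                    # hypothetical name
  namespace: my-namespace         # hypothetical namespace
  annotations:
    # The policy's precondition keys off this annotation and mutates pods using
    # this ServiceAccount to add AWS_ROLE_ARN, AWS_WEB_IDENTITY_TOKEN_FILE, the
    # projected sts.amazonaws.com token volume, etc.
    kubezero.com/aws-iam-role-arn: arn:aws:iam::111111111111:role/my-app-role
```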

View File

@ -1,62 +0,0 @@
{{- if .Values.bestPractices.enabled }}
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-container-sock-mounts
annotations:
policies.kyverno.io/title: Disallow CRI socket mounts in CEL expressions
policies.kyverno.io/category: Best Practices, EKS Best Practices in CEL
policies.kyverno.io/severity: medium
policies.kyverno.io/subject: Pod
policies.kyverno.io/minversion: 1.11.0
kyverno.io/kubernetes-version: "1.26-1.27"
policies.kyverno.io/description: >-
Container daemon socket bind mounts allows access to the container engine on the
node. This access can be used for privilege escalation and to manage containers
outside of Kubernetes, and hence should not be allowed. This policy validates that
the sockets used for CRI engines Docker, Containerd, and CRI-O are not used. In addition
to or replacement of this policy, preventing users from mounting the parent directories
(/var/run and /var) may be necessary to completely prevent socket bind mounts.
spec:
background: true
rules:
- name: validate-socket-mounts
match:
any:
- resources:
kinds:
- Pod
operations:
- CREATE
- UPDATE
validate:
failureAction: Enforce
cel:
variables:
- name: hasVolumes
expression: "!has(object.spec.volumes)"
- name: volumes
expression: "object.spec.volumes"
- name: volumesWithHostPath
expression: "variables.volumes.filter(volume, has(volume.hostPath))"
expressions:
- expression: >-
variables.hasVolumes ||
variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/docker.sock'))
message: "Use of the Docker Unix socket is not allowed."
- expression: >-
variables.hasVolumes ||
variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/containerd/containerd.sock'))
message: "Use of the Containerd Unix socket is not allowed."
- expression: >-
variables.hasVolumes ||
variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/crio/crio.sock'))
message: "Use of the CRI-O Unix socket is not allowed."
- expression: >-
variables.hasVolumes ||
variables.volumesWithHostPath.all(volume, !volume.hostPath.path.matches('/var/run/cri-dockerd.sock'))
message: "Use of the Docker CRI socket is not allowed."
{{- end }}
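A minimal sketch of a Pod that this policy would reject, because it bind-mounts a CRI socket matched by one of the CEL expressions above (names and image are illustrative only):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: debug-shell               # hypothetical
spec:
  containers:
    - name: shell
      image: busybox              # hypothetical image
      volumeMounts:
        - name: cri-sock
          mountPath: /run/containerd/containerd.sock
  volumes:
    - name: cri-sock
      hostPath:
        path: /var/run/containerd/containerd.sock   # matched by the Containerd socket expression
```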

View File

@ -1,9 +0,0 @@
#!/bin/bash
set -ex
. ../../scripts/lib-update.sh
login_ecr_public
update_helm
update_docs

View File

@ -1,6 +0,0 @@
bestPractices:
enabled: false
aws:
enabled: false
region: us-west-2

View File

@ -1,52 +0,0 @@
{{- if and false .Values.kyverno.enabled }}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ template "kubezero-lib.fullname" . }}-admission-tls
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | nindent 4 }}
spec:
secretName: {{ template "kubezero-lib.fullname" . }}-kyverno-svc.{{ .Release.Namespace }}.svc.kyverno-tls-pair
issuerRef:
name: kubezero-local-ca-issuer
kind: ClusterIssuer
duration: 8760h0m0s
privateKey:
encoding: PKCS8
usages:
- "client auth"
- "server auth"
commonName: {{ template "kubezero-lib.fullname" . }}-admission
dnsNames:
# <cluster-name>-<nodepool-component>-<index>
- 'kyverno-svc'
- 'kyverno-svc.{{ .Release.Namespace }}'
- 'kyverno-svc.{{ .Release.Namespace }}.svc'
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ template "kubezero-lib.fullname" . }}-cleanup-tls
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | nindent 4 }}
spec:
secretName: {{ template "kubezero-lib.fullname" . }}-kyverno-cleanup-controller.{{ .Release.Namespace }}.svc.kyverno-tls-pair
issuerRef:
name: kubezero-local-ca-issuer
kind: ClusterIssuer
duration: 8760h0m0s
privateKey:
encoding: PKCS8
usages:
- "client auth"
- "server auth"
commonName: {{ template "kubezero-lib.fullname" . }}-cleanup-controller
dnsNames:
# <cluster-name>-<nodepool-component>-<index>
- 'kyverno-cleanup-controller'
- 'kyverno-cleanup-controller.{{ .Release.Namespace }}'
- 'kyverno-cleanup-controller.{{ .Release.Namespace }}.svc'
{{- end }}

View File

@ -1,9 +0,0 @@
#!/bin/bash
set -ex
. ../../scripts/lib-update.sh
login_ecr_public
update_helm
update_docs

View File

@ -1,57 +0,0 @@
kyverno:
enabled: false
# Disable hooks being triggered during each sync
policyReportsCleanup:
enabled: false
webhooksCleanup:
enabled: true
autoDeleteWebhooks:
enabled: true
crds:
migration:
enabled: false
# templating:
# enabled: true
config:
preserve: false
webhookAnnotations:
argocd.argoproj.io/installation-id: KubeZero-ArgoCD
# Unfortunately Argo needs different values for Mutating and Validating hooks so disabled for now
# argocd.argoproj.io/tracking-id: policy:/ServiceAccount:kyverno/kyverno-admission-controller
features:
logging:
format: json
# Enabled via kubezero global metrics flag
grafana:
enabled: false
admissionController:
revisionHistoryLimit: 2
cleanupController:
revisionHistoryLimit: 2
rbac:
clusterRole:
extraResources:
# Allow to clean up postgreSQL backups
- apiGroups:
- postgresql.cnpg.io
resources:
- backups
verbs:
- delete
- list
- watch
backgroundController:
revisionHistoryLimit: 2
reportsController:
revisionHistoryLimit: 2
enabled: false

View File

@@ -10,8 +10,7 @@ metadata:
   labels:
     {{- include "kubezero-lib.labels" . | nindent 4 }}
   annotations:
-    argocd.argoproj.io/compare-options: IncludeMutationWebhook=true
-    # argocd.argoproj.io/sync-options: Replace=true
+    argocd.argoproj.io/sync-options: Replace=true
     {{- with ( index .Values $name "annotations" ) }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
@@ -29,7 +28,7 @@ spec:
     helm:
       skipTests: true
       valuesObject:
-        {{- toYaml (merge (omit (index .Values $name) "enabled" "namespace" "retain" "targetRevision") (fromYaml (include (print $name "-values") $ ))) | nindent 8 }}
+        {{- include (print $name "-values") $ | nindent 8 }}
   destination:
     server: "https://kubernetes.default.svc"
@@ -42,7 +41,6 @@ spec:
     syncOptions:
       - CreateNamespace=true
       - ApplyOutOfSyncOnly=true
-      - ServerSideApply=true
   info:
     - name: "Source:"
       value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.31/charts/kubezero-{{ $name }}"

View File

@@ -10,9 +10,9 @@ argo-cd:
     params:
       {{- if not $.Values.global.highAvailable }}
       # Reduce load on API server on single node control plane
-      controller.status.processors: 4
-      controller.operation.processors: 2
-      controller.kubectl.parallelism.limit: 2
+      controller.status.processors: 2
+      controller.operation.processors: 1
+      controller.kubectl.parallelism.limit: 1
       {{- else }}
       controller.status.processors: 8
       controller.operation.processors: 4

View File

@@ -67,7 +67,7 @@ gateway:
         gatewayProtocol: HTTPS
         tls:
           mode: SIMPLE
-  {{- with index .Values "istio-ingress" "gateway" "service" "extraPorts" }}
+  {{- with index .Values "istio-ingress" "gateway" "service" "ports" }}
     {{- toYaml . | nindent 4 }}
   {{- end }}
@@ -93,6 +93,7 @@ certificates:
     {{- toYaml $cert.dnsNames | nindent 4 }}
   {{- end }}
 {{- end }}
+proxyProtocol: {{ default true (index .Values "istio-ingress" "proxyProtocol") }}
 {{- with (index .Values "istio-ingress" "hardening") }}
 hardening:
   {{- toYaml . | nindent 2 }}

View File

@@ -64,7 +64,7 @@ gateway:
         gatewayProtocol: HTTPS
         tls:
           mode: SIMPLE
-  {{- with index .Values "istio-private-ingress" "gateway" "service" "extraPorts" }}
+  {{- with index .Values "istio-private-ingress" "gateway" "service" "ports" }}
     {{- toYaml . | nindent 4 }}
   {{- end }}
@@ -88,6 +88,7 @@ certificates:
   dnsNames:
     {{- toYaml $cert.dnsNames | nindent 4 }}
   {{- end }}
+proxyProtocol: {{ default true (index .Values "istio-private-ingress" "proxyProtocol") }}
 {{- with (index .Values "istio-private-ingress" "hardening") }}
 hardening:
   {{- toYaml . | nindent 2 }}

View File

@@ -1,22 +1,30 @@
 {{- define "network-values" }}
-cilium:
-  k8sServiceHost: {{ .Values.global.apiServerUrl }}
+multus:
+  enabled: true
+  clusterNetwork: "cilium"
+# {{- if eq .Values.global.platform "aws" }}
+# image:
+#   pullPolicy: Never
+# {{- end }}
+
+cilium:
+  enabled: true
 # {{- if eq .Values.global.platform "aws" }}
 # image:
 #   pullPolicy: Never
 # {{- end }}
-# k8s:
-#   apiServerURLs: "https://{{ .Values.global.apiServerUrl }}"
   cluster:
     name: {{ .Values.global.clusterName }}
-    {{- with ((.Values.network.cilium).cluster).id }}
+    {{- with .Values.network.cilium.cluster.id }}
+    id: {{ . }}
     ipam:
       operator:
         clusterPoolIPv4PodCIDRList:
           - 10.{{ . }}.0.0/16
     {{- end }}

   prometheus:
     enabled: {{ .Values.metrics.enabled }}
@@ -32,6 +40,11 @@ cilium:
     serviceMonitor:
       enabled: {{ .Values.metrics.enabled }}

+{{- with .Values.network.metallb }}
+metallb:
+  {{- toYaml . | nindent 2 }}
+{{- end }}
+
 {{- with .Values.network.haproxy }}
 haproxy:
   {{- toYaml . | nindent 2 }}
@@ -41,6 +54,12 @@ haproxy:
 {{- define "network-argo" }}
+# Metallb
+ignoreDifferences:
+  - group: apiextensions.k8s.io
+    kind: CustomResourceDefinition
+    jsonPointers:
+      - /spec/conversion/webhook/clientConfig/caBundle
 {{- end }}

 {{ include "kubezero-app.app" . }}

View File

@ -1,39 +0,0 @@
{{- define "policy-values" }}
kyverno:
{{- if eq .Values.global.platform "aws" }}
global:
{{- include "kubezero-lib.control-plane" . | nindent 4 }}
{{- end }}
grafana:
enabled: {{ .Values.metrics.enabled }}
admissionController:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
cleanupController:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
backgroundController:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
reportsController:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
policies:
{{- if eq .Values.global.platform "aws" }}
aws:
enabled: true
region: {{ .global.aws.region }}
{{- end }}
{{- end }}
{{- define "policy-argo" }}
{{- end }}
{{ include "kubezero-app.app" . }}

View File

@@ -1,6 +1,5 @@
 global:
   clusterName: zdt-trial-cluster
-  apiServerUrl: localhost:6443

   # platform: aws (kubeadm, default), gke, or nocloud
   platform: "aws"
@@ -33,14 +32,9 @@ addons:
 network:
   enabled: true
   retain: true
-  targetRevision: 0.5.9
+  targetRevision: 0.5.8
   cilium:
-    enabled: true
-
-policy:
-  enabled: false
-  namespace: kyverno
-  targetRevision: 0.1.0
+    cluster: {}

 cert-manager:
   enabled: false
@@ -66,13 +60,13 @@ storage:
 istio:
   enabled: false
   namespace: istio-system
-  targetRevision: 0.24.6
+  targetRevision: 0.24.3

 istio-ingress:
   enabled: false
   chart: kubezero-istio-gateway
   namespace: istio-ingress
-  targetRevision: 0.24.6
+  targetRevision: 0.24.3
   gateway:
     service: {}
@@ -80,7 +74,7 @@ istio-private-ingress:
   enabled: false
   chart: kubezero-istio-gateway
   namespace: istio-ingress
-  targetRevision: 0.24.6
+  targetRevision: 0.24.3
   gateway:
     service: {}
@@ -123,7 +117,7 @@ logging:
 argo:
   enabled: false
   namespace: argocd
-  targetRevision: 0.4.1
+  targetRevision: 0.4.0
   argo-cd:
     enabled: false
     istio:

View File

@@ -1,3 +1,4 @@
+---
 apiVersion: batch/v1
 kind: Job
 metadata: