Compare commits


1 commit

88579bace8  chore(deps): update helm release rabbitmq to v15  2025-04-15 03:03:01 +00:00

62 changed files with 355 additions and 1694 deletions

View File

@@ -14,7 +14,7 @@ include .ci/podman.mk
 Add subtree to your project:
 ```
-git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
+git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
 ```
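
Both sides use the same two-step subtree workflow; a minimal sketch assembled only from commands visible in this diff (the branch name, main vs. master, is exactly what this commit flips):

```bash
# one-time import of the shared CI tooling into .ci
git subtree add --prefix .ci \
  https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash

# pull upstream changes back into the .ci prefix later on
# (this is the Makefile's ci-pull-upstream target further down)
git subtree pull --prefix .ci \
  ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash \
  -m "Merge latest ci-tools-lib"
```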

View File

@@ -41,8 +41,7 @@ for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
     _delete = True
     for tag in image["imageTags"]:
         # Look for at least one tag NOT being a SemVer dev tag
-        # untagged dev builds get tagged as <tag>-g<commit>
-        if "-g" not in tag and "dirty" not in tag:
+        if "-" not in tag:
             _delete = False
     if _delete:
         print("Deleting development image {}".format(image["imageTags"]))

View File

@@ -8,8 +8,8 @@ SHELL := bash
 .PHONY: all # All targets are accessible for user
 .DEFAULT: help # Running Make will run the help target
 
-# Parse version from latest git semver tag, use short commit otherwise
-GIT_TAG ?= $(shell git describe --tags --match v*.*.* --dirty 2>/dev/null || git describe --match="" --always --dirty 2>/dev/null)
+# Parse version from latest git semver tag
+GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 
 TAG ::= $(GIT_TAG)
@@ -85,7 +85,7 @@ rm-image:
 ## some useful tasks during development
 ci-pull-upstream: ## pull latest shared .ci subtree
-	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
+	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
 
 create-repo: ## create new AWS ECR public repository
 	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
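
For reference, what the two GIT_TAG pipelines evaluate to; the tag and commit ids below are made up:

```bash
# Assuming the latest matching tag is v1.2.3, two commits behind HEAD, with local edits:
git describe --tags --match 'v*.*.*' --dirty   # old: v1.2.3-2-g1a2b3c4-dirty
git describe --tags --match 'v*.*.*'           # new: v1.2.3-2-g1a2b3c4
git describe --match="" --always --dirty       # old fallback: 1a2b3c4-dirty
git rev-parse --short HEAD                     # new fallback: 1a2b3c4
```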

View File

@@ -3,7 +3,7 @@ ARG ALPINE_VERSION=3.21
 FROM docker.io/alpine:${ALPINE_VERSION}
 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.32
+ARG KUBE_VERSION=1.31
 
 ARG SOPS_VERSION="3.10.1"
 ARG VALS_VERSION="0.40.1"

View File

@@ -17,7 +17,7 @@ post_control_plane_upgrade_cluster() {
   # delete previous root app controlled by kubezero module
   kubectl delete application kubezero-git-sync -n argocd || true
 
-  # only patch appproject to keep SyncWindow in place
+  # Patch appproject to keep SyncWindow in place
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
 }

View File

@@ -1,28 +0,0 @@
-### v1.32
-
-# All things BEFORE the first controller / control plane upgrade
-pre_control_plane_upgrade_cluster() {
-  echo
-}
-
-# All things after the first controller / control plane upgrade
-post_control_plane_upgrade_cluster() {
-  echo
-}
-
-# All things AFTER all controllers are on the new version
-pre_cluster_upgrade_final() {
-  set +e
-  echo
-  set -e
-}
-
-# Last call
-post_cluster_upgrade_final() {
-  echo
-}

View File

@@ -57,7 +57,6 @@ render_kubeadm() {
   local phase=$1
 
   helm template $CHARTS/kubeadm --output-dir ${WORKDIR} \
-    --kube-version $KUBE_VERSION \
     -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml \
     --set patches=/etc/kubernetes/patches
@@ -112,44 +111,35 @@ post_kubeadm() {
 }
 
-# Migrate KubeZero Config to current version
-upgrade_kubezero_config() {
-  ARGOCD=$(argo_used)
-
-  # get current values, argo app over cm
-  get_kubezero_values $ARGOCD
-
-  # tumble new config through migrate.py
-  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
-    && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
-
-  update_kubezero_cm
-
-  if [ "$ARGOCD" == "true" ]; then
-    # update argo app
-    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
-    kubectl get application kubezero -n argocd -o yaml | \
-      yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
-      > $WORKDIR/new-argocd-app.yaml
-    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
-  fi
-}
-
 # Control plane upgrade
-kubeadm_upgrade() {
+control_plane_upgrade() {
+  CMD=$1
+
   ARGOCD=$(argo_used)
   render_kubeadm upgrade
 
-  # Check if we already have all controllers on the current version
-  OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
-
-  # run control plane upgrade
-  if [ "$OLD_CONTROLLERS" != "0" ]; then
+  if [[ "$CMD" =~ ^(cluster)$ ]]; then
     pre_control_plane_upgrade_cluster
 
+    # get current values, argo app over cm
+    get_kubezero_values $ARGOCD
+
+    # tumble new config through migrate.py
+    migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
+      && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
+
+    update_kubezero_cm
+
+    if [ "$ARGOCD" == "true" ]; then
+      # update argo app
+      export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
+      kubectl get application kubezero -n argocd -o yaml | \
+        yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
+        > $WORKDIR/new-argocd-app.yaml
+      kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
+    fi
+
     pre_kubeadm
 
     _kubeadm init phase upload-config kubeadm
@@ -165,11 +155,12 @@ kubeadm_upgrade() {
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."
 
-  # All controllers already on current version
-  else
+  elif [[ "$CMD" =~ ^(final)$ ]]; then
     pre_cluster_upgrade_final
 
-    _kubeadm upgrade apply phase addon all $KUBE_VERSION
+    # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
+    #_kubeadm upgrade apply phase addon all $KUBE_VERSION
+    _kubeadm upgrade apply $KUBE_VERSION
 
     post_cluster_upgrade_final
@@ -205,6 +196,10 @@ control_plane_node() {
     # Put PKI in place
     cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
 
+    ### 1.31 only to clean up previous aws-iam-auth certs
+    rm -f ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
+    ###
+
     # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
     cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config
@@ -338,7 +333,9 @@ apply_module() {
       [ -f $CHARTS/kubezero/hooks.d/pre-install.sh ] && . $CHARTS/kubezero/hooks.d/pre-install.sh
       kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
     else
-      _helm apply $t
+      #_helm apply $t
+      # During 1.31 we change the ArgoCD tracking so replace
+      _helm replace $t
     fi
   done
@@ -352,9 +349,7 @@ delete_module() {
   get_kubezero_values $ARGOCD
 
   # Always use embedded kubezero chart
-  helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml \
-    --kube-version $KUBE_VERSION \
-    --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
+  helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
 
   for t in $MODULES; do
     _helm delete $t
@@ -416,8 +411,12 @@ for t in $@; do
     bootstrap) control_plane_node bootstrap;;
     join) control_plane_node join;;
     restore) control_plane_node restore;;
-    upgrade_control_plane) kubeadm_upgrade;;
-    upgrade_kubezero) upgrade_kubezero_config;;
+    kubeadm_upgrade)
+      control_plane_upgrade cluster
+      ;;
+    finalize_cluster_upgrade)
+      control_plane_upgrade final
+      ;;
     apply_*)
       ARGOCD=$(argo_used)
       apply_module "${t##apply_}";;
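
The dispatcher above is what the admin container invokes; a hedged usage sketch based only on names appearing in this diff (kubezero.sh is the pod entrypoint shown in the next file):

```bash
kubezero.sh kubeadm_upgrade             # new side: control_plane_upgrade cluster
kubezero.sh finalize_cluster_upgrade    # new side: control_plane_upgrade final
kubezero.sh apply_network apply_addons  # each apply_* arm runs apply_module <name>
```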

View File

@@ -80,19 +80,6 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }
 
-function ensure_kubezero_secret_key() {
-  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
-
-  local key=""
-  local val=""
-  for key in $@; do
-    val=$(echo "$secret" | yq ".data.\"$key\"")
-    if [ "$val" == "null" ]; then
-      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
-    fi
-  done
-}
-
 function set_kubezero_secret() {
   local key="$1"
@@ -353,17 +340,17 @@ EOF
 }
 
-function admin_job() {
+function control_plane_upgrade() {
   TASKS="$1"
 
-  ADMIN_TAG=${ADMIN_TAG:-$KUBE_VERSION}
+  [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
 
   echo "Deploy cluster admin task: $TASKS"
   cat <<EOF | kubectl apply -f -
 apiVersion: v1
 kind: Pod
 metadata:
-  name: kubezero-admin-job
+  name: kubezero-upgrade
   namespace: kube-system
   labels:
     app: kubezero-upgrade
@@ -373,7 +360,7 @@ spec:
   hostPID: true
   containers:
   - name: kubezero-admin
-    image: public.ecr.aws/zero-downtime/kubezero-admin:${ADMIN_TAG}
+    image: public.ecr.aws/zero-downtime/kubezero-admin:${KUBE_VERSION}
     imagePullPolicy: Always
     command: ["kubezero.sh"]
     args: [$TASKS]
@@ -408,10 +395,10 @@ spec:
   restartPolicy: Never
 EOF
 
-  kubectl wait pod kubezero-admin-job -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
+  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
   while true; do
-    kubectl logs kubezero-admin-job -n kube-system -f 2>/dev/null && break
+    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
     sleep 3
   done
-  kubectl delete pod kubezero-admin-job -n kube-system
+  kubectl delete pod kubezero-upgrade -n kube-system
 }
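
Callers pass the task list as one comma-separated string, which becomes the pod's args; usage as it appears in the upgrade script later in this diff:

```bash
# new side of the rename:
control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"

# old side equivalent:
admin_job "upgrade_control_plane, upgrade_kubezero"
```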

View File

@@ -15,28 +15,37 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 ARGOCD=$(argo_used)
 
 echo "Checking that all pods in kube-system are running ..."
-waitSystemPodsRunning
+#waitSystemPodsRunning
 
 [ "$ARGOCD" == "true" ] && disable_argo
 
-admin_job "upgrade_control_plane, upgrade_kubezero"
+# Check if we already have all controllers on the current version
+#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
+if [ "$OLD_CONTROLLERS" == "0" ]; then
+  # All controllers already on current version
+  control_plane_upgrade finalize_cluster_upgrade
+else
+  # Otherwise run control plane upgrade
+  control_plane_upgrade kubeadm_upgrade
+fi
+
+echo "<Return> to continue"
+read -r
 
 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
 #[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
-#echo "<Return> to continue"
-#read -r
 
 # upgrade modules
-admin_job "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
+control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
 
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
 
 echo "Applying remaining KubeZero modules..."
 
-admin_job "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
+control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
 
 # we replace the project during v1.31 so disable again
 [ "$ARGOCD" == "true" ] && disable_argo
@@ -51,12 +60,6 @@ while true; do
   sleep 1
 done
 
-echo "Once all controller nodes are running on $KUBE_VERSION, <return> to continue"
-read -r
-
-# Final control plane upgrades
-admin_job "upgrade_control_plane"
-
 echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
 echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.32.3
+version: 1.31.6
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -11,4 +11,4 @@ keywords:
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
-kubeVersion: ">= 1.32.0-0"
+kubeVersion: ">= 1.31.0-0"

View File

@@ -1,6 +1,6 @@
 # kubeadm
-![Version: 1.32.3](https://img.shields.io/badge/Version-1.32.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.25.8](https://img.shields.io/badge/Version-1.25.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero Kubeadm cluster config
@@ -14,18 +14,19 @@ KubeZero Kubeadm cluster config
 ## Requirements
 
-Kubernetes: `>= 1.32.0-0`
+Kubernetes: `>= 1.25.0`
 
 ## Values
 
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
 | api.apiAudiences | string | `"istio-ca"` | |
-| api.awsIamAuth | bool | `false` | |
+| api.awsIamAuth.enabled | bool | `false` | |
+| api.awsIamAuth.kubeAdminRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
+| api.awsIamAuth.workerNodeRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
 | api.endpoint | string | `"kube-api.changeme.org:6443"` | |
 | api.etcdServers | string | `"https://etcd:2379"` | |
 | api.extraArgs | object | `{}` | |
+| api.falco.enabled | bool | `false` | |
 | api.listenPort | int | `6443` | |
 | api.oidcEndpoint | string | `""` | s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
 | api.serviceAccountIssuer | string | `""` | https://s3.${REGION}.amazonaws.com/${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |

View File

@@ -4,7 +4,6 @@ kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
 featureGates:
   ControlPlaneKubeletLocalMode: true
-  NodeLocalCRISocket: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16
@@ -120,8 +119,6 @@ apiServer:
     value: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
   - name: authorization-config
     value: /etc/kubernetes/apiserver/authz-config.yaml
-  - name: authentication-config
-    value: /etc/kubernetes/apiserver/authn-config.yaml
   - name: enable-admission-plugins
     value: DenyServiceExternalIPs,NodeRestriction,EventRateLimit,ExtendedResourceToleration
 {{- if .Values.global.highAvailable }}
@@ -130,11 +127,6 @@ apiServer:
 {{- end }}
   - name: logging-format
     value: json
-  # Required for MutatingAdmissionPolicy
-  # Required for VolumeAttributesClass
-  # Required for CoordinatedLeaderElection - coordination.k8s.io/v1alpha1=true
-  - name: runtime-config
-    value: admissionregistration.k8s.io/v1alpha1=true,storage.k8s.io/v1beta1=true
 {{- with .Values.api.extraArgs }}
 {{- toYaml . | nindent 4 }}
 {{- end }}
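
The removed runtime-config flag is what served those alpha/beta API groups; a quick, hedged way to confirm what an API server actually exposes:

```bash
# Requires kubectl access; these groups should only show up while the
# corresponding runtime-config entries (removed above) are in effect.
kubectl api-versions | grep -E 'admissionregistration.k8s.io/v1alpha1|storage.k8s.io/v1beta1'
```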

View File

@@ -1,9 +1,9 @@
 {{- /* Feature gates for all control plane components */ -}}
 {{- /* Issues: MemoryQoS */ -}}
 {{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
-{{- /* v1.32: not required? working ? "DisableNodeKubeProxyVersion" "CoordinatedLeaderElection" */ -}}
+{{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "VolumeAttributesClass" "MutatingAdmissionPolicy" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,

View File

@@ -1,5 +1,7 @@
 apiVersion: apiserver.config.k8s.io/v1
 kind: AdmissionConfiguration
+metadata:
+  name: kubezero-admissionconfiguration
 plugins:
 - name: EventRateLimit
   path: /etc/kubernetes/apiserver/event-config.yaml

View File

@@ -1,10 +0,0 @@
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-anonymous:
-  enabled: true
-  conditions:
-  - path: /livez
-  - path: /readyz
-  - path: /healthz
-  - path: /.well-known/openid-configuration
-  - path: /openid/v1/jwks

View File

@@ -1,4 +1,4 @@
-apiVersion: apiserver.config.k8s.io/v1
+apiVersion: apiserver.config.k8s.io/v1beta1
 kind: AuthorizationConfiguration
 authorizers:
   - type: Node

View File

@@ -8,6 +8,3 @@ json:
   - op: replace
     path: /spec/containers/0/startupProbe/httpGet/host
     value: {{ .Values.listenAddress }}
-  - op: replace
-    path: /spec/containers/0/readinessProbe/httpGet/host
-    value: {{ .Values.listenAddress }}

View File

@@ -1,9 +0,0 @@
-#!/bin/bash
-set -ex
-
-. ../../scripts/lib-update.sh
-
-login_ecr_public
-update_helm
-update_docs

View File

@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.8.14
-appVersion: v1.31
+version: 0.8.13
+appVersion: v1.30
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -21,15 +21,15 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: external-dns
-    version: 1.16.1
+    version: 1.15.1
    repository: https://kubernetes-sigs.github.io/external-dns/
    condition: external-dns.enabled
   - name: cluster-autoscaler
-    version: 9.46.6
+    version: 9.46.0
    repository: https://kubernetes.github.io/autoscaler
    condition: cluster-autoscaler.enabled
   - name: nvidia-device-plugin
-    version: 0.17.1
+    version: 0.17.0
    # https://github.com/NVIDIA/k8s-device-plugin
    repository: https://nvidia.github.io/k8s-device-plugin
    condition: nvidia-device-plugin.enabled
@@ -39,11 +39,11 @@ dependencies:
    repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
    condition: neuron-helm-chart.enabled
   - name: sealed-secrets
-    version: 2.17.2
+    version: 2.17.1
    repository: https://bitnami-labs.github.io/sealed-secrets
    condition: sealed-secrets.enabled
   - name: aws-node-termination-handler
-    version: 0.27.0
+    version: 0.26.0
    repository: "oci://public.ecr.aws/aws-ec2/helm"
    condition: aws-node-termination-handler.enabled
   - name: aws-eks-asg-rolling-update-handler
@@ -51,7 +51,7 @@ dependencies:
    repository: https://twin.github.io/helm-charts
    condition: aws-eks-asg-rolling-update-handler.enabled
   - name: py-kube-downscaler
-    version: 0.3.2
+    version: 0.2.12
    repository: https://caas-team.github.io/helm-charts/
    condition: py-kube-downscaler.enabled
 kubeVersion: ">= 1.30.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-addons
-![Version: 0.8.14](https://img.shields.io/badge/Version-0.8.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.31](https://img.shields.io/badge/AppVersion-v1.31-informational?style=flat-square)
+![Version: 0.8.13](https://img.shields.io/badge/Version-0.8.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.30](https://img.shields.io/badge/AppVersion-v1.30-informational?style=flat-square)
 
 KubeZero umbrella chart for various optional cluster addons
@@ -18,13 +18,13 @@ Kubernetes: `>= 1.30.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.17.2 |
-| https://caas-team.github.io/helm-charts/ | py-kube-downscaler | 0.3.2 |
-| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.16.1 |
-| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.46.6 |
-| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.17.1 |
+| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.17.1 |
+| https://caas-team.github.io/helm-charts/ | py-kube-downscaler | 0.2.12 |
+| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.15.1 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.46.0 |
+| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.17.0 |
 | https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
-| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.27.0 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.26.0 |
 | oci://public.ecr.aws/neuron | neuron-helm-chart | 1.1.1 |
 
 # MetalLB
@@ -109,7 +109,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
 | cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
-| cluster-autoscaler.image.tag | string | `"v1.32.1"` | |
+| cluster-autoscaler.image.tag | string | `"v1.31.1"` | |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` | |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` | |

View File

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.25.0
+appVersion: 1.24.0
 description: A Helm chart for the AWS Node Termination Handler.
 home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -21,4 +21,4 @@ name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
 type: application
-version: 0.27.0
+version: 0.26.0

View File

@@ -95,7 +95,6 @@ The configuration in this table applies to all AWS Node Termination Handler modes.
 | `webhookTemplateConfigMapName` | Pass the webhook template file as a configmap. | `""` |
 | `webhookTemplateConfigMapKey` | Name of the Configmap key storing the template file. | `""` |
 | `enableSqsTerminationDraining` | If `true`, this turns on queue-processor mode which drains nodes when an SQS termination event is received. | `false` |
-| `enableOutOfServiceTaint` | If `true`, this will add out-of-service taint to node after cordon/drain process which would forcefully evict pods without matching tolerations and detach persistent volumes. | `false` |
 
 ### Queue-Processor Mode Configuration
 
@@ -121,9 +120,6 @@ The configuration in this table applies to AWS Node Termination Handler in queue-processor mode.
 | `managedAsgTag` | [DEPRECATED](Use `managedTag` instead) The node tag to check if `checkASGTagBeforeDraining` is `true`. |
 | `useProviderId` | If `true`, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname. | `false` |
 | `topologySpreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for pod scheduling. Useful with a highly available deployment to reduce the risk of running multiple replicas on the same Node | `[]` |
-| `heartbeatInterval` | The time period in seconds between consecutive heartbeat signals. Valid range: 30-3600 seconds (30 seconds to 1 hour). | `-1` |
-| `heartbeatUntil` | The duration in seconds over which heartbeat signals are sent. Valid range: 60-172800 seconds (1 minute to 48 hours). | `-1` |
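
The two heartbeat settings removed above are plain chart values; a hedged sketch of enabling them (the release name is illustrative, the OCI repository is the one referenced elsewhere in this diff):

```bash
helm upgrade --install aws-node-termination-handler \
  oci://public.ecr.aws/aws-ec2/helm/aws-node-termination-handler \
  --set enableSqsTerminationDraining=true \
  --set heartbeatInterval=300 \
  --set heartbeatUntil=3600
```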
 ### IMDS Mode Configuration
 
 The configuration in this table applies to AWS Node Termination Handler in IMDS mode.

View File

@@ -99,8 +99,6 @@ spec:
             value: {{ .Values.cordonOnly | quote }}
           - name: TAINT_NODE
             value: {{ .Values.taintNode | quote }}
-          - name: ENABLE_OUT_OF_SERVICE_TAINT
-            value: {{ .Values.enableOutOfServiceTaint | quote }}
           - name: EXCLUDE_FROM_LOAD_BALANCERS
             value: {{ .Values.excludeFromLoadBalancers | quote }}
           - name: DELETE_LOCAL_DATA

View File

@@ -99,8 +99,6 @@ spec:
             value: {{ .Values.cordonOnly | quote }}
           - name: TAINT_NODE
             value: {{ .Values.taintNode | quote }}
-          - name: ENABLE_OUT_OF_SERVICE_TAINT
-            value: {{ .Values.enableOutOfServiceTaint | quote }}
           - name: EXCLUDE_FROM_LOAD_BALANCERS
             value: {{ .Values.excludeFromLoadBalancers | quote }}
           - name: DELETE_LOCAL_DATA

View File

@@ -102,8 +102,6 @@ spec:
             value: {{ .Values.cordonOnly | quote }}
           - name: TAINT_NODE
             value: {{ .Values.taintNode | quote }}
-          - name: ENABLE_OUT_OF_SERVICE_TAINT
-            value: {{ .Values.enableOutOfServiceTaint | quote }}
           - name: EXCLUDE_FROM_LOAD_BALANCERS
             value: {{ .Values.excludeFromLoadBalancers | quote }}
           - name: DELETE_LOCAL_DATA

View File

@@ -86,9 +86,6 @@ cordonOnly: false
 # Taint node upon spot interruption termination notice.
 taintNode: false
 
-# Add out-of-service taint to node after cordon/drain process which would forcefully evict pods without matching tolerations and detach persistent volumes.
-enableOutOfServiceTaint: false
-
 # Exclude node from load balancer before cordoning via the ServiceNodeExclusion feature gate.
 excludeFromLoadBalancers: false
@@ -288,12 +285,6 @@ enableRebalanceDraining: false
 # deleteSqsMsgIfNodeNotFound If true, delete the SQS Message from the SQS Queue if the targeted node(s) are not found. Only used in Queue Processor mode.
 deleteSqsMsgIfNodeNotFound: false
 
-# The time period in seconds between consecutive heartbeat signals. Valid range: 30-3600 seconds (30 seconds to 1 hour).
-heartbeatInterval: -1
-
-# The duration in seconds over which heartbeat signals are sent. Valid range: 60-172800 seconds (1 minute to 48 hours).
-heartbeatUntil: -1
-
 # ---------------------------------------------------------------------------------------------------------------------
 # Testing
 # ---------------------------------------------------------------------------------------------------------------------

View File

@@ -219,7 +219,7 @@ cluster-autoscaler:
 
   image:
     repository: registry.k8s.io/autoscaling/cluster-autoscaler
-    tag: v1.32.1
+    tag: v1.31.1
 
   autoDiscovery:
     clusterName: ""

View File

@@ -21,6 +21,3 @@ fi
 # Redis secret
 kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
   --from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
-
-# required keys in kubezero-secrets, as --ignore-missing-values in helm-secrets doesn't work with vals ;-(
-ensure_kubezero_secret_key argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey
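
ensure_kubezero_secret_key, whose definition is removed earlier in this diff, merely backfills missing keys with empty values so vals lookups do not fail; a condensed sketch of the same idea:

```bash
# Condensed from the removed function: create an empty data key when absent.
for key in argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey; do
  val=$(kubectl get secret -n kubezero kubezero-secrets -o yaml | yq ".data.\"$key\"")
  [ "$val" == "null" ] && \
    kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
done
```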

View File

@@ -26,7 +26,7 @@ argo-events:
   versions:
     - version: 2.10.11
       natsImage: nats:2.11.1-scratch
-      metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
+      metricsExporterImage: natsio/prometheus-nats-exporter:0.16.0
       configReloaderImage: natsio/nats-server-config-reloader:0.14.1
       startCommand: /nats-server

View File

@@ -1,6 +1,6 @@
 # kubezero-graph
-![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero GraphQL and GraphDB
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.29.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://helm.neo4j.com/neo4j | neo4j | 2025.3.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.2.1 |
+| https://helm.neo4j.com/neo4j | neo4j | 5.26.0 |
 
 ## Values
@@ -28,8 +28,6 @@ Kubernetes: `>= 1.29.0-0`
 | neo4j.disableLookups | bool | `true` | |
 | neo4j.enabled | bool | `false` | |
 | neo4j.neo4j.name | string | `"test-db"` | |
-| neo4j.neo4j.password | string | `"secret"` | |
-| neo4j.neo4j.passwordFromSecret | string | `"neo4j-admin"` | |
 | neo4j.serviceMonitor.enabled | bool | `false` | |
 | neo4j.services.neo4j.enabled | bool | `false` | |
 | neo4j.volumes.data.mode | string | `"defaultStorageClass"` | |

View File

@@ -21,7 +21,7 @@ dependencies:
    repository: https://nats-io.github.io/k8s/helm/charts/
    condition: nats.enabled
   - name: rabbitmq
-    version: 16.0.0
+    version: 15.5.3
    repository: https://charts.bitnami.com/bitnami
    condition: rabbitmq.enabled
 kubeVersion: ">= 1.26.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-mq
-![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.3.10](https://img.shields.io/badge/Version-0.3.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://charts.bitnami.com/bitnami | rabbitmq | 14.6.6 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.2.2 |
 
 ## Values
@@ -34,6 +34,13 @@ Kubernetes: `>= 1.26.0`
 | nats.natsBox.enabled | bool | `false` | |
 | nats.promExporter.enabled | bool | `false` | |
 | nats.promExporter.podMonitor.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
+| rabbitmq-cluster-operator.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.msgTopologyOperator.metrics.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.msgTopologyOperator.metrics.serviceMonitor.enabled | bool | `true` | |
+| rabbitmq-cluster-operator.rabbitmqImage.tag | string | `"3.11.4-debian-11-r0"` | |
+| rabbitmq-cluster-operator.useCertManager | bool | `true` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.existingPasswordSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.tls.enabled | bool | `false` | |

View File

@@ -1,4 +1,4 @@
-{{- if .Values.nats.promExporter.podMonitor.enabled }}
+{{- if .Values.nats.exporter.serviceMonitor.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:

View File

@@ -6,12 +6,6 @@ nats:
   jetstream:
     enabled: true
 
-  podTemplate:
-    topologySpreadConstraints:
-      kubernetes.io/hostname:
-        maxSkew: 1
-        whenUnsatisfiable: DoNotSchedule
-
   natsBox:
     enabled: false

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-network
 description: KubeZero umbrella chart for all things network
 type: application
-version: 0.5.8
+version: 0.5.7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -19,7 +19,7 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
-    version: 1.17.3
+    version: 1.16.6
    repository: https://helm.cilium.io/
    condition: cilium.enabled
   - name: metallb
@@ -27,7 +27,7 @@ dependencies:
    repository: https://metallb.github.io/metallb
    condition: metallb.enabled
   - name: haproxy
-    version: 1.24.0
+    version: 1.23.0
    repository: https://haproxytech.github.io/helm-charts
    condition: haproxy.enabled
-kubeVersion: ">= 1.30.0-0"
+kubeVersion: ">= 1.29.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-network
-![Version: 0.5.8](https://img.shields.io/badge/Version-0.5.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.7](https://img.shields.io/badge/Version-0.5.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero umbrella chart for all things network
@@ -14,13 +14,13 @@ KubeZero umbrella chart for all things network
 ## Requirements
 
-Kubernetes: `>= 1.30.0-0`
+Kubernetes: `>= 1.29.0-0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
-| https://helm.cilium.io/ | cilium | 1.17.3 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://haproxytech.github.io/helm-charts | haproxy | 1.23.0 |
+| https://helm.cilium.io/ | cilium | 1.16.6 |
 | https://metallb.github.io/metallb | metallb | 0.14.9 |
 
 ## Values
@@ -116,5 +116,5 @@ Kubernetes: `>= 1.30.0-0`
 | multus.defaultNetworks | list | `[]` | |
 | multus.enabled | bool | `false` | |
 | multus.image.repository | string | `"ghcr.io/k8snetworkplumbingwg/multus-cni"` | |
-| multus.image.tag | string | `"v4.2.0"` | |
+| multus.image.tag | string | `"v3.9.3"` | |
 | multus.readinessindicatorfile | string | `"/etc/cni/net.d/05-cilium.conflist"` | |

File diff suppressed because one or more lines are too long

View File

@@ -18,7 +18,7 @@ multus:
   enabled: false
   image:
     repository: ghcr.io/k8snetworkplumbingwg/multus-cni
-    tag: v4.2.0
+    tag: v4.1.4
 
   clusterNetwork: "cilium"
   defaultNetworks: []

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-storage
 description: KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
 type: application
-version: 0.8.11
+version: 0.8.10
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -24,11 +24,11 @@ dependencies:
    condition: lvm-localpv.enabled
    repository: https://openebs.github.io/lvm-localpv
   - name: aws-ebs-csi-driver
-    version: 2.42.0
+    version: 2.39.3
    condition: aws-ebs-csi-driver.enabled
    repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver
   - name: aws-efs-csi-driver
-    version: 2.5.7
+    version: 3.1.6
    condition: aws-efs-csi-driver.enabled
    repository: https://kubernetes-sigs.github.io/aws-efs-csi-driver
   - name: gemini
@@ -36,7 +36,7 @@ dependencies:
    condition: gemini.enabled
    repository: https://charts.fairwinds.com/stable
   - name: k8up
-    version: 4.8.4
+    version: 4.8.3
    condition: k8up.enabled
    repository: https://k8up-io.github.io/k8up
-kubeVersion: ">= 1.30.0-0"
+kubeVersion: ">= 1.26.0"

View File

@@ -1,32 +0,0 @@
-diff -rtuN charts/aws-efs-csi-driver.orig/templates/controller-deployment.yaml charts/aws-efs-csi-driver/templates/controller-deployment.yaml
---- charts/aws-efs-csi-driver.orig/templates/controller-deployment.yaml	2023-08-23 11:32:48.964952023 +0000
-+++ charts/aws-efs-csi-driver/templates/controller-deployment.yaml	2023-08-23 11:32:48.968285371 +0000
-@@ -76,9 +76,14 @@
-             - name: AWS_USE_FIPS_ENDPOINT
-               value: "true"
-             {{- end }}
-+            {{- if .Values.controller.extraEnv }}
-+            {{- toYaml .Values.controller.extraEnv | nindent 12 }}
-+            {{- end }}
-           volumeMounts:
-             - name: socket-dir
-               mountPath: /var/lib/csi/sockets/pluginproxy/
-+            - name: aws-token
-+              mountPath: /var/run/secrets/sts.amazonaws.com/serviceaccount/
-           ports:
-             - name: healthz
-               containerPort: {{ .Values.controller.healthPort }}
-@@ -137,6 +142,13 @@
-       volumes:
-         - name: socket-dir
-           emptyDir: {}
-+        - name: aws-token
-+          projected:
-+            sources:
-+              - serviceAccountToken:
-+                  path: token
-+                  expirationSeconds: 86400
-+                  audience: "sts.amazonaws.com"
-       {{- with .Values.controller.affinity }}
-       affinity: {{- toYaml . | nindent 8 }}
-       {{- end }}

View File

@@ -1,62 +1,5 @@
 # Helm chart
-
-## v2.42.0
-
-### Feature
-
-- Set internal traffic policy to local for node metric service ([#2432](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2432), [@ElijahQuinones](https://github.com/ElijahQuinones))
-
-## v2.41.0
-
-### Feature
-
-- Add `enabled` flag to schema for use in sub-charting ([#2361](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2361), [@ConnorJC3](https://github.com/ConnorJC3))
-- Add Prometheus Annotations to the Node Service ([#2363](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2363), [@mdzraf](https://github.com/mdzraf))
-
-### Bug or regression
-
-- Prevent nil pointer deref in Helm chart when `node.enableWindows` and `node.otelTracing` are both set ([#2357](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2357), [@ConnorJC3](https://github.com/ConnorJC3))
-
-## v2.40.3
-
-### Feature
-
-- Upgrade csi-attacher to v4.8.1, csi-snapshotter to v8.2.1, csi-resizer to v1.13.2
-
-### Bug or regression
-
-- Fix incorrect schema entry for controller.podDisruptionBudget.unhealthyPodEvictionPolicy ([#2389](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2389), [@jamesalford](https://github.com/jamesalford))
-
-## v2.40.2
-
-### Bug or Regression
-
-- Add enabled flag to schema for sub-charting ([#2359](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2359), [@ConnorJC3](https://github.com/ConnorJC3))
-
-## v2.40.1
-
-### Bug or Regression
-
-- Prevent null deref when enableWindows and otelTracing enabled on node ([#2357](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2357), [@ConnorJC3](https://github.com/ConnorJC3))
-- Fix incorrect properties validation in Helm schema ([#2356](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2356), [@ConnorJC3](https://github.com/ConnorJC3))
-
-## v2.40.0
-
-#### Default for enable windows changed
-The default value for enableWindows has been changed from false to true. This change makes it so the node daemonset will be scheduled on windows nodes by default. If you wish to not have the node daemonset scheduled on your windows nodes you will need to change enableWindows to false.
-
-### Feature
-
-- Add values.schema.json to validate changes in values.yaml. ([#2286](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2286), [@ElijahQuinones](https://github.com/ElijahQuinones))
-
-### Bug or Regression
-
-- Fix helm regression with values.schema.yaml. ([#2322](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2322), [@ElijahQuinones](https://github.com/ElijahQuinones))
-- `global` has been added to the values schema, allowing aws-ebs-csi-driver to be used in a Helm sub chart ([#2321](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2321), [@kejne](https://github.com/kejne))
-- Reconcile some differences between helm chart and values.schema.json ([#2335](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2335), [@ElijahQuinones](https://github.com/ElijahQuinones))
-- Fix helm regression with a1CompatibilityDaemonSet=true ([#2316](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/pull/2316), [@AndrewSirenko](https://github.com/AndrewSirenko))
-
 ## v2.39.3
 
 ### Urgent Upgrade Notes

View File

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.42.0
+appVersion: 1.39.0
 description: A Helm chart for AWS EBS CSI Driver
 home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver
 keywords:
@@ -13,4 +13,4 @@ maintainers:
 name: aws-ebs-csi-driver
 sources:
   - https://github.com/kubernetes-sigs/aws-ebs-csi-driver
-version: 2.42.0
+version: 2.39.3

View File

@@ -2,6 +2,6 @@ To verify that aws-ebs-csi-driver has started, run:
 
     kubectl get pod -n {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "aws-ebs-csi-driver.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"
 
-[Deprecation announcement] AWS Snow Family device support for the EBS CSI Driver
-Support for the EBS CSI Driver on [AWS Snow Family devices](https://aws.amazon.com/snowball/) is deprecated, effective immediately. No further Snow-specific bugfixes or feature requests will be merged. The existing functionality for Snow devices will be removed in the 1.44 release of the EBS CSI Driver. This announcement does not affect the support of the EBS CSI Driver on other platforms, such as [Amazon EC2](https://aws.amazon.com/ec2/) or EC2 on [AWS Outposts](https://aws.amazon.com/outposts/). For any questions related to this announcement, please comment on this issue [#2365](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/2365) or open a new issue.
+[ACTION REQUIRED] Update to the EBS CSI Driver IAM Policy
+Due to an upcoming change in handling of IAM policies for the CreateVolume API when creating a volume from an EBS snapshot, a change to your EBS CSI Driver policy may be needed. For more information and remediation steps, see GitHub issue #2190 (https://github.com/kubernetes-sigs/aws-ebs-csi-driver/issues/2190). This change affects all versions of the EBS CSI Driver and action may be required even on clusters where the driver is not upgraded.

View File

@@ -17,7 +17,7 @@ spec:
       app: {{ .NodeName }}
       {{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }}
   updateStrategy:
-    {{- toYaml .Values.node.updateStrategy | nindent 4 }}
+    {{ toYaml .Values.node.updateStrategy | nindent 4 }}
   template:
     metadata:
       labels:
@@ -111,11 +111,11 @@ spec:
               value: {{ .otelServiceName }}
             - name: OTEL_EXPORTER_OTLP_ENDPOINT
               value: {{ .otelExporterEndpoint }}
-          {{- end }}
           {{- if .Values.fips }}
            - name: AWS_USE_FIPS_ENDPOINT
              value: "true"
           {{- end }}
+          {{- end }}
           {{- with .Values.node.env }}
           {{- . | toYaml | nindent 12 }}
           {{- end }}

View File

@@ -429,9 +429,6 @@ spec:
           {{- if not (regexMatch "(-timeout)" (join " " .Values.sidecars.resizer.additionalArgs)) }}
           - --timeout=60s
           {{- end }}
-          {{- if .Values.controller.extraCreateMetadata }}
-          - --extra-modify-metadata
-          {{- end}}
           - --csi-address=$(ADDRESS)
           - --v={{ .Values.sidecars.resizer.logLevel }}
           - --handle-volume-inuse-error=false

View File

@@ -47,9 +47,6 @@ kind: Service
 metadata:
   name: ebs-csi-node
   namespace: {{ .Release.Namespace }}
-  annotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "3302"
   labels:
     app: ebs-csi-node
 spec:
@@ -59,6 +56,5 @@ spec:
     - name: metrics
       port: 3302
       targetPort: 3302
-  internalTrafficPolicy: Local
   type: ClusterIP
 {{- end }}

View File

@@ -11,9 +11,9 @@ image:
 customLabels: {}
 # k8s-app: aws-ebs-csi-driver
 
-# Instruct the AWS SDK to use AWS FIPS endpoints, and deploy container built with Boring Crypto (a FIPS-validated cryptographic library) instead of the Go default
+# Instruct the AWS SDK to use AWS FIPS endpoints, and deploy container built with BoringCrypto (a FIPS-validated cryptographic library) instead of the Go default
 #
-# The EBS CSI Driver FIPS images have not undergone FIPS certification, and no official guarantee is made about the compliance of these images under the FIPS standard
+# The EBS CSI Driver FIPS images have not undergone FIPS certification, and no official guarnatee is made about the compliance of these images under the FIPS standard
 # Users relying on these images for FIPS compliance should perform their own independent evaluation
 fips: false
 sidecars:
@@ -22,7 +22,7 @@ sidecars:
     image:
       pullPolicy: IfNotPresent
       repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
-      tag: "v5.2.0-eks-1-33-1"
+      tag: "v5.1.0-eks-1-31-12"
     logLevel: 2
     # Additional parameters provided by external-provisioner.
     additionalArgs: []
@@ -49,7 +49,7 @@ sidecars:
     image:
      pullPolicy: IfNotPresent
      repository: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher
-      tag: "v4.8.1-eks-1-33-1"
+      tag: "v4.8.0-eks-1-31-12"
    # Tune leader lease election for csi-attacher.
    # Leader election is on by default.
    leaderElection:
@@ -78,7 +78,7 @@ sidecars:
    image:
      pullPolicy: IfNotPresent
      repository: public.ecr.aws/eks-distro/kubernetes-csi/external-snapshotter/csi-snapshotter
-      tag: "v8.2.1-eks-1-33-1"
+      tag: "v8.2.0-eks-1-31-12"
    logLevel: 2
    # Additional parameters provided by csi-snapshotter.
    additionalArgs: []
@@ -94,7 +94,7 @@ sidecars:
    image:
      pullPolicy: IfNotPresent
      repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe
-      tag: "v2.15.0-eks-1-33-1"
+      tag: "v2.14.0-eks-1-31-12"
    # Additional parameters provided by livenessprobe.
    additionalArgs: []
    resources: {}
@@ -106,7 +106,7 @@ sidecars:
    image:
      pullPolicy: IfNotPresent
      repository: public.ecr.aws/eks-distro/kubernetes-csi/external-resizer
-      tag: "v1.13.2-eks-1-33-1"
+      tag: "v1.12.0-eks-1-31-11"
    # Tune leader lease election for csi-resizer.
    # Leader election is on by default.
    leaderElection:
@@ -133,7 +133,7 @@ sidecars:
    image:
      pullPolicy: IfNotPresent
      repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar
-      tag: "v2.13.0-eks-1-33-1"
+      tag: "v2.13.0-eks-1-31-12"
    logLevel: 2
    # Additional parameters provided by node-driver-registrar.
    additionalArgs: []
@@ -220,7 +220,7 @@ controller:
   env: []
   # Use envFrom to reference ConfigMaps and Secrets across all containers in the deployment
   envFrom: []
-  # If set, add pv/pvc metadata to plugin create and modify requests as parameters.
+  # If set, add pv/pvc metadata to plugin create requests as parameters.
   extraCreateMetadata: true
   # Extra volume tags to attach to each dynamically provisioned volume.
   # ---
@@ -337,7 +337,7 @@ controller:
   # Example:
   #
   # - name: wait
-  #   image: public.ecr.aws/amazonlinux/amazonlinux
+  #   image: busybox
   #   command: [ 'sh', '-c', "sleep 20" ]
   # Enable opentelemetry tracing for the plugin running on the daemonset
   otelTracing: {}
@@ -405,7 +405,7 @@ node:
   automountServiceAccountToken: true
   # Enable the linux daemonset creation
   enableLinux: true
-  enableWindows: true
+  enableWindows: false
   # Warning: This option will be removed in a future release. It is a temporary workaround for users unable to immediately migrate off of older kernel versions.
# Formats XFS volumes with bigtime=0,inobtcount=0,reflink=0, for mounting onto nodes with linux kernel version <= 5.4. # Formats XFS volumes with bigtime=0,inobtcount=0,reflink=0, for mounting onto nodes with linux kernel version <= 5.4.
# Note that XFS volumes formatted with this option will only have timestamp records until 2038. # Note that XFS volumes formatted with this option will only have timestamp records until 2038.
@ -454,7 +454,7 @@ node:
# Example: # Example:
# #
# - name: wait # - name: wait
# image: public.ecr.aws/amazonlinux/amazonlinux # image: busybox
# command: [ 'sh', '-c', "sleep 20" ] # command: [ 'sh', '-c', "sleep 20" ]
# Enable opentelemetry tracing for the plugin running on the daemonset # Enable opentelemetry tracing for the plugin running on the daemonset
otelTracing: {} otelTracing: {}
@ -511,4 +511,4 @@ nodeComponentOnly: false
helmTester: helmTester:
enabled: true enabled: true
# Supply a custom image to the ebs-csi-driver-test pod in helm-tester.yaml # Supply a custom image to the ebs-csi-driver-test pod in helm-tester.yaml
image: "us-central1-docker.pkg.dev/k8s-staging-test-infra/images/kubekins-e2e:v20250411-0688312353-master" image: "us-central1-docker.pkg.dev/k8s-staging-test-infra/images/kubekins-e2e:v20241230-3006692a6f-master"

View File

@ -1,4 +1,38 @@
# Helm chart # Helm chart
# v3.1.6
* Bump app/driver version to `v2.1.5`
# v3.1.5
* Bump app/driver version to `v2.1.4`
# v3.1.4
* Bump app/driver version to `v2.1.3`
# v3.1.3
* Bump app/driver version to `v2.1.2`
# v3.1.2
* Bump app/driver version to `v2.1.1`
# v3.1.1
* Bump app/driver version to `v2.1.0`
# v3.1.0
* Bump app/driver version to `v2.0.9`
# v3.0.9
* Bump app/driver version to `v2.0.8`
# v3.0.8
* Bump app/driver version to `v2.0.7`
# v3.0.7
* Bump app/driver version to `v2.0.6`
# v3.0.6
* Bump app/driver version to `v2.0.5`
# v3.0.5
* Bump app/driver version to `v2.0.4`
# v3.0.4
* Bump app/driver version to `v2.0.3`
# v3.0.3
* Bump app/driver version to `v2.0.2`
# v3.0.2
* Update Helm to use the image from Public ECR rather than DockerHub
# v3.0.1
* Bump app/driver version to `v2.0.1`
# v3.0.0
* Bump app/driver version to `v2.0.0`
# v2.5.7 # v2.5.7
* Bump app/driver version to `v1.7.7` * Bump app/driver version to `v1.7.7`
# v2.5.6 # v2.5.6
@ -210,4 +244,4 @@ for Controller deployment and Node daemonset
* Fixing Controller deployment using `podAnnotations` and `tolerations` values from Node daemonset * Fixing Controller deployment using `podAnnotations` and `tolerations` values from Node daemonset
* Let the user define the whole `tolerations` array, default to `- operator: Exists` * Let the user define the whole `tolerations` array, default to `- operator: Exists`
* Default `logLevel` lowered from `5` to `2` * Default `logLevel` lowered from `5` to `2`
* Default `imagePullPolicy` everywhere set to `IfNotPresent` * Default `imagePullPolicy` everywhere set to `IfNotPresent`

View File

@ -1,5 +1,5 @@
apiVersion: v2 apiVersion: v2
appVersion: 1.7.7 appVersion: 2.1.5
description: A Helm chart for AWS EFS CSI Driver description: A Helm chart for AWS EFS CSI Driver
home: https://github.com/kubernetes-sigs/aws-efs-csi-driver home: https://github.com/kubernetes-sigs/aws-efs-csi-driver
keywords: keywords:
@ -15,4 +15,4 @@ maintainers:
name: aws-efs-csi-driver name: aws-efs-csi-driver
sources: sources:
- https://github.com/kubernetes-sigs/aws-efs-csi-driver - https://github.com/kubernetes-sigs/aws-efs-csi-driver
version: 2.5.7 version: 3.1.6

View File

@ -3,17 +3,18 @@
kind: Deployment kind: Deployment
apiVersion: apps/v1 apiVersion: apps/v1
metadata: metadata:
name: efs-csi-controller name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
labels: labels:
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
{{- with .Values.controller.additionalLabels }} {{- with .Values.controller.additionalLabels }}
{{ toYaml . | nindent 4 }} {{ toYaml . | nindent 4 }}
{{- end }} {{- end }}
spec: spec:
replicas: {{ .Values.replicaCount }} replicas: {{ .Values.controller.replicaCount }}
selector: selector:
matchLabels: matchLabels:
app: efs-csi-controller app: {{ .Values.controller.name }}
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.controller.updateStrategy }} {{- with .Values.controller.updateStrategy }}
@ -23,7 +24,7 @@ spec:
template: template:
metadata: metadata:
labels: labels:
app: efs-csi-controller app: {{ .Values.controller.name }}
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.controller.podLabels }} {{- with .Values.controller.podLabels }}
@ -93,14 +94,17 @@ spec:
- name: AWS_USE_FIPS_ENDPOINT - name: AWS_USE_FIPS_ENDPOINT
value: "true" value: "true"
{{- end }} {{- end }}
{{- if .Values.controller.extraEnv }} - name: PORT_RANGE_UPPER_BOUND
{{- toYaml .Values.controller.extraEnv | nindent 12 }} value: "{{ .Values.portRangeUpperBound }}"
{{- with .Values.controller.env }}
{{- toYaml . | nindent 12 }}
{{- end }} {{- end }}
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/ mountPath: /var/lib/csi/sockets/pluginproxy/
- name: aws-token {{- with .Values.controller.volumeMounts }}
mountPath: /var/run/secrets/sts.amazonaws.com/serviceaccount/ {{- toYaml . | nindent 12 }}
{{- end }}
ports: ports:
- name: healthz - name: healthz
containerPort: {{ .Values.controller.healthPort }} containerPort: {{ .Values.controller.healthPort }}
@ -133,13 +137,16 @@ spec:
{{- if hasKey .Values.controller "leaderElectionLeaseDuration" }} {{- if hasKey .Values.controller "leaderElectionLeaseDuration" }}
- --leader-election-lease-duration={{ .Values.controller.leaderElectionLeaseDuration }} - --leader-election-lease-duration={{ .Values.controller.leaderElectionLeaseDuration }}
{{- end }} {{- end }}
{{- range .Values.sidecars.csiProvisioner.additionalArgs }}
- {{ . }}
{{- end }}
env: env:
- name: ADDRESS - name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/ mountPath: /var/lib/csi/sockets/pluginproxy/
{{- with .Values.sidecars.csiProvisioner.resources }} {{- with default .Values.controller.resources .Values.sidecars.csiProvisioner.resources }}
resources: {{ toYaml . | nindent 12 }} resources: {{ toYaml . | nindent 12 }}
{{- end }} {{- end }}
{{- with .Values.sidecars.csiProvisioner.securityContext }} {{- with .Values.sidecars.csiProvisioner.securityContext }}
@ -155,7 +162,10 @@ spec:
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /csi mountPath: /csi
{{- with .Values.sidecars.livenessProbe.resources }} {{- with .Values.controller.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with default .Values.controller.resources .Values.sidecars.livenessProbe.resources }}
resources: {{ toYaml . | nindent 12 }} resources: {{ toYaml . | nindent 12 }}
{{- end }} {{- end }}
{{- with .Values.sidecars.livenessProbe.securityContext }} {{- with .Values.sidecars.livenessProbe.securityContext }}
@ -165,14 +175,19 @@ spec:
volumes: volumes:
- name: socket-dir - name: socket-dir
emptyDir: {} emptyDir: {}
- name: aws-token {{- with .Values.controller.volumes }}
projected: {{- toYaml . | nindent 8 }}
sources: {{- end }}
- serviceAccountToken:
path: token
expirationSeconds: 86400
audience: "sts.amazonaws.com"
{{- with .Values.controller.affinity }} {{- with .Values.controller.affinity }}
affinity: {{- toYaml . | nindent 8 }} affinity: {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- if .Values.controller.topologySpreadConstraints }}
{{- $tscLabelSelector := dict "labelSelector" ( dict "matchLabels" ( dict "app" "efs-csi-controller" ) ) }}
{{- $constraints := list }}
{{- range .Values.controller.topologySpreadConstraints }}
{{- $constraints = mustAppend $constraints (mergeOverwrite . $tscLabelSelector) }}
{{- end }}
topologySpreadConstraints:
{{- $constraints | toYaml | nindent 8 }}
{{- end }}
{{- end }} {{- end }}
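The loop above injects a fixed `labelSelector` into every user-supplied constraint via `mergeOverwrite`, so values only carry the scheduling half. A sketch of input and expected rendered output:

```
# values.yaml
controller:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: ScheduleAnyway

# rendered by the template above:
# topologySpreadConstraints:
#   - maxSkew: 1
#     topologyKey: topology.kubernetes.io/zone
#     whenUnsatisfiable: ScheduleAnyway
#     labelSelector:
#       matchLabels:
#         app: efs-csi-controller
```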

View File

@ -0,0 +1,24 @@
{{- if .Values.controller.podDisruptionBudget.enabled -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "aws-efs-csi-driver.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
app: {{ .Values.controller.name }}
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.controller.podDisruptionBudget.unhealthyPodEvictionPolicy }}
unhealthyPodEvictionPolicy: {{ .Values.controller.podDisruptionBudget.unhealthyPodEvictionPolicy }}
{{- end }}
{{- if .Values.controller.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }}
{{- end }}
{{- if .Values.controller.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.controller.podDisruptionBudget.minAvailable }}
{{- end }}
{{- end -}}
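A minimal values sketch enabling this PDB — the keys mirror the defaults added to values.yaml below; note that `minAvailable` and `maxUnavailable` are mutually exclusive in the Kubernetes API, which is why the template guards each separately:

```
controller:
  podDisruptionBudget:
    enabled: true
    minAvailable: 1
    unhealthyPodEvictionPolicy: IfHealthyBudget
```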

View File

@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: {{ .Values.controller.serviceAccount.name }} name: {{ .Values.controller.serviceAccount.name }}
namespace: {{ .Release.Namespace }}
labels: labels:
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
{{- with .Values.controller.serviceAccount.annotations }} {{- with .Values.controller.serviceAccount.annotations }}
@ -21,7 +22,7 @@ metadata:
rules: rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["persistentvolumes"] resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"] verbs: ["get", "list", "watch", "create", "patch", "delete"]
- apiGroups: [""] - apiGroups: [""]
resources: ["persistentvolumeclaims"] resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"] verbs: ["get", "list", "watch", "update"]
@ -74,6 +75,7 @@ kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: efs-csi-provisioner-binding-describe-secrets name: efs-csi-provisioner-binding-describe-secrets
namespace: {{ .Release.Namespace }}
labels: labels:
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
subjects: subjects:

View File

@ -3,8 +3,10 @@ kind: CSIDriver
metadata: metadata:
name: efs.csi.aws.com name: efs.csi.aws.com
annotations: annotations:
{{- if .Values.useHelmHooksForCSIDriver }}
"helm.sh/hook": pre-install, pre-upgrade "helm.sh/hook": pre-install, pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation "helm.sh/hook-delete-policy": before-hook-creation
{{- end }}
"helm.sh/resource-policy": keep "helm.sh/resource-policy": keep
spec: spec:
attachRequired: false attachRequired: false
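The new guard lets GitOps tools that conflict with Helm hooks (e.g. Argo CD) render the CSIDriver as a plain manifest; a one-line values override:

```
useHelmHooksForCSIDriver: false
```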

View File

@ -3,8 +3,12 @@ kind: DaemonSet
apiVersion: apps/v1 apiVersion: apps/v1
metadata: metadata:
name: efs-csi-node name: efs-csi-node
namespace: {{ .Release.Namespace }}
labels: labels:
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
{{- with .Values.node.additionalLabels }}
{{ toYaml . | nindent 4 }}
{{- end }}
spec: spec:
selector: selector:
matchLabels: matchLabels:
@ -21,6 +25,9 @@ spec:
app: efs-csi-node app: efs-csi-node
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.node.podLabels }}
{{ toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.node.podAnnotations }} {{- if .Values.node.podAnnotations }}
annotations: {{ toYaml .Values.node.podAnnotations | nindent 8 }} annotations: {{ toYaml .Values.node.podAnnotations | nindent 8 }}
{{- end }} {{- end }}
@ -53,7 +60,7 @@ spec:
dnsConfig: {{- toYaml . | nindent 8 }} dnsConfig: {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
serviceAccountName: {{ .Values.node.serviceAccount.name }} serviceAccountName: {{ .Values.node.serviceAccount.name }}
priorityClassName: system-node-critical priorityClassName: {{ .Values.node.priorityClassName }}
{{- with .Values.node.tolerations }} {{- with .Values.node.tolerations }}
tolerations: {{- toYaml . | nindent 8 }} tolerations: {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
@ -85,9 +92,14 @@ spec:
- name: AWS_USE_FIPS_ENDPOINT - name: AWS_USE_FIPS_ENDPOINT
value: "true" value: "true"
{{- end }} {{- end }}
- name: PORT_RANGE_UPPER_BOUND
value: "{{ .Values.portRangeUpperBound }}"
{{- with .Values.node.env }}
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts: volumeMounts:
- name: kubelet-dir - name: kubelet-dir
mountPath: /var/lib/kubelet mountPath: {{ .Values.node.kubeletPath }}
mountPropagation: "Bidirectional" mountPropagation: "Bidirectional"
- name: plugin-dir - name: plugin-dir
mountPath: /csi mountPath: /csi
@ -97,6 +109,9 @@ spec:
mountPath: /var/amazon/efs mountPath: /var/amazon/efs
- name: efs-utils-config-legacy - name: efs-utils-config-legacy
mountPath: /etc/amazon/efs-legacy mountPath: /etc/amazon/efs-legacy
{{- with .Values.node.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
ports: ports:
- name: healthz - name: healthz
containerPort: {{ .Values.node.healthPort }} containerPort: {{ .Values.node.healthPort }}
@ -123,7 +138,7 @@ spec:
- name: ADDRESS - name: ADDRESS
value: /csi/csi.sock value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH - name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/efs.csi.aws.com/csi.sock value: {{ printf "%s/plugins/efs.csi.aws.com/csi.sock" (trimSuffix "/" .Values.node.kubeletPath) }}
- name: KUBE_NODE_NAME - name: KUBE_NODE_NAME
valueFrom: valueFrom:
fieldRef: fieldRef:
@ -160,15 +175,15 @@ spec:
volumes: volumes:
- name: kubelet-dir - name: kubelet-dir
hostPath: hostPath:
path: /var/lib/kubelet path: {{ .Values.node.kubeletPath }}
type: Directory type: Directory
- name: plugin-dir - name: plugin-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins/efs.csi.aws.com/ path: {{ printf "%s/plugins/efs.csi.aws.com/" (trimSuffix "/" .Values.node.kubeletPath) }}
type: DirectoryOrCreate type: DirectoryOrCreate
- name: registration-dir - name: registration-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins_registry/ path: {{ printf "%s/plugins_registry/" (trimSuffix "/" .Values.node.kubeletPath) }}
type: Directory type: Directory
- name: efs-state-dir - name: efs-state-dir
hostPath: hostPath:
@ -182,3 +197,6 @@ spec:
hostPath: hostPath:
path: /etc/amazon/efs path: /etc/amazon/efs
type: DirectoryOrCreate type: DirectoryOrCreate
{{- with .Values.node.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
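Because every kubelet-relative path is now derived from `node.kubeletPath` via `trimSuffix`/`printf`, distributions with a non-standard kubelet root need only one override; the k0s path below is illustrative:

```
node:
  kubeletPath: /var/lib/k0s/kubelet
# DRIVER_REG_SOCK_PATH then renders as
#   /var/lib/k0s/kubelet/plugins/efs.csi.aws.com/csi.sock
```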

View File

@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: {{ .Values.node.serviceAccount.name }} name: {{ .Values.node.serviceAccount.name }}
namespace: {{ .Release.Namespace }}
labels: labels:
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }} app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
{{- with .Values.node.serviceAccount.annotations }} {{- with .Values.node.serviceAccount.annotations }}

View File

@ -5,20 +5,20 @@
nameOverride: "" nameOverride: ""
fullnameOverride: "" fullnameOverride: ""
replicaCount: 2
useFIPS: false useFIPS: false
portRangeUpperBound: "21049"
image: image:
repository: amazon/aws-efs-csi-driver repository: public.ecr.aws/efs-csi-driver/amazon/aws-efs-csi-driver
tag: "v1.7.7" tag: "v2.1.5"
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
sidecars: sidecars:
livenessProbe: livenessProbe:
image: image:
repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe
tag: v2.11.0-eks-1-29-2 tag: v2.14.0-eks-1-31-5
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
resources: {} resources: {}
securityContext: securityContext:
@ -27,7 +27,7 @@ sidecars:
nodeDriverRegistrar: nodeDriverRegistrar:
image: image:
repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar
tag: v2.9.3-eks-1-29-2 tag: v2.12.0-eks-1-31-5
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
resources: {} resources: {}
securityContext: securityContext:
@ -36,12 +36,13 @@ sidecars:
csiProvisioner: csiProvisioner:
image: image:
repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
tag: v3.6.3-eks-1-29-2 tag: v5.1.0-eks-1-31-5
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
resources: {} resources: {}
securityContext: securityContext:
readOnlyRootFilesystem: true readOnlyRootFilesystem: true
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
additionalArgs: []
imagePullSecrets: [] imagePullSecrets: []
@ -50,6 +51,10 @@ imagePullSecrets: []
controller: controller:
# Specifies whether a deployment should be created # Specifies whether a deployment should be created
create: true create: true
# Name of the CSI controller service
name: efs-csi-controller
# Number of replicas for the CSI controller service deployment
replicaCount: 2
# Number for the log level verbosity # Number for the log level verbosity
logLevel: 2 logLevel: 2
# If set, add pv/pvc metadata to plugin create requests as parameters. # If set, add pv/pvc metadata to plugin create requests as parameters.
@ -63,7 +68,7 @@ controller:
# path on EFS when deleting an access point # path on EFS when deleting an access point
deleteAccessPointRootDir: false deleteAccessPointRootDir: false
podAnnotations: {} podAnnotations: {}
podLabel: {} podLabels: {}
hostNetwork: false hostNetwork: false
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
dnsPolicy: ClusterFirst dnsPolicy: ClusterFirst
@ -89,6 +94,9 @@ controller:
- key: efs.csi.aws.com/agent-not-ready - key: efs.csi.aws.com/agent-not-ready
operator: Exists operator: Exists
affinity: {} affinity: {}
env: []
volumes: []
volumeMounts: []
# Specifies whether a service account should be created # Specifies whether a service account should be created
serviceAccount: serviceAccount:
create: true create: true
@ -98,6 +106,12 @@ controller:
# eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/efs-csi-role # eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/efs-csi-role
healthPort: 9909 healthPort: 9909
regionalStsEndpoints: false regionalStsEndpoints: false
# Pod Disruption Budget
podDisruptionBudget:
enabled: false
# maxUnavailable: 1
minAvailable: 1
unhealthyPodEvictionPolicy: IfHealthyBudget
# securityContext on the controller pod # securityContext on the controller pod
securityContext: securityContext:
runAsNonRoot: false runAsNonRoot: false
@ -110,7 +124,18 @@ controller:
privileged: true privileged: true
leaderElectionRenewDeadline: 10s leaderElectionRenewDeadline: 10s
leaderElectionLeaseDuration: 15s leaderElectionLeaseDuration: 15s
# TSCs without the label selector stanza
#
# Example:
#
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# whenUnsatisfiable: ScheduleAnyway
# - maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: ScheduleAnyway
topologySpreadConstraints: []
## Node daemonset variables ## Node daemonset variables
@ -130,6 +155,7 @@ node:
# "fs-01234567": # "fs-01234567":
# ip: 10.10.2.2 # ip: 10.10.2.2
# region: us-east-2 # region: us-east-2
priorityClassName: system-node-critical
dnsPolicy: ClusterFirst dnsPolicy: ClusterFirst
dnsConfig: dnsConfig:
{} {}
@ -138,7 +164,9 @@ node:
# dnsConfig: # dnsConfig:
# nameservers: # nameservers:
# - 169.254.169.253 # - 169.254.169.253
podLabels: {}
podAnnotations: {} podAnnotations: {}
additionalLabels: {}
resources: resources:
{} {}
# limits: # limits:
@ -148,7 +176,8 @@ node:
# cpu: 100m # cpu: 100m
# memory: 128Mi # memory: 128Mi
nodeSelector: {} nodeSelector: {}
updateStrategy: {} updateStrategy:
{}
# Override default strategy (RollingUpdate) to speed up deployment. # Override default strategy (RollingUpdate) to speed up deployment.
# This can be useful if helm timeouts are observed. # This can be useful if helm timeouts are observed.
# type: OnDelete # type: OnDelete
@ -163,6 +192,7 @@ node:
operator: NotIn operator: NotIn
values: values:
- fargate - fargate
- hybrid
# Specifies whether a service account should be created # Specifies whether a service account should be created
serviceAccount: serviceAccount:
create: true create: true
@ -178,6 +208,10 @@ node:
runAsUser: 0 runAsUser: 0
runAsGroup: 0 runAsGroup: 0
fsGroup: 0 fsGroup: 0
env: []
volumes: []
volumeMounts: []
kubeletPath: /var/lib/kubelet
storageClasses: [] storageClasses: []
# Add StorageClass resources like: # Add StorageClass resources like:
@ -198,3 +232,6 @@ storageClasses: []
# ensureUniqueDirectory: true # ensureUniqueDirectory: true
# reclaimPolicy: Delete # reclaimPolicy: Delete
# volumeBindingMode: Immediate # volumeBindingMode: Immediate
# Specifies whether to use Helm hooks to apply the CSI driver
useHelmHooksForCSIDriver: true

View File

@ -18,7 +18,7 @@
"subdir": "contrib/mixin" "subdir": "contrib/mixin"
} }
}, },
"version": "8f933a5b5867d078c714fd6a9584aa47f450d8d0", "version": "8c52b414f324d6369b77096af98d8f0416fe20cb",
"sum": "XmXkOCriQIZmXwlIIFhqlJMa0e6qGWdxZD+ZDYaN0Po=" "sum": "XmXkOCriQIZmXwlIIFhqlJMa0e6qGWdxZD+ZDYaN0Po="
}, },
{ {
@ -78,18 +78,8 @@
"subdir": "grafana-builder" "subdir": "grafana-builder"
} }
}, },
"version": "42da78cf7f2735c0cf57dee8f80cc52e9e7e57d8", "version": "393630ca7ba9b25258c098f1fd4c81962e3ca046",
"sum": "G7B6E5sqWirDbMWRhifbLRfGgRFbIh9WCYa6X3kMh6g=" "sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/jsonnet-libs.git",
"subdir": "mixin-utils"
}
},
"version": "42da78cf7f2735c0cf57dee8f80cc52e9e7e57d8",
"sum": "SRElwa/XrKAN8aZA9zvdRUx8iebl2It7KNQ7VFvMcBA="
}, },
{ {
"source": { "source": {
@ -108,8 +98,8 @@
"subdir": "" "subdir": ""
} }
}, },
"version": "4eee017d21cb63a303925d1dcd9fc5c496809b46", "version": "1199b50e9d2ff53d4bb5fb2304ad1fb69d38e609",
"sum": "Kh0GbIycNmJPzk6IOMXn1BbtLNyaiiimclYk7+mvsns=" "sum": "LfbgcJbilu4uBdKYZSvmkoOTPwEAzg10L3/VqKAIWtA="
}, },
{ {
"source": { "source": {
@ -118,8 +108,8 @@
"subdir": "" "subdir": ""
} }
}, },
"version": "aad557d746a4e05d028a2ce542f61dde3b13c621", "version": "4ff562d5e8145940cf355f62cf2308895c4dca81",
"sum": "H+gpR450rmG2/USp9Y4vMfiz9FCUhKiG7xgqPNB1FJk=" "sum": "kiL19fTbXOtNglsmT62kOzIf/Xpu+YwoiMPAApDXhkE="
}, },
{ {
"source": { "source": {
@ -128,7 +118,7 @@
"subdir": "jsonnet/kube-state-metrics" "subdir": "jsonnet/kube-state-metrics"
} }
}, },
"version": "0b01e3abce1da521b5e620b8aaa76774bb0fda87", "version": "2a95d4649b2fea55799032fb9c0b571c4ba7f776",
"sum": "3bioG7CfTfY9zeu5xU4yon6Zt3kYvNkyl492nOhQxnM=" "sum": "3bioG7CfTfY9zeu5xU4yon6Zt3kYvNkyl492nOhQxnM="
}, },
{ {
@ -138,7 +128,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin" "subdir": "jsonnet/kube-state-metrics-mixin"
} }
}, },
"version": "0b01e3abce1da521b5e620b8aaa76774bb0fda87", "version": "2a95d4649b2fea55799032fb9c0b571c4ba7f776",
"sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c=" "sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
}, },
{ {
@ -148,8 +138,8 @@
"subdir": "" "subdir": ""
} }
}, },
"version": "9abc7566be4b58233d7b2aa29665bf47425b30e6", "version": "d2dc72021d0247a5199007ed6e425d4615f9fa5c",
"sum": "lL17qG4Ejhae7giWBzD2y6HDSxaNgkg8kX7p0i4eUNA=" "sum": "rHh5ItS3fs1kwz8GKNEPiBBn58m4Bn5v9KAdBU+tf1U="
}, },
{ {
"source": { "source": {
@ -158,8 +148,8 @@
"subdir": "jsonnet/kube-prometheus" "subdir": "jsonnet/kube-prometheus"
} }
}, },
"version": "696ce89f1f4d9107bd3a3b026178b320bac03b8e", "version": "1eea946a1532f1e8cccfceea98d907bf3a10b1d9",
"sum": "NYKZ3k27E/3sk27DCNct1X7gqv8tmYxqACnOm96W7pc=" "sum": "17LhiwefVfoNDsF3DcFZw/UL4PMU7YpNNUaOdaYd1gE="
}, },
{ {
"source": { "source": {
@ -168,7 +158,7 @@
"subdir": "jsonnet/mixin" "subdir": "jsonnet/mixin"
} }
}, },
"version": "8ce76ccb32d054cb26898f498ec6bc947cd87d6c", "version": "7deab71d6d5921eeaf8c79e3ae8e31efe63783a9",
"sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=", "sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
"name": "prometheus-operator-mixin" "name": "prometheus-operator-mixin"
}, },
@ -179,8 +169,8 @@
"subdir": "jsonnet/prometheus-operator" "subdir": "jsonnet/prometheus-operator"
} }
}, },
"version": "8ce76ccb32d054cb26898f498ec6bc947cd87d6c", "version": "7deab71d6d5921eeaf8c79e3ae8e31efe63783a9",
"sum": "D8bNt3/sB6EO2AirgMZDt1M/5MwbLMpiQtKqCzfTrE4=" "sum": "LctDdofQostvviE5y8vpRKWGGO1ZKO3dgJe7P9xifW0="
}, },
{ {
"source": { "source": {
@ -189,8 +179,8 @@
"subdir": "doc/alertmanager-mixin" "subdir": "doc/alertmanager-mixin"
} }
}, },
"version": "79805945102a7ba3566f38a627ca3f1edd27756e", "version": "b5d1a64ad5bb0ff879705714d1e40cea82efbd5c",
"sum": "j5prvRrJdoCv7n45l5Uy2ghl1IDb9BBUqjwCDs4ZJoQ=", "sum": "Mf4h1BYLle2nrgjf/HXrBbl0Zk8N+xaoEM017o0BC+k=",
"name": "alertmanager" "name": "alertmanager"
}, },
{ {
@ -200,8 +190,8 @@
"subdir": "docs/node-mixin" "subdir": "docs/node-mixin"
} }
}, },
"version": "38d32a397720dfdaf547429ea1b40ab8cfa57e85", "version": "11365f97bef6cb0e6259d536a7e21c49e3f5c065",
"sum": "NcpQ0Hz0qciUqmOYoAR0X8GUK5pH/QiUXm1aDNgvua0=" "sum": "xYj6VYFT/eafsbleNlC+Z2VfLy1CndyYrJs9BcTmnX8="
}, },
{ {
"source": { "source": {
@ -210,7 +200,7 @@
"subdir": "documentation/prometheus-mixin" "subdir": "documentation/prometheus-mixin"
} }
}, },
"version": "9659e30dec7073703fb8548e7b0ad80dd0df48f0", "version": "a5ffa83be83be22e2ec9fd1d4765299d8d16119e",
"sum": "2c+wttfee9TwuQJZIkNV7Tekem74Qgc7iZ842P28rNw=", "sum": "2c+wttfee9TwuQJZIkNV7Tekem74Qgc7iZ842P28rNw=",
"name": "prometheus" "name": "prometheus"
}, },
@ -232,7 +222,7 @@
"subdir": "mixin" "subdir": "mixin"
} }
}, },
"version": "7d7ea650b76cd201de8ee2c73f31497914026293", "version": "346d18bb0f8011c63d7106de494cf3b9253161a1",
"sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=", "sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=",
"name": "thanos-mixin" "name": "thanos-mixin"
} }

View File

@ -1432,9 +1432,6 @@ spec:
type: object type: object
type: array type: array
type: object type: object
clusterName:
description: ClusterName sets the kubernetes cluster name to send to pushgateway for grouping metrics
type: string
failedJobsHistoryLimit: failedJobsHistoryLimit:
description: |- description: |-
FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. FailedJobsHistoryLimit amount of failed jobs to keep for later analysis.
@ -1447,56 +1444,6 @@ spec:
Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively.
type: integer type: integer
labelSelectors:
description: |-
LabelSelectors is a list of selectors that we filter for.
When defined, only PVCs and PreBackupPods matching them are backed up.
items:
description: |-
A label selector is a label query over a set of resources. The result of matchLabels and
matchExpressions are ANDed. An empty label selector matches all objects. A null
label selector matches no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: array
podConfigRef: podConfigRef:
description: |- description: |-
PodConfigRef describes the pod spec with which this action shall be executed. PodConfigRef describes the pod spec with which this action shall be executed.
@ -2399,9 +2346,6 @@ spec:
type: object type: object
type: array type: array
type: object type: object
clusterName:
description: ClusterName sets the kubernetes cluster name to send to pushgateway for grouping metrics
type: string
failedJobsHistoryLimit: failedJobsHistoryLimit:
description: |- description: |-
FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. FailedJobsHistoryLimit amount of failed jobs to keep for later analysis.
@ -20774,9 +20718,6 @@ spec:
type: object type: object
type: array type: array
type: object type: object
clusterName:
description: ClusterName sets the kubernetes cluster name to send to pushgateway for grouping metrics
type: string
concurrentRunsAllowed: concurrentRunsAllowed:
type: boolean type: boolean
failedJobsHistoryLimit: failedJobsHistoryLimit:
@ -20791,56 +20732,6 @@ spec:
Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively.
type: integer type: integer
labelSelectors:
description: |-
LabelSelectors is a list of selectors that we filter for.
When defined, only PVCs and PreBackupPods matching them are backed up.
items:
description: |-
A label selector is a label query over a set of resources. The result of matchLabels and
matchExpressions are ANDed. An empty label selector matches all objects. A null
label selector matches no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: array
podConfigRef: podConfigRef:
description: |- description: |-
PodConfigRef describes the pod spec with which this action shall be executed. PodConfigRef describes the pod spec with which this action shall be executed.
@ -21613,9 +21504,6 @@ spec:
type: object type: object
type: array type: array
type: object type: object
clusterName:
description: ClusterName sets the kubernetes cluster name to send to pushgateway for grouping metrics
type: string
concurrentRunsAllowed: concurrentRunsAllowed:
type: boolean type: boolean
failedJobsHistoryLimit: failedJobsHistoryLimit:
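For context, the removed `labelSelectors` field was set on backup specs roughly as below — a hedged sketch assuming the k8up `v1` API group, since `PreBackupPods` is k8up terminology; when present, only matching PVCs and PreBackupPods were backed up:

```
apiVersion: k8up.io/v1
kind: Backup
metadata:
  name: nightly
spec:
  labelSelectors:
    - matchLabels:
        app: postgres
```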

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero name: kubezero
description: KubeZero - Root App of Apps chart description: KubeZero - Root App of Apps chart
type: application type: application
version: 1.32.3 version: 1.31.6
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -15,4 +15,4 @@ dependencies:
- name: kubezero-lib - name: kubezero-lib
version: 0.2.1 version: 0.2.1
repository: https://cdn.zero-downtime.net/charts repository: https://cdn.zero-downtime.net/charts
kubeVersion: ">= 1.32.0-0" kubeVersion: ">= 1.31.0-0"

View File

@ -1,6 +1,6 @@
# kubezero # kubezero
![Version: 1.32.3](https://img.shields.io/badge/Version-1.32.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 1.31.6](https://img.shields.io/badge/Version-1.31.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart KubeZero - Root App of Apps chart
@ -14,7 +14,7 @@ KubeZero - Root App of Apps chart
## Requirements ## Requirements
Kubernetes: `>= 1.32.0-0` Kubernetes: `>= 1.31.0-0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
@ -38,15 +38,14 @@ Kubernetes: `>= 1.32.0-0`
| argo.argocd-image-updater.enabled | bool | `false` | | | argo.argocd-image-updater.enabled | bool | `false` | |
| argo.enabled | bool | `false` | | | argo.enabled | bool | `false` | |
| argo.namespace | string | `"argocd"` | | | argo.namespace | string | `"argocd"` | |
| argo.targetRevision | string | `"0.3.2"` | | | argo.targetRevision | string | `"0.3.1"` | |
| cert-manager.enabled | bool | `false` | | | cert-manager.enabled | bool | `false` | |
| cert-manager.namespace | string | `"cert-manager"` | | | cert-manager.namespace | string | `"cert-manager"` | |
| cert-manager.targetRevision | string | `"0.9.12"` | | | cert-manager.targetRevision | string | `"0.9.12"` | |
| falco.enabled | bool | `false` | | | falco.enabled | bool | `false` | |
| falco.k8saudit.enabled | bool | `false` | | | falco.k8saudit.enabled | bool | `false` | |
| falco.targetRevision | string | `"0.1.2"` | | | falco.targetRevision | string | `"0.1.2"` | |
| global.aws.accountId | string | `"123456789012"` | | | global.aws | object | `{}` | |
| global.aws.region | string | `"the-moon"` | |
| global.clusterName | string | `"zdt-trial-cluster"` | | | global.clusterName | string | `"zdt-trial-cluster"` | |
| global.gcp | object | `{}` | | | global.gcp | object | `{}` | |
| global.highAvailable | bool | `false` | | | global.highAvailable | bool | `false` | |

View File

@ -13,7 +13,7 @@ global:
addons: addons:
enabled: true enabled: true
targetRevision: 0.8.14 targetRevision: 0.8.13
external-dns: external-dns:
enabled: false enabled: false
forseti: forseti:
@ -32,7 +32,7 @@ addons:
network: network:
enabled: true enabled: true
retain: true retain: true
targetRevision: 0.5.8 targetRevision: 0.5.7
cilium: cilium:
cluster: {} cluster: {}
@ -43,7 +43,7 @@ cert-manager:
storage: storage:
enabled: false enabled: false
targetRevision: 0.8.11 targetRevision: 0.8.10
lvm-localpv: lvm-localpv:
enabled: false enabled: false
aws-ebs-csi-driver: aws-ebs-csi-driver: