chore(deps): update kubezero-redis-dependencies (major) - autoclosed #324

Closed
renovate wants to merge 0 commits from renovate/kubezero-redis-major-kubezero-redis-dependencies into main
115 changed files with 3627 additions and 868 deletions

.gitattributes (vendored)
```diff
@@ -1 +0,0 @@
-*.png filter=lfs diff=lfs merge=lfs -text
```

CHANGELOG.md (new file, +46 lines)
# Changelog
## KubeZero - 2.18 ( Argoless )
### High level / Admin changes
- ArgoCD is now optional and neither required nor used during the initial cluster bootstrap
- the bootstrap process now uses the same config and templates as the optional ArgoCD applications later on
- the bootstrap can now be restarted at any time and is considerably faster
- the top-level KubeZero config for the ArgoCD app-of-apps is now also maintained via the GitOps workflow; changes can be applied by a simple git push rather than manual scripts
### Calico
- version bump
### Cert-manager
- local issuers are now cluster issuers so they can be used across namespaces
- all cert-manager resources moved into the cert-manager namespace
- version bump to 1.10
### Kiam
- set priority class to cluster essential
- certificates are now issued by the cluster issuer
### EBS / EFS
- version bump
### Istio
- istio operator removed, deployment migrated to helm, various cleanups
- version bump to 1.8
- all ingress resources are now in the dedicated new namespace istio-ingress (deployed via the separate kubezero chart istio-ingress)
- set priority class of ingress components to cluster essential
### Logging
- ES/Kibana version bump to 7.10
- ECK operator is now installed on demand in the logging namespace
- custom event fields configurable via the new fluent-bit chart; e.g. a clustername can be added to each event, allowing easy filtering when multiple clusters stream events into a single central ES cluster (see the sketch after this changelog)
### ArgoCD
- version bump, new app-of-apps architecture
### Metrics
- version bump
- all servicemonitor resources are now in the same namespaces as the respective apps to avoid deployments across multiple namespaces
### upstream Kubernetes 1.18
https://sysdig.com/blog/whats-new-kubernetes-1-18/
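The custom event fields mentioned under Logging can be sketched with a fluent-bit `record_modifier` filter. The values layout below is an assumption for illustration only, not taken from the actual chart:

```bash
# Hypothetical fluent-bit chart values: statically attach a clustername
# field to every event (config.filters as a values key is an assumption).
cat > fluent-bit-values.yaml <<'EOF'
config:
  filters: |
    [FILTER]
        Name   record_modifier
        Match  *
        Record clustername cluster-01
EOF
helm upgrade -i fluent-bit kubezero/fluent-bit -f fluent-bit-values.yaml
```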

```diff
@@ -3,9 +3,9 @@ ARG ALPINE_VERSION=3.20
 FROM docker.io/alpine:${ALPINE_VERSION}
 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.30.5
-ARG SECRETS_VERSION=4.6.1
-ARG VALS_VERSION=0.37.5
+ARG KUBE_VERSION=1.29.7
+ARG SECRETS_VERSION=4.6.0
+ARG VALS_VERSION=0.37.3
 
 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@@ -22,11 +22,11 @@ RUN cd /etc/apk/keys && \
     py3-yaml \
     restic \
     helm \
-    etcd-ctl@edge-community \
     cri-tools@kubezero \
-    etcdhelper@kubezero \
     kubeadm@kubezero~=${KUBE_VERSION} \
-    kubectl@kubezero~=${KUBE_VERSION}
+    kubectl@kubezero~=${KUBE_VERSION} \
+    etcdhelper@kubezero \
+    etcd-ctl@edge-testing
 
 RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
     mkdir -p /var/lib/kubezero
```
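Aside: the `~=` in the apk lines above is apk-tools' fuzzy version match, pinning a package to a version prefix. A hedged illustration:

```bash
# Fuzzy-pin kubeadm to the 1.29.7 prefix: package revisions such as
# 1.29.7-r0 / 1.29.7-r1 satisfy the constraint, while 1.30.x does not
# (behavior summarized from apk's fuzzy-match semantics, not re-verified).
apk add kubeadm@kubezero~=1.29.7
```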

```diff
@@ -17,7 +17,7 @@ update-chart-docs:
 	for c in charts/*; do \
 	  [[ $$c =~ "kubezero-lib" ]] && continue ; \
 	  [[ $$c =~ "kubeadm" ]] && continue ; \
-	  helm-docs --skip-version-footer -c $$c ; \
+	  helm-docs -c $$c ; \
 	done
 publish-charts:
```

````diff
@@ -1,8 +1,8 @@
 # Cluster upgrade flow
 
-## Hard refresh
-```
-kubectl annotate app/kubezero -n argocd argocd.argoproj.io/refresh="hard"
-```
+## During 1.23 upgrade
+- create new kubezero-values CM if it does not exist yet, by merging parts of the legacy /etc/kubernetes/kubeadm-values.yaml values with potentially existing values from the kubezero ArgoCD app
 
 # General flow
````
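The CM-creation step described above can be sketched as follows; a minimal sketch assuming yq v4 and plain-YAML inputs (file names other than kubeadm-values.yaml are illustrative):

```bash
# Merge the legacy kubeadm values with any existing kubezero ArgoCD app
# values (deep merge, later file wins) and create the CM from the result.
yq ea '. as $item ireduce ({}; . * $item)' \
  /etc/kubernetes/kubeadm-values.yaml existing-argo-app-values.yaml \
  > merged-values.yaml
kubectl create configmap kubezero-values -n kube-system \
  --from-file=values.yaml=merged-values.yaml
```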

```diff
@@ -47,24 +47,15 @@ _kubeadm() {
 # Render cluster config
 render_kubeadm() {
-  local phase=$1
-
-  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} \
-    -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml \
-    --set patches=/etc/kubernetes/patches
+  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
 
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster KubeProxy Kubelet; do
+  for f in Cluster Init Join KubeProxy Kubelet; do
     # echo "---" >> /etc/kubernetes/kubeadm.yaml
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done
 
-  # skip InitConfig during upgrade
-  if [ "$phase" != "upgrade" ]; then
-    cat ${WORKDIR}/kubeadm/templates/InitConfiguration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  fi
-
   # "uncloak" the json patches after they got processed by helm
   for s in apiserver controller-manager scheduler; do
     yq eval '.json' ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml > /tmp/_tmp.yaml && \
@@ -107,7 +98,7 @@ pre_kubeadm() {
   fi
 
   # copy patches to host to make --rootfs of kubeadm work
-  cp -r ${WORKDIR}/kubeadm/templates/patches ${HOSTFS}/etc/kubernetes
+  cp -r ${WORKDIR}/kubeadm/templates/patches /host/tmp/
 }
@@ -120,6 +111,8 @@ post_kubeadm() {
   # Patch coreDNS addon, ideally we prevent kubeadm to reset coreDNS to its defaults
   kubectl patch deployment coredns -n kube-system --patch-file ${WORKDIR}/kubeadm/templates/patches/coredns0.yaml $LOG
+
+  rm -rf /host/tmp/patches
 }
@@ -133,28 +126,26 @@ kubeadm_upgrade() {
   migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
 
   # Update kubezero-values CM
-  kubectl get cm -n kubezero kubezero-values -o=yaml | \
+  kubectl get cm -n kube-system kubezero-values -o=yaml | \
     yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
     kubectl replace -f -
 
   # update argo app
-  export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
   kubectl get application kubezero -n argocd -o yaml | \
-    yq 'del(.spec.source.helm.values) | .spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
+    kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
+    yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
     kubectl apply -f -
 
   # finally remove annotation to allow argo to sync again
   kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
 
   # Local node upgrade
-  render_kubeadm upgrade
+  render_kubeadm
 
   pre_kubeadm
 
-  # Upgrade - we upload the new config first so we can use --patch during 1.30
-  _kubeadm init phase upload-config kubeadm
-  kubeadm upgrade apply --yes --patches /etc/kubernetes/patches $KUBE_VERSION --rootfs ${HOSTFS} $LOG
+  # Upgrade
+  _kubeadm upgrade apply -y --patches /tmp/patches
 
   post_kubeadm
@@ -181,7 +172,7 @@ kubeadm_upgrade() {
 control_plane_node() {
   CMD=$1
 
-  render_kubeadm $CMD
+  render_kubeadm
 
   # Ensure clean slate if bootstrap, restore PKI otherwise
   if [[ "$CMD" =~ ^(bootstrap)$ ]]; then
@@ -202,7 +193,9 @@ control_plane_node() {
     cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
 
     # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
-    cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config
+    # Fallback to old config remove with 1.30 !!
+    cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config || \
+      cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
 
   # Only restore etcd data during "restore" and none exists already
   if [[ "$CMD" =~ ^(restore)$ ]]; then
@@ -261,7 +254,7 @@ control_plane_node() {
     yq eval -i '.etcd.state = "existing"
       | .etcd.initialCluster = strenv(ETCD_INITIAL_CLUSTER)
       ' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
-    render_kubeadm join
+    render_kubeadm
   fi
 
   # Generate our custom etcd yaml
@@ -270,7 +263,12 @@ control_plane_node() {
   _kubeadm init phase kubelet-start
 
+  # Remove conditional with 1.30
+  if [ -f ${HOSTFS}/etc/kubernetes/super-admin.conf ]; then
     cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
+  else
+    cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
+  fi
 
   # Wait for api to be online
   echo "Waiting for Kubernetes API to be online ..."
@@ -374,7 +372,9 @@ backup() {
   # pki & cluster-admin access
   cp -r ${HOSTFS}/etc/kubernetes/pki ${WORKDIR}
   cp ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}
-  cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${WORKDIR}
+  # Remove conditional with 1.30
+  [ -f ${HOSTFS}/etc/kubernetes/super-admin.conf ] && cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${WORKDIR}
 
   # Backup via restic
   restic backup ${WORKDIR} -H $CLUSTERNAME --tag $CLUSTER_VERSION
```

```diff
@@ -34,18 +34,11 @@ function argo_used() {
 # get kubezero-values from ArgoCD if available or use in-cluster CM without Argo
 function get_kubezero_values() {
-  ### Remove with 1.31
-  ### Migrate the kubezero CM from kube-system to kubezero NS during the 1.30 cycle
-  kubectl get cm kubezero-values -n kubezero > /dev/null || \
-    { create_ns kubezero; kubectl get cm kubezero-values -n kube-system -o yaml | \
-      sed 's/^ namespace: kube-system/ namespace: kubezero/' | \
-      kubectl create -f - && \
-      kubectl delete cm kubezero-values -n kube-system ; }
-  ###
+  local _namespace="kube-system"
+  [ "$PLATFORM" == "gke" ] && _namespace=kubezero
 
   argo_used && \
-    { kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml ; } || \
-    { kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml ; }
+    { kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.values > ${WORKDIR}/kubezero-values.yaml; } || \
+    { kubectl get configmap -n $_namespace kubezero-values -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml ;}
 }
@@ -103,7 +96,7 @@ function argo_app_synced() {
 function create_ns() {
   local namespace=$1
   if [ "$namespace" != "kube-system" ]; then
-    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
+    kubectl get ns $namespace || kubectl create ns $namespace
   fi
 }
@@ -176,7 +169,7 @@ function _helm() {
     [ -n "$_version" ] && targetRevision="--version $_version"
   fi
 
-  yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
+  yq eval '.spec.source.helm.values' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
 
   if [ $action == "crds" ]; then
     # Allow custom CRD handling
```

```diff
@@ -8,6 +8,14 @@ import yaml
 def migrate(values):
     """Actual changes here"""
 
+    # argoCD moves to argo module
+    try:
+        if values["argocd"]["enabled"]:
+            values["argo"] = {"enabled": True, "argo-cd": values["argocd"]}
+        values.pop("argocd")
+    except KeyError:
+        pass
+
     return values
```
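A hedged before/after of the migration above, assuming the script is used as a stdin/stdout filter (as the `migrate_argo_values.py < ... > ...` call in the upgrade script suggests):

```bash
echo 'argocd: {enabled: true, istio: {enabled: true}}' | migrate_argo_values.py
# Expected shape of the output: the old top-level argocd key is re-nested
# under the new argo module:
#   argo:
#     enabled: true
#     argo-cd:
#       enabled: true
#       istio:
#         enabled: true
```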

```diff
@@ -2,7 +2,7 @@
 set -eE
 set -o pipefail
 
-KUBE_VERSION=v1.30
+KUBE_VERSION=v1.29
 
 ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
@@ -21,14 +21,11 @@ argo_used && disable_argo
 control_plane_upgrade kubeadm_upgrade
 
-echo "Control plane upgraded, <Return> to continue"
-read -r
-
 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
-#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
+#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
 
-### v1.30
+### v1.29
 #
 # upgrade modules
@@ -42,7 +39,7 @@ echo "Applying remaining KubeZero modules..."
 control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
 
 # Final step is to commit the new argocd kubezero app
-kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP
+kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP
 
 # Trigger backup of upgraded cluster state
 kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$KUBE_VERSION -n kube-system
```

```diff
@@ -36,3 +36,6 @@ Kubernetes: `>= 1.26.0`
 | replicaCount | int | `1` |  |
 | resources | object | `{"requests":{"cpu":"300m","memory":"2000M"}}` | The resource requests and limits for the clamav service |
 | service.port | int | `3310` | The port to be used by the clamav service |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
```

```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.30.5
+version: 1.29.7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
```

```diff
@@ -2,7 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
+featureGates:
+  EtcdLearnerMode: true # becomes beta in 1.29
 #  NonGracefulFailover: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
```

```diff
@@ -3,10 +3,8 @@ kind: InitConfiguration
 localAPIEndpoint:
   advertiseAddress: {{ .Values.listenAddress }}
   bindPort: {{ .Values.api.listenPort }}
-{{- with .Values.patches }}
 patches:
-  directory: {{ . }}
-{{- end }}
+  directory: /tmp/patches
 nodeRegistration:
   criSocket: "unix:///var/run/crio/crio.sock"
   ignorePreflightErrors:
```

```diff
@@ -2,8 +2,9 @@
 {{- /* Issues: MemoryQoS */ -}}
 {{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
 {{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
-{{- /* v1.30: remove/beta KubeProxyDrainingTerminatingNodes */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "KubeProxyDrainingTerminatingNodes" "ImageMaximumGCAge" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
```

```diff
@@ -36,5 +36,3 @@ etcd:
 # -- Set to false for openrc, eg. on Gentoo or Alpine
 systemd: false
 protectKernelDefaults: false
-
-# patches: /tmp/patches
```

```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.8.9
+version: 0.8.8
 appVersion: v1.29
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -20,24 +20,24 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: external-dns
-    version: 1.15.0
+    version: 1.14.5
     repository: https://kubernetes-sigs.github.io/external-dns/
     condition: external-dns.enabled
   - name: cluster-autoscaler
-    version: 9.43.0
+    version: 9.37.0
    repository: https://kubernetes.github.io/autoscaler
     condition: cluster-autoscaler.enabled
   - name: nvidia-device-plugin
-    version: 0.16.2
+    version: 0.16.0
     # https://github.com/NVIDIA/k8s-device-plugin
     repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
   - name: sealed-secrets
-    version: 2.16.1
+    version: 2.16.0
     repository: https://bitnami-labs.github.io/sealed-secrets
     condition: sealed-secrets.enabled
   - name: aws-node-termination-handler
-    version: 0.24.1
+    version: 0.24.0
     repository: "oci://public.ecr.aws/aws-ec2/helm"
     condition: aws-node-termination-handler.enabled
   - name: aws-eks-asg-rolling-update-handler
```

```diff
@@ -1,6 +1,6 @@
 # kubezero-addons
 
-![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29](https://img.shields.io/badge/AppVersion-v1.29-informational?style=flat-square)
+![Version: 0.8.8](https://img.shields.io/badge/Version-0.8.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29](https://img.shields.io/badge/AppVersion-v1.29-informational?style=flat-square)
 
 KubeZero umbrella chart for various optional cluster addons
@@ -18,12 +18,12 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.1 |
-| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.15.0 |
-| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.43.0 |
-| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.16.2 |
+| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.0 |
+| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.14.5 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.37.0 |
+| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.16.0 |
 | https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
-| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.1 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.0 |
 
 # MetalLB
@@ -101,7 +101,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.useProviderId | bool | `true` |  |
 | awsNeuron.enabled | bool | `false` |  |
 | awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` |  |
-| awsNeuron.image.tag | string | `"2.22.4.0"` |  |
+| awsNeuron.image.tag | string | `"2.19.16.0"` |  |
 | cluster-autoscaler.autoDiscovery.clusterName | string | `""` |  |
 | cluster-autoscaler.awsRegion | string | `"us-west-2"` |  |
 | cluster-autoscaler.enabled | bool | `false` |  |
@@ -110,7 +110,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` |  |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` |  |
 | cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` |  |
-| cluster-autoscaler.image.tag | string | `"v1.30.2"` |  |
+| cluster-autoscaler.image.tag | string | `"v1.29.4"` |  |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` |  |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` |  |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` |  |
```

```diff
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.22.1
+appVersion: 1.22.0
 description: A Helm chart for the AWS Node Termination Handler.
 home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -21,4 +21,4 @@ name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
 type: application
-version: 0.24.1
+version: 0.24.0
```

```diff
@@ -6,14 +6,6 @@ set -ex
 login_ecr_public
 update_helm
 
-# Abandon for now in favor of KRR
-# get latest VPA resources, from https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/hack/vpa-process-yamls.sh
-# COMPONENTS="vpa-v1-crd-gen vpa-rbac updater-deployment recommender-deployment admission-controller-deployment"
-# mkdir -p templates/vertical-pod-autoscaler
-#for c in $COMPONENTS; do
-#  wget -q -O templates/vertical-pod-autoscaler/${c}.yaml https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/vertical-pod-autoscaler/deploy/${c}.yaml
-#done
-
 patch_chart aws-node-termination-handler
 patch_chart aws-eks-asg-rolling-update-handler
```

```diff
@@ -160,7 +160,7 @@ awsNeuron:
   image:
     name: public.ecr.aws/neuron/neuron-device-plugin
-    tag: 2.22.4.0
+    tag: 2.19.16.0
 
 nvidia-device-plugin:
   enabled: false
@@ -200,7 +200,7 @@ cluster-autoscaler:
   image:
     repository: registry.k8s.io/autoscaling/cluster-autoscaler
-    tag: v1.30.2
+    tag: v1.29.4
 
   autoDiscovery:
     clusterName: ""
```

```diff
@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.2.5
+version: 0.2.4
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,15 +18,15 @@ dependencies:
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: argo-events
-    version: 2.4.8
+    version: 2.4.7
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.6.10
+    version: 7.3.8
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-apps
-    version: 2.0.2
+    version: 2.0.0
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-image-updater
```

```diff
@@ -1,6 +1,6 @@
 # kubezero-argo
 
-![Version: 0.2.5](https://img.shields.io/badge/Version-0.2.5-informational?style=flat-square)
+![Version: 0.2.4](https://img.shields.io/badge/Version-0.2.4-informational?style=flat-square)
 
 KubeZero Argo - Events, Workflow, CD
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.26.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 7.6.10 |
-| https://argoproj.github.io/argo-helm | argo-events | 2.4.8 |
-| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.2 |
+| https://argoproj.github.io/argo-helm | argo-cd | 7.3.8 |
+| https://argoproj.github.io/argo-helm | argo-events | 2.4.7 |
+| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.0 |
 | https://argoproj.github.io/argo-helm | argocd-image-updater | 0.11.0 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
```

```diff
@@ -45,7 +45,7 @@ argo-cd:
       format: json
   image:
     repository: public.ecr.aws/zero-downtime/zdt-argocd
-    tag: v2.12.4
+    tag: v2.11.5
 
   configs:
     styles: |
```

```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-auth
 description: KubeZero umbrella chart for all things Authentication and Identity management
 type: application
-version: 0.5.1
+version: 0.5.0
 appVersion: 22.0.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -18,6 +18,6 @@ dependencies:
     repository: https://cdn.zero-downtime.net/charts/
   - name: keycloak
     repository: "oci://registry-1.docker.io/bitnamicharts"
-    version: 22.2.1
+    version: 22.1.1
     condition: keycloak.enabled
 kubeVersion: ">= 1.26.0"
```

```diff
@@ -1,6 +1,6 @@
 # kubezero-auth
 
-![Version: 0.5.1](https://img.shields.io/badge/Version-0.5.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 22.0.5](https://img.shields.io/badge/AppVersion-22.0.5-informational?style=flat-square)
+![Version: 0.5.0](https://img.shields.io/badge/Version-0.5.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 22.0.5](https://img.shields.io/badge/AppVersion-22.0.5-informational?style=flat-square)
 
 KubeZero umbrella chart for all things Authentication and Identity management
@@ -19,7 +19,7 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| oci://registry-1.docker.io/bitnamicharts | keycloak | 22.2.1 |
+| oci://registry-1.docker.io/bitnamicharts | keycloak | 22.1.1 |
 
 # Keycloak
@@ -41,7 +41,7 @@ https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keyc
 | keycloak.auth.existingSecret | string | `"kubezero-auth"` |  |
 | keycloak.auth.passwordSecretKey | string | `"admin-password"` |  |
 | keycloak.enabled | bool | `false` |  |
-| keycloak.hostnameStrict | bool | `false` |  |
+| keycloak.hostnameStrict | bool | `true` |  |
 | keycloak.istio.admin.enabled | bool | `false` |  |
 | keycloak.istio.admin.gateway | string | `"istio-ingress/private-ingressgateway"` |  |
 | keycloak.istio.admin.url | string | `""` |  |
@@ -56,9 +56,6 @@ https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keyc
 | keycloak.postgresql.auth.existingSecret | string | `"kubezero-auth"` |  |
 | keycloak.postgresql.auth.username | string | `"keycloak"` |  |
 | keycloak.postgresql.primary.persistence.size | string | `"1Gi"` |  |
-| keycloak.postgresql.primary.resources.limits.memory | string | `"128Mi"` |  |
-| keycloak.postgresql.primary.resources.requests.cpu | string | `"100m"` |  |
-| keycloak.postgresql.primary.resources.requests.memory | string | `"64Mi"` |  |
 | keycloak.postgresql.readReplicas.replicaCount | int | `0` |  |
 | keycloak.production | bool | `true` |  |
 | keycloak.proxyHeaders | string | `"xforwarded"` |  |
```

````diff
@@ -2,11 +2,11 @@
 ## backup
 
-- shell into running postgres-auth pod
+- shell into running posgres-auth pod
 ```
-export PGPASSWORD="$POSTGRES_POSTGRES_PASSWORD"
-cd /bitnami/postgresql
-pg_dumpall -U postgres > /bitnami/postgresql/backup
+export PGPASSWORD="<postgres_password from secret>"
+cd /bitnami/posgresql
+pg_dumpall -U postgres > backup
 ```
 
 - store backup off-site
@@ -29,10 +29,8 @@ kubectl cp keycloak/kubezero-auth-postgresql-0:/bitnami/postgresql/backup postgr
 kubectl cp postgres-backup keycloak/kubezero-auth-postgresql-0:/bitnami/postgresql/backup
 ```
 
-- shell into running postgres-auth pod
+- log into psql as admin ( shell on running pod )
 ```
-export PGPASSWORD="$POSTGRES_POSTGRES_PASSWORD"
-cd /bitnami/postgresql
 psql -U postgres
 ```
````
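For completeness, a hedged one-liner for replaying such a dump without an interactive shell; the pod name and dump path are taken from the `kubectl cp` lines above, the password variable from the left-hand side of the diff:

```bash
# Restore the pg_dumpall output inside the postgres pod (sketch only).
kubectl exec -n keycloak kubezero-auth-postgresql-0 -- bash -c \
  'PGPASSWORD="$POSTGRES_POSTGRES_PASSWORD" psql -U postgres -f /bitnami/postgresql/backup'
```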

```diff
@@ -2,7 +2,7 @@ keycloak:
   enabled: false
   production: true
-  hostnameStrict: false
+  hostnameStrict: true
   proxyHeaders: xforwarded
 
   auth:
@@ -39,14 +39,6 @@ keycloak:
       persistence:
         size: 1Gi
-      resources:
-        limits:
-          #cpu: 750m
-          memory: 128Mi
-        requests:
-          cpu: 100m
-          memory: 64Mi
-
     readReplicas:
       replicaCount: 0
```

```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-ci
 description: KubeZero umbrella chart for all things CI
 type: application
-version: 0.8.17
+version: 0.8.14
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,19 +18,19 @@ dependencies:
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: gitea
-    version: 10.4.1
+    version: 10.4.0
     repository: https://dl.gitea.io/charts/
     condition: gitea.enabled
   - name: jenkins
-    version: 5.7.6
+    version: 5.5.4
     repository: https://charts.jenkins.io
     condition: jenkins.enabled
   - name: trivy
-    version: 0.8.0
+    version: 0.7.0
     repository: https://aquasecurity.github.io/helm-charts/
     condition: trivy.enabled
   - name: renovate
-    version: 38.124.1
+    version: 37.440.7
     repository: https://docs.renovatebot.com/helm-charts
     condition: renovate.enabled
 kubeVersion: ">= 1.25.0"
```

```diff
@@ -1,6 +1,6 @@
 # kubezero-ci
 
-![Version: 0.8.17](https://img.shields.io/badge/Version-0.8.17-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.8.14](https://img.shields.io/badge/Version-0.8.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero umbrella chart for all things CI
@@ -18,11 +18,11 @@ Kubernetes: `>= 1.25.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://aquasecurity.github.io/helm-charts/ | trivy | 0.8.0 |
+| https://aquasecurity.github.io/helm-charts/ | trivy | 0.7.0 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://charts.jenkins.io | jenkins | 5.7.6 |
-| https://dl.gitea.io/charts/ | gitea | 10.4.1 |
-| https://docs.renovatebot.com/helm-charts | renovate | 38.124.1 |
+| https://charts.jenkins.io | jenkins | 5.5.4 |
+| https://dl.gitea.io/charts/ | gitea | 10.4.0 |
+| https://docs.renovatebot.com/helm-charts | renovate | 37.440.7 |
 
 # Jenkins
 - default build retention 10 builds, 32days
@@ -67,7 +67,6 @@ Kubernetes: `>= 1.25.0`
 | gitea.gitea.metrics.enabled | bool | `false` |  |
 | gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` |  |
 | gitea.image.rootless | bool | `true` |  |
-| gitea.image.tag | string | `"1.22.3"` |  |
 | gitea.istio.enabled | bool | `false` |  |
 | gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` |  |
 | gitea.istio.url | string | `"git.example.com"` |  |
@@ -92,7 +91,7 @@ Kubernetes: `>= 1.25.0`
 | jenkins.agent.defaultsProviderTemplate | string | `"podman-aws"` |  |
 | jenkins.agent.idleMinutes | int | `30` |  |
 | jenkins.agent.image.repository | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` |  |
-| jenkins.agent.image.tag | string | `"v0.6.2"` |  |
+| jenkins.agent.image.tag | string | `"v0.6.1"` |  |
 | jenkins.agent.inheritYamlMergeStrategy | bool | `true` |  |
 | jenkins.agent.podName | string | `"podman-aws"` |  |
 | jenkins.agent.podRetention | string | `"Default"` |  |
@@ -162,7 +161,7 @@ Kubernetes: `>= 1.25.0`
 | renovate.env.LOG_FORMAT | string | `"json"` |  |
 | renovate.securityContext.fsGroup | int | `1000` |  |
 | trivy.enabled | bool | `false` |  |
-| trivy.image.tag | string | `"0.56.2"` |  |
+| trivy.image.tag | string | `"0.52.1"` |  |
 | trivy.persistence.enabled | bool | `true` |  |
 | trivy.persistence.size | string | `"1Gi"` |  |
 | trivy.rbac.create | bool | `false` |  |
```

```diff
@@ -12,103 +12,6 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
 The changelog until v1.5.7 was auto-generated based on git commits.
 Those entries include a reference to the git commit to be able to get more details.
 
-## 5.7.5
-
-Fix helm release deployment with flux revision reconciliation
-
-## 5.7.4
-
-Update `kubernetes` to version `4292.v11898cf8fa_66`
-
-## 5.7.3
-
-Update `git` to version `5.5.2`
-
-## 5.7.2
-
-Update `jenkins/jenkins` to version `2.462.3-jdk17`
-
-## 5.7.1
-
-Update `docker.io/kiwigrid/k8s-sidecar` to version `1.28.0`
-
-## 5.7.0
-
-Add RBAC support for using the `nonroot` and `nonroot-v2` `SecurityContextConstraints` on OpenShift.
-
-## 5.6.5
-
-Update `kubernetes` to version `4290.v93ea_4b_b_26a_61`
-
-## 5.6.4
-
-Update `git` to version `5.5.1`
-
-## 5.6.3
-
-Update `git` to version `5.5.0`
-
-## 5.6.2
-
-Update `kubernetes` to version `4288.v1719f9d0c854`
-
-## 5.6.1
-
-Documentation about OCI installation
-
-## 5.6.0
-
-Helm chart is also now deployed on GitHub packages and can be installed from `oci://ghcr.io/jenkinsci/helm-charts/jenkins`
-
-## 5.5.16
-
-Update `kubernetes` to version `4287.v73451380b_576`
-
-## 5.5.15
-
-Add support for `controller.enableServiceLinks` to disable service links in the controller pod.
-
-## 5.5.14
-
-Update `jenkins/jenkins` to version `2.462.2-jdk17`
-
-## 5.5.13
-
-Update `docker.io/kiwigrid/k8s-sidecar` to version `1.27.6`
-
-## 5.5.12
-
-Update `configuration-as-code` to version `1850.va_a_8c31d3158b_`
-
-## 5.5.11
-
-Update `configuration-as-code` to version `1849.v3a_d20568000a_`
-
-## 5.5.10
-
-Update `git` to version `5.4.1`
-
-## 5.5.9
-
-Update `git` to version `5.4.0`
-
-## 5.5.8
-
-Add `agent.garbageCollection` to support setting [kubernetes plugin garbage collection](https://plugins.jenkins.io/kubernetes/#plugin-content-garbage-collection-beta).
-
-## 5.5.7
-
-Update `kubernetes` to version `4285.v50ed5f624918`
-
-## 5.5.6
-
-Add `agent.useDefaultServiceAccount` to support omitting setting `serviceAccount` in the default pod template from `serviceAgentAccount.name`.
-Add `agent.serviceAccount` to support setting the default pod template value.
-
-## 5.5.5
-
-Update `jenkins/inbound-agent` to version `3261.v9c670a_4748a_9-1`
-
 ## 5.5.4
 
 Update `jenkins/jenkins` to version `2.462.1-jdk17`
```

```diff
@@ -1,12 +1,14 @@
 annotations:
   artifacthub.io/category: integration-delivery
+  artifacthub.io/changes: |
+    - Update `jenkins/jenkins` to version `2.462.1-jdk17`
   artifacthub.io/images: |
     - name: jenkins
-      image: docker.io/jenkins/jenkins:2.462.3-jdk17
+      image: docker.io/jenkins/jenkins:2.462.1-jdk17
     - name: k8s-sidecar
-      image: docker.io/kiwigrid/k8s-sidecar:1.28.0
+      image: docker.io/kiwigrid/k8s-sidecar:1.27.5
     - name: inbound-agent
-      image: jenkins/inbound-agent:3261.v9c670a_4748a_9-1
+      image: jenkins/inbound-agent:3256.v88a_f6e922152-1
   artifacthub.io/license: Apache-2.0
   artifacthub.io/links: |
     - name: Chart Source
@@ -16,7 +18,7 @@ annotations:
     - name: support
       url: https://github.com/jenkinsci/helm-charts/issues
 apiVersion: v2
-appVersion: 2.462.3
+appVersion: 2.462.1
 description: 'Jenkins - Build great things at any scale! As the leading open source
   automation server, Jenkins provides over 1800 plugins to support building, deploying
   and automating any project. '
@@ -44,4 +46,4 @@ sources:
   - https://github.com/maorfr/kube-tasks
   - https://github.com/jenkinsci/configuration-as-code-plugin
 type: application
-version: 5.7.6
+version: 5.5.4
```

````diff
@@ -23,13 +23,8 @@ _See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentati
 ## Install Chart
 
 ```console
-helm install [RELEASE_NAME] jenkins/jenkins [flags]
-```
-
-Since version `5.6.0` the chart is available as an OCI image and can be installed using:
-
-```console
-helm install [RELEASE_NAME] oci://ghcr.io/jenkinsci/helm-charts/jenkins [flags]
+# Helm 3
+$ helm install [RELEASE_NAME] jenkins/jenkins [flags]
 ```
 
 _See [configuration](#configuration) below._
@@ -75,7 +70,7 @@ To see all configurable options with detailed comments, visit the chart's [value
 $ helm show values jenkins/jenkins
 ```
 
-For a summary of all configurable options, see [VALUES.md](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/VALUES.md).
+For a summary of all configurable options, see [VALUES_SUMMARY.md](https://github.com/jenkinsci/helm-charts/blob/main/charts/jenkins/VALUES_SUMMARY.md).
 
 ### Configure Security Realm and Authorization Strategy
````

```diff
@@ -8,311 +8,304 @@ The following tables list the configurable parameters of the Jenkins chart and t
 | Key | Type | Description | Default |
 |:----|:-----|:---------|:------------|
-| [additionalAgents](./values.yaml#L1195) | object | Configure additional | `{}` |
-| [additionalClouds](./values.yaml#L1220) | object |  | `{}` |
-| [agent.TTYEnabled](./values.yaml#L1101) | bool | Allocate pseudo tty to the side container | `false` |
-| [agent.additionalContainers](./values.yaml#L1148) | list | Add additional containers to the agents | `[]` |
-| [agent.alwaysPullImage](./values.yaml#L994) | bool | Always pull agent container image before build | `false` |
-| [agent.annotations](./values.yaml#L1144) | object | Annotations to apply to the pod | `{}` |
-| [agent.args](./values.yaml#L1095) | string | Arguments passed to command to execute | `"${computer.jnlpmac} ${computer.name}"` |
-| [agent.command](./values.yaml#L1093) | string | Command to execute when side container starts | `nil` |
-| [agent.componentName](./values.yaml#L962) | string |  | `"jenkins-agent"` |
-| [agent.connectTimeout](./values.yaml#L1142) | int | Timeout in seconds for an agent to be online | `100` |
-| [agent.containerCap](./values.yaml#L1103) | int | Max number of agents to launch | `10` |
-| [agent.customJenkinsLabels](./values.yaml#L959) | list | Append Jenkins labels to the agent | `[]` |
-| [agent.defaultsProviderTemplate](./values.yaml#L913) | string | The name of the pod template to use for providing default values | `""` |
-| [agent.directConnection](./values.yaml#L965) | bool |  | `false` |
-| [agent.disableDefaultAgent](./values.yaml#L1166) | bool | Disable the default Jenkins Agent configuration | `false` |
-| [agent.enabled](./values.yaml#L911) | bool | Enable Kubernetes plugin jnlp-agent podTemplate | `true` |
-| [agent.envVars](./values.yaml#L1076) | list | Environment variables for the agent Pod | `[]` |
-| [agent.garbageCollection.enabled](./values.yaml#L1110) | bool | When enabled, Jenkins will periodically check for orphan pods that have not been touched for the given timeout period and delete them. | `false` |
-| [agent.garbageCollection.namespaces](./values.yaml#L1112) | string | Namespaces to look at for garbage collection, in addition to the default namespace defined for the cloud. One namespace per line. | `""` |
-| [agent.garbageCollection.timeout](./values.yaml#L1117) | int | Timeout value for orphaned pods | `300` |
-| [agent.hostNetworking](./values.yaml#L973) | bool | Enables the agent to use the host network | `false` |
-| [agent.idleMinutes](./values.yaml#L1120) | int | Allows the Pod to remain active for reuse until the configured number of minutes has passed since the last step was executed on it | `0` |
-| [agent.image.repository](./values.yaml#L952) | string | Repository to pull the agent jnlp image from | `"jenkins/inbound-agent"` |
-| [agent.image.tag](./values.yaml#L954) | string | Tag of the image to pull | `"3261.v9c670a_4748a_9-1"` |
-| [agent.imagePullSecretName](./values.yaml#L961) | string | Name of the secret to be used to pull the image | `nil` |
-| [agent.inheritYamlMergeStrategy](./values.yaml#L1140) | bool | Controls whether the defined yaml merge strategy will be inherited if another defined pod template is configured to inherit from the current one | `false` |
-| [agent.jenkinsTunnel](./values.yaml#L929) | string | Overrides the Kubernetes Jenkins tunnel | `nil` |
-| [agent.jenkinsUrl](./values.yaml#L925) | string | Overrides the Kubernetes Jenkins URL | `nil` |
-| [agent.jnlpregistry](./values.yaml#L949) | string | Custom registry used to pull the agent jnlp image from | `nil` |
-| [agent.kubernetesConnectTimeout](./values.yaml#L935) | int | The connection timeout in seconds for connections to Kubernetes API. The minimum value is 5 | `5` |
-| [agent.kubernetesReadTimeout](./values.yaml#L937) | int | The read timeout in seconds for connections to Kubernetes API. The minimum value is 15 | `15` |
-| [agent.livenessProbe](./values.yaml#L984) | object |  | `{}` |
-| [agent.maxRequestsPerHostStr](./values.yaml#L939) | string | The maximum concurrent connections to Kubernetes API | `"32"` |
-| [agent.namespace](./values.yaml#L945) | string | Namespace in which the Kubernetes agents should be launched | `nil` |
-| [agent.nodeSelector](./values.yaml#L1087) | object | Node labels for pod assignment | `{}` |
-| [agent.nodeUsageMode](./values.yaml#L957) | string |  | `"NORMAL"` |
-| [agent.podLabels](./values.yaml#L947) | object | Custom Pod labels (an object with `label-key: label-value` pairs) | `{}` |
-| [agent.podName](./values.yaml#L1105) | string | Agent Pod base name | `"default"` |
-| [agent.podRetention](./values.yaml#L1003) | string |  | `"Never"` |
-| [agent.podTemplates](./values.yaml#L1176) | object | Configures extra pod templates for the default kubernetes cloud | `{}` |
-| [agent.privileged](./values.yaml#L967) | bool | Agent privileged container | `false` |
-| [agent.resources](./values.yaml#L975) | object | Resources allocation (Requests and Limits) | `{"limits":{"cpu":"512m","memory":"512Mi"},"requests":{"cpu":"512m","memory":"512Mi"}}` |
-| [agent.restrictedPssSecurityContext](./values.yaml#L1000) | bool | Set a restricted securityContext on jnlp containers | `false` |
-| [agent.retentionTimeout](./values.yaml#L941) | int | Time in minutes after which the Kubernetes cloud plugin will clean up an idle worker that has not already terminated | `5` |
-| [agent.runAsGroup](./values.yaml#L971) | string | Configure container group | `nil` |
-| [agent.runAsUser](./values.yaml#L969) | string | Configure container user | `nil` |
-| [agent.secretEnvVars](./values.yaml#L1080) | list | Mount a secret as environment variable | `[]` |
-| [agent.serviceAccount](./values.yaml#L921) | string | Override the default service account | `serviceAccountAgent.name` if `agent.useDefaultServiceAccount` is `true` |
-| [agent.showRawYaml](./values.yaml#L1007) | bool |  | `true` |
-| [agent.sideContainerName](./values.yaml#L1097) | string | Side container name | `"jnlp"` |
-| [agent.skipTlsVerify](./values.yaml#L931) | bool | Disables the verification of the controller certificate on remote connection. This flag correspond to the "Disable https certificate check" flag in kubernetes plugin UI | `false` |
-| [agent.usageRestricted](./values.yaml#L933) | bool | Enable the possibility to restrict the usage of this agent to specific folder. This flag correspond to the "Restrict pipeline support to authorized folders" flag in kubernetes plugin UI | `false` |
-| [agent.useDefaultServiceAccount](./values.yaml#L917) | bool | Use `serviceAccountAgent.name` as the default value for defaults template `serviceAccount` | `true` |
-| [agent.volumes](./values.yaml#L1014) | list | Additional volumes | `[]` |
-| [agent.waitForPodSec](./values.yaml#L943) | int | Seconds to wait for pod to be running | `600` |
-| [agent.websocket](./values.yaml#L964) | bool | Enables agent communication via websockets | `false` |
-| [agent.workingDir](./values.yaml#L956) | string | Configure working directory for default agent | `"/home/jenkins/agent"` |
-| [agent.workspaceVolume](./values.yaml#L1049) | object | Workspace volume (defaults to EmptyDir) | `{}` |
-| [agent.yamlMergeStrategy](./values.yaml#L1138) | string | Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates. Possible values: "merge" or "override" | `"override"` |
-| [agent.yamlTemplate](./values.yaml#L1127) | string | The raw yaml of a Pod API Object to merge into the agent spec | `""` |
-| [awsSecurityGroupPolicies.enabled](./values.yaml#L1348) | bool |  | `false` |
-| [awsSecurityGroupPolicies.policies[0].name](./values.yaml#L1350) | string |  | `""` |
+| [additionalAgents](./values.yaml#L1169) | object | Configure additional | `{}` |
+| [additionalClouds](./values.yaml#L1194) | object |  | `{}` |
+| [agent.TTYEnabled](./values.yaml#L1087) | bool | Allocate pseudo tty to the side container | `false` |
+| [agent.additionalContainers](./values.yaml#L1122) | list | Add additional containers to the agents | `[]` |
+| [agent.alwaysPullImage](./values.yaml#L980) | bool | Always pull agent container image before build | `false` |
+| [agent.annotations](./values.yaml#L1118) | object | Annotations to apply to the pod | `{}` |
+| [agent.args](./values.yaml#L1081) | string | Arguments passed to command to execute | `"${computer.jnlpmac} ${computer.name}"` |
+| [agent.command](./values.yaml#L1079) | string | Command to execute when side container starts | `nil` |
+| [agent.componentName](./values.yaml#L948) | string |  | `"jenkins-agent"` |
+| [agent.connectTimeout](./values.yaml#L1116) | int | Timeout in seconds for an agent to be online | `100` |
+| [agent.containerCap](./values.yaml#L1089) | int | Max number of agents to launch | `10` |
+| [agent.customJenkinsLabels](./values.yaml#L945) | list | Append Jenkins labels to the agent | `[]` |
+| [agent.defaultsProviderTemplate](./values.yaml#L907) | string | The name of the pod template to use for providing default values | `""` |
+| [agent.directConnection](./values.yaml#L951) | bool |  | `false` |
+| [agent.disableDefaultAgent](./values.yaml#L1140) | bool | Disable the default Jenkins Agent configuration | `false` |
+| [agent.enabled](./values.yaml#L905) | bool | Enable Kubernetes plugin jnlp-agent podTemplate | `true` |
+| [agent.envVars](./values.yaml#L1062) | list | Environment variables for the agent Pod | `[]` |
+| [agent.hostNetworking](./values.yaml#L959) | bool | Enables the agent to use the host network | `false` |
+| [agent.idleMinutes](./values.yaml#L1094) | int | Allows the Pod to remain active for reuse until the configured number of minutes has passed since the last step was executed on it | `0` |
+| [agent.image.repository](./values.yaml#L938) | string | Repository to pull the agent jnlp image from | `"jenkins/inbound-agent"` |
+| [agent.image.tag](./values.yaml#L940) | string | Tag of the image to pull | `"3256.v88a_f6e922152-1"` |
+| [agent.imagePullSecretName](./values.yaml#L947) | string | Name of the secret to be used to pull the image | `nil` |
+| [agent.inheritYamlMergeStrategy](./values.yaml#L1114) | bool | Controls whether the defined yaml merge strategy will be inherited if another defined pod template is configured to inherit from the current one | `false` |
+| [agent.jenkinsTunnel](./values.yaml#L915) | string | Overrides the Kubernetes Jenkins tunnel | `nil` |
+| [agent.jenkinsUrl](./values.yaml#L911) | string | Overrides the Kubernetes Jenkins URL | `nil` |
+| [agent.jnlpregistry](./values.yaml#L935) | string | Custom registry used to pull the agent jnlp image from | `nil` |
+| [agent.kubernetesConnectTimeout](./values.yaml#L921) | int | The connection timeout in seconds for connections to Kubernetes API. The minimum value is 5 | `5` |
+| [agent.kubernetesReadTimeout](./values.yaml#L923) | int | The read timeout in seconds for connections to Kubernetes API. The minimum value is 15 | `15` |
+| [agent.livenessProbe](./values.yaml#L970) | object |  | `{}` |
+| [agent.maxRequestsPerHostStr](./values.yaml#L925) | string | The maximum concurrent connections to Kubernetes API | `"32"` |
+| [agent.namespace](./values.yaml#L931) | string | Namespace in which the Kubernetes agents should be launched | `nil` |
+| [agent.nodeSelector](./values.yaml#L1073) | object | Node labels for pod assignment | `{}` |
+| [agent.nodeUsageMode](./values.yaml#L943) | string |  | `"NORMAL"` |
+| [agent.podLabels](./values.yaml#L933) | object | Custom Pod labels (an object with `label-key: label-value` pairs) | `{}` |
+| [agent.podName](./values.yaml#L1091) | string | Agent Pod base name | `"default"` |
+| [agent.podRetention](./values.yaml#L989) | string |  | `"Never"` |
+| [agent.podTemplates](./values.yaml#L1150) | object | Configures extra pod templates for the default kubernetes cloud | `{}` |
+| [agent.privileged](./values.yaml#L953) | bool | Agent privileged container | `false` |
+| [agent.resources](./values.yaml#L961) | object | Resources allocation (Requests and Limits) | `{"limits":{"cpu":"512m","memory":"512Mi"},"requests":{"cpu":"512m","memory":"512Mi"}}` |
+| [agent.restrictedPssSecurityContext](./values.yaml#L986) | bool | Set a restricted securityContext on jnlp containers | `false` |
+| [agent.retentionTimeout](./values.yaml#L927) | int | Time in minutes after which the Kubernetes cloud plugin will clean up an idle worker that has not already terminated | `5` |
+| [agent.runAsGroup](./values.yaml#L957) | string | Configure container group | `nil` |
+| [agent.runAsUser](./values.yaml#L955) | string | Configure container user | `nil` |
+| [agent.secretEnvVars](./values.yaml#L1066) | list | Mount a secret as environment variable | `[]` |
+| [agent.showRawYaml](./values.yaml#L993) | bool |  | `true` |
+| [agent.sideContainerName](./values.yaml#L1083) | string | Side container name | `"jnlp"` |
+| [agent.skipTlsVerify](./values.yaml#L917) | bool | Disables the verification of the controller certificate on remote connection. This flag correspond to the "Disable https certificate check" flag in kubernetes plugin UI | `false` |
+| [agent.usageRestricted](./values.yaml#L919) | bool | Enable the possibility to restrict the usage of this agent to specific folder. This flag correspond to the "Restrict pipeline support to authorized folders" flag in kubernetes plugin UI | `false` |
+| [agent.volumes](./values.yaml#L1000) | list | Additional volumes | `[]` |
+| [agent.waitForPodSec](./values.yaml#L929) | int | Seconds to wait for pod to be running | `600` |
+| [agent.websocket](./values.yaml#L950) | bool | Enables agent communication via websockets | `false` |
+| [agent.workingDir](./values.yaml#L942) | string | Configure working directory for default agent | `"/home/jenkins/agent"` |
+| [agent.workspaceVolume](./values.yaml#L1035) | object | Workspace volume (defaults to EmptyDir) | `{}` |
+| [agent.yamlMergeStrategy](./values.yaml#L1112) | string | Defines how the raw yaml field gets merged with yaml definitions from inherited pod templates. Possible values: "merge" or "override" | `"override"` |
+| [agent.yamlTemplate](./values.yaml#L1101) | string | The raw yaml of a Pod API Object to merge into the agent spec | `""` |
+| [awsSecurityGroupPolicies.enabled](./values.yaml#L1320) | bool |  | `false` |
+| [awsSecurityGroupPolicies.policies[0].name](./values.yaml#L1322) | string |  | `""` |
+| [awsSecurityGroupPolicies.policies[0].podSelector](./values.yaml#L1324) | object |  | `{}` |
+| [awsSecurityGroupPolicies.policies[0].securityGroupIds](./values.yaml#L1323) | list |  | `[]` |
+| [checkDeprecation](./values.yaml#L1317) | bool | Checks if any deprecated values are used | `true` |
```
| [awsSecurityGroupPolicies.policies[0].podSelector](./values.yaml#L1352) | object | | `{}` |
| [awsSecurityGroupPolicies.policies[0].securityGroupIds](./values.yaml#L1351) | list | | `[]` |
| [checkDeprecation](./values.yaml#L1345) | bool | Checks if any deprecated values are used | `true` |
| [clusterZone](./values.yaml#L21) | string | Override the cluster name for FQDN resolving | `"cluster.local"` | | [clusterZone](./values.yaml#L21) | string | Override the cluster name for FQDN resolving | `"cluster.local"` |
| [controller.JCasC.authorizationStrategy](./values.yaml#L539) | string | Jenkins Config as Code Authorization Strategy-section | `"loggedInUsersCanDoAnything:\n allowAnonymousRead: false"` | | [controller.JCasC.authorizationStrategy](./values.yaml#L533) | string | Jenkins Config as Code Authorization Strategy-section | `"loggedInUsersCanDoAnything:\n allowAnonymousRead: false"` |
| [controller.JCasC.configMapAnnotations](./values.yaml#L544) | object | Annotations for the JCasC ConfigMap | `{}` | | [controller.JCasC.configMapAnnotations](./values.yaml#L538) | object | Annotations for the JCasC ConfigMap | `{}` |
| [controller.JCasC.configScripts](./values.yaml#L513) | object | List of Jenkins Config as Code scripts | `{}` | | [controller.JCasC.configScripts](./values.yaml#L507) | object | List of Jenkins Config as Code scripts | `{}` |
| [controller.JCasC.configUrls](./values.yaml#L510) | list | Remote URLs for configuration files. | `[]` | | [controller.JCasC.configUrls](./values.yaml#L504) | list | Remote URLs for configuration files. | `[]` |
| [controller.JCasC.defaultConfig](./values.yaml#L504) | bool | Enables default Jenkins configuration via configuration as code plugin | `true` | | [controller.JCasC.defaultConfig](./values.yaml#L498) | bool | Enables default Jenkins configuration via configuration as code plugin | `true` |
| [controller.JCasC.overwriteConfiguration](./values.yaml#L508) | bool | Whether Jenkins Config as Code should overwrite any existing configuration | `false` | | [controller.JCasC.overwriteConfiguration](./values.yaml#L502) | bool | Whether Jenkins Config as Code should overwrite any existing configuration | `false` |
| [controller.JCasC.security](./values.yaml#L520) | object | Jenkins Config as Code security-section | `{"apiToken":{"creationOfLegacyTokenEnabled":false,"tokenGenerationOnCreationEnabled":false,"usageStatisticsEnabled":true}}` | | [controller.JCasC.security](./values.yaml#L514) | object | Jenkins Config as Code security-section | `{"apiToken":{"creationOfLegacyTokenEnabled":false,"tokenGenerationOnCreationEnabled":false,"usageStatisticsEnabled":true}}` |
| [controller.JCasC.securityRealm](./values.yaml#L528) | string | Jenkins Config as Code Security Realm-section | `"local:\n allowsSignup: false\n enableCaptcha: false\n users:\n - id: \"${chart-admin-username}\"\n name: \"Jenkins Admin\"\n password: \"${chart-admin-password}\""` | | [controller.JCasC.securityRealm](./values.yaml#L522) | string | Jenkins Config as Code Security Realm-section | `"local:\n allowsSignup: false\n enableCaptcha: false\n users:\n - id: \"${chart-admin-username}\"\n name: \"Jenkins Admin\"\n password: \"${chart-admin-password}\""` |
| [controller.additionalExistingSecrets](./values.yaml#L465) | list | List of additional existing secrets to mount | `[]` | | [controller.additionalExistingSecrets](./values.yaml#L459) | list | List of additional existing secrets to mount | `[]` |
| [controller.additionalPlugins](./values.yaml#L415) | list | List of plugins to install in addition to those listed in controller.installPlugins | `[]` | | [controller.additionalPlugins](./values.yaml#L409) | list | List of plugins to install in addition to those listed in controller.installPlugins | `[]` |
| [controller.additionalSecrets](./values.yaml#L474) | list | List of additional secrets to create and mount | `[]` | | [controller.additionalSecrets](./values.yaml#L468) | list | List of additional secrets to create and mount | `[]` |
| [controller.admin.createSecret](./values.yaml#L91) | bool | Create secret for admin user | `true` | | [controller.admin.createSecret](./values.yaml#L91) | bool | Create secret for admin user | `true` |
| [controller.admin.existingSecret](./values.yaml#L94) | string | The name of an existing secret containing the admin credentials | `""` | | [controller.admin.existingSecret](./values.yaml#L94) | string | The name of an existing secret containing the admin credentials | `""` |
| [controller.admin.password](./values.yaml#L81) | string | Admin password created as a secret if `controller.admin.createSecret` is true | `<random password>` | | [controller.admin.password](./values.yaml#L81) | string | Admin password created as a secret if `controller.admin.createSecret` is true | `<random password>` |
| [controller.admin.passwordKey](./values.yaml#L86) | string | The key in the existing admin secret containing the password | `"jenkins-admin-password"` | | [controller.admin.passwordKey](./values.yaml#L86) | string | The key in the existing admin secret containing the password | `"jenkins-admin-password"` |
| [controller.admin.userKey](./values.yaml#L84) | string | The key in the existing admin secret containing the username | `"jenkins-admin-user"` | | [controller.admin.userKey](./values.yaml#L84) | string | The key in the existing admin secret containing the username | `"jenkins-admin-user"` |
| [controller.admin.username](./values.yaml#L78) | string | Admin username created as a secret if `controller.admin.createSecret` is true | `"admin"` | | [controller.admin.username](./values.yaml#L78) | string | Admin username created as a secret if `controller.admin.createSecret` is true | `"admin"` |
| [controller.affinity](./values.yaml#L666) | object | Affinity settings | `{}` | | [controller.affinity](./values.yaml#L660) | object | Affinity settings | `{}` |
| [controller.agentListenerEnabled](./values.yaml#L324) | bool | Create Agent listener service | `true` | | [controller.agentListenerEnabled](./values.yaml#L318) | bool | Create Agent listener service | `true` |
| [controller.agentListenerExternalTrafficPolicy](./values.yaml#L334) | string | Traffic Policy of for the agentListener service | `nil` | | [controller.agentListenerExternalTrafficPolicy](./values.yaml#L328) | string | Traffic Policy of for the agentListener service | `nil` |
| [controller.agentListenerHostPort](./values.yaml#L328) | string | Host port to listen for agents | `nil` | | [controller.agentListenerHostPort](./values.yaml#L322) | string | Host port to listen for agents | `nil` |
| [controller.agentListenerLoadBalancerIP](./values.yaml#L364) | string | Static IP for the agentListener LoadBalancer | `nil` | | [controller.agentListenerLoadBalancerIP](./values.yaml#L358) | string | Static IP for the agentListener LoadBalancer | `nil` |
| [controller.agentListenerLoadBalancerSourceRanges](./values.yaml#L336) | list | Allowed inbound IP for the agentListener service | `["0.0.0.0/0"]` | | [controller.agentListenerLoadBalancerSourceRanges](./values.yaml#L330) | list | Allowed inbound IP for the agentListener service | `["0.0.0.0/0"]` |
| [controller.agentListenerNodePort](./values.yaml#L330) | string | Node port to listen for agents | `nil` | | [controller.agentListenerNodePort](./values.yaml#L324) | string | Node port to listen for agents | `nil` |
| [controller.agentListenerPort](./values.yaml#L326) | int | Listening port for agents | `50000` | | [controller.agentListenerPort](./values.yaml#L320) | int | Listening port for agents | `50000` |
| [controller.agentListenerServiceAnnotations](./values.yaml#L359) | object | Annotations for the agentListener service | `{}` | | [controller.agentListenerServiceAnnotations](./values.yaml#L353) | object | Annotations for the agentListener service | `{}` |
| [controller.agentListenerServiceType](./values.yaml#L356) | string | Defines how to expose the agentListener service | `"ClusterIP"` | | [controller.agentListenerServiceType](./values.yaml#L350) | string | Defines how to expose the agentListener service | `"ClusterIP"` |
| [controller.backendconfig.annotations](./values.yaml#L769) | object | backendconfig annotations | `{}` | | [controller.backendconfig.annotations](./values.yaml#L763) | object | backendconfig annotations | `{}` |
| [controller.backendconfig.apiVersion](./values.yaml#L763) | string | backendconfig API version | `"extensions/v1beta1"` | | [controller.backendconfig.apiVersion](./values.yaml#L757) | string | backendconfig API version | `"extensions/v1beta1"` |
| [controller.backendconfig.enabled](./values.yaml#L761) | bool | Enables backendconfig | `false` | | [controller.backendconfig.enabled](./values.yaml#L755) | bool | Enables backendconfig | `false` |
| [controller.backendconfig.labels](./values.yaml#L767) | object | backendconfig labels | `{}` | | [controller.backendconfig.labels](./values.yaml#L761) | object | backendconfig labels | `{}` |
| [controller.backendconfig.name](./values.yaml#L765) | string | backendconfig name | `nil` | | [controller.backendconfig.name](./values.yaml#L759) | string | backendconfig name | `nil` |
| [controller.backendconfig.spec](./values.yaml#L771) | object | backendconfig spec | `{}` | | [controller.backendconfig.spec](./values.yaml#L765) | object | backendconfig spec | `{}` |
| [controller.cloudName](./values.yaml#L493) | string | Name of default cloud configuration. | `"kubernetes"` | | [controller.cloudName](./values.yaml#L487) | string | Name of default cloud configuration. | `"kubernetes"` |
| [controller.clusterIp](./values.yaml#L223) | string | k8s service clusterIP. Only used if serviceType is ClusterIP | `nil` | | [controller.clusterIp](./values.yaml#L217) | string | k8s service clusterIP. Only used if serviceType is ClusterIP | `nil` |
| [controller.componentName](./values.yaml#L34) | string | Used for label app.kubernetes.io/component | `"jenkins-controller"` | | [controller.componentName](./values.yaml#L34) | string | Used for label app.kubernetes.io/component | `"jenkins-controller"` |
| [controller.containerEnv](./values.yaml#L156) | list | Environment variables for Jenkins Container | `[]` | | [controller.containerEnv](./values.yaml#L150) | list | Environment variables for Jenkins Container | `[]` |
| [controller.containerEnvFrom](./values.yaml#L153) | list | Environment variable sources for Jenkins Container | `[]` | | [controller.containerEnvFrom](./values.yaml#L147) | list | Environment variable sources for Jenkins Container | `[]` |
| [controller.containerSecurityContext](./values.yaml#L211) | object | Allow controlling the securityContext for the jenkins container | `{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true,"runAsGroup":1000,"runAsUser":1000}` | | [controller.containerSecurityContext](./values.yaml#L205) | object | Allow controlling the securityContext for the jenkins container | `{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true,"runAsGroup":1000,"runAsUser":1000}` |
| [controller.csrf.defaultCrumbIssuer.enabled](./values.yaml#L345) | bool | Enable the default CSRF Crumb issuer | `true` | | [controller.csrf.defaultCrumbIssuer.enabled](./values.yaml#L339) | bool | Enable the default CSRF Crumb issuer | `true` |
| [controller.csrf.defaultCrumbIssuer.proxyCompatability](./values.yaml#L347) | bool | Enable proxy compatibility | `true` | | [controller.csrf.defaultCrumbIssuer.proxyCompatability](./values.yaml#L341) | bool | Enable proxy compatibility | `true` |
| [controller.customInitContainers](./values.yaml#L547) | list | Custom init-container specification in raw-yaml format | `[]` | | [controller.customInitContainers](./values.yaml#L541) | list | Custom init-container specification in raw-yaml format | `[]` |
| [controller.customJenkinsLabels](./values.yaml#L68) | list | Append Jenkins labels to the controller | `[]` | | [controller.customJenkinsLabels](./values.yaml#L68) | list | Append Jenkins labels to the controller | `[]` |
| [controller.disableRememberMe](./values.yaml#L59) | bool | Disable use of remember me | `false` | | [controller.disableRememberMe](./values.yaml#L59) | bool | Disable use of remember me | `false` |
| [controller.disabledAgentProtocols](./values.yaml#L339) | list | Disabled agent protocols | `["JNLP-connect","JNLP2-connect"]` | | [controller.disabledAgentProtocols](./values.yaml#L333) | list | Disabled agent protocols | `["JNLP-connect","JNLP2-connect"]` |
| [controller.enableRawHtmlMarkupFormatter](./values.yaml#L435) | bool | Enable HTML parsing using OWASP Markup Formatter Plugin (antisamy-markup-formatter) | `false` | | [controller.enableRawHtmlMarkupFormatter](./values.yaml#L429) | bool | Enable HTML parsing using OWASP Markup Formatter Plugin (antisamy-markup-formatter) | `false` |
| [controller.enableServiceLinks](./values.yaml#L130) | bool | | `false` |
| [controller.executorMode](./values.yaml#L65) | string | Sets the executor mode of the Jenkins node. Possible values are "NORMAL" or "EXCLUSIVE" | `"NORMAL"` | | [controller.executorMode](./values.yaml#L65) | string | Sets the executor mode of the Jenkins node. Possible values are "NORMAL" or "EXCLUSIVE" | `"NORMAL"` |
| [controller.existingSecret](./values.yaml#L462) | string | | `nil` | | [controller.existingSecret](./values.yaml#L456) | string | | `nil` |
| [controller.extraPorts](./values.yaml#L394) | list | Optionally configure other ports to expose in the controller container | `[]` | | [controller.extraPorts](./values.yaml#L388) | list | Optionally configure other ports to expose in the controller container | `[]` |
| [controller.fsGroup](./values.yaml#L192) | int | Deprecated in favor of `controller.podSecurityContextOverride`. uid that will be used for persistent volume. | `1000` | | [controller.fsGroup](./values.yaml#L186) | int | Deprecated in favor of `controller.podSecurityContextOverride`. uid that will be used for persistent volume. | `1000` |
| [controller.googlePodMonitor.enabled](./values.yaml#L832) | bool | | `false` | | [controller.googlePodMonitor.enabled](./values.yaml#L826) | bool | | `false` |
| [controller.googlePodMonitor.scrapeEndpoint](./values.yaml#L837) | string | | `"/prometheus"` | | [controller.googlePodMonitor.scrapeEndpoint](./values.yaml#L831) | string | | `"/prometheus"` |
| [controller.googlePodMonitor.scrapeInterval](./values.yaml#L835) | string | | `"60s"` | | [controller.googlePodMonitor.scrapeInterval](./values.yaml#L829) | string | | `"60s"` |
| [controller.healthProbes](./values.yaml#L254) | bool | Enable Kubernetes Probes configuration configured in `controller.probes` | `true` | | [controller.healthProbes](./values.yaml#L248) | bool | Enable Kubernetes Probes configuration configured in `controller.probes` | `true` |
| [controller.hostAliases](./values.yaml#L785) | list | Allows for adding entries to Pod /etc/hosts | `[]` | | [controller.hostAliases](./values.yaml#L779) | list | Allows for adding entries to Pod /etc/hosts | `[]` |
| [controller.hostNetworking](./values.yaml#L70) | bool | | `false` | | [controller.hostNetworking](./values.yaml#L70) | bool | | `false` |
| [controller.httpsKeyStore.disableSecretMount](./values.yaml#L853) | bool | | `false` | | [controller.httpsKeyStore.disableSecretMount](./values.yaml#L847) | bool | | `false` |
| [controller.httpsKeyStore.enable](./values.yaml#L844) | bool | Enables HTTPS keystore on jenkins controller | `false` | | [controller.httpsKeyStore.enable](./values.yaml#L838) | bool | Enables HTTPS keystore on jenkins controller | `false` |
| [controller.httpsKeyStore.fileName](./values.yaml#L861) | string | Jenkins keystore filename which will appear under controller.httpsKeyStore.path | `"keystore.jks"` | | [controller.httpsKeyStore.fileName](./values.yaml#L855) | string | Jenkins keystore filename which will appear under controller.httpsKeyStore.path | `"keystore.jks"` |
| [controller.httpsKeyStore.httpPort](./values.yaml#L857) | int | HTTP Port that Jenkins should listen to along with HTTPS, it also serves as the liveness and readiness probes port. | `8081` | | [controller.httpsKeyStore.httpPort](./values.yaml#L851) | int | HTTP Port that Jenkins should listen to along with HTTPS, it also serves as the liveness and readiness probes port. | `8081` |
| [controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretKey](./values.yaml#L852) | string | Name of the key in the secret that contains the JKS password | `"https-jks-password"` | | [controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretKey](./values.yaml#L846) | string | Name of the key in the secret that contains the JKS password | `"https-jks-password"` |
| [controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretName](./values.yaml#L850) | string | Name of the secret that contains the JKS password, if it is not in the same secret as the JKS file | `""` | | [controller.httpsKeyStore.jenkinsHttpsJksPasswordSecretName](./values.yaml#L844) | string | Name of the secret that contains the JKS password, if it is not in the same secret as the JKS file | `""` |
| [controller.httpsKeyStore.jenkinsHttpsJksSecretKey](./values.yaml#L848) | string | Name of the key in the secret that already has ssl keystore | `"jenkins-jks-file"` | | [controller.httpsKeyStore.jenkinsHttpsJksSecretKey](./values.yaml#L842) | string | Name of the key in the secret that already has ssl keystore | `"jenkins-jks-file"` |
| [controller.httpsKeyStore.jenkinsHttpsJksSecretName](./values.yaml#L846) | string | Name of the secret that already has ssl keystore | `""` | | [controller.httpsKeyStore.jenkinsHttpsJksSecretName](./values.yaml#L840) | string | Name of the secret that already has ssl keystore | `""` |
| [controller.httpsKeyStore.jenkinsKeyStoreBase64Encoded](./values.yaml#L866) | string | Base64 encoded Keystore content. Keystore must be converted to base64 then being pasted here | `nil` | | [controller.httpsKeyStore.jenkinsKeyStoreBase64Encoded](./values.yaml#L860) | string | Base64 encoded Keystore content. Keystore must be converted to base64 then being pasted here | `nil` |
| [controller.httpsKeyStore.password](./values.yaml#L863) | string | Jenkins keystore password | `"password"` | | [controller.httpsKeyStore.password](./values.yaml#L857) | string | Jenkins keystore password | `"password"` |
| [controller.httpsKeyStore.path](./values.yaml#L859) | string | Path of HTTPS keystore file | `"/var/jenkins_keystore"` | | [controller.httpsKeyStore.path](./values.yaml#L853) | string | Path of HTTPS keystore file | `"/var/jenkins_keystore"` |
| [controller.image.pullPolicy](./values.yaml#L47) | string | Controller image pull policy | `"Always"` | | [controller.image.pullPolicy](./values.yaml#L47) | string | Controller image pull policy | `"Always"` |
| [controller.image.registry](./values.yaml#L37) | string | Controller image registry | `"docker.io"` | | [controller.image.registry](./values.yaml#L37) | string | Controller image registry | `"docker.io"` |
| [controller.image.repository](./values.yaml#L39) | string | Controller image repository | `"jenkins/jenkins"` | | [controller.image.repository](./values.yaml#L39) | string | Controller image repository | `"jenkins/jenkins"` |
| [controller.image.tag](./values.yaml#L42) | string | Controller image tag override; i.e., tag: "2.440.1-jdk17" | `nil` | | [controller.image.tag](./values.yaml#L42) | string | Controller image tag override; i.e., tag: "2.440.1-jdk17" | `nil` |
| [controller.image.tagLabel](./values.yaml#L45) | string | Controller image tag label | `"jdk17"` | | [controller.image.tagLabel](./values.yaml#L45) | string | Controller image tag label | `"jdk17"` |
| [controller.imagePullSecretName](./values.yaml#L49) | string | Controller image pull secret | `nil` | | [controller.imagePullSecretName](./values.yaml#L49) | string | Controller image pull secret | `nil` |
| [controller.ingress.annotations](./values.yaml#L708) | object | Ingress annotations | `{}` | | [controller.ingress.annotations](./values.yaml#L702) | object | Ingress annotations | `{}` |
| [controller.ingress.apiVersion](./values.yaml#L704) | string | Ingress API version | `"extensions/v1beta1"` | | [controller.ingress.apiVersion](./values.yaml#L698) | string | Ingress API version | `"extensions/v1beta1"` |
| [controller.ingress.enabled](./values.yaml#L687) | bool | Enables ingress | `false` | | [controller.ingress.enabled](./values.yaml#L681) | bool | Enables ingress | `false` |
| [controller.ingress.hostName](./values.yaml#L721) | string | Ingress hostname | `nil` | | [controller.ingress.hostName](./values.yaml#L715) | string | Ingress hostname | `nil` |
| [controller.ingress.labels](./values.yaml#L706) | object | Ingress labels | `{}` | | [controller.ingress.labels](./values.yaml#L700) | object | Ingress labels | `{}` |
| [controller.ingress.path](./values.yaml#L717) | string | Ingress path | `nil` | | [controller.ingress.path](./values.yaml#L711) | string | Ingress path | `nil` |
| [controller.ingress.paths](./values.yaml#L691) | list | Override for the default Ingress paths | `[]` | | [controller.ingress.paths](./values.yaml#L685) | list | Override for the default Ingress paths | `[]` |
| [controller.ingress.resourceRootUrl](./values.yaml#L723) | string | Hostname to serve assets from | `nil` | | [controller.ingress.resourceRootUrl](./values.yaml#L717) | string | Hostname to serve assets from | `nil` |
| [controller.ingress.tls](./values.yaml#L725) | list | Ingress TLS configuration | `[]` | | [controller.ingress.tls](./values.yaml#L719) | list | Ingress TLS configuration | `[]` |
| [controller.initConfigMap](./values.yaml#L452) | string | Name of the existing ConfigMap that contains init scripts | `nil` | | [controller.initConfigMap](./values.yaml#L446) | string | Name of the existing ConfigMap that contains init scripts | `nil` |
| [controller.initContainerEnv](./values.yaml#L147) | list | Environment variables for Init Container | `[]` | | [controller.initContainerEnv](./values.yaml#L141) | list | Environment variables for Init Container | `[]` |
| [controller.initContainerEnvFrom](./values.yaml#L143) | list | Environment variable sources for Init Container | `[]` | | [controller.initContainerEnvFrom](./values.yaml#L137) | list | Environment variable sources for Init Container | `[]` |
| [controller.initContainerResources](./values.yaml#L134) | object | Resources allocation (Requests and Limits) for Init Container | `{}` | | [controller.initContainerResources](./values.yaml#L128) | object | Resources allocation (Requests and Limits) for Init Container | `{}` |
| [controller.initScripts](./values.yaml#L448) | object | Map of groovy init scripts to be executed during Jenkins controller start | `{}` | | [controller.initScripts](./values.yaml#L442) | object | Map of groovy init scripts to be executed during Jenkins controller start | `{}` |
| [controller.initializeOnce](./values.yaml#L420) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` | | [controller.initializeOnce](./values.yaml#L414) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
| [controller.installLatestPlugins](./values.yaml#L409) | bool | Download the minimum required version or latest version of all dependencies | `true` | | [controller.installLatestPlugins](./values.yaml#L403) | bool | Download the minimum required version or latest version of all dependencies | `true` |
| [controller.installLatestSpecifiedPlugins](./values.yaml#L412) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` | | [controller.installLatestSpecifiedPlugins](./values.yaml#L406) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
| [controller.installPlugins](./values.yaml#L401) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4292.v11898cf8fa_66","workflow-aggregator:600.vb_57cdd26fdd7","git:5.5.2","configuration-as-code:1850.va_a_8c31d3158b_"]` | | [controller.installPlugins](./values.yaml#L395) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4280.vd919fa_528c7e","workflow-aggregator:600.vb_57cdd26fdd7","git:5.3.0","configuration-as-code:1836.vccda_4a_122a_a_e"]` |
| [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` | | [controller.javaOpts](./values.yaml#L156) | string | Append to `JAVA_OPTS` env var | `nil` |
| [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` | | [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
| [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` | | [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |
| [controller.jenkinsOpts](./values.yaml#L164) | string | Append to `JENKINS_OPTS` env var | `nil` | | [controller.jenkinsOpts](./values.yaml#L158) | string | Append to `JENKINS_OPTS` env var | `nil` |
| [controller.jenkinsRef](./values.yaml#L106) | string | Custom Jenkins reference path | `"/usr/share/jenkins/ref"` | | [controller.jenkinsRef](./values.yaml#L106) | string | Custom Jenkins reference path | `"/usr/share/jenkins/ref"` |
| [controller.jenkinsUriPrefix](./values.yaml#L179) | string | Root URI Jenkins will be served on | `nil` | | [controller.jenkinsUriPrefix](./values.yaml#L173) | string | Root URI Jenkins will be served on | `nil` |
| [controller.jenkinsUrl](./values.yaml#L174) | string | Set Jenkins URL if you are not using the ingress definitions provided by the chart | `nil` | | [controller.jenkinsUrl](./values.yaml#L168) | string | Set Jenkins URL if you are not using the ingress definitions provided by the chart | `nil` |
| [controller.jenkinsUrlProtocol](./values.yaml#L171) | string | Set protocol for Jenkins URL; `https` if `controller.ingress.tls`, `http` otherwise | `nil` | | [controller.jenkinsUrlProtocol](./values.yaml#L165) | string | Set protocol for Jenkins URL; `https` if `controller.ingress.tls`, `http` otherwise | `nil` |
| [controller.jenkinsWar](./values.yaml#L109) | string | | `"/usr/share/jenkins/jenkins.war"` | | [controller.jenkinsWar](./values.yaml#L109) | string | | `"/usr/share/jenkins/jenkins.war"` |
| [controller.jmxPort](./values.yaml#L391) | string | Open a port, for JMX stats | `nil` | | [controller.jmxPort](./values.yaml#L385) | string | Open a port, for JMX stats | `nil` |
| [controller.legacyRemotingSecurityEnabled](./values.yaml#L367) | bool | Whether legacy remoting security should be enabled | `false` | | [controller.legacyRemotingSecurityEnabled](./values.yaml#L361) | bool | Whether legacy remoting security should be enabled | `false` |
| [controller.lifecycle](./values.yaml#L51) | object | Lifecycle specification for controller-container | `{}` | | [controller.lifecycle](./values.yaml#L51) | object | Lifecycle specification for controller-container | `{}` |
| [controller.loadBalancerIP](./values.yaml#L382) | string | Optionally assign a known public LB IP | `nil` | | [controller.loadBalancerIP](./values.yaml#L376) | string | Optionally assign a known public LB IP | `nil` |
| [controller.loadBalancerSourceRanges](./values.yaml#L378) | list | Allowed inbound IP addresses | `["0.0.0.0/0"]` | | [controller.loadBalancerSourceRanges](./values.yaml#L372) | list | Allowed inbound IP addresses | `["0.0.0.0/0"]` |
| [controller.markupFormatter](./values.yaml#L439) | string | Yaml of the markup formatter to use | `"plainText"` | | [controller.markupFormatter](./values.yaml#L433) | string | Yaml of the markup formatter to use | `"plainText"` |
| [controller.nodePort](./values.yaml#L229) | string | k8s node port. Only used if serviceType is NodePort | `nil` | | [controller.nodePort](./values.yaml#L223) | string | k8s node port. Only used if serviceType is NodePort | `nil` |
| [controller.nodeSelector](./values.yaml#L653) | object | Node labels for pod assignment | `{}` | | [controller.nodeSelector](./values.yaml#L647) | object | Node labels for pod assignment | `{}` |
| [controller.numExecutors](./values.yaml#L62) | int | Set Number of executors | `0` | | [controller.numExecutors](./values.yaml#L62) | int | Set Number of executors | `0` |
| [controller.overwritePlugins](./values.yaml#L424) | bool | Overwrite installed plugins on start | `false` | | [controller.overwritePlugins](./values.yaml#L418) | bool | Overwrite installed plugins on start | `false` |
| [controller.overwritePluginsFromImage](./values.yaml#L428) | bool | Overwrite plugins that are already installed in the controller image | `true` | | [controller.overwritePluginsFromImage](./values.yaml#L422) | bool | Overwrite plugins that are already installed in the controller image | `true` |
| [controller.podAnnotations](./values.yaml#L674) | object | Annotations for controller pod | `{}` | | [controller.podAnnotations](./values.yaml#L668) | object | Annotations for controller pod | `{}` |
| [controller.podDisruptionBudget.annotations](./values.yaml#L318) | object | | `{}` | | [controller.podDisruptionBudget.annotations](./values.yaml#L312) | object | | `{}` |
| [controller.podDisruptionBudget.apiVersion](./values.yaml#L316) | string | Policy API version | `"policy/v1beta1"` | | [controller.podDisruptionBudget.apiVersion](./values.yaml#L310) | string | Policy API version | `"policy/v1beta1"` |
| [controller.podDisruptionBudget.enabled](./values.yaml#L311) | bool | Enable Kubernetes Pod Disruption Budget configuration | `false` | | [controller.podDisruptionBudget.enabled](./values.yaml#L305) | bool | Enable Kubernetes Pod Disruption Budget configuration | `false` |
| [controller.podDisruptionBudget.labels](./values.yaml#L319) | object | | `{}` | | [controller.podDisruptionBudget.labels](./values.yaml#L313) | object | | `{}` |
| [controller.podDisruptionBudget.maxUnavailable](./values.yaml#L321) | string | Number of pods that can be unavailable. Either an absolute number or a percentage | `"0"` | | [controller.podDisruptionBudget.maxUnavailable](./values.yaml#L315) | string | Number of pods that can be unavailable. Either an absolute number or a percentage | `"0"` |
| [controller.podLabels](./values.yaml#L247) | object | Custom Pod labels (an object with `label-key: label-value` pairs) | `{}` | | [controller.podLabels](./values.yaml#L241) | object | Custom Pod labels (an object with `label-key: label-value` pairs) | `{}` |
| [controller.podSecurityContextOverride](./values.yaml#L208) | string | Completely overwrites the contents of the pod security context, ignoring the values provided for `runAsUser`, `fsGroup`, and `securityContextCapabilities` | `nil` | | [controller.podSecurityContextOverride](./values.yaml#L202) | string | Completely overwrites the contents of the pod security context, ignoring the values provided for `runAsUser`, `fsGroup`, and `securityContextCapabilities` | `nil` |
| [controller.priorityClassName](./values.yaml#L671) | string | The name of a `priorityClass` to apply to the controller pod | `nil` | | [controller.priorityClassName](./values.yaml#L665) | string | The name of a `priorityClass` to apply to the controller pod | `nil` |
| [controller.probes.livenessProbe.failureThreshold](./values.yaml#L272) | int | Set the failure threshold for the liveness probe | `5` | | [controller.probes.livenessProbe.failureThreshold](./values.yaml#L266) | int | Set the failure threshold for the liveness probe | `5` |
| [controller.probes.livenessProbe.httpGet.path](./values.yaml#L275) | string | Set the Pod's HTTP path for the liveness probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` | | [controller.probes.livenessProbe.httpGet.path](./values.yaml#L269) | string | Set the Pod's HTTP path for the liveness probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` |
| [controller.probes.livenessProbe.httpGet.port](./values.yaml#L277) | string | Set the Pod's HTTP port to use for the liveness probe | `"http"` | | [controller.probes.livenessProbe.httpGet.port](./values.yaml#L271) | string | Set the Pod's HTTP port to use for the liveness probe | `"http"` |
| [controller.probes.livenessProbe.initialDelaySeconds](./values.yaml#L286) | string | Set the initial delay for the liveness probe in seconds | `nil` | | [controller.probes.livenessProbe.initialDelaySeconds](./values.yaml#L280) | string | Set the initial delay for the liveness probe in seconds | `nil` |
| [controller.probes.livenessProbe.periodSeconds](./values.yaml#L279) | int | Set the time interval between two liveness probes executions in seconds | `10` | | [controller.probes.livenessProbe.periodSeconds](./values.yaml#L273) | int | Set the time interval between two liveness probes executions in seconds | `10` |
| [controller.probes.livenessProbe.timeoutSeconds](./values.yaml#L281) | int | Set the timeout for the liveness probe in seconds | `5` | | [controller.probes.livenessProbe.timeoutSeconds](./values.yaml#L275) | int | Set the timeout for the liveness probe in seconds | `5` |
| [controller.probes.readinessProbe.failureThreshold](./values.yaml#L290) | int | Set the failure threshold for the readiness probe | `3` | | [controller.probes.readinessProbe.failureThreshold](./values.yaml#L284) | int | Set the failure threshold for the readiness probe | `3` |
| [controller.probes.readinessProbe.httpGet.path](./values.yaml#L293) | string | Set the Pod's HTTP path for the liveness probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` | | [controller.probes.readinessProbe.httpGet.path](./values.yaml#L287) | string | Set the Pod's HTTP path for the liveness probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` |
| [controller.probes.readinessProbe.httpGet.port](./values.yaml#L295) | string | Set the Pod's HTTP port to use for the readiness probe | `"http"` | | [controller.probes.readinessProbe.httpGet.port](./values.yaml#L289) | string | Set the Pod's HTTP port to use for the readiness probe | `"http"` |
| [controller.probes.readinessProbe.initialDelaySeconds](./values.yaml#L304) | string | Set the initial delay for the readiness probe in seconds | `nil` | | [controller.probes.readinessProbe.initialDelaySeconds](./values.yaml#L298) | string | Set the initial delay for the readiness probe in seconds | `nil` |
| [controller.probes.readinessProbe.periodSeconds](./values.yaml#L297) | int | Set the time interval between two readiness probes executions in seconds | `10` | | [controller.probes.readinessProbe.periodSeconds](./values.yaml#L291) | int | Set the time interval between two readiness probes executions in seconds | `10` |
| [controller.probes.readinessProbe.timeoutSeconds](./values.yaml#L299) | int | Set the timeout for the readiness probe in seconds | `5` | | [controller.probes.readinessProbe.timeoutSeconds](./values.yaml#L293) | int | Set the timeout for the readiness probe in seconds | `5` |
| [controller.probes.startupProbe.failureThreshold](./values.yaml#L259) | int | Set the failure threshold for the startup probe | `12` | | [controller.probes.startupProbe.failureThreshold](./values.yaml#L253) | int | Set the failure threshold for the startup probe | `12` |
| [controller.probes.startupProbe.httpGet.path](./values.yaml#L262) | string | Set the Pod's HTTP path for the startup probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` | | [controller.probes.startupProbe.httpGet.path](./values.yaml#L256) | string | Set the Pod's HTTP path for the startup probe | `"{{ default \"\" .Values.controller.jenkinsUriPrefix }}/login"` |
| [controller.probes.startupProbe.httpGet.port](./values.yaml#L264) | string | Set the Pod's HTTP port to use for the startup probe | `"http"` | | [controller.probes.startupProbe.httpGet.port](./values.yaml#L258) | string | Set the Pod's HTTP port to use for the startup probe | `"http"` |
| [controller.probes.startupProbe.periodSeconds](./values.yaml#L266) | int | Set the time interval between two startup probes executions in seconds | `10` | | [controller.probes.startupProbe.periodSeconds](./values.yaml#L260) | int | Set the time interval between two startup probes executions in seconds | `10` |
| [controller.probes.startupProbe.timeoutSeconds](./values.yaml#L268) | int | Set the timeout for the startup probe in seconds | `5` | | [controller.probes.startupProbe.timeoutSeconds](./values.yaml#L262) | int | Set the timeout for the startup probe in seconds | `5` |
| [controller.projectNamingStrategy](./values.yaml#L431) | string | | `"standard"` | | [controller.projectNamingStrategy](./values.yaml#L425) | string | | `"standard"` |
| [controller.prometheus.alertingRulesAdditionalLabels](./values.yaml#L818) | object | Additional labels to add to the PrometheusRule object | `{}` | | [controller.prometheus.alertingRulesAdditionalLabels](./values.yaml#L812) | object | Additional labels to add to the PrometheusRule object | `{}` |
| [controller.prometheus.alertingrules](./values.yaml#L816) | list | Array of prometheus alerting rules | `[]` | | [controller.prometheus.alertingrules](./values.yaml#L810) | list | Array of prometheus alerting rules | `[]` |
| [controller.prometheus.enabled](./values.yaml#L801) | bool | Enables prometheus service monitor | `false` | | [controller.prometheus.enabled](./values.yaml#L795) | bool | Enables prometheus service monitor | `false` |
| [controller.prometheus.metricRelabelings](./values.yaml#L828) | list | | `[]` | | [controller.prometheus.metricRelabelings](./values.yaml#L822) | list | | `[]` |
| [controller.prometheus.prometheusRuleNamespace](./values.yaml#L820) | string | Set a custom namespace where to deploy PrometheusRule resource | `""` | | [controller.prometheus.prometheusRuleNamespace](./values.yaml#L814) | string | Set a custom namespace where to deploy PrometheusRule resource | `""` |
| [controller.prometheus.relabelings](./values.yaml#L826) | list | | `[]` | | [controller.prometheus.relabelings](./values.yaml#L820) | list | | `[]` |
| [controller.prometheus.scrapeEndpoint](./values.yaml#L811) | string | The endpoint prometheus should get metrics from | `"/prometheus"` | | [controller.prometheus.scrapeEndpoint](./values.yaml#L805) | string | The endpoint prometheus should get metrics from | `"/prometheus"` |
| [controller.prometheus.scrapeInterval](./values.yaml#L807) | string | How often prometheus should scrape metrics | `"60s"` | | [controller.prometheus.scrapeInterval](./values.yaml#L801) | string | How often prometheus should scrape metrics | `"60s"` |
| [controller.prometheus.serviceMonitorAdditionalLabels](./values.yaml#L803) | object | Additional labels to add to the service monitor object | `{}` | | [controller.prometheus.serviceMonitorAdditionalLabels](./values.yaml#L797) | object | Additional labels to add to the service monitor object | `{}` |
| [controller.prometheus.serviceMonitorNamespace](./values.yaml#L805) | string | Set a custom namespace where to deploy ServiceMonitor resource | `nil` | | [controller.prometheus.serviceMonitorNamespace](./values.yaml#L799) | string | Set a custom namespace where to deploy ServiceMonitor resource | `nil` |
| [controller.resources](./values.yaml#L115) | object | Resource allocation (Requests and Limits) | `{"limits":{"cpu":"2000m","memory":"4096Mi"},"requests":{"cpu":"50m","memory":"256Mi"}}` | | [controller.resources](./values.yaml#L115) | object | Resource allocation (Requests and Limits) | `{"limits":{"cpu":"2000m","memory":"4096Mi"},"requests":{"cpu":"50m","memory":"256Mi"}}` |
| [controller.route.annotations](./values.yaml#L780) | object | Route annotations | `{}` | | [controller.route.annotations](./values.yaml#L774) | object | Route annotations | `{}` |
| [controller.route.enabled](./values.yaml#L776) | bool | Enables openshift route | `false` | | [controller.route.enabled](./values.yaml#L770) | bool | Enables openshift route | `false` |
| [controller.route.labels](./values.yaml#L778) | object | Route labels | `{}` | | [controller.route.labels](./values.yaml#L772) | object | Route labels | `{}` |
| [controller.route.path](./values.yaml#L782) | string | Route path | `nil` | | [controller.route.path](./values.yaml#L776) | string | Route path | `nil` |
| [controller.runAsUser](./values.yaml#L189) | int | Deprecated in favor of `controller.podSecurityContextOverride`. uid that jenkins runs with. | `1000` | | [controller.runAsUser](./values.yaml#L183) | int | Deprecated in favor of `controller.podSecurityContextOverride`. uid that jenkins runs with. | `1000` |
| [controller.schedulerName](./values.yaml#L649) | string | Name of the Kubernetes scheduler to use | `""` | | [controller.schedulerName](./values.yaml#L643) | string | Name of the Kubernetes scheduler to use | `""` |
| [controller.scriptApproval](./values.yaml#L443) | list | List of groovy functions to approve | `[]` | | [controller.scriptApproval](./values.yaml#L437) | list | List of groovy functions to approve | `[]` |
| [controller.secondaryingress.annotations](./values.yaml#L743) | object | | `{}` | | [controller.secondaryingress.annotations](./values.yaml#L737) | object | | `{}` |
| [controller.secondaryingress.apiVersion](./values.yaml#L741) | string | | `"extensions/v1beta1"` | | [controller.secondaryingress.apiVersion](./values.yaml#L735) | string | | `"extensions/v1beta1"` |
| [controller.secondaryingress.enabled](./values.yaml#L735) | bool | | `false` | | [controller.secondaryingress.enabled](./values.yaml#L729) | bool | | `false` |
| [controller.secondaryingress.hostName](./values.yaml#L750) | string | | `nil` | | [controller.secondaryingress.hostName](./values.yaml#L744) | string | | `nil` |
| [controller.secondaryingress.labels](./values.yaml#L742) | object | | `{}` | | [controller.secondaryingress.labels](./values.yaml#L736) | object | | `{}` |
| [controller.secondaryingress.paths](./values.yaml#L738) | list | | `[]` | | [controller.secondaryingress.paths](./values.yaml#L732) | list | | `[]` |
| [controller.secondaryingress.tls](./values.yaml#L751) | string | | `nil` | | [controller.secondaryingress.tls](./values.yaml#L745) | string | | `nil` |
| [controller.secretClaims](./values.yaml#L486) | list | List of `SecretClaim` resources to create | `[]` | | [controller.secretClaims](./values.yaml#L480) | list | List of `SecretClaim` resources to create | `[]` |
| [controller.securityContextCapabilities](./values.yaml#L198) | object | | `{}` | | [controller.securityContextCapabilities](./values.yaml#L192) | object | | `{}` |
| [controller.serviceAnnotations](./values.yaml#L236) | object | Jenkins controller service annotations | `{}` | | [controller.serviceAnnotations](./values.yaml#L230) | object | Jenkins controller service annotations | `{}` |
| [controller.serviceExternalTrafficPolicy](./values.yaml#L233) | string | | `nil` | | [controller.serviceExternalTrafficPolicy](./values.yaml#L227) | string | | `nil` |
| [controller.serviceLabels](./values.yaml#L242) | object | Labels for the Jenkins controller-service | `{}` | | [controller.serviceLabels](./values.yaml#L236) | object | Labels for the Jenkins controller-service | `{}` |
| [controller.servicePort](./values.yaml#L225) | int | k8s service port | `8080` | | [controller.servicePort](./values.yaml#L219) | int | k8s service port | `8080` |
| [controller.serviceType](./values.yaml#L220) | string | k8s service type | `"ClusterIP"` | | [controller.serviceType](./values.yaml#L214) | string | k8s service type | `"ClusterIP"` |
| [controller.shareProcessNamespace](./values.yaml#L124) | bool | | `false` | | [controller.shareProcessNamespace](./values.yaml#L124) | bool | | `false` |
| [controller.sidecars.additionalSidecarContainers](./values.yaml#L631) | list | Configures additional sidecar container(s) for the Jenkins controller | `[]` | | [controller.sidecars.additionalSidecarContainers](./values.yaml#L625) | list | Configures additional sidecar container(s) for the Jenkins controller | `[]` |
| [controller.sidecars.configAutoReload.additionalVolumeMounts](./values.yaml#L577) | list | Enables additional volume mounts for the config auto-reload container | `[]` | | [controller.sidecars.configAutoReload.additionalVolumeMounts](./values.yaml#L571) | list | Enables additional volume mounts for the config auto-reload container | `[]` |
| [controller.sidecars.configAutoReload.containerSecurityContext](./values.yaml#L626) | object | Enable container security context | `{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true}` | | [controller.sidecars.configAutoReload.containerSecurityContext](./values.yaml#L620) | object | Enable container security context | `{"allowPrivilegeEscalation":false,"readOnlyRootFilesystem":true}` |
| [controller.sidecars.configAutoReload.enabled](./values.yaml#L560) | bool | Enables Jenkins Config as Code auto-reload | `true` | | [controller.sidecars.configAutoReload.enabled](./values.yaml#L554) | bool | Enables Jenkins Config as Code auto-reload | `true` |
| [controller.sidecars.configAutoReload.env](./values.yaml#L608) | object | Environment variables for the Jenkins Config as Code auto-reload container | `{}` | | [controller.sidecars.configAutoReload.env](./values.yaml#L602) | object | Environment variables for the Jenkins Config as Code auto-reload container | `{}` |
| [controller.sidecars.configAutoReload.envFrom](./values.yaml#L606) | list | Environment variable sources for the Jenkins Config as Code auto-reload container | `[]` | | [controller.sidecars.configAutoReload.envFrom](./values.yaml#L600) | list | Environment variable sources for the Jenkins Config as Code auto-reload container | `[]` |
| [controller.sidecars.configAutoReload.folder](./values.yaml#L619) | string | | `"/var/jenkins_home/casc_configs"` | | [controller.sidecars.configAutoReload.folder](./values.yaml#L613) | string | | `"/var/jenkins_home/casc_configs"` |
| [controller.sidecars.configAutoReload.image.registry](./values.yaml#L563) | string | Registry for the image that triggers the reload | `"docker.io"` | | [controller.sidecars.configAutoReload.image.registry](./values.yaml#L557) | string | Registry for the image that triggers the reload | `"docker.io"` |
| [controller.sidecars.configAutoReload.image.repository](./values.yaml#L565) | string | Repository of the image that triggers the reload | `"kiwigrid/k8s-sidecar"` | | [controller.sidecars.configAutoReload.image.repository](./values.yaml#L559) | string | Repository of the image that triggers the reload | `"kiwigrid/k8s-sidecar"` |
| [controller.sidecars.configAutoReload.image.tag](./values.yaml#L567) | string | Tag for the image that triggers the reload | `"1.28.0"` | | [controller.sidecars.configAutoReload.image.tag](./values.yaml#L561) | string | Tag for the image that triggers the reload | `"1.27.5"` |
| [controller.sidecars.configAutoReload.imagePullPolicy](./values.yaml#L568) | string | | `"IfNotPresent"` | | [controller.sidecars.configAutoReload.imagePullPolicy](./values.yaml#L562) | string | | `"IfNotPresent"` |
| [controller.sidecars.configAutoReload.logging](./values.yaml#L583) | object | Config auto-reload logging settings | `{"configuration":{"backupCount":3,"formatter":"JSON","logLevel":"INFO","logToConsole":true,"logToFile":false,"maxBytes":1024,"override":false}}` | | [controller.sidecars.configAutoReload.logging](./values.yaml#L577) | object | Config auto-reload logging settings | `{"configuration":{"backupCount":3,"formatter":"JSON","logLevel":"INFO","logToConsole":true,"logToFile":false,"maxBytes":1024,"override":false}}` |
| [controller.sidecars.configAutoReload.logging.configuration.override](./values.yaml#L587) | bool | Enables custom log config utilizing using the settings below. | `false` | | [controller.sidecars.configAutoReload.logging.configuration.override](./values.yaml#L581) | bool | Enables custom log config utilizing using the settings below. | `false` |
| [controller.sidecars.configAutoReload.reqRetryConnect](./values.yaml#L601) | int | How many connection-related errors to retry on | `10` | | [controller.sidecars.configAutoReload.reqRetryConnect](./values.yaml#L595) | int | How many connection-related errors to retry on | `10` |
| [controller.sidecars.configAutoReload.resources](./values.yaml#L569) | object | | `{}` | | [controller.sidecars.configAutoReload.resources](./values.yaml#L563) | object | | `{}` |
| [controller.sidecars.configAutoReload.scheme](./values.yaml#L596) | string | The scheme to use when connecting to the Jenkins configuration as code endpoint | `"http"` | | [controller.sidecars.configAutoReload.scheme](./values.yaml#L590) | string | The scheme to use when connecting to the Jenkins configuration as code endpoint | `"http"` |
| [controller.sidecars.configAutoReload.skipTlsVerify](./values.yaml#L598) | bool | Skip TLS verification when connecting to the Jenkins configuration as code endpoint | `false` | | [controller.sidecars.configAutoReload.skipTlsVerify](./values.yaml#L592) | bool | Skip TLS verification when connecting to the Jenkins configuration as code endpoint | `false` |
| [controller.sidecars.configAutoReload.sleepTime](./values.yaml#L603) | string | How many seconds to wait before updating config-maps/secrets (sets METHOD=SLEEP on the sidecar) | `nil` | | [controller.sidecars.configAutoReload.sleepTime](./values.yaml#L597) | string | How many seconds to wait before updating config-maps/secrets (sets METHOD=SLEEP on the sidecar) | `nil` |
| [controller.sidecars.configAutoReload.sshTcpPort](./values.yaml#L617) | int | | `1044` | | [controller.sidecars.configAutoReload.sshTcpPort](./values.yaml#L611) | int | | `1044` |
| [controller.statefulSetAnnotations](./values.yaml#L676) | object | Annotations for controller StatefulSet | `{}` | | [controller.statefulSetAnnotations](./values.yaml#L670) | object | Annotations for controller StatefulSet | `{}` |
| [controller.statefulSetLabels](./values.yaml#L238) | object | Jenkins controller custom labels for the StatefulSet | `{}` | | [controller.statefulSetLabels](./values.yaml#L232) | object | Jenkins controller custom labels for the StatefulSet | `{}` |
| [controller.targetPort](./values.yaml#L227) | int | k8s target port | `8080` | | [controller.targetPort](./values.yaml#L221) | int | k8s target port | `8080` |
| [controller.terminationGracePeriodSeconds](./values.yaml#L659) | string | Set TerminationGracePeriodSeconds | `nil` | | [controller.terminationGracePeriodSeconds](./values.yaml#L653) | string | Set TerminationGracePeriodSeconds | `nil` |
| [controller.terminationMessagePath](./values.yaml#L661) | string | Set the termination message path | `nil` | | [controller.terminationMessagePath](./values.yaml#L655) | string | Set the termination message path | `nil` |
| [controller.terminationMessagePolicy](./values.yaml#L663) | string | Set the termination message policy | `nil` | | [controller.terminationMessagePolicy](./values.yaml#L657) | string | Set the termination message policy | `nil` |
| [controller.testEnabled](./values.yaml#L840) | bool | Can be used to disable rendering controller test resources when using helm template | `true` | | [controller.testEnabled](./values.yaml#L834) | bool | Can be used to disable rendering controller test resources when using helm template | `true` |
| [controller.tolerations](./values.yaml#L657) | list | Toleration labels for pod assignment | `[]` | | [controller.tolerations](./values.yaml#L651) | list | Toleration labels for pod assignment | `[]` |
| [controller.topologySpreadConstraints](./values.yaml#L683) | object | Topology spread constraints | `{}` | | [controller.topologySpreadConstraints](./values.yaml#L677) | object | Topology spread constraints | `{}` |
| [controller.updateStrategy](./values.yaml#L680) | object | Update strategy for StatefulSet | `{}` | | [controller.updateStrategy](./values.yaml#L674) | object | Update strategy for StatefulSet | `{}` |
| [controller.usePodSecurityContext](./values.yaml#L182) | bool | Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set) | `true` | | [controller.usePodSecurityContext](./values.yaml#L176) | bool | Enable pod security context (must be `true` if podSecurityContextOverride, runAsUser or fsGroup are set) | `true` |
| [credentialsId](./values.yaml#L27) | string | The Jenkins credentials to access the Kubernetes API server. For the default cluster it is not needed. | `nil` | | [credentialsId](./values.yaml#L27) | string | The Jenkins credentials to access the Kubernetes API server. For the default cluster it is not needed. | `nil` |
| [fullnameOverride](./values.yaml#L13) | string | Override the full resource names | `jenkins-(release-name)` or `jenkins` if the release-name is `jenkins` | | [fullnameOverride](./values.yaml#L13) | string | Override the full resource names | `jenkins-(release-name)` or `jenkins` if the release-name is `jenkins` |
| [helmtest.bats.image.registry](./values.yaml#L1361) | string | Registry of the image used to test the framework | `"docker.io"` | | [helmtest.bats.image.registry](./values.yaml#L1333) | string | Registry of the image used to test the framework | `"docker.io"` |
| [helmtest.bats.image.repository](./values.yaml#L1363) | string | Repository of the image used to test the framework | `"bats/bats"` | | [helmtest.bats.image.repository](./values.yaml#L1335) | string | Repository of the image used to test the framework | `"bats/bats"` |
| [helmtest.bats.image.tag](./values.yaml#L1365) | string | Tag of the image to test the framework | `"1.11.0"` | | [helmtest.bats.image.tag](./values.yaml#L1337) | string | Tag of the image to test the framework | `"1.11.0"` |
| [kubernetesURL](./values.yaml#L24) | string | The URL of the Kubernetes API server | `"https://kubernetes.default"` | | [kubernetesURL](./values.yaml#L24) | string | The URL of the Kubernetes API server | `"https://kubernetes.default"` |
| [nameOverride](./values.yaml#L10) | string | Override the resource name prefix | `Chart.Name` | | [nameOverride](./values.yaml#L10) | string | Override the resource name prefix | `Chart.Name` |
| [namespaceOverride](./values.yaml#L16) | string | Override the deployment namespace | `Release.Namespace` | | [namespaceOverride](./values.yaml#L16) | string | Override the deployment namespace | `Release.Namespace` |
| [networkPolicy.apiVersion](./values.yaml#L1289) | string | NetworkPolicy ApiVersion | `"networking.k8s.io/v1"` | | [networkPolicy.apiVersion](./values.yaml#L1263) | string | NetworkPolicy ApiVersion | `"networking.k8s.io/v1"` |
| [networkPolicy.enabled](./values.yaml#L1284) | bool | Enable the creation of NetworkPolicy resources | `false` | | [networkPolicy.enabled](./values.yaml#L1258) | bool | Enable the creation of NetworkPolicy resources | `false` |
| [networkPolicy.externalAgents.except](./values.yaml#L1303) | list | A list of IP sub-ranges to be excluded from the allowlisted IP range | `[]` | | [networkPolicy.externalAgents.except](./values.yaml#L1277) | list | A list of IP sub-ranges to be excluded from the allowlisted IP range | `[]` |
| [networkPolicy.externalAgents.ipCIDR](./values.yaml#L1301) | string | The IP range from which external agents are allowed to connect to controller, i.e., 172.17.0.0/16 | `nil` | | [networkPolicy.externalAgents.ipCIDR](./values.yaml#L1275) | string | The IP range from which external agents are allowed to connect to controller, i.e., 172.17.0.0/16 | `nil` |
| [networkPolicy.internalAgents.allowed](./values.yaml#L1293) | bool | Allow internal agents (from the same cluster) to connect to controller. Agent pods will be filtered based on PodLabels | `true` | | [networkPolicy.internalAgents.allowed](./values.yaml#L1267) | bool | Allow internal agents (from the same cluster) to connect to controller. Agent pods will be filtered based on PodLabels | `true` |
| [networkPolicy.internalAgents.namespaceLabels](./values.yaml#L1297) | object | A map of labels (keys/values) that agents namespaces must have to be able to connect to controller | `{}` | | [networkPolicy.internalAgents.namespaceLabels](./values.yaml#L1271) | object | A map of labels (keys/values) that agents namespaces must have to be able to connect to controller | `{}` |
| [networkPolicy.internalAgents.podLabels](./values.yaml#L1295) | object | A map of labels (keys/values) that agent pods must have to be able to connect to controller | `{}` | | [networkPolicy.internalAgents.podLabels](./values.yaml#L1269) | object | A map of labels (keys/values) that agent pods must have to be able to connect to controller | `{}` |
| [persistence.accessMode](./values.yaml#L1259) | string | The PVC access mode | `"ReadWriteOnce"` | | [persistence.accessMode](./values.yaml#L1233) | string | The PVC access mode | `"ReadWriteOnce"` |
| [persistence.annotations](./values.yaml#L1255) | object | Annotations for the PVC | `{}` | | [persistence.annotations](./values.yaml#L1229) | object | Annotations for the PVC | `{}` |
| [persistence.dataSource](./values.yaml#L1265) | object | Existing data source to clone PVC from | `{}` | | [persistence.dataSource](./values.yaml#L1239) | object | Existing data source to clone PVC from | `{}` |
| [persistence.enabled](./values.yaml#L1239) | bool | Enable the use of a Jenkins PVC | `true` | | [persistence.enabled](./values.yaml#L1213) | bool | Enable the use of a Jenkins PVC | `true` |
| [persistence.existingClaim](./values.yaml#L1245) | string | Provide the name of a PVC | `nil` | | [persistence.existingClaim](./values.yaml#L1219) | string | Provide the name of a PVC | `nil` |
| [persistence.labels](./values.yaml#L1257) | object | Labels for the PVC | `{}` | | [persistence.labels](./values.yaml#L1231) | object | Labels for the PVC | `{}` |
| [persistence.mounts](./values.yaml#L1277) | list | Additional mounts | `[]` | | [persistence.mounts](./values.yaml#L1251) | list | Additional mounts | `[]` |
| [persistence.size](./values.yaml#L1261) | string | The size of the PVC | `"8Gi"` | | [persistence.size](./values.yaml#L1235) | string | The size of the PVC | `"8Gi"` |
| [persistence.storageClass](./values.yaml#L1253) | string | Storage class for the PVC | `nil` | | [persistence.storageClass](./values.yaml#L1227) | string | Storage class for the PVC | `nil` |
| [persistence.subPath](./values.yaml#L1270) | string | SubPath for jenkins-home mount | `nil` | | [persistence.subPath](./values.yaml#L1244) | string | SubPath for jenkins-home mount | `nil` |
| [persistence.volumes](./values.yaml#L1272) | list | Additional volumes | `[]` | | [persistence.volumes](./values.yaml#L1246) | list | Additional volumes | `[]` |
| [rbac.create](./values.yaml#L1309) | bool | Whether RBAC resources are created | `true` | | [rbac.create](./values.yaml#L1283) | bool | Whether RBAC resources are created | `true` |
| [rbac.readSecrets](./values.yaml#L1311) | bool | Whether the Jenkins service account should be able to read Kubernetes secrets | `false` | | [rbac.readSecrets](./values.yaml#L1285) | bool | Whether the Jenkins service account should be able to read Kubernetes secrets | `false` |
| [rbac.useOpenShiftNonRootSCC](./values.yaml#L1313) | bool | Whether the Jenkins service account should be able to use the OpenShift "nonroot" Security Context Constraints | `false` |
| [renderHelmLabels](./values.yaml#L30) | bool | Enables rendering of the helm.sh/chart label to the annotations | `true` | | [renderHelmLabels](./values.yaml#L30) | bool | Enables rendering of the helm.sh/chart label to the annotations | `true` |
| [serviceAccount.annotations](./values.yaml#L1323) | object | Configures annotations for the ServiceAccount | `{}` | | [serviceAccount.annotations](./values.yaml#L1295) | object | Configures annotations for the ServiceAccount | `{}` |
| [serviceAccount.create](./values.yaml#L1317) | bool | Configures if a ServiceAccount with this name should be created | `true` | | [serviceAccount.create](./values.yaml#L1289) | bool | Configures if a ServiceAccount with this name should be created | `true` |
| [serviceAccount.extraLabels](./values.yaml#L1325) | object | Configures extra labels for the ServiceAccount | `{}` | | [serviceAccount.extraLabels](./values.yaml#L1297) | object | Configures extra labels for the ServiceAccount | `{}` |
| [serviceAccount.imagePullSecretName](./values.yaml#L1327) | string | Controller ServiceAccount image pull secret | `nil` | | [serviceAccount.imagePullSecretName](./values.yaml#L1299) | string | Controller ServiceAccount image pull secret | `nil` |
| [serviceAccount.name](./values.yaml#L1321) | string | | `nil` | | [serviceAccount.name](./values.yaml#L1293) | string | | `nil` |
| [serviceAccountAgent.annotations](./values.yaml#L1338) | object | Configures annotations for the agent ServiceAccount | `{}` | | [serviceAccountAgent.annotations](./values.yaml#L1310) | object | Configures annotations for the agent ServiceAccount | `{}` |
| [serviceAccountAgent.create](./values.yaml#L1332) | bool | Configures if an agent ServiceAccount should be created | `false` | | [serviceAccountAgent.create](./values.yaml#L1304) | bool | Configures if an agent ServiceAccount should be created | `false` |
| [serviceAccountAgent.extraLabels](./values.yaml#L1340) | object | Configures extra labels for the agent ServiceAccount | `{}` | | [serviceAccountAgent.extraLabels](./values.yaml#L1312) | object | Configures extra labels for the agent ServiceAccount | `{}` |
| [serviceAccountAgent.imagePullSecretName](./values.yaml#L1342) | string | Agent ServiceAccount image pull secret | `nil` | | [serviceAccountAgent.imagePullSecretName](./values.yaml#L1314) | string | Agent ServiceAccount image pull secret | `nil` |
| [serviceAccountAgent.name](./values.yaml#L1336) | string | The name of the agent ServiceAccount to be used by access-controlled resources | `nil` | | [serviceAccountAgent.name](./values.yaml#L1308) | string | The name of the agent ServiceAccount to be used by access-controlled resources | `nil` |

View File

@ -140,14 +140,6 @@ jenkins:
clouds: clouds:
- kubernetes: - kubernetes:
containerCapStr: "{{ .Values.agent.containerCap }}" containerCapStr: "{{ .Values.agent.containerCap }}"
{{- if .Values.agent.garbageCollection.enabled }}
garbageCollection:
{{- if .Values.agent.garbageCollection.namespaces }}
namespaces: |-
{{- .Values.agent.garbageCollection.namespaces | nindent 10 }}
{{- end }}
timeout: "{{ .Values.agent.garbageCollection.timeout }}"
{{- end }}
{{- if .Values.agent.jnlpregistry }} {{- if .Values.agent.jnlpregistry }}
jnlpregistry: "{{ .Values.agent.jnlpregistry }}" jnlpregistry: "{{ .Values.agent.jnlpregistry }}"
{{- end }} {{- end }}
@ -483,10 +475,7 @@ Returns kubernetes pod template configuration as code
nodeUsageMode: {{ quote .Values.agent.nodeUsageMode }} nodeUsageMode: {{ quote .Values.agent.nodeUsageMode }}
podRetention: {{ .Values.agent.podRetention }} podRetention: {{ .Values.agent.podRetention }}
showRawYaml: {{ .Values.agent.showRawYaml }} showRawYaml: {{ .Values.agent.showRawYaml }}
{{- $asaname := default (include "jenkins.serviceAccountAgentName" .) .Values.agent.serviceAccount -}} serviceAccount: "{{ include "jenkins.serviceAccountAgentName" . }}"
{{- if or (.Values.agent.useDefaultServiceAccount) (.Values.agent.serviceAccount) }}
serviceAccount: "{{ $asaname }}"
{{- end }}
slaveConnectTimeoutStr: "{{ .Values.agent.connectTimeout }}" slaveConnectTimeoutStr: "{{ .Values.agent.connectTimeout }}"
{{- if .Values.agent.volumes }} {{- if .Values.agent.volumes }}
volumes: volumes:

View File

@ -7,7 +7,7 @@ metadata:
labels: labels:
"app.kubernetes.io/name": {{ template "jenkins.name" . }} "app.kubernetes.io/name": {{ template "jenkins.name" . }}
{{- if .Values.renderHelmLabels }} {{- if .Values.renderHelmLabels }}
"helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" "helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version }}"
{{- end }} {{- end }}
"app.kubernetes.io/managed-by": "{{ $.Release.Service }}" "app.kubernetes.io/managed-by": "{{ $.Release.Service }}"
"app.kubernetes.io/instance": "{{ $.Release.Name }}" "app.kubernetes.io/instance": "{{ $.Release.Name }}"

View File

@ -11,7 +11,7 @@ metadata:
labels: labels:
"app.kubernetes.io/name": {{ template "jenkins.name" $root}} "app.kubernetes.io/name": {{ template "jenkins.name" $root}}
{{- if $root.Values.renderHelmLabels }} {{- if $root.Values.renderHelmLabels }}
"helm.sh/chart": "{{ $root.Chart.Name }}-{{ $root.Chart.Version | replace "+" "_" }}" "helm.sh/chart": "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
{{- end }} {{- end }}
"app.kubernetes.io/managed-by": "{{ $.Release.Service }}" "app.kubernetes.io/managed-by": "{{ $.Release.Service }}"
"app.kubernetes.io/instance": "{{ $.Release.Name }}" "app.kubernetes.io/instance": "{{ $.Release.Name }}"
@ -36,7 +36,7 @@ metadata:
labels: labels:
"app.kubernetes.io/name": {{ template "jenkins.name" $root}} "app.kubernetes.io/name": {{ template "jenkins.name" $root}}
{{- if .Values.renderHelmLabels }} {{- if .Values.renderHelmLabels }}
"helm.sh/chart": "{{ $root.Chart.Name }}-{{ $root.Chart.Version | replace "+" "_" }}" "helm.sh/chart": "{{ $root.Chart.Name }}-{{ $root.Chart.Version }}"
{{- end }} {{- end }}
"app.kubernetes.io/managed-by": "{{ $.Release.Service }}" "app.kubernetes.io/managed-by": "{{ $.Release.Service }}"
"app.kubernetes.io/instance": "{{ $.Release.Name }}" "app.kubernetes.io/instance": "{{ $.Release.Name }}"

View File

@ -80,9 +80,6 @@ spec:
{{- if .Values.controller.shareProcessNamespace }} {{- if .Values.controller.shareProcessNamespace }}
shareProcessNamespace: true shareProcessNamespace: true
{{- end }} {{- end }}
{{- if not .Values.controller.enableServiceLinks }}
enableServiceLinks: false
{{- end }}
{{- if .Values.controller.usePodSecurityContext }} {{- if .Values.controller.usePodSecurityContext }}
securityContext: securityContext:
{{- if kindIs "map" .Values.controller.podSecurityContextOverride }} {{- if kindIs "map" .Values.controller.podSecurityContextOverride }}

View File

@ -99,55 +99,6 @@ subjects:
--- ---
{{- end}} {{- end}}
{{- if .Values.rbac.useOpenShiftNonRootSCC }}
# This is needed if you are running on OpenShift and using the default
# containerSecurityContext in the chart. It grants the Jenkins service account
# permission to use the "nonroot" and "nonroot-v2" SecurityContextConstraints.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $serviceName }}-use-nonroot-scc
namespace: {{ template "jenkins.namespace" . }}
labels:
"app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
{{- if .Values.renderHelmLabels }}
"helm.sh/chart": "{{ template "jenkins.label" .}}"
{{- end }}
"app.kubernetes.io/managed-by": "{{ .Release.Service }}"
"app.kubernetes.io/instance": "{{ .Release.Name }}"
"app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
rules:
- apiGroups: ["security.openshift.io"]
resources: ["securitycontextconstraints"]
resourceNames: ["nonroot", "nonroot-v2"]
verbs: ["use"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $serviceName }}-use-nonroot-scc
namespace: {{ template "jenkins.namespace" . }}
labels:
"app.kubernetes.io/name": '{{ template "jenkins.name" .}}'
{{- if .Values.renderHelmLabels }}
"helm.sh/chart": "{{ template "jenkins.label" .}}"
{{- end }}
"app.kubernetes.io/managed-by": "{{ .Release.Service }}"
"app.kubernetes.io/instance": "{{ .Release.Name }}"
"app.kubernetes.io/component": "{{ .Values.controller.componentName }}"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "jenkins.fullname" . }}-use-nonroot-scc
subjects:
- kind: ServiceAccount
name: {{ template "jenkins.serviceAccountName" . }}
namespace: {{ template "jenkins.namespace" . }}
---
{{- end}}
{{- if .Values.controller.sidecars.configAutoReload.enabled }} {{- if .Values.controller.sidecars.configAutoReload.enabled }}
# The sidecar container which is responsible for reloading configuration changes # The sidecar container which is responsible for reloading configuration changes
# needs permissions to watch ConfigMaps # needs permissions to watch ConfigMaps

View File

@ -123,12 +123,6 @@ controller:
# Share process namespace to allow sidecar containers to interact with processes in other containers in the same pod # Share process namespace to allow sidecar containers to interact with processes in other containers in the same pod
shareProcessNamespace: false shareProcessNamespace: false
# Service links might cause issue if running in a namespace with a large amount of services
# that might cause a slow startup when plugins are copied from ref to volume
# Set to true to keep previous behavior
# See https://github.com/kubernetes/kubernetes/issues/121787
enableServiceLinks: false
# Overrides the init container default values # Overrides the init container default values
# -- Resources allocation (Requests and Limits) for Init Container # -- Resources allocation (Requests and Limits) for Init Container
initContainerResources: {} initContainerResources: {}
@ -399,10 +393,10 @@ controller:
# Plugins will be installed during Jenkins controller start # Plugins will be installed during Jenkins controller start
# -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` # -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
installPlugins: installPlugins:
- kubernetes:4292.v11898cf8fa_66 - kubernetes:4280.vd919fa_528c7e
- workflow-aggregator:600.vb_57cdd26fdd7 - workflow-aggregator:600.vb_57cdd26fdd7
- git:5.5.2 - git:5.3.0
- configuration-as-code:1850.va_a_8c31d3158b_ - configuration-as-code:1836.vccda_4a_122a_a_e
# If set to false, Jenkins will download the minimum required version of all dependencies. # If set to false, Jenkins will download the minimum required version of all dependencies.
# -- Download the minimum required version or latest version of all dependencies # -- Download the minimum required version or latest version of all dependencies
@ -564,7 +558,7 @@ controller:
# -- Repository of the image that triggers the reload # -- Repository of the image that triggers the reload
repository: kiwigrid/k8s-sidecar repository: kiwigrid/k8s-sidecar
# -- Tag for the image that triggers the reload # -- Tag for the image that triggers the reload
tag: 1.28.0 tag: 1.27.5
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: {} resources: {}
# limits: # limits:
@ -912,14 +906,6 @@ agent:
# -- The name of the pod template to use for providing default values # -- The name of the pod template to use for providing default values
defaultsProviderTemplate: "" defaultsProviderTemplate: ""
# Useful for not including a serviceAccount in the template if `false`
# -- Use `serviceAccountAgent.name` as the default value for defaults template `serviceAccount`
useDefaultServiceAccount: true
# -- Override the default service account
# @default -- `serviceAccountAgent.name` if `agent.useDefaultServiceAccount` is `true`
serviceAccount:
# For connecting to the Jenkins controller # For connecting to the Jenkins controller
# -- Overrides the Kubernetes Jenkins URL # -- Overrides the Kubernetes Jenkins URL
jenkinsUrl: jenkinsUrl:
@ -951,7 +937,7 @@ agent:
# -- Repository to pull the agent jnlp image from # -- Repository to pull the agent jnlp image from
repository: "jenkins/inbound-agent" repository: "jenkins/inbound-agent"
# -- Tag of the image to pull # -- Tag of the image to pull
tag: "3261.v9c670a_4748a_9-1" tag: "3256.v88a_f6e922152-1"
# -- Configure working directory for default agent # -- Configure working directory for default agent
workingDir: "/home/jenkins/agent" workingDir: "/home/jenkins/agent"
nodeUsageMode: "NORMAL" nodeUsageMode: "NORMAL"
@ -1104,18 +1090,6 @@ agent:
# -- Agent Pod base name # -- Agent Pod base name
podName: "default" podName: "default"
# Enables garbage collection of orphan pods for this Kubernetes cloud. (beta)
garbageCollection:
# -- When enabled, Jenkins will periodically check for orphan pods that have not been touched for the given timeout period and delete them.
enabled: false
# -- Namespaces to look at for garbage collection, in addition to the default namespace defined for the cloud. One namespace per line.
namespaces: ""
# namespaces: |-
# namespaceOne
# namespaceTwo
# -- Timeout value for orphaned pods
timeout: 300
# -- Allows the Pod to remain active for reuse until the configured number of minutes has passed since the last step was executed on it # -- Allows the Pod to remain active for reuse until the configured number of minutes has passed since the last step was executed on it
idleMinutes: 0 idleMinutes: 0
@ -1309,8 +1283,6 @@ rbac:
create: true create: true
# -- Whether the Jenkins service account should be able to read Kubernetes secrets # -- Whether the Jenkins service account should be able to read Kubernetes secrets
readSecrets: false readSecrets: false
# -- Whether the Jenkins service account should be able to use the OpenShift "nonroot" Security Context Constraints
useOpenShiftNonRootSCC: false
serviceAccount: serviceAccount:
# -- Configures if a ServiceAccount with this name should be created # -- Configures if a ServiceAccount with this name should be created

View File

@ -2,7 +2,7 @@ gitea:
enabled: false enabled: false
image: image:
tag: 1.22.3 # tag: 1.21.11
rootless: true rootless: true
replicaCount: 1 replicaCount: 1
@ -277,7 +277,7 @@ jenkins:
trivy: trivy:
enabled: false enabled: false
image: image:
tag: 0.56.2 tag: 0.52.1
persistence: persistence:
enabled: true enabled: true
size: 1Gi size: 1Gi

View File

@ -45,19 +45,20 @@ Kubernetes: `>= 1.26.0`
| k8saudit.falco.rules_file[0] | string | `"/etc/falco/rules.d"` | | | k8saudit.falco.rules_file[0] | string | `"/etc/falco/rules.d"` | |
| k8saudit.falco.syslog_output.enabled | bool | `false` | | | k8saudit.falco.syslog_output.enabled | bool | `false` | |
| k8saudit.falcoctl.artifact.follow.enabled | bool | `false` | | | k8saudit.falcoctl.artifact.follow.enabled | bool | `false` | |
| k8saudit.falcoctl.config.artifact.allowedTypes[0] | string | `"plugin"` | | | k8saudit.falcoctl.artifact.install.enabled | bool | `false` | |
| k8saudit.falcoctl.config.artifact.install.refs[0] | string | `"k8saudit:0.7.0"` | |
| k8saudit.falcoctl.config.artifact.install.refs[1] | string | `"json:0.7.2"` | |
| k8saudit.fullnameOverride | string | `"falco-k8saudit"` | | | k8saudit.fullnameOverride | string | `"falco-k8saudit"` | |
| k8saudit.mounts.volumeMounts[0].mountPath | string | `"/etc/falco/rules.d"` | | | k8saudit.mounts.volumeMounts[0].mountPath | string | `"/etc/falco/rules.d"` | |
| k8saudit.mounts.volumeMounts[0].name | string | `"rules-volume"` | | | k8saudit.mounts.volumeMounts[0].name | string | `"rules-volume"` | |
| k8saudit.mounts.volumes[0].configMap.name | string | `"falco-k8saudit-rules"` | | | k8saudit.mounts.volumes[0].configMap.name | string | `"falco-k8saudit-rules"` | |
| k8saudit.mounts.volumes[0].name | string | `"rules-volume"` | | | k8saudit.mounts.volumes[0].name | string | `"rules-volume"` | |
| k8saudit.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | k8saudit.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| k8saudit.resources.limits.cpu | int | `1` | | | k8saudit.resources.limits.cpu | string | `"1000m"` | |
| k8saudit.resources.limits.memory | string | `"512Mi"` | | | k8saudit.resources.limits.memory | string | `"512Mi"` | |
| k8saudit.resources.requests.cpu | string | `"100m"` | | | k8saudit.resources.requests.cpu | string | `"100m"` | |
| k8saudit.resources.requests.memory | string | `"64Mi"` | | | k8saudit.resources.requests.memory | string | `"256Mi"` | |
| k8saudit.services[0].name | string | `"webhook"` | | | k8saudit.services[0].name | string | `"webhook"` | |
| k8saudit.services[0].ports[0].port | int | `9765` | | | k8saudit.services[0].ports[0].port | int | `9765` | |
| k8saudit.services[0].ports[0].protocol | string | `"TCP"` | | | k8saudit.services[0].ports[0].protocol | string | `"TCP"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)

View File

@ -47,5 +47,6 @@ Kubernetes: `>= 1.26.0-0`
## Resources ## Resources
- https://github.com/cilium/cilium/blob/main/operator/pkg/model/translation/envoy_listener.go#L134 - https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#IstioOperatorSpec
- https://github.com/istio/istio/blob/master/manifests/profiles/default.yaml
- https://istio.io/latest/docs/setup/install/standalone-operator/

View File

@ -19,5 +19,6 @@ Installs Istio Ingress Gateways, requires kubezero-istio to be installed !
## Resources ## Resources
- https://github.com/cilium/cilium/blob/main/operator/pkg/model/translation/envoy_listener.go#L134 - https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#IstioOperatorSpec
- https://github.com/istio/istio/blob/master/manifests/profiles/default.yaml
- https://istio.io/latest/docs/setup/install/standalone-operator/

View File

@ -1,15 +0,0 @@
{{- if or .Values.redis.metrics.enabled ( index .Values "redis-cluster" "metrics" "enabled") }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards" | trunc 63 | trimSuffix "-" }}
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"
{{- include "kubezero-lib.labels" . | nindent 4 }}
binaryData:
redis.json.gz:
H4sIAAAAAAAC/+1daVPcOBr+Pr9C5Zndgq0OsQ0dmqmaDySBmdQAyQYyW7UD1aW2RbcWX2PJQIdifvvq8CHZ6jMNNMEfOKxX1vGejw5Ldz8AYPX7OEoySqyfwZ/sGYA78ZtRIhgilmq9P+1/+vzx+ODst4Mvp1anIAdwgAJO/5TGIaIjlJGK6CPipTihOI54lopAx4ko1IcUkjhLPVTRkiAb4uiDz+mJoVBJP8mbpVQrMtyz3xcd2aUU/ZXhFBk6VdQ/TOEljGBVOPaNyQUTfq0TrlFK8t692epuuXkjOubqEhgxZjUrS0bGqtRkpaLpdZhYiicyM2qy0VSls2Vv2Uv0jeBoGCBCIW1WeWqgNXtZihNGUczyMiqXp6zeCjChpXSrRjHKIMMB/cBLcjpVqsIcc6dZHhTBQcDpNM2Qkj7CviEVe3H0Lg7ilBeYDgdww+4A13HYr263A5xNteii6/tVX8A/wX6AUqo1oZIlGQ1imPpWTrsXfy9+yMVQN7DPyMcEvC/eApdxCqpOAkk+uE3ilKIUOFu3HYApuInTKwJuMB2BEQpCwETCGPA65blfjSBA+Qtb5+l5xH8+XIJxnIEQEy5fIDKCEIVxOgYZxQH+KvrWAUmAIEEgjH18OQbnVghvZbZzC1zDIEMAR/IfsjWGoVQfixVHayKwhhGiwiU4Tm+7K5O4gZzFcUBxwgi2SBRqF2VBIJ9Yq2HOHKfb23Z2uju2280LCHB0JXyDVDChwAZf4UFvhM5wiOKMKoVLGpf8W+hdDdM4i3jdlzAgSKf/wTtoJhFFeUGuP+4O05vujvyxt/Y0DZI5tneZZrl7HbBj8yw9Q54uozi7LsvSFaXsbhY6dFG551nWwPTLwyEUXLHLRIN8ZHqaCjPQ+8l0MIRUOIOq3CHMhqg0Y5HEdKNglGPbSndCHBUENZmM4pt6Zdx0Rszhj+LAP+KBiUzLcQzTKyREwLtRWFjVxhT7n2Kit3LEHneVwngTXOX5ttbKMX9uFC20dK96jJieMjOoaxcmJ+imzmVda3PWJQkzxDPpNRxTuq5oVYcUpyQNksaAoluq6BPISbzoMvG+M72wFEbDGYW5VWENzWTK8J4p56eYsYboGmFxFgnCcSwcMrOkKEIeRb6l5TnjNdc4GidlAKmkkcSEXuJbHaHkiYdxRE/xV1FP1/6HQk9R8x2RNvEVwZNjmEyRxSWzQf4eb7TONSp7Y5283q8R4vKFKQwlCVN2pjo1o7vEQaBHr20WuZiPZb96e9xxOD3Nt1zyepo2xUtWy5HFuNwH7W1rBeRma7Q44VVYMVlYh4swHSI6hW8sRImqmd5ssJ9+zIBEnzKfvSGCUz9L+APDuH2CmLr45A5HLMxFHvrl73Prp+Lh3Lr/8ycOhaU9Xmxu6ryufJkojqAUI6JnUWzZMlMOoUcFo1yNHKAhivzDsgb9ZeaZU+w105nGSaRc0wpCEQ+KTs+2p2hF6QpJjd+YCvdufRF8a+J2E64Tdq0q/66q/II6Xflj3mLrlzn1vnAjM1VfZCyGC16WpiiiZijbRvoVRfoojtALCfbuzGDPxw1cqT4yn5Q2xxASDDhuiwZaNNCigZWiARn7S4XoewFmvn9y7F862D9mSHeXiuchOsyVyQn19NMRvmxYRgkA3kmWLYkAei0CmI4ADLF7GQjQBBJPjQASxGqK6EOAgNoE3BNggJ0aBthZEQZwWgzQYoAWA6wUAzA9Af8C+TSAnHzuZ4SBgcGYoslQAIDXQHuHzyrMeGXzO8MPPbuz110FiDiWSwNfCByil4IkYIAheVfEfcW3DGDaCFJioecIRUM6Eq5NS0em7LOC+YJxGwsbdrSEX1Poc/Cn4QEeI/X+zB01e7Wo2ZsnavooOpWGU2+zaIc7IzIKA9KbBq+HBndVyLJJYYZvSsWRIXXSNEVMoclHypWvgtDoPnehxBDs0Q32haK40yDAPEFRCX76GulRWV6jVTmyg8MmzE14dVxrMv5uV09vSpAx3EcpEm76MogV85e+sgBLtY6xwOUhk7EwF+JdNWrhbi9B/pGMdDpt/pEkpMVUsheHIYx80k/S2EOEhxIh38kzyk54sYbzyPuLRY0dW3elgnPVOv0c8UQRoRJKahEjRJ/RMNfJ2gszBqq5WMDBLfIypuUsgBPkKeGmXClWhyDkMyIMXuRrxE17hkzozTEHiVNaG5wIW+4XMc3LwiyAFF8jq4lpqr0n6gaPW3iLa5Y4yLwrqZ5qn3mzc5PWJucrrF3LbR43lc7HYONjeDttfKAs6Y44J3T9yzcE6Y3ghHj4FhKk78goHWwju/SwjeQmWpw89Fi7djYMZNwUOgMOQ5MuivQjdF02WtsSsmbwQ5lLcJ8TJnHerACUOC0oeVJQojXt+8UkWAElV2gsmtAfYUpm4ZHuMngk3/1W05ZVAhXe9NWMcOfBKp0lmMs3vKHnyl7Z+AUZ/PZ5gsHfmCox/HcsugyYcwCnTwYFceTja+xnMHixULCmXUsBLLtFgd+MAuv4wfrx7aHz1rabivliJ6mcWZs5d5cAhLsPDwh5/DgIEzqeQPsvSuPV4Mj68ttawEieACABX3k32+mt5TZKLLA68tArHbwNa4MFF10HWj34mxfiMWteEMetB147404ETFgmauftVg/WhAY/JBJq4drzm7R7DnDMcVeAxxRmrdMM3Voiq3bVcCWrhhGi8gtrCR4eZA5pQbRwdwdEi8D9/aKQan7opHMgzujjsOCbZ8sYc2RjJ3NnzTHVCaL8w17w4fXHxcBUHTO1WOqJsVS7Bvpy4NTu085uOTtLwKnuEmiKC3KfnJlPWpiFtWrZF4FaKR6O6KnxJIclUZhx32oLwpYDYRq/VoLBSBYWu3/9gVjKmwg5NsFgDDb8wdPCDn/AIAf4vjZnyfmdDxSFcjXu/dt2Me5BMYn2AW4LSVpI8nwgSWOGZylM4my3UzwtulgvdPEKNLP3WUk4xdFwyntPB0eimIKigY++7PbcmGVm1PJbrNYDuh3kvQLXBJzE9FX5/DsTSIvi2t31LYxTikfX2BMqZf3Y27MPbVfVnxCF4nwBv4/5SKgvMjc/Hzq3nF13y9ndsrecn/ecrn1udf4XD1h6dYAnP6zCXEmKvADiULZh+/BNb3u73d41/wSY6ywBNnst1nwZWNPsCIU3kBuFctvTfFXhhdzpH+M/+uokw1cbyvqcgC/MGwmgNcfanJwtK1LXccd73qO12UdWZ3ju/h+L4avjq2z4M8az4uvQg1o32p1mLYZtpyLXGhz2nngqcil06Oy8ZHjotFORjzMVSePkaqPbAdh0VsV0eAHEERVPu+zphX5jq1WF1bTu9D0YBHmflt+5tgaHVYB3vCPLHFSxqj1a7Uxai0JeMgpZbNJpRVNMvWVAxJsWL0z47u+RoIL73X3vN+Vg5FWfi+xMCtPrEY/lxUglP4DXOPXYFJPbqNtG3WcSdctb1Jjpcavivd62pYJbxBuhEP5RXr3mOjKZjoP8FrL0SuZkbrISvLQaqyyaojDhM1rRcJ772arYdGc6otN4NZt6Umf93r+
apIvpZdUIceQFmY/2zefVTrnHkMsxY/ZveG367YhaTKo8Ekv+K0PpeMKlfJqYHC11iG5rgy2LXOHkSxqcjiPPdMpT81bAuj1r2hUUB2/XvIQirvsF79Pz0SWOcHFFneBzX7qX8nKeDuB8FJFQOxp4aSGeFMUtIsPI9NIs+S3Un0qs7reJ1RBimG0K2ZF/F02zdGqjDzzNnDlXGtlJhZARdCYLMqK7NdCku5LxHMOUDwzEdEAS+6vRrk+xD8QhwAsoF6u8H9XeWUq3FunhqvXNafVtXmkIYfC0QvBCQKbVu6X1sA77ZmhgUffDaeACff5ONbN2PatA4CUYKo/yj29eOQW2L07rZ3hQey3BDN2n1cs5w8q76VQAbnWVoYpjKw/b6oN6aHtX+d9RH7ZtlaKMQFzlfye/kPai6AMfCCq6NLMWteA3asFqLe6O+qAsCez6anuLtmjs+xqL8bE1SOMbNkLN4as+3pv3ntwN0624UputTN5zfPv+6OTz71/+81WmVtcY7/xw/38XppizYHsAAA==
redis-cluster.json.gz:
H4sIAAAAAAAC/+1d63PTOhb/3r/CY5idMpMW23mWmfuhLfTCLAUuLezsQjej2Goialu+stw2dLp/++rhh2Q7aRJKm4BhKLWOrMfRefx0JEs3W4ZhDocojBIamy+ML+zZMG7ET0YJQQBZqvnyZPjh4/vjV6evX306MVsZ2Qcj6HP6B4IDSCcwiQuiB2OXoIgiHPIsBYFOI1GoByiIcUJcWNAiPxmj8I3H6VFNoZL+Lm2WUq3IcMt+nrVklwj8O0EE1nQqq39MwDkIQVE48mqTMyb8WSZcQhKnvevvOrt22ohWfXURCBmzqpVFk9qq1GSlohXqCAGF/o6H2HBACndKefIhTum1tVq79q41v+q60UQzxzGsjmBdvazWu+qt7XJMAa1WdqKlVpmaSw8IQ8zyMioXH1ml6aOY5sJUNIRRRgny6Rtekt0qUhWG1HeU5YEhGPmcTkkClfQJ8mpSkYvDQ+xjwgsk4xHYtlqGY9vsR7fbMuxnatFZp/eLvhj/MPZ9SKjWhGL84skIA+KZKe1W/H+2lbK+rM9Fh4z8VeMcE+MjZKJmxJBwBouaTJZCSx01xyGkQs/tTs/uyiQu9acY+xRFjGCJRDGgYeL78olCAtIm2D2n0+kN7L2Okxbgo/BCKLwcRiEaNQbgzpExzxH0vUMcnqNxLgCpUTsHiU9jLZWlu0lMccBTb1tqegCiCIXjolEF3yfMOE2w75XL4m9hMfwmGMXYTxSdTOkxhVGsyGL256b0zBuWycuYQBiWCpJ6APwEpjwuUW+15zPlSe9lEiIq1M7cqslgYiYJhEm05MJWKQNrGfI+YJ0N5oQ99pUyrthzT3m+zgQkfZ7y50rRQnjsQf6MI12pCxYdpyyX3FAKFkKZUUMcasRvbNjR+TQjuzBk8qlmwASxtExiTZBQrJIJ9BIXvq9pFW8X8N3qKDOnG9N3mL5jw2UqFF26hADzl83n//3Mu/T0uTb2sp88wznwY1g7bBReU50tFf5Kd/y57AcLVw/IGFK9E1of4XUkpDMA19vs35CLypCiAG4TbkaGScQfGD4ZxpBZPy+++YZHf/zvq/nU9RnvIflq3n55ygEM+5018uzZM72fKGS+IKQVYypI8h0NnUgWwzEMvSNMAkCrVALPJUDZV8R9qzQKJm/3ERE2IbNeefrJBJ3TKoEKC2keyp4Zn0Tfq7hJOLdanzgC7sWY4CT0dDdhO4NW9o+58oJFTMbcCTxl1eCk0iCpFyCaM3xPn06Y7f8n5Npn4tE36NIXg85A51dugp4cHh7qJC5inPLu+VxeugTH8QQgknfryeDI6luHCtq806YzcYp8IFrK+cp8hcJbJoeAjTh77a8EkumJal0YkTJxeMv07nNqK1VZ+hFnoWs28NE4LI1CXofPPZ/0obrKlo3xhruc1hIVMBsxr/iBtbg/e1DH1bvTcU3QeMKkYULfh8c4iSFvSVnuhHPbKx4Lc6apsTRmJ5jQAyH8O4GiFFwLypBS/NWshI6rUim7fslU7gNm1XKSbRX6wlSPIJdP0gp9tcQfc2HPQUA4hssbn+7A0i3MuTTBJmfJbNujE3D+whybRPDVa8hHiGXuFn1n+nXFplIUhS49ZJa47HlEhrdiSGZTxIvxbDpn7RzyB0g4EpmdgRv82dTPGTSo0Pl7+9eonsZGLEYcyNR2e2EkkPl94eg1P18a2Nw7C4TA5hsIxuaCDj6jHAGXCgF15vr/m5sIe7elBkgpnwsPtHRuQQUatax5sIFJ5Qn6zofH6VRS04GznYVgRoErZ/A8myP+GwIS1+BDTpsKWp15rC3qGId0MqusQBIXLuxfEF7MKutK0BYu6iWYzirJ46SFC3rNQMaskiaCtjivUMg87UxmpdSFizuRCHlGcSl+XqZ1vo/iuWUGapY7sPAHAl2UGvubrQV4MZMTt4tg6QCeMp0p/I838PY8JXaXo+1ZKHt+1IzPeMX0ipk+ogSVmKs+LXVWoLUi6sQ7tYJf65dANea2xPxjQZeWs/IuryYy6pwre+4rgijc9/3P2gxSJ8/EyoJ6nOODzJPc12xmtRlLb9D9wRlLa/HK+rYzs7LBkXPo1NdnrVpbe1Zt/fbBUW+vvjZn1do6s2o72u91LKu+Nvvnz/302OUx4JAiNibgEhohDne+Q4INZsZC1gvoGbHPCGs+dbzfGcugNGOxndKURUv4wTmL3bvPyckDzTYeYyLRzBPumifkOjuUOnvXrGHxmN/NjcFAv1FG/QsG/xoU36D4BsU/HIqXHn2dUPwaAfQqAr8XfA58BGLRjFhb+GTAnVTnBXyR+C0Mx3QiooVaOqzLfhemq1lUlumECMbo5T000jpnSqVuCBAJfxLgIelxLQ2T6fxbOK5cQWnlFdF+DUjzPBieyGhZmUmiIU4JnOnNEGsU+/Fp/b4FcDkuFypYSQisWYPjQeSa7MxQ1KQSDpFOavdFcFhRTaWYCj9fLuhSVYsKfzjmrOgMT7xCnhBdZ15UnGuvCInnK8IZPjHnr0EDvi/jNFufmdW4SAI2MIaVobsL4Ua8VVz8El5zV0+vSgIbLw8Sgd/Ncx8rtlEGWt9r0l8QI+DCOi2PKZvMV2rhIdEIem/5fKpMWxwIMhO/naHBIGBzsXgYEezCOGawUEhBFRV+6QZnz9Y2oLy/VETZ6Vi6kxGcK/Y1zfDm6mLgl8XWrT/CcQZFz5ZZ0E6HxXh1Dd2ET7CfGwxvKN463/OjqEQQf4RiLbLGAQulB3z5r2oMuCfXLaHQ+GEGCdwkSHxA0WXNfgZla6C6/+4ayMmMtufLvZDiqa/W5gupWlSxgEil3PWmKzdRNY5lCq7hkvEt2+pZs2RdRz7ars7yMrTp4/EBiKG+zy0345Xs0o5XkpUurxRkYr2x17w3FWWbVgUoW+wvy7VIfwsv80Zr2/HWDIN5DCAHQOyxcxpgtkz4bLACMrMbZLYRyExr4e8LzJCCzC7gVDRhOGEz7vsFZele6ZIw/QS0tsPbfi97ABZCbK0VuBsgBno3lr+y9Uty+GAzMfFrJksMBh+LLhvMfBgnPxkROzMRMQo9dIm8BPi/CyLeGzgrQMjyisEqANK6Vyy8N2g3ULgeCpexjPnk4Mg+sKyqiDfhynUJV9qdFVBx/+FQMXeFr4KITmte4rT/QILrYPMvD6Z5ggFig++jaCKdPy3SKcBeAANMpsOERzdHU1q35m08emRzhzdvbbByvleAmdMh00bJwvvg2wOCY2ZElkTA64F0T7mBMo4Fy41PMVfpx4G5v2Hg1+7PxLnCdPxMfHi/SJd1pYG6zcr75gR47e4qEV6rCfE2i++bufgeQirPkZGodL1X3ndES5cFoosDTp0tOKGbwhfZ1M1Emu8gvcLkwnjz/P1yELNi0hqEuSgsc7rWoyHM+99a4HSbrQUNyFwcZA4eN17q2KtgzE6DMedhTLvBmD+AMfXvfe4DYlIcXWx3Wwaq2+g5A0wZX+ygchTQw8Ipww2qXw8VMVCtH0MX+H7amdUx6Rps8TQOeUdW2d7ZQLCVIVjbGfxCuzvbzl4DwRoItjAE6z9ynM9
xVsFg7QaDNXG+zcFgcRIYKfTyRmK7YRVzPTN2jGq+ISsCERSO6154PHwWYmpkLXvw5eiN4VI9h1bfkrkeWPVV2ivjMjbeYbqTPzMkEjdbMB8Ktnas3i8EWztWv4GtDWxdV9haiRyu8v1RtwGtDWj91UDraGpse6PHDRV6o3nnDG3k9y1y198bCgP5dcvLg0f63PvXRlbnyowmhGsLmRowVP0sBV4iV54+9GSwZx1Z6k5NNscKxNH83hBxFRqKzPlxFvJSAxcyW2b3mTHv71q79os9u2t9NVvSxhWX7vAz0OorIdD1AQpkG9pHvUG73Xwds8ZbCp29FUDboAFtDWhbCrTVm3Bhx+TKZWo1NCub2U9njo19jA2KDARuKwvHIqLF7KgIus3biSeBIfA88uyR429r9B1NmZmpU9oQZsrWbnDMUhyc9KrUjeYbmp+Bptc7Avk7wen8UkamqlzdeK/bVnqxYMwgcgAKN+n0ZDKd+umhsORC5mRethh4U9xRmLJNPuykt26ZeX0Md0dc1MPxIndAgvS8zaqUFnBJvRtwoeshGZxFIcpOLRcDO5TinV+P1jKYydUufkw/SVRnxChkBtnjZ4LW8L9eXkym5xTVZM+Oek1NvHbZXXFSsXLxlPk3PxZ9iQ4UA21rqWN4XXICZnyBok/EP5mGbp2Vq8YGmBjIY1H/yhpl6tRK63lafebU3MnuKQR+eqwsSL2oq3ydJpWHQKeClZ8ujq927Mx4ZgeIM4XTXosQM5+keDllWH4JnmrhzK4yy7It5aGtPthB8XtX+d1WH9qWSlFMvKP8bqcXiJ5lfeCOHVfOsJ5di1pwTy1YrcXpqA+KN+x7anuztmjs+44FoDRHBF/FqQQXnlZeX5pag9i4RMDIoSOTJ0iM9q7c8m4m8rLZ92A62e99DL/L1OJe2b2t2/8DY8J/imB5AAA=
{{- end }}

View File

@ -90,16 +90,14 @@ Kubernetes: `>= 1.26.0`
| fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | | | fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | |
| fluent-bit.testFramework.enabled | bool | `false` | | | fluent-bit.testFramework.enabled | bool | `false` | |
| fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | | | fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | fluent-bit.tolerations[0].key | string | `"kubezero-workergroup"` | |
| fluent-bit.tolerations[0].operator | string | `"Exists"` | |
| fluent-bit.tolerations[1].effect | string | `"NoSchedule"` | | | fluent-bit.tolerations[1].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[1].key | string | `"kubezero-workergroup"` | | | fluent-bit.tolerations[1].key | string | `"nvidia.com/gpu"` | |
| fluent-bit.tolerations[1].operator | string | `"Exists"` | | | fluent-bit.tolerations[1].operator | string | `"Exists"` | |
| fluent-bit.tolerations[2].effect | string | `"NoSchedule"` | | | fluent-bit.tolerations[2].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[2].key | string | `"nvidia.com/gpu"` | | | fluent-bit.tolerations[2].key | string | `"aws.amazon.com/neuron"` | |
| fluent-bit.tolerations[2].operator | string | `"Exists"` | | | fluent-bit.tolerations[2].operator | string | `"Exists"` | |
| fluent-bit.tolerations[3].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[3].key | string | `"aws.amazon.com/neuron"` | |
| fluent-bit.tolerations[3].operator | string | `"Exists"` | |
| fluentd.configMapConfigs[0] | string | `"fluentd-prometheus-conf"` | | | fluentd.configMapConfigs[0] | string | `"fluentd-prometheus-conf"` | |
| fluentd.dashboards.enabled | bool | `false` | | | fluentd.dashboards.enabled | bool | `false` | |
| fluentd.enabled | bool | `false` | | | fluentd.enabled | bool | `false` | |

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application type: application
version: 0.3.10 version: 0.3.8
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -18,15 +18,15 @@ dependencies:
version: ">= 0.1.6" version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: nats - name: nats
version: 1.2.2 version: 0.8.4
repository: https://nats-io.github.io/k8s/helm/charts/ #repository: https://nats-io.github.io/k8s/helm/charts/
condition: nats.enabled condition: nats.enabled
- name: rabbitmq - name: rabbitmq
version: 14.6.6 version: 12.5.7
repository: https://charts.bitnami.com/bitnami repository: https://charts.bitnami.com/bitnami
condition: rabbitmq.enabled condition: rabbitmq.enabled
- name: rabbitmq-cluster-operator - name: rabbitmq-cluster-operator
version: 4.3.19 version: 3.10.7
repository: https://charts.bitnami.com/bitnami repository: https://charts.bitnami.com/bitnami
condition: rabbitmq-cluster-operator.enabled condition: rabbitmq-cluster-operator.enabled
kubeVersion: ">= 1.26.0" kubeVersion: ">= 1.25.0"

View File

@ -1,6 +1,6 @@
# kubezero-mq # kubezero-mq
![Version: 0.3.10](https://img.shields.io/badge/Version-0.3.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.3.5](https://img.shields.io/badge/Version-0.3.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for MQ systems like NATS, RabbitMQ KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@ -14,27 +14,27 @@ KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
## Requirements ## Requirements
Kubernetes: `>= 1.26.0` Kubernetes: `>= 1.20.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| | nats | 0.8.4 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.bitnami.com/bitnami | rabbitmq | 14.6.6 | | https://charts.bitnami.com/bitnami | rabbitmq | 11.3.2 |
| https://charts.bitnami.com/bitnami | rabbitmq-cluster-operator | 4.3.19 | | https://charts.bitnami.com/bitnami | rabbitmq-cluster-operator | 3.1.4 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.2.2 |
## Values ## Values
| Key | Type | Default | Description | | Key | Type | Default | Description |
|-----|------|---------|-------------| |-----|------|---------|-------------|
| nats.config.jetstream.enabled | bool | `true` | |
| nats.enabled | bool | `false` | | | nats.enabled | bool | `false` | |
| nats.exporter.serviceMonitor.enabled | bool | `false` | |
| nats.istio.enabled | bool | `false` | | | nats.istio.enabled | bool | `false` | |
| nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | | | nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| nats.mqtt.enabled | bool | `false` | | | nats.mqtt.enabled | bool | `false` | |
| nats.natsBox.enabled | bool | `false` | | | nats.nats.advertise | bool | `false` | |
| nats.promExporter.enabled | bool | `false` | | | nats.nats.jetstream.enabled | bool | `true` | |
| nats.promExporter.podMonitor.enabled | bool | `false` | | | nats.natsbox.enabled | bool | `false` | |
| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | | | rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | | | rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
| rabbitmq-cluster-operator.enabled | bool | `false` | | | rabbitmq-cluster-operator.enabled | bool | `false` | |
@ -64,7 +64,7 @@ Kubernetes: `>= 1.26.0`
| rabbitmq.podAntiAffinityPreset | string | `""` | | | rabbitmq.podAntiAffinityPreset | string | `""` | |
| rabbitmq.replicaCount | int | `1` | | | rabbitmq.replicaCount | int | `1` | |
| rabbitmq.resources.requests.cpu | string | `"100m"` | | | rabbitmq.resources.requests.cpu | string | `"100m"` | |
| rabbitmq.resources.requests.memory | string | `"512Mi"` | | | rabbitmq.resources.requests.memory | string | `"256Mi"` | |
| rabbitmq.topologySpreadConstraints | string | `"- maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels: {{- include \"common.labels.matchLabels\" . | nindent 6 }}\n- maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels: {{- include \"common.labels.matchLabels\" . | nindent 6 }}"` | | | rabbitmq.topologySpreadConstraints | string | `"- maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels: {{- include \"common.labels.matchLabels\" . | nindent 6 }}\n- maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels: {{- include \"common.labels.matchLabels\" . | nindent 6 }}"` | |
| rabbitmq.ulimitNofiles | string | `""` | | | rabbitmq.ulimitNofiles | string | `""` | |

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,19 @@
apiVersion: v2
appVersion: 2.3.2
description: A Helm chart for the NATS.io High Speed Cloud Native Distributed Communications
Technology.
home: http://github.com/nats-io/k8s
icon: https://nats.io/img/nats-icon-color.png
keywords:
- nats
- messaging
- cncf
maintainers:
- email: wally@nats.io
name: Waldemar Quevedo
- email: colin@nats.io
name: Colin Sullivan
- email: jaime@nats.io
name: Jaime Piña
name: nats
version: 0.8.4

View File

@ -0,0 +1,596 @@
# NATS Server
[NATS](https://nats.io) is a simple, secure and performant communications system for digital systems, services and devices. NATS is part of the Cloud Native Computing Foundation ([CNCF](https://cncf.io)). NATS has over [30 client language implementations](https://nats.io/download/), and its server can run on-premise, in the cloud, at the edge, and even on a Raspberry Pi. NATS can secure and simplify design and operation of modern distributed systems.
## TL;DR;
```console
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm install my-nats nats/nats
```
## Configuration
### Server Image
```yaml
nats:
image: nats:2.1.7-alpine3.11
pullPolicy: IfNotPresent
```
### Limits
```yaml
nats:
# The number of connect attempts against discovered routes.
connectRetries: 30
# How many seconds should pass before sending a PING
# to a client that has no activity.
pingInterval:
# Server settings.
limits:
maxConnections:
maxSubscriptions:
maxControlLine:
maxPayload:
writeDeadline:
maxPending:
maxPings:
lameDuckDuration:
# Number of seconds to wait for client connections to end after the pod termination is requested
terminationGracePeriodSeconds: 60
```
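As a rough sketch, assuming the value names above map straight into the server config (the numbers here are purely illustrative, not recommendations):
```yaml
nats:
  limits:
    maxConnections: 1000   # cap concurrent client connections
    maxPayload: "2MB"      # largest message the server will accept
    writeDeadline: "2s"    # how long a flush to a client may block
    lameDuckDuration: "30s"
```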
### Logging
*Note*: It is not recommended to enable trace or debug in production since enabling it will significantly degrade performance.
```yaml
nats:
logging:
debug:
trace:
logtime:
connectErrorReports:
reconnectErrorReports:
```
### TLS setup for client connections
You can find more on how to set up and troubleshoot TLS connections at:
https://docs.nats.io/nats-server/configuration/securing_nats/tls
```yaml
nats:
tls:
secret:
name: nats-client-tls
ca: "ca.crt"
cert: "tls.crt"
key: "tls.key"
```
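The referenced secret has to exist before the pods come up. A minimal sketch for creating it, assuming the CA, certificate and key files are already on disk:
```console
kubectl create secret generic nats-client-tls \
  --from-file=ca.crt=./ca.crt \
  --from-file=tls.crt=./tls.crt \
  --from-file=tls.key=./tls.key
```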
## Clustering
If clustering is enabled, a 3-node cluster will be set up. More info at:
https://docs.nats.io/nats-server/configuration/clustering#nats-server-clustering
```yaml
cluster:
enabled: true
replicas: 3
tls:
secret:
name: nats-server-tls
ca: "ca.crt"
cert: "tls.crt"
key: "tls.key"
```
Example:
```sh
$ helm install nats nats/nats --set cluster.enabled=true
```
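To sanity-check the rollout you can watch the StatefulSet pods come up; a sketch assuming the chart's default `app` label (the exact labels and names depend on the release):
```console
# Expect three server pods once the cluster has formed:
kubectl get pods -l app=nats
```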
## Leafnodes
Leafnode connections can be used to extend a cluster. More info at:
https://docs.nats.io/nats-server/configuration/leafnodes
```yaml
leafnodes:
enabled: true
remotes:
- url: "tls://connect.ngs.global:7422"
# credentials:
# secret:
# name: leafnode-creds
# key: TA.creds
# tls:
# secret:
# name: nats-leafnode-tls
# ca: "ca.crt"
# cert: "tls.crt"
# key: "tls.key"
#######################
# #
# TLS Configuration #
# #
#######################
#
# # You can find more on how to set up and troubleshoot TLS connections at:
#
# # https://docs.nats.io/nats-server/configuration/securing_nats/tls
#
tls:
secret:
name: nats-client-tls
ca: "ca.crt"
cert: "tls.crt"
key: "tls.key"
```
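If the remote requires credentials, the commented `credentials` block above expects a secret containing the `.creds` file. A sketch for creating it, assuming the file already exists locally:
```console
kubectl create secret generic leafnode-creds \
  --from-file=TA.creds=./TA.creds
```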
## Setting up External Access
### Using HostPorts
In case both external access and advertisements are enabled, an
initializer container will be used to gather the public IPs. This
container requires enough RBAC policy to be able to look up the
public IP of the node where it is running.
For example, to set up external access for a cluster and advertise the public IP to clients:
```yaml
nats:
# Toggle whether to enable external access.
# This binds a host port for clients, gateways and leafnodes.
externalAccess: true
# Toggle to disable client advertisements (connect_urls),
# in case of running behind a load balancer (which is not recommended)
# it might be required to disable advertisements.
advertise: true
# In case both external access and advertise are enabled
# then a service account would be required to be able to
# gather the public ip from a node.
serviceAccount: "nats-server"
```
Here the service account named `nats-server` has, for example, the following RBAC policy:
```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nats-server
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nats-server
rules:
- apiGroups: [""]
resources:
- nodes
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nats-server-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nats-server
subjects:
- kind: ServiceAccount
name: nats-server
namespace: default
```
The container image of the initializer can be customized via:
```yaml
bootconfig:
image: natsio/nats-boot-config:latest
pullPolicy: IfNotPresent
```
### Using LoadBalancers
When using a load balancer for external access, it is recommended to disable client advertisements
(`noAdvertise: true`) so that internal IPs of the NATS servers are not advertised to the clients
connecting through the load balancer.
```yaml
nats:
image: nats:alpine
cluster:
enabled: true
noAdvertise: true
leafnodes:
enabled: true
noAdvertise: true
natsbox:
enabled: true
```
You can then use an L4-enabled load balancer to connect to NATS, for example:
```yaml
apiVersion: v1
kind: Service
metadata:
name: nats-lb
spec:
type: LoadBalancer
selector:
app.kubernetes.io/name: nats
ports:
- protocol: TCP
port: 4222
targetPort: 4222
name: nats
- protocol: TCP
port: 7422
targetPort: 7422
name: leafnodes
- protocol: TCP
port: 7522
targetPort: 7522
name: gateways
```
## Gateways
A super cluster can be formed by pointing to remote gateways.
You can find more about gateways in the NATS documentation:
https://docs.nats.io/nats-server/configuration/gateways
```yaml
gateway:
enabled: false
name: 'default'
#############################
# #
# List of remote gateways #
# #
#############################
# gateways:
# - name: other
# url: nats://my-gateway-url:7522
#######################
# #
# TLS Configuration #
# #
#######################
#
# # You can find more on how to set up and troubleshoot TLS connections at:
#
# # https://docs.nats.io/nats-server/configuration/securing_nats/tls
#
# tls:
# secret:
# name: nats-client-tls
# ca: "ca.crt"
# cert: "tls.crt"
# key: "tls.key"
```
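Put together, a minimal sketch for one side of a two-cluster super cluster, with placeholder names and URL:
```yaml
gateway:
  enabled: true
  name: "west"
  gateways:
    - name: "east"
      url: "nats://nats-east.example.com:7522"
```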
## Auth setup
### Auth with a Memory Resolver
```yaml
auth:
enabled: true
# Reference to the Operator JWT.
operatorjwt:
configMap:
name: operator-jwt
key: KO.jwt
# Public key of the System Account
systemAccount:
resolver:
############################
# #
# Memory resolver settings #
# #
##############################
type: memory
#
# Use a configmap reference which will be mounted
# into the container.
#
configMap:
name: nats-accounts
key: resolver.conf
```
### Auth using an Account Server Resolver
```yaml
auth:
enabled: true
# Reference to the Operator JWT.
operatorjwt:
configMap:
name: operator-jwt
key: KO.jwt
# Public key of the System Account
systemAccount:
resolver:
##########################
# #
# URL resolver settings #
# #
##########################
type: URL
url: "http://nats-account-server:9090/jwt/v1/accounts/"
```
## JetStream
### Setting up Memory and File Storage
```yaml
nats:
image: nats:alpine
jetstream:
enabled: true
memStorage:
enabled: true
size: 2Gi
fileStorage:
enabled: true
size: 1Gi
storageDirectory: /data/
storageClassName: default
```
### Using with an existing PersistentVolumeClaim
For example, given the following `PersistentVolumeClaim`:
```yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: nats-js-disk
annotations:
volume.beta.kubernetes.io/storage-class: "default"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 3Gi
```
You can start JetStream so that one pod is bound to it:
```yaml
nats:
image: nats:alpine
jetstream:
enabled: true
fileStorage:
enabled: true
storageDirectory: /data/
existingClaim: nats-js-disk
claimStorageSize: 3Gi
```
### Clustering example
```yaml
nats:
image: nats:alpine
jetstream:
enabled: true
memStorage:
enabled: true
size: "2Gi"
fileStorage:
enabled: true
size: "1Gi"
storageDirectory: /data/
storageClassName: default
cluster:
enabled: true
# Cluster name is required; by default it will be the release name.
# name: "nats"
replicas: 3
```
## Misc
### NATS Box
A lightweight container with NATS and NATS Streaming utilities, deployed alongside the cluster to verify the setup.
You can find the image at: https://github.com/nats-io/nats-box
```yaml
natsbox:
enabled: true
image: nats:alpine
pullPolicy: IfNotPresent
# credentials:
# secret:
# name: nats-sys-creds
# key: sys.creds
```
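Once deployed, you can shell into the box and exercise the cluster; a sketch assuming a release named `my-nats` and the utilities shipped in the nats-box image:
```console
kubectl exec -it my-nats-box -- /bin/sh -l
# inside the container:
nats-sub test &
nats-pub test "hello"
```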
### Configuration Reload sidecar
The NATS config reloader image to use:
```yaml
reloader:
enabled: true
image: natsio/nats-server-config-reloader:latest
pullPolicy: IfNotPresent
```
### Prometheus Exporter sidecar
You can toggle whether to start the sidecar that feeds metrics to Prometheus:
```yaml
exporter:
enabled: true
image: natsio/prometheus-nats-exporter:latest
pullPolicy: IfNotPresent
```
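To spot-check the metrics endpoint, you can port-forward to the sidecar; a sketch assuming the exporter listens on its usual port 7777 and a pod named `my-nats-0`:
```console
kubectl port-forward pod/my-nats-0 7777 &
curl -s http://localhost:7777/metrics | head
```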
### Prometheus operator ServiceMonitor support
You can enable a Prometheus Operator ServiceMonitor:
```yaml
exporter:
# You have to enable exporter first
enabled: true
serviceMonitor:
enabled: true
## Specify the namespace where Prometheus Operator is running
# namespace: monitoring
# ...
```
### Pod Customizations
#### Security Context
```yaml
# Toggle whether to set up a Pod Security Context
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
fsGroup: 1000
runAsUser: 1000
runAsNonRoot: true
```
#### Affinity
<https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity>
`matchExpressions` must be configured according to your setup
```yaml
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node.kubernetes.io/purpose
operator: In
values:
- nats
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- nats
- stan
topologyKey: "kubernetes.io/hostname"
```
#### Service topology
[Service topology](https://kubernetes.io/docs/concepts/services-networking/service-topology/) is disabled by default, but can be enabled by setting `topologyKeys`. For example:
```yaml
topologyKeys:
- "kubernetes.io/hostname"
- "topology.kubernetes.io/zone"
- "topology.kubernetes.io/region"
```
#### CPU/Memory Resource Requests/Limits
Sets the pod's CPU/memory requests and limits
```yaml
nats:
resources:
requests:
cpu: 2
memory: 4Gi
limits:
cpu: 4
memory: 6Gi
```
No resources are set by default.
#### Annotations
<https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations>
```yaml
podAnnotations:
  key1: "value1"
  key2: "value2"
```
### Name Overrides
You can change the names of the resources as needed with:
```yaml
nameOverride: "my-nats"
```
### Image Pull Secrets
```yaml
imagePullSecrets:
- name: myRegistry
```
Adds this to the StatefulSet:
```yaml
spec:
imagePullSecrets:
- name: myRegistry
```

View File

@ -0,0 +1,21 @@
// Operator "KO"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiI0U09OUjZLT05FMzNFRFhRWE5IR1JUSEg2TEhPM0dFU0xXWlJYNlNENTQ2MjQyTE80QlVRIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJLTyIsInN1YiI6Ik9DREc2T1lQV1hGU0tMR1NIUEFSR1NSWUNLTEpJUUkySU5FS1VWQUYzMk1XNTZWVExMNEZXSjRJIiwidHlwZSI6Im9wZXJhdG9yIiwibmF0cyI6e319.0039eTgLj-uyYFoWB3rivGP0WyIZkb_vrrE6tnqcNgIDM59o92nw_Rvb-hrvsK30QWqwm_W8BpVZHDMEY-CiBQ
system_account: ACLZ6OSWC7BXFT4VNVBDMWUFNBIVGHTUONOXI6TCBP3QHOD34JIDSRYW
resolver: MEMORY
resolver_preload: {
// Account "A"
AA3NXTHTXOHCTPIBKEDHNAYAHJ4CO7ERCOJFYCXOXVEOPZTMW55WX32Z: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJSM0QyWUM1UVlJWk4zS0hYR1FFRTZNQTRCRVU3WkFWQk5LSElJNTNOM0tLRVRTTVZEVVRRIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJBIiwic3ViIjoiQUEzTlhUSFRYT0hDVFBJQktFREhOQVlBSEo0Q083RVJDT0pGWUNYT1hWRU9QWlRNVzU1V1gzMloiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsiZXhwb3J0cyI6W3sibmFtZSI6InRlc3QiLCJzdWJqZWN0IjoidGVzdCIsInR5cGUiOiJzZXJ2aWNlIiwicmVzcG9uc2VfdHlwZSI6IlNpbmdsZXRvbiIsInNlcnZpY2VfbGF0ZW5jeSI6eyJzYW1wbGluZyI6MTAwLCJyZXN1bHRzIjoibGF0ZW5jeS5vbi50ZXN0In19XSwibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.W7oEjpQA986Hai3t8UOiJwCcVDYm2sj7L545oYZhQtYbydh_ragPn8pc0f1pA1krMz_ZDuBwKHLZRgXuNSysDQ
// Account "STAN"
AAYNFTMTKWXZEPPSEZLECMHE3VBULMIUO2QGVY3P4VCI7NNQC3TVX2PB: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJRSUozV0I0MjdSVU5RSlZFM1dRVEs3TlNaVlpaNkRQT01KWkdHMlhTMzQ2WFNQTVZERElBIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJTVEFOIiwic3ViIjoiQUFZTkZUTVRLV1haRVBQU0VaTEVDTUhFM1ZCVUxNSVVPMlFHVlkzUDRWQ0k3Tk5RQzNUVlgyUEIiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.SPyQdAFmoON577s-eZP4K3-9QXYhTn9Xqy3aDGeHvHYRE9IVD47Eu7d38ZiySPlxgkdM_WXZn241_59d07axBA
// Account "SYS"
ACLZ6OSWC7BXFT4VNVBDMWUFNBIVGHTUONOXI6TCBP3QHOD34JIDSRYW: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJGSk1TSEROVlVGUEM0U0pSRlcyV0NZT1hRWUFDM1hNNUJaWTRKQUZWUTc1V0lEUkdDN0lBIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJTWVMiLCJzdWIiOiJBQ0xaNk9TV0M3QlhGVDRWTlZCRE1XVUZOQklWR0hUVU9OT1hJNlRDQlAzUUhPRDM0SklEU1JZVyIsInR5cGUiOiJhY2NvdW50IiwibmF0cyI6eyJsaW1pdHMiOnsic3VicyI6LTEsImNvbm4iOi0xLCJsZWFmIjotMSwiaW1wb3J0cyI6LTEsImV4cG9ydHMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsIndpbGRjYXJkcyI6dHJ1ZX19fQ.owW08dIa97STqgT0ux-5sD00Ad0I3HstJKTmh1CGVpsQwelaZdrBuia-4XgCgN88zuLokPMfWI_pkxXU_iB0BA
// Account "B"
ADOR7Q5KMWC2XIWRRRC4MZUDCPYG3UMAIWDRX6M2MFDY5SR6HQAAMHJA: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJRQjdIRFg3VUZYN01KUjZPS1E2S1dRSlVUUEpWWENTNkJCWjQ3SDVVTFdVVFNRUU1NQzJRIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJCIiwic3ViIjoiQURPUjdRNUtNV0MyWElXUlJSQzRNWlVEQ1BZRzNVTUFJV0RSWDZNMk1GRFk1U1I2SFFBQU1ISkEiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsiaW1wb3J0cyI6W3sibmFtZSI6InRlc3QiLCJzdWJqZWN0IjoidGVzdCIsImFjY291bnQiOiJBQTNOWFRIVFhPSENUUElCS0VESE5BWUFISjRDTzdFUkNPSkZZQ1hPWFZFT1BaVE1XNTVXWDMyWiIsInRvIjoidGVzdCIsInR5cGUiOiJzZXJ2aWNlIn1dLCJsaW1pdHMiOnsic3VicyI6LTEsImNvbm4iOi0xLCJsZWFmIjotMSwiaW1wb3J0cyI6LTEsImV4cG9ydHMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsIndpbGRjYXJkcyI6dHJ1ZX19fQ.r5p_sGt_hmDfWWIJGrLodAM8VfXPeUzsbRtzrMTBGGkcLdi4jqAHXRu09CmFISEzX2VKeGuOonGuAMOFotvICg
}

View File

@ -0,0 +1,24 @@
# Setup memory preload config.
auth:
enabled: true
resolver:
type: memory
preload: |
// Operator "KO"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiI0U09OUjZLT05FMzNFRFhRWE5IR1JUSEg2TEhPM0dFU0xXWlJYNlNENTQ2MjQyTE80QlVRIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJLTyIsInN1YiI6Ik9DREc2T1lQV1hGU0tMR1NIUEFSR1NSWUNLTEpJUUkySU5FS1VWQUYzMk1XNTZWVExMNEZXSjRJIiwidHlwZSI6Im9wZXJhdG9yIiwibmF0cyI6e319.0039eTgLj-uyYFoWB3rivGP0WyIZkb_vrrE6tnqcNgIDM59o92nw_Rvb-hrvsK30QWqwm_W8BpVZHDMEY-CiBQ
system_account: ACLZ6OSWC7BXFT4VNVBDMWUFNBIVGHTUONOXI6TCBP3QHOD34JIDSRYW
resolver_preload: {
// Account "A"
AA3NXTHTXOHCTPIBKEDHNAYAHJ4CO7ERCOJFYCXOXVEOPZTMW55WX32Z: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJSM0QyWUM1UVlJWk4zS0hYR1FFRTZNQTRCRVU3WkFWQk5LSElJNTNOM0tLRVRTTVZEVVRRIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJBIiwic3ViIjoiQUEzTlhUSFRYT0hDVFBJQktFREhOQVlBSEo0Q083RVJDT0pGWUNYT1hWRU9QWlRNVzU1V1gzMloiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsiZXhwb3J0cyI6W3sibmFtZSI6InRlc3QiLCJzdWJqZWN0IjoidGVzdCIsInR5cGUiOiJzZXJ2aWNlIiwicmVzcG9uc2VfdHlwZSI6IlNpbmdsZXRvbiIsInNlcnZpY2VfbGF0ZW5jeSI6eyJzYW1wbGluZyI6MTAwLCJyZXN1bHRzIjoibGF0ZW5jeS5vbi50ZXN0In19XSwibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.W7oEjpQA986Hai3t8UOiJwCcVDYm2sj7L545oYZhQtYbydh_ragPn8pc0f1pA1krMz_ZDuBwKHLZRgXuNSysDQ
// Account "STAN"
AAYNFTMTKWXZEPPSEZLECMHE3VBULMIUO2QGVY3P4VCI7NNQC3TVX2PB: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJRSUozV0I0MjdSVU5RSlZFM1dRVEs3TlNaVlpaNkRQT01KWkdHMlhTMzQ2WFNQTVZERElBIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJTVEFOIiwic3ViIjoiQUFZTkZUTVRLV1haRVBQU0VaTEVDTUhFM1ZCVUxNSVVPMlFHVlkzUDRWQ0k3Tk5RQzNUVlgyUEIiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.SPyQdAFmoON577s-eZP4K3-9QXYhTn9Xqy3aDGeHvHYRE9IVD47Eu7d38ZiySPlxgkdM_WXZn241_59d07axBA
// Account "SYS"
ACLZ6OSWC7BXFT4VNVBDMWUFNBIVGHTUONOXI6TCBP3QHOD34JIDSRYW: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJGSk1TSEROVlVGUEM0U0pSRlcyV0NZT1hRWUFDM1hNNUJaWTRKQUZWUTc1V0lEUkdDN0lBIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJTWVMiLCJzdWIiOiJBQ0xaNk9TV0M3QlhGVDRWTlZCRE1XVUZOQklWR0hUVU9OT1hJNlRDQlAzUUhPRDM0SklEU1JZVyIsInR5cGUiOiJhY2NvdW50IiwibmF0cyI6eyJsaW1pdHMiOnsic3VicyI6LTEsImNvbm4iOi0xLCJsZWFmIjotMSwiaW1wb3J0cyI6LTEsImV4cG9ydHMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsIndpbGRjYXJkcyI6dHJ1ZX19fQ.owW08dIa97STqgT0ux-5sD00Ad0I3HstJKTmh1CGVpsQwelaZdrBuia-4XgCgN88zuLokPMfWI_pkxXU_iB0BA
// Account "B"
ADOR7Q5KMWC2XIWRRRC4MZUDCPYG3UMAIWDRX6M2MFDY5SR6HQAAMHJA: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJRQjdIRFg3VUZYN01KUjZPS1E2S1dRSlVUUEpWWENTNkJCWjQ3SDVVTFdVVFNRUU1NQzJRIiwiaWF0IjoxNTgzNzg1MTMyLCJpc3MiOiJPQ0RHNk9ZUFdYRlNLTEdTSFBBUkdTUllDS0xKSVFJMklORUtVVkFGMzJNVzU2VlRMTDRGV0o0SSIsIm5hbWUiOiJCIiwic3ViIjoiQURPUjdRNUtNV0MyWElXUlJSQzRNWlVEQ1BZRzNVTUFJV0RSWDZNMk1GRFk1U1I2SFFBQU1ISkEiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsiaW1wb3J0cyI6W3sibmFtZSI6InRlc3QiLCJzdWJqZWN0IjoidGVzdCIsImFjY291bnQiOiJBQTNOWFRIVFhPSENUUElCS0VESE5BWUFISjRDTzdFUkNPSkZZQ1hPWFZFT1BaVE1XNTVXWDMyWiIsInRvIjoidGVzdCIsInR5cGUiOiJzZXJ2aWNlIn1dLCJsaW1pdHMiOnsic3VicyI6LTEsImNvbm4iOi0xLCJsZWFmIjotMSwiaW1wb3J0cyI6LTEsImV4cG9ydHMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsIndpbGRjYXJkcyI6dHJ1ZX19fQ.r5p_sGt_hmDfWWIJGrLodAM8VfXPeUzsbRtzrMTBGGkcLdi4jqAHXRu09CmFISEzX2VKeGuOonGuAMOFotvICg
}

View File

@ -0,0 +1,9 @@
# Setup memory preload config.
auth:
enabled: true
resolver:
type: memory
configMap:
name: nats-accounts
key: resolver.conf
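# The ConfigMap referenced above is not created by the chart itself; as a
# sketch, assuming the account config was written to a local resolver.conf,
# it can be created with:
#   kubectl create configmap nats-accounts --from-file=resolver.conf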

View File

@ -0,0 +1,9 @@
let accounts = ./accounts.conf as Text
in
''
port: 4222
${accounts}
''
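-- As a sketch, assuming this file is saved as nats.dhall next to
-- accounts.conf, the config can be rendered to plain text with the dhall CLI:
--   dhall text --file nats.dhall > nats.conf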

View File

@ -0,0 +1,21 @@
// Operator "KO"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJKS0E2U0pKUUVOTFpYVDJEWTRWNE00UDZXUFRVUlhIQzNMU1pJWEZWRlFGV0I3U0tKVk9BIiwiaWF0IjoxNTgzODIyNjYwLCJpc3MiOiJPQkZCSEMzVTVXVVRFTVpKTzNYN0hZWTJCNjNQWUpQT0RYS0FWWUdHU0VNQTczTEtGTVg0TEYyQSIsIm5hbWUiOiJLTyIsInN1YiI6Ik9CRkJIQzNVNVdVVEVNWkpPM1g3SFlZMkI2M1BZSlBPRFhLQVZZR0dTRU1BNzNMS0ZNWDRMRjJBIiwidHlwZSI6Im9wZXJhdG9yIiwibmF0cyI6e319.60YToJe3Dz9OZES80jYXVgg7uCB1c3BsX6HglA8tsKKRe-Br3pMpn9yUPUujjB61MGqnA7Zmbx8qWnoj8CkuCw
system_account: ABL65FFQWUDHHTGMGRFVVSQDBAWHGEJ2CDRCMGBFV6SB4MLKFSUPN7GP
resolver: MEMORY
resolver_preload: {
// Account "B"
AAIJAGRSL2KCEPTRBP6DJCTAMSNOUXRILLZXIY6CTZ4GR27ISCZOP6QH: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJEVTdWV1BXQUtBSVdNNkhNUElDNE43TVRGTFEyV01JUFhFVU5aNVEzRE1YSTRKVkpLQU1BIiwiaWF0IjoxNTgzODIyNjYwLCJpc3MiOiJPQkZCSEMzVTVXVVRFTVpKTzNYN0hZWTJCNjNQWUpQT0RYS0FWWUdHU0VNQTczTEtGTVg0TEYyQSIsIm5hbWUiOiJCIiwic3ViIjoiQUFJSkFHUlNMMktDRVBUUkJQNkRKQ1RBTVNOT1VYUklMTFpYSVk2Q1RaNEdSMjdJU0NaT1A2UUgiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsiaW1wb3J0cyI6W3sibmFtZSI6InRlc3QiLCJzdWJqZWN0IjoidGVzdCIsImFjY291bnQiOiJBQlhXNU9aV09LSzUzWDNWNUhSVkdPMlJXTlVUU1NQSU1HVDZORU9SMjNBQzRNTk1QTlFTUTZWTCIsInRvIjoidGVzdCIsInR5cGUiOiJzZXJ2aWNlIn1dLCJsaW1pdHMiOnsic3VicyI6LTEsImNvbm4iOi0xLCJsZWFmIjotMSwiaW1wb3J0cyI6LTEsImV4cG9ydHMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsIndpbGRjYXJkcyI6dHJ1ZX19fQ.VLv3U7k8jJaIcGpDYXo0XQCYNVMNQd2PHVUOXGMvCU8ifiYpkaRJ4G0UXZHqlQl_0g3M_LEtJw0K-4HwgOeIAA
// Account "SYS"
ABL65FFQWUDHHTGMGRFVVSQDBAWHGEJ2CDRCMGBFV6SB4MLKFSUPN7GP: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJPSUpENkozTjdCVk0zSEY0M0NCTUhLMllUNlpXTlFCWkZBRzQ0VE5RSFA3SlVZT0hZR0dRIiwiaWF0IjoxNTgzODIyNjYwLCJpc3MiOiJPQkZCSEMzVTVXVVRFTVpKTzNYN0hZWTJCNjNQWUpQT0RYS0FWWUdHU0VNQTczTEtGTVg0TEYyQSIsIm5hbWUiOiJTWVMiLCJzdWIiOiJBQkw2NUZGUVdVREhIVEdNR1JGVlZTUURCQVdIR0VKMkNEUkNNR0JGVjZTQjRNTEtGU1VQTjdHUCIsInR5cGUiOiJhY2NvdW50IiwibmF0cyI6eyJsaW1pdHMiOnsic3VicyI6LTEsImNvbm4iOi0xLCJsZWFmIjotMSwiaW1wb3J0cyI6LTEsImV4cG9ydHMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsIndpbGRjYXJkcyI6dHJ1ZX19fQ.Jei8psto5h35bFn4y1Unsk0Noh6MYJxkB8Hs-nnLuUBrkTppSwukEkM_ufNGA_lxsmPki3zBf8y6rsQ13Ec5AA
// Account "A"
ABXW5OZWOKK53X3V5HRVGO2RWNUTSSPIMGT6NEOR23AC4MNMPNQSQ6VL: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJSRFNRTE9GT0gzRUoyUUNLSkkyUEJXNkxLMllUVFJDUUdGV0tJRFJJRVRVMzZTT0RPT1FRIiwiaWF0IjoxNTgzODIyNjYwLCJpc3MiOiJPQkZCSEMzVTVXVVRFTVpKTzNYN0hZWTJCNjNQWUpQT0RYS0FWWUdHU0VNQTczTEtGTVg0TEYyQSIsIm5hbWUiOiJBIiwic3ViIjoiQUJYVzVPWldPS0s1M1gzVjVIUlZHTzJSV05VVFNTUElNR1Q2TkVPUjIzQUM0TU5NUE5RU1E2VkwiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsiZXhwb3J0cyI6W3sibmFtZSI6InRlc3QiLCJzdWJqZWN0IjoidGVzdCIsInR5cGUiOiJzZXJ2aWNlIiwicmVzcG9uc2VfdHlwZSI6IlNpbmdsZXRvbiIsInNlcnZpY2VfbGF0ZW5jeSI6eyJzYW1wbGluZyI6MTAwLCJyZXN1bHRzIjoibGF0ZW5jeS5vbi50ZXN0In19XSwibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.lJfHHkbXeEf6DbHFju0zktCjWL0kgll17BdYJl6f2hcZxbUtiyf3H1mGfrzELgCuEO7p8X11UpRVy_eTQfnGAA
// Account "STAN"
ACLSVE2AZYTXOBIJXOV5XHAIIM7KLL777F7GAEWW5W5P4IAR2VZJSGID: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJJT1ZPSFBPV1hJRDI2U1JYVEJQTTVUQlVKWDJRU0FSSTJMQjJTM09aRFpMU0paS1BOVU9BIiwiaWF0IjoxNTgzODIyNjYwLCJpc3MiOiJPQkZCSEMzVTVXVVRFTVpKTzNYN0hZWTJCNjNQWUpQT0RYS0FWWUdHU0VNQTczTEtGTVg0TEYyQSIsIm5hbWUiOiJTVEFOIiwic3ViIjoiQUNMU1ZFMkFaWVRYT0JJSlhPVjVYSEFJSU03S0xMNzc3RjdHQUVXVzVXNVA0SUFSMlZaSlNHSUQiLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.CE5_K9kAdAgxesJRiJYh3kK2f74_c7T3bNQhgfaXOMzI8X6VOWqn0_5gH9jOD0xzHXIYiUMwy7a4Ou63PizHCw
}

View File

@ -0,0 +1,26 @@
{{- if or .Values.nats.logging.debug .Values.nats.logging.trace }}
*WARNING*: Keep in mind that running the server with
debug and/or trace enabled significantly affects the
performance of the server!
{{- end }}
You can find more information about running NATS on Kubernetes
in the NATS documentation website:
https://docs.nats.io/nats-on-kubernetes/nats-kubernetes
{{- if .Values.natsbox.enabled }}
NATS Box has been deployed into your cluster, you can
now use the NATS tools within the container as follows:
kubectl exec -n {{ .Release.Namespace }} -it deployment/{{ template "nats.fullname" . }}-box -- /bin/sh -l
nats-box:~# nats-sub test &
nats-box:~# nats-pub test hi
nats-box:~# nc {{ template "nats.fullname" . }} 4222
{{- end }}
Thanks for using NATS!

View File

@ -0,0 +1,98 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "nats.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "nats.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "nats.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "nats.labels" -}}
helm.sh/chart: {{ include "nats.chart" . }}
{{- range $name, $value := .Values.commonLabels }}
{{ $name }}: {{ $value }}
{{- end }}
{{ include "nats.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "nats.selectorLabels" -}}
app.kubernetes.io/name: {{ include "nats.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Return the cluster advertise address.
*/}}
{{- define "nats.clusterAdvertise" -}}
{{- printf "$(POD_NAME).%s.$(POD_NAMESPACE).svc.%s." (include "nats.fullname" . ) $.Values.k8sClusterDomain }}
{{- end }}
{{/*
Return the NATS cluster routes.
*/}}
{{- define "nats.clusterRoutes" -}}
{{- $name := (include "nats.fullname" . ) -}}
{{- range $i, $e := until (.Values.cluster.replicas | int) -}}
{{- printf "nats://%s-%d.%s.%s.svc.%s.:6222," $name $i $name $.Release.Namespace $.Values.k8sClusterDomain -}}
{{- end -}}
{{- end }}
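{{/*
Illustration, assuming fullname "nats", namespace "default", 3 replicas and
the default cluster domain: the helper above renders
nats://nats-0.nats.default.svc.cluster.local.:6222,nats://nats-1.nats.default.svc.cluster.local.:6222,nats://nats-2.nats.default.svc.cluster.local.:6222,
*/}}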
{{- define "nats.tlsConfig" -}}
tls {
{{- if .cert }}
cert_file: {{ .secretPath }}/{{ .secret.name }}/{{ .cert }}
{{- end }}
{{- if .key }}
key_file: {{ .secretPath }}/{{ .secret.name }}/{{ .key }}
{{- end }}
{{- if .ca }}
ca_file: {{ .secretPath }}/{{ .secret.name }}/{{ .ca }}
{{- end }}
{{- if .insecure }}
insecure: {{ .insecure }}
{{- end }}
{{- if .verify }}
verify: {{ .verify }}
{{- end }}
{{- if .verifyAndMap }}
verify_and_map: {{ .verifyAndMap }}
{{- end }}
{{- if .curvePreferences }}
curve_preferences: {{ .curvePreferences }}
{{- end }}
{{- if .timeout }}
timeout: {{ .timeout }}
{{- end }}
}
{{- end }}
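{{/*
Illustration, assuming the commented values example (secret "nats-client-tls"
with ca.crt/tls.crt/tls.key) and secretPath "/etc/nats-certs/clients": the
helper above renders
tls {
  cert_file: /etc/nats-certs/clients/nats-client-tls/tls.crt
  key_file: /etc/nats-certs/clients/nats-client-tls/tls.key
  ca_file: /etc/nats-certs/clients/nats-client-tls/ca.crt
}
*/}}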

View File

@ -0,0 +1,15 @@
{{- if .Values.auth.enabled }}
{{- if eq .Values.auth.resolver.type "memory" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "nats.name" . }}-accounts
labels:
app: {{ template "nats.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
data:
accounts.conf: |-
{{- .Files.Get "accounts.conf" | indent 6 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,398 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "nats.fullname" . }}-config
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "nats.labels" . | nindent 4 }}
data:
nats.conf: |
# PID file shared with configuration reloader.
pid_file: "/var/run/nats/nats.pid"
###############
# #
# Monitoring #
# #
###############
http: 8222
server_name: $POD_NAME
{{- if .Values.nats.tls }}
#####################
# #
# TLS Configuration #
# #
#####################
{{- with .Values.nats.tls }}
{{- $nats_tls := merge (dict) . }}
{{- $_ := set $nats_tls "secretPath" "/etc/nats-certs/clients" }}
{{- include "nats.tlsConfig" $nats_tls | nindent 4}}
{{- end }}
{{- end }}
{{- if .Values.nats.jetstream.enabled }}
###################################
# #
# NATS JetStream #
# #
###################################
jetstream {
{{- if .Values.nats.jetstream.memStorage.enabled }}
max_mem: {{ .Values.nats.jetstream.memStorage.size }}
{{- end }}
{{- if .Values.nats.jetstream.fileStorage.enabled }}
store_dir: {{ .Values.nats.jetstream.fileStorage.storageDirectory }}
max_file:
{{- if .Values.nats.jetstream.fileStorage.existingClaim }}
{{- .Values.nats.jetstream.fileStorage.claimStorageSize }}
{{- else }}
{{- .Values.nats.jetstream.fileStorage.size }}
{{- end }}
{{- end }}
}
{{- end }}
{{- if .Values.mqtt.enabled }}
###################################
# #
# NATS MQTT #
# #
###################################
mqtt {
port: 1883
{{- with .Values.mqtt.tls }}
{{- $mqtt_tls := merge (dict) . }}
{{- $_ := set $mqtt_tls "secretPath" "/etc/nats-certs/mqtt" }}
{{- include "nats.tlsConfig" $mqtt_tls | nindent 6}}
{{- end }}
{{- if .Values.mqtt.noAuthUser }}
no_auth_user: {{ .Values.mqtt.noAuthUser | quote }}
{{- end }}
ack_wait: {{ .Values.mqtt.ackWait | quote }}
max_ack_pending: {{ .Values.mqtt.maxAckPending }}
}
{{- end }}
{{- if .Values.cluster.enabled }}
###################################
# #
# NATS Full Mesh Clustering Setup #
# #
###################################
cluster {
port: 6222
{{- if .Values.nats.jetstream.enabled }}
{{- if .Values.cluster.name }}
name: {{ .Values.cluster.name }}
{{- else }}
name: {{ template "nats.name" . }}
{{- end }}
{{- else }}
{{- with .Values.cluster.name }}
name: {{ . }}
{{- end }}
{{- end }}
{{- with .Values.cluster.tls }}
{{- $cluster_tls := merge (dict) . }}
{{- $_ := set $cluster_tls "secretPath" "/etc/nats-certs/cluster" }}
{{- include "nats.tlsConfig" $cluster_tls | nindent 6}}
{{- end }}
{{- if .Values.cluster.authorization }}
authorization {
{{- with .Values.cluster.authorization.user }}
user: {{ . }}
{{- end }}
{{- with .Values.cluster.authorization.password }}
password: {{ . }}
{{- end }}
{{- with .Values.cluster.authorization.timeout }}
timeout: {{ . }}
{{- end }}
}
{{- end }}
routes = [
{{ include "nats.clusterRoutes" . }}
]
cluster_advertise: $CLUSTER_ADVERTISE
{{- with .Values.cluster.noAdvertise }}
no_advertise: {{ . }}
{{- end }}
connect_retries: {{ .Values.nats.connectRetries }}
}
{{ end }}
{{- if and .Values.nats.advertise .Values.nats.externalAccess }}
include "advertise/client_advertise.conf"
{{- end }}
{{- if or .Values.leafnodes.enabled .Values.leafnodes.remotes }}
#################
# #
# NATS Leafnode #
# #
#################
leafnodes {
{{- if .Values.leafnodes.enabled }}
listen: "0.0.0.0:7422"
{{- end }}
{{ if and .Values.nats.advertise .Values.nats.externalAccess }}
include "advertise/gateway_advertise.conf"
{{ end }}
{{- with .Values.leafnodes.noAdvertise }}
no_advertise: {{ . }}
{{- end }}
{{- with .Values.leafnodes.tls }}
{{- $leafnode_tls := merge (dict) . }}
{{- $_ := set $leafnode_tls "secretPath" "/etc/nats-certs/leafnodes" }}
{{- include "nats.tlsConfig" $leafnode_tls | nindent 6}}
{{- end }}
remotes: [
{{- range .Values.leafnodes.remotes }}
{
{{- with .url }}
url: {{ . }}
{{- end }}
{{- with .credentials }}
credentials: "/etc/nats-creds/{{ .secret.name }}/{{ .secret.key }}"
{{- end }}
{{- with .tls }}
{{ $secretName := .secret.name }}
tls: {
{{- with .cert }}
cert_file: /etc/nats-certs/leafnodes/{{ $secretName }}/{{ . }}
{{- end }}
{{- with .key }}
key_file: /etc/nats-certs/leafnodes/{{ $secretName }}/{{ . }}
{{- end }}
{{- with .ca }}
ca_file: /etc/nats-certs/leafnodes/{{ $secretName }}/{{ . }}
{{- end }}
}
{{- end }}
}
{{- end }}
]
}
{{ end }}
{{- if .Values.gateway.enabled }}
#################
# #
# NATS Gateways #
# #
#################
gateway {
name: {{ .Values.gateway.name }}
port: 7522
{{ if and .Values.nats.advertise .Values.nats.externalAccess }}
include "advertise/gateway_advertise.conf"
{{ end }}
{{- with .Values.gateway.tls }}
{{- $gateway_tls := merge (dict) . }}
{{- $_ := set $gateway_tls "secretPath" "/etc/nats-certs/gateway" }}
{{- include "nats.tlsConfig" $gateway_tls | nindent 6}}
{{- end }}
# Gateways array here
gateways: [
{{- range .Values.gateway.gateways }}
{
{{- with .name }}
name: {{ . }}
{{- end }}
{{- with .url }}
url: {{ . | quote }}
{{- end }}
{{- with .urls }}
urls: [{{ join "," . }}]
{{- end }}
},
{{- end }}
]
}
{{ end }}
{{- with .Values.nats.logging.debug }}
debug: {{ . }}
{{- end }}
{{- with .Values.nats.logging.trace }}
trace: {{ . }}
{{- end }}
{{- with .Values.nats.logging.logtime }}
logtime: {{ . }}
{{- end }}
{{- with .Values.nats.logging.connectErrorReports }}
connect_error_reports: {{ . }}
{{- end }}
{{- with .Values.nats.logging.reconnectErrorReports }}
reconnect_error_reports: {{ . }}
{{- end }}
{{- with .Values.nats.limits.maxConnections }}
max_connections: {{ . }}
{{- end }}
{{- with .Values.nats.limits.maxSubscriptions }}
max_subscriptions: {{ . }}
{{- end }}
{{- with .Values.nats.limits.maxPending }}
max_pending: {{ . }}
{{- end }}
{{- with .Values.nats.limits.maxControlLine }}
max_control_line: {{ . }}
{{- end }}
{{- with .Values.nats.limits.maxPayload }}
max_payload: {{ . }}
{{- end }}
{{- with .Values.nats.pingInterval }}
ping_interval: {{ . }}
{{- end }}
{{- with .Values.nats.limits.maxPings }}
ping_max: {{ . }}
{{- end }}
{{- with .Values.nats.limits.writeDeadline }}
write_deadline: {{ . | quote }}
{{- end }}
{{- with .Values.nats.limits.lameDuckDuration }}
lame_duck_duration: {{ . | quote }}
{{- end }}
{{- if .Values.websocket.enabled }}
##################
# #
# Websocket #
# #
##################
websocket {
port: {{ .Values.websocket.port }}
{{- with .Values.websocket.tls }}
{{ $secretName := .secret.name }}
tls {
{{- with .cert }}
cert_file: /etc/nats-certs/ws/{{ $secretName }}/{{ . }}
{{- end }}
{{- with .key }}
key_file: /etc/nats-certs/ws/{{ $secretName }}/{{ . }}
{{- end }}
{{- with .ca }}
ca_file: /etc/nats-certs/ws/{{ $secretName }}/{{ . }}
{{- end }}
}
{{- else }}
no_tls: {{ .Values.websocket.noTLS }}
{{- end }}
}
{{- end }}
{{- if .Values.auth.enabled }}
##################
# #
# Authorization #
# #
##################
{{- if .Values.auth.resolver }}
{{- if eq .Values.auth.resolver.type "memory" }}
resolver: MEMORY
include "accounts/{{ .Values.auth.resolver.configMap.key }}"
{{- end }}
{{- if eq .Values.auth.resolver.type "full" }}
{{- if .Values.auth.resolver.configMap }}
include "accounts/{{ .Values.auth.resolver.configMap.key }}"
{{- else }}
{{- with .Values.auth.resolver }}
operator: {{ .operator }}
system_account: {{ .systemAccount }}
{{- end }}
resolver: {
type: full
{{- with .Values.auth.resolver }}
dir: {{ .store.dir | quote }}
allow_delete: {{ .allowDelete }}
interval: {{ .interval | quote }}
{{- end }}
}
{{- end }}
{{- end }}
{{- if .Values.auth.resolver.resolverPreload }}
resolver_preload: {{ toRawJson .Values.auth.resolver.resolverPreload }}
{{- end }}
{{- if eq .Values.auth.resolver.type "URL" }}
{{- with .Values.auth.resolver.url }}
resolver: URL({{ . }})
{{- end }}
operator: /etc/nats-config/operator/{{ .Values.auth.operatorjwt.configMap.key }}
{{- end }}
{{- end }}
{{- with .Values.auth.systemAccount }}
system_account: {{ . }}
{{- end }}
{{- with .Values.auth.basic }}
{{- with .noAuthUser }}
no_auth_user: {{ . }}
{{- end }}
{{- with .users }}
authorization {
users: [
{{- range . }}
{{- toRawJson . | nindent 4 }},
{{- end }}
]
}
{{- end }}
{{- if .token }}
authorization {
token: "{{ .token }}"
}
{{- end }}
{{- with .accounts }}
accounts: {{- toRawJson . }}
{{- end }}
{{- end }}
{{- end }}
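{{/*
Illustration: with auth, TLS, JetStream, MQTT, clustering, leafnodes,
gateways and websocket all disabled, the template above reduces to roughly
pid_file: "/var/run/nats/nats.pid"
http: 8222
server_name: $POD_NAME
*/}}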

View File

@ -0,0 +1,95 @@
{{- if .Values.natsbox.enabled }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "nats.fullname" . }}-box
namespace: {{ .Release.Namespace | quote }}
labels:
app: {{ include "nats.fullname" . }}-box
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
spec:
replicas: 1
selector:
matchLabels:
app: {{ include "nats.fullname" . }}-box
template:
metadata:
labels:
app: {{ include "nats.fullname" . }}-box
{{- if .Values.natsbox.podAnnotations }}
annotations:
{{- range $key, $value := .Values.natsbox.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
{{- with .Values.natsbox.affinity }}
affinity:
{{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
volumes:
{{- if .Values.natsbox.credentials }}
- name: nats-sys-creds
secret:
secretName: {{ .Values.natsbox.credentials.secret.name }}
{{- end }}
{{- with .Values.nats.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-clients-volume
secret:
secretName: {{ $secretName }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: nats-box
image: {{ .Values.natsbox.image }}
imagePullPolicy: {{ .Values.natsbox.pullPolicy }}
resources:
{{- toYaml .Values.natsbox.resources | nindent 10 }}
env:
- name: NATS_URL
value: {{ template "nats.fullname" . }}
{{- if .Values.natsbox.credentials }}
- name: USER_CREDS
value: /etc/nats-config/creds/{{ .Values.natsbox.credentials.secret.key }}
- name: USER2_CREDS
value: /etc/nats-config/creds/{{ .Values.natsbox.credentials.secret.key }}
{{- end }}
{{- with .Values.nats.tls }}
{{ $secretName := .secret.name }}
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- cp /etc/nats-certs/clients/{{ $secretName }}/* /usr/local/share/ca-certificates && update-ca-certificates
{{- end }}
command:
- "tail"
- "-f"
- "/dev/null"
volumeMounts:
{{- if .Values.natsbox.credentials }}
- name: nats-sys-creds
mountPath: /etc/nats-config/creds
{{- end }}
{{- with .Values.nats.tls }}
#######################
# #
# TLS Volumes Mounts #
# #
#######################
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-clients-volume
mountPath: /etc/nats-certs/clients/{{ $secretName }}
{{- end }}
{{- with .Values.securityContext }}
securityContext:
{{ toYaml . | indent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if .Values.podDisruptionBudget }}
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "nats.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "nats.labels" . | nindent 4 }}
spec:
{{- if .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "nats.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,31 @@
{{ if and .Values.nats.externalAccess .Values.nats.advertise }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.nats.serviceAccount }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Values.nats.serviceAccount }}
rules:
- apiGroups: [""]
resources:
- nodes
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Values.nats.serviceAccount }}-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Values.nats.serviceAccount }}
subjects:
- kind: ServiceAccount
name: {{ .Values.nats.serviceAccount }}
namespace: {{ .Release.Namespace }}
{{ end }}

View File

@ -0,0 +1,67 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "nats.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "nats.labels" . | nindent 4 }}
{{- if .Values.serviceAnnotations}}
annotations:
{{- range $key, $value := .Values.serviceAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
selector:
{{- include "nats.selectorLabels" . | nindent 4 }}
clusterIP: None
{{- if .Values.topologyKeys }}
topologyKeys:
{{- .Values.topologyKeys | toYaml | nindent 4 }}
{{- end }}
ports:
{{- if .Values.websocket.enabled }}
- name: websocket
port: {{ .Values.websocket.port }}
{{- if .Values.appProtocol.enabled }}
appProtocol: tcp
{{- end }}
{{- end }}
- name: client
port: 4222
{{- if .Values.appProtocol.enabled }}
appProtocol: tcp
{{- end }}
- name: cluster
port: 6222
{{- if .Values.appProtocol.enabled }}
appProtocol: tcp
{{- end }}
- name: monitor
port: 8222
{{- if .Values.appProtocol.enabled }}
appProtocol: http
{{- end }}
- name: metrics
port: 7777
{{- if .Values.appProtocol.enabled }}
appProtocol: http
{{- end }}
- name: leafnodes
port: 7422
{{- if .Values.appProtocol.enabled }}
appProtocol: tcp
{{- end }}
- name: gateways
port: 7522
{{- if .Values.appProtocol.enabled }}
appProtocol: tcp
{{- end }}
{{- if .Values.mqtt.enabled }}
- name: mqtt
port: 1883
{{- if .Values.appProtocol.enabled }}
appProtocol: tcp
{{- end }}
{{- end }}

View File

@ -0,0 +1,40 @@
{{ if and .Values.exporter.enabled .Values.exporter.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "nats.fullname" . }}
{{- if .Values.exporter.serviceMonitor.namespace }}
namespace: {{ .Values.exporter.serviceMonitor.namespace }}
{{- else }}
namespace: {{ .Release.Namespace | quote }}
{{- end }}
{{- if .Values.exporter.serviceMonitor.labels }}
labels:
{{- range $key, $value := .Values.exporter.serviceMonitor.labels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.exporter.serviceMonitor.annotations }}
annotations:
{{- range $key, $value := .Values.exporter.serviceMonitor.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
endpoints:
- port: metrics
{{- if .Values.exporter.serviceMonitor.path }}
path: {{ .Values.exporter.serviceMonitor.path }}
{{- end }}
{{- if .Values.exporter.serviceMonitor.interval }}
interval: {{ .Values.exporter.serviceMonitor.interval }}
{{- end }}
{{- if .Values.exporter.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.exporter.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
any: true
selector:
matchLabels:
{{- include "nats.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,477 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "nats.fullname" . }}
namespace: {{ .Release.Namespace | quote }}
labels:
{{- include "nats.labels" . | nindent 4 }}
{{- if .Values.statefulSetAnnotations}}
annotations:
{{- range $key, $value := .Values.statefulSetAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "nats.selectorLabels" . | nindent 6 }}
{{- if .Values.cluster.enabled }}
replicas: {{ .Values.cluster.replicas }}
{{- else }}
replicas: 1
{{- end }}
serviceName: {{ include "nats.fullname" . }}
template:
metadata:
{{- if or .Values.podAnnotations .Values.exporter.enabled }}
annotations:
{{- if .Values.exporter.enabled }}
prometheus.io/path: /metrics
prometheus.io/port: "7777"
prometheus.io/scrape: "true"
{{- end }}
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
{{- include "nats.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.securityContext }}
securityContext:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- tpl (toYaml .) $ | nindent 8 }}
{{- end }}
{{- if .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- range .Values.topologySpreadConstraints }}
{{- if and .maxSkew .topologyKey }}
- maxSkew: {{ .maxSkew }}
topologyKey: {{ .topologyKey }}
{{- if .whenUnsatisfiable }}
whenUnsatisfiable: {{ .whenUnsatisfiable }}
{{- end }}
labelSelector:
matchLabels:
{{- include "nats.selectorLabels" $ | nindent 12 }}
{{- end}}
{{- end }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName | quote }}
{{- end }}
# Common volumes for the containers.
volumes:
- name: config-volume
configMap:
name: {{ include "nats.fullname" . }}-config
# Local volume shared with the reloader.
- name: pid
emptyDir: {}
{{- if and .Values.auth.enabled .Values.auth.resolver }}
{{- if .Values.auth.resolver.configMap }}
- name: resolver-volume
configMap:
name: {{ .Values.auth.resolver.configMap.name }}
{{- end }}
{{- if eq .Values.auth.resolver.type "URL" }}
- name: operator-jwt-volume
configMap:
name: {{ .Values.auth.operatorjwt.configMap.name }}
{{- end }}
{{- end }}
{{- if and .Values.nats.externalAccess .Values.nats.advertise }}
# Local volume shared with the advertise config initializer.
- name: advertiseconfig
emptyDir: {}
{{- end }}
{{- if and .Values.nats.jetstream.fileStorage.enabled .Values.nats.jetstream.fileStorage.existingClaim }}
# Persistent volume for jetstream running with file storage option
- name: {{ include "nats.fullname" . }}-js-pvc
persistentVolumeClaim:
claimName: {{ .Values.nats.jetstream.fileStorage.existingClaim | quote }}
{{- end }}
#################
# #
# TLS Volumes #
# #
#################
{{- with .Values.nats.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-clients-volume
secret:
secretName: {{ $secretName }}
{{- end }}
{{- with .Values.mqtt.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-mqtt-volume
secret:
secretName: {{ $secretName }}
{{- end }}
{{- with .Values.cluster.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-cluster-volume
secret:
secretName: {{ $secretName }}
{{- end }}
{{- with .Values.leafnodes.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-leafnodes-volume
secret:
secretName: {{ $secretName }}
{{- end }}
{{- with .Values.gateway.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-gateways-volume
secret:
secretName: {{ $secretName }}
{{- end }}
{{- with .Values.websocket.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-ws-volume
secret:
secretName: {{ $secretName }}
{{- end }}
{{- if .Values.leafnodes.enabled }}
#
# Leafnode credential volumes
#
{{- range .Values.leafnodes.remotes }}
{{- with .credentials }}
- name: {{ .secret.name }}-volume
secret:
secretName: {{ .secret.name }}
{{- end }}
{{- end }}
{{- end }}
{{ if and .Values.nats.externalAccess .Values.nats.advertise }}
# The service account is only used when we need to figure out
# the current external public IP of the node hosting the server
# in order to be able to advertise correctly.
serviceAccountName: {{ .Values.nats.serviceAccount }}
{{ end }}
# Required to be able to send a HUP signal and apply a config
# reload to the server without restarting the pod.
shareProcessNamespace: true
{{- if and .Values.nats.externalAccess .Values.nats.advertise }}
# Initializer container required to be able to look up
# the external IP of the node this pod is running on.
initContainers:
- name: bootconfig
command:
- nats-pod-bootconfig
- -f
- /etc/nats-config/advertise/client_advertise.conf
- -gf
- /etc/nats-config/advertise/gateway_advertise.conf
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: {{ .Values.bootconfig.image }}
imagePullPolicy: {{ .Values.bootconfig.pullPolicy }}
resources:
{{- toYaml .Values.bootconfig.resources | nindent 10 }}
volumeMounts:
- mountPath: /etc/nats-config/advertise
name: advertiseconfig
subPath: advertise
{{- end }}
#################
# #
# NATS Server #
# #
#################
terminationGracePeriodSeconds: {{ .Values.nats.terminationGracePeriodSeconds }}
containers:
- name: nats
image: {{ .Values.nats.image }}
imagePullPolicy: {{ .Values.nats.pullPolicy }}
resources:
{{- toYaml .Values.nats.resources | nindent 10 }}
ports:
- containerPort: 4222
name: client
{{- if .Values.nats.externalAccess }}
hostPort: 4222
{{- end }}
- containerPort: 7422
name: leafnodes
{{- if .Values.nats.externalAccess }}
hostPort: 7422
{{- end }}
- containerPort: 7522
name: gateways
{{- if .Values.nats.externalAccess }}
hostPort: 7522
{{- end }}
- containerPort: 6222
name: cluster
- containerPort: 8222
name: monitor
- containerPort: 7777
name: metrics
{{- if .Values.mqtt.enabled }}
- containerPort: 1883
name: mqtt
{{- if .Values.nats.externalAccess }}
hostPort: 1883
{{- end }}
{{- end }}
{{- if .Values.websocket.enabled }}
- containerPort: {{ .Values.websocket.port }}
name: websocket
{{- if .Values.nats.externalAccess }}
hostPort: {{ .Values.websocket.port }}
{{- end }}
{{- end }}
command:
- "nats-server"
- "--config"
- "/etc/nats-config/nats.conf"
# Required to be able to define an environment variable
# that refers to other environment variables. This env var
# is later used as part of the configuration file.
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CLUSTER_ADVERTISE
value: {{ include "nats.clusterAdvertise" . }}
volumeMounts:
- name: config-volume
mountPath: /etc/nats-config
- name: pid
mountPath: /var/run/nats
{{- if and .Values.nats.externalAccess .Values.nats.advertise }}
- mountPath: /etc/nats-config/advertise
name: advertiseconfig
subPath: advertise
{{- end }}
{{- if and .Values.auth.enabled .Values.auth.resolver }}
{{- if eq .Values.auth.resolver.type "memory" }}
- name: resolver-volume
mountPath: /etc/nats-config/accounts
{{- end }}
{{- if eq .Values.auth.resolver.type "full" }}
{{- if .Values.auth.resolver.configMap }}
- name: resolver-volume
mountPath: /etc/nats-config/accounts
{{- end }}
{{- if and .Values.auth.resolver .Values.auth.resolver.store }}
- name: nats-jwt-pvc
mountPath: {{ .Values.auth.resolver.store.dir }}
{{- end }}
{{- end }}
{{- if eq .Values.auth.resolver.type "URL" }}
- name: operator-jwt-volume
mountPath: /etc/nats-config/operator
{{- end }}
{{- end }}
{{- if .Values.nats.jetstream.fileStorage.enabled }}
- name: {{ include "nats.fullname" . }}-js-pvc
mountPath: {{ .Values.nats.jetstream.fileStorage.storageDirectory }}
{{- end }}
{{- with .Values.nats.tls }}
#######################
# #
# TLS Volumes Mounts #
# #
#######################
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-clients-volume
mountPath: /etc/nats-certs/clients/{{ $secretName }}
{{- end }}
{{- with .Values.mqtt.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-mqtt-volume
mountPath: /etc/nats-certs/mqtt/{{ $secretName }}
{{- end }}
{{- with .Values.cluster.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-cluster-volume
mountPath: /etc/nats-certs/cluster/{{ $secretName }}
{{- end }}
{{- with .Values.leafnodes.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-leafnodes-volume
mountPath: /etc/nats-certs/leafnodes/{{ $secretName }}
{{- end }}
{{- with .Values.gateway.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-gateways-volume
mountPath: /etc/nats-certs/gateways/{{ $secretName }}
{{- end }}
{{- with .Values.websocket.tls }}
{{ $secretName := .secret.name }}
- name: {{ $secretName }}-ws-volume
mountPath: /etc/nats-certs/ws/{{ $secretName }}
{{- end }}
{{- if .Values.leafnodes.enabled }}
#
# Leafnode credential volumes
#
{{- range .Values.leafnodes.remotes }}
{{- with .credentials }}
- name: {{ .secret.name }}-volume
mountPath: /etc/nats-creds/{{ .secret.name }}
{{- end }}
{{- end }}
{{- end }}
# Liveness/readiness probes against the monitoring endpoint.
#
livenessProbe:
httpGet:
path: /
port: 8222
initialDelaySeconds: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: 8222
initialDelaySeconds: 10
timeoutSeconds: 5
# Gracefully stop NATS Server on pod deletion or image upgrade.
#
lifecycle:
preStop:
exec:
# Using the alpine based NATS image, we add an extra sleep that is
# the same amount as the terminationGracePeriodSeconds to allow
# the NATS Server to gracefully terminate the client connections.
#
command:
- "/bin/sh"
- "-c"
- "nats-server -sl=ldm=/var/run/nats/nats.pid && /bin/sleep {{ .Values.nats.terminationGracePeriodSeconds }}"
#################################
# #
# NATS Configuration Reloader #
# #
#################################
{{ if .Values.reloader.enabled }}
- name: reloader
image: {{ .Values.reloader.image }}
imagePullPolicy: {{ .Values.reloader.pullPolicy }}
resources:
{{- toYaml .Values.reloader.resources | nindent 10 }}
command:
- "nats-server-config-reloader"
- "-pid"
- "/var/run/nats/nats.pid"
- "-config"
- "/etc/nats-config/nats.conf"
volumeMounts:
- name: config-volume
mountPath: /etc/nats-config
- name: pid
mountPath: /var/run/nats
{{ end }}
##############################
# #
# NATS Prometheus Exporter #
# #
##############################
{{ if .Values.exporter.enabled }}
- name: metrics
image: {{ .Values.exporter.image }}
imagePullPolicy: {{ .Values.exporter.pullPolicy }}
resources:
{{- toYaml .Values.exporter.resources | nindent 10 }}
args:
- -connz
- -routez
- -subz
- -varz
- -prefix=nats
- -use_internal_server_id
{{- if .Values.nats.jetstream.enabled }}
- -jsz=all
{{- end }}
- http://localhost:8222/
ports:
- containerPort: 7777
name: metrics
{{ end }}
volumeClaimTemplates:
{{- if eq .Values.auth.resolver.type "full" }}
{{- if and .Values.auth.resolver .Values.auth.resolver.store }}
#####################################
# #
# Account Server Embedded JWT #
# #
#####################################
- metadata:
name: nats-jwt-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.auth.resolver.store.size }}
{{- end }}
{{- end }}
{{- if and .Values.nats.jetstream.fileStorage.enabled (not .Values.nats.jetstream.fileStorage.existingClaim) }}
#####################################
# #
# Jetstream New Persistent Volume #
# #
#####################################
- metadata:
name: {{ include "nats.fullname" . }}-js-pvc
{{- if .Values.nats.jetstream.fileStorage.annotations }}
annotations:
{{- range $key, $value := .Values.nats.jetstream.fileStorage.annotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
accessModes:
{{- range .Values.nats.jetstream.fileStorage.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.nats.jetstream.fileStorage.size }}
storageClassName: {{ .Values.nats.jetstream.fileStorage.storageClassName | quote }}
{{- end }}

View File

@ -0,0 +1,405 @@
###############################
# #
# NATS Server Configuration #
# #
###############################
nats:
image: nats:2.3.2-alpine
pullPolicy: IfNotPresent
# Toggle whether to enable external access.
# This binds a host port for clients, gateways and leafnodes.
externalAccess: false
# Toggle to disable client advertisements (connect_urls);
# when running behind a load balancer (which is not recommended),
# disabling advertisements might be required.
advertise: true
# When both external access and advertise are enabled,
# a service account is required to be able to
# gather the public IP from a node.
serviceAccount: "nats-server"
# The number of connect attempts against discovered routes.
connectRetries: 30
# How many seconds should pass before sending a PING
# to a client that has no activity.
pingInterval:
resources: {}
# Server settings.
limits:
maxConnections:
maxSubscriptions:
maxControlLine:
maxPayload:
writeDeadline:
maxPending:
maxPings:
lameDuckDuration:
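# Illustrative example values (assumptions, not chart defaults); nats-server
# accepts K/M/G size suffixes and duration strings here, e.g.:
# maxConnections: 64K
# maxPayload: 1MB
# writeDeadline: "10s"
# lameDuckDuration: "30s"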
terminationGracePeriodSeconds: 60
logging:
debug:
trace:
logtime:
connectErrorReports:
reconnectErrorReports:
jetstream:
enabled: false
#############################
# #
# Jetstream Memory Storage #
# #
#############################
memStorage:
enabled: true
size: 1Gi
############################
# #
# Jetstream File Storage #
# #
############################
fileStorage:
enabled: false
storageDirectory: /data
# Set for use with existing PVC
# existingClaim: jetstream-pvc
# claimStorageSize: 1Gi
# Use the block below to create a new persistent volume;
# only used if existingClaim is not specified
size: 1Gi
storageClassName: default
accessModes:
- ReadWriteOnce
annotations:
# key: "value"
#######################
# #
# TLS Configuration #
# #
#######################
#
# # You can find more on how to set up and troubleshoot TLS connections at:
#
# # https://docs.nats.io/nats-server/configuration/securing_nats/tls
#
# tls:
# secret:
# name: nats-client-tls
# ca: "ca.crt"
# cert: "tls.crt"
# key: "tls.key"
mqtt:
enabled: false
ackWait: 1m
maxAckPending: 100
#######################
# #
# TLS Configuration #
# #
#######################
#
# # You can find more on how to set up and troubleshoot TLS connections at:
#
# # https://docs.nats.io/nats-server/configuration/securing_nats/tls
#
#
# tls:
# secret:
# name: nats-mqtt-tls
# ca: "ca.crt"
# cert: "tls.crt"
# key: "tls.key"
nameOverride: ""
# An array of imagePullSecrets; they have to be created manually in the same namespace
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# Toggle whether to set up a Pod Security Context
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext: {}
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
# runAsNonRoot: true
# Affinity for pod assignment
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Pod priority class name
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: null
# Service topology
# ref: https://kubernetes.io/docs/concepts/services-networking/service-topology/
topologyKeys: []
# Pod Topology Spread Constraints
# ref https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: zone
# whenUnsatisfiable: DoNotSchedule
# Annotations to add to the NATS pods
# ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# key: "value"
## Define a Pod Disruption Budget for the stateful set
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
podDisruptionBudget: null
# minAvailable: 1
# maxUnavailable: 1
# Annotations to add to the NATS StatefulSet
statefulSetAnnotations: {}
# Annotations to add to the NATS Service
serviceAnnotations: {}
cluster:
enabled: false
replicas: 3
noAdvertise: false
# authorization:
# user: foo
# password: pwd
# timeout: 0.5
# Leafnode connections to extend a cluster:
#
# https://docs.nats.io/nats-server/configuration/leafnodes
#
leafnodes:
enabled: false
noAdvertise: false
# remotes:
# - url: "tls://connect.ngs.global:7422"
#######################
# #
# TLS Configuration #
# #
#######################
#
# # You can find more on how to set up and troubleshoot TLS connections at:
#
# # https://docs.nats.io/nats-server/configuration/securing_nats/tls
#
#
# tls:
# secret:
# name: nats-client-tls
# ca: "ca.crt"
# cert: "tls.crt"
# key: "tls.key"
# Gateway connections to create a super cluster
#
# https://docs.nats.io/nats-server/configuration/gateways
#
gateway:
enabled: false
name: 'default'
#############################
# #
# List of remote gateways #
# #
#############################
# gateways:
# - name: other
# url: nats://my-gateway-url:7522
#######################
# #
# TLS Configuration #
# #
#######################
#
# # You can find more on how to set up and troubleshoot TLS connections at:
#
# # https://docs.nats.io/nats-server/configuration/securing_nats/tls
#
# tls:
# secret:
# name: nats-client-tls
# ca: "ca.crt"
# cert: "tls.crt"
# key: "tls.key"
# When both external access and advertisements are
# enabled, an initializer container will be used to gather
# the public IPs.
bootconfig:
image: natsio/nats-boot-config:0.5.3
pullPolicy: IfNotPresent
# NATS Box
#
# https://github.com/nats-io/nats-box
#
natsbox:
enabled: true
image: natsio/nats-box:0.6.0
pullPolicy: IfNotPresent
# An array of imagePullSecrets; they have to be created manually in the same namespace
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: dockerhub
# credentials:
# secret:
# name: nats-sys-creds
# key: sys.creds
# Annotations to add to the box pods
# ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: {}
# key: "value"
# Affinity for nats box pod assignment
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# The NATS config reloader image to use.
reloader:
enabled: true
image: natsio/nats-server-config-reloader:0.6.1
pullPolicy: IfNotPresent
# Prometheus NATS Exporter configuration.
exporter:
enabled: true
image: natsio/prometheus-nats-exporter:0.8.0
pullPolicy: IfNotPresent
resources: {}
# Prometheus operator ServiceMonitor support. Exporter has to be enabled
serviceMonitor:
enabled: false
## Specify the namespace where Prometheus Operator is running
##
# namespace: monitoring
labels: {}
annotations: {}
path: /metrics
# interval:
# scrapeTimeout:
# Authentication setup
auth:
enabled: false
# basic:
# noAuthUser:
# # List of users that can connect with basic auth,
# # that belong to the global account.
# users:
# # List of accounts with users that can connect
# # using basic auth.
# accounts:
# Reference to the Operator JWT.
# operatorjwt:
# configMap:
# name: operator-jwt
# key: KO.jwt
# Token authentication
# token:
# Public key of the System Account
# systemAccount:
resolver:
# Disables the resolver by default
type: none
##########################################
# #
# Embedded NATS Account Server Resolver #
# #
##########################################
# type: full
# If the resolver type is 'full', enabling delete renames the JWT file instead of removing it.
allowDelete: false
# Interval at which a nats-server with a nats-based account resolver compares
# its state with one random nats-based account resolver in the cluster and,
# if needed, exchanges JWTs to converge on the same set.
interval: 2m
# Operator JWT
operator:
# System Account Public NKEY
systemAccount:
# resolverPreload:
# <ACCOUNT>: <JWT>
# Directory in which the account JWTs will be stored.
store:
dir: "/accounts/jwt"
# Size of the account JWT storage.
size: 1Gi
##############################
# #
# Memory resolver settings #
# #
##############################
# type: memory
#
# Use a configmap reference which will be mounted
# into the container.
#
# configMap:
# name: nats-accounts
# key: resolver.conf
##########################
# #
# URL resolver settings #
# #
##########################
# type: URL
# url: "http://nats-account-server:9090/jwt/v1/accounts/"
websocket:
enabled: false
port: 443
appProtocol:
enabled: false
# Cluster Domain configured on the kubelets
# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
k8sClusterDomain: cluster.local
# Add labels to all the deployed resources
commonLabels: {}

View File

@ -1,4 +1,4 @@
-{{- if .Values.nats.promExporter.podMonitor.enabled }}
+{{- if .Values.nats.exporter.serviceMonitor.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:

View File

@ -1,13 +1,13 @@
-##!/bin/bash
+#!/bin/bash
 set -ex
-. ../../scripts/lib-update.sh
+helm dep update
-#login_ecr_public
+## NATS
-update_helm
+NATS_VERSION=0.8.4
+rm -rf charts/nats && curl -L -s -o - https://github.com/nats-io/k8s/releases/download/v$NATS_VERSION/nats-$NATS_VERSION.tgz | tar xfz - -C charts
 # Fetch dashboards
 ../kubezero-metrics/sync_grafana_dashboards.py dashboards-nats.yaml templates/nats/grafana-dashboards.yaml
 ../kubezero-metrics/sync_grafana_dashboards.py dashboards-rabbitmq.yaml templates/rabbitmq/grafana-dashboards.yaml
-update_docs

View File

@ -2,16 +2,17 @@
 nats:
   enabled: false
-  config:
+  nats:
+    advertise: false
   jetstream:
     enabled: true
-  natsBox:
+  natsbox:
     enabled: false
-  promExporter:
-    enabled: false
-    podMonitor:
+  exporter:
+    serviceMonitor:
       enabled: false
   mqtt:

View File

@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-network
 description: KubeZero umbrella chart for all things network
 type: application
-version: 0.5.5
+version: 0.5.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@ -19,15 +19,15 @@ dependencies:
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
-    version: 1.16.3
+    version: 1.15.7
     repository: https://helm.cilium.io/
     condition: cilium.enabled
   - name: metallb
-    version: 0.14.8
+    version: 0.14.7
     repository: https://metallb.github.io/metallb
     condition: metallb.enabled
   - name: haproxy
-    version: 1.23.0
+    version: 1.22.0
     repository: https://haproxytech.github.io/helm-charts
     condition: haproxy.enabled
 kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
 # kubezero-network
-![Version: 0.5.5](https://img.shields.io/badge/Version-0.5.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.3](https://img.shields.io/badge/Version-0.5.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 KubeZero umbrella chart for all things network
@ -19,9 +19,9 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://haproxytech.github.io/helm-charts | haproxy | 1.23.0 |
+| https://haproxytech.github.io/helm-charts | haproxy | 1.22.0 |
-| https://helm.cilium.io/ | cilium | 1.16.3 |
+| https://helm.cilium.io/ | cilium | 1.15.7 |
-| https://metallb.github.io/metallb | metallb | 0.14.8 |
+| https://metallb.github.io/metallb | metallb | 0.14.7 |
 ## Values
@ -35,7 +35,6 @@ Kubernetes: `>= 1.26.0`
 | cilium.cni.exclusive | bool | `false` | |
 | cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
 | cilium.enabled | bool | `false` | |
-| cilium.envoy.enabled | bool | `false` | |
 | cilium.hubble.enabled | bool | `false` | |
 | cilium.hubble.relay.enabled | bool | `false` | |
 | cilium.hubble.tls.auto.certManagerIssuerRef.group | string | `"cert-manager.io"` | |
@ -57,11 +56,10 @@ Kubernetes: `>= 1.26.0`
 | cilium.prometheus.enabled | bool | `false` | |
 | cilium.prometheus.port | int | `9091` | |
 | cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
-| cilium.resources.limits.memory | string | `"1Gi"` | |
+| cilium.resources.limits.memory | string | `"1024Mi"` | |
 | cilium.resources.requests.cpu | string | `"10m"` | |
-| cilium.resources.requests.memory | string | `"160Mi"` | |
+| cilium.resources.requests.memory | string | `"256Mi"` | |
 | cilium.routingMode | string | `"tunnel"` | |
-| cilium.sysctlfix.enabled | bool | `false` | |
 | cilium.tunnelProtocol | string | `"geneve"` | |
 | haproxy.PodDisruptionBudget.enable | bool | `false` | |
 | haproxy.PodDisruptionBudget.minAvailable | int | `1` | |

File diff suppressed because one or more lines are too long

View File

@ -28,8 +28,6 @@ spec:
 containers:
   - name: kube-multus
     image: {{ .Values.multus.image.repository }}:{{ .Values.multus.image.tag }}
-    # Always used cached images
-    imagePullPolicy: {{ .Values.multus.image.pullPolicy }}
     command: ["/entrypoint.sh"]
     args:
     - "--multus-conf-file=/tmp/multus-conf/00-multus.conf"
@ -47,7 +45,6 @@ spec:
     privileged: true
     capabilities:
       add: ["SYS_ADMIN"]
-    terminationMessagePolicy: FallbackToLogsOnError
     volumeMounts:
     - name: run
       mountPath: /run

View File

@ -27,16 +27,16 @@ multus:
 cilium:
   enabled: false
-  # Always use cached images
+  # breaks preloaded images otherwise
   image:
     useDigest: false
   resources:
     requests:
       cpu: 10m
-      memory: 160Mi
+      memory: 256Mi
     limits:
-      memory: 1Gi
+      memory: 1024Mi
       # cpu: 4000m
   cni:
@ -59,8 +59,7 @@ cilium:
   # Keep it simple for now
   l7Proxy: false
-  envoy:
-    enabled: false
   #rollOutCiliumPods: true
   cgroup:

View File

@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-operators
 description: Various operators supported by KubeZero
 type: application
-version: 0.1.5
+version: 0.1.4
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@ -21,7 +21,7 @@ dependencies:
     repository: https://opensearch-project.github.io/opensearch-k8s-operator/
     condition: opensearch-operator.enabled
   - name: eck-operator
-    version: 2.14.0
+    version: 2.13.0
     repository: https://helm.elastic.co
     condition: eck-operator.enabled
 kubeVersion: ">= 1.26.0"

View File

@ -39,3 +39,6 @@ Kubernetes: `>= 1.26.0`
 | opensearch-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | opensearch-operator.tolerations[0].effect | string | `"NoSchedule"` | |
 | opensearch-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)

View File

@ -1,8 +1,8 @@
 apiVersion: v2
-name: kubezero-keyvalue
+name: kubezero-redis
-description: KubeZero KeyValue Module
+description: KubeZero Umbrella Chart for Redis HA
 type: application
-version: 0.1.0
+version: 0.4.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@ -25,4 +25,4 @@ dependencies:
     repository: https://charts.bitnami.com/bitnami
     condition: redis-cluster.enabled
-kubeVersion: ">= 1.26.0"
+kubeVersion: ">= 1.25.0"

View File

@ -1,8 +1,8 @@
-# kubezero-keyvalue
+# kubezero-redis
-![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
-KubeZero KeyValue Module
+KubeZero Umbrella Chart for Redis HA
 **Homepage:** <https://kubezero.com>
@ -14,13 +14,13 @@ KubeZero KeyValue Module
 ## Requirements
-Kubernetes: `>= 1.26.0`
+Kubernetes: `>= 1.25.0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://charts.bitnami.com/bitnami | redis | 20.0.3 |
+| https://charts.bitnami.com/bitnami | redis | 16.10.1 |
-| https://charts.bitnami.com/bitnami | redis-cluster | 11.0.2 |
+| https://charts.bitnami.com/bitnami | redis-cluster | 7.6.1 |
 ## Values
@ -37,7 +37,6 @@ Kubernetes: `>= 1.26.0`
 | redis.architecture | string | `"standalone"` | |
 | redis.auth.enabled | bool | `false` | |
 | redis.enabled | bool | `false` | |
-| redis.image.tag | string | `"7.2.5-debian-12-r4"` | |
 | redis.master.persistence.enabled | bool | `false` | |
 | redis.metrics.enabled | bool | `false` | |
 | redis.metrics.serviceMonitor.enabled | bool | `false` | |

View File

@ -7,8 +7,3 @@ dashboards:
url: https://grafana.com/api/dashboards/11835/revisions/1/download url: https://grafana.com/api/dashboards/11835/revisions/1/download
tags: tags:
- Redis - Redis
- name: redis-cluster
url: https://grafana.com/api/dashboards/14615/revisions/1/download
tags:
- Redis
- Redis-Cluster
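For context, each entry in dashboards.yaml is a source descriptor consumed by ../kubezero-metrics/sync_grafana_dashboards.py (see the update.sh further down); a hypothetical extra entry would follow the same shape, with the dashboard ID and revision as placeholders:

```yaml
# hypothetical entry; only name, Grafana.com download URL and tags are used
- name: redis-insights
  url: https://grafana.com/api/dashboards/<DASHBOARD_ID>/revisions/<REV>/download
  tags:
    - Redis
```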


@ -0,0 +1,13 @@
{{- if or .Values.redis.metrics.enabled ( index .Values "redis-cluster" "metrics" "enabled") }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards" | trunc 63 | trimSuffix "-" }}
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"
{{- include "kubezero-lib.labels" . | nindent 4 }}
binaryData:
redis.json.gz:
H4sIAAAAAAAC/+1daVPcOBr+Pr9C5Zndgq0OsQ0dmqmaDySBmdQAyQYyW7UD1aW2RbcWX2PJQIdifvvq8CHZ6jMNNMEfOKxX1vGejw5Ldz8AYPX7OEoySqyfwZ/sGYA78ZtRIhgilmq9P+1/+vzx+ODst4Mvp1anIAdwgAJO/5TGIaIjlJGK6CPipTihOI54lopAx4ko1IcUkjhLPVTRkiAb4uiDz+mJoVBJP8mbpVQrMtyz3xcd2aUU/ZXhFBk6VdQ/TOEljGBVOPaNyQUTfq0TrlFK8t692epuuXkjOubqEhgxZjUrS0bGqtRkpaLpdZhYiicyM2qy0VSls2Vv2Uv0jeBoGCBCIW1WeWqgNXtZihNGUczyMiqXp6zeCjChpXSrRjHKIMMB/cBLcjpVqsIcc6dZHhTBQcDpNM2Qkj7CviEVe3H0Lg7ilBeYDgdww+4A13HYr263A5xNteii6/tVX8A/wX6AUqo1oZIlGQ1imPpWTrsXfy9+yMVQN7DPyMcEvC/eApdxCqpOAkk+uE3ilKIUOFu3HYApuInTKwJuMB2BEQpCwETCGPA65blfjSBA+Qtb5+l5xH8+XIJxnIEQEy5fIDKCEIVxOgYZxQH+KvrWAUmAIEEgjH18OQbnVghvZbZzC1zDIEMAR/IfsjWGoVQfixVHayKwhhGiwiU4Tm+7K5O4gZzFcUBxwgi2SBRqF2VBIJ9Yq2HOHKfb23Z2uju2280LCHB0JXyDVDChwAZf4UFvhM5wiOKMKoVLGpf8W+hdDdM4i3jdlzAgSKf/wTtoJhFFeUGuP+4O05vujvyxt/Y0DZI5tneZZrl7HbBj8yw9Q54uozi7LsvSFaXsbhY6dFG551nWwPTLwyEUXLHLRIN8ZHqaCjPQ+8l0MIRUOIOq3CHMhqg0Y5HEdKNglGPbSndCHBUENZmM4pt6Zdx0Rszhj+LAP+KBiUzLcQzTKyREwLtRWFjVxhT7n2Kit3LEHneVwngTXOX5ttbKMX9uFC20dK96jJieMjOoaxcmJ+imzmVda3PWJQkzxDPpNRxTuq5oVYcUpyQNksaAoluq6BPISbzoMvG+M72wFEbDGYW5VWENzWTK8J4p56eYsYboGmFxFgnCcSwcMrOkKEIeRb6l5TnjNdc4GidlAKmkkcSEXuJbHaHkiYdxRE/xV1FP1/6HQk9R8x2RNvEVwZNjmEyRxSWzQf4eb7TONSp7Y5283q8R4vKFKQwlCVN2pjo1o7vEQaBHr20WuZiPZb96e9xxOD3Nt1zyepo2xUtWy5HFuNwH7W1rBeRma7Q44VVYMVlYh4swHSI6hW8sRImqmd5ssJ9+zIBEnzKfvSGCUz9L+APDuH2CmLr45A5HLMxFHvrl73Prp+Lh3Lr/8ycOhaU9Xmxu6ryufJkojqAUI6JnUWzZMlMOoUcFo1yNHKAhivzDsgb9ZeaZU+w105nGSaRc0wpCEQ+KTs+2p2hF6QpJjd+YCvdufRF8a+J2E64Tdq0q/66q/II6Xflj3mLrlzn1vnAjM1VfZCyGC16WpiiiZijbRvoVRfoojtALCfbuzGDPxw1cqT4yn5Q2xxASDDhuiwZaNNCigZWiARn7S4XoewFmvn9y7F862D9mSHeXiuchOsyVyQn19NMRvmxYRgkA3kmWLYkAei0CmI4ADLF7GQjQBBJPjQASxGqK6EOAgNoE3BNggJ0aBthZEQZwWgzQYoAWA6wUAzA9Af8C+TSAnHzuZ4SBgcGYoslQAIDXQHuHzyrMeGXzO8MPPbuz110FiDiWSwNfCByil4IkYIAheVfEfcW3DGDaCFJioecIRUM6Eq5NS0em7LOC+YJxGwsbdrSEX1Poc/Cn4QEeI/X+zB01e7Wo2ZsnavooOpWGU2+zaIc7IzIKA9KbBq+HBndVyLJJYYZvSsWRIXXSNEVMoclHypWvgtDoPnehxBDs0Q32haK40yDAPEFRCX76GulRWV6jVTmyg8MmzE14dVxrMv5uV09vSpAx3EcpEm76MogV85e+sgBLtY6xwOUhk7EwF+JdNWrhbi9B/pGMdDpt/pEkpMVUsheHIYx80k/S2EOEhxIh38kzyk54sYbzyPuLRY0dW3elgnPVOv0c8UQRoRJKahEjRJ/RMNfJ2gszBqq5WMDBLfIypuUsgBPkKeGmXClWhyDkMyIMXuRrxE17hkzozTEHiVNaG5wIW+4XMc3LwiyAFF8jq4lpqr0n6gaPW3iLa5Y4yLwrqZ5qn3mzc5PWJucrrF3LbR43lc7HYONjeDttfKAs6Y44J3T9yzcE6Y3ghHj4FhKk78goHWwju/SwjeQmWpw89Fi7djYMZNwUOgMOQ5MuivQjdF02WtsSsmbwQ5lLcJ8TJnHerACUOC0oeVJQojXt+8UkWAElV2gsmtAfYUpm4ZHuMngk3/1W05ZVAhXe9NWMcOfBKp0lmMs3vKHnyl7Z+AUZ/PZ5gsHfmCox/HcsugyYcwCnTwYFceTja+xnMHixULCmXUsBLLtFgd+MAuv4wfrx7aHz1rabivliJ6mcWZs5d5cAhLsPDwh5/DgIEzqeQPsvSuPV4Mj68ttawEieACABX3k32+mt5TZKLLA68tArHbwNa4MFF10HWj34mxfiMWteEMetB147404ETFgmauftVg/WhAY/JBJq4drzm7R7DnDMcVeAxxRmrdMM3Voiq3bVcCWrhhGi8gtrCR4eZA5pQbRwdwdEi8D9/aKQan7opHMgzujjsOCbZ8sYc2RjJ3NnzTHVCaL8w17w4fXHxcBUHTO1WOqJsVS7Bvpy4NTu085uOTtLwKnuEmiKC3KfnJlPWpiFtWrZF4FaKR6O6KnxJIclUZhx32oLwpYDYRq/VoLBSBYWu3/9gVjKmwg5NsFgDDb8wdPCDn/AIAf4vjZnyfmdDxSFcjXu/dt2Me5BMYn2AW4LSVpI8nwgSWOGZylM4my3UzwtulgvdPEKNLP3WUk4xdFwyntPB0eimIKigY++7PbcmGVm1PJbrNYDuh3kvQLXBJzE9FX5/DsTSIvi2t31LYxTikfX2BMqZf3Y27MPbVfVnxCF4nwBv4/5SKgvMjc/Hzq3nF13y9ndsrecn/ecrn1udf4XD1h6dYAnP6zCXEmKvADiULZh+/BNb3u73d41/wSY6ywBNnst1nwZWNPsCIU3kBuFctvTfFXhhdzpH+M/+uokw1cbyvqcgC/MGwmgNcfanJwtK1LXccd73qO12UdWZ3ju/h+L4avjq2z4M8az4uvQg1o32p1mLYZtpyLXGhz2nngqcil06Oy8ZHjotFORjzMVSePkaqPbAdh0VsV0eAHEERVPu+zphX5jq1WF1bTu9D0YBHmflt+5tgaHVYB3vCPLHFSxqj1a7Uxai0JeMgpZbNJpRVNMvWVAxJsWL0z47u+RoIL73X3vN+Vg5FWfi+xMCtPrEY/lxUglP4DXOPXYFJPbqNtG3WcSdctb1Jjpcavivd62pYJbxBuhEP5RXr3mOjKZjoP8FrL0SuZkbrISvLQaqyyaojDhM1rRcJ772arYdGc6otN4NZt6Umf93r+
apIvpZdUIceQFmY/2zefVTrnHkMsxY/ZveG367YhaTKo8Ekv+K0PpeMKlfJqYHC11iG5rgy2LXOHkSxqcjiPPdMpT81bAuj1r2hUUB2/XvIQirvsF79Pz0SWOcHFFneBzX7qX8nKeDuB8FJFQOxp4aSGeFMUtIsPI9NIs+S3Un0qs7reJ1RBimG0K2ZF/F02zdGqjDzzNnDlXGtlJhZARdCYLMqK7NdCku5LxHMOUDwzEdEAS+6vRrk+xD8QhwAsoF6u8H9XeWUq3FunhqvXNafVtXmkIYfC0QvBCQKbVu6X1sA77ZmhgUffDaeACff5ONbN2PatA4CUYKo/yj29eOQW2L07rZ3hQey3BDN2n1cs5w8q76VQAbnWVoYpjKw/b6oN6aHtX+d9RH7ZtlaKMQFzlfye/kPai6AMfCCq6NLMWteA3asFqLe6O+qAsCez6anuLtmjs+xqL8bE1SOMbNkLN4as+3pv3ntwN0624UputTN5zfPv+6OTz71/+81WmVtcY7/xw/38XppizYHsAAA==
{{- end }}
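The binaryData payload above is simply the dashboard JSON, gzip-compressed and base64-encoded so it fits into a ConfigMap picked up via the `grafana_dashboard: "1"` label. A minimal sketch of producing such a blob by hand, assuming a local redis.json export:

```bash
# gzip the dashboard JSON and base64-encode it for use as ConfigMap binaryData
gzip -9 -c redis.json | base64 -w0 > redis.json.gz.b64
```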


@ -1,12 +1,4 @@
#!/bin/bash #!/bin/bash
set -ex
. ../../scripts/lib-update.sh
#login_ecr_public
update_helm
# Fetch dashboards from Grafana.com and update ZDT CM # Fetch dashboards from Grafana.com and update ZDT CM
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml ../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
update_docs
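The refactored update.sh delegates chart housekeeping to scripts/lib-update.sh; judging from the inline commands it replaces, the shared helpers presumably look roughly like this sketch (not the actual library):

```bash
# hypothetical shape of the lib-update.sh helpers, inferred from the
# commands the per-chart update.sh scripts no longer carry inline
update_helm() {
  helm dep update
}

update_docs() {
  helm-docs -c .
}
```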


@ -3,10 +3,6 @@ redis:
architecture: standalone architecture: standalone
# Stick to last OSS version for now
image:
tag: 7.2.5-debian-12-r4
replica: replica:
replicaCount: 0 replicaCount: 0
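As a usage sketch, a cluster that wants the bundled standalone Redis with Prometheus metrics only needs to flip the flags listed in the values table above; everything else keeps the chart defaults (a hypothetical override, not shipped config):

```yaml
# hypothetical override values; key names match the documented defaults
redis:
  enabled: true
  architecture: standalone
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
```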


@ -1,14 +1,15 @@
apiVersion: v2 apiVersion: v2
name: kubezero-sql name: kubezero-sql
description: KubeZero umbrella chart for SQL databases, mariadb-galera description: KubeZero umbrella chart for SQL databases, Percona XtraDB Cluster
type: application type: application
version: 0.4.0 version: 0.3.2
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
- kubezero - kubezero
- mariadb - mysql
- galera - percona
# - postgresql
maintainers: maintainers:
- name: Stefan Reimer - name: Stefan Reimer
email: stefan@zero-downtime.net email: stefan@zero-downtime.net
@ -16,8 +17,12 @@ dependencies:
- name: kubezero-lib - name: kubezero-lib
version: ">= 0.1.6" version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: pxc-operator
version: 1.12.1
repository: https://percona.github.io/percona-helm-charts/
condition: pxc-operator.enabled
- name: mariadb-galera - name: mariadb-galera
version: 14.0.10 version: 7.4.7
repository: https://charts.bitnami.com/bitnami repository: https://charts.bitnami.com/bitnami
condition: mariadb-galera.enabled condition: mariadb-galera.enabled
kubeVersion: ">= 1.26.0" kubeVersion: ">= 1.24.0"


@ -1,8 +1,8 @@
# kubezero-sql # kubezero-sql
![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for SQL databases, mariadb-galera KubeZero umbrella chart for SQL databases, Percona XtraDB Cluster
**Homepage:** <https://kubezero.com> **Homepage:** <https://kubezero.com>
@ -14,12 +14,13 @@ KubeZero umbrella chart for SQL databases, mariadb-galera
## Requirements ## Requirements
Kubernetes: `>= 1.26.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.bitnami.com/bitnami | mariadb-galera | 14.0.10 | | https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.7 |
| https://percona.github.io/percona-helm-charts/ | pxc-operator | 1.12.1 |
## Values ## Values
@ -37,6 +38,14 @@ Kubernetes: `>= 1.26.0`
| mariadb-galera.metrics.prometheusRules.enabled | bool | `false` | | | mariadb-galera.metrics.prometheusRules.enabled | bool | `false` | |
| mariadb-galera.metrics.serviceMonitor.enabled | bool | `false` | | | mariadb-galera.metrics.serviceMonitor.enabled | bool | `false` | |
| mariadb-galera.replicaCount | int | `2` | | | mariadb-galera.replicaCount | int | `2` | |
| pxc-operator.enabled | bool | `false` | |
| pxc-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| pxc-operator.resources.limits.memory | string | `"512Mi"` | |
| pxc-operator.resources.requests.cpu | string | `"50m"` | |
| pxc-operator.resources.requests.memory | string | `"32Mi"` | |
| pxc-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| pxc-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| pxc-operator.watchAllNamespaces | bool | `true` | |
# Changes # Changes
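Both database flavors are opt-in via their `enabled` flags; a hypothetical minimal override enabling the Galera variant with ServiceMonitor scraping (key names taken from the table above, values illustrative):

```yaml
# hypothetical override; enables the bundled mariadb-galera sub-chart
mariadb-galera:
  enabled: true
  replicaCount: 2
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
```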

charts/kubezero-sql/cr.yaml Normal file

@ -0,0 +1,591 @@
apiVersion: pxc.percona.com/v1-11-0
kind: PerconaXtraDBCluster
metadata:
name: best-db
finalizers:
- delete-pxc-pods-in-order
# - delete-ssl
# - delete-proxysql-pvc
# - delete-pxc-pvc
# annotations:
# percona.com/issue-vault-token: "true"
spec:
crVersion: 1.11.0
# secretsName: cluster1-secrets
# vaultSecretName: keyring-secret-vault
# sslSecretName: cluster1-ssl
# sslInternalSecretName: cluster1-ssl-internal
# logCollectorSecretName: cluster1-log-collector-secrets
# initImage: percona/percona-xtradb-cluster-operator:1.12.0
# enableCRValidationWebhook: true
# tls:
# SANs:
# - pxc-1.example.com
# - pxc-2.example.com
# - pxc-3.example.com
# issuerConf:
# name: special-selfsigned-issuer
# kind: ClusterIssuer
# group: cert-manager.io
allowUnsafeConfigurations: true
# pause: false
updateStrategy: SmartUpdate
upgradeOptions:
versionServiceEndpoint: https://check.percona.com
apply: disabled
schedule: "0 4 * * *"
pxc:
size: 1
image: percona/percona-xtradb-cluster:8.0.27-18.1
autoRecovery: true
# expose:
# enabled: true
# type: LoadBalancer
# trafficPolicy: Local
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# networking.gke.io/load-balancer-type: "Internal"
# replicationChannels:
# - name: pxc1_to_pxc2
# isSource: true
# - name: pxc2_to_pxc1
# isSource: false
# configuration:
# sourceRetryCount: 3
# sourceConnectRetry: 60
# sourcesList:
# - host: 10.95.251.101
# port: 3306
# weight: 100
# schedulerName: mycustom-scheduler
# readinessDelaySec: 15
# livenessDelaySec: 600
# configuration: |
# [mysqld]
# wsrep_debug=CLIENT
# wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
# [sst]
# xbstream-opts=--decompress
# [xtrabackup]
# compress=lz4
# for PXC 5.7
# [xtrabackup]
# compress
# imagePullSecrets:
# - name: private-registry-credentials
# priorityClassName: high-priority
# annotations:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# readinessProbes:
# initialDelaySeconds: 15
# timeoutSeconds: 15
# periodSeconds: 30
# successThreshold: 1
# failureThreshold: 5
# livenessProbes:
# initialDelaySeconds: 300
# timeoutSeconds: 5
# periodSeconds: 10
# successThreshold: 1
# failureThreshold: 3
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
# imagePullPolicy: Always
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 512M
cpu: 200m
# ephemeral-storage: 1G
# limits:
# memory: 1G
# cpu: "1"
# ephemeral-storage: 1G
# nodeSelector:
# disktype: ssd
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# tolerations:
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
# podDisruptionBudget:
# maxUnavailable: 1
# minAvailable: 0
volumeSpec:
# emptyDir: {}
# hostPath:
# path: /data
# type: Directory
persistentVolumeClaim:
# storageClassName: standard
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2G
gracePeriod: 600
haproxy:
enabled: true
size: 1
image: perconalab/percona-xtradb-cluster-operator:main-haproxy
# imagePullPolicy: Always
# schedulerName: mycustom-scheduler
# readinessDelaySec: 15
# livenessDelaySec: 600
# configuration: |
#
# the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
#
# global
# maxconn 2048
# external-check
# insecure-fork-wanted
# stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
#
# defaults
# default-server init-addr last,libc,none
# log global
# mode tcp
# retries 10
# timeout client 28800s
# timeout connect 100500
# timeout server 28800s
#
# frontend galera-in
# bind *:3309 accept-proxy
# bind *:3306
# mode tcp
# option clitcpka
# default_backend galera-nodes
#
# frontend galera-admin-in
# bind *:33062
# mode tcp
# option clitcpka
# default_backend galera-admin-nodes
#
# frontend galera-replica-in
# bind *:3307
# mode tcp
# option clitcpka
# default_backend galera-replica-nodes
#
# frontend galera-mysqlx-in
# bind *:33060
# mode tcp
# option clitcpka
# default_backend galera-mysqlx-nodes
#
# frontend stats
# bind *:8404
# mode http
# option http-use-htx
# http-request use-service prometheus-exporter if { path /metrics }
# imagePullSecrets:
# - name: private-registry-credentials
# annotations:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# readinessProbes:
# initialDelaySeconds: 15
# timeoutSeconds: 1
# periodSeconds: 5
# successThreshold: 1
# failureThreshold: 3
# livenessProbes:
# initialDelaySeconds: 60
# timeoutSeconds: 5
# periodSeconds: 30
# successThreshold: 1
# failureThreshold: 4
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
# replicasServiceEnabled: false
# replicasLoadBalancerSourceRanges:
# - 10.0.0.0/8
# replicasLoadBalancerIP: 127.0.0.1
# replicasServiceType: ClusterIP
# replicasExternalTrafficPolicy: Cluster
# replicasServiceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# replicasServiceLabels:
# rack: rack-23
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 100M
cpu: 100m
# limits:
# memory: 1G
# cpu: 700m
# priorityClassName: high-priority
# nodeSelector:
# disktype: ssd
# sidecarResources:
# requests:
# memory: 1G
# cpu: 500m
# limits:
# memory: 2G
# cpu: 600m
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# tolerations:
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
# podDisruptionBudget:
# maxUnavailable: 1
# minAvailable: 0
gracePeriod: 30
proxysql:
enabled: false
size: 3
image: perconalab/percona-xtradb-cluster-operator:main-proxysql
# imagePullPolicy: Always
# configuration: |
# datadir="/var/lib/proxysql"
#
# admin_variables =
# {
# admin_credentials="proxyadmin:admin_password"
# mysql_ifaces="0.0.0.0:6032"
# refresh_interval=2000
#
# cluster_username="proxyadmin"
# cluster_password="admin_password"
# checksum_admin_variables=false
# checksum_ldap_variables=false
# checksum_mysql_variables=false
# cluster_check_interval_ms=200
# cluster_check_status_frequency=100
# cluster_mysql_query_rules_save_to_disk=true
# cluster_mysql_servers_save_to_disk=true
# cluster_mysql_users_save_to_disk=true
# cluster_proxysql_servers_save_to_disk=true
# cluster_mysql_query_rules_diffs_before_sync=1
# cluster_mysql_servers_diffs_before_sync=1
# cluster_mysql_users_diffs_before_sync=1
# cluster_proxysql_servers_diffs_before_sync=1
# }
#
# mysql_variables=
# {
# monitor_password="monitor"
# monitor_galera_healthcheck_interval=1000
# threads=2
# max_connections=2048
# default_query_delay=0
# default_query_timeout=10000
# poll_timeout=2000
# interfaces="0.0.0.0:3306"
# default_schema="information_schema"
# stacksize=1048576
# connect_timeout_server=10000
# monitor_history=60000
# monitor_connect_interval=20000
# monitor_ping_interval=10000
# ping_timeout_server=200
# commands_stats=true
# sessions_sort=true
# have_ssl=true
# ssl_p2s_ca="/etc/proxysql/ssl-internal/ca.crt"
# ssl_p2s_cert="/etc/proxysql/ssl-internal/tls.crt"
# ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
# ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
# }
# readinessDelaySec: 15
# livenessDelaySec: 600
# schedulerName: mycustom-scheduler
# imagePullSecrets:
# - name: private-registry-credentials
# annotations:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# serviceType: ClusterIP
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# externalTrafficPolicy: Cluster
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 1G
cpu: 600m
# limits:
# memory: 1G
# cpu: 700m
# priorityClassName: high-priority
# nodeSelector:
# disktype: ssd
# sidecarResources:
# requests:
# memory: 1G
# cpu: 500m
# limits:
# memory: 2G
# cpu: 600m
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# tolerations:
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
volumeSpec:
# emptyDir: {}
# hostPath:
# path: /data
# type: Directory
persistentVolumeClaim:
# storageClassName: standard
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2G
podDisruptionBudget:
maxUnavailable: 1
# minAvailable: 0
gracePeriod: 30
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
logcollector:
enabled: false
image: perconalab/percona-xtradb-cluster-operator:main-logcollector
# configuration: |
# [OUTPUT]
# Name es
# Match *
# Host 192.168.2.3
# Port 9200
# Index my_index
# Type my_type
resources:
requests:
memory: 100M
cpu: 200m
pmm:
enabled: false
image: percona/pmm-client:2.28.0
serverHost: monitoring-service
# serverUser: admin
# pxcParams: "--disable-tablestats-limit=2000"
# proxysqlParams: "--custom-labels=CUSTOM-LABELS"
resources:
requests:
memory: 150M
cpu: 300m
backup:
enabled: false
image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup
# backoffLimit: 6
# serviceAccountName: percona-xtradb-cluster-operator
# imagePullSecrets:
# - name: private-registry-credentials
pitr:
enabled: false
storageName: STORAGE-NAME-HERE
timeBetweenUploads: 60
# resources:
# requests:
# memory: 0.1G
# cpu: 100m
# limits:
# memory: 1G
# cpu: 700m
storages:
s3-us-west:
type: s3
verifyTLS: true
# nodeSelector:
# storage: tape
# backupWorker: 'True'
# resources:
# requests:
# memory: 1G
# cpu: 600m
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: backupWorker
# operator: In
# values:
# - 'True'
# tolerations:
# - key: "backupWorker"
# operator: "Equal"
# value: "True"
# effect: "NoSchedule"
# annotations:
# testName: scheduled-backup
# labels:
# backupWorker: 'True'
# schedulerName: 'default-scheduler'
# priorityClassName: 'high-priority'
# containerSecurityContext:
# privileged: true
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups: [1001, 1002, 1003]
s3:
bucket: S3-BACKUP-BUCKET-NAME-HERE
credentialsSecret: my-cluster-name-backup-s3
region: us-west-2
fs-pvc:
type: filesystem
# nodeSelector:
# storage: tape
# backupWorker: 'True'
# resources:
# requests:
# memory: 1G
# cpu: 600m
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: backupWorker
# operator: In
# values:
# - 'True'
# tolerations:
# - key: "backupWorker"
# operator: "Equal"
# value: "True"
# effect: "NoSchedule"
# annotations:
# testName: scheduled-backup
# labels:
# backupWorker: 'True'
# schedulerName: 'default-scheduler'
# priorityClassName: 'high-priority'
# containerSecurityContext:
# privileged: true
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups: [1001, 1002, 1003]
volume:
persistentVolumeClaim:
# storageClassName: standard
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 6G
schedule:
- name: "sat-night-backup"
schedule: "0 0 * * 6"
keep: 3
storageName: s3-us-west
- name: "daily-backup"
schedule: "0 0 * * *"
keep: 5
storageName: fs-pvc
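The CR above is shipped as a reference example rather than templated by the chart; with the pxc-operator enabled it could be applied as-is (the namespace here is an assumption):

```bash
# hypothetical: apply the example PerconaXtraDBCluster once the operator runs
kubectl apply -n sql -f charts/kubezero-sql/cr.yaml
```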


@ -1,15 +1,9 @@
#!/bin/bash #!/bin/bash
set -ex set -ex
. ../../scripts/lib-update.sh helm dep update
#login_ecr_public
update_helm
### MariaDB ### MariaDB
# Fetch dashboards # Fetch dashboards
../kubezero-metrics/sync_grafana_dashboards.py dashboards-mariadb.yaml templates/mariadb/grafana-dashboards.yaml ../kubezero-metrics/sync_grafana_dashboards.py dashboards-mariadb.yaml templates/mariadb/grafana-dashboards.yaml
update_docs


@ -1,3 +1,24 @@
pxc-operator:
enabled: false
# we want a clusterwide operator
watchAllNamespaces: true
# running on the control-plane
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
resources:
limits:
#cpu: 200m
memory: 512Mi
requests:
cpu: 50m
memory: 32Mi
mariadb-galera: mariadb-galera:
enabled: false enabled: false
@ -5,9 +26,8 @@ mariadb-galera:
db: db:
user: mariadb user: mariadb
galera:
# For a single node "cluster" force bootstrap # For a single node "cluster" force bootstrap
#galera:
#bootstrap: #bootstrap:
# bootstrapFromNode: 0 # bootstrapFromNode: 0
# forceSafeToBootstrap: true # forceSafeToBootstrap: true
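The commented block at the end covers single-node disaster recovery; spelled out, the override would look like the sketch below (keys taken from the comments, to be used only when forcing a lone surviving node to bootstrap):

```yaml
# hypothetical recovery override; forces a single node to bootstrap the cluster
mariadb-galera:
  galera:
    bootstrap:
      bootstrapFromNode: 0
      forceSafeToBootstrap: true
```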


@ -1,6 +1,6 @@
# kubezero-storage # kubezero-storage
![Version: 0.8.8](https://img.shields.io/badge/Version-0.8.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
@ -20,10 +20,10 @@ Kubernetes: `>= 1.26.0`
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.fairwinds.com/stable | gemini | 2.1.3 | | https://charts.fairwinds.com/stable | gemini | 2.1.3 |
| https://k8up-io.github.io/k8up | k8up | 4.7.0 | | https://k8up-io.github.io/k8up | k8up | 4.4.1 |
| https://kubernetes-sigs.github.io/aws-ebs-csi-driver | aws-ebs-csi-driver | 2.32.0 | | https://kubernetes-sigs.github.io/aws-ebs-csi-driver | aws-ebs-csi-driver | 2.22.0 |
| https://kubernetes-sigs.github.io/aws-efs-csi-driver | aws-efs-csi-driver | 3.0.6 | | https://kubernetes-sigs.github.io/aws-efs-csi-driver | aws-efs-csi-driver | 2.4.9 |
| https://openebs.github.io/lvm-localpv | lvm-localpv | 1.6.0 | | https://openebs.github.io/lvm-localpv | lvm-localpv | 1.2.0 |
## Values ## Values
@ -38,7 +38,6 @@ Kubernetes: `>= 1.26.0`
| aws-ebs-csi-driver.controller.resources.requests.memory | string | `"24Mi"` | | | aws-ebs-csi-driver.controller.resources.requests.memory | string | `"24Mi"` | |
| aws-ebs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | | | aws-ebs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | aws-ebs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-ebs-csi-driver.controller.volumeModificationFeature.enabled | bool | `false` | |
| aws-ebs-csi-driver.controller.volumeMounts[0].mountPath | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/"` | | | aws-ebs-csi-driver.controller.volumeMounts[0].mountPath | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/"` | |
| aws-ebs-csi-driver.controller.volumeMounts[0].name | string | `"aws-token"` | | | aws-ebs-csi-driver.controller.volumeMounts[0].name | string | `"aws-token"` | |
| aws-ebs-csi-driver.controller.volumeMounts[0].readOnly | bool | `true` | | | aws-ebs-csi-driver.controller.volumeMounts[0].readOnly | bool | `true` | |
@ -47,10 +46,7 @@ Kubernetes: `>= 1.26.0`
| aws-ebs-csi-driver.controller.volumes[0].projected.sources[0].serviceAccountToken.expirationSeconds | int | `86400` | | | aws-ebs-csi-driver.controller.volumes[0].projected.sources[0].serviceAccountToken.expirationSeconds | int | `86400` | |
| aws-ebs-csi-driver.controller.volumes[0].projected.sources[0].serviceAccountToken.path | string | `"token"` | | | aws-ebs-csi-driver.controller.volumes[0].projected.sources[0].serviceAccountToken.path | string | `"token"` | |
| aws-ebs-csi-driver.enabled | bool | `false` | | | aws-ebs-csi-driver.enabled | bool | `false` | |
| aws-ebs-csi-driver.helmTester.enabled | bool | `false` | |
| aws-ebs-csi-driver.node.loggingFormat | string | `"json"` | | | aws-ebs-csi-driver.node.loggingFormat | string | `"json"` | |
| aws-ebs-csi-driver.node.priorityClassName | string | `"system-node-critical"` | |
| aws-ebs-csi-driver.node.reservedVolumeAttachments | int | `3` | |
| aws-ebs-csi-driver.node.resources.limits.memory | string | `"32Mi"` | | | aws-ebs-csi-driver.node.resources.limits.memory | string | `"32Mi"` | |
| aws-ebs-csi-driver.node.resources.requests.cpu | string | `"10m"` | | | aws-ebs-csi-driver.node.resources.requests.cpu | string | `"10m"` | |
| aws-ebs-csi-driver.node.resources.requests.memory | string | `"16Mi"` | | | aws-ebs-csi-driver.node.resources.requests.memory | string | `"16Mi"` | |
@ -80,22 +76,15 @@ Kubernetes: `>= 1.26.0`
| aws-efs-csi-driver.controller.create | bool | `true` | | | aws-efs-csi-driver.controller.create | bool | `true` | |
| aws-efs-csi-driver.controller.logLevel | int | `2` | | | aws-efs-csi-driver.controller.logLevel | int | `2` | |
| aws-efs-csi-driver.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | aws-efs-csi-driver.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| aws-efs-csi-driver.controller.regionalStsEndpoints | bool | `true` | |
| aws-efs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | | | aws-efs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | aws-efs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-efs-csi-driver.controller.volumeMounts[0].mountPath | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/"` | |
| aws-efs-csi-driver.controller.volumeMounts[0].name | string | `"aws-token"` | |
| aws-efs-csi-driver.controller.volumes[0].name | string | `"aws-token"` | |
| aws-efs-csi-driver.controller.volumes[0].projected.sources[0].serviceAccountToken.audience | string | `"sts.amazonaws.com"` | |
| aws-efs-csi-driver.controller.volumes[0].projected.sources[0].serviceAccountToken.expirationSeconds | int | `86400` | |
| aws-efs-csi-driver.controller.volumes[0].projected.sources[0].serviceAccountToken.path | string | `"token"` | |
| aws-efs-csi-driver.enabled | bool | `false` | | | aws-efs-csi-driver.enabled | bool | `false` | |
| aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/csi.efs.fs"` | | | aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/csi.efs.fs"` | |
| aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"Exists"` | | | aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"Exists"` | |
| aws-efs-csi-driver.node.logLevel | int | `2` | | | aws-efs-csi-driver.node.logLevel | int | `2` | |
| aws-efs-csi-driver.node.resources.limits.memory | string | `"256Mi"` | | | aws-efs-csi-driver.node.resources.limits.memory | string | `"128Mi"` | |
| aws-efs-csi-driver.node.resources.requests.cpu | string | `"20m"` | | | aws-efs-csi-driver.node.resources.requests.cpu | string | `"20m"` | |
| aws-efs-csi-driver.node.resources.requests.memory | string | `"96Mi"` | | | aws-efs-csi-driver.node.resources.requests.memory | string | `"64Mi"` | |
| aws-efs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | | | aws-efs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | | | aws-efs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | |
| aws-efs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | | | aws-efs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | |
@ -105,8 +94,8 @@ Kubernetes: `>= 1.26.0`
| aws-efs-csi-driver.node.tolerations[2].effect | string | `"NoSchedule"` | | | aws-efs-csi-driver.node.tolerations[2].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.node.tolerations[2].key | string | `"aws.amazon.com/neuron"` | | | aws-efs-csi-driver.node.tolerations[2].key | string | `"aws.amazon.com/neuron"` | |
| aws-efs-csi-driver.node.tolerations[2].operator | string | `"Exists"` | | | aws-efs-csi-driver.node.tolerations[2].operator | string | `"Exists"` | |
| aws-efs-csi-driver.node.volMetricsOptIn | bool | `true` | |
| aws-efs-csi-driver.replicaCount | int | `1` | | | aws-efs-csi-driver.replicaCount | int | `1` | |
| crd.volumeSnapshot | bool | `true` | |
| gemini.enabled | bool | `false` | | | gemini.enabled | bool | `false` | |
| gemini.resources.limits.cpu | string | `"400m"` | | | gemini.resources.limits.cpu | string | `"400m"` | |
| gemini.resources.limits.memory | string | `"128Mi"` | | | gemini.resources.limits.memory | string | `"128Mi"` | |
@ -123,7 +112,6 @@ Kubernetes: `>= 1.26.0`
| k8up.tolerations[0].effect | string | `"NoSchedule"` | | | k8up.tolerations[0].effect | string | `"NoSchedule"` | |
| k8up.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | k8up.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| lvm-localpv.analytics.enabled | bool | `false` | | | lvm-localpv.analytics.enabled | bool | `false` | |
| lvm-localpv.crds.csi.volumeSnapshots.enabled | bool | `false` | |
| lvm-localpv.enabled | bool | `false` | | | lvm-localpv.enabled | bool | `false` | |
| lvm-localpv.lvmController.logLevel | int | `2` | | | lvm-localpv.lvmController.logLevel | int | `2` | |
| lvm-localpv.lvmController.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | lvm-localpv.lvmController.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
@ -138,14 +126,13 @@ Kubernetes: `>= 1.26.0`
| lvm-localpv.storageClass.default | bool | `false` | | | lvm-localpv.storageClass.default | bool | `false` | |
| lvm-localpv.storageClass.vgpattern | string | `""` | | | lvm-localpv.storageClass.vgpattern | string | `""` | |
| snapshotController.enabled | bool | `false` | | | snapshotController.enabled | bool | `false` | |
| snapshotController.image.name | string | `"registry.k8s.io/sig-storage/snapshot-controller"` | |
| snapshotController.image.tag | string | `"v7.0.1"` | |
| snapshotController.logLevel | int | `2` | | | snapshotController.logLevel | int | `2` | |
| snapshotController.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | snapshotController.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| snapshotController.replicas | int | `1` | | | snapshotController.replicas | int | `1` | |
| snapshotController.resources.limits.cpu | string | `"100m"` | |
| snapshotController.resources.limits.memory | string | `"64Mi"` | | | snapshotController.resources.limits.memory | string | `"64Mi"` | |
| snapshotController.resources.requests.cpu | string | `"10m"` | | | snapshotController.resources.requests.cpu | string | `"20m"` | |
| snapshotController.resources.requests.memory | string | `"24Mi"` | | | snapshotController.resources.requests.memory | string | `"16Mi"` | |
| snapshotController.tolerations[0].effect | string | `"NoSchedule"` | | | snapshotController.tolerations[0].effect | string | `"NoSchedule"` | |
| snapshotController.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | snapshotController.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
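With `snapshotController.enabled` and the snapshot CRDs installed, snapshots go through the standard snapshot.storage.k8s.io API; a minimal sketch, where the class and PVC names are assumptions:

```yaml
# hypothetical VolumeSnapshot handled by the bundled snapshot-controller
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: data-snap
spec:
  volumeSnapshotClassName: ebs-snapclass    # assumed snapshot class
  source:
    persistentVolumeClaimName: data-pvc     # assumed existing PVC
```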

Binary image file changed: 130 B → 35 KiB (content not shown).

@ -244,7 +244,7 @@ aws-efs-csi-driver:
cpu: 20m cpu: 20m
memory: 96Mi memory: 96Mi
limits: limits:
memory: 256Mi memory: 128Mi
affinity: affinity:
nodeAffinity: nodeAffinity:


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero name: kubezero
description: KubeZero - Root App of Apps chart description: KubeZero - Root App of Apps chart
type: application type: application
version: 1.30.5 version: 1.29.7-1
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
