Compare commits


14 Commits

SHA1 Message Date
a8e4ce7a15 chore(deps): update helm release neo4j to v5.26.4 2025-03-13 03:01:44 +00:00
4c10271ec6 Merge pull request 'chore(deps): update helm release argo-cd to v7.8.9' (#54) from renovate/kubezero-argo-kubezero-argo-dependencies into main
Reviewed-on: #54
2025-03-11 18:17:08 +00:00
5246f57329 chore(deps): update helm release argo-cd to v7.8.9 2025-03-11 18:17:08 +00:00
5bc6e6e435 fix: reduce load on api-server on single node control planes, more argo related fixes 2025-03-11 16:37:27 +00:00
cbcaec807a fix: replace apps during 1.31 2025-03-11 14:07:40 +01:00
bfafccaf32 feat: tooling tweaks, Istio ingress option to preserver external request Ids 2025-03-10 17:49:24 +00:00
3304363986 Fix: fix for minimal ES version in logging, clustered control plane upgrade fix, tooling cleanup 2025-03-04 11:47:19 +00:00
9fc9843283 feat: more envoy-ratelimit tuning, cleanups 2025-02-27 15:11:37 +00:00
ed48d93aaf feat: bump Istio to latest, migrate ratelimit to its own subchart 2025-02-26 15:39:09 +00:00
ce5b5de1c2 Merge pull request 'chore(deps): update helm release gateway to v1.24.3' (#47) from renovate/kubezero-istio-gateway-kubezero-istio-gateway-dependencies into main
Reviewed-on: #47
2025-02-26 15:30:52 +00:00
e2d3e89dd1 chore(deps): update helm release gateway to v1.24.3 2025-02-26 15:30:52 +00:00
1946bf3aed Merge pull request 'chore(deps): update kubezero-istio-dependencies' (#48) from renovate/kubezero-istio-kubezero-istio-dependencies into main
Reviewed-on: #48
2025-02-26 15:30:42 +00:00
efbd119cb6 chore(deps): update kubezero-istio-dependencies 2025-02-26 15:30:42 +00:00
adaf4fd114 fix: pin kubezero-lib chart dep, nvidia time-slicing support 2025-02-26 15:14:47 +00:00
66 changed files with 504 additions and 430 deletions

@@ -4,10 +4,10 @@
 set -x
 ARTIFACTS=($(echo $1 | tr "," "\n"))
-ACTION=$2
+ACTION="${2:-apply}"
+ARGOCD="${3:-False}"
 LOCAL_DEV=1
-ARGOCD="False"

 #VERSION="latest"
 KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
@@ -85,7 +85,7 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $
 # Root KubeZero apply directly and exit
 if [ ${ARTIFACTS[0]} == "kubezero" ]; then
-  kubectl replace -f $WORKDIR/kubezero/templates
+  kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
   exit $?

 # "catch all" apply all enabled modules
@@ -100,12 +100,12 @@ if [ "$ACTION" == "delete" ]; then
     _helm delete ${ARTIFACTS[idx]} || true
   done
 else
-  if [ "$ACTION" == "" -o "$ACTION" == "crds" ]; then
+  if [ "$ACTION" == "apply" -o "$ACTION" == "crds" ]; then
    for t in ${ARTIFACTS[@]}; do
      _helm crds $t || true
    done
  fi
  for t in ${ARTIFACTS[@]}; do
-    _helm apply $t || true
+    _helm $ACTION $t || true
  done
 fi
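
The action argument now defaults to apply and the ArgoCD flag becomes an optional third argument instead of a hard-coded variable, while _helm receives the action verbatim. A sketched invocation under these assumptions (script and module names are illustrative):

  ./apply_module.sh logging,istio               # apply both modules (default action)
  ./apply_module.sh logging crds                # only sync the module's CRDs
  ./apply_module.sh kubezero apply True         # ArgoCD mode: kubectl runs with --field-manager argo-controller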

@@ -20,20 +20,28 @@ post_control_plane_upgrade_cluster() {
 # All things AFTER all contollers are on the new version
 pre_cluster_upgrade_final() {
+  set +e
   if [ "$PLATFORM" == "aws" ];then
     # cleanup aws-iam-authenticator
-    kubectl delete clusterrolebinding aws-iam-authenticator || true
-    kubectl delete clusterrole aws-iam-authenticator || true
-    kubectl delete serviceaccount aws-iam-authenticator -n kube-system || true
-    kubectl delete cm aws-iam-authenticator -n kube-system || true
-    kubectl delete ds aws-iam-authenticator -n kube-system || true
-    kubectl delete IAMIdentityMapping kubezero-worker-nodes || true
-    kubectl delete IAMIdentityMapping kubernetes-admin || true
-    kubectl delete crd iamidentitymappings.iamauthenticator.k8s.aws || true
-    kubectl delete secret aws-iam-certs -n kube-system || true
+    kubectl delete clusterrolebinding aws-iam-authenticator
+    kubectl delete clusterrole aws-iam-authenticator
+    kubectl delete serviceaccount aws-iam-authenticator -n kube-system
+    kubectl delete cm aws-iam-authenticator -n kube-system
+    kubectl delete ds aws-iam-authenticator -n kube-system
+    kubectl delete IAMIdentityMapping kubezero-worker-nodes
+    kubectl delete IAMIdentityMapping kubernetes-admin
+    kubectl delete crd iamidentitymappings.iamauthenticator.k8s.aws
+    kubectl delete secret aws-iam-certs -n kube-system
   fi

+  # Remove any helm hook related resources
+  kubectl delete rolebinding argo-argocd-redis-secret-init -n argocd
+  kubectl delete sa argo-argocd-redis-secret-init -n argocd
+  kubectl delete role argo-argocd-redis-secret-init -n argocd
+  kubectl delete job argo-argocd-redis-secret-init -n argocd
+  set -e
 }

@@ -104,9 +104,9 @@ pre_kubeadm() {
 # Shared steps after calling kubeadm
 post_kubeadm() {
-  # KubeZero resources
+  # KubeZero resources - will never be applied by ArgoCD
   for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
-    kubectl apply -f $f $LOG
+    kubectl apply -f $f --server-side --force-conflicts $LOG
   done
 }
@@ -115,9 +115,13 @@ post_kubeadm() {
 control_plane_upgrade() {
   CMD=$1

+  ARGOCD=$(argo_used)
+
   render_kubeadm upgrade

   if [[ "$CMD" =~ ^(cluster)$ ]]; then
+    pre_control_plane_upgrade_cluster
+
     # get current values, argo app over cm
     get_kubezero_values $ARGOCD
@@ -133,7 +137,7 @@ control_plane_upgrade() {
     kubectl get application kubezero -n argocd -o yaml | \
       yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
       > $WORKDIR/new-argocd-app.yaml
-    kubectl replace -f $WORKDIR/new-argocd-app.yaml
+    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
   fi

   pre_kubeadm
@@ -147,13 +151,19 @@
     # install re-certed kubectl config for root
     cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config

+    post_control_plane_upgrade_cluster
+
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."

   elif [[ "$CMD" =~ ^(final)$ ]]; then
+    pre_cluster_upgrade_final
+
     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
     #_kubeadm upgrade apply phase addon all $KUBE_VERSION
     _kubeadm upgrade apply $KUBE_VERSION

+    post_cluster_upgrade_final
+
     echo "Upgraded kubeadm addons."
   fi
@@ -318,7 +328,10 @@ apply_module() {
   done

   for t in $MODULES; do
-    _helm apply $t
+    #_helm apply $t
+
+    # During 1.31 we change the ArgoCD tracking so replace
+    _helm replace $t
   done

   echo "Applied KubeZero modules: $MODULES"
@@ -394,17 +407,10 @@ for t in $@; do
   join) control_plane_node join;;
   restore) control_plane_node restore;;
   kubeadm_upgrade)
-    ARGOCD=$(argo_used)
-    # call hooks
-    pre_control_plane_upgrade_cluster
     control_plane_upgrade cluster
-    post_control_plane_upgrade_cluster
     ;;
   finalize_cluster_upgrade)
-    ARGOCD=$(argo_used)
-    pre_cluster_upgrade_final
     control_plane_upgrade final
-    post_cluster_upgrade_final
     ;;
   apply_*)
     ARGOCD=$(argo_used)
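
With the pre/post hooks folded into control_plane_upgrade itself, the task dispatch at the bottom shrinks to one call per phase; sketched usage, assuming this is the admin entrypoint invoked with task names:

  ./kubezero.sh kubeadm_upgrade             # renders kubeadm config, runs pre hook, cluster upgrade, post hook
  ./kubezero.sh finalize_cluster_upgrade    # runs pre_cluster_upgrade_final, addon upgrade, post hook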

@@ -2,11 +2,10 @@
 # Simulate well-known CRDs being available
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
-LOCAL_DEV=${LOCAL_DEV:-""}

 export HELM_SECRETS_BACKEND="vals"
+LOCAL_DEV=${LOCAL_DEV:-""}

 # Waits for max 300s and retries
 function wait_for() {
   local TRIES=0
@@ -34,6 +33,32 @@ function argo_used() {
 }

+function field_manager() {
+  local argo=${1:-"False"}
+
+  if [ "$argo" == "True" ]; then
+    echo "--field-manager argo-controller"
+  else
+    echo ""
+  fi
+}
+
+function get_kubezero_secret() {
+  export _key="$1"
+
+  kubectl get secrets -n kubezero kubezero-secrets -o yaml | yq '.data.[env(_key)]' | base64 -d -w0
+}
+
+function set_kubezero_secret() {
+  local key="$1"
+  local val="$2"
+
+  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n $val |base64 -w0)\" }}"
+}
+
 # get kubezero-values from ArgoCD if available or use in-cluster CM
 function get_kubezero_values() {
   local argo=${1:-"False"}
@@ -96,25 +121,12 @@ function waitSystemPodsRunning() {
   done
 }

-function argo_app_synced() {
-  APP=$1
-
-  # Ensure we are synced otherwise bail out
-  status=$(kubectl get application $APP -n argocd -o yaml | yq .status.sync.status)
-  if [ "$status" != "Synced" ]; then
-    echo "ArgoCD Application $APP not 'Synced'!"
-    return 1
-  fi
-
-  return 0
-}
-
 # make sure namespace exists prior to calling helm as the create-namespace options doesn't work
 function create_ns() {
   local namespace=$1
   if [ "$namespace" != "kube-system" ]; then
-    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
+    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace $(field_manager $ARGOCD)
   fi
 }
@@ -144,7 +156,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
   # Only apply if there are actually any crds
   if [ -s $WORKDIR/crds.yaml ]; then
     [ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
-    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
+    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts $(field_manager $ARGOCD)
   fi
 }
@@ -193,7 +205,7 @@ function _helm() {
     # Allow custom CRD handling
     declare -F ${module}-crds && ${module}-crds || _crds

-  elif [ $action == "apply" ]; then
+  elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml
@@ -204,7 +216,8 @@
     declare -F ${module}-pre && ${module}-pre

     render
-    kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
+    [ $action == "apply" ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
+    [ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?

     # Optional post hook
     declare -F ${module}-post && ${module}-post
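
The new helpers compose with the existing argo_used check; a minimal sketch (key and value are placeholders):

  ARGOCD=$(argo_used)                                       # prints "True" or "False"
  kubectl apply -f manifest.yaml $(field_manager $ARGOCD)   # adds --field-manager argo-controller only when ArgoCD is in use
  set_kubezero_secret test.key "secret-value"               # base64-encodes and patches kubezero-secrets in ns kubezero
  get_kubezero_secret test.key                              # prints secret-value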

@@ -14,6 +14,6 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
 kubeVersion: ">= 1.26.0"

charts/envoy-ratelimit/.gitignore (vendored, new file)
@@ -0,0 +1,3 @@
+istioctl
+istio
+istio.zdt

@@ -0,0 +1,32 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+
+README.md.gotmpl
+*.patch
+*.sh
+*.py
+
+istioctl
+istio
+istio.zdt

@@ -0,0 +1,19 @@
+apiVersion: v2
+name: envoy-ratelimit
+description: Envoy gobal ratelimiting service - part of KubeZero
+type: application
+version: 0.1.2
+home: https://kubezero.com
+icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
+keywords:
+  - kubezero
+  - envoy
+  - istio
+maintainers:
+  - name: Stefan Reimer
+    email: stefan@zero-downtime.net
+dependencies:
+  - name: kubezero-lib
+    version: 0.2.1
+    repository: https://cdn.zero-downtime.net/charts/
+kubeVersion: ">= 1.31.0-0"

@@ -0,0 +1,37 @@
+# envoy-ratelimit
+
+![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+
+Envoy gobal ratelimiting service - part of KubeZero
+
+**Homepage:** <https://kubezero.com>
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| Stefan Reimer | <stefan@zero-downtime.net> | |
+
+## Requirements
+
+Kubernetes: `>= 1.31.0-0`
+
+| Repository | Name | Version |
+|------------|------|---------|
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| descriptors.ingress[0].key | string | `"remote_address"` | |
+| descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
+| descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
+| descriptors.privateIngress[0].key | string | `"remote_address"` | |
+| descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
+| descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
+| failureModeDeny | bool | `false` | |
+| localCacheSize | int | `1048576` | |
+| log.format | string | `"json"` | |
+| log.level | string | `"warn"` | |
+| metrics.enabled | bool | `true` | |

@@ -0,0 +1,16 @@
+{{ template "chart.header" . }}
+{{ template "chart.deprecationWarning" . }}
+
+{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
+
+{{ template "chart.description" . }}
+
+{{ template "chart.homepageLine" . }}
+
+{{ template "chart.maintainersSection" . }}
+
+{{ template "chart.sourcesSection" . }}
+
+{{ template "chart.requirementsSection" . }}
+
+{{ template "chart.valuesSection" . }}

@@ -1,4 +1,3 @@
-{{- if .Values.rateLimiting.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -10,10 +9,9 @@ data:
   ingress.yaml: |
     domain: ingress
     descriptors:
-{{- toYaml .Values.rateLimiting.descriptors.ingress | nindent 4 }}
+{{- toYaml .Values.descriptors.ingress | nindent 4 }}
   private-ingress.yaml: |
     domain: private-ingress
     descriptors:
-{{- toYaml .Values.rateLimiting.descriptors.privateIngress | nindent 4 }}
+{{- toYaml .Values.descriptors.privateIngress | nindent 4 }}
-{{- end }}

@@ -0,0 +1,63 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ratelimit
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ratelimit
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: ratelimit
+    spec:
+      containers:
+      - image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+        imagePullPolicy: IfNotPresent
+        name: ratelimit
+        command: ["/bin/ratelimit"]
+        env:
+        - name: LOG_LEVEL
+          value: {{ default "WARN" .Values.log.level }}
+        - name: LOG_FORMAT
+          value: {{ default "text" .Values.log.format }}
+        - name: REDIS_SOCKET_TYPE
+          value: tcp
+        - name: REDIS_URL
+          value: ratelimit-valkey:6379
+        - name: USE_PROMETHEUS
+          value: "true"
+        - name: USE_STATSD
+          value: "false"
+        - name: RUNTIME_ROOT
+          value: /data
+        - name: RUNTIME_SUBDIRECTORY
+          value: ratelimit
+        - name: RUNTIME_WATCH_ROOT
+          value: "false"
+        - name: RUNTIME_IGNOREDOTFILES
+          value: "true"
+        - name: LOCAL_CACHE_SIZE_IN_BYTES
+          value: "{{ default 0 .Values.localCacheSize | int }}"
+        ports:
+        - containerPort: 8081
+        #- containerPort: 8080
+        #- containerPort: 6070
+        volumeMounts:
+        - name: ratelimit-config
+          mountPath: /data/ratelimit/config
+        resources:
+          requests:
+            cpu: 50m
+            memory: 32Mi
+          limits:
+            cpu: 1
+            memory: 256Mi
+      volumes:
+      - name: ratelimit-config
+        configMap:
+          name: ratelimit-config

@@ -1,4 +1,3 @@
-{{- if .Values.rateLimiting.enabled }}
 apiVersion: networking.istio.io/v1alpha3
 kind: EnvoyFilter
 metadata:
@@ -27,7 +26,7 @@ spec:
         typed_config:
           "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
           domain: ingress
-          failure_mode_deny: {{ .Values.rateLimiting.failureModeDeny }}
+          failure_mode_deny: {{ .Values.failureModeDeny }}
           timeout: 0.5s
           rate_limit_service:
             grpc_service:
@@ -85,7 +84,7 @@ spec:
         typed_config:
           "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
           domain: private-ingress
-          failure_mode_deny: {{ .Values.rateLimiting.failureModeDeny }}
+          failure_mode_deny: {{ .Values.failureModeDeny }}
          timeout: 0.5s
          rate_limit_service:
            grpc_service:
@@ -113,4 +112,3 @@ spec:
             socket_address:
               address: ratelimit.istio-system
               port_value: 8081
-{{- end }}

@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: ratelimit
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: ratelimit
+spec:
+  ports:
+  #- name: http-port
+  #  port: 8080
+  #  targetPort: 8080
+  #  protocol: TCP
+  - name: grpc-port
+    port: 8081
+    targetPort: 8081
+    protocol: TCP
+  #- name: http-debug
+  #  port: 6070
+  #  targetPort: 6070
+  #  protocol: TCP
+  - name: http-monitoring
+    port: 9090
+    targetPort: 9090
+    protocol: TCP
+  selector:
+    app: ratelimit

@@ -1,4 +1,4 @@
-{{- if and .Values.istiod.telemetry.enabled .Values.rateLimiting.enabled }}
+{{- if and .Values.metrics.enabled }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:

@@ -0,0 +1,24 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ratelimit-valkey
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: ratelimit-valkey
+  template:
+    metadata:
+      labels:
+        app: ratelimit-valkey
+    spec:
+      containers:
+      - image: valkey/valkey:8.1-alpine3.21
+        imagePullPolicy: IfNotPresent
+        name: valkey
+        ports:
+        - name: valkey
+          containerPort: 6379
+      restartPolicy: Always
+      serviceAccountName: ""

@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: ratelimit-valkey
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: ratelimit-valkey
+spec:
+  ports:
+  - name: valkey
+    port: 6379
+  selector:
+    app: ratelimit-valkey

@@ -0,0 +1,9 @@
+#!/bin/bash
+set -ex
+
+. ../../scripts/lib-update.sh
+
+#login_ecr_public
+
+update_helm
+update_docs

@@ -0,0 +1,38 @@
+image:
+  repository: envoyproxy/ratelimit
+  # see: https://hub.docker.com/r/envoyproxy/ratelimit/tags
+  tag: 80b15778
+
+log:
+  level: warn
+  format: json
+
+# 1MB local cache for already reached limits to reduce calls to Redis
+localCacheSize: 1048576
+
+# Wether to block requests if ratelimiting is down
+failureModeDeny: false
+
+# rate limit descriptors for each domain
+# - slow: 1 req/s over a minute per sourceIP
+descriptors:
+  ingress:
+  - key: speed
+    value: slow
+    descriptors:
+    - key: remote_address
+      rate_limit:
+        unit: minute
+        requests_per_unit: 60
+
+  privateIngress:
+  - key: speed
+    value: slow
+    descriptors:
+    - key: remote_address
+      rate_limit:
+        unit: minute
+        requests_per_unit: 60
+
+metrics:
+  enabled: false
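
The default descriptors nest a static speed: slow entry above a per-remote_address budget of 60 requests per minute. A rough smoke test from outside, assuming an ingress host wired to the ingress domain (URL is a placeholder): once the 60-request budget is spent within a minute, the Envoy filter should start answering 429:

  for i in $(seq 1 70); do
    curl -s -o /dev/null -w "%{http_code}\n" https://www.example.com/
  done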

charts/kubeadm/TODO (new file)
@@ -0,0 +1,6 @@
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_256_GCM_SHA384"}
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_128_GCM_SHA256"}
+
+"RuntimeConfig from runtime service failed","err":"rpc error: code = Unimplemented desc = unknown method RuntimeConfig for service runtime.v1.RuntimeService"}
+
+"CRI implementation should be updated to support RuntimeConfig when KubeletCgroupDriverFromCRI feature gate has been enabled. Falling back to using cgroupDriver from kubelet config."

@@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta4
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
-#  NonGracefulFailover: true
+featureGates:
+  ControlPlaneKubeletLocalMode: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16

@@ -3,7 +3,7 @@
 {{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
 {{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList"}}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,

@@ -14,7 +14,7 @@ KubeZero umbrella chart for various optional cluster addons

 ## Requirements

-Kubernetes: `>= 1.26.0`
+Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
@@ -94,9 +94,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.managedTag | string | `"zdt:kubezero:nth:${ClusterName}"` | "zdt:kubezero:nth:${ClusterName}" |
 | aws-node-termination-handler.metadataTries | int | `0` | |
 | aws-node-termination-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
-| aws-node-termination-handler.podMonitor.create | bool | `false` | |
 | aws-node-termination-handler.queueURL | string | `""` | https://sqs.${AWS::Region}.amazonaws.com/${AWS::AccountId}/${ClusterName}_Nth |
-| aws-node-termination-handler.rbac.pspEnabled | bool | `false` | |
+| aws-node-termination-handler.serviceMonitor.create | bool | `false` | |
 | aws-node-termination-handler.taintNode | bool | `true` | |
 | aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
 | aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
@@ -110,7 +109,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
 | cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
-| cluster-autoscaler.image.tag | string | `"v1.30.2"` | |
+| cluster-autoscaler.image.tag | string | `"v1.31.1"` | |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` | |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` | |
@@ -159,6 +158,9 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | neuron-helm-chart.enabled | bool | `false` | |
 | neuron-helm-chart.npd.enabled | bool | `false` | |
 | nvidia-device-plugin.cdi.nvidiaHookPath | string | `"/usr/bin"` | |
+| nvidia-device-plugin.config.default | string | `"default"` | |
+| nvidia-device-plugin.config.map.default | string | `"version: v1\nflags:\n migStrategy: none"` | |
+| nvidia-device-plugin.config.map.time-slice-4x | string | `"version: v1\nflags:\n migStrategy: none\nsharing:\n timeSlicing:\n resources:\n - name: nvidia.com/gpu\n replicas: 4"` | |
 | nvidia-device-plugin.deviceDiscoveryStrategy | string | `"nvml"` | |
 | nvidia-device-plugin.enabled | bool | `false` | |
 | nvidia-device-plugin.runtimeClassName | string | `"nvidia"` | |

@@ -185,6 +185,22 @@ neuron-helm-chart:

 nvidia-device-plugin:
   enabled: false
+  config:
+    default: "default"
+    map:
+      default: |-
+        version: v1
+        flags:
+          migStrategy: none
+      time-slice-4x: |-
+        version: v1
+        flags:
+          migStrategy: none
+        sharing:
+          timeSlicing:
+            resources:
+            - name: nvidia.com/gpu
+              replicas: 4
   cdi:
     nvidiaHookPath: /usr/bin
   deviceDiscoveryStrategy: nvml
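
The map follows the upstream nvidia-device-plugin config scheme, so individual GPU nodes can be switched from the default profile to 4-way time-slicing via the plugin's config-selection label; a sketch, assuming the upstream label key and an illustrative node name:

  kubectl label node my-gpu-node nvidia.com/device-plugin.config=time-slice-4x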

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.2.8
+version: 0.2.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -15,14 +15,14 @@ maintainers:
   # Url: https://github.com/argoproj/argo-helm/tree/main/charts
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: argo-events
     version: 2.4.13
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.8.2
+    version: 7.8.9
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-apps

@@ -106,10 +106,9 @@ argo-cd:
       extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
     params:
-      controller.status.processors: "10"
-      controller.operation.processors: "5"
+      controller.resource.health.persist: "false"
       controller.diff.server.side: "true"
-      controller.sync.timeout.seconds: "1800"
+      controller.sync.timeout.seconds: 1800

       server.insecure: true
       server.enable.gzip: true
@@ -178,6 +177,9 @@ argo-cd:
     serviceMonitor:
       enabled: true

+  redisSecretInit:
+    enabled: false
+
   # redis:
   #   We might want to try to keep redis close to the controller
   #   affinity:

@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: keycloak
     repository: "oci://registry-1.docker.io/bitnamicharts"

@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.2.1"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: redis
     version: 20.0.3

@@ -13,7 +13,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: cert-manager
     version: v1.17.1

@@ -15,7 +15,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: 0.1.6
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: gitea
     version: 10.6.0

@@ -12,14 +12,12 @@ spec:
   hosts:
   - {{ .Values.gitea.istio.url }}
   http:
-{{- if .Values.gitea.istio.authProvider }}
-  # https://github.com/go-gitea/gitea/issues/13606
+{{- if .Values.gitea.istio.blockApi }}
   - match:
     - uri:
-        regex: ^/user/login.*
-    redirect:
-      uri: /user/oauth2/{{ .Values.gitea.istio.authProvider }}
-      redirectCode: 302
+        prefix: /api
+    directResponse:
+      status: 401
 {{- end }}
   - route:
     - destination:

@@ -16,6 +16,10 @@ gitea:
       claimName: data-gitea-0
     size: 4Gi

+  service:
+    http:
+      port: 80
+
   securityContext:
     allowPrivilegeEscalation: false
     capabilities:
@@ -83,6 +87,7 @@
     enabled: false
     gateway: istio-ingress/private-ingressgateway
     url: git.example.com
+    blockApi: false

 jenkins:
@@ -298,7 +303,7 @@ renovate:
     LOG_FORMAT: json
   cronjob:
     concurrencyPolicy: Forbid
-    jobBackoffLimit: 3
+    jobBackoffLimit: 2
     schedule: "0 3 * * *"
     successfulJobsHistoryLimit: 1

@@ -13,7 +13,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: falco
     version: 4.2.5

@@ -13,10 +13,10 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.2.1"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: neo4j
-    version: 5.26.3
+    version: 5.26.4
     repository: https://helm.neo4j.com/neo4j
     condition: neo4j.enabled

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-istio-gateway
 description: KubeZero Umbrella Chart for Istio gateways
 type: application
-version: 0.24.2
+version: 0.24.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -14,9 +14,9 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: gateway
-    version: 1.24.2
+    version: 1.24.3
     repository: https://istio-release.storage.googleapis.com/charts
 kubeVersion: ">= 1.30.0-0"

@@ -1,6 +1,6 @@
 # kubezero-istio-gateway

-![Version: 0.24.2](https://img.shields.io/badge/Version-0.24.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.24.3](https://img.shields.io/badge/Version-0.24.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero Umbrella Chart for Istio gateways
@@ -20,8 +20,8 @@ Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://istio-release.storage.googleapis.com/charts | gateway | 1.24.2 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://istio-release.storage.googleapis.com/charts | gateway | 1.24.3 |

 ## Values
@@ -32,8 +32,8 @@ Kubernetes: `>= 1.30.0-0`
 | gateway.autoscaling.maxReplicas | int | `4` | |
 | gateway.autoscaling.minReplicas | int | `1` | |
 | gateway.autoscaling.targetCPUUtilizationPercentage | int | `80` | |
-| gateway.minReadySeconds | int | `120` | |
-| gateway.podAnnotations."proxy.istio.io/config" | string | `"{ \"terminationDrainDuration\": \"20s\" }"` | |
+| gateway.minReadySeconds | int | `10` | |
+| gateway.podAnnotations."proxy.istio.io/config" | string | `"{ \"terminationDrainDuration\": \"90s\" }"` | |
 | gateway.replicaCount | int | `1` | |
 | gateway.resources.limits.memory | string | `"512Mi"` | |
 | gateway.resources.requests.cpu | string | `"50m"` | |

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.24.2
+appVersion: 1.24.3
 description: Helm chart for deploying Istio gateways
 icon: https://istio.io/latest/favicons/android-192x192.png
 keywords:
@@ -9,4 +9,4 @@ name: gateway
 sources:
   - https://github.com/istio/istio
 type: application
-version: 1.24.2
+version: 1.24.3

@@ -77,7 +77,7 @@ spec:
         allowPrivilegeEscalation: false
         privileged: false
         readOnlyRootFilesystem: true
-      {{- if not (eq .Values.platform "openshift") }}
+      {{- if not (eq (.Values.platform | default "") "openshift") }}
         runAsUser: 1337
         runAsGroup: 1337
       {{- end }}

@@ -49,7 +49,7 @@ Finally, we can set all of that under .Values so the chart behaves without aware
 {{- $a := mustMergeOverwrite $defaults $profile }}
 {{- end }}
 # Flatten globals, if defined on a per-chart basis
-{{- if false }}
+{{- if true }}
 {{- $a := mustMergeOverwrite $defaults ($profile.global) ($.Values.global | default dict) }}
 {{- end }}
 {{- $b := set $ "Values" (mustMergeOverwrite $defaults $.Values) }}

@@ -32,6 +32,7 @@ spec:
             use_remote_address: true
             normalize_path: true
             merge_slashes: true
+            preserve_external_request_id: {{ .Values.hardening.preserveExternalRequestId }}
           {{- if .Values.hardening.unescapeSlashes }}
             path_with_escaped_slashes_action: UNESCAPE_AND_REDIRECT
           {{- end }}

@@ -43,3 +43,4 @@ proxyProtocol: true
 hardening:
   rejectUnderscoresHeaders: true
   unescapeSlashes: true
+  preserveExternalRequestId: false

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-istio
 description: KubeZero Umbrella Chart for Istio
 type: application
-version: 0.24.2
+version: 0.24.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -13,16 +13,20 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
+  - name: envoy-ratelimit
+    version: 0.1.2
+    repository: https://cdn.zero-downtime.net/charts/
+    condition: envoy-ratelimit.enabled
   - name: base
-    version: 1.24.2
+    version: 1.24.3
     repository: https://istio-release.storage.googleapis.com/charts
   - name: istiod
-    version: 1.24.2
+    version: 1.24.3
     repository: https://istio-release.storage.googleapis.com/charts
   - name: kiali-server
-    version: "2.5.0"
+    version: "2.6.0"
     repository: https://kiali.org/helm-charts
     condition: kiali-server.enabled
 kubeVersion: ">= 1.30.0-0"

@@ -1,6 +1,6 @@
 # kubezero-istio

-![Version: 0.24.2](https://img.shields.io/badge/Version-0.24.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.24.3](https://img.shields.io/badge/Version-0.24.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero Umbrella Chart for Istio
@@ -20,15 +20,27 @@ Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://istio-release.storage.googleapis.com/charts | base | 1.24.2 |
-| https://istio-release.storage.googleapis.com/charts | istiod | 1.24.2 |
-| https://kiali.org/helm-charts | kiali-server | 2.5.0 |
+| https://cdn.zero-downtime.net/charts/ | envoy-ratelimit | 0.1.2 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://istio-release.storage.googleapis.com/charts | base | 1.24.3 |
+| https://istio-release.storage.googleapis.com/charts | istiod | 1.24.3 |
+| https://kiali.org/helm-charts | kiali-server | 2.6.0 |

 ## Values

 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
+| envoy-ratelimit.descriptors.ingress[0].key | string | `"remote_address"` | |
+| envoy-ratelimit.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
+| envoy-ratelimit.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
+| envoy-ratelimit.descriptors.privateIngress[0].key | string | `"remote_address"` | |
+| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
+| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
+| envoy-ratelimit.enabled | bool | `false` | |
+| envoy-ratelimit.failureModeDeny | bool | `false` | |
+| envoy-ratelimit.localCacheSize | int | `1048576` | |
+| envoy-ratelimit.log.format | string | `"json"` | |
+| envoy-ratelimit.log.level | string | `"warn"` | |
 | global.defaultPodDisruptionBudget.enabled | bool | `false` | |
 | global.logAsJson | bool | `true` | |
 | global.variant | string | `"distroless"` | |
@@ -50,17 +62,6 @@ Kubernetes: `>= 1.30.0-0`
 | kiali-server.istio.enabled | bool | `false` | |
 | kiali-server.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | kiali-server.server.metrics_enabled | bool | `false` | |
-| rateLimiting.descriptors.ingress[0].key | string | `"remote_address"` | |
-| rateLimiting.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
-| rateLimiting.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
-| rateLimiting.descriptors.privateIngress[0].key | string | `"remote_address"` | |
-| rateLimiting.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
-| rateLimiting.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
-| rateLimiting.enabled | bool | `false` | |
-| rateLimiting.failureModeDeny | bool | `false` | |
-| rateLimiting.localCacheSize | int | `1048576` | |
-| rateLimiting.log.format | string | `"json"` | |
-| rateLimiting.log.level | string | `"warn"` | |

 ## Resources

@@ -1,106 +0,0 @@
-{{- if .Values.rateLimiting.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: ratelimit-statsd-exporter-config
-  namespace: {{ .Release.Namespace }}
-  labels:
-    {{- include "kubezero-lib.labels" . | nindent 4 }}
-data:
-  config.yaml: |
-    defaults:
-      ttl: 1m # Resets the metrics every minute
-    mappings:
-    - match:
-        "ratelimit.service.rate_limit.*.*.near_limit"
-      name: "ratelimit_service_rate_limit_near_limit"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-    - match:
-        "ratelimit.service.rate_limit.*.*.over_limit"
-      name: "ratelimit_service_rate_limit_over_limit"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-    - match:
-        "ratelimit.service.rate_limit.*.*.total_hits"
-      name: "ratelimit_service_rate_limit_total_hits"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-    - match:
-        "ratelimit.service.rate_limit.*.*.within_limit"
-      name: "ratelimit_service_rate_limit_within_limit"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-    - match:
-        "ratelimit.service.rate_limit.*.*.*.near_limit"
-      name: "ratelimit_service_rate_limit_near_limit"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-        key2: "$3"
-    - match:
-        "ratelimit.service.rate_limit.*.*.*.over_limit"
-      name: "ratelimit_service_rate_limit_over_limit"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-        key2: "$3"
-    - match:
-        "ratelimit.service.rate_limit.*.*.*.total_hits"
-      name: "ratelimit_service_rate_limit_total_hits"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-        key2: "$3"
-    - match:
-        "ratelimit.service.rate_limit.*.*.*.within_limit"
-      name: "ratelimit_service_rate_limit_within_limit"
-      timer_type: "histogram"
-      labels:
-        domain: "$1"
-        key1: "$2"
-        key2: "$3"
-    - match:
-        "ratelimit.service.call.should_rate_limit.*"
-      name: "ratelimit_service_should_rate_limit_error"
-      match_metric_type: counter
-      labels:
-        err_type: "$1"
-    - match:
-        "ratelimit_server.*.total_requests"
-      name: "ratelimit_service_total_requests"
-      match_metric_type: counter
-      labels:
-        grpc_method: "$1"
-    - match:
-        "ratelimit_server.*.response_time"
-      name: "ratelimit_service_response_time_seconds"
-      timer_type: histogram
-      labels:
-        grpc_method: "$1"
-    - match:
-        "ratelimit.service.config_load_success"
-      name: "ratelimit_service_config_load_success"
-      match_metric_type: counter
-      ttl: 3m
-    - match:
-        "ratelimit.service.config_load_error"
-      name: "ratelimit_service_config_load_error"
-      match_metric_type: counter
-      ttl: 3m
-    - match: "."
-      match_type: "regex"
-      action: "drop"
-      name: "dropped"
-{{- end }}

@@ -1,154 +0,0 @@
-{{- if .Values.rateLimiting.enabled }}
-apiVersion: v1
-kind: Service
-metadata:
-  name: ratelimit-redis
-  namespace: {{ .Release.Namespace }}
-  labels:
-    app: ratelimit-redis
-spec:
-  ports:
-  - name: redis
-    port: 6379
-  selector:
-    app: ratelimit-redis
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: ratelimit-redis
-  namespace: {{ .Release.Namespace }}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ratelimit-redis
-  template:
-    metadata:
-      labels:
-        app: ratelimit-redis
-    spec:
-      containers:
-      - image: redis:6-alpine
-        imagePullPolicy: IfNotPresent
-        name: redis
-        ports:
-        - name: redis
-          containerPort: 6379
-      restartPolicy: Always
-      serviceAccountName: ""
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: ratelimit
-  namespace: {{ .Release.Namespace }}
-  labels:
-    app: ratelimit
-spec:
-  ports:
-  #- name: http-port
-  #  port: 8080
-  #  targetPort: 8080
-  #  protocol: TCP
-  - name: grpc-port
-    port: 8081
-    targetPort: 8081
-    protocol: TCP
-  #- name: http-debug
-  #  port: 6070
-  #  targetPort: 6070
-  #  protocol: TCP
-  - name: http-monitoring
-    port: 9102
-    targetPort: 9102
-    protocol: TCP
-  selector:
-    app: ratelimit
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: ratelimit
-  namespace: {{ .Release.Namespace }}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: ratelimit
-  strategy:
-    type: Recreate
-  template:
-    metadata:
-      labels:
-        app: ratelimit
-    spec:
-      containers:
-      - image: envoyproxy/ratelimit:b42701cb # 2021/08/12
-        imagePullPolicy: IfNotPresent
-        name: ratelimit
-        command: ["/bin/ratelimit"]
-        env:
-        - name: LOG_LEVEL
-          value: {{ default "WARN" .Values.rateLimiting.log.level }}
-        - name: LOG_FORMAT
-          value: {{ default "text" .Values.rateLimiting.log.format }}
-        - name: REDIS_SOCKET_TYPE
-          value: tcp
-        - name: REDIS_URL
-          value: ratelimit-redis:6379
-        - name: USE_STATSD
-          value: "true"
-        - name: STATSD_HOST
-          value: "localhost"
-        - name: STATSD_PORT
-          value: "9125"
-        - name: RUNTIME_ROOT
-          value: /data
-        - name: RUNTIME_SUBDIRECTORY
-          value: ratelimit
-        - name: RUNTIME_WATCH_ROOT
-          value: "false"
-        - name: RUNTIME_IGNOREDOTFILES
-          value: "true"
-        - name: LOCAL_CACHE_SIZE_IN_BYTES
-          value: "{{ default 0 .Values.rateLimiting.localCacheSize | int }}"
-        ports:
-        #- containerPort: 8080
-        - containerPort: 8081
-        #- containerPort: 6070
-        volumeMounts:
-        - name: ratelimit-config
-          mountPath: /data/ratelimit/config
-        resources:
-          requests:
-            cpu: 50m
-            memory: 32Mi
-          limits:
-            cpu: 1
-            memory: 256Mi
-      - name: statsd-exporter
-        image: docker.io/prom/statsd-exporter:v0.21.0
-        imagePullPolicy: Always
-        args: ["--statsd.mapping-config=/etc/statsd-exporter/config.yaml"]
-        ports:
-        - containerPort: 9125
-        # - containerPort: 9102
-        resources:
-          requests:
-            cpu: 50m
-            memory: 32Mi
-          limits:
-            cpu: 200m
-            memory: 64Mi
-        volumeMounts:
-        - name: statsd-exporter-config
-          mountPath: /etc/statsd-exporter
-      volumes:
-      - name: ratelimit-config
-        configMap:
-          name: ratelimit-config
-      - name: statsd-exporter-config
-        configMap:
-          name: ratelimit-statsd-exporter-config
-{{- end }}

@@ -56,29 +56,7 @@ kiali-server:
     #url: "kiali.example.com"

-rateLimiting:
+# for available options see envoy-ratelimit chart
+envoy-ratelimit:
   enabled: false
-
-  log:
-    level: warn
-    format: json
-
-  # 1MB local cache for already reached limits to reduce calls to Redis
-  localCacheSize: 1048576
-
-  # Wether to block requests if ratelimiting is down
-  failureModeDeny: false
-
-  # rate limit descriptors for each domain, examples 10 req/s per sourceIP
-  descriptors:
-    ingress:
-    - key: remote_address
-      rate_limit:
-        unit: second
-        requests_per_unit: 10
-    privateIngress:
-    - key: remote_address
-      rate_limit:
-        unit: second
-        requests_per_unit: 10
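
Everything that used to live under rateLimiting is now the envoy-ratelimit subchart, toggled like any other dependency; a quick render to confirm the wiring (repo-relative path assumed):

  helm dependency build charts/kubezero-istio
  helm template charts/kubezero-istio --set envoy-ratelimit.enabled=true | grep -E 'kind: (Deployment|EnvoyFilter)'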

@@ -10,4 +10,4 @@ keywords:
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
-kubeVersion: ">= 1.30.0"
+kubeVersion: ">= 1.30.0-0"

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-logging
 description: KubeZero Umbrella Chart for complete EFK stack
 type: application
-version: 0.8.13
+version: 0.8.14
 appVersion: 1.6.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -17,7 +17,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: fluentd
     version: 0.5.2

@@ -1,6 +1,6 @@
 # kubezero-logging

-![Version: 0.8.13](https://img.shields.io/badge/Version-0.8.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
+![Version: 0.8.14](https://img.shields.io/badge/Version-0.8.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)

 KubeZero Umbrella Chart for complete EFK stack
@@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
 | https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
@@ -135,7 +135,7 @@ Kubernetes: `>= 1.26.0`
 | kibana.istio.enabled | bool | `false` | |
 | kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
 | kibana.istio.url | string | `""` | |
-| version | string | `"7.17.3"` | |
+| version | string | `"7.17.7"` | |

 ## Resources:

@@ -2,7 +2,7 @@
 # fullnameOverride: ""

 # Version for ElasticSearch and Kibana have to match so we define it at top-level
-version: 7.17.3
+version: 7.17.7

 elastic_password: "" # super_secret_elastic_password

@@ -16,7 +16,7 @@ maintainers:
 # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: kube-prometheus-stack
     version: 69.2.3

@@ -62,12 +62,8 @@ kube-prometheus-stack:
         memory: 128Mi

     admissionWebhooks:
-      patch:
-        tolerations:
-        - key: node-role.kubernetes.io/control-plane
-          effect: NoSchedule
-        nodeSelector:
-          node-role.kubernetes.io/control-plane: ""
+      certManager:
+        enabled: true

   nodeExporter:
     enabled: true

@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
     version: 1.2.2

@@ -16,7 +16,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
     version: 1.16.6

@@ -17,7 +17,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: opensearch-operator
     version: 2.7.0

@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: mariadb-galera
     version: 14.0.10

@@ -17,7 +17,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: lvm-localpv
     version: 1.6.2

@@ -16,7 +16,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: opentelemetry-collector
     version: 0.108.0

@@ -13,6 +13,6 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.2.1"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts
 kubeVersion: ">= 1.31.0-0"

@@ -9,6 +9,10 @@ metadata:
   namespace: argocd
   labels:
     {{- include "kubezero-lib.labels" . | nindent 4 }}
+  {{- with ( index .Values $name "annotations" ) }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
 {{- if not ( index .Values $name "retain" ) }}
   finalizers:
     - resources-finalizer.argocd.argoproj.io

@@ -42,6 +42,8 @@ external-dns:
   - "--aws-zone-type=public"
   - "--aws-zones-cache-duration=1h"
   env:
+    - name: AWS_REGION
+      value: {{ .Values.global.aws.region }}
    - name: AWS_ROLE_ARN
      value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.externalDNS"
    - name: AWS_WEB_IDENTITY_TOKEN_FILE

@@ -2,10 +2,22 @@
 argo-cd:
   enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}

+{{- with index .Values "argo" "argo-cd" "configs" }}
   configs:
-{{- with index .Values "argo" "argo-cd" "configs" }}
     {{- toYaml . | nindent 4 }}
 {{- end }}

+  params:
+  {{- if not $.Values.global.highAvailable }}
+    # Reduce load on API server on single node control plane
+    controller.status.processors: 2
+    controller.operation.processors: 1
+    controller.kubectl.parallelism.limit: 1
+  {{- else }}
+    controller.status.processors: 8
+    controller.operation.processors: 4
+    controller.kubectl.parallelism.limit: 4
+  {{- end }}
+
   controller:
     metrics:

@@ -28,8 +28,8 @@ kiali-server:
   {{- toYaml . | nindent 2 }}
 {{- end }}

-{{- with .Values.istio.rateLimiting }}
-rateLimiting:
+{{- with index .Values "istio" "envoy-ratelimit" }}
+envoy-ratelimit:
   {{- toYaml . | nindent 2 }}
 {{- end }}
 {{- end }}

@@ -64,13 +64,13 @@ storage:
 istio:
   enabled: false
   namespace: istio-system
-  targetRevision: 0.24.2
+  targetRevision: 0.24.3

 istio-ingress:
   enabled: false
   chart: kubezero-istio-gateway
   namespace: istio-ingress
-  targetRevision: 0.24.2
+  targetRevision: 0.24.3
   gateway:
     service: {}
@@ -78,7 +78,7 @@ istio-private-ingress:
   enabled: false
   chart: kubezero-istio-gateway
   namespace: istio-ingress
-  targetRevision: 0.24.2
+  targetRevision: 0.24.3
   gateway:
     service: {}
@@ -114,7 +114,9 @@ metrics:
 logging:
   enabled: false
   namespace: logging
-  targetRevision: 0.8.13
+  targetRevision: 0.8.14
+  annotations:
+    argocd.argoproj.io/compare-options: ServerSideDiff=false

 argo:
   enabled: false

@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.4"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts
   - name: manticoresearch
     version: "5.0.25"

@@ -14,6 +14,6 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
 kubeVersion: ">= 1.20.0"