Compare commits

14 commits: b83d7c3d9c ... a8e4ce7a15

a8e4ce7a15
4c10271ec6
5246f57329
5bc6e6e435
cbcaec807a
bfafccaf32
3304363986
9fc9843283
ed48d93aaf
ce5b5de1c2
e2d3e89dd1
1946bf3aed
efbd119cb6
adaf4fd114
@@ -4,10 +4,10 @@
 set -x

 ARTIFACTS=($(echo $1 | tr "," "\n"))
-ACTION=$2
+ACTION="${2:-apply}"
+ARGOCD="${3:-False}"

 LOCAL_DEV=1
-ARGOCD="False"

 #VERSION="latest"
 KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
@@ -85,7 +85,7 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $

 # Root KubeZero apply directly and exit
 if [ ${ARTIFACTS[0]} == "kubezero" ]; then
-  kubectl replace -f $WORKDIR/kubezero/templates
+  kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
   exit $?

 # "catch all" apply all enabled modules
@@ -100,12 +100,12 @@ if [ "$ACTION" == "delete" ]; then
     _helm delete ${ARTIFACTS[idx]} || true
   done
 else
-  if [ "$ACTION" == "" -o "$ACTION" == "crds" ]; then
+  if [ "$ACTION" == "apply" -o "$ACTION" == "crds" ]; then
     for t in ${ARTIFACTS[@]}; do
       _helm crds $t || true
     done
   fi

   for t in ${ARTIFACTS[@]}; do
-    _helm apply $t || true
+    _helm $ACTION $t || true
   done
 fi
@@ -20,20 +20,28 @@ post_control_plane_upgrade_cluster() {

 # All things AFTER all controllers are on the new version
 pre_cluster_upgrade_final() {
   set +e

   if [ "$PLATFORM" == "aws" ];then
     # cleanup aws-iam-authenticator
-    kubectl delete clusterrolebinding aws-iam-authenticator
-    kubectl delete clusterrole aws-iam-authenticator
-    kubectl delete serviceaccount aws-iam-authenticator -n kube-system
-    kubectl delete cm aws-iam-authenticator -n kube-system
-    kubectl delete ds aws-iam-authenticator -n kube-system
-    kubectl delete IAMIdentityMapping kubezero-worker-nodes
-    kubectl delete IAMIdentityMapping kubernetes-admin
-    kubectl delete crd iamidentitymappings.iamauthenticator.k8s.aws
-    kubectl delete secret aws-iam-certs -n kube-system
+    kubectl delete clusterrolebinding aws-iam-authenticator || true
+    kubectl delete clusterrole aws-iam-authenticator || true
+    kubectl delete serviceaccount aws-iam-authenticator -n kube-system || true
+    kubectl delete cm aws-iam-authenticator -n kube-system || true
+    kubectl delete ds aws-iam-authenticator -n kube-system || true
+    kubectl delete IAMIdentityMapping kubezero-worker-nodes || true
+    kubectl delete IAMIdentityMapping kubernetes-admin || true
+    kubectl delete crd iamidentitymappings.iamauthenticator.k8s.aws || true
+
+    kubectl delete secret aws-iam-certs -n kube-system || true
   fi

+  # Remove any helm hook related resources
+  kubectl delete rolebinding argo-argocd-redis-secret-init -n argocd
+  kubectl delete sa argo-argocd-redis-secret-init -n argocd
+  kubectl delete role argo-argocd-redis-secret-init -n argocd
+  kubectl delete job argo-argocd-redis-secret-init -n argocd
+
   set -e
 }
@@ -104,9 +104,9 @@ pre_kubeadm() {

 # Shared steps after calling kubeadm
 post_kubeadm() {
-  # KubeZero resources
+  # KubeZero resources - will never be applied by ArgoCD
   for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
-    kubectl apply -f $f $LOG
+    kubectl apply -f $f --server-side --force-conflicts $LOG
   done
 }
@@ -115,9 +115,13 @@ post_kubeadm() {
 control_plane_upgrade() {
   CMD=$1

+  ARGOCD=$(argo_used)
+
   render_kubeadm upgrade

   if [[ "$CMD" =~ ^(cluster)$ ]]; then
+    pre_control_plane_upgrade_cluster
+
     # get current values, argo app over cm
     get_kubezero_values $ARGOCD
@@ -133,7 +137,7 @@ control_plane_upgrade() {
     kubectl get application kubezero -n argocd -o yaml | \
       yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
       > $WORKDIR/new-argocd-app.yaml
-    kubectl replace -f $WORKDIR/new-argocd-app.yaml
+    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
   fi

   pre_kubeadm
@@ -147,13 +151,19 @@ control_plane_upgrade() {
     # install re-certed kubectl config for root
     cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config

+    post_control_plane_upgrade_cluster
+
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."

   elif [[ "$CMD" =~ ^(final)$ ]]; then
+    pre_cluster_upgrade_final
+
     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
     #_kubeadm upgrade apply phase addon all $KUBE_VERSION
     _kubeadm upgrade apply $KUBE_VERSION

+    post_cluster_upgrade_final
+
     echo "Upgraded kubeadm addons."
   fi
@@ -318,7 +328,10 @@ apply_module() {
   done

   for t in $MODULES; do
-    _helm apply $t
+    #_helm apply $t
+
+    # During 1.31 we change the ArgoCD tracking so replace
+    _helm replace $t
   done

   echo "Applied KubeZero modules: $MODULES"
@@ -394,17 +407,10 @@ for t in $@; do
     join) control_plane_node join;;
     restore) control_plane_node restore;;
     kubeadm_upgrade)
-      ARGOCD=$(argo_used)
-      # call hooks
-      pre_control_plane_upgrade_cluster
       control_plane_upgrade cluster
-      post_control_plane_upgrade_cluster
       ;;
     finalize_cluster_upgrade)
-      ARGOCD=$(argo_used)
-      pre_cluster_upgrade_final
       control_plane_upgrade final
-      post_cluster_upgrade_final
       ;;
     apply_*)
       ARGOCD=$(argo_used)
@@ -2,11 +2,10 @@

 # Simulate well-known CRDs being available
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
+LOCAL_DEV=${LOCAL_DEV:-""}

 export HELM_SECRETS_BACKEND="vals"

-LOCAL_DEV=${LOCAL_DEV:-""}
-
 # Waits for max 300s and retries
 function wait_for() {
   local TRIES=0
@@ -34,6 +33,32 @@ function argo_used() {
 }

+
+function field_manager() {
+  local argo=${1:-"False"}
+
+  if [ "$argo" == "True" ]; then
+    echo "--field-manager argo-controller"
+  else
+    echo ""
+  fi
+}
+
+
+function get_kubezero_secret() {
+  export _key="$1"
+
+  kubectl get secrets -n kubezero kubezero-secrets -o yaml | yq '.data.[env(_key)]' | base64 -d -w0
+}
+
+
+function set_kubezero_secret() {
+  local key="$1"
+  local val="$2"
+
+  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n $val |base64 -w0)\" }}"
+}
+

 # get kubezero-values from ArgoCD if available or use in-cluster CM
 function get_kubezero_values() {
   local argo=${1:-"False"}
@@ -96,25 +121,12 @@ function waitSystemPodsRunning() {
   done
 }

-function argo_app_synced() {
-  APP=$1
-
-  # Ensure we are synced otherwise bail out
-  status=$(kubectl get application $APP -n argocd -o yaml | yq .status.sync.status)
-  if [ "$status" != "Synced" ]; then
-    echo "ArgoCD Application $APP not 'Synced'!"
-    return 1
-  fi
-
-  return 0
-}
-

 # make sure namespace exists prior to calling helm as the create-namespace option doesn't work
 function create_ns() {
   local namespace=$1
   if [ "$namespace" != "kube-system" ]; then
-    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
+    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace $(field_manager $ARGOCD)
   fi
 }
@@ -144,7 +156,7 @@ for manifest in yaml.safe_load_all(sys.stdin):

   # Only apply if there are actually any crds
   if [ -s $WORKDIR/crds.yaml ]; then
     [ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
-    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
+    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts $(field_manager $ARGOCD)
   fi
 }
@@ -193,7 +205,7 @@ function _helm() {
     # Allow custom CRD handling
     declare -F ${module}-crds && ${module}-crds || _crds

-  elif [ $action == "apply" ]; then
+  elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml
@@ -204,7 +216,8 @@ function _helm() {
     declare -F ${module}-pre && ${module}-pre

     render
-    kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
+    [ $action == "apply" ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
+    [ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?

     # Optional post hook
     declare -F ${module}-post && ${module}-post
@@ -14,6 +14,6 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
 kubeVersion: ">= 1.26.0"
charts/envoy-ratelimit/.gitignore: new file, 3 lines (vendored)
@@ -0,0 +1,3 @@
istioctl
istio
istio.zdt
charts/envoy-ratelimit/.helmignore: new file, 32 lines
@@ -0,0 +1,32 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

README.md.gotmpl
*.patch
*.sh
*.py

istioctl
istio
istio.zdt
charts/envoy-ratelimit/Chart.yaml: new file, 19 lines
@@ -0,0 +1,19 @@
apiVersion: v2
name: envoy-ratelimit
description: Envoy global ratelimiting service - part of KubeZero
type: application
version: 0.1.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  - envoy
  - istio
maintainers:
  - name: Stefan Reimer
    email: stefan@zero-downtime.net
dependencies:
  - name: kubezero-lib
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
kubeVersion: ">= 1.31.0-0"
charts/envoy-ratelimit/README.md: new file, 37 lines
@@ -0,0 +1,37 @@
# envoy-ratelimit

 

Envoy global ratelimiting service - part of KubeZero

**Homepage:** <https://kubezero.com>

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |

## Requirements

Kubernetes: `>= 1.31.0-0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| descriptors.ingress[0].key | string | `"remote_address"` | |
| descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
| descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
| descriptors.privateIngress[0].key | string | `"remote_address"` | |
| descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
| descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
| failureModeDeny | bool | `false` | |
| localCacheSize | int | `1048576` | |
| log.format | string | `"json"` | |
| log.level | string | `"warn"` | |
| metrics.enabled | bool | `true` | |
charts/envoy-ratelimit/README.md.gotmpl: new file, 16 lines
@@ -0,0 +1,16 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}

{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}

{{ template "chart.description" . }}

{{ template "chart.homepageLine" . }}

{{ template "chart.maintainersSection" . }}

{{ template "chart.sourcesSection" . }}

{{ template "chart.requirementsSection" . }}

{{ template "chart.valuesSection" . }}
@@ -1,4 +1,3 @@
-{{- if .Values.rateLimiting.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -10,10 +9,9 @@ data:
   ingress.yaml: |
     domain: ingress
     descriptors:
-{{- toYaml .Values.rateLimiting.descriptors.ingress | nindent 4 }}
+{{- toYaml .Values.descriptors.ingress | nindent 4 }}

   private-ingress.yaml: |
     domain: private-ingress
     descriptors:
-{{- toYaml .Values.rateLimiting.descriptors.privateIngress | nindent 4 }}
-{{- end }}
+{{- toYaml .Values.descriptors.privateIngress | nindent 4 }}
charts/envoy-ratelimit/templates/deployment.yaml: new file, 63 lines
@@ -0,0 +1,63 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ratelimit
  namespace: {{ .Release.Namespace }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ratelimit
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: ratelimit
    spec:
      containers:
      - image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
        imagePullPolicy: IfNotPresent
        name: ratelimit
        command: ["/bin/ratelimit"]
        env:
        - name: LOG_LEVEL
          value: {{ default "WARN" .Values.log.level }}
        - name: LOG_FORMAT
          value: {{ default "text" .Values.log.format }}
        - name: REDIS_SOCKET_TYPE
          value: tcp
        - name: REDIS_URL
          value: ratelimit-valkey:6379
        - name: USE_PROMETHEUS
          value: "true"
        - name: USE_STATSD
          value: "false"
        - name: RUNTIME_ROOT
          value: /data
        - name: RUNTIME_SUBDIRECTORY
          value: ratelimit
        - name: RUNTIME_WATCH_ROOT
          value: "false"
        - name: RUNTIME_IGNOREDOTFILES
          value: "true"
        - name: LOCAL_CACHE_SIZE_IN_BYTES
          value: "{{ default 0 .Values.localCacheSize | int }}"
        ports:
        - containerPort: 8081
        #- containerPort: 8080
        #- containerPort: 6070
        volumeMounts:
        - name: ratelimit-config
          mountPath: /data/ratelimit/config
        resources:
          requests:
            cpu: 50m
            memory: 32Mi
          limits:
            cpu: 1
            memory: 256Mi
      volumes:
      - name: ratelimit-config
        configMap:
          name: ratelimit-config
@@ -1,4 +1,3 @@
-{{- if .Values.rateLimiting.enabled }}
 apiVersion: networking.istio.io/v1alpha3
 kind: EnvoyFilter
 metadata:
@@ -27,7 +26,7 @@ spec:
           typed_config:
             "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
             domain: ingress
-            failure_mode_deny: {{ .Values.rateLimiting.failureModeDeny }}
+            failure_mode_deny: {{ .Values.failureModeDeny }}
             timeout: 0.5s
             rate_limit_service:
               grpc_service:
@@ -85,7 +84,7 @@ spec:
           typed_config:
             "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit
             domain: private-ingress
-            failure_mode_deny: {{ .Values.rateLimiting.failureModeDeny }}
+            failure_mode_deny: {{ .Values.failureModeDeny }}
             timeout: 0.5s
             rate_limit_service:
               grpc_service:
@@ -113,4 +112,3 @@ spec:
                 socket_address:
                   address: ratelimit.istio-system
                   port_value: 8081
-{{- end }}
charts/envoy-ratelimit/templates/service.yaml: new file, 27 lines
@@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
  name: ratelimit
  namespace: {{ .Release.Namespace }}
  labels:
    app: ratelimit
spec:
  ports:
  #- name: http-port
  #  port: 8080
  #  targetPort: 8080
  #  protocol: TCP
  - name: grpc-port
    port: 8081
    targetPort: 8081
    protocol: TCP
  #- name: http-debug
  #  port: 6070
  #  targetPort: 6070
  #  protocol: TCP
  - name: http-monitoring
    port: 9090
    targetPort: 9090
    protocol: TCP
  selector:
    app: ratelimit
@@ -1,4 +1,4 @@
-{{- if and .Values.istiod.telemetry.enabled .Values.rateLimiting.enabled }}
+{{- if and .Values.metrics.enabled }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
charts/envoy-ratelimit/templates/valkey-deployment.yaml: new file, 24 lines
@@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ratelimit-valkey
  namespace: {{ .Release.Namespace }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ratelimit-valkey
  template:
    metadata:
      labels:
        app: ratelimit-valkey
    spec:
      containers:
      - image: valkey/valkey:8.1-alpine3.21
        imagePullPolicy: IfNotPresent
        name: valkey
        ports:
        - name: valkey
          containerPort: 6379
      restartPolicy: Always
      serviceAccountName: ""
charts/envoy-ratelimit/templates/valkey-service.yaml: new file, 13 lines
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: ratelimit-valkey
  namespace: {{ .Release.Namespace }}
  labels:
    app: ratelimit-valkey
spec:
  ports:
  - name: valkey
    port: 6379
  selector:
    app: ratelimit-valkey
charts/envoy-ratelimit/update.sh: new executable file, 9 lines
@@ -0,0 +1,9 @@
#!/bin/bash
set -ex

. ../../scripts/lib-update.sh

#login_ecr_public
update_helm

update_docs
charts/envoy-ratelimit/values.yaml: new file, 38 lines
@@ -0,0 +1,38 @@
image:
  repository: envoyproxy/ratelimit
  # see: https://hub.docker.com/r/envoyproxy/ratelimit/tags
  tag: 80b15778

log:
  level: warn
  format: json

# 1MB local cache for already reached limits to reduce calls to Redis
localCacheSize: 1048576

# Whether to block requests if ratelimiting is down
failureModeDeny: false

# rate limit descriptors for each domain
# - slow: 1 req/s over a minute per sourceIP
descriptors:
  ingress:
  - key: speed
    value: slow
    descriptors:
    - key: remote_address
      rate_limit:
        unit: minute
        requests_per_unit: 60

  privateIngress:
  - key: speed
    value: slow
    descriptors:
    - key: remote_address
      rate_limit:
        unit: minute
        requests_per_unit: 60

metrics:
  enabled: false
charts/kubeadm/TODO: new file, 6 lines
@@ -0,0 +1,6 @@
"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_256_GCM_SHA384"}
"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_128_GCM_SHA256"}


"RuntimeConfig from runtime service failed","err":"rpc error: code = Unimplemented desc = unknown method RuntimeConfig for service runtime.v1.RuntimeService"}
"CRI implementation should be updated to support RuntimeConfig when KubeletCgroupDriverFromCRI feature gate has been enabled. Falling back to using cgroupDriver from kubelet config."
@@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta4
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
-#  NonGracefulFailover: true
+featureGates:
+  ControlPlaneKubeletLocalMode: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16
@@ -3,7 +3,7 @@
 {{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
 {{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList"}}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
@@ -14,7 +14,7 @@ KubeZero umbrella chart for various optional cluster addons

 ## Requirements

-Kubernetes: `>= 1.26.0`
+Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
@@ -94,9 +94,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.managedTag | string | `"zdt:kubezero:nth:${ClusterName}"` | "zdt:kubezero:nth:${ClusterName}" |
 | aws-node-termination-handler.metadataTries | int | `0` | |
 | aws-node-termination-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
+| aws-node-termination-handler.podMonitor.create | bool | `false` | |
 | aws-node-termination-handler.queueURL | string | `""` | https://sqs.${AWS::Region}.amazonaws.com/${AWS::AccountId}/${ClusterName}_Nth |
-| aws-node-termination-handler.rbac.pspEnabled | bool | `false` | |
-| aws-node-termination-handler.serviceMonitor.create | bool | `false` | |
 | aws-node-termination-handler.taintNode | bool | `true` | |
 | aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
 | aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
@@ -110,7 +109,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
 | cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
-| cluster-autoscaler.image.tag | string | `"v1.30.2"` | |
+| cluster-autoscaler.image.tag | string | `"v1.31.1"` | |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` | |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` | |
@@ -159,6 +158,9 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | neuron-helm-chart.enabled | bool | `false` | |
 | neuron-helm-chart.npd.enabled | bool | `false` | |
+| nvidia-device-plugin.cdi.nvidiaHookPath | string | `"/usr/bin"` | |
 | nvidia-device-plugin.config.default | string | `"default"` | |
 | nvidia-device-plugin.config.map.default | string | `"version: v1\nflags:\n  migStrategy: none"` | |
 | nvidia-device-plugin.config.map.time-slice-4x | string | `"version: v1\nflags:\n  migStrategy: none\nsharing:\n  timeSlicing:\n    resources:\n    - name: nvidia.com/gpu\n      replicas: 4"` | |
+| nvidia-device-plugin.deviceDiscoveryStrategy | string | `"nvml"` | |
 | nvidia-device-plugin.enabled | bool | `false` | |
+| nvidia-device-plugin.runtimeClassName | string | `"nvidia"` | |
|
@ -185,6 +185,22 @@ neuron-helm-chart:
|
||||
nvidia-device-plugin:
|
||||
enabled: false
|
||||
|
||||
config:
|
||||
default: "default"
|
||||
map:
|
||||
default: |-
|
||||
version: v1
|
||||
flags:
|
||||
migStrategy: none
|
||||
time-slice-4x: |-
|
||||
version: v1
|
||||
flags:
|
||||
migStrategy: none
|
||||
sharing:
|
||||
timeSlicing:
|
||||
resources:
|
||||
- name: nvidia.com/gpu
|
||||
replicas: 4
|
||||
cdi:
|
||||
nvidiaHookPath: /usr/bin
|
||||
deviceDiscoveryStrategy: nvml
|
||||
|
@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.2.8
+version: 0.2.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -15,14 +15,14 @@ maintainers:
 # Url: https://github.com/argoproj/argo-helm/tree/main/charts
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: argo-events
     version: 2.4.13
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.8.2
+    version: 7.8.9
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-apps
@@ -106,10 +106,9 @@ argo-cd:
     extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="

   params:
-    controller.status.processors: "10"
-    controller.operation.processors: "5"
     controller.resource.health.persist: "false"
     controller.diff.server.side: "true"
-    controller.sync.timeout.seconds: "1800"
+    controller.sync.timeout.seconds: 1800

     server.insecure: true
     server.enable.gzip: true
@@ -178,6 +177,9 @@ argo-cd:
     serviceMonitor:
       enabled: true

+  redisSecretInit:
+    enabled: false
+
   # redis:
   #   We might want to try to keep redis close to the controller
   #   affinity:
@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: keycloak
     repository: "oci://registry-1.docker.io/bitnamicharts"
@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.2.1"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: redis
     version: 20.0.3
@@ -13,7 +13,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: cert-manager
     version: v1.17.1
@@ -15,7 +15,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: 0.1.6
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: gitea
     version: 10.6.0
@@ -12,14 +12,12 @@ spec:
   hosts:
   - {{ .Values.gitea.istio.url }}
   http:
-  {{- if .Values.gitea.istio.authProvider }}
-  # https://github.com/go-gitea/gitea/issues/13606
+  {{- if .Values.gitea.istio.blockApi }}
   - match:
     - uri:
-        regex: ^/user/login.*
-    redirect:
-      uri: /user/oauth2/{{ .Values.gitea.istio.authProvider }}
-      redirectCode: 302
+        prefix: /api
+    directResponse:
+      status: 401
   {{- end }}
   - route:
     - destination:
@@ -16,6 +16,10 @@ gitea:
       claimName: data-gitea-0
       size: 4Gi

+  service:
+    http:
+      port: 80
+
   securityContext:
     allowPrivilegeEscalation: false
     capabilities:
@@ -83,6 +87,7 @@ gitea:
     enabled: false
     gateway: istio-ingress/private-ingressgateway
     url: git.example.com
+    blockApi: false


 jenkins:
@@ -298,7 +303,7 @@ renovate:
     LOG_FORMAT: json
   cronjob:
     concurrencyPolicy: Forbid
-    jobBackoffLimit: 3
+    jobBackoffLimit: 2
     schedule: "0 3 * * *"
     successfulJobsHistoryLimit: 1
@@ -13,7 +13,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: falco
     version: 4.2.5
@@ -13,10 +13,10 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.2.1"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: neo4j
-    version: 5.26.3
+    version: 5.26.4
     repository: https://helm.neo4j.com/neo4j
     condition: neo4j.enabled
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-istio-gateway
|
||||
description: KubeZero Umbrella Chart for Istio gateways
|
||||
type: application
|
||||
version: 0.24.2
|
||||
version: 0.24.3
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -14,9 +14,9 @@ maintainers:
|
||||
email: stefan@zero-downtime.net
|
||||
dependencies:
|
||||
- name: kubezero-lib
|
||||
version: ">= 0.1.6"
|
||||
version: 0.2.1
|
||||
repository: https://cdn.zero-downtime.net/charts/
|
||||
- name: gateway
|
||||
version: 1.24.2
|
||||
version: 1.24.3
|
||||
repository: https://istio-release.storage.googleapis.com/charts
|
||||
kubeVersion: ">= 1.30.0-0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-istio-gateway
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
KubeZero Umbrella Chart for Istio gateways
|
||||
|
||||
@ -20,8 +20,8 @@ Kubernetes: `>= 1.30.0-0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
|
||||
| https://istio-release.storage.googleapis.com/charts | gateway | 1.24.2 |
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
|
||||
| https://istio-release.storage.googleapis.com/charts | gateway | 1.24.3 |
|
||||
|
||||
## Values
|
||||
|
||||
@ -32,8 +32,8 @@ Kubernetes: `>= 1.30.0-0`
|
||||
| gateway.autoscaling.maxReplicas | int | `4` | |
|
||||
| gateway.autoscaling.minReplicas | int | `1` | |
|
||||
| gateway.autoscaling.targetCPUUtilizationPercentage | int | `80` | |
|
||||
| gateway.minReadySeconds | int | `120` | |
|
||||
| gateway.podAnnotations."proxy.istio.io/config" | string | `"{ \"terminationDrainDuration\": \"20s\" }"` | |
|
||||
| gateway.minReadySeconds | int | `10` | |
|
||||
| gateway.podAnnotations."proxy.istio.io/config" | string | `"{ \"terminationDrainDuration\": \"90s\" }"` | |
|
||||
| gateway.replicaCount | int | `1` | |
|
||||
| gateway.resources.limits.memory | string | `"512Mi"` | |
|
||||
| gateway.resources.requests.cpu | string | `"50m"` | |
|
||||
|
@ -1,5 +1,5 @@
|
||||
apiVersion: v2
|
||||
appVersion: 1.24.2
|
||||
appVersion: 1.24.3
|
||||
description: Helm chart for deploying Istio gateways
|
||||
icon: https://istio.io/latest/favicons/android-192x192.png
|
||||
keywords:
|
||||
@ -9,4 +9,4 @@ name: gateway
|
||||
sources:
|
||||
- https://github.com/istio/istio
|
||||
type: application
|
||||
version: 1.24.2
|
||||
version: 1.24.3
|
||||
|
@ -77,7 +77,7 @@ spec:
|
||||
allowPrivilegeEscalation: false
|
||||
privileged: false
|
||||
readOnlyRootFilesystem: true
|
||||
{{- if not (eq .Values.platform "openshift") }}
|
||||
{{- if not (eq (.Values.platform | default "") "openshift") }}
|
||||
runAsUser: 1337
|
||||
runAsGroup: 1337
|
||||
{{- end }}
|
||||
|
@ -49,7 +49,7 @@ Finally, we can set all of that under .Values so the chart behaves without aware
|
||||
{{- $a := mustMergeOverwrite $defaults $profile }}
|
||||
{{- end }}
|
||||
# Flatten globals, if defined on a per-chart basis
|
||||
{{- if false }}
|
||||
{{- if true }}
|
||||
{{- $a := mustMergeOverwrite $defaults ($profile.global) ($.Values.global | default dict) }}
|
||||
{{- end }}
|
||||
{{- $b := set $ "Values" (mustMergeOverwrite $defaults $.Values) }}
|
||||
|
@ -32,6 +32,7 @@ spec:
|
||||
use_remote_address: true
|
||||
normalize_path: true
|
||||
merge_slashes: true
|
||||
preserve_external_request_id: {{ .Values.hardening.preserveExternalRequestId }}
|
||||
{{- if .Values.hardening.unescapeSlashes }}
|
||||
path_with_escaped_slashes_action: UNESCAPE_AND_REDIRECT
|
||||
{{- end }}
|
||||
|
@ -43,3 +43,4 @@ proxyProtocol: true
|
||||
hardening:
|
||||
rejectUnderscoresHeaders: true
|
||||
unescapeSlashes: true
|
||||
preserveExternalRequestId: false
|
||||
|
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-istio
 description: KubeZero Umbrella Chart for Istio
 type: application
-version: 0.24.2
+version: 0.24.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -13,16 +13,20 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
+  - name: envoy-ratelimit
+    version: 0.1.2
+    repository: https://cdn.zero-downtime.net/charts/
+    condition: envoy-ratelimit.enabled
   - name: base
-    version: 1.24.2
+    version: 1.24.3
     repository: https://istio-release.storage.googleapis.com/charts
   - name: istiod
-    version: 1.24.2
+    version: 1.24.3
     repository: https://istio-release.storage.googleapis.com/charts
   - name: kiali-server
-    version: "2.5.0"
+    version: "2.6.0"
     repository: https://kiali.org/helm-charts
     condition: kiali-server.enabled
 kubeVersion: ">= 1.30.0-0"
|
@ -1,6 +1,6 @@
|
||||
# kubezero-istio
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
KubeZero Umbrella Chart for Istio
|
||||
|
||||
@ -20,15 +20,27 @@ Kubernetes: `>= 1.30.0-0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
|
||||
| https://istio-release.storage.googleapis.com/charts | base | 1.24.2 |
|
||||
| https://istio-release.storage.googleapis.com/charts | istiod | 1.24.2 |
|
||||
| https://kiali.org/helm-charts | kiali-server | 2.5.0 |
|
||||
| https://cdn.zero-downtime.net/charts/ | envoy-ratelimit | 0.1.2 |
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
|
||||
| https://istio-release.storage.googleapis.com/charts | base | 1.24.3 |
|
||||
| https://istio-release.storage.googleapis.com/charts | istiod | 1.24.3 |
|
||||
| https://kiali.org/helm-charts | kiali-server | 2.6.0 |
|
||||
|
||||
## Values
|
||||
|
||||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| envoy-ratelimit.descriptors.ingress[0].key | string | `"remote_address"` | |
|
||||
| envoy-ratelimit.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
|
||||
| envoy-ratelimit.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
|
||||
| envoy-ratelimit.descriptors.privateIngress[0].key | string | `"remote_address"` | |
|
||||
| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
|
||||
| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
|
||||
| envoy-ratelimit.enabled | bool | `false` | |
|
||||
| envoy-ratelimit.failureModeDeny | bool | `false` | |
|
||||
| envoy-ratelimit.localCacheSize | int | `1048576` | |
|
||||
| envoy-ratelimit.log.format | string | `"json"` | |
|
||||
| envoy-ratelimit.log.level | string | `"warn"` | |
|
||||
| global.defaultPodDisruptionBudget.enabled | bool | `false` | |
|
||||
| global.logAsJson | bool | `true` | |
|
||||
| global.variant | string | `"distroless"` | |
|
||||
@ -50,17 +62,6 @@ Kubernetes: `>= 1.30.0-0`
|
||||
| kiali-server.istio.enabled | bool | `false` | |
|
||||
| kiali-server.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
|
||||
| kiali-server.server.metrics_enabled | bool | `false` | |
|
||||
| rateLimiting.descriptors.ingress[0].key | string | `"remote_address"` | |
|
||||
| rateLimiting.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
|
||||
| rateLimiting.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
|
||||
| rateLimiting.descriptors.privateIngress[0].key | string | `"remote_address"` | |
|
||||
| rateLimiting.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
|
||||
| rateLimiting.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
|
||||
| rateLimiting.enabled | bool | `false` | |
|
||||
| rateLimiting.failureModeDeny | bool | `false` | |
|
||||
| rateLimiting.localCacheSize | int | `1048576` | |
|
||||
| rateLimiting.log.format | string | `"json"` | |
|
||||
| rateLimiting.log.level | string | `"warn"` | |
|
||||
|
||||
## Resources
|
||||
|
||||
|
@@ -1,106 +0,0 @@
{{- if .Values.rateLimiting.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: ratelimit-statsd-exporter-config
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
data:
  config.yaml: |
    defaults:
      ttl: 1m # Resets the metrics every minute
    mappings:
      - match:
          "ratelimit.service.rate_limit.*.*.near_limit"
        name: "ratelimit_service_rate_limit_near_limit"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
      - match:
          "ratelimit.service.rate_limit.*.*.over_limit"
        name: "ratelimit_service_rate_limit_over_limit"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
      - match:
          "ratelimit.service.rate_limit.*.*.total_hits"
        name: "ratelimit_service_rate_limit_total_hits"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
      - match:
          "ratelimit.service.rate_limit.*.*.within_limit"
        name: "ratelimit_service_rate_limit_within_limit"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
      - match:
          "ratelimit.service.rate_limit.*.*.*.near_limit"
        name: "ratelimit_service_rate_limit_near_limit"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
          key2: "$3"
      - match:
          "ratelimit.service.rate_limit.*.*.*.over_limit"
        name: "ratelimit_service_rate_limit_over_limit"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
          key2: "$3"
      - match:
          "ratelimit.service.rate_limit.*.*.*.total_hits"
        name: "ratelimit_service_rate_limit_total_hits"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
          key2: "$3"
      - match:
          "ratelimit.service.rate_limit.*.*.*.within_limit"
        name: "ratelimit_service_rate_limit_within_limit"
        timer_type: "histogram"
        labels:
          domain: "$1"
          key1: "$2"
          key2: "$3"
      - match:
          "ratelimit.service.call.should_rate_limit.*"
        name: "ratelimit_service_should_rate_limit_error"
        match_metric_type: counter
        labels:
          err_type: "$1"
      - match:
          "ratelimit_server.*.total_requests"
        name: "ratelimit_service_total_requests"
        match_metric_type: counter
        labels:
          grpc_method: "$1"
      - match:
          "ratelimit_server.*.response_time"
        name: "ratelimit_service_response_time_seconds"
        timer_type: histogram
        labels:
          grpc_method: "$1"
      - match:
          "ratelimit.service.config_load_success"
        name: "ratelimit_service_config_load_success"
        match_metric_type: counter
        ttl: 3m
      - match:
          "ratelimit.service.config_load_error"
        name: "ratelimit_service_config_load_error"
        match_metric_type: counter
        ttl: 3m
      - match: "."
        match_type: "regex"
        action: "drop"
        name: "dropped"
{{- end }}
@@ -1,154 +0,0 @@
{{- if .Values.rateLimiting.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: ratelimit-redis
  namespace: {{ .Release.Namespace }}
  labels:
    app: ratelimit-redis
spec:
  ports:
  - name: redis
    port: 6379
  selector:
    app: ratelimit-redis
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ratelimit-redis
  namespace: {{ .Release.Namespace }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ratelimit-redis
  template:
    metadata:
      labels:
        app: ratelimit-redis
    spec:
      containers:
      - image: redis:6-alpine
        imagePullPolicy: IfNotPresent
        name: redis
        ports:
        - name: redis
          containerPort: 6379
      restartPolicy: Always
      serviceAccountName: ""
---
apiVersion: v1
kind: Service
metadata:
  name: ratelimit
  namespace: {{ .Release.Namespace }}
  labels:
    app: ratelimit
spec:
  ports:
  #- name: http-port
  #  port: 8080
  #  targetPort: 8080
  #  protocol: TCP
  - name: grpc-port
    port: 8081
    targetPort: 8081
    protocol: TCP
  #- name: http-debug
  #  port: 6070
  #  targetPort: 6070
  #  protocol: TCP
  - name: http-monitoring
    port: 9102
    targetPort: 9102
    protocol: TCP
  selector:
    app: ratelimit
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ratelimit
  namespace: {{ .Release.Namespace }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ratelimit
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: ratelimit
    spec:
      containers:
      - image: envoyproxy/ratelimit:b42701cb # 2021/08/12
        imagePullPolicy: IfNotPresent
        name: ratelimit
        command: ["/bin/ratelimit"]
        env:
        - name: LOG_LEVEL
          value: {{ default "WARN" .Values.rateLimiting.log.level }}
        - name: LOG_FORMAT
          value: {{ default "text" .Values.rateLimiting.log.format }}
        - name: REDIS_SOCKET_TYPE
          value: tcp
        - name: REDIS_URL
          value: ratelimit-redis:6379
        - name: USE_STATSD
          value: "true"
        - name: STATSD_HOST
          value: "localhost"
        - name: STATSD_PORT
          value: "9125"
        - name: RUNTIME_ROOT
          value: /data
        - name: RUNTIME_SUBDIRECTORY
          value: ratelimit
        - name: RUNTIME_WATCH_ROOT
          value: "false"
        - name: RUNTIME_IGNOREDOTFILES
          value: "true"
        - name: LOCAL_CACHE_SIZE_IN_BYTES
          value: "{{ default 0 .Values.rateLimiting.localCacheSize | int }}"
        ports:
        #- containerPort: 8080
        - containerPort: 8081
        #- containerPort: 6070
        volumeMounts:
        - name: ratelimit-config
          mountPath: /data/ratelimit/config
        resources:
          requests:
            cpu: 50m
            memory: 32Mi
          limits:
            cpu: 1
            memory: 256Mi
      - name: statsd-exporter
        image: docker.io/prom/statsd-exporter:v0.21.0
        imagePullPolicy: Always
        args: ["--statsd.mapping-config=/etc/statsd-exporter/config.yaml"]
        ports:
        - containerPort: 9125
        # - containerPort: 9102
        resources:
          requests:
            cpu: 50m
            memory: 32Mi
          limits:
            cpu: 200m
            memory: 64Mi
        volumeMounts:
        - name: statsd-exporter-config
          mountPath: /etc/statsd-exporter
      volumes:
      - name: ratelimit-config
        configMap:
          name: ratelimit-config
      - name: statsd-exporter-config
        configMap:
          name: ratelimit-statsd-exporter-config
{{- end }}
@@ -56,29 +56,7 @@ kiali-server:
   #url: "kiali.example.com"


-rateLimiting:
+# for available options see envoy-ratelimit chart
+envoy-ratelimit:
   enabled: false
-
-  log:
-    level: warn
-    format: json
-
-  # 1MB local cache for already reached limits to reduce calls to Redis
-  localCacheSize: 1048576
-
-  # Whether to block requests if ratelimiting is down
-  failureModeDeny: false
-
-  # rate limit descriptors for each domain, examples 10 req/s per sourceIP
-  descriptors:
-    ingress:
-    - key: remote_address
-      rate_limit:
-        unit: second
-        requests_per_unit: 10
-
-    privateIngress:
-    - key: remote_address
-      rate_limit:
-        unit: second
-        requests_per_unit: 10
@@ -10,4 +10,4 @@ keywords:
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
-kubeVersion: ">= 1.30.0"
+kubeVersion: ">= 1.30.0-0"
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-logging
|
||||
description: KubeZero Umbrella Chart for complete EFK stack
|
||||
type: application
|
||||
version: 0.8.13
|
||||
version: 0.8.14
|
||||
appVersion: 1.6.0
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
@ -17,7 +17,7 @@ maintainers:
|
||||
email: stefan@zero-downtime.net
|
||||
dependencies:
|
||||
- name: kubezero-lib
|
||||
version: ">= 0.1.6"
|
||||
version: 0.2.1
|
||||
repository: https://cdn.zero-downtime.net/charts/
|
||||
- name: fluentd
|
||||
version: 0.5.2
|
||||
|
@@ -1,6 +1,6 @@
 # kubezero-logging

-  
+  

 KubeZero Umbrella Chart for complete EFK stack

@@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
 | https://fluent.github.io/helm-charts | fluentd | 0.5.2 |

@@ -135,7 +135,7 @@ Kubernetes: `>= 1.26.0`
 | kibana.istio.enabled | bool | `false` | |
 | kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
 | kibana.istio.url | string | `""` | |
-| version | string | `"7.17.3"` | |
+| version | string | `"7.17.7"` | |

 ## Resources:
@@ -2,7 +2,7 @@
 # fullnameOverride: ""

 # Version for ElasticSearch and Kibana have to match so we define it at top-level
-version: 7.17.3
+version: 7.17.7

 elastic_password: "" # super_secret_elastic_password
@@ -16,7 +16,7 @@ maintainers:
 # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: kube-prometheus-stack
     version: 69.2.3
@@ -62,12 +62,8 @@ kube-prometheus-stack:
       memory: 128Mi

   admissionWebhooks:
-    patch:
-      tolerations:
-      - key: node-role.kubernetes.io/control-plane
-        effect: NoSchedule
-      nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
+    certManager:
+      enabled: true

 nodeExporter:
   enabled: true
@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
     version: 1.2.2
@@ -16,7 +16,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
     version: 1.16.6
@@ -17,7 +17,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: opensearch-operator
     version: 2.7.0
@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: mariadb-galera
     version: 14.0.10
@@ -17,7 +17,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: lvm-localpv
     version: 1.6.2
@@ -16,7 +16,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.6"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: opentelemetry-collector
     version: 0.108.0
@@ -13,6 +13,6 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.2.1"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts
 kubeVersion: ">= 1.31.0-0"
|
@ -9,6 +9,10 @@ metadata:
|
||||
namespace: argocd
|
||||
labels:
|
||||
{{- include "kubezero-lib.labels" . | nindent 4 }}
|
||||
{{- with ( index .Values $name "annotations" ) }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if not ( index .Values $name "retain" ) }}
|
||||
finalizers:
|
||||
- resources-finalizer.argocd.argoproj.io
|
||||
|
@ -42,6 +42,8 @@ external-dns:
|
||||
- "--aws-zone-type=public"
|
||||
- "--aws-zones-cache-duration=1h"
|
||||
env:
|
||||
- name: AWS_REGION
|
||||
value: {{ .Values.global.aws.region }}
|
||||
- name: AWS_ROLE_ARN
|
||||
value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.externalDNS"
|
||||
- name: AWS_WEB_IDENTITY_TOKEN_FILE
|
||||
|
@@ -2,10 +2,22 @@

 argo-cd:
   enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}
-  {{- with index .Values "argo" "argo-cd" "configs" }}

   configs:
+  {{- with index .Values "argo" "argo-cd" "configs" }}
     {{- toYaml . | nindent 4 }}
   {{- end }}

+  params:
+    {{- if not $.Values.global.highAvailable }}
+    # Reduce load on API server on single node control plane
+    controller.status.processors: 2
+    controller.operation.processors: 1
+    controller.kubectl.parallelism.limit: 1
+    {{- else }}
+    controller.status.processors: 8
+    controller.operation.processors: 4
+    controller.kubectl.parallelism.limit: 4
+    {{- end }}
+
   controller:
     metrics:
@@ -28,8 +28,8 @@ kiali-server:
   {{- toYaml . | nindent 2 }}
   {{- end }}

-{{- with .Values.istio.rateLimiting }}
-rateLimiting:
+{{- with index .Values "istio" "envoy-ratelimit" }}
+envoy-ratelimit:
   {{- toYaml . | nindent 2 }}
 {{- end }}
|
@ -64,13 +64,13 @@ storage:
|
||||
istio:
|
||||
enabled: false
|
||||
namespace: istio-system
|
||||
targetRevision: 0.24.2
|
||||
targetRevision: 0.24.3
|
||||
|
||||
istio-ingress:
|
||||
enabled: false
|
||||
chart: kubezero-istio-gateway
|
||||
namespace: istio-ingress
|
||||
targetRevision: 0.24.2
|
||||
targetRevision: 0.24.3
|
||||
gateway:
|
||||
service: {}
|
||||
|
||||
@ -78,7 +78,7 @@ istio-private-ingress:
|
||||
enabled: false
|
||||
chart: kubezero-istio-gateway
|
||||
namespace: istio-ingress
|
||||
targetRevision: 0.24.2
|
||||
targetRevision: 0.24.3
|
||||
gateway:
|
||||
service: {}
|
||||
|
||||
@ -114,7 +114,9 @@ metrics:
|
||||
logging:
|
||||
enabled: false
|
||||
namespace: logging
|
||||
targetRevision: 0.8.13
|
||||
targetRevision: 0.8.14
|
||||
annotations:
|
||||
argocd.argoproj.io/compare-options: ServerSideDiff=false
|
||||
|
||||
argo:
|
||||
enabled: false
|
||||
|
@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.4"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts
   - name: manticoresearch
     version: "5.0.25"
@@ -14,6 +14,6 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
 kubeVersion: ">= 1.20.0"