Compare commits

1 commit: `a4bd7a412f` ... `2fd7bc728a`
````diff
@@ -14,7 +14,7 @@ include .ci/podman.mk
 
 Add subtree to your project:
 ```
-git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
+git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
 ```
 
 
````
```diff
@@ -41,8 +41,7 @@ for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
     _delete = True
     for tag in image["imageTags"]:
         # Look for at least one tag NOT beign a SemVer dev tag
-        # untagged dev builds get tagged as <tag>-g<commit>
-        if "-g" not in tag and "dirty" not in tag:
+        if "-" not in tag:
             _delete = False
     if _delete:
         print("Deleting development image {}".format(image["imageTags"]))
```
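The simplified predicate is broader than the old one: any tag containing a dash now marks a development build, not only the `-g<commit>` and `-dirty` suffixes called out in the removed comment. A quick illustrative comparison of the two checks (tag names invented for the example):

```python
# Illustrative only: compare the old and new dev-tag predicates
# against a few plausible tag shapes.
tags = ["v1.2.3", "v1.2.3-g1a2b3c4", "v1.2.3-dirty", "v1.2.3-rc1"]

for tag in tags:
    old_keep = "-g" not in tag and "dirty" not in tag  # old check
    new_keep = "-" not in tag                          # new check
    print(f"{tag:18} old_keep={old_keep} new_keep={new_keep}")
```

Both versions keep `v1.2.3` and drop the `-g`/`-dirty` tags; the difference is a pre-release such as `v1.2.3-rc1`, which the old check kept and the new one treats as a dev tag.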
```diff
@@ -8,8 +8,8 @@ SHELL := bash
 .PHONY: all # All targets are accessible for user
 .DEFAULT: help # Running Make will run the help target
 
-# Parse version from latest git semver tag, use short commit otherwise
-GIT_TAG ?= $(shell git describe --tags --match v*.*.* --dirty 2>/dev/null || git describe --match="" --always --dirty 2>/dev/null)
+# Parse version from latest git semver tag
+GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 
 TAG ::= $(GIT_TAG)
```
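The new `GIT_TAG` drops `--dirty` and falls back to a bare short commit id rather than a second `git describe`. Both halves can be exercised directly; the output shapes in the comments are the standard `git describe` formats, assuming at least one `v*.*.*` tag exists:

```bash
# with a reachable semver tag: the tag itself, or tag-distance-commit
git describe --tags --match 'v*.*.*'    # e.g. v1.2.3 or v1.2.3-4-g1a2b3c4

# with no matching tag, describe fails and the fallback yields
# only a short commit id, e.g. 1a2b3c4
git describe --tags --match 'v*.*.*' 2>/dev/null || git rev-parse --short HEAD
```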
```diff
@@ -85,7 +85,7 @@ rm-image:
 
 ## some useful tasks during development
 ci-pull-upstream: ## pull latest shared .ci subtree
-	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
+	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
 
 create-repo: ## create new AWS ECR public repository
 	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
```
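Both subtree commands need to reference the same upstream branch: the `git subtree add` in the README above and this `ci-pull-upstream` target now both say `master`. For reference, the full round trip from a consuming repository looks like:

```bash
# one-time import of the shared CI tooling into .ci/
git subtree add --prefix .ci \
  https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash

# later updates, squashed into a single merge commit
git subtree pull --prefix .ci \
  ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash \
  -m "Merge latest ci-tools-lib"
```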
```diff
@@ -17,7 +17,7 @@ post_control_plane_upgrade_cluster() {
   # delete previous root app controlled by kubezero module
   kubectl delete application kubezero-git-sync -n argocd || true
 
-  # only patch appproject to keep SyncWindow in place
+  # Patch appproject to keep SyncWindow in place
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
 }
```
```diff
@@ -111,42 +111,35 @@ post_kubeadm() {
 }
 
 
-# Migrate KubeZero Config to current version
-upgrade_kubezero_config() {
-  # get current values, argo app over cm
-  get_kubezero_values $ARGOCD
-
-  # tumble new config through migrate.py
-  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
-    && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
-
-  update_kubezero_cm
-
-  if [ "$ARGOCD" == "true" ]; then
-    # update argo app
-    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
-    kubectl get application kubezero -n argocd -o yaml | \
-      yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
-      > $WORKDIR/new-argocd-app.yaml
-    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
-  fi
-}
-
-
 # Control plane upgrade
-kubeadm_upgrade() {
+control_plane_upgrade() {
+  CMD=$1
 
   ARGOCD=$(argo_used)
 
   render_kubeadm upgrade
 
-  # Check if we already have all controllers on the current version
-  OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
-
-  # run control plane upgrade
-  if [ "$OLD_CONTROLLERS" != "0" ]; then
-
+  if [[ "$CMD" =~ ^(cluster)$ ]]; then
     pre_control_plane_upgrade_cluster
 
+    # get current values, argo app over cm
+    get_kubezero_values $ARGOCD
+
+    # tumble new config through migrate.py
+    migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
+      && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
+
+    update_kubezero_cm
+
+    if [ "$ARGOCD" == "true" ]; then
+      # update argo app
+      export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
+      kubectl get application kubezero -n argocd -o yaml | \
+        yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
+        > $WORKDIR/new-argocd-app.yaml
+      kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
+    fi
+
     pre_kubeadm
 
     _kubeadm init phase upload-config kubeadm
```
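The block moved into `control_plane_upgrade` updates the Argo CD Application in a single `yq` pass: it splices the migrated values file into `.spec.source.helm.valuesObject` and points `.spec.source.targetRevision` at the chart version. A standalone sketch of that pipeline, with placeholder file names instead of the `$WORKDIR` paths:

```bash
# placeholder version; the script reads it from the kubezero chart
export kubezero_chart_version="1.2.3"

kubectl get application kubezero -n argocd -o yaml | \
  yq '.spec.source.helm.valuesObject |= load("kubezero-values.yaml")
      | .spec.source.targetRevision = strenv(kubezero_chart_version)' \
  > new-argocd-app.yaml
```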
```diff
@@ -162,8 +155,7 @@ kubeadm_upgrade() {
 
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."
 
-  # All controllers already on current version
-  else
+  elif [[ "$CMD" =~ ^(final)$ ]]; then
     pre_cluster_upgrade_final
 
     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
```
```diff
@@ -419,8 +411,12 @@ for t in $@; do
     bootstrap) control_plane_node bootstrap;;
     join) control_plane_node join;;
     restore) control_plane_node restore;;
-    upgrade_control_plane) kubeadm_upgrade;;
-    upgrade_kubezero) upgrade_kubezero_config;;
+    kubeadm_upgrade)
+      control_plane_upgrade cluster
+      ;;
+    finalize_cluster_upgrade)
+      control_plane_upgrade final
+      ;;
     apply_*)
       ARGOCD=$(argo_used)
       apply_module "${t##apply_}";;
```
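With this dispatcher the old `upgrade_control_plane`/`upgrade_kubezero` pair collapses into two self-contained tasks. Hypothetical invocations (the admin script's file name is not shown in this diff):

```bash
# full control-plane upgrade, including the kubezero values migration
./kubezero.sh kubeadm_upgrade

# controllers already on the target version: run only the final phase
./kubezero.sh finalize_cluster_upgrade
```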
```diff
@@ -80,19 +80,6 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }
 
-function ensure_kubezero_secret_key() {
-  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
-  local key=""
-  local val=""
-
-  for key in $@; do
-    val=$(echo "$secret" | yq ".data.\"$key\"")
-    if [ "$val" == "null" ]; then
-      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
-    fi
-  done
-}
-
 function set_kubezero_secret() {
   local key="$1"
```
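The removed helper guaranteed that each dotted key existed in the `kubezero-secrets` Secret, adding an empty `data` entry when missing. Should that behavior still be needed ad hoc, its core was equivalent to the following one-key sketch (the key is taken from the call site removed further down):

```bash
# one-off equivalent of the removed ensure_kubezero_secret_key
key="argo-cd.kubezero.username"
val=$(kubectl get secret -n kubezero kubezero-secrets -o yaml | yq ".data.\"$key\"")
if [ "$val" == "null" ]; then
  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
fi
```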
```diff
@@ -353,7 +340,7 @@ EOF
 }
 
 
-function admin_job() {
+function control_plane_upgrade() {
   TASKS="$1"
 
   [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
```
```diff
@@ -363,7 +350,7 @@ function admin_job() {
 apiVersion: v1
 kind: Pod
 metadata:
-  name: kubezero-admin-job
+  name: kubezero-upgrade
   namespace: kube-system
   labels:
     app: kubezero-upgrade
```
```diff
@@ -408,10 +395,10 @@ spec:
   restartPolicy: Never
 EOF
 
-  kubectl wait pod kubezero-admin-job -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
+  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
   while true; do
-    kubectl logs kubezero-admin-job -n kube-system -f 2>/dev/null && break
+    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
     sleep 3
   done
-  kubectl delete pod kubezero-admin-job -n kube-system
+  kubectl delete pod kubezero-upgrade -n kube-system
 }
```
```diff
@@ -15,28 +15,37 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 ARGOCD=$(argo_used)
 
 echo "Checking that all pods in kube-system are running ..."
-waitSystemPodsRunning
+#waitSystemPodsRunning
 
 [ "$ARGOCD" == "true" ] && disable_argo
 
-admin_job "upgrade_control_plane, upgrade_kubezero"
+# Check if we already have all controllers on the current version
+#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
 
+if [ "$OLD_CONTROLLERS" == "0" ]; then
+  # All controllers already on current version
+  control_plane_upgrade finalize_cluster_upgrade
+else
+  # Otherwise run control plane upgrade
+  control_plane_upgrade kubeadm_upgrade
+fi
+
+echo "<Return> to continue"
+read -r
 
 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
 #[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
 
-#echo "<Return> to continue"
-#read -r
-
 # upgrade modules
-admin_job "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
+control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
 
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
 
 echo "Applying remaining KubeZero modules..."
 
-admin_job "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
+control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
 
 # we replace the project during v1.31 so disable again
 [ "$ARGOCD" == "true" ] && disable_argo
```
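Note that with the `OLD_CONTROLLERS` assignment commented out, the variable stays unset, so the `== "0"` test can never succeed and the script always takes the `kubeadm_upgrade` branch. The shell semantics are easy to confirm:

```bash
unset OLD_CONTROLLERS
[ "$OLD_CONTROLLERS" == "0" ] && echo finalize || echo kubeadm_upgrade
# -> kubeadm_upgrade
```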
```diff
@@ -51,12 +60,6 @@ while true; do
   sleep 1
 done
 
-echo "Once all controller nodes are running on $KUBE_VERSION, <return> to continue"
-read -r
-
-# Final control plane upgrades
-admin_job "upgrade_control_plane"
-
 echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
 echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."
 
```
```diff
@@ -3,7 +3,7 @@ name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
 version: 0.8.14
-appVersion: v1.31
+appVersion: v1.30
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
```
```diff
@@ -21,6 +21,3 @@ fi
 # Redis secret
 kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
   --from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
-
-# required keys in kubezero-secrets, as --ignore-missing-values in helm-secrets doesnt work with vals ;-(
-ensure_kubezero_secret_key argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey
```
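For reference, the `auth` literal expands to a 16-character string derived from the current epoch second. It is convenient, but not cryptographically random, since the input space is roughly one value per second:

```bash
# what --from-literal=auth=... evaluates to
date +%s | sha256sum | base64 | head -c 16 ; echo
```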
```diff
@@ -25,7 +25,7 @@ argo-events:
   # do NOT use -alpine tag as the entrypoint differs
   versions:
     - version: 2.10.11
-      natsImage: nats:2.11.1-scratch
+      natsImage: nats:2.10.11-scratch
       metricsExporterImage: natsio/prometheus-nats-exporter:0.16.0
       configReloaderImage: natsio/nats-server-config-reloader:0.14.1
       startCommand: /nats-server
```
```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-cache
 description: KubeZero Cache module
 type: application
-version: 0.1.1
+version: 0.1.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
```
```diff
@@ -17,11 +17,11 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: redis
-    version: 20.11.5
+    version: 20.0.3
     repository: https://charts.bitnami.com/bitnami
     condition: redis.enabled
   - name: redis-cluster
-    version: 11.5.0
+    version: 11.0.2
     repository: https://charts.bitnami.com/bitnami
     condition: redis-cluster.enabled
 
```
```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-graph
 description: KubeZero GraphQL and GraphDB
 type: application
-version: 0.1.1
+version: 0.1.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
```
```diff
@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: neo4j
-    version: 2025.3.0
+    version: 5.26.0
     repository: https://helm.neo4j.com/neo4j
     condition: neo4j.enabled
 
```
```diff
@@ -1,6 +1,6 @@
 # kubezero-graph
 
-
+
 
 KubeZero GraphQL and GraphDB
 
```
```diff
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.29.0-0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://helm.neo4j.com/neo4j | neo4j | 2025.3.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.2.1 |
+| https://helm.neo4j.com/neo4j | neo4j | 5.26.0 |
 
 ## Values
 
```
```diff
@@ -28,8 +28,6 @@ Kubernetes: `>= 1.29.0-0`
 | neo4j.disableLookups | bool | `true` | |
 | neo4j.enabled | bool | `false` | |
 | neo4j.neo4j.name | string | `"test-db"` | |
-| neo4j.neo4j.password | string | `"secret"` | |
-| neo4j.neo4j.passwordFromSecret | string | `"neo4j-admin"` | |
 | neo4j.serviceMonitor.enabled | bool | `false` | |
 | neo4j.services.neo4j.enabled | bool | `false` | |
 | neo4j.volumes.data.mode | string | `"defaultStorageClass"` | |
```
```diff
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-mq
 description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 type: application
-version: 0.3.11
+version: 0.3.10
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
```
```diff
@@ -17,11 +17,11 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
-    version: 1.3.3
+    version: 1.2.2
     repository: https://nats-io.github.io/k8s/helm/charts/
     condition: nats.enabled
   - name: rabbitmq
-    version: 14.7.0
+    version: 14.6.6
     repository: https://charts.bitnami.com/bitnami
     condition: rabbitmq.enabled
 kubeVersion: ">= 1.26.0"
```
```diff
@@ -1,6 +1,6 @@
 # kubezero-mq
 
-
+
 
 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 
```
```diff
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.26.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
-| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://charts.bitnami.com/bitnami | rabbitmq | 14.6.6 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.2.2 |
 
 ## Values
 
```
```diff
@@ -34,6 +34,13 @@ Kubernetes: `>= 1.26.0`
 | nats.natsBox.enabled | bool | `false` | |
 | nats.promExporter.enabled | bool | `false` | |
 | nats.promExporter.podMonitor.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
+| rabbitmq-cluster-operator.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.msgTopologyOperator.metrics.enabled | bool | `false` | |
+| rabbitmq-cluster-operator.msgTopologyOperator.metrics.serviceMonitor.enabled | bool | `true` | |
+| rabbitmq-cluster-operator.rabbitmqImage.tag | string | `"3.11.4-debian-11-r0"` | |
+| rabbitmq-cluster-operator.useCertManager | bool | `true` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.existingPasswordSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.tls.enabled | bool | `false` | |
```
```diff
@@ -1,4 +1,4 @@
-{{- if .Values.nats.promExporter.podMonitor.enabled }}
+{{- if .Values.nats.exporter.serviceMonitor.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
```
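The ConfigMap above is now gated on `nats.exporter.serviceMonitor.enabled` rather than `nats.promExporter.podMonitor.enabled`. Whether it renders can be checked locally with `helm template`; a sketch, assuming the chart directory is the working directory:

```bash
# render the chart with the new gate switched on
helm template kubezero-mq . --set nats.exporter.serviceMonitor.enabled=true
```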
```diff
@@ -6,12 +6,6 @@ nats:
   jetstream:
     enabled: true
 
-  podTemplate:
-    topologySpreadConstraints:
-      kubernetes.io/hostname:
-        maxSkew: 1
-        whenUnsatisfiable: DoNotSchedule
-
   natsBox:
     enabled: false
```