Compare commits

..

20 Commits

Author SHA1 Message Date
a4bd7a412f chore(deps): update kubezero-addons-dependencies 2025-04-18 03:02:57 +00:00
c675e7aa1b Merge latest ci-tools-lib 2025-04-17 23:00:48 +00:00
00daef3b0b Squashed '.ci/' changes from a5cd89d7..9725c2ef
9725c2ef fix: ensure we dont remove rc builds

git-subtree-dir: .ci
git-subtree-split: 9725c2ef8842467951ec60adb1b45dfeca7618f5
2025-04-17 23:00:48 +00:00
9bb0e0e91a feat: reorg cluster upgrade scripts to allow support for KubeZero only clusters like GKE 2025-04-17 22:42:39 +00:00
e022db091c Squashed '.ci/' changes from 15e4d1f5..a5cd89d7
a5cd89d7 feat: improve tag parsing, ensure dirty is added if needed

git-subtree-dir: .ci
git-subtree-split: a5cd89d73157c829eaf12f91a68f73826fbb35e7
2025-04-17 22:37:10 +00:00
da2510c8df Merge latest ci-tools-lib 2025-04-17 22:37:10 +00:00
c4aab252e8 Squashed '.ci/' changes from a3928364..15e4d1f5
15e4d1f5 ci: make work with main branch
3feaf6fa chore: migrate to main branch

git-subtree-dir: .ci
git-subtree-split: 15e4d1f589c8e055944b2a4b58a9a50728e245b4
2025-04-17 22:00:32 +00:00
0a813c525c Merge latest ci-tools-lib 2025-04-17 22:00:32 +00:00
9d28705079 docs: typos 2025-04-17 12:08:06 +00:00
024a0fcfaf feat: ensure central secret keys exists 2025-04-17 13:06:28 +01:00
f88d6a2f0d docs: update 2025-04-17 10:35:40 +00:00
2c47a28e10 feat: MQ various version bumps 2025-04-17 10:35:13 +00:00
dfdf50f85f Merge pull request 'chore(deps): update kubezero-mq-dependencies' (#8) from renovate/kubezero-mq-kubezero-mq-dependencies into main
Reviewed-on: #8
2025-04-14 13:03:42 +00:00
d4ba1d1a01 chore(deps): update kubezero-mq-dependencies 2025-04-14 13:03:42 +00:00
fa06c13805 Merge pull request 'chore(deps): update nats docker tag to v2.11.1' (#9) from renovate/nats-2.x into main
Reviewed-on: #9
2025-04-14 13:03:18 +00:00
7f2208fea4 chore(deps): update nats docker tag to v2.11.1 2025-04-14 13:03:18 +00:00
c427e73f79 Merge pull request 'chore(deps): update kubezero-cache-dependencies' (#42) from renovate/kubezero-cache-kubezero-cache-dependencies into main
Reviewed-on: #42
2025-04-14 12:26:09 +00:00
2fd775624b chore(deps): update kubezero-cache-dependencies 2025-04-14 12:26:09 +00:00
ffaf037483 Merge pull request 'chore(deps): update helm release neo4j to v2025' (#62) from renovate/kubezero-graph-major-kubezero-graph-dependencies into main
Reviewed-on: #62
2025-04-14 11:40:03 +00:00
0664b2bed3 chore(deps): update helm release neo4j to v2025 2025-04-14 11:40:03 +00:00
17 changed files with 100 additions and 81 deletions

View File

@@ -14,7 +14,7 @@ include .ci/podman.mk
Add subtree to your project:
```
git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
```
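Once the subtree exists, later updates are pulled against the renamed `main` branch in the same way; a minimal example mirroring the `ci-pull-upstream` Makefile target further down (the https URL is used here for illustration, the target itself goes over ssh):
```
git subtree pull --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
```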

View File

@@ -41,7 +41,8 @@ for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
_delete = True
for tag in image["imageTags"]:
# Look for at least one tag NOT beign a SemVer dev tag
if "-" not in tag:
# untagged dev builds get tagged as <tag>-g<commit>
if "-g" not in tag and "dirty" not in tag:
_delete = False
if _delete:
print("Deleting development image {}".format(image["imageTags"]))

View File

@@ -8,8 +8,8 @@ SHELL := bash
.PHONY: all # All targets are accessible for user
.DEFAULT: help # Running Make will run the help target
# Parse version from latest git semver tag
GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
# Parse version from latest git semver tag, use short commit otherwise
GIT_TAG ?= $(shell git describe --tags --match v*.*.* --dirty 2>/dev/null || git describe --match="" --always --dirty 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
TAG ::= $(GIT_TAG)
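The new `GIT_TAG` expression resolves to a semver tag when one is reachable and otherwise falls back to the short commit hash, with `-dirty` appended if the work tree has local changes; for example (outputs are illustrative):
```
git describe --tags --match "v*.*.*" --dirty   # e.g. v1.2.3, v1.2.3-2-g9725c2e or v1.2.3-dirty
git describe --match="" --always --dirty       # fallback: e.g. 9725c2e or 9725c2e-dirty
```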
@@ -85,7 +85,7 @@ rm-image:
## some useful tasks during development
ci-pull-upstream: ## pull latest shared .ci subtree
git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
create-repo: ## create new AWS ECR public repository
aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)

View File

@@ -17,7 +17,7 @@ post_control_plane_upgrade_cluster() {
# delete previous root app controlled by kubezero module
kubectl delete application kubezero-git-sync -n argocd || true
# Patch appproject to keep SyncWindow in place
# only patch appproject to keep SyncWindow in place
kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
}
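The two JSON patches remove only the metadata labels and annotations of the kubezero AppProject, so its configured sync windows stay in place; a quick way to confirm that afterwards (a sketch, assuming yq v4 is available):
```
kubectl get appproject kubezero -n argocd -o yaml | yq '.spec.syncWindows'
```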

View File

@@ -111,35 +111,42 @@ post_kubeadm() {
}
# Control plane upgrade
control_plane_upgrade() {
CMD=$1
# Migrate KubeZero Config to current version
upgrade_kubezero_config() {
# get current values, argo app over cm
get_kubezero_values $ARGOCD
# tumble new config through migrate.py
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
&& mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
update_kubezero_cm
if [ "$ARGOCD" == "true" ]; then
# update argo app
export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
kubectl get application kubezero -n argocd -o yaml | \
yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
> $WORKDIR/new-argocd-app.yaml
kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
fi
}
# Control plane upgrade
kubeadm_upgrade() {
ARGOCD=$(argo_used)
render_kubeadm upgrade
if [[ "$CMD" =~ ^(cluster)$ ]]; then
# Check if we already have all controllers on the current version
OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
# run control plane upgrade
if [ "$OLD_CONTROLLERS" != "0" ]; then
pre_control_plane_upgrade_cluster
# get current values, argo app over cm
get_kubezero_values $ARGOCD
# tumble new config through migrate.py
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
&& mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
update_kubezero_cm
if [ "$ARGOCD" == "true" ]; then
# update argo app
export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
kubectl get application kubezero -n argocd -o yaml | \
yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
> $WORKDIR/new-argocd-app.yaml
kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
fi
pre_kubeadm
_kubeadm init phase upload-config kubeadm
@@ -155,7 +162,8 @@ control_plane_upgrade() {
echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."
elif [[ "$CMD" =~ ^(final)$ ]]; then
# All controllers already on current version
else
pre_cluster_upgrade_final
# Finally upgrade addons last, with 1.32 we can ONLY call addon phase
@@ -411,12 +419,8 @@ for t in $@; do
bootstrap) control_plane_node bootstrap;;
join) control_plane_node join;;
restore) control_plane_node restore;;
kubeadm_upgrade)
control_plane_upgrade cluster
;;
finalize_cluster_upgrade)
control_plane_upgrade final
;;
upgrade_control_plane) kubeadm_upgrade;;
upgrade_kubezero) upgrade_kubezero_config;;
apply_*)
ARGOCD=$(argo_used)
apply_module "${t##apply_}";;
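The former `kubeadm_upgrade`/`finalize_cluster_upgrade` tasks are folded into `upgrade_control_plane`, which now detects on its own whether any controller still runs the old version, while the KubeZero config migration becomes the separate `upgrade_kubezero` task. A sketch of driving the new entry points directly (the wrapper name `kubezero.sh` is assumed; in the real flow the tasks are passed to `admin_job`):
```
./kubezero.sh upgrade_control_plane        # kubeadm-based control plane upgrade
./kubezero.sh upgrade_kubezero             # migrate kubezero-values and the Argo app
./kubezero.sh apply_network apply_addons   # per-module apply via the apply_* tasks
```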

View File

@@ -80,6 +80,19 @@ function get_kubezero_secret() {
get_secret_val kubezero kubezero-secrets "$1"
}
function ensure_kubezero_secret_key() {
local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
local key=""
local val=""
for key in $@; do
val=$(echo "$secret" | yq ".data.\"$key\"")
if [ "$val" == "null" ]; then
kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
fi
done
}
function set_kubezero_secret() {
local key="$1"
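`ensure_kubezero_secret_key` only adds a missing key with an empty value and leaves existing values untouched; a hedged way to verify the result for one of the keys seeded later during the ArgoCD bootstrap (assumes yq v4):
```
kubectl get secret kubezero-secrets -n kubezero -o yaml | \
  yq '.data | has("argo-cd.kubezero.username")'   # prints true once the key exists
```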
@@ -340,7 +353,7 @@ EOF
}
function control_plane_upgrade() {
function admin_job() {
TASKS="$1"
[ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
@@ -350,7 +363,7 @@ function control_plane_upgrade() {
apiVersion: v1
kind: Pod
metadata:
name: kubezero-upgrade
name: kubezero-admin-job
namespace: kube-system
labels:
app: kubezero-upgrade
@@ -395,10 +408,10 @@ spec:
restartPolicy: Never
EOF
kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
kubectl wait pod kubezero-admin-job -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
while true; do
kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
kubectl logs kubezero-admin-job -n kube-system -f 2>/dev/null && break
sleep 3
done
kubectl delete pod kubezero-upgrade -n kube-system
kubectl delete pod kubezero-admin-job -n kube-system
}

View File

@@ -15,37 +15,28 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
ARGOCD=$(argo_used)
echo "Checking that all pods in kube-system are running ..."
#waitSystemPodsRunning
waitSystemPodsRunning
[ "$ARGOCD" == "true" ] && disable_argo
# Check if we already have all controllers on the current version
#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
if [ "$OLD_CONTROLLERS" == "0" ]; then
# All controllers already on current version
control_plane_upgrade finalize_cluster_upgrade
else
# Otherwise run control plane upgrade
control_plane_upgrade kubeadm_upgrade
fi
echo "<Return> to continue"
read -r
admin_job "upgrade_control_plane, upgrade_kubezero"
#echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
#[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
#echo "<Return> to continue"
#read -r
# upgrade modules
control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
admin_job "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
echo "Applying remaining KubeZero modules..."
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
admin_job "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
# we replace the project during v1.31 so disable again
[ "$ARGOCD" == "true" ] && disable_argo
@@ -60,6 +51,12 @@ while true; do
sleep 1
done
echo "Once all controller nodes are running on $KUBE_VERSION, <return> to continue"
read -r
# Final control plane upgrades
admin_job "upgrade_control_plane"
echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."

View File

@@ -3,7 +3,7 @@ name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.8.14
appVersion: v1.30
appVersion: v1.31
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -21,3 +21,6 @@ fi
# Redis secret
kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
--from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
# required keys in kubezero-secrets, as --ignore-missing-values in helm-secrets doesnt work with vals ;-(
ensure_kubezero_secret_key argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey

View File

@@ -25,7 +25,7 @@ argo-events:
# do NOT use -alpine tag as the entrypoint differs
versions:
- version: 2.10.11
natsImage: nats:2.10.11-scratch
natsImage: nats:2.11.1-scratch
metricsExporterImage: natsio/prometheus-nats-exporter:0.16.0
configReloaderImage: natsio/nats-server-config-reloader:0.14.1
startCommand: /nats-server

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cache
description: KubeZero Cache module
type: application
version: 0.1.0
version: 0.1.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -17,11 +17,11 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: redis
version: 20.0.3
version: 20.11.5
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
- name: redis-cluster
version: 11.0.2
version: 11.5.0
repository: https://charts.bitnami.com/bitnami
condition: redis-cluster.enabled

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-graph
description: KubeZero GraphQL and GraphDB
type: application
version: 0.1.0
version: 0.1.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -16,7 +16,7 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: neo4j
version: 5.26.0
version: 2025.3.0
repository: https://helm.neo4j.com/neo4j
condition: neo4j.enabled

View File

@@ -1,6 +1,6 @@
# kubezero-graph
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero GraphQL and GraphDB
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.29.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.2.1 |
| https://helm.neo4j.com/neo4j | neo4j | 5.26.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://helm.neo4j.com/neo4j | neo4j | 2025.3.0 |
## Values
@@ -28,6 +28,8 @@ Kubernetes: `>= 1.29.0-0`
| neo4j.disableLookups | bool | `true` | |
| neo4j.enabled | bool | `false` | |
| neo4j.neo4j.name | string | `"test-db"` | |
| neo4j.neo4j.password | string | `"secret"` | |
| neo4j.neo4j.passwordFromSecret | string | `"neo4j-admin"` | |
| neo4j.serviceMonitor.enabled | bool | `false` | |
| neo4j.services.neo4j.enabled | bool | `false` | |
| neo4j.volumes.data.mode | string | `"defaultStorageClass"` | |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.3.10
version: 0.3.11
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -17,11 +17,11 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: nats
version: 1.2.2
version: 1.3.3
repository: https://nats-io.github.io/k8s/helm/charts/
condition: nats.enabled
- name: rabbitmq
version: 14.6.6
version: 14.7.0
repository: https://charts.bitnami.com/bitnami
condition: rabbitmq.enabled
kubeVersion: ">= 1.26.0"

View File

@@ -1,6 +1,6 @@
# kubezero-mq
![Version: 0.3.10](https://img.shields.io/badge/Version-0.3.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.bitnami.com/bitnami | rabbitmq | 14.6.6 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.2.2 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
## Values
@@ -34,13 +34,6 @@ Kubernetes: `>= 1.26.0`
| nats.natsBox.enabled | bool | `false` | |
| nats.promExporter.enabled | bool | `false` | |
| nats.promExporter.podMonitor.enabled | bool | `false` | |
| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
| rabbitmq-cluster-operator.enabled | bool | `false` | |
| rabbitmq-cluster-operator.msgTopologyOperator.metrics.enabled | bool | `false` | |
| rabbitmq-cluster-operator.msgTopologyOperator.metrics.serviceMonitor.enabled | bool | `true` | |
| rabbitmq-cluster-operator.rabbitmqImage.tag | string | `"3.11.4-debian-11-r0"` | |
| rabbitmq-cluster-operator.useCertManager | bool | `true` | |
| rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
| rabbitmq.auth.existingPasswordSecret | string | `"rabbitmq"` | |
| rabbitmq.auth.tls.enabled | bool | `false` | |

View File

@@ -1,4 +1,4 @@
{{- if .Values.nats.exporter.serviceMonitor.enabled }}
{{- if .Values.nats.promExporter.podMonitor.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:

View File

@@ -6,6 +6,12 @@ nats:
jetstream:
enabled: true
podTemplate:
topologySpreadConstraints:
kubernetes.io/hostname:
maxSkew: 1
whenUnsatisfiable: DoNotSchedule
natsBox:
enabled: false