Compare commits


12 Commits

SHA1        Message                                                                                    Date
232733c3f8  chore(deps): update helm release argo-cd to v7.8.27                                        2025-04-21 03:01:57 +00:00
c675e7aa1b  Merge latest ci-tools-lib                                                                  2025-04-17 23:00:48 +00:00
00daef3b0b  Squashed '.ci/' changes from a5cd89d7..9725c2ef                                            2025-04-17 23:00:48 +00:00
            9725c2ef fix: ensure we dont remove rc builds
            git-subtree-dir: .ci
            git-subtree-split: 9725c2ef8842467951ec60adb1b45dfeca7618f5
9bb0e0e91a  feat: reorg cluster upgrade scripts to allow support for KubeZero only clusters like GKE   2025-04-17 22:42:39 +00:00
e022db091c  Squashed '.ci/' changes from 15e4d1f5..a5cd89d7                                            2025-04-17 22:37:10 +00:00
            a5cd89d7 feat: improve tag parsing, ensure dirty is added if needed
            git-subtree-dir: .ci
            git-subtree-split: a5cd89d73157c829eaf12f91a68f73826fbb35e7
da2510c8df  Merge latest ci-tools-lib                                                                  2025-04-17 22:37:10 +00:00
c4aab252e8  Squashed '.ci/' changes from a3928364..15e4d1f5                                            2025-04-17 22:00:32 +00:00
            15e4d1f5 ci: make work with main branch
            3feaf6fa chore: migrate to main branch
            git-subtree-dir: .ci
            git-subtree-split: 15e4d1f589c8e055944b2a4b58a9a50728e245b4
0a813c525c  Merge latest ci-tools-lib                                                                  2025-04-17 22:00:32 +00:00
9d28705079  docs: typos                                                                                2025-04-17 12:08:06 +00:00
024a0fcfaf  feat: ensure central secret keys exists                                                    2025-04-17 13:06:28 +01:00
f88d6a2f0d  docs: update                                                                               2025-04-17 10:35:40 +00:00
2c47a28e10  feat: MQ various version bumps                                                             2025-04-17 10:35:13 +00:00
14 changed files with 92 additions and 73 deletions

View File

@@ -14,7 +14,7 @@ include .ci/podman.mk

 Add subtree to your project:
 ```
-git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
+git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
 ```
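With the upstream default branch renamed from master to main, later refreshes of the subtree use the same ref; a minimal sketch with the HTTPS remote from the README (the commit message mirrors the Makefile target further down):

```bash
# Pull the latest shared .ci subtree from the renamed main branch
git subtree pull --prefix .ci \
  https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main \
  --squash -m "Merge latest ci-tools-lib"
```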

View File

@@ -41,7 +41,8 @@ for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
     _delete = True
     for tag in image["imageTags"]:
         # Look for at least one tag NOT beign a SemVer dev tag
-        if "-" not in tag:
+        # untagged dev builds get tagged as <tag>-g<commit>
+        if "-g" not in tag and "dirty" not in tag:
             _delete = False
     if _delete:
         print("Deleting development image {}".format(image["imageTags"]))

View File

@@ -8,8 +8,8 @@ SHELL := bash
 .PHONY: all # All targets are accessible for user
 .DEFAULT: help # Running Make will run the help target

-# Parse version from latest git semver tag
-GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
+# Parse version from latest git semver tag, use short commit otherwise
+GIT_TAG ?= $(shell git describe --tags --match v*.*.* --dirty 2>/dev/null || git describe --match="" --always --dirty 2>/dev/null)
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)

 TAG ::= $(GIT_TAG)
@@ -85,7 +85,7 @@ rm-image:
 ## some useful tasks during development
 ci-pull-upstream: ## pull latest shared .ci subtree
-	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
+	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"

 create-repo: ## create new AWS ECR public repository
 	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
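The new GIT_TAG expression prefers a matching semver tag and falls back to a bare commit id, appending `-dirty` in either case when the working tree has local changes — which is what feeds the `-g`/`dirty` filter in the ECR lifecycle script above. A quick sketch of the possible outputs (example values assumed):

```bash
# Tagged, clean tree:      v1.2.3
# Tagged, 3 commits ahead: v1.2.3-3-g1a2b3c4
# Tagged, local changes:   v1.2.3-dirty
git describe --tags --match 'v*.*.*' --dirty

# No matching tag: short commit id, e.g. 1a2b3c4 or 1a2b3c4-dirty
git describe --match="" --always --dirty
```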

View File

@@ -17,7 +17,7 @@ post_control_plane_upgrade_cluster() {
   # delete previous root app controlled by kubezero module
   kubectl delete application kubezero-git-sync -n argocd || true

-  # Patch appproject to keep SyncWindow in place
+  # only patch appproject to keep SyncWindow in place
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
 }
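The AppProject itself is deliberately left in place — only its labels and annotations are stripped — so any configured SyncWindow survives the upgrade. A hedged way to confirm, reusing the yq-over-kubectl pattern from the scripts below:

```bash
# Should still print the project's sync windows after the patches
kubectl get appproject kubezero -n argocd -o yaml | yq .spec.syncWindows
```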

View File

@@ -111,35 +111,42 @@ post_kubeadm() {
 }

-# Control plane upgrade
-control_plane_upgrade() {
-  CMD=$1
+# Migrate KubeZero Config to current version
+upgrade_kubezero_config() {
+  # get current values, argo app over cm
+  get_kubezero_values $ARGOCD
+
+  # tumble new config through migrate.py
+  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
+    && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
+
+  update_kubezero_cm
+
+  if [ "$ARGOCD" == "true" ]; then
+    # update argo app
+    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
+    kubectl get application kubezero -n argocd -o yaml | \
+      yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
+      > $WORKDIR/new-argocd-app.yaml
+    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
+  fi
+}
+
+# Control plane upgrade
+kubeadm_upgrade() {
   ARGOCD=$(argo_used)

   render_kubeadm upgrade

-  if [[ "$CMD" =~ ^(cluster)$ ]]; then
+  # Check if we already have all controllers on the current version
+  OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
+
+  # run control plane upgrade
+  if [ "$OLD_CONTROLLERS" != "0" ]; then
     pre_control_plane_upgrade_cluster

-    # get current values, argo app over cm
-    get_kubezero_values $ARGOCD
-
-    # tumble new config through migrate.py
-    migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
-      && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
-
-    update_kubezero_cm
-
-    if [ "$ARGOCD" == "true" ]; then
-      # update argo app
-      export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
-      kubectl get application kubezero -n argocd -o yaml | \
-        yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
-        > $WORKDIR/new-argocd-app.yaml
-      kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
-    fi
-
     pre_kubeadm

     _kubeadm init phase upload-config kubeadm
@@ -155,7 +162,8 @@ control_plane_upgrade() {
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."

-  elif [[ "$CMD" =~ ^(final)$ ]]; then
+  # All controllers already on current version
+  else
     pre_cluster_upgrade_final

     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
@@ -411,12 +419,8 @@ for t in $@; do
   bootstrap) control_plane_node bootstrap;;
   join) control_plane_node join;;
   restore) control_plane_node restore;;
-  kubeadm_upgrade)
-    control_plane_upgrade cluster
-    ;;
-  finalize_cluster_upgrade)
-    control_plane_upgrade final
-    ;;
+  upgrade_control_plane) kubeadm_upgrade;;
+  upgrade_kubezero) upgrade_kubezero_config;;
   apply_*)
     ARGOCD=$(argo_used)
     apply_module "${t##apply_}";;
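The two new case arms replace the old cluster/final mode flag, so callers select a phase by task name, and since the dispatcher loops over `$@`, several tasks can be passed in one call. A hedged invocation sketch (the admin script name is an assumption; the task names come from the case statement above):

```bash
# First task runs the kubeadm control plane upgrade,
# second migrates the KubeZero config to the current version
kubezero.sh upgrade_control_plane upgrade_kubezero
```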

View File

@@ -80,6 +80,19 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }

+function ensure_kubezero_secret_key() {
+  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
+
+  local key=""
+  local val=""
+  for key in $@; do
+    val=$(echo "$secret" | yq ".data.\"$key\"")
+    if [ "$val" == "null" ]; then
+      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
+    fi
+  done
+}
+
 function set_kubezero_secret() {
   local key="$1"
@@ -340,7 +353,7 @@ EOF
 }

-function control_plane_upgrade() {
+function admin_job() {
   TASKS="$1"

   [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
@@ -350,7 +363,7 @@ function control_plane_upgrade() {
 apiVersion: v1
 kind: Pod
 metadata:
-  name: kubezero-upgrade
+  name: kubezero-admin-job
   namespace: kube-system
   labels:
     app: kubezero-upgrade
@@ -395,10 +408,10 @@ spec:
   restartPolicy: Never
 EOF

-  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
+  kubectl wait pod kubezero-admin-job -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
   while true; do
-    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
+    kubectl logs kubezero-admin-job -n kube-system -f 2>/dev/null && break
     sleep 3
   done
-  kubectl delete pod kubezero-upgrade -n kube-system
+  kubectl delete pod kubezero-admin-job -n kube-system
 }
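The renamed admin_job helper spins up a one-shot kubezero-admin-job pod, tails its logs until it finishes, and deletes it again, so a caller only passes the comma-separated task list; a sketch mirroring the invocations in the upgrade script below:

```bash
# Runs the named tasks inside the kubezero-admin-job pod and streams the output
admin_job "upgrade_control_plane, upgrade_kubezero"
```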

View File

@@ -15,37 +15,28 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 ARGOCD=$(argo_used)

 echo "Checking that all pods in kube-system are running ..."
-#waitSystemPodsRunning
+waitSystemPodsRunning

 [ "$ARGOCD" == "true" ] && disable_argo

-# Check if we already have all controllers on the current version
-#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
-if [ "$OLD_CONTROLLERS" == "0" ]; then
-  # All controllers already on current version
-  control_plane_upgrade finalize_cluster_upgrade
-else
-  # Otherwise run control plane upgrade
-  control_plane_upgrade kubeadm_upgrade
-fi
-
-echo "<Return> to continue"
-read -r
+admin_job "upgrade_control_plane, upgrade_kubezero"

 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
 #[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
+#echo "<Return> to continue"
+#read -r

 # upgrade modules
-control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
+admin_job "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"

 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning

 echo "Applying remaining KubeZero modules..."

-control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
+admin_job "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"

 # we replace the project during v1.31 so disable again
 [ "$ARGOCD" == "true" ] && disable_argo
@@ -60,6 +51,12 @@ while true; do
   sleep 1
 done

+echo "Once all controller nodes are running on $KUBE_VERSION, <return> to continue"
+read -r
+
+# Final control plane upgrades
+admin_job "upgrade_control_plane"
+
 echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
 echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."

View File

@@ -3,7 +3,7 @@ name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
 version: 0.8.13
-appVersion: v1.30
+appVersion: v1.31
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

View File

@@ -22,7 +22,7 @@ dependencies:
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.8.26
+    version: 7.8.27
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-image-updater

View File

@@ -21,3 +21,6 @@ fi
 # Redis secret
 kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
   --from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
+
+# required keys in kubezero-secrets, as --ignore-missing-values in helm-secrets doesnt work with vals ;-(
+ensure_kubezero_secret_key argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey
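For any key that is missing, ensure_kubezero_secret_key seeds an empty value so later vals lookups resolve instead of failing; the effective kubectl call for one absent key would look roughly like this:

```bash
# Hedged sketch: what the helper issues when argo-cd.kubezero.username is absent
kubectl patch secret -n kubezero kubezero-secrets \
  --patch='{"data": { "argo-cd.kubezero.username": "" }}'
```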

View File

@@ -1,6 +1,6 @@
 # kubezero-graph

-![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero GraphQL and GraphDB
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.29.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.2.1 |
-| https://helm.neo4j.com/neo4j | neo4j | 5.26.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://helm.neo4j.com/neo4j | neo4j | 2025.3.0 |

 ## Values
@@ -28,6 +28,8 @@ Kubernetes: `>= 1.29.0-0`
 | neo4j.disableLookups | bool | `true` | |
 | neo4j.enabled | bool | `false` | |
 | neo4j.neo4j.name | string | `"test-db"` | |
+| neo4j.neo4j.password | string | `"secret"` | |
+| neo4j.neo4j.passwordFromSecret | string | `"neo4j-admin"` | |
 | neo4j.serviceMonitor.enabled | bool | `false` | |
 | neo4j.services.neo4j.enabled | bool | `false` | |
 | neo4j.volumes.data.mode | string | `"defaultStorageClass"` | |

View File

@@ -1,6 +1,6 @@
 # kubezero-mq

-![Version: 0.3.10](https://img.shields.io/badge/Version-0.3.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://charts.bitnami.com/bitnami | rabbitmq | 14.6.6 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.2.2 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |

 ## Values
@@ -34,13 +34,6 @@ Kubernetes: `>= 1.26.0`
 | nats.natsBox.enabled | bool | `false` | |
 | nats.promExporter.enabled | bool | `false` | |
 | nats.promExporter.podMonitor.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
-| rabbitmq-cluster-operator.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.msgTopologyOperator.metrics.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.msgTopologyOperator.metrics.serviceMonitor.enabled | bool | `true` | |
-| rabbitmq-cluster-operator.rabbitmqImage.tag | string | `"3.11.4-debian-11-r0"` | |
-| rabbitmq-cluster-operator.useCertManager | bool | `true` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.existingPasswordSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.tls.enabled | bool | `false` | |

View File

@@ -1,4 +1,4 @@
-{{- if .Values.nats.exporter.serviceMonitor.enabled }}
+{{- if .Values.nats.promExporter.podMonitor.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
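The template gate now follows the renamed promExporter values listed in the README above; a hedged way to confirm the ConfigMap still renders (chart path, release name, and the nats.enabled flag are assumptions):

```bash
helm template kubezero-mq charts/kubezero-mq \
  --set nats.enabled=true \
  --set nats.promExporter.enabled=true \
  --set nats.promExporter.podMonitor.enabled=true | grep -B2 -A3 'kind: ConfigMap'
```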

View File

@@ -6,6 +6,12 @@ nats:
   jetstream:
     enabled: true

+  podTemplate:
+    topologySpreadConstraints:
+      kubernetes.io/hostname:
+        maxSkew: 1
+        whenUnsatisfiable: DoNotSchedule
+
   natsBox:
     enabled: false
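The added constraint spreads the NATS pods across distinct hostnames, so no two brokers share a node; a quick check after a rollout (namespace and label selector are assumptions based on the upstream chart's standard labels):

```bash
# Each nats pod should report a distinct NODE
kubectl get pods -n nats -l app.kubernetes.io/name=nats -o wide
```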