Compare commits

20 commits: 2fd7bc728a ... a4bd7a412f

a4bd7a412f
c675e7aa1b
00daef3b0b
9bb0e0e91a
e022db091c
da2510c8df
c4aab252e8
0a813c525c
9d28705079
024a0fcfaf
f88d6a2f0d
2c47a28e10
dfdf50f85f
d4ba1d1a01
fa06c13805
7f2208fea4
c427e73f79
2fd775624b
ffaf037483
0664b2bed3
@@ -14,7 +14,7 @@ include .ci/podman.mk

 Add subtree to your project:
 ```
-git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash
+git subtree add --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash
 ```
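Pulling later upstream changes into the subtree uses the matching `pull` form; the Makefile's `ci-pull-upstream` task further down wraps the same command:

```
git subtree pull --prefix .ci https://git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"
```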
@@ -41,7 +41,8 @@ for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
         _delete = True
         for tag in image["imageTags"]:
-            # Look for at least one tag NOT beign a SemVer dev tag
-            if "-" not in tag:
+            # untagged dev builds get tagged as <tag>-g<commit>
+            if "-g" not in tag and "dirty" not in tag:
                 _delete = False
         if _delete:
             print("Deleting development image {}".format(image["imageTags"]))
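For orientation, what the new condition keeps versus deletes, sketched in shell with hypothetical tags (the real script evaluates ECR image metadata in Python):

```
# only `-g<commit>` and `dirty` suffixes mark disposable dev builds now;
# other `-` suffixes such as `-rc1` no longer cause deletion
for tag in v1.2.3 v1.2.3-rc1 v1.2.3-5-gdeadbee v1.2.3-dirty; do
  if [[ "$tag" != *-g* && "$tag" != *dirty* ]]; then
    echo "$tag -> keep"
  else
    echo "$tag -> delete candidate"
  fi
done
```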
@@ -8,8 +8,8 @@ SHELL := bash
 .PHONY: all # All targets are accessible for user
 .DEFAULT: help # Running Make will run the help target

-# Parse version from latest git semver tag
-GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
+# Parse version from latest git semver tag, use short commit otherwise
+GIT_TAG ?= $(shell git describe --tags --match v*.*.* --dirty 2>/dev/null || git describe --match="" --always --dirty 2>/dev/null)
 GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)

 TAG ::= $(GIT_TAG)
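Roughly what the two `git describe` variants produce (hypothetical repository states, shown for orientation):

```
# latest semver tag v0.1.2, two commits on top, uncommitted changes:
git describe --tags --match 'v*.*.*' --dirty
# -> v0.1.2-2-g1a2b3c4-dirty

# no semver tag at all: the new fallback still yields a usable, dirty-aware id
git describe --match="" --always --dirty
# -> 1a2b3c4-dirty
```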
@@ -85,7 +85,7 @@ rm-image:

 ## some useful tasks during development
 ci-pull-upstream: ## pull latest shared .ci subtree
-	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
+	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git main --squash -m "Merge latest ci-tools-lib"

 create-repo: ## create new AWS ECR public repository
 	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
@@ -17,7 +17,7 @@ post_control_plane_upgrade_cluster() {
   # delete previous root app controlled by kubezero module
   kubectl delete application kubezero-git-sync -n argocd || true

-  # Patch appproject to keep SyncWindow in place
+  # only patch appproject to keep SyncWindow in place
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/labels"}]' || true
   kubectl patch appproject kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
 }
@@ -111,35 +111,42 @@ post_kubeadm() {
 }


-# Control plane upgrade
-control_plane_upgrade() {
-  CMD=$1
+# Migrate KubeZero Config to current version
+upgrade_kubezero_config() {
+  # get current values, argo app over cm
+  get_kubezero_values $ARGOCD
+
+  # tumble new config through migrate.py
+  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
+    && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
+
+  update_kubezero_cm
+
+  if [ "$ARGOCD" == "true" ]; then
+    # update argo app
+    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
+    kubectl get application kubezero -n argocd -o yaml | \
+      yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
+      > $WORKDIR/new-argocd-app.yaml
+    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
+  fi
+}
+
+
+# Control plane upgrade
+kubeadm_upgrade() {
   ARGOCD=$(argo_used)

   render_kubeadm upgrade

-  if [[ "$CMD" =~ ^(cluster)$ ]]; then
+  # Check if we already have all controllers on the current version
+  OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
+
+  # run control plane upgrade
+  if [ "$OLD_CONTROLLERS" != "0" ]; then
+
     pre_control_plane_upgrade_cluster

-    # get current values, argo app over cm
-    get_kubezero_values $ARGOCD
-
-    # tumble new config through migrate.py
-    migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
-      && mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
-
-    update_kubezero_cm
-
-    if [ "$ARGOCD" == "true" ]; then
-      # update argo app
-      export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
-      kubectl get application kubezero -n argocd -o yaml | \
-        yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
-        > $WORKDIR/new-argocd-app.yaml
-      kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
-    fi
-
     pre_kubeadm

     _kubeadm init phase upload-config kubeadm
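The `yq` pipeline above swaps the inline `valuesObject` and chart revision in one pass; a minimal sketch of the same expression on a toy manifest (hypothetical file names and values):

```
cat > app.yaml <<'EOF'
spec:
  source:
    targetRevision: 1.31.0
    helm:
      valuesObject:
        foo: old
EOF
echo 'foo: new' > values.yaml

export kubezero_chart_version=1.32.0
yq '.spec.source.helm.valuesObject |= load("values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' app.yaml
# valuesObject is replaced wholesale with the file contents, and
# targetRevision picks up the exported version in the same pass
```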
@@ -155,7 +162,8 @@ control_plane_upgrade() {

     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."

-  elif [[ "$CMD" =~ ^(final)$ ]]; then
+  # All controllers already on current version
+  else
     pre_cluster_upgrade_final

     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
@@ -411,12 +419,8 @@ for t in $@; do
     bootstrap) control_plane_node bootstrap;;
     join) control_plane_node join;;
     restore) control_plane_node restore;;
-    kubeadm_upgrade)
-      control_plane_upgrade cluster
-      ;;
-    finalize_cluster_upgrade)
-      control_plane_upgrade final
-      ;;
+    upgrade_control_plane) kubeadm_upgrade;;
+    upgrade_kubezero) upgrade_kubezero_config;;
     apply_*)
+      ARGOCD=$(argo_used)
       apply_module "${t##apply_}";;
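Worth noting for the collapsed `apply_*` arm: `${t##apply_}` strips the `apply_` prefix, so a single case pattern dispatches every module:

```
t=apply_network
echo "${t##apply_}"   # prints: network (the name handed to apply_module)
```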
@@ -80,6 +80,19 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }

+function ensure_kubezero_secret_key() {
+  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
+  local key=""
+  local val=""
+
+  for key in $@; do
+    val=$(echo "$secret" | yq ".data.\"$key\"")
+    if [ "$val" == "null" ]; then
+      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
+    fi
+  done
+}
+
+
 function set_kubezero_secret() {
   local key="$1"
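The new helper is exercised later in the Argo-CD bootstrap (see the `@@ -21,3 +21,6 @@` hunk below) to pre-create keys that `vals` would otherwise fail to resolve:

```
ensure_kubezero_secret_key argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey
```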
@@ -340,7 +353,7 @@ EOF
 }


-function control_plane_upgrade() {
+function admin_job() {
   TASKS="$1"

   [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
@@ -350,7 +363,7 @@ function control_plane_upgrade() {
 apiVersion: v1
 kind: Pod
 metadata:
-  name: kubezero-upgrade
+  name: kubezero-admin-job
   namespace: kube-system
   labels:
     app: kubezero-upgrade
@@ -395,10 +408,10 @@ spec:
   restartPolicy: Never
 EOF

-  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
+  kubectl wait pod kubezero-admin-job -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
   while true; do
-    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
+    kubectl logs kubezero-admin-job -n kube-system -f 2>/dev/null && break
     sleep 3
   done
-  kubectl delete pod kubezero-upgrade -n kube-system
+  kubectl delete pod kubezero-admin-job -n kube-system
 }
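Call sites in the cluster upgrade script below hand `admin_job` a comma-separated task list, which the admin pod works through in order, for example:

```
admin_job "upgrade_control_plane, upgrade_kubezero"
admin_job "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
```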
@@ -15,37 +15,28 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 ARGOCD=$(argo_used)

 echo "Checking that all pods in kube-system are running ..."
-#waitSystemPodsRunning
+waitSystemPodsRunning

 [ "$ARGOCD" == "true" ] && disable_argo

-# Check if we already have all controllers on the current version
-#OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)
-
-if [ "$OLD_CONTROLLERS" == "0" ]; then
-  # All controllers already on current version
-  control_plane_upgrade finalize_cluster_upgrade
-else
-  # Otherwise run control plane upgrade
-  control_plane_upgrade kubeadm_upgrade
-fi
-
-echo "<Return> to continue"
-read -r
-
-#echo "Adjust kubezero values as needed:"
-# shellcheck disable=SC2015
-#[ "$ARGOCD" == "true" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
-
-#echo "<Return> to continue"
-#read -r
+admin_job "upgrade_control_plane, upgrade_kubezero"

 # upgrade modules
-control_plane_upgrade "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"
+admin_job "apply_kubezero, apply_network, apply_addons, apply_storage, apply_operators"

 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning

 echo "Applying remaining KubeZero modules..."

-control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
+admin_job "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"

 # we replace the project during v1.31 so disable again
 [ "$ARGOCD" == "true" ] && disable_argo
@@ -60,6 +51,12 @@ while true; do
   sleep 1
 done

+echo "Once all controller nodes are running on $KUBE_VERSION, <return> to continue"
+read -r
+
+# Final control plane upgrades
+admin_job "upgrade_control_plane"
+
 echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
 echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."
@@ -3,7 +3,7 @@ name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
 version: 0.8.14
-appVersion: v1.30
+appVersion: v1.31
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -21,3 +21,6 @@ fi
 # Redis secret
 kubectl get secret argocd-redis -n argocd || kubectl create secret generic argocd-redis -n argocd \
   --from-literal=auth=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
+
+# required keys in kubezero-secrets, as --ignore-missing-values in helm-secrets doesnt work with vals ;-(
+ensure_kubezero_secret_key argo-cd.kubezero.username argo-cd.kubezero.password argo-cd.kubezero.sshPrivateKey
@@ -25,7 +25,7 @@ argo-events:
   # do NOT use -alpine tag as the entrypoint differs
   versions:
     - version: 2.10.11
-      natsImage: nats:2.10.11-scratch
+      natsImage: nats:2.11.1-scratch
       metricsExporterImage: natsio/prometheus-nats-exporter:0.16.0
      configReloaderImage: natsio/nats-server-config-reloader:0.14.1
       startCommand: /nats-server
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-cache
 description: KubeZero Cache module
 type: application
-version: 0.1.0
+version: 0.1.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,11 +17,11 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: redis
-    version: 20.0.3
+    version: 20.11.5
     repository: https://charts.bitnami.com/bitnami
     condition: redis.enabled
   - name: redis-cluster
-    version: 11.0.2
+    version: 11.5.0
     repository: https://charts.bitnami.com/bitnami
     condition: redis-cluster.enabled
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-graph
 description: KubeZero GraphQL and GraphDB
 type: application
-version: 0.1.0
+version: 0.1.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: neo4j
-    version: 5.26.0
+    version: 2025.3.0
     repository: https://helm.neo4j.com/neo4j
     condition: neo4j.enabled
@@ -1,6 +1,6 @@
 # kubezero-graph

- 
+ 

 KubeZero GraphQL and GraphDB
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.29.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.2.1 |
-| https://helm.neo4j.com/neo4j | neo4j | 5.26.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://helm.neo4j.com/neo4j | neo4j | 2025.3.0 |

 ## Values
@@ -28,6 +28,8 @@ Kubernetes: `>= 1.29.0-0`
 | neo4j.disableLookups | bool | `true` | |
 | neo4j.enabled | bool | `false` | |
 | neo4j.neo4j.name | string | `"test-db"` | |
+| neo4j.neo4j.password | string | `"secret"` | |
+| neo4j.neo4j.passwordFromSecret | string | `"neo4j-admin"` | |
 | neo4j.serviceMonitor.enabled | bool | `false` | |
 | neo4j.services.neo4j.enabled | bool | `false` | |
 | neo4j.volumes.data.mode | string | `"defaultStorageClass"` | |
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-mq
 description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 type: application
-version: 0.3.10
+version: 0.3.11
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,11 +17,11 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
-    version: 1.2.2
+    version: 1.3.3
     repository: https://nats-io.github.io/k8s/helm/charts/
     condition: nats.enabled
   - name: rabbitmq
-    version: 14.6.6
+    version: 14.7.0
     repository: https://charts.bitnami.com/bitnami
     condition: rabbitmq.enabled
 kubeVersion: ">= 1.26.0"
@@ -1,6 +1,6 @@
 # kubezero-mq

- 
+ 

 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.26.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://charts.bitnami.com/bitnami | rabbitmq | 14.6.6 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.2.2 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |

 ## Values
@@ -34,13 +34,6 @@ Kubernetes: `>= 1.26.0`
 | nats.natsBox.enabled | bool | `false` | |
+| nats.promExporter.enabled | bool | `false` | |
+| nats.promExporter.podMonitor.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
-| rabbitmq-cluster-operator.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.msgTopologyOperator.metrics.enabled | bool | `false` | |
-| rabbitmq-cluster-operator.msgTopologyOperator.metrics.serviceMonitor.enabled | bool | `true` | |
-| rabbitmq-cluster-operator.rabbitmqImage.tag | string | `"3.11.4-debian-11-r0"` | |
-| rabbitmq-cluster-operator.useCertManager | bool | `true` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.existingPasswordSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.tls.enabled | bool | `false` | |
@@ -1,4 +1,4 @@
-{{- if .Values.nats.exporter.serviceMonitor.enabled }}
+{{- if .Values.nats.promExporter.podMonitor.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
@@ -6,6 +6,12 @@ nats:
   jetstream:
     enabled: true

+  podTemplate:
+    topologySpreadConstraints:
+      kubernetes.io/hostname:
+        maxSkew: 1
+        whenUnsatisfiable: DoNotSchedule
+
   natsBox:
     enabled: false
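With `maxSkew: 1` and `whenUnsatisfiable: DoNotSchedule`, the scheduler rejects any placement that would leave one node with two more NATS pods than another. A quick way to eyeball the spread after a rollout (namespace and label selector are assumptions, adjust to your release):

```
kubectl get pods -n nats -l app.kubernetes.io/name=nats -o wide
```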