feat: first try of 1.24 upgrade flow

Stefan Reimer 2022-11-09 17:08:22 +01:00
parent 6891ae41a7
commit 1171d13e86
17 changed files with 82 additions and 168 deletions

View File

@@ -56,7 +56,7 @@ render_kubeadm() {
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done

-  # hack to "uncloack" the json patches after they go processed by helm
+  # "uncloak" the json patches after they got processed by helm
   for s in apiserver controller-manager scheduler; do
     yq eval '.json' ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml > /tmp/_tmp.yaml && \
       mv /tmp/_tmp.yaml ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml
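For context, a rough sketch of what this yq step does, using a hypothetical patch file (the actual template content is not part of this diff): helm renders each kubeadm JSON patch wrapped under a top-level `json:` key so helm's own templating leaves it alone, and `yq eval '.json'` unwraps it back into the bare JSON-patch list kubeadm expects.

```bash
# Hypothetical "cloaked" patch as rendered by helm (kube-apiserver1+json.yaml):
#   json:
#     - op: replace
#       path: /spec/containers/0/startupProbe/failureThreshold
#       value: 60
# Unwrap it in place, as the loop above does per component:
yq eval '.json' kube-apiserver1+json.yaml > /tmp/_tmp.yaml && \
  mv /tmp/_tmp.yaml kube-apiserver1+json.yaml
# Afterwards the file is the plain JSON-patch list that kubeadm's
# --patches directory mechanism understands.
```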

View File

@@ -8,34 +8,6 @@ import yaml
 def migrate(values):
     """Actual changes here"""

-    # migrate ClusterName to clusterName
-    if "ClusterName" in values:
-        values["clusterName"] = values["ClusterName"]
-        values.pop("ClusterName")
-
-    # Remove HighAvailableControlplane
-    try:
-        values["global"]["highAvailable"] = values["HighAvailableControlplane"]
-        values.pop("HighAvailableControlplane")
-    except KeyError:
-        pass
-
-    # Create new clusterwide cloudprovider data if possible
-    # IamArn: arn:aws:iam::<ACCOUNT_ID>:role/<REGION>.<CLUSTERNAME>.cert-manager
-    try:
-        if values["cert-manager"]["IamArn"]:
-            account_id = values["cert-manager"]["IamArn"].split(":")[4]
-            region = values["cert-manager"]["IamArn"].split(":")[5].split('.')[0].split('/')[1]
-            if "global" not in values:
-                values["global"] = {}
-            if "aws" not in values["global"]:
-                values["global"]["aws"] = {}
-            values["global"]["aws"]["region"] = region
-            values["global"]["aws"]["accountId"] = account_id
-    except KeyError:
-        pass
-
     return values
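The removed block covered one-off renames from the v1.23 upgrade; for 1.24 the values are assumed to already be in the new shape. For reference, an illustration with made-up sample values of what the dropped logic used to do (it no longer runs after this commit):

```bash
# Made-up input the removed migration would have rewritten:
#   ClusterName: demo
#   HighAvailableControlplane: true
#   cert-manager:
#     IamArn: arn:aws:iam::123456789012:role/us-east-1.demo.cert-manager
#
# and the shape it produced:
#   clusterName: demo
#   global:
#     highAvailable: true
#     aws:
#       region: us-east-1          # parsed out of the role name
#       accountId: "123456789012"  # 5th ':'-separated ARN field
```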

View File

@@ -1,11 +1,12 @@
 #!/bin/bash -e

-VERSION="v1.23"
 #VERSION="latest"
+VERSION="v1.24"

 ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}

 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
-. $SCRIPT_DIR/libhelm.sh
+# shellcheck disable=SC1091
+. "$SCRIPT_DIR"/libhelm.sh

 [ -n "$DEBUG" ] && set -x
@@ -38,6 +39,9 @@ spec:
       - key: node-role.kubernetes.io/master
         operator: Exists
         effect: NoSchedule
+      - key: node-role.kubernetes.io/control-plane
+        operator: Exists
+        effect: NoSchedule
   initContainers:
   - name: node-upgrade
     image: busybox
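Both taints need tolerating because kubeadm 1.24 taints control-plane nodes with the new node-role.kubernetes.io/control-plane key alongside the legacy master key (which goes away in 1.25), so nodes can carry either during the transition. A quick way to check what a cluster currently has (a sketch):

```bash
# List each node with the taint keys currently applied to it:
kubectl get nodes -o custom-columns='NAME:.metadata.name,TAINTS:.spec.taints[*].key'
```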
@@ -75,7 +79,7 @@ EOF
 control_plane_upgrade() {
   TASKS="$1"

-  echo "Deploy cluster admin task: $TASK"
+  echo "Deploy cluster admin task: $TASKS"
   cat <<EOF | kubectl apply -f -
 apiVersion: v1
 kind: Pod
@@ -138,47 +142,25 @@ waitSystemPodsRunning
 argo_used && disable_argo

-all_nodes_upgrade "nsenter -m/hostproc/1/ns/mnt mount --make-shared /sys/fs/cgroup; nsenter -m/hostproc/1/ns/mnt mount --make-shared /sys; nsenter -r/host /usr/bin/podman image prune -a -f;"
+# all_nodes_upgrade ""

 control_plane_upgrade kubeadm_upgrade

-echo "Adjust kubezero values as needed: (eg. set cilium cluster id and ensure no IP space overlap !!):"
+echo "Adjust kubezero values as needed:"
+# shellcheck disable=SC2015
 argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system

-# Remove multus DS due to label changes, if this fails:
-# kubezero-network $ helm template . --set multus.enabled=true | kubectl apply -f -
-kubectl delete ds kube-multus-ds -n kube-system || true
-
-# Required due to chart upgrade to 4.X part of prometheus-stack 40.X
-kubectl delete daemonset metrics-prometheus-node-exporter -n monitoring || true
-
-# AWS EBS CSI driver change their fsGroupPolicy
-kubectl delete CSIDriver ebs.csi.aws.com || true
-
-# Delete external-dns deployment as upstream changed strategy to 'recreate'
-kubectl delete deployment addons-external-dns -n kube-system || true
-
 control_plane_upgrade "apply_network, apply_addons, apply_storage"

-kubectl rollout restart daemonset/calico-node -n kube-system
 kubectl rollout restart daemonset/cilium -n kube-system
-kubectl rollout restart daemonset/kube-multus-ds -n kube-system

 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning

 echo "Applying remaining KubeZero modules..."

-# Delete outdated cert-manager CRDs, otherwise serverside apply will fail
-for c in certificaterequests.cert-manager.io certificates.cert-manager.io challenges.acme.cert-manager.io clusterissuers.cert-manager.io issuers.cert-manager.io orders.acme.cert-manager.io; do
-  kubectl delete crd $c
-done
-
 control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"

-# delete legace ArgCD controller which is now a statefulSet
-kubectl delete deployment argocd-application-controller -n argocd || true
-
 # Final step is to commit the new argocd kubezero app
 kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP
@@ -186,6 +168,6 @@ echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your cluster."
 echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."

 echo "<Return> to continue and re-enable ArgoCD:"
-read
+read -r

 argo_used && enable_argo
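What happens with $ARGO_APP afterwards is left to the operator; a hypothetical follow-up, assuming the default output path and a typical cluster config repo (neither is mandated by the script):

```bash
# Sanity-check the exported app before committing it (hypothetical paths):
yq '.spec.source.targetRevision' /tmp/new-kubezero-argoapp.yaml
cp /tmp/new-kubezero-argoapp.yaml <cluster-repo>/kubezero/application.yaml
# commit, then sync the remaining KubeZero modules from ArgoCD
```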

View File

@@ -1,52 +0,0 @@
-#!/bin/bash
-
-# Migrate addons and network values from local kubeadm-values.yaml on controllers into CM
-# - enable cilium
-
-# Create emtpy CM if not exists yet
-kubectl get cm -n kube-system kubezero-values || \
-  kubectl create configmap -n kube-system kubezero-values
-
-kubectl get cm -n kube-system kubeadm-values || \
-  kubectl create configmap -n kube-system kubeadm-values
-
-# tweak local kubeadm for upgrade later on
-yq eval -i '.global.clusterName = strenv(CLUSTERNAME) |
-  .global.highAvailable = env(HIGHAVAILABLE)' \
-  ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
-
-# extract network
-yq e '.network |
-  .cilium.enabled = true |
-  .calico.enabled = true |
-  .multus.enabled = true |
-  .multus.defaultNetworks = ["cilium"] |
-  .cilium.cluster.name = strenv(CLUSTERNAME) |
-  {"network": .}' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml > $WORKDIR/network-values.yaml
-
-# get current argo cd values
-kubectl get application kubezero -n argocd -o yaml | yq '.spec.source.helm.values' > ${WORKDIR}/argo-values.yaml
-
-# merge all into new CM and set new minimal addons
-yq ea '. as $item ireduce ({}; . * $item ) |
-  .global.clusterName = strenv(CLUSTERNAME) |
-  .global.highAvailable = env(HIGHAVAILABLE) |
-  .addons.clusterBackup.image.tag = "v1.23" ' ${WORKDIR}/network-values.yaml $WORKDIR/argo-values.yaml > $WORKDIR/kubezero-pre-values.yaml
-
-# tumble new config through migrate.py
-cat $WORKDIR/kubezero-pre-values.yaml | migrate_argo_values.py > $WORKDIR/kubezero-values.yaml
-
-# Update kubezero-values CM
-kubectl get cm -n kube-system kubezero-values -o=yaml | \
-  yq e '.data."values.yaml" |= load_str("/tmp/kubezero/kubezero-values.yaml")' | \
-  kubectl replace -f -
-
-# update argo app
-kubectl get application kubezero -n argocd -o yaml | \
-  kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
-  yq '.spec.source.helm.values |= load_str("/tmp/kubezero/kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
-  kubectl apply -f -
-
-# finally remove annotation to allow argo to sync again
-kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'

View File

@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# get current argo cd values
+kubectl get application kubezero -n argocd -o yaml | yq '.spec.source.helm.values' > "${WORKDIR}"/argo-values.yaml
+
+# tumble new config through migrate.py
+migrate_argo_values.py < "$WORKDIR"/argo-values.yaml > "$WORKDIR"/kubezero-values.yaml
+
+# Update kubezero-values CM
+kubectl get cm -n kube-system kubezero-values -o=yaml | \
+  yq e '.data."values.yaml" |= load_str("/tmp/kubezero/kubezero-values.yaml")' | \
+  kubectl replace -f -
+
+# update argo app
+kubectl get application kubezero -n argocd -o yaml | \
+  kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
+  yq '.spec.source.helm.values |= load_str("/tmp/kubezero/kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
+  kubectl apply -f -
+
+# finally remove annotation to allow argo to sync again
+kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
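The chart-version injection above works because the shell variable is set only in the environment of that one yq invocation, where `strenv()` picks it up. A minimal standalone reproduction of the pattern:

```bash
# strenv() reads from the process environment of this single yq call:
kubezero_chart_version=1.24.0 \
  yq -n '.spec.source.targetRevision = strenv(kubezero_chart_version)'
# prints:
#   spec:
#     source:
#       targetRevision: 1.24.0
```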

View File

@@ -6,6 +6,7 @@ metadata:
   namespace: kube-system
 spec:
   schedule: "0 * * * *"
+  concurrencyPolicy: "Replace"
   jobTemplate:
     spec:
       backoffLimit: 1
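`Replace` is a sensible policy for a backup job: if the previous run is still going when the next hourly tick fires, it is cancelled in favor of the fresh one instead of runs piling up. The semantics can be confirmed in-cluster:

```bash
# Field docs straight from the API server:
kubectl explain cronjob.spec.concurrencyPolicy
# Allowed values: Allow (default), Forbid, Replace
```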

View File

@@ -1,6 +1,6 @@
 # kubezero-ci

-![Version: 0.5.15](https://img.shields.io/badge/Version-0.5.15-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.17](https://img.shields.io/badge/Version-0.5.17-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for all things CI

@@ -20,7 +20,7 @@ Kubernetes: `>= 1.20.0`
 |------------|------|---------|
 | https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
-| https://charts.jenkins.io | jenkins | 4.2.8 |
+| https://charts.jenkins.io | jenkins | 4.2.10 |
 | https://dl.gitea.io/charts/ | gitea | 5.0.9 |
 | https://gocd.github.io/helm-chart | gocd | 1.40.8 |
@@ -95,16 +95,16 @@ Kubernetes: `>= 1.20.0`
 | jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
 | jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
 | jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
-| jenkins.controller.installPlugins[0] | string | `"kubernetes:3724.v0920c1e0ec69"` | |
+| jenkins.controller.installPlugins[0] | string | `"kubernetes:3734.v562b_b_a_627ea_c"` | |
 | jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
-| jenkins.controller.installPlugins[2] | string | `"git:4.12.1"` | |
+| jenkins.controller.installPlugins[2] | string | `"git:4.13.0"` | |
-| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1512.vb_79d418d5fc8"` | |
+| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1569.vb_72405b_80249"` | |
 | jenkins.controller.installPlugins[4] | string | `"antisamy-markup-formatter:2.7"` | |
 | jenkins.controller.installPlugins[5] | string | `"prometheus:2.0.11"` | |
 | jenkins.controller.installPlugins[6] | string | `"htmlpublisher:1.31"` | |
 | jenkins.controller.installPlugins[7] | string | `"build-discarder:139.v05696a_7fe240"` | |
-| jenkins.controller.installPlugins[8] | string | `"dark-theme:245.vb_a_2b_b_010ea_96"` | |
+| jenkins.controller.installPlugins[8] | string | `"dark-theme:262.v0202a_4c8fb_6a"` | |
-| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.199.v4a_1d1f5d074f"` | |
+| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.206.v7ce2cf7b_0c8b"` | |
 | jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
 | jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
 | jenkins.controller.prometheus.enabled | bool | `false` | |

@@ -112,7 +112,7 @@ Kubernetes: `>= 1.20.0`
 | jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
 | jenkins.controller.resources.requests.cpu | string | `"250m"` | |
 | jenkins.controller.resources.requests.memory | string | `"1280Mi"` | |
-| jenkins.controller.tag | string | `"2.370-alpine-jdk17"` | |
+| jenkins.controller.tag | string | `"alpine-jdk17"` | |
 | jenkins.controller.testEnabled | bool | `false` | |
 | jenkins.enabled | bool | `false` | |
 | jenkins.istio.agent.enabled | bool | `false` | |

View File

@@ -1,6 +1,6 @@
 # kubezero-mq

-![Version: 0.2.3](https://img.shields.io/badge/Version-0.2.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ

@@ -19,8 +19,8 @@ Kubernetes: `>= 1.20.0`
 | Repository | Name | Version |
 |------------|------|---------|
 |  | nats | 0.8.4 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.3 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
-| https://charts.bitnami.com/bitnami | rabbitmq | 9.0.3 |
+| https://charts.bitnami.com/bitnami | rabbitmq | 11.1.1 |

 ## Values

@@ -34,8 +34,8 @@ Kubernetes: `>= 1.20.0`
 | nats.nats.advertise | bool | `false` | |
 | nats.nats.jetstream.enabled | bool | `true` | |
 | nats.natsbox.enabled | bool | `false` | |
-| rabbitmq.auth.erlangCookie | string | `"randomlongerlangcookie"` | |
+| rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
-| rabbitmq.auth.password | string | `"supersecret"` | |
+| rabbitmq.auth.existingPasswordSecret | string | `"rabbitmq"` | |
 | rabbitmq.auth.tls.enabled | bool | `false` | |
 | rabbitmq.auth.tls.existingSecret | string | `"rabbitmq-server-certificate"` | |
 | rabbitmq.auth.tls.existingSecretFullChain | bool | `true` | |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-network
 description: KubeZero umbrella chart for all things network
 type: application
-version: 0.4.0
+version: 0.4.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -19,14 +19,14 @@ dependencies:
     version: ">= 0.1.5"
     repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
-    version: 1.12.2
+    version: 1.12.3
     repository: https://helm.cilium.io/
     condition: cilium.enabled
   - name: metallb
-    version: 0.13.5
+    version: 0.13.7
     repository: https://metallb.github.io/metallb
     condition: metallb.enabled
   - name: calico
     version: 0.2.2
     condition: calico.enabled
-kubeVersion: ">= 1.24.0"
+kubeVersion: ">= 1.20.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-network

-![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for all things network

@@ -14,14 +14,14 @@ KubeZero umbrella chart for all things network
 ## Requirements

-Kubernetes: `>= 1.24.0`
+Kubernetes: `>= 1.20.0`

 | Repository | Name | Version |
 |------------|------|---------|
 |  | calico | 0.2.2 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
-| https://helm.cilium.io/ | cilium | 1.12.2 |
+| https://helm.cilium.io/ | cilium | 1.12.3 |
-| https://metallb.github.io/metallb | metallb | 0.13.5 |
+| https://metallb.github.io/metallb | metallb | 0.13.7 |
## Values ## Values
@@ -34,22 +34,19 @@ Kubernetes: `>= 1.24.0`
 | cilium.cluster.id | int | `240` | |
 | cilium.cluster.name | string | `"default"` | |
 | cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
-| cilium.cni.chainingMode | string | `"generic-veth"` | |
-| cilium.cni.exclusive | bool | `false` | |
 | cilium.containerRuntime.integration | string | `"crio"` | |
 | cilium.enabled | bool | `false` | |
 | cilium.hubble.enabled | bool | `false` | |
 | cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
 | cilium.l2NeighDiscovery.enabled | bool | `false` | |
 | cilium.l7Proxy | bool | `false` | |
-| cilium.nodePort.enabled | bool | `false` | |
+| cilium.nodePort.enabled | bool | `true` | |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cilium.operator.replicas | int | `1` | |
 | cilium.operator.tolerations[0].effect | string | `"NoSchedule"` | |
 | cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
 | cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
 | cilium.operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
-| cilium.policyEnforcementMode | string | `"never"` | |
 | cilium.prometheus.enabled | bool | `false` | |
 | cilium.prometheus.port | int | `9091` | |
 | cilium.securityContext.privileged | bool | `true` | |

@@ -61,8 +58,5 @@ Kubernetes: `>= 1.24.0`
 | metallb.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | metallb.enabled | bool | `false` | |
 | metallb.ipAddressPools | list | `[]` | |
-| multus.clusterNetwork | string | `"calico"` | |
-| multus.defaultNetworks | list | `[]` | |
 | multus.enabled | bool | `false` | |
-| multus.readinessindicatorfile | string | `"/etc/cni/net.d/10-calico.conflist"` | |
-| multus.tag | string | `"v3.9.1"` | |
+| multus.tag | string | `"v3.9.2"` | |

View File

@@ -18,12 +18,12 @@ metallb:
 multus:
   enabled: false
-  tag: "v3.9.1"
+  tag: "v3.9.2"

-  clusterNetwork: "calico"
-  defaultNetworks: []
+  #clusterNetwork: "calico"
+  #defaultNetworks: []
   # - "cilium"
-  readinessindicatorfile: "/etc/cni/net.d/10-calico.conflist"
+  #readinessindicatorfile: "/etc/cni/net.d/10-calico.conflist"

 cilium:
   enabled: false

@@ -38,17 +38,13 @@ cilium:
   cni:
     binPath: "/usr/libexec/cni"
     #-- Ensure this is false if multus is enabled
-    exclusive: false
-    chainingMode: generic-veth
+    # exclusive: false
+    # chainingMode: generic-veth

   bpf:
     hostLegacyRouting: true
     # tproxy: false

-  # enableIPv4Masquerade: false
-  # enableIdentityMark: false
-  policyEnforcementMode: "never"
-
   cluster:
     # This should match the second octet of clusterPoolIPv4PodCIDRList
     # to prevent IP space overlap and easy tracking

@@ -63,7 +59,7 @@ cilium:
   # Should be handled by multus
   nodePort:
-    enabled: false
+    enabled: true

   # Keep it simple for now
   l7Proxy: false
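With the multus and chaining values now commented out, CNI chaining becomes strictly opt-in. A sketch of re-enabling it by hand, using the now-commented defaults as explicit overrides (run from the kubezero-network chart directory, mirroring the helm template hint removed from the upgrade script):

```bash
# Sketch: render kubezero-network with multus + cilium chaining re-enabled:
helm template . \
  --set multus.enabled=true \
  --set cilium.enabled=true \
  --set cilium.cni.exclusive=false \
  --set cilium.cni.chainingMode=generic-veth | kubectl apply -f -
```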

View File

@@ -40,6 +40,9 @@ Kubernetes: `>= 1.20.0`
 | mariadb-galera.replicaCount | int | `2` | |
 | pxc-operator.enabled | bool | `false` | |
 | pxc-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
+| pxc-operator.resources.limits.memory | string | `"512Mi"` | |
+| pxc-operator.resources.requests.cpu | string | `"50m"` | |
+| pxc-operator.resources.requests.memory | string | `"32Mi"` | |
 | pxc-operator.tolerations[0].effect | string | `"NoSchedule"` | |
 | pxc-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
 | pxc-operator.tolerations[1].effect | string | `"NoSchedule"` | |

View File

@@ -13,6 +13,13 @@ pxc-operator:
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""

+  resources:
+    limits:
+      #cpu: 200m
+      memory: 512Mi
+    requests:
+      cpu: 50m
+      memory: 32Mi

 mariadb-galera:
   enabled: false
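To confirm the new defaults actually land on the operator's container, one could render the chart and inspect the output (a sketch; run from the kubezero-sql chart directory):

```bash
# Render with the operator enabled and pull out the resources stanza:
helm template . --set pxc-operator.enabled=true \
  | yq 'select(.kind == "Deployment") | .spec.template.spec.containers[0].resources'
```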

View File

@@ -68,7 +68,7 @@ Kubernetes: `>= 1.24.0`
 | network.cilium.cluster | object | `{}` | |
 | network.enabled | bool | `true` | |
 | network.retain | bool | `true` | |
-| network.targetRevision | string | `"0.4.0"` | |
+| network.targetRevision | string | `"0.4.1"` | |
 | storage.aws-ebs-csi-driver.enabled | bool | `false` | |
 | storage.aws-efs-csi-driver.enabled | bool | `false` | |
 | storage.enabled | bool | `false` | |

View File

@@ -1,15 +1,5 @@
 {{- define "network-values" }}

-# until 1.24
-calico:
-  enabled: true
-
-# since 1.22
-multus:
-  enabled: true
-  defaultNetworks:
-    - "cilium"
-
 # since 1.23
 cilium:
   enabled: true

View File

@@ -25,7 +25,7 @@ addons:
 network:
   enabled: true
   retain: true
-  targetRevision: 0.4.0
+  targetRevision: 0.4.1
   cilium:
     cluster: {}

View File

@@ -1,8 +1,8 @@
 # uptime-kuma

-![Version: 0.1.10](https://img.shields.io/badge/Version-0.1.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.9.1](https://img.shields.io/badge/AppVersion-1.9.1-informational?style=flat-square)
+![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.18.5](https://img.shields.io/badge/AppVersion-1.18.5-informational?style=flat-square)

-Chart for deploying uptime-kuma
+Chart for deploying uptime-kuma on KubeZero

 **Homepage:** <https://kubezero.com>

@@ -10,15 +10,15 @@ Chart for deploying uptime-kuma
 | Name | Email | Url |
 | ---- | ------ | --- |
-| Quarky9 |  |  |
+| Stefan Reimer | <stefan@zero-downtime.net> |  |

 ## Requirements

-Kubernetes: `>= 1.18.0`
+Kubernetes: `>= 1.20.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |

 ## Values