feat: first working v1.30 base

Stefan Reimer 2024-10-16 12:20:20 +01:00
parent e6248e9765
commit a3166859af
26 changed files with 121 additions and 93 deletions


@@ -3,9 +3,9 @@ ARG ALPINE_VERSION=3.20
FROM docker.io/alpine:${ALPINE_VERSION}
ARG ALPINE_VERSION
ARG KUBE_VERSION=1.29.7
ARG SECRETS_VERSION=4.6.0
ARG VALS_VERSION=0.37.3
ARG KUBE_VERSION=1.30.5
ARG SECRETS_VERSION=4.6.1
ARG VALS_VERSION=0.37.5
RUN cd /etc/apk/keys && \
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@@ -22,11 +22,11 @@ RUN cd /etc/apk/keys && \
py3-yaml \
restic \
helm \
etcd-ctl@edge-community \
cri-tools@kubezero \
kubeadm@kubezero~=${KUBE_VERSION} \
kubectl@kubezero~=${KUBE_VERSION} \
etcdhelper@kubezero \
etcd-ctl@edge-testing
kubeadm@kubezero~=${KUBE_VERSION} \
kubectl@kubezero~=${KUBE_VERSION}
RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
mkdir -p /var/lib/kubezero


@@ -1,8 +1,8 @@
# Cluster upgrade flow
## During 1.23 upgrade
- create the new kubezero-values CM if it does not exist yet, by merging parts of the legacy /etc/kubernetes/kubeadm-values.yaml with any pre-existing values from the kubezero ArgoCD app; see the sketch below
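A minimal sketch of that merge, assuming yq v4 and an Argo app that still stores its values as a plain string (which keys to carry over differs per cluster):
```
# hypothetical one-off: merge the legacy file over the current Argo app values
kubectl get application kubezero -n argocd -o yaml | \
  yq '.spec.source.helm.values' > /tmp/argo-values.yaml
yq eval-all '. as $item ireduce ({}; . * $item)' \
  /tmp/argo-values.yaml /etc/kubernetes/kubeadm-values.yaml > /tmp/merged-values.yaml
kubectl create cm kubezero-values -n kube-system --from-file=values.yaml=/tmp/merged-values.yaml
```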
## Hard refresh
```
kubectl annotate app/kubezero -n argocd argocd.argoproj.io/refresh="hard"
```
# General flow


@@ -47,15 +47,24 @@ _kubeadm() {
# Render cluster config
render_kubeadm() {
helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
local phase=$1
helm template $CHARTS/kubeadm --output-dir ${WORKDIR} \
-f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml \
--set patches=/etc/kubernetes/patches
# Assemble kubeadm config
cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
for f in Cluster Init Join KubeProxy Kubelet; do
for f in Cluster KubeProxy Kubelet; do
# echo "---" >> /etc/kubernetes/kubeadm.yaml
cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
done
# skip InitConfig during upgrade
if [ "$phase" != "upgrade" ]; then
cat ${WORKDIR}/kubeadm/templates/InitConfiguration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
fi
# "uncloak" the json patches after they got processed by helm
for s in apiserver controller-manager scheduler; do
yq eval '.json' ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml > /tmp/_tmp.yaml && \
@@ -98,7 +107,7 @@ pre_kubeadm() {
fi
# copy patches to host to make --rootfs of kubeadm work
cp -r ${WORKDIR}/kubeadm/templates/patches /host/tmp/
cp -r ${WORKDIR}/kubeadm/templates/patches ${HOSTFS}/etc/kubernetes
}
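For context: kubeadm looks up files in the patches directory by the naming scheme `target[suffix][+patchtype].extension`, e.g. `kube-apiserver1+json.yaml`, and a `json` patch must be a bare top-level list. The chart therefore wraps that list under a `json:` key so helm renders it as regular YAML, and the "uncloak" loop above strips the wrapper again. A hedged illustration with a made-up patch:
```
# as rendered by helm (hypothetical kube-apiserver1+json.yaml):
#   json:
#     - op: add
#       path: /spec/containers/0/resources/requests
#       value: {cpu: 250m}
yq eval '.json' kube-apiserver1+json.yaml
# output is the bare JSON-patch list kubeadm expects:
#   - op: add
#     path: /spec/containers/0/resources/requests
#     value: {cpu: 250m}
```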
@@ -111,8 +120,6 @@ post_kubeadm() {
# Patch coreDNS addon; ideally we would prevent kubeadm from resetting coreDNS to its defaults
kubectl patch deployment coredns -n kube-system --patch-file ${WORKDIR}/kubeadm/templates/patches/coredns0.yaml $LOG
rm -rf /host/tmp/patches
}
@@ -126,26 +133,28 @@ kubeadm_upgrade() {
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
# Update kubezero-values CM
kubectl get cm -n kube-system kubezero-values -o=yaml | \
kubectl get cm -n kubezero kubezero-values -o=yaml | \
yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
kubectl replace -f -
# update argo app
kubectl get application kubezero -n argocd -o yaml | \
kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
yq 'del (.spec.source.helm.values) | .spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
kubectl apply -f -
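# Note: unlike the plain-string `values` field, `valuesObject` keeps the helm values as
# structured YAML inside the Application spec, so yq can merge and patch them directly
# instead of round-tripping a quoted multi-line string (ArgoCD supports both fields)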
# finally remove annotation to allow argo to sync again
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
# Local node upgrade
render_kubeadm
render_kubeadm upgrade
pre_kubeadm
# Upgrade
_kubeadm upgrade apply -y --patches /tmp/patches
# Upgrade - we upload the new config first so we can use --patches during 1.30
_kubeadm init phase upload-config kubeadm
kubeadm upgrade apply --yes --patches /etc/kubernetes/patches $KUBE_VERSION --rootfs ${HOSTFS} $LOG
post_kubeadm
@@ -172,7 +181,7 @@ kubeadm_upgrade() {
control_plane_node() {
CMD=$1
render_kubeadm
render_kubeadm $CMD
# Ensure clean slate if bootstrap, restore PKI otherwise
if [[ "$CMD" =~ ^(bootstrap)$ ]]; then
@@ -193,9 +202,7 @@ control_plane_node() {
cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
# Always use the kubeadm kubectl config to avoid a chicken-and-egg problem with custom auth hooks
# Fallback to old config remove with 1.30 !!
cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config || \
cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config
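# kubeadm >= 1.29 splits cluster-admin access: admin.conf is bound via RBAC (needs a working
# apiserver), while super-admin.conf carries system:masters and keeps working during bootstrap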
# Only restore etcd data during "restore" and none exists already
if [[ "$CMD" =~ ^(restore)$ ]]; then
@@ -254,7 +261,7 @@ control_plane_node() {
yq eval -i '.etcd.state = "existing"
| .etcd.initialCluster = strenv(ETCD_INITIAL_CLUSTER)
' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
render_kubeadm
render_kubeadm join
fi
# Generate our custom etcd yaml
@@ -263,12 +270,7 @@ control_plane_node() {
_kubeadm init phase kubelet-start
# Remove conditional with 1.30
if [ -f ${HOSTFS}/etc/kubernetes/super-admin.conf ]; then
cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
else
cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
fi
cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
# Wait for api to be online
echo "Waiting for Kubernetes API to be online ..."
@@ -372,9 +374,7 @@ backup() {
# pki & cluster-admin access
cp -r ${HOSTFS}/etc/kubernetes/pki ${WORKDIR}
cp ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}
# Remove conditional with 1.30
[ -f ${HOSTFS}/etc/kubernetes/super-admin.conf ] && cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${WORKDIR}
cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${WORKDIR}
# Backup via restic
restic backup ${WORKDIR} -H $CLUSTERNAME --tag $CLUSTER_VERSION
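The backup lands in whatever restic repository the surrounding tooling configures (RESTIC_REPOSITORY etc. are assumed to be set); a hedged sketch for getting a snapshot back out, mirroring the flags of the backup call above:
```
restic snapshots -H $CLUSTERNAME --tag $CLUSTER_VERSION   # list matching snapshots
restic restore latest -H $CLUSTERNAME --target /tmp/restore
```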


@@ -34,11 +34,18 @@ function argo_used() {
# get kubezero-values from ArgoCD if available or use in-cluster CM without Argo
function get_kubezero_values() {
local _namespace="kube-system"
[ "$PLATFORM" == "gke" ] && _namespace=kubezero
### Remove with 1.31
### Migrate the kubezero CM from kube-system to kubezero NS during the 1.30 cycle
kubectl get cm kubezero-values -n kubezero > /dev/null || \
{ create_ns kubezero; kubectl get cm kubezero-values -n kube-system -o yaml | \
sed 's/^ namespace: kube-system/ namespace: kubezero/' | \
kubectl create -f - && \
kubectl delete cm kubezero-values -n kube-system ; }
###
argo_used && \
{ kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.values > ${WORKDIR}/kubezero-values.yaml; } || \
{ kubectl get configmap -n $_namespace kubezero-values -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml ;}
{ kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml ; } || \
{ kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml ; }
}
@@ -96,7 +103,7 @@ function argo_app_synced() {
function create_ns() {
local namespace=$1
if [ "$namespace" != "kube-system" ]; then
kubectl get ns $namespace || kubectl create ns $namespace
kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
fi
}
@@ -169,7 +176,7 @@ function _helm() {
[ -n "$_version" ] && targetRevision="--version $_version"
fi
yq eval '.spec.source.helm.values' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
if [ $action == "crds" ]; then
# Allow custom CRD handling


@@ -8,14 +8,6 @@ import yaml
def migrate(values):
"""Actual changes here"""
# argoCD moves to argo module
try:
if values["argocd"]["enabled"]:
values["argo"] = { "enabled": True, "argo-cd": values["argocd"] }
values.pop("argocd")
except KeyError:
pass
return values


@@ -2,7 +2,7 @@
set -eE
set -o pipefail
KUBE_VERSION=v1.29
KUBE_VERSION=v1.30
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
@@ -26,9 +26,9 @@ read -r
#echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
### v1.29
### v1.30
#
# upgrade modules
@@ -42,7 +42,7 @@ echo "Applying remaining KubeZero modules..."
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
# Final step is to commit the new argocd kubezero app
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP
# Trigger backup of upgraded cluster state
kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$KUBE_VERSION -n kube-system


@@ -2,7 +2,7 @@ apiVersion: v2
name: kubeadm
description: KubeZero Kubeadm cluster config
type: application
version: 1.29.7
version: 1.30.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:


@@ -2,8 +2,7 @@ apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: {{ .Chart.Version }}
clusterName: {{ .Values.global.clusterName }}
featureGates:
EtcdLearnerMode: true # becomes beta in 1.29
#featureGates:
# NonGracefulFailover: true
controlPlaneEndpoint: {{ .Values.api.endpoint }}
networking:


@@ -3,8 +3,10 @@ kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: {{ .Values.listenAddress }}
bindPort: {{ .Values.api.listenPort }}
{{- with .Values.patches }}
patches:
directory: /tmp/patches
directory: {{ . }}
{{- end }}
nodeRegistration:
criSocket: "unix:///var/run/crio/crio.sock"
ignorePreflightErrors:
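With `patches` set (render_kubeadm above passes `--set patches=/etc/kubernetes/patches`), the `with` block renders roughly as sketched here; without it, the `patches:` stanza is omitted entirely:
```
helm template $CHARTS/kubeadm --set patches=/etc/kubernetes/patches \
  -s templates/InitConfiguration.yaml
# ...
# patches:
#   directory: /etc/kubernetes/patches
# ...
```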


@@ -2,9 +2,8 @@
{{- /* Issues: MemoryQoS */ -}}
{{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
{{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
{{- /* v1.30: remove/beta KubeProxyDrainingTerminatingNodes */ -}}
{{- define "kubeadm.featuregates" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "KubeProxyDrainingTerminatingNodes" "ImageMaximumGCAge" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" }}
{{- if eq .return "csv" }}
{{- range $key := $gates }}
{{- $key }}=true,


@@ -36,3 +36,5 @@ etcd:
# -- Set to false for openrc, e.g. on Gentoo or Alpine
systemd: false
protectKernelDefaults: false
# patches: /tmp/patches


@@ -1,6 +1,6 @@
# kubezero-addons
![Version: 0.8.8](https://img.shields.io/badge/Version-0.8.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29](https://img.shields.io/badge/AppVersion-v1.29-informational?style=flat-square)
![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29](https://img.shields.io/badge/AppVersion-v1.29-informational?style=flat-square)
KubeZero umbrella chart for various optional cluster addons
@@ -18,12 +18,12 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.0 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.14.5 |
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.37.0 |
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.16.0 |
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.1 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.15.0 |
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.43.0 |
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.16.2 |
| https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.0 |
| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.1 |
# MetalLB
@@ -101,7 +101,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| aws-node-termination-handler.useProviderId | bool | `true` | |
| awsNeuron.enabled | bool | `false` | |
| awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
| awsNeuron.image.tag | string | `"2.19.16.0"` | |
| awsNeuron.image.tag | string | `"2.22.4.0"` | |
| cluster-autoscaler.autoDiscovery.clusterName | string | `""` | |
| cluster-autoscaler.awsRegion | string | `"us-west-2"` | |
| cluster-autoscaler.enabled | bool | `false` | |
@@ -110,7 +110,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
| cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
| cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
| cluster-autoscaler.image.tag | string | `"v1.29.4"` | |
| cluster-autoscaler.image.tag | string | `"v1.30.2"` | |
| cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cluster-autoscaler.podDisruptionBudget | bool | `false` | |
| cluster-autoscaler.prometheusRule.enabled | bool | `false` | |


@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.22.0
appVersion: 1.22.1
description: A Helm chart for the AWS Node Termination Handler.
home: https://github.com/aws/aws-node-termination-handler/
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -21,4 +21,4 @@ name: aws-node-termination-handler
sources:
- https://github.com/aws/aws-node-termination-handler/
type: application
version: 0.24.0
version: 0.24.1


@@ -6,6 +6,14 @@ set -ex
login_ecr_public
update_helm
# Abandon for now in favor of KRR
# get latest VPA resources, from https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/hack/vpa-process-yamls.sh
# COMPONENTS="vpa-v1-crd-gen vpa-rbac updater-deployment recommender-deployment admission-controller-deployment"
# mkdir -p templates/vertical-pod-autoscaler
#for c in $COMPONENTS; do
# wget -q -O templates/vertical-pod-autoscaler/${c}.yaml https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/vertical-pod-autoscaler/deploy/${c}.yaml
#done
patch_chart aws-node-termination-handler
patch_chart aws-eks-asg-rolling-update-handler


@@ -160,7 +160,7 @@ awsNeuron:
image:
name: public.ecr.aws/neuron/neuron-device-plugin
tag: 2.19.16.0
tag: 2.22.4.0
nvidia-device-plugin:
enabled: false
@@ -200,7 +200,7 @@ cluster-autoscaler:
image:
repository: registry.k8s.io/autoscaling/cluster-autoscaler
tag: v1.29.4
tag: v1.30.2
autoDiscovery:
clusterName: ""


@@ -17,14 +17,6 @@ Installs Istio Ingress Gateways, requires kubezero-istio to be installed !
{{ template "chart.valuesSection" . }}
## ToDo
- exclude certain ports from any Envoyfilters
```
- filter_disabled:
destination_port_range:
end: 1026
start: 1025
```
## Resources
- https://github.com/cilium/cilium/blob/main/operator/pkg/model/translation/envoy_listener.go#L134


@@ -1,6 +1,6 @@
# kubezero-network
![Version: 0.5.3](https://img.shields.io/badge/Version-0.5.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.5.4](https://img.shields.io/badge/Version-0.5.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things network
@@ -19,9 +19,9 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://haproxytech.github.io/helm-charts | haproxy | 1.22.0 |
| https://helm.cilium.io/ | cilium | 1.15.7 |
| https://metallb.github.io/metallb | metallb | 0.14.7 |
| https://haproxytech.github.io/helm-charts | haproxy | 1.23.0 |
| https://helm.cilium.io/ | cilium | 1.16.2 |
| https://metallb.github.io/metallb | metallb | 0.14.8 |
## Values
@@ -35,6 +35,7 @@ Kubernetes: `>= 1.26.0`
| cilium.cni.exclusive | bool | `false` | |
| cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
| cilium.enabled | bool | `false` | |
| cilium.envoy.enabled | bool | `false` | |
| cilium.hubble.enabled | bool | `false` | |
| cilium.hubble.relay.enabled | bool | `false` | |
| cilium.hubble.tls.auto.certManagerIssuerRef.group | string | `"cert-manager.io"` | |
@@ -42,6 +43,7 @@ Kubernetes: `>= 1.26.0`
| cilium.hubble.tls.auto.certManagerIssuerRef.name | string | `"kubezero-local-ca-issuer"` | |
| cilium.hubble.tls.auto.method | string | `"cert-manager"` | |
| cilium.hubble.ui.enabled | bool | `false` | |
| cilium.image.pullPolicy | string | `"Never"` | |
| cilium.image.useDigest | bool | `false` | |
| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
| cilium.l7Proxy | bool | `false` | |
@@ -60,6 +62,7 @@ Kubernetes: `>= 1.26.0`
| cilium.resources.requests.cpu | string | `"10m"` | |
| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.routingMode | string | `"tunnel"` | |
| cilium.sysctlfix.enabled | bool | `false` | |
| cilium.tunnelProtocol | string | `"geneve"` | |
| haproxy.PodDisruptionBudget.enable | bool | `false` | |
| haproxy.PodDisruptionBudget.minAvailable | int | `1` | |

File diff suppressed because one or more lines are too long


@@ -29,7 +29,7 @@ spec:
- name: kube-multus
image: {{ .Values.multus.image.repository }}:{{ .Values.multus.image.tag }}
# Always use cached images
imagePullPolicy: Never
imagePullPolicy: {{ .Values.multus.image.pullPolicy }}
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=/tmp/multus-conf/00-multus.conf"


@@ -30,12 +30,11 @@ cilium:
# Always use cached images
image:
useDigest: false
pullPolicy: Never
resources:
requests:
cpu: 10m
memory: 256Mi
memory: 160Mi
limits:
memory: 1024Mi
# cpu: 4000m
@@ -60,7 +59,8 @@ cilium:
# Keep it simple for now
l7Proxy: false
envoy:
enabled: false
#rollOutCiliumPods: true
cgroup:


@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
version: 1.29.7-1
version: 1.30.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:


@@ -25,8 +25,8 @@ spec:
repoURL: {{ .Values.kubezero.repoURL }}
targetRevision: {{ default .Values.kubezero.targetRevision ( index .Values $name "targetRevision" ) | quote }}
helm:
values: |
{{- include (print $name "-values") $ | nindent 8 }}
valuesObject:
{{- include (print $name "-values") $ | nindent 8 }}
destination:
server: {{ .Values.kubezero.server }}
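A quick way to inspect what a module's Application renders to after this switch (the module name `addons` and chart path are illustrative):
```
helm template charts/kubezero -f charts/kubezero/values.yaml \
  -s templates/addons.yaml | yq '.spec.source.helm.valuesObject'
```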


@@ -1,5 +1,4 @@
{{- define "addons-values" }}
clusterBackup:
enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") .Values.addons.clusterBackup.enabled) }}


@@ -1,12 +1,21 @@
{{- define "network-values" }}
multus:
enabled: true
clusterNetwork: "cilium"
{{- if eq .Values.global.platform "aws" }}
image:
pullPolicy: Never
{{- end }}
cilium:
enabled: true
{{- if eq .Values.global.platform "aws" }}
image:
pullPolicy: Never
{{- end }}
cluster:
name: {{ .Values.global.clusterName }}
{{- with .Values.network.cilium.cluster.id }}


@@ -17,7 +17,7 @@ global:
addons:
enabled: true
targetRevision: 0.8.8
targetRevision: 0.8.9
external-dns:
enabled: false
forseti:
@@ -36,7 +36,7 @@ addons:
network:
enabled: true
retain: true
targetRevision: 0.5.3
targetRevision: 0.5.4
cilium:
cluster: {}

docs/v1.30.md Normal file

@@ -0,0 +1,16 @@
# ![k8s-v1.30](images/k8s-v130.png) KubeZero 1.30 - Uwubernetes
## What's new - Major themes
- all KubeZero and support AMIs are now based on Alpine 3.20.3
- reduced memory consumption of the CNI agent on each node
## Version upgrades
- cilium 1.16.2
- istio 1.22.3
- ArgoCD 2.11.5
- Prometheus 2.53 / Grafana 11.1 (fixing many of the previous warnings)
- ...
## Resources
- [Kubernetes v1.30 upstream release blog](https://kubernetes.io/blog/2024/04/17/kubernetes-v1-30-release/)