feat: first working v1.30 base

parent e6248e9765
commit a3166859af

 Dockerfile | 12 ++++++------

@@ -3,9 +3,9 @@ ARG ALPINE_VERSION=3.20
 FROM docker.io/alpine:${ALPINE_VERSION}
 
 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.29.7
-ARG SECRETS_VERSION=4.6.0
-ARG VALS_VERSION=0.37.3
+ARG KUBE_VERSION=1.30.5
+ARG SECRETS_VERSION=4.6.1
+ARG VALS_VERSION=0.37.5
 
 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \

@@ -22,11 +22,11 @@ RUN cd /etc/apk/keys && \
     py3-yaml \
     restic \
     helm \
+    etcd-ctl@edge-community \
     cri-tools@kubezero \
-    kubeadm@kubezero~=${KUBE_VERSION} \
-    kubectl@kubezero~=${KUBE_VERSION} \
     etcdhelper@kubezero \
-    etcd-ctl@edge-testing
+    kubeadm@kubezero~=${KUBE_VERSION} \
+    kubectl@kubezero~=${KUBE_VERSION}
 
 RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
     mkdir -p /var/lib/kubezero

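A note on the `~=` pins above: apk's `~=` operator does a fuzzy version match, so bumping `KUBE_VERSION` to 1.30.5 still accepts any package revision of that upstream version. A minimal sketch (package names as in this Dockerfile, resolved revisions hypothetical):

```
# KUBE_VERSION=1.30.5 expands the pin to:
apk add kubeadm@kubezero~=1.30.5   # matches 1.30.5-r0, 1.30.5-r1, ...
apk add kubectl@kubezero~=1.30.5   # same upstream version, any -rN revision
```
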
@@ -1,8 +1,8 @@
 # Cluster upgrade flow
 
-## During 1.23 upgrade
-- create new kubezero-values CM if not exists yet, by merging parts of the legacy /etc/kubernetes/kubeadm-values.yaml values with potentially existing values from kubezero ArgoCD app values
+## Hard refresh
+```
+kubectl annotate app/kubezero -n argocd argocd.argoproj.io/refresh="hard"
+```
 
 # General flow

@@ -47,15 +47,24 @@ _kubeadm() {
 
 # Render cluster config
 render_kubeadm() {
-  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
+  local phase=$1
+
+  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} \
+    -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml \
+    --set patches=/etc/kubernetes/patches
 
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster Init Join KubeProxy Kubelet; do
+  for f in Cluster KubeProxy Kubelet; do
     # echo "---" >> /etc/kubernetes/kubeadm.yaml
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done
 
+  # skip InitConfig during upgrade
+  if [ "$phase" != "upgrade" ]; then
+    cat ${WORKDIR}/kubeadm/templates/InitConfiguration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
+  fi
+
   # "uncloak" the json patches after they got processed by helm
   for s in apiserver controller-manager scheduler; do
     yq eval '.json' ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml > /tmp/_tmp.yaml && \

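For orientation, a sketch of the "uncloak" step above: the kubeadm chart wraps each JSON patch under a `json:` key so it survives Helm templating, and `yq eval '.json'` unwraps it into the plain RFC 6902 patch file kubeadm's patches mechanism expects. The patch content below is hypothetical:

```
# kube-apiserver1+json.yaml as rendered by helm ("cloaked"):
json:
  - op: add
    path: /spec/containers/0/args/-
    value: --request-timeout=300s

# after yq eval '.json', the file kubeadm consumes is just the patch itself:
- op: add
  path: /spec/containers/0/args/-
  value: --request-timeout=300s
```
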
@@ -98,7 +107,7 @@ pre_kubeadm() {
   fi
 
   # copy patches to host to make --rootfs of kubeadm work
-  cp -r ${WORKDIR}/kubeadm/templates/patches /host/tmp/
+  cp -r ${WORKDIR}/kubeadm/templates/patches ${HOSTFS}/etc/kubernetes
 }
 
 
@@ -111,8 +120,6 @@ post_kubeadm() {
 
   # Patch coreDNS addon, ideally we prevent kubeadm to reset coreDNS to its defaults
   kubectl patch deployment coredns -n kube-system --patch-file ${WORKDIR}/kubeadm/templates/patches/coredns0.yaml $LOG
-
-  rm -rf /host/tmp/patches
 }
 
 

@@ -126,26 +133,28 @@ kubeadm_upgrade() {
     migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
 
     # Update kubezero-values CM
-    kubectl get cm -n kube-system kubezero-values -o=yaml | \
+    kubectl get cm -n kubezero kubezero-values -o=yaml | \
       yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
       kubectl replace -f -
 
     # update argo app
     kubectl get application kubezero -n argocd -o yaml | \
       kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
-      yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
+      yq 'del (.spec.source.helm.values) | .spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
       kubectl apply -f -
 
     # finally remove annotation to allow argo to sync again
     kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
 
   # Local node upgrade
-  render_kubeadm
+  render_kubeadm upgrade
 
   pre_kubeadm
 
-  # Upgrade
-  _kubeadm upgrade apply -y --patches /tmp/patches
+  # Upgrade - we upload the new config first so we can use --patch during 1.30
+  _kubeadm init phase upload-config kubeadm
+
+  kubeadm upgrade apply --yes --patches /etc/kubernetes/patches $KUBE_VERSION --rootfs ${HOSTFS} $LOG
 
   post_kubeadm
 

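For context, a sketch of what the yq rewrite above does to the Application manifest: Argo CD's `spec.source.helm.values` holds YAML embedded in a string, while `valuesObject` holds structured YAML. The field contents here are illustrative:

```
# before: values is one YAML-encoded string
spec:
  source:
    helm:
      values: |
        addons:
          enabled: true

# after: valuesObject is a native YAML map
spec:
  source:
    helm:
      valuesObject:
        addons:
          enabled: true
```
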
@@ -172,7 +181,7 @@ kubeadm_upgrade() {
 control_plane_node() {
   CMD=$1
 
-  render_kubeadm
+  render_kubeadm $CMD
 
   # Ensure clean slate if bootstrap, restore PKI otherwise
   if [[ "$CMD" =~ ^(bootstrap)$ ]]; then

@@ -193,9 +202,7 @@ control_plane_node() {
     cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
 
     # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
-    # Fallback to old config remove with 1.30 !!
-    cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config || \
-      cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
+    cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config
 
     # Only restore etcd data during "restore" and none exists already
     if [[ "$CMD" =~ ^(restore)$ ]]; then

@@ -254,7 +261,7 @@ control_plane_node() {
     yq eval -i '.etcd.state = "existing"
       | .etcd.initialCluster = strenv(ETCD_INITIAL_CLUSTER)
       ' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
-    render_kubeadm
+    render_kubeadm join
   fi
 
   # Generate our custom etcd yaml

@@ -263,12 +270,7 @@ control_plane_node() {
 
   _kubeadm init phase kubelet-start
 
-  # Remove conditional with 1.30
-  if [ -f ${HOSTFS}/etc/kubernetes/super-admin.conf ]; then
-    cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
-  else
-    cp ${HOSTFS}/etc/kubernetes/admin.conf ${HOSTFS}/root/.kube/config
-  fi
+  cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
 
   # Wait for api to be online
   echo "Waiting for Kubernetes API to be online ..."

@@ -372,9 +374,7 @@ backup() {
   # pki & cluster-admin access
   cp -r ${HOSTFS}/etc/kubernetes/pki ${WORKDIR}
   cp ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}
-
-  # Remove conditional with 1.30
-  [ -f ${HOSTFS}/etc/kubernetes/super-admin.conf ] && cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${WORKDIR}
+  cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${WORKDIR}
 
   # Backup via restic
   restic backup ${WORKDIR} -H $CLUSTERNAME --tag $CLUSTER_VERSION

@@ -34,11 +34,18 @@ function argo_used() {
 
 # get kubezero-values from ArgoCD if available or use in-cluster CM without Argo
 function get_kubezero_values() {
-  local _namespace="kube-system"
-  [ "$PLATFORM" == "gke" ] && _namespace=kubezero
+  ### Remove with 1.31
+  ### Migrate the kubezero CM from kube-system to kubezero NS during the 1.30 cycle
+  kubectl get cm kubezero-values -n kubezero > /dev/null || \
+    { create_ns kubezero; kubectl get cm kubezero-values -n kube-system -o yaml | \
+      sed 's/^  namespace: kube-system/  namespace: kubezero/' | \
+      kubectl create -f - && \
+      kubectl delete cm kubezero-values -n kube-system ; }
+  ###
 
   argo_used && \
-    { kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.values > ${WORKDIR}/kubezero-values.yaml; } || \
-    { kubectl get configmap -n $_namespace kubezero-values -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml ;}
+    { kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml ; } || \
+    { kubectl get configmap kubezero-values -n kubezero -o yaml | yq '.data."values.yaml"' > ${WORKDIR}/kubezero-values.yaml ; }
 }
 

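A quick way to sanity-check the one-time ConfigMap migration above, using plain kubectl (nothing kubezero-specific is assumed):

```
# the CM should now exist in the kubezero namespace ...
kubectl get cm kubezero-values -n kubezero -o name   # -> configmap/kubezero-values
# ... and be gone from kube-system
kubectl get cm kubezero-values -n kube-system 2>/dev/null || echo "migrated"
```
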
@@ -96,7 +103,7 @@ function argo_app_synced() {
 function create_ns() {
   local namespace=$1
   if [ "$namespace" != "kube-system" ]; then
-    kubectl get ns $namespace || kubectl create ns $namespace
+    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
   fi
 }
 

@@ -169,7 +176,7 @@ function _helm() {
     [ -n "$_version" ] && targetRevision="--version $_version"
   fi
 
-  yq eval '.spec.source.helm.values' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
+  yq eval '.spec.source.helm.valuesObject' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
 
   if [ $action == "crds" ]; then
     # Allow custom CRD handling

@@ -8,14 +8,6 @@ import yaml
 def migrate(values):
     """Actual changes here"""
 
-    # argoCD moves to argo module
-    try:
-        if values["argocd"]["enabled"]:
-            values["argo"] = { "enabled": True, "argo-cd": values["argocd"] }
-            values.pop("argocd")
-    except KeyError:
-        pass
-
     return values
 
 

@@ -2,7 +2,7 @@
 set -eE
 set -o pipefail
 
-KUBE_VERSION=v1.29
+KUBE_VERSION=v1.30
 
 ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
 

@@ -26,9 +26,9 @@ read -r
 
 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
-#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
+#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
 
-### v1.29
+### v1.30
 #
 
 # upgrade modules

@@ -42,7 +42,7 @@ echo "Applying remaining KubeZero modules..."
 control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
 
 # Final step is to commit the new argocd kubezero app
-kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP
+kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP
 
 # Trigger backup of upgraded cluster state
 kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$KUBE_VERSION -n kube-system

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.29.7
+version: 1.30.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -2,8 +2,7 @@ apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-featureGates:
-  EtcdLearnerMode: true # becomes beta in 1.29
+#featureGates:
 # NonGracefulFailover: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:

@@ -3,8 +3,10 @@ kind: InitConfiguration
 localAPIEndpoint:
   advertiseAddress: {{ .Values.listenAddress }}
   bindPort: {{ .Values.api.listenPort }}
+{{- with .Values.patches }}
 patches:
-  directory: /tmp/patches
+  directory: {{ . }}
+{{- end }}
 nodeRegistration:
   criSocket: "unix:///var/run/crio/crio.sock"
   ignorePreflightErrors:

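A sketch of how the new guard renders, assuming `patches` is set via `--set patches=/etc/kubernetes/patches` as in kubezero.sh above:

```
# with .Values.patches set, the template emits:
patches:
  directory: /etc/kubernetes/patches

# with .Values.patches unset, the whole patches: block is omitted,
# so kubeadm falls back to applying no patches at all
```
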
@@ -2,9 +2,8 @@
 {{- /* Issues: MemoryQoS */ -}}
 {{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
 {{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
-{{- /* v1.30: remove/beta KubeProxyDrainingTerminatingNodes */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "KubeProxyDrainingTerminatingNodes" "ImageMaximumGCAge" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,

@@ -36,3 +36,5 @@ etcd:
   # -- Set to false for openrc, eg. on Gentoo or Alpine
   systemd: false
   protectKernelDefaults: false
+
+# patches: /tmp/patches

@@ -1,6 +1,6 @@
 # kubezero-addons
 
-![Version: 0.8.8](https://img.shields.io/badge/Version-0.8.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29](https://img.shields.io/badge/AppVersion-v1.29-informational?style=flat-square)
+![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29](https://img.shields.io/badge/AppVersion-v1.29-informational?style=flat-square)
 
 KubeZero umbrella chart for various optional cluster addons
 

@@ -18,12 +18,12 @@ Kubernetes: `>= 1.26.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.0 |
-| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.14.5 |
-| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.37.0 |
-| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.16.0 |
+| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.1 |
+| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.15.0 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.43.0 |
+| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.16.2 |
 | https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
-| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.0 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.1 |
 
 # MetalLB
 

@@ -101,7 +101,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.useProviderId | bool | `true` | |
 | awsNeuron.enabled | bool | `false` | |
 | awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
-| awsNeuron.image.tag | string | `"2.19.16.0"` | |
+| awsNeuron.image.tag | string | `"2.22.4.0"` | |
 | cluster-autoscaler.autoDiscovery.clusterName | string | `""` | |
 | cluster-autoscaler.awsRegion | string | `"us-west-2"` | |
 | cluster-autoscaler.enabled | bool | `false` | |

@@ -110,7 +110,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
 | cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
-| cluster-autoscaler.image.tag | string | `"v1.29.4"` | |
+| cluster-autoscaler.image.tag | string | `"v1.30.2"` | |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` | |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` | |

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.22.0
+appVersion: 1.22.1
 description: A Helm chart for the AWS Node Termination Handler.
 home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png

@@ -21,4 +21,4 @@ name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
 type: application
-version: 0.24.0
+version: 0.24.1

@@ -6,6 +6,14 @@ set -ex
 login_ecr_public
 update_helm
 
+# Abandon for now in favor of KRR
+# get latest VPA resources, from https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/hack/vpa-process-yamls.sh
+# COMPONENTS="vpa-v1-crd-gen vpa-rbac updater-deployment recommender-deployment admission-controller-deployment"
+# mkdir -p templates/vertical-pod-autoscaler
+#for c in $COMPONENTS; do
+#  wget -q -O templates/vertical-pod-autoscaler/${c}.yaml https://raw.githubusercontent.com/kubernetes/autoscaler/refs/heads/master/vertical-pod-autoscaler/deploy/${c}.yaml
+#done
+
 patch_chart aws-node-termination-handler
 patch_chart aws-eks-asg-rolling-update-handler
 

@@ -160,7 +160,7 @@ awsNeuron:
 
   image:
     name: public.ecr.aws/neuron/neuron-device-plugin
-    tag: 2.19.16.0
+    tag: 2.22.4.0
 
 nvidia-device-plugin:
   enabled: false

@@ -200,7 +200,7 @@ cluster-autoscaler:
 
   image:
     repository: registry.k8s.io/autoscaling/cluster-autoscaler
-    tag: v1.29.4
+    tag: v1.30.2
 
   autoDiscovery:
     clusterName: ""

@@ -17,14 +17,6 @@ Installs Istio Ingress Gateways, requires kubezero-istio to be installed !
 
 {{ template "chart.valuesSection" . }}
 
-## ToDo
-- exclude certain ports from any Envoyfilters
-```
-- filter_disabled:
-    destination_port_range:
-      end: 1026
-      start: 1025
-```
 ## Resources
 
 - https://github.com/cilium/cilium/blob/main/operator/pkg/model/translation/envoy_listener.go#L134

@@ -1,6 +1,6 @@
 # kubezero-network
 
-![Version: 0.5.3](https://img.shields.io/badge/Version-0.5.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.4](https://img.shields.io/badge/Version-0.5.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero umbrella chart for all things network
 

@@ -19,9 +19,9 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://haproxytech.github.io/helm-charts | haproxy | 1.22.0 |
-| https://helm.cilium.io/ | cilium | 1.15.7 |
-| https://metallb.github.io/metallb | metallb | 0.14.7 |
+| https://haproxytech.github.io/helm-charts | haproxy | 1.23.0 |
+| https://helm.cilium.io/ | cilium | 1.16.2 |
+| https://metallb.github.io/metallb | metallb | 0.14.8 |
 
 ## Values
 

@@ -35,6 +35,7 @@ Kubernetes: `>= 1.26.0`
 | cilium.cni.exclusive | bool | `false` | |
 | cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
 | cilium.enabled | bool | `false` | |
+| cilium.envoy.enabled | bool | `false` | |
 | cilium.hubble.enabled | bool | `false` | |
 | cilium.hubble.relay.enabled | bool | `false` | |
 | cilium.hubble.tls.auto.certManagerIssuerRef.group | string | `"cert-manager.io"` | |

@@ -42,6 +43,7 @@ Kubernetes: `>= 1.26.0`
 | cilium.hubble.tls.auto.certManagerIssuerRef.name | string | `"kubezero-local-ca-issuer"` | |
 | cilium.hubble.tls.auto.method | string | `"cert-manager"` | |
 | cilium.hubble.ui.enabled | bool | `false` | |
+| cilium.image.pullPolicy | string | `"Never"` | |
 | cilium.image.useDigest | bool | `false` | |
 | cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
 | cilium.l7Proxy | bool | `false` | |

@@ -60,6 +62,7 @@ Kubernetes: `>= 1.26.0`
 | cilium.resources.requests.cpu | string | `"10m"` | |
 | cilium.resources.requests.memory | string | `"256Mi"` | |
 | cilium.routingMode | string | `"tunnel"` | |
+| cilium.sysctlfix.enabled | bool | `false` | |
 | cilium.tunnelProtocol | string | `"geneve"` | |
 | haproxy.PodDisruptionBudget.enable | bool | `false` | |
 | haproxy.PodDisruptionBudget.minAvailable | int | `1` | |

File diff suppressed because one or more lines are too long

@@ -29,7 +29,7 @@ spec:
       - name: kube-multus
         image: {{ .Values.multus.image.repository }}:{{ .Values.multus.image.tag }}
         # Always used cached images
-        imagePullPolicy: Never
+        imagePullPolicy: {{ .Values.multus.image.pullPolicy }}
         command: ["/entrypoint.sh"]
         args:
         - "--multus-conf-file=/tmp/multus-conf/00-multus.conf"

@@ -30,12 +30,11 @@ cilium:
   # Always use cached images
   image:
     useDigest: false
-    pullPolicy: Never
 
   resources:
     requests:
       cpu: 10m
-      memory: 256Mi
+      memory: 160Mi
     limits:
       memory: 1024Mi
       # cpu: 4000m

@@ -60,7 +59,8 @@ cilium:
 
   # Keep it simple for now
   l7Proxy: false
+  envoy:
+    enabled: false
   #rollOutCiliumPods: true
 
   cgroup:

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.29.7-1
+version: 1.30.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -25,8 +25,8 @@ spec:
     repoURL: {{ .Values.kubezero.repoURL }}
     targetRevision: {{ default .Values.kubezero.targetRevision ( index .Values $name "targetRevision" ) | quote }}
     helm:
-      values: |
+      valuesObject:
        {{- include (print $name "-values") $ | nindent 8 }}
 
   destination:
     server: {{ .Values.kubezero.server }}

@@ -1,5 +1,4 @@
 {{- define "addons-values" }}
-
 clusterBackup:
   enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") .Values.addons.clusterBackup.enabled) }}
 

@@ -1,12 +1,21 @@
 {{- define "network-values" }}
 
 multus:
   enabled: true
   clusterNetwork: "cilium"
 
+{{- if eq .Values.global.platform "aws" }}
+  image:
+    pullPolicy: Never
+{{- end }}
+
 cilium:
   enabled: true
 
+{{- if eq .Values.global.platform "aws" }}
+  image:
+    pullPolicy: Never
+{{- end }}
 
   cluster:
     name: {{ .Values.global.clusterName }}
     {{- with .Values.network.cilium.cluster.id }}

@@ -17,7 +17,7 @@ global:
 
 addons:
   enabled: true
-  targetRevision: 0.8.8
+  targetRevision: 0.8.9
   external-dns:
     enabled: false
   forseti:

@@ -36,7 +36,7 @@ addons:
 network:
   enabled: true
   retain: true
-  targetRevision: 0.5.3
+  targetRevision: 0.5.4
   cilium:
     cluster: {}
 

docs/v1.30.md (new file, 16 lines)

@@ -0,0 +1,16 @@
+# ![k8s-v1.30](images/k8s-v130.png) KubeZero 1.30 - Uwubernetes
+
+## What's new - Major themes
+- all KubeZero and support AMIs based on Alpine 3.20.3
+- reduced memory consumption of CNI agent on each node
+
+
+## Version upgrades
+- cilium 1.16.2
+- istio 1.22.3
+- ArgoCD 2.11.5
+- Prometheus 2.53 / Grafana 11.1 ( fixing many of the previous warnings )
+- ...
+
+## Resources
+- [Kubernetes v1.30 upstream release blog](https://kubernetes.io/blog/2024/04/17/kubernetes-v1-30-release/)