Compare commits

..

11 Commits

15 changed files with 67 additions and 46 deletions

View File

@@ -2,7 +2,13 @@
 # All things BEFORE the first controller / control plane upgrade
 pre_control_plane_upgrade_cluster() {
-  echo
+  if [ "$PLATFORM" != "gke" ];then
+    # patch multus DS to ONLY run pods on 1.31 controllers
+    kubectl patch ds kube-multus-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
+    # patch kube-proxy DS to ONLY run pods on 1.31 controllers
+    kubectl patch ds kube-proxy -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
+  fi
 }
@@ -16,7 +22,20 @@ post_control_plane_upgrade_cluster() {
 pre_cluster_upgrade_final() {
   set +e
-  echo
+  if [ "$PLATFORM" != "gke" ];then
+    # cleanup multus
+    kubectl delete clusterrolebinding multus
+    kubectl delete clusterrole multus
+    kubectl delete serviceaccount multus -n kube-system
+    kubectl delete cm multus-cni-config -n kube-system
+    kubectl delete ds kube-multus-ds -n kube-system
+    kubectl delete NetworkAttachmentDefinition cilium
+    kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io
+    # remove kube-proxy
+    kubectl -n kube-system delete ds kube-proxy
+    kubectl -n kube-system delete cm kube-proxy
+  fi
   set -e
 }
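One way to confirm the pinning took effect before the controller upgrade proceeds (a hypothetical check, not part of the commit):

    # both DaemonSets should now carry the v1.31.6 nodeSelector
    kubectl get ds kube-multus-ds kube-proxy -n kube-system \
      -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.template.spec.nodeSelector}{"\n"}{end}'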

View File

@@ -63,7 +63,7 @@ render_kubeadm() {
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster KubeProxy Kubelet; do
+  for f in Cluster Kubelet; do
     # echo "---" >> /etc/kubernetes/kubeadm.yaml
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done
@@ -169,7 +169,7 @@ kubeadm_upgrade() {
   else
     pre_cluster_upgrade_final
 
-    _kubeadm upgrade apply phase addon all $KUBE_VERSION
+    _kubeadm upgrade apply phase addon coredns $KUBE_VERSION
 
     post_cluster_upgrade_final
@@ -239,7 +239,7 @@ control_plane_node() {
   if [[ "$CMD" =~ ^(join)$ ]]; then
     # Delete any former self in case forseti did not delete yet
     kubectl delete node ${NODENAME} --wait=true || true
-    # Wait for all pods to be deleted otherwise we end up with stale pods eg. kube-proxy and all goes to ....
+    # Wait for all pods to be deleted otherwise we end up with stale pods
    kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}
 
     # get current running etcd pods for etcdctl commands
@@ -309,8 +309,9 @@ control_plane_node() {
   _kubeadm init phase mark-control-plane
   _kubeadm init phase kubelet-finalize all
 
+  # we skip kube-proxy
   if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
-    _kubeadm init phase addon all
+    _kubeadm init phase addon coredns
   fi
 
   post_kubeadm
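With kube-proxy dropped from the addon set, only CoreDNS is reconciled. Assuming `_kubeadm` is a thin wrapper around plain kubeadm pointed at the rendered config, the equivalent direct calls would be roughly:

    kubeadm init phase addon coredns --config /etc/kubernetes/kubeadm.yaml
    kubeadm upgrade apply phase addon coredns $KUBE_VERSION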

View File

@@ -81,15 +81,16 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }
 
-function ensure_kubezero_secret_key() {
-  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
-  local key=""
-  local val=""
-
-  for key in $@; do
-    val=$(echo "$secret" | yq ".data.\"$key\"")
+function ensure_kubezero_secret_key() {
+  local secret="$(kubectl get secret -n $ns $secret -o yaml)"
+  local key
+  local val
+
+  for key in $1; do
+    val=$(echo $secret | yq ".data.\"$key\"")
     if [ "$val" == "null" ]; then
-      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
+      set_kubezero_secret $key ""
     fi
   done
 }
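Note the reworked helper now reads `$ns` and `$secret` from the calling scope rather than hard-coding kubezero/kubezero-secrets, and takes all keys as one space-separated argument. A hedged usage sketch (the key names are hypothetical):

    # hypothetical caller; ns/secret feed the kubectl lookup inside the helper
    ns=kubezero
    secret=kubezero-secrets
    ensure_kubezero_secret_key "dex.clientSecret grafana.adminPassword"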

View File

@@ -47,7 +47,6 @@ Kubernetes: `>= 1.32.0-0`
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
-- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 - https://github.com/awslabs/amazon-eks-ami

View File

@@ -22,7 +22,6 @@
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
-- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 - https://github.com/awslabs/amazon-eks-ami

View File

@@ -29,12 +29,6 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
-  - level: None
-    users: ["system:kube-proxy"]
-    verbs: ["watch"]
-    resources:
-      - group: "" # core
-        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.

View File

@@ -6,6 +6,8 @@ featureGates:
   ControlPlaneKubeletLocalMode: true
   NodeLocalCRISocket: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
+proxy:
+  disabled: true
 networking:
   podSubnet: 10.244.0.0/16
 etcd:
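`proxy.disabled: true` is the kubeadm v1beta4 ClusterConfiguration switch that stops kubeadm from installing or upgrading the kube-proxy addon; it supersedes the deleted KubeProxyConfiguration template below. A quick sanity check against the assembled config (a sketch, assuming the same yq already used by the repo's helpers):

    yq 'select(.kind == "ClusterConfiguration") | .proxy.disabled' ${HOSTFS}/etc/kubernetes/kubeadm.yaml
    # expected output: true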

View File

@@ -7,6 +7,8 @@ localAPIEndpoint:
 patches:
   directory: {{ . }}
 {{- end }}
+skipPhases:
+  - addon/kube-proxy
 nodeRegistration:
   criSocket: "unix:///run/containerd/containerd.sock"
   ignorePreflightErrors:
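The init configuration also skips the kube-proxy addon phase explicitly, as belt and braces alongside `proxy.disabled`. After a node bootstraps, the DaemonSet should simply not exist (a hypothetical check):

    kubectl -n kube-system get ds kube-proxy
    # expected: Error from server (NotFound): daemonsets.apps "kube-proxy" not found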

View File

@@ -1,10 +0,0 @@
-apiVersion: kubeproxy.config.k8s.io/v1alpha1
-kind: KubeProxyConfiguration
-# kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
-metricsBindAddress: "0.0.0.0:10249"
-mode: "iptables"
-logging:
-  format: json
-iptables:
-  localhostNodePorts: false
-#nodePortAddresses: primary
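Deleting the kube-proxy DaemonSet does not flush the iptables rules it installed; they linger on each node until reboot. Cilium's kube-proxy-free migration guide suggests a one-off cleanup along these lines (a sketch, run as root per node and with care):

    # drop every KUBE-* chain and rule left behind by kube-proxy
    iptables-save | grep -v KUBE | iptables-restore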

View File

@@ -3,12 +3,6 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
-  - level: None
-    users: ["system:kube-proxy"]
-    verbs: ["watch"]
-    resources:
-      - group: "" # core
-        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.

View File

@@ -30,7 +30,7 @@ dependencies:
     repository: https://aquasecurity.github.io/helm-charts/
     condition: trivy.enabled
   - name: renovate
-    version: 40.35.3
+    version: 40.36.8
     repository: https://docs.renovatebot.com/helm-charts
     condition: renovate.enabled
 kubeVersion: ">= 1.25.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-network
 
-![Version: 0.5.8](https://img.shields.io/badge/Version-0.5.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.9](https://img.shields.io/badge/Version-0.5.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero umbrella chart for all things network
 
@@ -20,7 +20,7 @@ Kubernetes: `>= 1.30.0-0`
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
-| https://helm.cilium.io/ | cilium | 1.17.3 |
+| https://helm.cilium.io/ | cilium | 1.17.4 |
 | https://metallb.github.io/metallb | metallb | 0.14.9 |
 
 ## Values
 
@@ -45,6 +45,8 @@ Kubernetes: `>= 1.30.0-0`
 | cilium.hubble.ui.enabled | bool | `false` | |
 | cilium.image.useDigest | bool | `false` | |
 | cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
+| cilium.k8s.apiServerURLs | string | `""` | |
+| cilium.kubeProxyReplacement | bool | `true` | |
 | cilium.l7Proxy | bool | `false` | |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cilium.operator.prometheus.enabled | bool | `false` | |

View File

@@ -43,7 +43,8 @@ cilium:
     binPath: "/usr/libexec/cni"
     logFile: /var/log/cilium-cni.log
     #-- Ensure this is false if multus is enabled
-    exclusive: false
+    exclusive: true
+    iptablesRemoveAWSRules: false
 
   cluster:
     # This should match the second octet of clusterPoolIPv4PodCIDRList
@@ -63,6 +64,17 @@ cilium:
     enabled: false
   #rollOutCiliumPods: true
 
+  kubeProxyReplacement: true
+
+  dnsProxy:
+    enableTransparentMode: true
+
+  k8sServiceHost: ""
+  k8sServicePort: 6443
+  # k8s:
+  #   # This has to be set to the DNS name of all API servers
+  #   # For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
+  #   apiServerURLs: ""
+
   cgroup:
     autoMount:
       enabled: false
@@ -91,9 +103,11 @@ cilium:
       - key: node-role.kubernetes.io/control-plane
         effect: NoSchedule
       # the operator removes the taints,
-      # so we need to break chicken egg on single controller
+      # so we need to break chicken egg
      - key: node.cilium.io/agent-not-ready
        effect: NoSchedule
+      - key: node.kubernetes.io/not-ready
+        effect: NoSchedule
     nodeSelector:
       node-role.kubernetes.io/control-plane: ""
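With `kubeProxyReplacement: true`, the Cilium agent takes over service load-balancing, so it needs a direct API server address (`k8sServiceHost`/`k8sServicePort`) that does not depend on a kube-proxy-provided ClusterIP. One way to verify the replacement is active (a sketch; in recent Cilium releases the in-pod binary is `cilium-dbg`):

    kubectl -n kube-system exec ds/cilium -- cilium-dbg status | grep KubeProxyReplacement
    # expected output similar to: KubeProxyReplacement:   True   [eth0 ...]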

View File

@@ -1,6 +1,6 @@
 {{- define "network-values" }}
 multus:
-  enabled: true
+  enabled: false
   clusterNetwork: "cilium"
 
 # {{- if eq .Values.global.platform "aws" }}
@@ -15,6 +15,9 @@ cilium:
 #   image:
 #     pullPolicy: Never
 # {{- end }}
+  k8sServiceHost: {{ .Values.global.apiServerUrl }}
+  # k8s:
+  #   apiServerURLs: "https://{{ .Values.global.apiServerUrl }}"
   cluster:
     name: {{ .Values.global.clusterName }}

View File

@@ -1,5 +1,6 @@
 global:
   clusterName: zdt-trial-cluster
+  apiServerUrl: localhost:6443
 
   # platform: aws (kubeadm, default), gke, or nocloud
   platform: "aws"
@@ -32,7 +33,7 @@ addons:
 network:
   enabled: true
   retain: true
-  targetRevision: 0.5.8
+  targetRevision: 0.5.9
 
 cilium:
   cluster: {}