diff --git a/admin/kubezero.sh b/admin/kubezero.sh index b43d78a2..e25d33b7 100755 --- a/admin/kubezero.sh +++ b/admin/kubezero.sh @@ -9,7 +9,7 @@ fi export WORKDIR=/tmp/kubezero export HOSTFS=/host export CHARTS=/charts -export VERSION=v1.23 +export VERSION=$(kubeadm version --output json | jq -r .clientVersion.gitVersion) export KUBECONFIG="${HOSTFS}/root/.kube/config" diff --git a/charts/kubeadm/templates/admin-aws-iam.yaml b/charts/kubeadm/templates/admin-aws-iam.yaml index 99e9c7a3..4d91b3ac 100644 --- a/charts/kubeadm/templates/admin-aws-iam.yaml +++ b/charts/kubeadm/templates/admin-aws-iam.yaml @@ -16,7 +16,7 @@ users: - name: kubernetes-admin user: exec: - apiVersion: client.authentication.k8s.io/v1alpha1 + apiVersion: client.authentication.k8s.io/v1beta1 command: aws-iam-authenticator args: - "token" diff --git a/charts/kubezero-ci/Chart.yaml b/charts/kubezero-ci/Chart.yaml index 2f97d0ba..31dcb928 100644 --- a/charts/kubezero-ci/Chart.yaml +++ b/charts/kubezero-ci/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: kubezero-ci description: KubeZero umbrella chart for all things CI type: application -version: 0.5.4 +version: 0.5.5 home: https://kubezero.com icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png keywords: @@ -22,7 +22,7 @@ dependencies: repository: https://gocd.github.io/helm-chart condition: gocd.enabled - name: gitea - version: 5.0.5 + version: 5.0.9 repository: https://dl.gitea.io/charts/ condition: gitea.enabled - name: jenkins diff --git a/charts/kubezero-ci/values.yaml b/charts/kubezero-ci/values.yaml index cf1b6e27..b7a09018 100644 --- a/charts/kubezero-ci/values.yaml +++ b/charts/kubezero-ci/values.yaml @@ -17,7 +17,7 @@ gitea: enabled: false image: - tag: 1.16.8 + tag: 1.17.0 rootless: true securityContext: diff --git a/charts/kubezero-network/values.yaml b/charts/kubezero-network/values.yaml index 6ba7b1c1..47fddc4d 100644 --- a/charts/kubezero-network/values.yaml +++ b/charts/kubezero-network/values.yaml @@ -33,6 
+33,27 @@ cilium: #-- Ensure this is false if multus is enabled exclusive: false + cluster: + # This should match the second octet + 1 of clusterPoolIPv4PodCIDRList, + # to prevent IP space overlap and to allow easy tracking + id: 1 + name: default + + ipam: + operator: + clusterPoolIPv4PodCIDRList: + - 10.0.0.0/16 + + hostServices: + enabled: true + + # Does this conflict with Calico in parallel? + nodePort: + enabled: true + + # Keep it simple for now + l7Proxy: false + + cgroup: autoMount: enabled: false