From 1455c8b800c86f6c51a005ff0b73af50719a9aff Mon Sep 17 00:00:00 2001
From: Stefan Reimer
Date: Thu, 4 May 2023 17:00:09 +0000
Subject: [PATCH] Fix for snapshot controller, other tweaks

---
 admin/kubezero.sh                                |  6 +++---
 admin/libhelm.sh                                 |  3 +++
 .../51-aws-iam-authenticator-deployment.yaml     |  4 ++--
 charts/kubezero-redis/Chart.yaml                 |  4 ++--
 .../kubezero-redis/templates/snapshotgroup.yaml  |  2 +-
 .../templates/snapshot-controller/rbac.yaml      | 16 ++++++++++------
 6 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/admin/kubezero.sh b/admin/kubezero.sh
index c0d24de..926a9b4 100755
--- a/admin/kubezero.sh
+++ b/admin/kubezero.sh
@@ -161,7 +161,7 @@ control_plane_node() {
 
   else
     # restore latest backup
-    retry 10 60 30 restic restore latest --no-lock -t / #Review: Use latest no matter what for now: --tag $KUBE_VERSION_MINOR
+    retry 10 60 30 restic restore latest --no-lock -t / # --tag $KUBE_VERSION_MINOR
 
     # Make last etcd snapshot available
    cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
@@ -214,11 +214,11 @@
       sleep 3
     done
 
-    # see if we are a former member
+    # see if we are a former member and, if so, remove our stale member entry
     MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
     [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
 
-    # flush etcd data directory as joining with previous store seems flaky, especially during etcd version upgrades
+    # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
     rm -rf ${HOSTFS}/var/lib/etcd/member
 
     # Announce new etcd member and capture ETCD_INITIAL_CLUSTER, retry needed in case another node joining causes temp quorum loss
diff --git a/admin/libhelm.sh b/admin/libhelm.sh
index 2f3e2f8..7b982f3 100644
--- a/admin/libhelm.sh
+++ b/admin/libhelm.sh
@@ -166,6 +166,9 @@ function _helm() {
 
     render
     kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
 
+    # Try again without server-side apply, required for cert-manager during 1.25; review with 1.26
+    [ $rc -ne 0 ] && kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?
+
     # Optional post hook
     declare -F ${module}-post && ${module}-post
diff --git a/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml b/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
index 6896cea..febbeb2 100644
--- a/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
+++ b/charts/kubeadm/templates/resources/51-aws-iam-authenticator-deployment.yaml
@@ -1,6 +1,6 @@
 {{- if .Values.api.awsIamAuth.enabled }}
-kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
 metadata:
   name: aws-iam-authenticator
 rules:
@@ -51,8 +51,8 @@ metadata:
   namespace: kube-system
 
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   name: aws-iam-authenticator
   namespace: kube-system
diff --git a/charts/kubezero-redis/Chart.yaml b/charts/kubezero-redis/Chart.yaml
index 1d24358..6d6beb2 100644
--- a/charts/kubezero-redis/Chart.yaml
+++ b/charts/kubezero-redis/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-redis
 description: KubeZero Umbrella Chart for Redis HA
 type: application
-version: 0.4.0
+version: 0.4.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -25,4 +25,4 @@ dependencies:
     repository: https://charts.bitnami.com/bitnami
     condition: redis-cluster.enabled
 
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.25.0"
diff --git a/charts/kubezero-redis/templates/snapshotgroup.yaml b/charts/kubezero-redis/templates/snapshotgroup.yaml
index 800cd20..0329714 100644
--- a/charts/kubezero-redis/templates/snapshotgroup.yaml
+++ b/charts/kubezero-redis/templates/snapshotgroup.yaml
@@ -1,5 +1,5 @@
 {{- range $ssg := .Values.snapshotgroups }}
-apiVersion: gemini.fairwinds.com/v1beta1
+apiVersion: gemini.fairwinds.com/v1
 kind: SnapshotGroup
 metadata:
   name: {{ $ssg.name }}
diff --git a/charts/kubezero-storage/templates/snapshot-controller/rbac.yaml b/charts/kubezero-storage/templates/snapshot-controller/rbac.yaml
index b3ca826..ed5d067 100644
--- a/charts/kubezero-storage/templates/snapshot-controller/rbac.yaml
+++ b/charts/kubezero-storage/templates/snapshot-controller/rbac.yaml
@@ -25,9 +25,6 @@ rules:
   - apiGroups: [""]
     resources: ["persistentvolumeclaims"]
     verbs: ["get", "list", "watch", "update"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses"]
-    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
@@ -36,13 +33,20 @@ rules:
     verbs: ["get", "list", "watch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
-    verbs: ["create", "get", "list", "watch", "update", "delete"]
+    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents/status"]
+    verbs: ["patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshots"]
-    verbs: ["get", "list", "watch", "update"]
+    verbs: ["get", "list", "watch", "update", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshots/status"]
-    verbs: ["update"]
+    verbs: ["update", "patch"]
+  # Enable this RBAC rule only when using distributed snapshotting, i.e. when the enable-distributed-snapshotting flag is set to true
+  # - apiGroups: [""]
+  #   resources: ["nodes"]
+  #   verbs: ["get", "list", "watch"]
 
 ---
 kind: ClusterRoleBinding