Fix for snapshot controller, other tweaks

Stefan Reimer 2023-05-04 17:00:09 +00:00
parent 435c2b8213
commit 3863798782
6 changed files with 21 additions and 14 deletions

View File

@@ -161,7 +161,7 @@ control_plane_node() {
   else
     # restore latest backup
-    retry 10 60 30 restic restore latest --no-lock -t / #Review: Use latest no matter what for now: --tag $KUBE_VERSION_MINOR
+    retry 10 60 30 restic restore latest --no-lock -t / # --tag $KUBE_VERSION_MINOR
     # Make last etcd snapshot available
     cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
@@ -214,11 +214,11 @@ control_plane_node() {
       sleep 3
     done
-    # see if we are a former member
+    # see if we are a former member and remove our former self if so
     MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
     [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
-    # flush etcd data directory as joining with previous store seems flaky, especially during etcd version upgrades
+    # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
     rm -rf ${HOSTFS}/var/lib/etcd/member
     # Announce new etcd member and capture ETCD_INITIAL_CLUSTER, retry needed in case another node joining causes temp quorum loss
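
The hunk above has a rejoining control-plane node delete its stale etcd identity and wipe the local member directory before it announces itself again. As a side note (not part of this commit; the endpoints variable is reused from the script), the effect can be checked manually before the node is re-announced:

    # hypothetical manual check: the rejoining node should no longer show up as a member
    etcdctl member list -w table --endpoints=$etcd_endpoints
    # and the local state should be gone
    ls ${HOSTFS}/var/lib/etcd/member 2>/dev/null || echo "member dir flushed"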

View File

@@ -166,6 +166,9 @@ function _helm() {
     render
     kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
+    # Try again without server-side, review with 1.26, required for cert-manager during 1.25
+    [ $rc -ne 0 ] && kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?
     # Optional post hook
     declare -F ${module}-post && ${module}-post
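
The added fallback relies on the `cmd && rc=$? || rc=$?` idiom, which captures a non-zero exit code without aborting under `set -e`. A minimal standalone sketch of the same pattern, with a placeholder manifest path instead of the script's $WORKDIR and an `if` used purely for readability:

    #!/bin/bash
    set -e
    manifest=/tmp/helm.yaml   # placeholder, not the script's actual path

    # prefer server-side apply; --force-conflicts takes over fields owned by other managers
    kubectl apply -f $manifest --server-side --force-conflicts && rc=$? || rc=$?

    # fall back to classic client-side apply if server-side apply was rejected
    if [ $rc -ne 0 ]; then
      kubectl apply -f $manifest
    fi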

View File

@@ -1,6 +1,6 @@
 {{- if .Values.api.awsIamAuth.enabled }}
-kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
 metadata:
   name: aws-iam-authenticator
 rules:
@@ -51,8 +51,8 @@ metadata:
   namespace: kube-system
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   name: aws-iam-authenticator
   namespace: kube-system

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-redis
 description: KubeZero Umbrella Chart for Redis HA
 type: application
-version: 0.4.0
+version: 0.4.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -25,4 +25,4 @@ dependencies:
     repository: https://charts.bitnami.com/bitnami
     condition: redis-cluster.enabled
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.25.0"

View File

@@ -1,5 +1,5 @@
 {{- range $ssg := .Values.snapshotgroups }}
-apiVersion: gemini.fairwinds.com/v1beta1
+apiVersion: gemini.fairwinds.com/v1
 kind: SnapshotGroup
 metadata:
   name: {{ $ssg.name }}
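
Moving the template from gemini.fairwinds.com/v1beta1 to v1 assumes the installed gemini CRD actually serves the v1 version. A quick way to confirm that on a cluster (the CRD name is inferred from group and kind, not stated in this commit):

    # list the versions served by the SnapshotGroup CRD; "v1 served=true" should appear
    kubectl get crd snapshotgroups.gemini.fairwinds.com \
      -o jsonpath='{range .spec.versions[*]}{.name}{" served="}{.served}{"\n"}{end}'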

View File

@@ -25,9 +25,6 @@ rules:
   - apiGroups: [""]
     resources: ["persistentvolumeclaims"]
     verbs: ["get", "list", "watch", "update"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses"]
-    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
@@ -36,13 +33,20 @@ rules:
     verbs: ["get", "list", "watch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
-    verbs: ["create", "get", "list", "watch", "update", "delete"]
+    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents/status"]
+    verbs: ["patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshots"]
-    verbs: ["get", "list", "watch", "update"]
+    verbs: ["get", "list", "watch", "update", "patch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshots/status"]
-    verbs: ["update"]
+    verbs: ["update", "patch"]
+  # Enable this RBAC rule only when using distributed snapshotting, i.e. when the enable-distributed-snapshotting flag is set to true
+  # - apiGroups: [""]
+  #   resources: ["nodes"]
+  #   verbs: ["get", "list", "watch"]
 ---
 kind: ClusterRoleBinding
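
The widened ClusterRole can be verified after deployment with an impersonated access check; the service-account name below is an assumption based on a typical snapshot-controller deployment, not something stated in this diff:

    # expect "yes" once patch on volumesnapshotcontents/status is granted
    kubectl auth can-i patch volumesnapshotcontents.snapshot.storage.k8s.io \
      --subresource=status \
      --as=system:serviceaccount:kube-system:snapshot-controller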