feat: bootstrap / upgrade reorg as part of 1.23

Stefan Reimer 2022-09-11 13:54:56 +02:00
parent bdf6ff1d27
commit 56f8eb0233
30 changed files with 428 additions and 159 deletions


@@ -14,6 +14,9 @@ RUN cd /etc/apk/keys && \
     jq \
     yq \
     diffutils \
+    bash \
+    python3 \
+    py3-yaml \
     cri-tools@kubezero \
     kubeadm@kubezero~=${KUBE_VERSION} \
     kubectl@kubezero~=${KUBE_VERSION} \
@@ -22,9 +25,10 @@ RUN cd /etc/apk/keys && \
     restic@testing \
     helm@testing
 
-ADD admin/kubezero.sh /usr/bin
+RUN helm repo add kubezero https://cdn.zero-downtime.net/charts
+
+ADD admin/kubezero.sh admin/libhelm.sh /usr/bin
 ADD charts/kubeadm /charts/kubeadm
-ADD charts/kubezero-addons /charts/kubezero-addons
-ADD charts/kubezero-network /charts/kubezero-network
+ADD charts/kubezero /charts/kubezero
 
 ENTRYPOINT ["kubezero.sh"]


@@ -1,10 +1,13 @@
-#!/bin/sh
+#!/bin/bash
 if [ -n "$DEBUG" ]; then
   set -x
   LOG="--v=5"
 fi
 
+# include helm lib
+. libhelm.sh
+
 # Export vars to ease use in debug_shell etc
 export WORKDIR=/tmp/kubezero
 export HOSTFS=/host
@@ -44,7 +47,7 @@ _kubeadm() {
 # Render cluster config
 render_kubeadm() {
-  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubezero.yaml
+  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
 
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
@@ -62,13 +65,17 @@ render_kubeadm() {
 parse_kubezero() {
-  [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; return 1; }
+  # remove with 1.24
+  if [ ! -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml ]; then
+    [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] && cp ${HOSTFS}/etc/kubernetes/kubezero.yaml ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
+  fi
 
-  export CLUSTERNAME=$(yq eval '.clusterName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export PROVIDER_ID=$(yq eval '.providerID' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
+  export CLUSTERNAME=$(yq eval '.global.clusterName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export HIGHAVAILABLE=$(yq eval '.global.highAvailable // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export PROVIDER_ID=$(yq eval '.providerID' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
 
   # From here on bail out, allows debug_shell even in error cases
   set -e
@@ -117,11 +124,27 @@ cluster_upgrade() {
   ### PRE 1.23 specific
   #####################
 
-  # Migrate addons and network values into CM from kubezero.yaml
+  # Migrate addons and network values from local kubeadm-values.yaml on controllers into CM
+  # - remove secrets from addons
+  # - enable cilium
+  if [[ $PROVIDER_ID =~ ^aws ]]; then
+    REGION=$(echo $PROVIDER_ID | sed -e 's,aws:///,,' -e 's,/.*,,' -e 's/\w$//')
+  fi
+
   kubectl get cm -n kube-system kubezero-values || \
     kubectl create configmap -n kube-system kubezero-values \
-      --from-literal addons="$(yq e '.addons | del .clusterBackup.repository | del .clusterBackup.password' ${HOSTFS}/etc/kubernetes/kubezero.yaml)" \
-      --from-literal network="$(yq e .network ${HOSTFS}/etc/kubernetes/kubezero.yaml)"
+      --from-literal values.yaml="$(yq e 'del .addons.clusterBackup.repository | del .addons.clusterBackup.password | \
+        .addons.clusterBackup.image.tag =strenv(KUBE_VERSION) | \
+        .network.cilium.enabled = true | .network.multus.defaultNetworks = ["cilium"] | \
+        .network.cilium.cluster.name = strenv(CLUSTERNAME) | \
+        .global.clusterName = strenv(CLUSTERNAME) | \
+        .global.highAvailable = strenv(HIGHAVAILABLE) | \
+        .global.aws.region = strenv(REGION)' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)"
+
+  # Create kubeadm-values CM if not available
+  kubectl get cm -n kube-system kubeadm-values || \
+    kubectl create configmap -n kube-system kubeadm-values
 
   #####################
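The REGION derivation added above strips the aws:/// prefix, the instance part and the trailing availability-zone letter from the node's providerID. A minimal sketch of what that sed pipeline produces, with a made-up instance id (only the sed expressions are taken from the commit):

# Illustrative only: the providerID value is an assumption
PROVIDER_ID="aws:///eu-central-1a/i-0123456789abcdef0"
REGION=$(echo $PROVIDER_ID | sed -e 's,aws:///,,' -e 's,/.*,,' -e 's/\w$//')
echo $REGION   # prints: eu-central-1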
@@ -254,10 +277,10 @@ control_plane_node() {
     export ETCD_INITIAL_CLUSTER=$(echo ${_cluster%%,} | sed -e 's/ //g')
   fi
 
-  # Patch kubezero.yaml and re-render to get etcd manifest patched
+  # Patch kubeadm-values.yaml and re-render to get etcd manifest patched
   yq eval -i '.etcd.state = "existing"
     | .etcd.initialCluster = strenv(ETCD_INITIAL_CLUSTER)
-    ' ${HOSTFS}/etc/kubernetes/kubezero.yaml
+    ' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
 
   render_kubeadm
 fi
@@ -318,24 +341,27 @@ control_plane_node() {
 apply_module() {
-  MODULE=$1
-
-  # network
-  kubectl get configmap -n kube-system kubezero-values -o custom-columns=NAME:".data.$MODULE" --no-headers=true > _values.yaml
-
-  helm template $CHARTS/kubezero-$MODULE --namespace kube-system --name-template $MODULE --skip-crds --set installCRDs=false -f _values.yaml --kube-version $KUBE_VERSION > helm-no-crds.yaml
-  helm template $CHARTS/kubezero-$MODULE --namespace kube-system --name-template $MODULE --include-crds --set installCRDs=true -f _values.yaml --kube-version $KUBE_VERSION > helm-crds.yaml
-  diff -e helm-no-crds.yaml helm-crds.yaml | head -n-1 | tail -n+2 > crds.yaml
-
-  # Only apply if there are actually any crds
-  if [ -s crds.yaml ]; then
-    kubectl apply -f crds.yaml --server-side $LOG
-  fi
-
-  helm template $CHARTS/kubezero-$MODULE --namespace kube-system --include-crds --name-template $MODULE \
-    -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
-
-  echo "Applied KubeZero module: $MODULE"
+  MODULES=$1
+
+  kubectl get configmap -n kube-system kubezero-values -o yaml | yq '.data."values.yaml"' > $WORKDIR/_values.yaml
+
+  # Always use embedded kubezero chart
+  helm template $CHARTS/kubezero -f $WORKDIR/_values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
+
+  # Resolve all the all enabled modules
+  [ -z "$MODULES" ] && MODULES="$(ls ${WORKDIR}/kubezero/templates | sed -e 's/.yaml//g')"
+
+  # CRDs first
+  for t in $MODULES; do
+    _helm crds $t
+  done
+
+  for t in $MODULES; do
+    _helm apply $t
+  done
+
+  echo "Applied KubeZero modules: $MODULES"
 }
@@ -380,10 +406,10 @@ debug_shell() {
   printf "For manual etcdctl commands use:\n  # export ETCDCTL_ENDPOINTS=$ETCD_NODENAME:2379\n"
 
-  /bin/sh
+  /bin/bash
 }
 
-# First parse kubezero.yaml
+# First parse kubeadm-values.yaml
 parse_kubezero
 
 # Execute tasks
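With the reorganised apply_module, per-module charts are no longer templated individually; everything is resolved from the embedded kubezero umbrella chart and handed to libhelm's _helm helper. A hedged sketch of how the admin entrypoint is driven (the task names appear in this commit; that kubezero.sh dispatches each argument as a task is an assumption based on the "Execute tasks" comment):

# Sketch, not the canonical invocation
kubezero.sh cluster_upgrade              # one-time 1.22 -> 1.23 migration steps
kubezero.sh apply_network apply_addons   # re-render the umbrella chart and apply the network and addons modules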

admin/libhelm.sh (new executable file, 110 lines)

@@ -0,0 +1,110 @@
#!/bin/bash
# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1"
# Waits for max 300s and retries
function wait_for() {
local TRIES=0
while true; do
eval " $@" && break
[ $TRIES -eq 100 ] && return 1
let TRIES=$TRIES+1
sleep 3
done
}
function chart_location() {
echo "$1 --repo https://cdn.zero-downtime.net/charts"
}
# make sure namespace exists prior to calling helm as the create-namespace options doesn't work
function create_ns() {
local namespace=$1
if [ "$namespace" != "kube-system" ]; then
kubectl get ns $namespace || kubectl create ns $namespace
fi
}
# delete non kube-system ns
function delete_ns() {
local namespace=$1
[ "$namespace" != "kube-system" ] && kubectl delete ns $namespace
}
# Extract crds via helm calls and apply delta=crds only
function _crds() {
helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds --set ${module}.installCRDs=false -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-no-crds.yaml
helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds --set ${module}.installCRDs=true -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-crds.yaml
diff -e $WORKDIR/helm-no-crds.yaml $WORKDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $WORKDIR/crds.yaml
# Only apply if there are actually any crds
if [ -s $WORKDIR/crds.yaml ]; then
kubectl apply -f $WORKDIR/crds.yaml --server-side
fi
}
# helm template | kubectl apply -f -
# confine to one namespace if possible
function apply() {
helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
| python3 -c '
#!/usr/bin/python3
import yaml
import sys
for manifest in yaml.safe_load_all(sys.stdin):
if manifest:
if "metadata" in manifest and "namespace" not in manifest["metadata"]:
manifest["metadata"]["namespace"] = sys.argv[1]
print("---")
print(yaml.dump(manifest))' $namespace > $WORKDIR/helm.yaml
kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?
}
function _helm() {
local action=$1
local module=$2
local chart="$(yq eval '.spec.source.chart' $WORKDIR/kubezero/templates/${module}.yaml)"
local namespace="$(yq eval '.spec.destination.namespace' $WORKDIR/kubezero/templates/${module}.yaml)"
targetRevision=""
_version="$(yq eval '.spec.source.targetRevision' $WORKDIR/kubezero/templates/${module}.yaml)"
[ -n "$_version" ] && targetRevision="--version $_version"
yq eval '.spec.source.helm.values' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml
if [ $action == "crds" ]; then
# Allow custom CRD handling
declare -F ${module}-crds && ${module}-crds || _crds
elif [ $action == "apply" ]; then
# namespace must exist prior to apply
create_ns $namespace
# Optional pre hook
declare -F ${module}-pre && ${module}-pre
apply
# Optional post hook
declare -F ${module}-post && ${module}-post
elif [ $action == "delete" ]; then
apply
# Delete dedicated namespace if not kube-system
[ -n "$DELETE_NS" ] && delete_ns $namespace
fi
return 0
}
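libhelm.sh assumes apply_module (above) has already rendered the kubezero umbrella chart into $WORKDIR/kubezero/templates; each rendered ArgoCD Application file then drives one _helm call, which pulls the module chart from the KubeZero chart repo. A hedged usage sketch, with the module name and environment purely illustrative:

# Sketch under assumptions: WORKDIR and KUBE_VERSION are normally exported by kubezero.sh
export WORKDIR=/tmp/kubezero KUBE_VERSION=1.23.10
. libhelm.sh
_helm crds network    # server-side apply only the CRD delta extracted via the diff -e trick
_helm apply network   # template the module and kubectl apply it into its destination namespace
# wait_for retries a command every 3s for up to ~300s, e.g. (CRD name illustrative):
wait_for kubectl get crd ciliumnetworkpolicies.cilium.io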


@@ -11,11 +11,34 @@ yaml.explicit_start = True
 yaml.indent(mapping=2, sequence=4, offset=2)
 
-parser = argparse.ArgumentParser(description="Update Route53 entries")
+
+def rec_sort(d):
+    if isinstance(d, dict):
+        res = dict()
+
+        # Always have "enabled" first if present
+        if "enabled" in d.keys():
+            res["enabled"] = rec_sort(d["enabled"])
+            d.pop("enabled")
+
+        # next is "name" if present
+        if "name" in d.keys():
+            res["name"] = rec_sort(d["name"])
+            d.pop("name")
+
+        for k in sorted(d.keys()):
+            res[k] = rec_sort(d[k])
+        return res
+
+    if isinstance(d, list):
+        for idx, elem in enumerate(d):
+            d[idx] = rec_sort(elem)
+        return d
+
+
+parser = argparse.ArgumentParser(description="Migrate ArgoCD Kubezero values to new cluster config")
 parser.add_argument(
     "--version",
     dest="version",
-    default="1.22.8-10",
+    default="1.23.10",
     action="store",
     required=False,
     help="Update KubeZero version",
@@ -34,62 +57,25 @@ values = yaml.load(application["spec"]["source"]["helm"]["values"])
 ### Do your thing
 
-# New Istio Gateway charts
-if "private" in values["istio-ingress"]:
-    values["istio-private-ingress"] = {
-        "enabled": True,
-        "certificates": values["istio-ingress"]["private"]["certificates"].copy()
-    }
-    if "gateway" in values["istio-ingress"]["private"]:
-        values["istio-private-ingress"]["gateway"] = {}
-
-        try:
-            values["istio-private-ingress"]["gateway"]["replicaCount"] = values["istio-ingress"]["private"]["gateway"]["replicaCount"]
-        except KeyError:
-            pass
-
-        if "ports" in values["istio-ingress"]["private"]["gateway"]:
-            values["istio-private-ingress"]["gateway"]["service"] = {}
-            values["istio-private-ingress"]["gateway"]["service"]["ports"] = []
-
-            for port in values["istio-ingress"]["private"]["gateway"]["ports"]:
-                if port["name"] not in ["status-port", "http2", "https"]:
-                    values["istio-private-ingress"]["gateway"]["service"]["ports"].append(port)
-
-    values["istio-ingress"].pop("private")
-
-if "public" in values["istio-ingress"]:
-    values["istio-ingress"]["certificates"] = values["istio-ingress"]["public"]["certificates"].copy()
-    if "gateway" in values["istio-ingress"]["public"]:
-        values["istio-ingress"]["gateway"] = {}
-
-        try:
-            values["istio-ingress"]["gateway"]["replicaCount"] = values["istio-ingress"]["public"]["gateway"]["replicaCount"]
-        except KeyError:
-            pass
-
-        if "ports" in values["istio-ingress"]["public"]["gateway"]:
-            values["istio-ingress"]["gateway"]["service"] = {}
-            values["istio-ingress"]["gateway"]["service"]["ports"] = []
-
-            for port in values["istio-ingress"]["public"]["gateway"]["ports"]:
-                if port["name"] not in ["status-port", "http2", "https"]:
-                    values["istio-ingress"]["gateway"]["service"]["ports"].append(port)
-
-    values["istio-ingress"].pop("public")
-
-if "global" in values["istio-ingress"]:
-    values["istio-ingress"].pop("global")
-
-# Remove Kiam
-if "kiam" in values:
-    values.pop("kiam")
+# migrate ClusterName to clusterName
+if "ClusterName" in values:
+    values["clusterName"] = values["ClusterName"]
+    values.pop("ClusterName")
+
+# Create new clusterwide cloudprovider data if possible
+try:
+    if values["cert-manager"]["clusterIssuer"]["solvers"][0]["dns01"]["route53"]["regions"]:
+        if "aws" not in values:
+            values["aws"] = {}
+        values["aws"]["region"] = values["cert-manager"]["clusterIssuer"]["solvers"][0]["dns01"]["route53"]["region"]
+except KeyError:
+    pass
 
 ### End
 
 # Merge new values
 buffer = io.StringIO()
-yaml.dump(values, buffer)
+yaml.dump(rec_sort(values), buffer)
 application["spec"]["source"]["helm"]["values"] = buffer.getvalue()
 
 # Dump final yaml


@@ -1,13 +1,16 @@
 #!/bin/bash -e
 
-VERSION="v1.23"
+VERSION="v1.23.10-1"
 
 [ -n "$DEBUG" ] && set -x
 
 # unset any AWS_DEFAULT_PROFILE as it will break aws-iam-auth
 unset AWS_DEFAULT_PROFILE
 
-controller_nodes_upgrade() {
+all_nodes_upgrade() {
+  CMD="$1"
+
   echo "Deploying node upgrade daemonSet..."
   cat <<EOF | kubectl apply -f -
 apiVersion: apps/v1
@@ -34,10 +37,13 @@ spec:
       - name: kubezero-upgrade-${VERSION//.}
         image: busybox
         command: ["/bin/sh"]
-        args: ["-x", "-c", "[ -d /host/opt/cni/bin ] && { mkdir -p /host/usr/libexec/cni && cp /host/opt/cni/bin/* /host/usr/libexec/cni; } || true" ]
+        args: ["-x", "-c", "$CMD" ]
        volumeMounts:
        - name: host
          mountPath: /host
+        securityContext:
+          capabilities:
+            add: ["SYS_ADMIN"]
       containers:
       - name: kubezero-upgrade-${VERSION//.}-wait
         image: busybox
@@ -49,12 +55,15 @@ spec:
           type: Directory
 EOF
 
-  #kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
+  kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
   kubectl delete ds kubezero-upgrade-${VERSION//.} -n kube-system
 }
 
-echo "Deploying cluster upgrade job ..."
+control_plane_upgrade() {
+  TASKS="$1"
+
+  echo "Deploying cluster upgrade job ..."
 
   cat <<EOF | kubectl apply -f -
 apiVersion: v1
 kind: Pod
@@ -72,10 +81,7 @@ spec:
     image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
     imagePullPolicy: Always
     command: ["kubezero.sh"]
-    args:
-    - cluster_upgrade
-    - apply_network
-    - apply_addons
+    args: [$TASKS]
     env:
     - name: DEBUG
       value: "$DEBUG"
@@ -112,3 +118,21 @@ while true; do
   sleep 3
 done
 
 kubectl delete pod kubezero-upgrade-${VERSION//.} -n kube-system
+}
+
+all_nodes_upgrade "mount --make-shared /host/sys/fs/cgroup; mount --make-shared /host/sys;"
+
+control_plane_upgrade cluster_upgrade
+
+echo "Adjust kubezero-values CM !!"
+read
+
+#kubectl delete ds kube-multus-ds -n kube-system
+control_plane_upgrade "apply_network, apply_addons"
+
+exit 0
+
+kubectl rollout restart daemonset/calico-node -n kube-system
+kubectl rollout restart daemonset/cilium -n kube-system
+kubectl rollout restart daemonset/kube-multus-ds -n kube-system
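The new flow deliberately pauses (echo plus read) after cluster_upgrade so the freshly migrated kubezero-values ConfigMap can be reviewed before apply_network and apply_addons run. A hedged sketch of that manual checkpoint (the yq extraction mirrors the command used by apply_module above):

# Sketch: inspect and, if needed, adjust the migrated values while the script waits at `read`
kubectl get cm -n kube-system kubezero-values -o yaml | yq '.data."values.yaml"'
kubectl edit cm -n kube-system kubezero-values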


@@ -30,12 +30,12 @@ Kubernetes: `>= 1.20.0`
 | api.listenPort | int | `6443` |  |
 | api.oidcEndpoint | string | `""` | s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
 | api.serviceAccountIssuer | string | `""` | https://s3.${REGION}.amazonaws.com/${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
-| clusterName | string | `"pleasechangeme"` |  |
 | domain | string | `"changeme.org"` |  |
 | etcd.extraArgs | object | `{}` |  |
 | etcd.nodeName | string | `"etcd"` |  |
 | etcd.state | string | `"new"` |  |
-| highAvailable | bool | `false` |  |
+| global.clusterName | string | `"pleasechangeme"` |  |
+| global.highAvailable | bool | `false` |  |
 | listenAddress | string | `"0.0.0.0"` | Needs to be set to primary node IP |
 | nodeName | string | `"kubezero-node"` | set to $HOSTNAME |
 | protectKernelDefaults | bool | `false` |  |


@@ -1,7 +1,7 @@
 apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
-clusterName: {{ .Values.clusterName }}
+clusterName: {{ .Values.global.clusterName }}
 featureGates:
   UnversionedKubeletConfigMap: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
@@ -13,12 +13,12 @@ etcd:
   extraArgs:
     ### DNS discovery
     #discovery-srv: {{ .Values.domain }}
-    #discovery-srv-name: {{ .Values.clusterName }}
+    #discovery-srv-name: {{ .Values.global.clusterName }}
     advertise-client-urls: https://{{ .Values.etcd.nodeName }}:2379
     initial-advertise-peer-urls: https://{{ .Values.etcd.nodeName }}:2380
     initial-cluster: {{ include "kubeadm.etcd.initialCluster" .Values.etcd | quote }}
     initial-cluster-state: {{ .Values.etcd.state }}
-    initial-cluster-token: etcd-{{ .Values.clusterName }}
+    initial-cluster-token: etcd-{{ .Values.global.clusterName }}
     name: {{ .Values.etcd.nodeName }}
     listen-peer-urls: https://{{ .Values.listenAddress }}:2380
     listen-client-urls: https://{{ .Values.listenAddress }}:2379
@@ -40,13 +40,13 @@ controllerManager:
   extraArgs:
     profiling: "false"
     terminated-pod-gc-threshold: "300"
-    leader-elect: {{ .Values.highAvailable | quote }}
+    leader-elect: {{ .Values.global.highAvailable | quote }}
     logging-format: json
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
 scheduler:
   extraArgs:
     profiling: "false"
-    leader-elect: {{ .Values.highAvailable | quote }}
+    leader-elect: {{ .Values.global.highAvailable | quote }}
     logging-format: json
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
 apiServer:
@@ -73,7 +73,7 @@ apiServer:
     {{- end }}
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
     enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit
-#    {{- if .Values.highAvailable }}
+#    {{- if .Values.global.highAvailable }}
 #    goaway-chance: ".001"
 #    {{- end }}
     logging-format: json


@@ -4,13 +4,13 @@ kind: Config
 clusters:
 - cluster:
     server: https://{{ .Values.api.endpoint }}
-  name: {{ .Values.clusterName }}
+  name: {{ .Values.global.clusterName }}
 contexts:
 - context:
-    cluster: {{ .Values.clusterName }}
+    cluster: {{ .Values.global.clusterName }}
     user: kubernetes-admin
-  name: kubernetes-admin@{{ .Values.clusterName }}
-current-context: kubernetes-admin@{{ .Values.clusterName }}
+  name: kubernetes-admin@{{ .Values.global.clusterName }}
+current-context: kubernetes-admin@{{ .Values.global.clusterName }}
 preferences: {}
 users:
 - name: kubernetes-admin
@@ -21,7 +21,7 @@ users:
       args:
       - "token"
      - "-i"
-      - "{{ .Values.clusterName }}"
+      - "{{ .Values.global.clusterName }}"
       - "-r"
       - "{{ .Values.api.awsIamAuth.kubeAdminRole }}"
 {{- end }}


@@ -1,5 +1,5 @@
 spec:
-  replicas: {{ ternary 3 1 .Values.highAvailable }}
+  replicas: {{ ternary 3 1 .Values.global.highAvailable }}
   template:
     spec:
       containers:


@@ -75,7 +75,7 @@ metadata:
     k8s-app: aws-iam-authenticator
 data:
   config.yaml: |
-    clusterID: {{ .Values.clusterName }}
+    clusterID: {{ .Values.global.clusterName }}
 ---
 apiVersion: apps/v1


@@ -1,4 +1,7 @@
-clusterName: pleasechangeme
+global:
+  clusterName: pleasechangeme
+  highAvailable: false
+
 # -- set to $HOSTNAME
 nodeName: kubezero-node
 domain: changeme.org
@@ -22,8 +25,6 @@ api:
     workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
     kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
 
-highAvailable: false
-
 etcd:
   nodeName: etcd
   state: new


@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.6.0
+version: 0.6.1
 appVersion: v1.23.10
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -23,4 +23,8 @@ dependencies:
     version: 1.11.0
     repository: https://kubernetes-sigs.github.io/external-dns/
     condition: external-dns.enabled
+  - name: cluster-autoscaler
+    version: 9.21.0
+    repository: https://kubernetes.github.io/autoscaler
+    condition: cluster-autoscaler.enabled
 kubeVersion: ">= 1.20.0"


@@ -1,6 +1,6 @@
 # kubezero-addons
-![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.10](https://img.shields.io/badge/AppVersion-v1.23.10-informational?style=flat-square)
+![Version: 0.6.1](https://img.shields.io/badge/Version-0.6.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.10](https://img.shields.io/badge/AppVersion-v1.23.10-informational?style=flat-square)
 
 KubeZero umbrella chart for various optional cluster addons
@@ -20,6 +20,7 @@ Kubernetes: `>= 1.20.0`
 |------------|------|---------|
 |  | aws-node-termination-handler | 0.18.5 |
 | https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
 
 # MetalLB
@@ -59,7 +60,17 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.useProviderId | bool | `true` |  |
 | awsNeuron.enabled | bool | `false` |  |
 | awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` |  |
-| awsNeuron.image.tag | string | `"1.9.0.0"` |  |
+| awsNeuron.image.tag | string | `"1.9.3.0"` |  |
+| cluster-autoscaler.autoDiscovery.clusterName | string | `""` |  |
+| cluster-autoscaler.awsRegion | string | `"us-west-2"` |  |
+| cluster-autoscaler.enabled | bool | `false` |  |
+| cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` |  |
+| cluster-autoscaler.podDisruptionBudget | bool | `false` |  |
+| cluster-autoscaler.prometheusRule.enabled | bool | `false` |  |
+| cluster-autoscaler.serviceMonitor.enabled | bool | `false` |  |
+| cluster-autoscaler.serviceMonitor.interval | string | `"30s"` |  |
+| cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` |  |
+| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` |  |
 | clusterBackup.enabled | bool | `false` |  |
 | clusterBackup.extraEnv | list | `[]` |  |
 | clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` |  |


@@ -81,7 +81,53 @@ awsNeuron:
   image:
     name: public.ecr.aws/neuron/neuron-device-plugin
-    tag: 1.9.0.0
+    tag: 1.9.3.0
+
+cluster-autoscaler:
+  enabled: false
+  autoDiscovery:
+    clusterName: ""
+  awsRegion: "us-west-2"
+
+  serviceMonitor:
+    enabled: false
+    interval: 30s
+  prometheusRule:
+    enabled: false
+
+  # Disable pdb for now
+  podDisruptionBudget: false
+
+  #securityContext:
+  #  runAsNonRoot: true
+
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+  tolerations:
+    - key: node-role.kubernetes.io/master
+      effect: NoSchedule
+
+  # On AWS enable Projected Service Accounts to assume IAM role
+  #extraEnv:
+  #  AWS_ROLE_ARN: <IamArn>
+  #  AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+  #  AWS_STS_REGIONAL_ENDPOINTS: "regional"
+
+  #extraVolumes:
+  #- name: aws-token
+  #  projected:
+  #    sources:
+  #    - serviceAccountToken:
+  #        path: token
+  #        expirationSeconds: 86400
+  #        audience: "sts.amazonaws.com"
+
+  #extraVolumeMounts:
+  #- name: aws-token
+  #  mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+  #  readOnly: true
 
 external-dns:
   enabled: false


@@ -1,6 +1,6 @@
 # kubezero-auth
-![Version: 0.2.2](https://img.shields.io/badge/Version-0.2.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 18.0.1](https://img.shields.io/badge/AppVersion-18.0.1-informational?style=flat-square)
+![Version: 0.2.4](https://img.shields.io/badge/Version-0.2.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 19.0.1](https://img.shields.io/badge/AppVersion-19.0.1-informational?style=flat-square)
 
 KubeZero umbrella chart for all things Authentication and Identity management
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.20.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
-| https://charts.bitnami.com/bitnami | postgresql | 11.6.7 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
+| https://charts.bitnami.com/bitnami | postgresql | 11.8.1 |
 
 # Keycloak
@@ -42,6 +42,7 @@ https://github.com/keycloak/keycloak/tree/main/operator
 | keycloak.istio.url | string | `""` |  |
 | keycloak.metrics.enabled | bool | `false` |  |
 | postgresql.auth.database | string | `"keycloak"` |  |
+| postgresql.auth.existingSecret | string | `"kubezero-auth-postgresql"` |  |
 | postgresql.auth.username | string | `"keycloak"` |  |
 | postgresql.enabled | bool | `false` |  |
 | postgresql.primary.persistence.size | string | `"1Gi"` |  |


@@ -88,7 +88,7 @@ Kubernetes: `>= 1.20.0`
 | fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/var/lib/containers/logs"` |  |
 | fluent-bit.daemonSetVolumes[1].name | string | `"newlog"` |  |
 | fluent-bit.enabled | bool | `false` |  |
-| fluent-bit.image.tag | string | `"1.9.3"` |  |
+| fluent-bit.image.tag | string | `"1.9.8"` |  |
 | fluent-bit.luaScripts."kubezero.lua" | string | `"function nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` |  |
 | fluent-bit.resources.limits.memory | string | `"64Mi"` |  |
 | fluent-bit.resources.requests.cpu | string | `"20m"` |  |


@@ -244,7 +244,7 @@ fluent-bit:
   image:
     #repository: public.ecr.aws/zero-downtime/fluent-bit
-    tag: 1.9.7
+    tag: 1.9.8
 
   serviceMonitor:
     enabled: false


@@ -34,18 +34,21 @@ Kubernetes: `>= 1.20.0`
 | cilium.cluster.name | string | `"default"` |  |
 | cilium.cni.binPath | string | `"/usr/libexec/cni"` |  |
 | cilium.cni.exclusive | bool | `false` |  |
+| cilium.containerRuntime.integration | string | `"crio"` |  |
 | cilium.enabled | bool | `false` |  |
-| cilium.hostServices.enabled | bool | `true` |  |
 | cilium.hubble.enabled | bool | `false` |  |
-| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.0.0.0/16"` |  |
-| cilium.l2NeighDiscovery.enabled | bool | `false` |  |
+| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.1.0.0/16"` |  |
 | cilium.l7Proxy | bool | `false` |  |
-| cilium.nodePort.enabled | bool | `true` |  |
+| cilium.nodePort.enabled | bool | `false` |  |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` |  |
 | cilium.operator.replicas | int | `1` |  |
 | cilium.operator.tolerations[0].effect | string | `"NoSchedule"` |  |
 | cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` |  |
+| cilium.policyEnforcementMode | string | `"audit"` |  |
 | cilium.prometheus.enabled | bool | `false` |  |
 | cilium.prometheus.port | int | `9091` |  |
+| cilium.securityContext.privileged | bool | `true` |  |
 | cilium.tunnel | string | `"geneve"` |  |
 | metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` |  |
 | metallb.controller.tolerations[0].effect | string | `"NoSchedule"` |  |


@@ -424,6 +424,8 @@ spec:
             # Auto-detect the BGP IP address.
             - name: IP
               value: "autodetect"
+            - name: IP_AUTODETECTION_METHOD
+              value: "interface=eth.*"
             # Enable IPIP
             - name: CALICO_IPV4POOL_IPIP
               value: "Never"


@@ -1,4 +1,4 @@
-{{- if .Values.calico.enabled }}
+{{- if and .Values.multus.enabled .Values.calico.enabled }}
 apiVersion: k8s.cni.cncf.io/v1
 kind: NetworkAttachmentDefinition
 metadata:
@@ -11,7 +11,7 @@ spec:
       "log_level": "info",
       "log_file_path": "/var/log/calico/cni/cni.log",
      "datastore_type": "kubernetes",
-      "mtu": 8941,
+      "mtu": {{ .Values.calico.mtu }},
       "ipam": {
         "type": "calico-ipam"
       },


@@ -1,4 +1,4 @@
-{{- if .Values.cilium.enabled }}
+{{- if and .Values.multus.enabled .Values.cilium.enabled }}
 apiVersion: k8s.cni.cncf.io/v1
 kind: NetworkAttachmentDefinition
 metadata:


@@ -28,13 +28,30 @@ multus:
 cilium:
   enabled: false
 
+  containerRuntime:
+    integration: crio
+
+  # Until we figured out AppArmore on Alpine and Gentoo
+  securityContext:
+    privileged: true
+
   cni:
     binPath: "/usr/libexec/cni"
     #-- Ensure this is false if multus is enabled
     exclusive: false
+    # chainingMode: generic-veth
+
+  # Until we switch to Cilium
+  #bpf:
+  #  hostLegacyRouting: true
+  #  tproxy: false
+  #  enableIPv4Masquerade: false
+  #  enableIdentityMark: false
+
+  policyEnforcementMode: "audit"
 
   cluster:
-    # This should match the second octet + 1 of clusterPoolIPv4PodCIDRList,
+    # This should match the second octet of clusterPoolIPv4PodCIDRList
     # to prevent IP space overlap and easy tracking
     id: 1
     name: default
@@ -42,17 +59,16 @@ cilium:
   ipam:
     operator:
       clusterPoolIPv4PodCIDRList:
-        - 10.0.0.0/16
+        - 10.1.0.0/16
 
-  hostServices:
-    enabled: true
-
-  # Does this conflict with Calico in parallel ?
+  # Should be handled by multus
   nodePort:
-    enabled: true
+    enabled: false
 
   # Keep it simple for now
   l7Proxy: false
 
-  l2NeighDiscovery:
-    enabled: false
-
   cgroup:
     autoMount:


@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.23.10
+version: 1.23.10-1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:


@@ -1,6 +1,6 @@
 # kubezero
-![Version: 1.23.10](https://img.shields.io/badge/Version-1.23.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.23.10-1](https://img.shields.io/badge/Version-1.23.10--1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero - Root App of Apps chart
@@ -25,8 +25,8 @@ Kubernetes: `>= 1.20.0`
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
 | HighAvailableControlplane | bool | `false` |  |
-| addons.enabled | bool | `false` |  |
-| addons.targetRevision | string | `"0.6.0"` |  |
+| addons.enabled | bool | `true` |  |
+| addons.targetRevision | string | `"0.6.1"` |  |
 | argocd.enabled | bool | `false` |  |
 | argocd.istio.enabled | bool | `false` |  |
 | argocd.namespace | string | `"argocd"` |  |
@@ -59,7 +59,7 @@ Kubernetes: `>= 1.20.0`
 | metrics.istio.prometheus | object | `{}` |  |
 | metrics.namespace | string | `"monitoring"` |  |
 | metrics.targetRevision | string | `"0.8.1"` |  |
-| network.enabled | bool | `false` |  |
+| network.enabled | bool | `true` |  |
 | network.retain | bool | `true` |  |
 | network.targetRevision | string | `"0.3.2"` |  |
 | storage.aws-ebs-csi-driver.enabled | bool | `false` |  |


@@ -13,7 +13,9 @@ forseti:
 {{- with index .Values "addons" "aws-node-termination-handler" }}
 aws-node-termination-handler:
   {{- toYaml . | nindent 2 }}
-  enablePrometheusServer: {{ .Values.metrics.enabled }}
+  {{- with $.Values.metrics }}
+  enablePrometheusServer: {{ .enabled }}
+  {{- end }}
 {{- end }}
 
 {{- with .Values.addons.fuseDevicePlugin }}
@@ -31,6 +33,44 @@ external-dns:
   {{- toYaml . | nindent 2 }}
 {{- end }}
 
+{{- with index .Values "addons" "cluster-autoscaler" }}
+cluster-autoscaler:
+  {{- toYaml . | nindent 2 }}
+  autoDiscovery:
+    clusterName: {{ $.Values.global.clusterName }}
+  {{- with $.Values.global.aws }}
+  awsRegion: {{ .region }}
+  {{- end }}
+  {{- with $.Values.metrics }}
+  serviceMonitor:
+    enabled: {{ .enabled }}
+  prometheusRule:
+    enabled: {{ .enabled }}
+  {{- end }}
+  {{- with .IamArn }}
+  extraEnv:
+    AWS_ROLE_ARN: "{{ . }}"
+    AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+    AWS_STS_REGIONAL_ENDPOINTS: "regional"
+  extraVolumes:
+  - name: aws-token
+    projected:
+      sources:
+      - serviceAccountToken:
+          path: token
+          expirationSeconds: 86400
+          audience: "sts.amazonaws.com"
+  extraVolumeMounts:
+  - name: aws-token
+    mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+    readOnly: true
+  {{- end }}
+{{- end }}
 
 {{- end }}
 
 {{- define "addons-argo" }}


@@ -30,17 +30,8 @@ rateLimiting:
   - group: admissionregistration.k8s.io
     kind: ValidatingWebhookConfiguration
     jsonPointers:
-    - /webhooks/0/clientConfig/caBundle
     - /webhooks/0/failurePolicy
-    - /webhooks/1/clientConfig/caBundle
     - /webhooks/1/failurePolicy
-  - group: admissionregistration.k8s.io
-    kind: MutatingWebhookConfiguration
-    jsonPointers:
-    - /webhooks/0/clientConfig/caBundle
-    - /webhooks/1/clientConfig/caBundle
-    - /webhooks/2/clientConfig/caBundle
-    - /webhooks/3/clientConfig/caBundle
 
 {{- end }}


@@ -16,7 +16,7 @@ aws-ebs-csi-driver:
   enabled: {{ default false (index .Values "storage" "aws-ebs-csi-driver" "enabled")}}
   controller:
     replicaCount: {{ ternary 2 1 .Values.HighAvailableControlplane }}
-    k8sTagClusterId: {{ .Values.ClusterName }}
+    k8sTagClusterId: {{ .Values.global.clusterName }}
     env:
     - name: AWS_ROLE_ARN
       value: {{ index .Values "storage" "aws-ebs-csi-driver" "IamArn" | quote }}


@@ -7,11 +7,11 @@ kubezero:
 HighAvailableControlplane: false
 
 addons:
-  enabled: false
-  targetRevision: 0.6.0
+  enabled: true
+  targetRevision: 0.6.1
 
 network:
-  enabled: false
+  enabled: true
   retain: true
   targetRevision: 0.3.2


@@ -50,4 +50,8 @@ function publish_chart() {
 publish_chart
 
+CF_DIST=E1YFUJXMCXT2RN
+aws cloudfront create-invalidation --distribution $CF_DIST --paths "/charts/*"
+
 #reset_index