feat: bootstrap / upgrade reorg as part of 1.23

parent bdf6ff1d27
commit 56f8eb0233

Dockerfile (10 lines changed)
@@ -14,6 +14,9 @@ RUN cd /etc/apk/keys && \
     jq \
     yq \
     diffutils \
+    bash \
+    python3 \
+    py3-yaml \
     cri-tools@kubezero \
     kubeadm@kubezero~=${KUBE_VERSION} \
     kubectl@kubezero~=${KUBE_VERSION} \
@@ -22,9 +25,10 @@ RUN cd /etc/apk/keys && \
     restic@testing \
     helm@testing

-ADD admin/kubezero.sh /usr/bin
-RUN helm repo add kubezero https://cdn.zero-downtime.net/charts
+ADD admin/kubezero.sh admin/libhelm.sh /usr/bin
 ADD charts/kubeadm /charts/kubeadm
+ADD charts/kubezero-addons /charts/kubezero-addons
+ADD charts/kubezero-network /charts/kubezero-network
 ADD charts/kubezero /charts/kubezero

ENTRYPOINT ["kubezero.sh"]
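Side note: the `~=${KUBE_VERSION}` pins resolve against the tagged kubezero repository; a sketch of what the install expands to, assuming KUBE_VERSION=1.23.10 (illustrative value):

    apk add kubeadm@kubezero~=1.23.10 kubectl@kubezero~=1.23.10
    # "@kubezero" pulls from the repository tagged in /etc/apk/repositories;
    # "~=" is apk's fuzzy match, accepting package revisions of that version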
admin/kubezero.sh
@@ -1,10 +1,13 @@
-#!/bin/sh
+#!/bin/bash

 if [ -n "$DEBUG" ]; then
   set -x
   LOG="--v=5"
 fi

+# include helm lib
+. libhelm.sh
+
 # Export vars to ease use in debug_shell etc
 export WORKDIR=/tmp/kubezero
 export HOSTFS=/host
@@ -44,7 +47,7 @@ _kubeadm() {

 # Render cluster config
 render_kubeadm() {
-  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubezero.yaml
+  helm template $CHARTS/kubeadm --output-dir ${WORKDIR} -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml

   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
@@ -62,13 +65,17 @@ render_kubeadm() {

 parse_kubezero() {
-  [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; return 1; }
+  # remove with 1.24
+  if [ ! -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml ]; then
+    [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] && cp ${HOSTFS}/etc/kubernetes/kubezero.yaml ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
+  fi

-  export CLUSTERNAME=$(yq eval '.clusterName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export PROVIDER_ID=$(yq eval '.providerID' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
-  export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
+  export CLUSTERNAME=$(yq eval '.global.clusterName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export HIGHAVAILABLE=$(yq eval '.global.highAvailable // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export PROVIDER_ID=$(yq eval '.providerID' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
+  export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)

   # From here on bail out, allows debug_shell even in error cases
   set -e
@@ -117,11 +124,27 @@ cluster_upgrade() {
   ### PRE 1.23 specific
   #####################

-  # Migrate addons and network values into CM from kubezero.yaml
+  # Migrate addons and network values from local kubeadm-values.yaml on controllers into CM
+  # - remove secrets from addons
+  # - enable cilium
+
+  if [[ $PROVIDER_ID =~ ^aws ]]; then
+    REGION=$(echo $PROVIDER_ID | sed -e 's,aws:///,,' -e 's,/.*,,' -e 's/\w$//')
+  fi

   kubectl get cm -n kube-system kubezero-values || \
     kubectl create configmap -n kube-system kubezero-values \
-      --from-literal addons="$(yq e '.addons | del .clusterBackup.repository | del .clusterBackup.password' ${HOSTFS}/etc/kubernetes/kubezero.yaml)" \
-      --from-literal network="$(yq e .network ${HOSTFS}/etc/kubernetes/kubezero.yaml)"
+      --from-literal values.yaml="$(yq e 'del(.addons.clusterBackup.repository) | del(.addons.clusterBackup.password) |
+        .addons.clusterBackup.image.tag = strenv(KUBE_VERSION) |
+        .network.cilium.enabled = true | .network.multus.defaultNetworks = ["cilium"] |
+        .network.cilium.cluster.name = strenv(CLUSTERNAME) |
+        .global.clusterName = strenv(CLUSTERNAME) |
+        .global.highAvailable = strenv(HIGHAVAILABLE) |
+        .global.aws.region = strenv(REGION)' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)"
+
+  # Create kubeadm-values CM if not available
+  kubectl get cm -n kube-system kubeadm-values || \
+    kubectl create configmap -n kube-system kubeadm-values

   #####################
@@ -254,10 +277,10 @@ control_plane_node() {
     export ETCD_INITIAL_CLUSTER=$(echo ${_cluster%%,} | sed -e 's/ //g')
   fi

-  # Patch kubezero.yaml and re-render to get etcd manifest patched
+  # Patch kubeadm-values.yaml and re-render to get etcd manifest patched
   yq eval -i '.etcd.state = "existing"
     | .etcd.initialCluster = strenv(ETCD_INITIAL_CLUSTER)
-    ' ${HOSTFS}/etc/kubernetes/kubezero.yaml
+    ' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
   render_kubeadm
 fi
@@ -318,30 +341,33 @@ control_plane_node() {

 apply_module() {
-  MODULE=$1
+  MODULES=$1

-  # network
-  kubectl get configmap -n kube-system kubezero-values -o custom-columns=NAME:".data.$MODULE" --no-headers=true > _values.yaml
+  kubectl get configmap -n kube-system kubezero-values -o yaml | yq '.data."values.yaml"' > $WORKDIR/_values.yaml

-  helm template $CHARTS/kubezero-$MODULE --namespace kube-system --name-template $MODULE --skip-crds --set installCRDs=false -f _values.yaml --kube-version $KUBE_VERSION > helm-no-crds.yaml
-  helm template $CHARTS/kubezero-$MODULE --namespace kube-system --name-template $MODULE --include-crds --set installCRDs=true -f _values.yaml --kube-version $KUBE_VERSION > helm-crds.yaml
-  diff -e helm-no-crds.yaml helm-crds.yaml | head -n-1 | tail -n+2 > crds.yaml
+  # Always use embedded kubezero chart
+  helm template $CHARTS/kubezero -f $WORKDIR/_values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR

-  # Only apply if there are actually any crds
-  if [ -s crds.yaml ]; then
-    kubectl apply -f crds.yaml --server-side $LOG
-  fi
+  # Resolve all the enabled modules
+  [ -z "$MODULES" ] && MODULES="$(ls ${WORKDIR}/kubezero/templates | sed -e 's/.yaml//g')"

-  helm template $CHARTS/kubezero-$MODULE --namespace kube-system --include-crds --name-template $MODULE \
-    -f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
+  # CRDs first
+  for t in $MODULES; do
+    _helm crds $t
+  done

-  echo "Applied KubeZero module: $MODULE"
+  for t in $MODULES; do
+    _helm apply $t
+  done
+
+  echo "Applied KubeZero modules: $MODULES"
 }

 # backup etcd + /etc/kubernetes/pki
 backup() {
   # Display all ENVs, careful this exposes the password!
   [ -n "$DEBUG" ] && env

   restic snapshots || restic init || exit 1
@@ -380,10 +406,10 @@ debug_shell() {

   printf "For manual etcdctl commands use:\n # export ETCDCTL_ENDPOINTS=$ETCD_NODENAME:2379\n"

-  /bin/sh
+  /bin/bash
 }

-# First parse kubezero.yaml
+# First parse kubeadm-values.yaml
 parse_kubezero

 # Execute tasks
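The REGION parsing above can be sanity-checked in isolation; the providerID below is a made-up example of the AWS format:

    PROVIDER_ID="aws:///eu-central-1b/i-0123456789abcdef0"   # hypothetical value
    REGION=$(echo $PROVIDER_ID | sed -e 's,aws:///,,' -e 's,/.*,,' -e 's/\w$//')
    echo $REGION   # -> eu-central-1, the trailing availability-zone letter is stripped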
admin/libhelm.sh (new executable file, 110 lines)
@@ -0,0 +1,110 @@
#!/bin/bash

# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1"

# Waits for max 300s and retries
function wait_for() {
  local TRIES=0
  while true; do
    eval " $@" && break
    [ $TRIES -eq 100 ] && return 1
    let TRIES=$TRIES+1
    sleep 3
  done
}


function chart_location() {
  echo "$1 --repo https://cdn.zero-downtime.net/charts"
}


# make sure namespace exists prior to calling helm as the create-namespace option doesn't work
function create_ns() {
  local namespace=$1
  if [ "$namespace" != "kube-system" ]; then
    kubectl get ns $namespace || kubectl create ns $namespace
  fi
}


# delete non kube-system ns
function delete_ns() {
  local namespace=$1
  [ "$namespace" != "kube-system" ] && kubectl delete ns $namespace
}


# Extract crds via helm calls and apply delta=crds only
function _crds() {
  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds --set ${module}.installCRDs=false -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-no-crds.yaml
  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds --set ${module}.installCRDs=true -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-crds.yaml
  diff -e $WORKDIR/helm-no-crds.yaml $WORKDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $WORKDIR/crds.yaml

  # Only apply if there are actually any crds
  if [ -s $WORKDIR/crds.yaml ]; then
    kubectl apply -f $WORKDIR/crds.yaml --server-side
  fi
}


# helm template | kubectl apply -f -
# confine to one namespace if possible
function apply() {
  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
    | python3 -c '
import yaml
import sys

for manifest in yaml.safe_load_all(sys.stdin):
    if manifest:
        if "metadata" in manifest and "namespace" not in manifest["metadata"]:
            manifest["metadata"]["namespace"] = sys.argv[1]
        print("---")
        print(yaml.dump(manifest))' $namespace > $WORKDIR/helm.yaml

  kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?
}


function _helm() {
  local action=$1
  local module=$2

  local chart="$(yq eval '.spec.source.chart' $WORKDIR/kubezero/templates/${module}.yaml)"
  local namespace="$(yq eval '.spec.destination.namespace' $WORKDIR/kubezero/templates/${module}.yaml)"

  targetRevision=""
  _version="$(yq eval '.spec.source.targetRevision' $WORKDIR/kubezero/templates/${module}.yaml)"

  [ -n "$_version" ] && targetRevision="--version $_version"

  yq eval '.spec.source.helm.values' $WORKDIR/kubezero/templates/${module}.yaml > $WORKDIR/values.yaml

  if [ $action == "crds" ]; then
    # Allow custom CRD handling
    declare -F ${module}-crds && ${module}-crds || _crds

  elif [ $action == "apply" ]; then
    # namespace must exist prior to apply
    create_ns $namespace

    # Optional pre hook
    declare -F ${module}-pre && ${module}-pre

    apply

    # Optional post hook
    declare -F ${module}-post && ${module}-post

  elif [ $action == "delete" ]; then
    apply

    # Delete dedicated namespace if not kube-system
    [ -n "$DELETE_NS" ] && delete_ns $namespace
  fi

  return 0
}
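Minimal usage sketch for the new lib, assuming kubezero.sh has already rendered the embedded kubezero chart into $WORKDIR/kubezero/templates and exported WORKDIR/KUBE_VERSION (the module name cert-manager is illustrative):

    . libhelm.sh
    _helm crds cert-manager    # apply only the CRD delta extracted by _crds()
    _helm apply cert-manager   # template the chart and kubectl apply, namespace injected
    wait_for "kubectl get crd certificates.cert-manager.io"   # retries up to 100 x 3s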
@@ -11,11 +11,34 @@ yaml.explicit_start = True
 yaml.indent(mapping=2, sequence=4, offset=2)


-parser = argparse.ArgumentParser(description="Update Route53 entries")
+def rec_sort(d):
+    if isinstance(d, dict):
+        res = dict()
+
+        # Always have "enabled" first if present
+        if "enabled" in d.keys():
+            res["enabled"] = rec_sort(d["enabled"])
+            d.pop("enabled")
+
+        # next is "name" if present
+        if "name" in d.keys():
+            res["name"] = rec_sort(d["name"])
+            d.pop("name")
+
+        for k in sorted(d.keys()):
+            res[k] = rec_sort(d[k])
+        return res
+
+    if isinstance(d, list):
+        for idx, elem in enumerate(d):
+            d[idx] = rec_sort(elem)
+
+    return d
+
+
+parser = argparse.ArgumentParser(description="Migrate ArgoCD Kubezero values to new cluster config")
 parser.add_argument(
     "--version",
     dest="version",
-    default="1.22.8-10",
+    default="1.23.10",
     action="store",
     required=False,
     help="Update KubeZero version",
@@ -34,62 +57,25 @@ values = yaml.load(application["spec"]["source"]["helm"]["values"])

 ### Do your thing

-# New Istio Gateway charts
-if "private" in values["istio-ingress"]:
-    values["istio-private-ingress"] = {
-        "enabled": True,
-        "certificates": values["istio-ingress"]["private"]["certificates"].copy()
-    }
-
-    if "gateway" in values["istio-ingress"]["private"]:
-        values["istio-private-ingress"]["gateway"] = {}
-
-        try:
-            values["istio-private-ingress"]["gateway"]["replicaCount"] = values["istio-ingress"]["private"]["gateway"]["replicaCount"]
-        except KeyError:
-            pass
-
-        if "ports" in values["istio-ingress"]["private"]["gateway"]:
-            values["istio-private-ingress"]["gateway"]["service"] = {}
-            values["istio-private-ingress"]["gateway"]["service"]["ports"] = []
-            for port in values["istio-ingress"]["private"]["gateway"]["ports"]:
-                if port["name"] not in ["status-port", "http2", "https"]:
-                    values["istio-private-ingress"]["gateway"]["service"]["ports"].append(port)
-
-    values["istio-ingress"].pop("private")
-
-if "public" in values["istio-ingress"]:
-    values["istio-ingress"]["certificates"] = values["istio-ingress"]["public"]["certificates"].copy()
-
-    if "gateway" in values["istio-ingress"]["public"]:
-        values["istio-ingress"]["gateway"] = {}
-
-        try:
-            values["istio-ingress"]["gateway"]["replicaCount"] = values["istio-ingress"]["public"]["gateway"]["replicaCount"]
-        except KeyError:
-            pass
-
-        if "ports" in values["istio-ingress"]["public"]["gateway"]:
-            values["istio-ingress"]["gateway"]["service"] = {}
-            values["istio-ingress"]["gateway"]["service"]["ports"] = []
-            for port in values["istio-ingress"]["public"]["gateway"]["ports"]:
-                if port["name"] not in ["status-port", "http2", "https"]:
-                    values["istio-ingress"]["gateway"]["service"]["ports"].append(port)
-
-    values["istio-ingress"].pop("public")
-
-if "global" in values["istio-ingress"]:
-    values["istio-ingress"].pop("global")
+# migrate ClusterName to clusterName
+if "ClusterName" in values:
+    values["clusterName"] = values["ClusterName"]
+    values.pop("ClusterName")

 # Remove Kiam
 if "kiam" in values:
     values.pop("kiam")

+# Create new clusterwide cloudprovider data if possible
+try:
+    if values["cert-manager"]["clusterIssuer"]["solvers"][0]["dns01"]["route53"]["region"]:
+        if "aws" not in values:
+            values["aws"] = {}
+        values["aws"]["region"] = values["cert-manager"]["clusterIssuer"]["solvers"][0]["dns01"]["route53"]["region"]
+except KeyError:
+    pass

 ### End

 # Merge new values
 buffer = io.StringIO()
-yaml.dump(values, buffer)
+yaml.dump(rec_sort(values), buffer)
 application["spec"]["source"]["helm"]["values"] = buffer.getvalue()

 # Dump final yaml
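The ClusterName rename could equally be spot-checked with yq (sketch only; the migration script above remains the authoritative path):

    yq eval '.clusterName = .ClusterName | del(.ClusterName)' values.yaml   # values.yaml is a hypothetical extract of the helm values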
@@ -1,13 +1,16 @@
 #!/bin/bash -e

-VERSION="v1.23"
+VERSION="v1.23.10-1"

 [ -n "$DEBUG" ] && set -x

 # unset any AWS_DEFAULT_PROFILE as it will break aws-iam-auth
 unset AWS_DEFAULT_PROFILE

-controller_nodes_upgrade() {
+all_nodes_upgrade() {
+  CMD="$1"
+
   echo "Deploying node upgrade daemonSet..."
   cat <<EOF | kubectl apply -f -
 apiVersion: apps/v1
@@ -34,10 +37,13 @@ spec:
       - name: kubezero-upgrade-${VERSION//.}
         image: busybox
         command: ["/bin/sh"]
-        args: ["-x", "-c", "[ -d /host/opt/cni/bin ] && { mkdir -p /host/usr/libexec/cni && cp /host/opt/cni/bin/* /host/usr/libexec/cni; } || true" ]
+        args: ["-x", "-c", "$CMD" ]
         volumeMounts:
         - name: host
           mountPath: /host
+        securityContext:
+          capabilities:
+            add: ["SYS_ADMIN"]
       containers:
       - name: kubezero-upgrade-${VERSION//.}-wait
         image: busybox
@@ -49,13 +55,16 @@ spec:
           type: Directory
 EOF

-  #kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
+  kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
   kubectl delete ds kubezero-upgrade-${VERSION//.} -n kube-system
 }

-echo "Deploying cluster upgrade job ..."
-
-cat <<EOF | kubectl apply -f -
+control_plane_upgrade() {
+  TASKS="$1"
+
+  echo "Deploying cluster upgrade job ..."
+  cat <<EOF | kubectl apply -f -
 apiVersion: v1
 kind: Pod
 metadata:
@@ -72,10 +81,7 @@ spec:
     image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
     imagePullPolicy: Always
     command: ["kubezero.sh"]
-    args:
-    - cluster_upgrade
-    - apply_network
-    - apply_addons
+    args: [$TASKS]
     env:
     - name: DEBUG
       value: "$DEBUG"
@@ -106,9 +112,27 @@ spec:
   restartPolicy: Never
 EOF

   kubectl wait pod kubezero-upgrade-${VERSION//.} -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
   while true; do
     kubectl logs kubezero-upgrade-${VERSION//.} -n kube-system -f 2>/dev/null && break
     sleep 3
   done
   kubectl delete pod kubezero-upgrade-${VERSION//.} -n kube-system
+}
+
+all_nodes_upgrade "mount --make-shared /host/sys/fs/cgroup; mount --make-shared /host/sys;"
+
+control_plane_upgrade cluster_upgrade
+
+echo "Adjust kubezero-values CM !!"
+read
+
+#kubectl delete ds kube-multus-ds -n kube-system
+
+control_plane_upgrade "apply_network, apply_addons"
+exit 0
+
+kubectl rollout restart daemonset/calico-node -n kube-system
+kubectl rollout restart daemonset/cilium -n kube-system
+kubectl rollout restart daemonset/kube-multus-ds -n kube-system
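The `${VERSION//.}` expansions above strip the dots from the version so it can be embedded in resource names; a quick check:

    VERSION="v1.23.10-1"
    echo "kubezero-upgrade-${VERSION//.}"   # -> kubezero-upgrade-v12310-1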
charts/kubeadm/README.md
@@ -30,12 +30,12 @@ Kubernetes: `>= 1.20.0`
 | api.listenPort | int | `6443` | |
 | api.oidcEndpoint | string | `""` | s3://${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
 | api.serviceAccountIssuer | string | `""` | https://s3.${REGION}.amazonaws.com/${CFN[ConfigBucket]}/k8s/$CLUSTERNAME |
-| clusterName | string | `"pleasechangeme"` | |
 | domain | string | `"changeme.org"` | |
 | etcd.extraArgs | object | `{}` | |
 | etcd.nodeName | string | `"etcd"` | |
 | etcd.state | string | `"new"` | |
-| highAvailable | bool | `false` | |
+| global.clusterName | string | `"pleasechangeme"` | |
+| global.highAvailable | bool | `false` | |
 | listenAddress | string | `"0.0.0.0"` | Needs to be set to primary node IP |
 | nodeName | string | `"kubezero-node"` | set to $HOSTNAME |
 | protectKernelDefaults | bool | `false` | |
@@ -1,7 +1,7 @@
 apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
-clusterName: {{ .Values.clusterName }}
+clusterName: {{ .Values.global.clusterName }}
 featureGates:
   UnversionedKubeletConfigMap: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
@@ -13,12 +13,12 @@ etcd:
   extraArgs:
     ### DNS discovery
     #discovery-srv: {{ .Values.domain }}
-    #discovery-srv-name: {{ .Values.clusterName }}
+    #discovery-srv-name: {{ .Values.global.clusterName }}
     advertise-client-urls: https://{{ .Values.etcd.nodeName }}:2379
     initial-advertise-peer-urls: https://{{ .Values.etcd.nodeName }}:2380
     initial-cluster: {{ include "kubeadm.etcd.initialCluster" .Values.etcd | quote }}
     initial-cluster-state: {{ .Values.etcd.state }}
-    initial-cluster-token: etcd-{{ .Values.clusterName }}
+    initial-cluster-token: etcd-{{ .Values.global.clusterName }}
     name: {{ .Values.etcd.nodeName }}
     listen-peer-urls: https://{{ .Values.listenAddress }}:2380
     listen-client-urls: https://{{ .Values.listenAddress }}:2379
@@ -40,13 +40,13 @@ controllerManager:
   extraArgs:
     profiling: "false"
     terminated-pod-gc-threshold: "300"
-    leader-elect: {{ .Values.highAvailable | quote }}
+    leader-elect: {{ .Values.global.highAvailable | quote }}
     logging-format: json
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
 scheduler:
   extraArgs:
     profiling: "false"
-    leader-elect: {{ .Values.highAvailable | quote }}
+    leader-elect: {{ .Values.global.highAvailable | quote }}
     logging-format: json
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
 apiServer:
@@ -73,7 +73,7 @@ apiServer:
     {{- end }}
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
     enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit
-    # {{- if .Values.highAvailable }}
+    # {{- if .Values.global.highAvailable }}
     # goaway-chance: ".001"
     # {{- end }}
     logging-format: json
@@ -4,13 +4,13 @@ kind: Config
 clusters:
 - cluster:
     server: https://{{ .Values.api.endpoint }}
-  name: {{ .Values.clusterName }}
+  name: {{ .Values.global.clusterName }}
 contexts:
 - context:
-    cluster: {{ .Values.clusterName }}
+    cluster: {{ .Values.global.clusterName }}
     user: kubernetes-admin
-  name: kubernetes-admin@{{ .Values.clusterName }}
-current-context: kubernetes-admin@{{ .Values.clusterName }}
+  name: kubernetes-admin@{{ .Values.global.clusterName }}
+current-context: kubernetes-admin@{{ .Values.global.clusterName }}
 preferences: {}
 users:
 - name: kubernetes-admin
@@ -21,7 +21,7 @@ users:
       args:
       - "token"
       - "-i"
-      - "{{ .Values.clusterName }}"
+      - "{{ .Values.global.clusterName }}"
      - "-r"
       - "{{ .Values.api.awsIamAuth.kubeAdminRole }}"
 {{- end }}
@@ -1,5 +1,5 @@
 spec:
-  replicas: {{ ternary 3 1 .Values.highAvailable }}
+  replicas: {{ ternary 3 1 .Values.global.highAvailable }}
   template:
     spec:
       containers:
@@ -75,7 +75,7 @@ metadata:
     k8s-app: aws-iam-authenticator
 data:
   config.yaml: |
-    clusterID: {{ .Values.clusterName }}
+    clusterID: {{ .Values.global.clusterName }}

 ---
 apiVersion: apps/v1
charts/kubeadm/values.yaml
@@ -1,4 +1,7 @@
-clusterName: pleasechangeme
+global:
+  clusterName: pleasechangeme
+  highAvailable: false

 # -- set to $HOSTNAME
 nodeName: kubezero-node
 domain: changeme.org
@@ -22,8 +25,6 @@ api:
     workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
     kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"

-highAvailable: false
-
 etcd:
   nodeName: etcd
   state: new
charts/kubezero-addons/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.6.0
+version: 0.6.1
 appVersion: v1.23.10
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -23,4 +23,8 @@ dependencies:
     version: 1.11.0
     repository: https://kubernetes-sigs.github.io/external-dns/
     condition: external-dns.enabled
+  - name: cluster-autoscaler
+    version: 9.21.0
+    repository: https://kubernetes.github.io/autoscaler
+    condition: cluster-autoscaler.enabled
 kubeVersion: ">= 1.20.0"
charts/kubezero-addons/README.md
@@ -1,6 +1,6 @@
 # kubezero-addons

-![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.10](https://img.shields.io/badge/AppVersion-v1.23.10-informational?style=flat-square)
+![Version: 0.6.1](https://img.shields.io/badge/Version-0.6.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.10](https://img.shields.io/badge/AppVersion-v1.23.10-informational?style=flat-square)

 KubeZero umbrella chart for various optional cluster addons

@@ -20,6 +20,7 @@ Kubernetes: `>= 1.20.0`
 |------------|------|---------|
 | | aws-node-termination-handler | 0.18.5 |
 | https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |

 # MetalLB

@@ -59,7 +60,17 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.useProviderId | bool | `true` | |
 | awsNeuron.enabled | bool | `false` | |
 | awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
-| awsNeuron.image.tag | string | `"1.9.0.0"` | |
+| awsNeuron.image.tag | string | `"1.9.3.0"` | |
+| cluster-autoscaler.autoDiscovery.clusterName | string | `""` | |
+| cluster-autoscaler.awsRegion | string | `"us-west-2"` | |
+| cluster-autoscaler.enabled | bool | `false` | |
+| cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
+| cluster-autoscaler.podDisruptionBudget | bool | `false` | |
+| cluster-autoscaler.prometheusRule.enabled | bool | `false` | |
+| cluster-autoscaler.serviceMonitor.enabled | bool | `false` | |
+| cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
+| cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
+| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
 | clusterBackup.enabled | bool | `false` | |
 | clusterBackup.extraEnv | list | `[]` | |
 | clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
charts/kubezero-addons/values.yaml
@@ -81,7 +81,53 @@ awsNeuron:

   image:
     name: public.ecr.aws/neuron/neuron-device-plugin
-    tag: 1.9.0.0
+    tag: 1.9.3.0
+
+cluster-autoscaler:
+  enabled: false
+
+  autoDiscovery:
+    clusterName: ""
+  awsRegion: "us-west-2"
+
+  serviceMonitor:
+    enabled: false
+    interval: 30s
+
+  prometheusRule:
+    enabled: false
+
+  # Disable pdb for now
+  podDisruptionBudget: false
+
+  #securityContext:
+  #  runAsNonRoot: true
+
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+  tolerations:
+    - key: node-role.kubernetes.io/master
+      effect: NoSchedule
+
+  # On AWS enable Projected Service Accounts to assume IAM role
+  #extraEnv:
+  #  AWS_ROLE_ARN: <IamArn>
+  #  AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+  #  AWS_STS_REGIONAL_ENDPOINTS: "regional"
+
+  #extraVolumes:
+  #- name: aws-token
+  #  projected:
+  #    sources:
+  #    - serviceAccountToken:
+  #        path: token
+  #        expirationSeconds: 86400
+  #        audience: "sts.amazonaws.com"
+
+  #extraVolumeMounts:
+  #- name: aws-token
+  #  mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+  #  readOnly: true

 external-dns:
   enabled: false
charts/kubezero-auth/README.md
@@ -1,6 +1,6 @@
 # kubezero-auth

-![Version: 0.2.2](https://img.shields.io/badge/Version-0.2.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 18.0.1](https://img.shields.io/badge/AppVersion-18.0.1-informational?style=flat-square)
+![Version: 0.2.4](https://img.shields.io/badge/Version-0.2.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 19.0.1](https://img.shields.io/badge/AppVersion-19.0.1-informational?style=flat-square)

 KubeZero umbrella chart for all things Authentication and Identity management

@@ -18,8 +18,8 @@ Kubernetes: `>= 1.20.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
-| https://charts.bitnami.com/bitnami | postgresql | 11.6.7 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
+| https://charts.bitnami.com/bitnami | postgresql | 11.8.1 |

 # Keycloak

@@ -42,6 +42,7 @@ https://github.com/keycloak/keycloak/tree/main/operator
 | keycloak.istio.url | string | `""` | |
 | keycloak.metrics.enabled | bool | `false` | |
 | postgresql.auth.database | string | `"keycloak"` | |
+| postgresql.auth.existingSecret | string | `"kubezero-auth-postgresql"` | |
 | postgresql.auth.username | string | `"keycloak"` | |
 | postgresql.enabled | bool | `false` | |
 | postgresql.primary.persistence.size | string | `"1Gi"` | |
charts/kubezero-logging/README.md
@@ -88,7 +88,7 @@ Kubernetes: `>= 1.20.0`
 | fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/var/lib/containers/logs"` | |
 | fluent-bit.daemonSetVolumes[1].name | string | `"newlog"` | |
 | fluent-bit.enabled | bool | `false` | |
-| fluent-bit.image.tag | string | `"1.9.3"` | |
+| fluent-bit.image.tag | string | `"1.9.8"` | |
 | fluent-bit.luaScripts."kubezero.lua" | string | `"function nest_k8s_ns(tag, timestamp, record)\n  if not record['kubernetes']['namespace_name'] then\n    return 0, 0, 0\n  end\n  new_record = {}\n  for key, val in pairs(record) do\n    if key == 'kube' then\n      new_record[key] = {}\n      new_record[key][record['kubernetes']['namespace_name']] = record[key]\n    else\n      new_record[key] = record[key]\n    end\n  end\n  return 1, timestamp, new_record\nend\n"` | |
 | fluent-bit.resources.limits.memory | string | `"64Mi"` | |
 | fluent-bit.resources.requests.cpu | string | `"20m"` | |
charts/kubezero-logging/values.yaml
@@ -244,7 +244,7 @@ fluent-bit:

   image:
     #repository: public.ecr.aws/zero-downtime/fluent-bit
-    tag: 1.9.7
+    tag: 1.9.8

   serviceMonitor:
     enabled: false
charts/kubezero-network/README.md
@@ -34,18 +34,21 @@ Kubernetes: `>= 1.20.0`
 | cilium.cluster.name | string | `"default"` | |
 | cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
+| cilium.cni.exclusive | bool | `false` | |
 | cilium.containerRuntime.integration | string | `"crio"` | |
 | cilium.enabled | bool | `false` | |
-| cilium.hostServices.enabled | bool | `true` | |
 | cilium.hubble.enabled | bool | `false` | |
-| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.0.0.0/16"` | |
+| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.1.0.0/16"` | |
+| cilium.l2NeighDiscovery.enabled | bool | `false` | |
 | cilium.l7Proxy | bool | `false` | |
-| cilium.nodePort.enabled | bool | `true` | |
+| cilium.nodePort.enabled | bool | `false` | |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cilium.operator.replicas | int | `1` | |
 | cilium.operator.tolerations[0].effect | string | `"NoSchedule"` | |
 | cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| cilium.policyEnforcementMode | string | `"audit"` | |
 | cilium.prometheus.enabled | bool | `false` | |
 | cilium.prometheus.port | int | `9091` | |
+| cilium.securityContext.privileged | bool | `true` | |
 | cilium.tunnel | string | `"geneve"` | |
 | metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
@@ -424,6 +424,8 @@ spec:
             # Auto-detect the BGP IP address.
             - name: IP
               value: "autodetect"
+            - name: IP_AUTODETECTION_METHOD
+              value: "interface=eth.*"
             # Enable IPIP
             - name: CALICO_IPV4POOL_IPIP
               value: "Never"
@@ -1,4 +1,4 @@
-{{- if .Values.calico.enabled }}
+{{- if and .Values.multus.enabled .Values.calico.enabled }}
 apiVersion: k8s.cni.cncf.io/v1
 kind: NetworkAttachmentDefinition
 metadata:
@@ -11,7 +11,7 @@ spec:
       "log_level": "info",
       "log_file_path": "/var/log/calico/cni/cni.log",
       "datastore_type": "kubernetes",
-      "mtu": 8941,
+      "mtu": {{ .Values.calico.mtu }},
       "ipam": {
           "type": "calico-ipam"
       },
@@ -1,4 +1,4 @@
-{{- if .Values.cilium.enabled }}
+{{- if and .Values.multus.enabled .Values.cilium.enabled }}
 apiVersion: k8s.cni.cncf.io/v1
 kind: NetworkAttachmentDefinition
 metadata:
charts/kubezero-network/values.yaml
@@ -28,13 +28,30 @@ multus:
 cilium:
   enabled: false

   containerRuntime:
     integration: crio

+  # Until we figure out AppArmor on Alpine and Gentoo
+  securityContext:
+    privileged: true
+
   cni:
     binPath: "/usr/libexec/cni"
+    #-- Ensure this is false if multus is enabled
+    exclusive: false
+    # chainingMode: generic-veth
+
+  # Until we switch to Cilium
+  #bpf:
+  #  hostLegacyRouting: true
+  #  tproxy: false
+
+  # enableIPv4Masquerade: false
+  # enableIdentityMark: false
+  policyEnforcementMode: "audit"

   cluster:
-    # This should match the second octet + 1 of clusterPoolIPv4PodCIDRList,
+    # This should match the second octet of clusterPoolIPv4PodCIDRList
     # to prevent IP space overlap and easy tracking
     id: 1
     name: default
@@ -42,17 +59,16 @@ cilium:
   ipam:
     operator:
       clusterPoolIPv4PodCIDRList:
-        - 10.0.0.0/16
+        - 10.1.0.0/16

-  hostServices:
-    enabled: true
-
-  # Does this conflict with Calico in parallel ?
+  # Should be handled by multus
   nodePort:
-    enabled: true
+    enabled: false

   # Keep it simple for now
   l7Proxy: false
+  l2NeighDiscovery:
+    enabled: false

   cgroup:
     autoMount:
charts/kubezero/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.23.10
+version: 1.23.10-1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
charts/kubezero/README.md
@@ -1,6 +1,6 @@
 # kubezero

-![Version: 1.23.10](https://img.shields.io/badge/Version-1.23.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.23.10-1](https://img.shields.io/badge/Version-1.23.10--1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero - Root App of Apps chart

@@ -25,8 +25,8 @@ Kubernetes: `>= 1.20.0`
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
 | HighAvailableControlplane | bool | `false` | |
-| addons.enabled | bool | `false` | |
-| addons.targetRevision | string | `"0.6.0"` | |
+| addons.enabled | bool | `true` | |
+| addons.targetRevision | string | `"0.6.1"` | |
 | argocd.enabled | bool | `false` | |
 | argocd.istio.enabled | bool | `false` | |
 | argocd.namespace | string | `"argocd"` | |
@@ -59,7 +59,7 @@ Kubernetes: `>= 1.20.0`
 | metrics.istio.prometheus | object | `{}` | |
 | metrics.namespace | string | `"monitoring"` | |
 | metrics.targetRevision | string | `"0.8.1"` | |
-| network.enabled | bool | `false` | |
+| network.enabled | bool | `true` | |
 | network.retain | bool | `true` | |
 | network.targetRevision | string | `"0.3.2"` | |
 | storage.aws-ebs-csi-driver.enabled | bool | `false` | |
@@ -13,7 +13,9 @@ forseti:
 {{- with index .Values "addons" "aws-node-termination-handler" }}
 aws-node-termination-handler:
   {{- toYaml . | nindent 2 }}
-  enablePrometheusServer: {{ .Values.metrics.enabled }}
+  {{- with $.Values.metrics }}
+  enablePrometheusServer: {{ .enabled }}
+  {{- end }}
 {{- end }}

 {{- with .Values.addons.fuseDevicePlugin }}
@@ -31,6 +33,44 @@ external-dns:
   {{- toYaml . | nindent 2 }}
 {{- end }}

+{{- with index .Values "addons" "cluster-autoscaler" }}
+cluster-autoscaler:
+  {{- toYaml . | nindent 2 }}
+  autoDiscovery:
+    clusterName: {{ $.Values.global.clusterName }}
+
+  {{- with $.Values.global.aws }}
+  awsRegion: {{ .region }}
+  {{- end }}
+
+  {{- with $.Values.metrics }}
+  serviceMonitor:
+    enabled: {{ .enabled }}
+  prometheusRule:
+    enabled: {{ .enabled }}
+  {{- end }}
+
+  {{- with .IamArn }}
+  extraEnv:
+    AWS_ROLE_ARN: "{{ . }}"
+    AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+    AWS_STS_REGIONAL_ENDPOINTS: "regional"
+  extraVolumes:
+  - name: aws-token
+    projected:
+      sources:
+      - serviceAccountToken:
+          path: token
+          expirationSeconds: 86400
+          audience: "sts.amazonaws.com"
+  extraVolumeMounts:
+  - name: aws-token
+    mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+    readOnly: true
+  {{- end }}
+
+{{- end }}
|
@@ -30,17 +30,8 @@ rateLimiting:
     - group: admissionregistration.k8s.io
       kind: ValidatingWebhookConfiguration
       jsonPointers:
-        - /webhooks/0/clientConfig/caBundle
         - /webhooks/0/failurePolicy
-        - /webhooks/1/clientConfig/caBundle
         - /webhooks/1/failurePolicy
-    - group: admissionregistration.k8s.io
-      kind: MutatingWebhookConfiguration
-      jsonPointers:
-        - /webhooks/0/clientConfig/caBundle
-        - /webhooks/1/clientConfig/caBundle
-        - /webhooks/2/clientConfig/caBundle
-        - /webhooks/3/clientConfig/caBundle

 {{- end }}
@@ -16,7 +16,7 @@ aws-ebs-csi-driver:
   enabled: {{ default false (index .Values "storage" "aws-ebs-csi-driver" "enabled")}}
   controller:
     replicaCount: {{ ternary 2 1 .Values.HighAvailableControlplane }}
-    k8sTagClusterId: {{ .Values.ClusterName }}
+    k8sTagClusterId: {{ .Values.global.clusterName }}
     env:
       - name: AWS_ROLE_ARN
         value: {{ index .Values "storage" "aws-ebs-csi-driver" "IamArn" | quote }}
charts/kubezero/values.yaml
@@ -7,11 +7,11 @@ kubezero:
 HighAvailableControlplane: false

 addons:
-  enabled: false
-  targetRevision: 0.6.0
+  enabled: true
+  targetRevision: 0.6.1

 network:
-  enabled: false
+  enabled: true
   retain: true
   targetRevision: 0.3.2
@@ -50,4 +50,8 @@ function publish_chart() {

 publish_chart

+CF_DIST=E1YFUJXMCXT2RN
+aws cloudfront create-invalidation --distribution-id $CF_DIST --paths "/charts/*"
+
 #reset_index