feat: reorg cluster upgrade logic, migrate aws-iam-authenticator to system service, basic network and addons for 1.31
commit da32f87d3c (parent 172f54ce54)
@@ -3,7 +3,7 @@ ARG ALPINE_VERSION=3.21
 FROM docker.io/alpine:${ALPINE_VERSION}

 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.31.4
+ARG KUBE_VERSION=1.31

 ARG SOPS_VERSION="3.9.1"
 ARG VALS_VERSION="0.37.6"
@@ -41,8 +41,8 @@ RUN mkdir -p $(helm env HELM_PLUGINS) && \
 # vals
 RUN wget -qO - https://github.com/helmfile/vals/releases/download/v${VALS_VERSION}/vals_${VALS_VERSION}_linux_amd64.tar.gz | tar -C /usr/local/bin -xzf- vals

-ADD admin/kubezero.sh admin/libhelm.sh admin/migrate_argo_values.py /usr/bin
-ADD admin/libhelm.sh /var/lib/kubezero
+ADD admin/kubezero.sh admin/migrate_argo_values.py /usr/bin
+ADD admin/libhelm.sh admin/hooks-$KUBE_VERSION.sh /var/lib/kubezero

 ADD charts/kubeadm /charts/kubeadm
 ADD charts/kubezero /charts/kubezero
admin/hooks-1.31.sh (new file, 43 lines)
@@ -0,0 +1,43 @@
+### v1.31
+
+# All things BEFORE the first controller / control plane upgrade
+pre_control_plane_upgrade_cluster() {
+  # add kubezero version label to existing controller nodes for aws-iam migration
+  for n in $(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" | grep v1.30 | awk '{print $1}'); do
+    kubectl label node $n 'node.kubernetes.io/kubezero.version=v1.30.6' || true
+  done
+
+  # patch the aws-iam-authenticator DS to NOT run pods on 1.31 controllers
+  kubectl patch ds aws-iam-authenticator -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.30.6"}}}}}' || true
+}
+
+
+# All things after the first controller / control plane upgrade
+post_control_plane_upgrade_cluster() {
+  echo
+}
+
+
+# All things AFTER all controllers are on the new version
+pre_cluster_upgrade_final() {
+
+  if [ "$PLATFORM" == "aws" ]; then
+    # cleanup aws-iam-authenticator
+    kubectl delete clusterrolebinding aws-iam-authenticator || true
+    kubectl delete clusterrole aws-iam-authenticator || true
+    kubectl delete serviceaccount aws-iam-authenticator -n kube-system || true
+    kubectl delete cm aws-iam-authenticator -n kube-system || true
+    kubectl delete ds aws-iam-authenticator -n kube-system || true
+    kubectl delete IAMIdentityMapping kubezero-worker-nodes || true
+    kubectl delete IAMIdentityMapping kubernetes-admin || true
+    kubectl delete crd iamidentitymappings.iamauthenticator.k8s.aws || true
+
+    kubectl delete secret aws-iam-certs -n kube-system || true
+  fi
+}
+
+
+# Last call
+post_cluster_upgrade_final() {
+  echo
+}
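A quick way to sanity-check what these hooks did on a live cluster (illustrative kubectl calls, not part of the commit):

    # controllers still carrying the migration label set by pre_control_plane_upgrade_cluster
    kubectl get nodes -l "node-role.kubernetes.io/control-plane=" -L node.kubernetes.io/kubezero.version
    # the pinned DaemonSet should only schedule onto nodes still labelled v1.30.6
    kubectl -n kube-system get ds aws-iam-authenticator -o wide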
@@ -29,6 +29,9 @@ export ETCDCTL_KEY=${HOSTFS}/etc/kubernetes/pki/apiserver-etcd-client.key

 mkdir -p ${WORKDIR}

+# Import version specific hooks
+. /var/lib/kubezero/hooks-${KUBE_VERSION_MINOR##v}.sh
+
 # Generic retry utility
 retry() {
   local tries=$1
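The hook file name comes from plain shell parameter expansion; a minimal sketch of what the sourcing line resolves to (value is illustrative):

    KUBE_VERSION_MINOR="v1.31"
    echo "hooks-${KUBE_VERSION_MINOR##v}.sh"   # -> hooks-1.31.sh, i.e. the new file added above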
@@ -64,7 +67,9 @@ render_kubeadm() {
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done

-  if [[ "$phase" =~ ^(bootstrap|join|restore)$ ]]; then
+  if [[ "$phase" == "upgrade" ]]; then
+    cat ${WORKDIR}/kubeadm/templates/UpgradeConfiguration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
+  elif [[ "$phase" =~ ^(bootstrap|join|restore)$ ]]; then
     cat ${WORKDIR}/kubeadm/templates/InitConfiguration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   fi

@@ -83,7 +88,6 @@ parse_kubezero() {
   export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
   export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
   export PROVIDER_ID=$(yq eval '.providerID // ""' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
-  export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
 }

@@ -92,20 +96,6 @@ pre_kubeadm() {
   # update all apiserver addons first
   cp -r ${WORKDIR}/kubeadm/templates/apiserver ${HOSTFS}/etc/kubernetes
-
-  # aws-iam-authenticator enabled ?
-  if [ "$AWS_IAM_AUTH" == "true" ]; then
-
-    # Initialize webhook
-    if [ ! -f ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt ]; then
-      ${HOSTFS}/usr/bin/aws-iam-authenticator init -i ${CLUSTERNAME}
-      mv key.pem ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key
-      mv cert.pem ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
-    fi
-
-    # Patch the aws-iam-authenticator config with the actual cert.pem
-    yq eval -Mi ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt | base64 -w0)\"" ${HOSTFS}/etc/kubernetes/apiserver/aws-iam-authenticator.yaml
-  fi

   # copy patches to host to make --rootfs of kubeadm work
   cp -r ${WORKDIR}/kubeadm/templates/patches ${HOSTFS}/etc/kubernetes
 }
@@ -120,12 +110,14 @@ post_kubeadm() {
 }


-kubeadm_upgrade() {
-  # pre upgrade hook
+# Control plane upgrade
+control_plane_upgrade() {
+  CMD=$1
+
   # get current values, argo app over cm
   get_kubezero_values $ARGOCD

+  if [[ "$CMD" =~ ^(cluster)$ ]]; then
   # tumble new config through migrate.py
   migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml

@@ -151,29 +143,30 @@ kubeadm_upgrade() {

   pre_kubeadm

-  # Upgrade - we upload the new config first so we can use --patch during 1.30
   _kubeadm init phase upload-config kubeadm

-  kubeadm upgrade apply --yes --patches /etc/kubernetes/patches $KUBE_VERSION --rootfs ${HOSTFS} $LOG
+  _kubeadm upgrade apply $KUBE_VERSION

   post_kubeadm

   # install re-certed kubectl config for root
   cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config

-  # post upgrade
+  echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."

+  elif [[ "$CMD" =~ ^(final)$ ]]; then
+  render_kubeadm upgrade
+
+  # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
+  #_kubeadm upgrade apply phase addon all $KUBE_VERSION
+  _kubeadm upgrade apply $KUBE_VERSION
+
+  echo "Upgraded addons and applied final migrations"
+  fi

   # Cleanup after kubeadm on the host
   rm -rf ${HOSTFS}/etc/kubernetes/tmp
-
-  echo "Successfully upgraded kubeadm control plane."
-
-  # TODO
-  # Send Notification currently done via CloudBender -> SNS -> Slack
-  # Better deploy https://github.com/opsgenie/kubernetes-event-exporter and set proper routes and labels on this Job
-
-  # Removed:
-  # - update oidc do we need that ?
 }

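Taken together with the version hooks and the task dispatcher further down, the intended call order during a cluster upgrade is roughly the following (a sketch, not a literal transcript of the script):

    pre_control_plane_upgrade_cluster     # 1.31 hook: label v1.30 controllers, pin the old aws-iam-authenticator DS
    control_plane_upgrade cluster         # migrate values, kubeadm upgrade apply on the first controller
    post_control_plane_upgrade_cluster
    # ... once every controller runs the new version ...
    pre_cluster_upgrade_final             # 1.31 hook: remove aws-iam-authenticator leftovers on AWS
    control_plane_upgrade final           # re-render config, final addon pass
    post_cluster_upgrade_final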
@@ -200,6 +193,10 @@ control_plane_node() {
   # Put PKI in place
   cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes

+  ### 1.31 only to clean up previous aws-iam-auth certs
+  rm -f ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key ${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
+  ###
+
   # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
   cp ${WORKDIR}/super-admin.conf ${HOSTFS}/root/.kube/config

@@ -220,7 +217,7 @@ control_plane_node() {
   rm -f ${HOSTFS}/etc/kubernetes/pki/etcd/peer.* ${HOSTFS}/etc/kubernetes/pki/etcd/server.* ${HOSTFS}/etc/kubernetes/pki/etcd/healthcheck-client.* \
     ${HOSTFS}/etc/kubernetes/pki/apiserver* ${HOSTFS}/etc/kubernetes/pki/front-proxy-client.*

-  # Issue all certs first, needed for eg. aws-iam-authenticator setup
+  # Issue all certs first
   _kubeadm init phase certs all

   pre_kubeadm
@@ -286,6 +283,9 @@ control_plane_node() {
       -endpoint https://${ETCD_NODENAME}:2379 \
       change-provider-id ${NODENAME} $PROVIDER_ID
     fi

+    # update node label for single node control plane
+    kubectl label node $NODENAME "node.kubernetes.io/kubezero.version=$KUBE_VERSION" --overwrite=true
   fi

   _kubeadm init phase upload-config all
@@ -305,17 +305,6 @@ control_plane_node() {
     _kubeadm init phase addon all
   fi

-  # Ensure aws-iam-authenticator secret is in place
-  if [ "$AWS_IAM_AUTH" == "true" ]; then
-    kubectl get secrets -n kube-system aws-iam-certs || \
-      kubectl create secret generic aws-iam-certs -n kube-system \
-        --from-file=key.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.key \
-        --from-file=cert.pem=${HOSTFS}/etc/kubernetes/pki/aws-iam-authenticator.crt
-
-    # Store aws-iam-auth admin on SSM
-    yq eval -M ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/ca.crt | base64 -w0)\"" ${WORKDIR}/kubeadm/templates/admin-aws-iam.yaml > ${HOSTFS}/etc/kubernetes/admin-aws-iam.yaml
-  fi

   post_kubeadm

   echo "${CMD}ed cluster $CLUSTERNAME successfully."
@@ -413,7 +402,17 @@ for t in $@; do
   restore) control_plane_node restore;;
   kubeadm_upgrade)
     ARGOCD=$(argo_used)
-    kubeadm_upgrade;;
+    # call hooks
+    pre_control_plane_upgrade_cluster
+    control_plane_upgrade cluster
+    post_control_plane_upgrade_cluster
+    ;;
+  finalize_cluster_upgrade)
+    ARGOCD=$(argo_used)
+    pre_cluster_upgrade_final
+    control_plane_upgrade final
+    post_cluster_upgrade_final
+    ;;
   apply_*)
     ARGOCD=$(argo_used)
     apply_module "${t##apply_}";;
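Illustrative invocation of the two tasks from the admin container (however the script is wrapped in your environment):

    kubezero.sh kubeadm_upgrade             # on the first controller: hooks + control plane upgrade
    kubezero.sh finalize_cluster_upgrade    # once all controllers are upgraded: hooks + final addon pass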
@@ -148,7 +148,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
 # helm template | kubectl apply -f -
 # confine to one namespace if possible
 function render() {
-  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
+  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
     | python3 -c '
 #!/usr/bin/python3
 import yaml
@@ -19,22 +19,26 @@ echo "Checking that all pods in kube-system are running ..."

 [ "$ARGOCD" == "True" ] && disable_argo

-control_plane_upgrade kubeadm_upgrade
+# Check if we already have all controllers on the current version
+OLD_CONTROLLERS=$(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true | grep -cv $KUBE_VERSION || true)

-echo "Control plane upgraded, <Return> to continue"
-read -r
+# All controllers already on current version
+if [ "$OLD_CONTROLLERS" == "0" ]; then
+  control_plane_upgrade finalize_cluster_upgrade
+  exit
+
+# Otherwise run control plane upgrade
+else
+  control_plane_upgrade kubeadm_upgrade
+  echo "<Return> to continue"
+  read -r
+fi

 #echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
 #[ "$ARGOCD" == "True" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero

-### v1.31
-
 # upgrade modules
-#
-# Preload cilium images to running nodes, disabled till 1.31
-# all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3; chroot /host crictl pull ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.3"

 control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"

 echo "Checking that all pods in kube-system are running ..."
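The controller check simply counts control-plane nodes whose reported version does not yet match $KUBE_VERSION; a sketch with made-up output:

    $ kubectl get nodes -l "node-role.kubernetes.io/control-plane=" --no-headers=true
    cp-0   Ready   control-plane   120d   v1.31.4
    cp-1   Ready   control-plane   120d   v1.30.6
    # grep -cv v1.31.4 -> 1, so the kubeadm_upgrade branch runs; once it prints 0 the finalize branch runs instead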
@@ -45,8 +49,7 @@ echo "Applying remaining KubeZero modules..."
 control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"

 # Final step is to commit the new argocd kubezero app
-# remove the del(.spec.source.helm.values) with 1.31
-kubectl get app kubezero -n argocd -o yaml | yq 'del(.spec.source.helm.values) | del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP
+kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..)' > $ARGO_APP

 # Trigger backup of upgraded cluster state
 kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$KUBE_VERSION -n kube-system
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.31.4
+version: 1.31.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -1,4 +1,4 @@
-apiVersion: kubeadm.k8s.io/v1beta3
+apiVersion: kubeadm.k8s.io/v1beta4
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
@@ -11,20 +11,33 @@ etcd:
   local:
     # imageTag: 3.5.12-0
     extraArgs:
+      - name: advertise-client-urls
+        value: https://{{ .Values.etcd.nodeName }}:2379
+      - name: initial-advertise-peer-urls
+        value: https://{{ .Values.etcd.nodeName }}:2380
+      - name: initial-cluster
+        value: {{ include "kubeadm.etcd.initialCluster" .Values.etcd | quote }}
+      - name: initial-cluster-state
+        value: {{ .Values.etcd.state }}
+      - name: initial-cluster-token
+        value: etcd-{{ .Values.global.clusterName }}
+      - name: name
+        value: {{ .Values.etcd.nodeName }}
+      - name: listen-peer-urls
+        value: https://{{ .Values.listenAddress }}:2380
+      - name: listen-client-urls
+        value: https://{{ .Values.listenAddress }}:2379
+      - name: listen-metrics-urls
+        value: http://0.0.0.0:2381
+      - name: logger
+        value: zap
+      - name: log-level
+        value: warn
       ### DNS discovery
-      #discovery-srv: {{ .Values.domain }}
-      #discovery-srv-name: {{ .Values.global.clusterName }}
-      advertise-client-urls: https://{{ .Values.etcd.nodeName }}:2379
-      initial-advertise-peer-urls: https://{{ .Values.etcd.nodeName }}:2380
-      initial-cluster: {{ include "kubeadm.etcd.initialCluster" .Values.etcd | quote }}
-      initial-cluster-state: {{ .Values.etcd.state }}
-      initial-cluster-token: etcd-{{ .Values.global.clusterName }}
-      name: {{ .Values.etcd.nodeName }}
-      listen-peer-urls: https://{{ .Values.listenAddress }}:2380
-      listen-client-urls: https://{{ .Values.listenAddress }}:2379
-      listen-metrics-urls: http://0.0.0.0:2381
-      logger: zap
-      # log-level: "warn"
+      #- name: discovery-srv
+      #  value: {{ .Values.domain }}
+      #- name: discovery-srv-name
+      #  value: {{ .Values.global.clusterName }}
       {{- with .Values.etcd.extraArgs }}
       {{- toYaml . | nindent 6 }}
       {{- end }}
@@ -38,49 +51,82 @@ etcd:
     - "{{ .Values.domain }}"
 controllerManager:
   extraArgs:
-    profiling: "false"
-    terminated-pod-gc-threshold: "300"
-    leader-elect: {{ .Values.global.highAvailable | quote }}
-    logging-format: json
-    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
+    - name: profiling
+      value: "false"
+    - name: terminated-pod-gc-threshold
+      value: "300"
+    - name: leader-elect
+      value: {{ .Values.global.highAvailable | quote }}
+    - name: logging-format
+      value: json
+    - name: feature-gates
+      value: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
 scheduler:
   extraArgs:
-    profiling: "false"
-    leader-elect: {{ .Values.global.highAvailable | quote }}
-    logging-format: json
-    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
+    - name: feature-gates
+      value: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
+    - name: leader-elect
+      value: {{ .Values.global.highAvailable | quote }}
+    - name: logging-format
+      value: json
+    - name: profiling
+      value: "false"
 apiServer:
   certSANs:
     - {{ regexSplit ":" .Values.api.endpoint -1 | first }}
   extraArgs:
-    etcd-servers: {{ .Values.api.etcdServers }}
-    profiling: "false"
-    audit-log-path: "/var/log/kubernetes/audit.log"
-    audit-policy-file: /etc/kubernetes/apiserver/audit-policy.yaml
-    audit-log-maxage: "7"
-    audit-log-maxsize: "100"
-    audit-log-maxbackup: "1"
-    audit-log-compress: "true"
+    - name: profiling
+      value: "false"
+    - name: etcd-servers
+      value: {{ .Values.api.etcdServers }}
+    - name: audit-log-path
+      value: /var/log/kubernetes/audit.log
+    - name: audit-policy-file
+      value: /etc/kubernetes/apiserver/audit-policy.yaml
+    - name: audit-log-maxage
+      value: "7"
+    - name: audit-log-maxsize
+      value: "100"
+    - name: audit-log-maxbackup
+      value: "1"
+    - name: audit-log-compress
+      value: "true"
     {{- if .Values.api.falco.enabled }}
-    audit-webhook-config-file: /etc/kubernetes/apiserver/audit-webhook.yaml
+    - name: audit-webhook-config-file
+      value: /etc/kubernetes/apiserver/audit-webhook.yaml
     {{- end }}
-    tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
-    admission-control-config-file: /etc/kubernetes/apiserver/admission-configuration.yaml
-    api-audiences: {{ .Values.api.apiAudiences }}
+    - name: tls-cipher-suites
+      value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+    - name: admission-control-config-file
+      value: /etc/kubernetes/apiserver/admission-configuration.yaml
+    - name: api-audiences
+      value: {{ .Values.api.apiAudiences }}
     {{- if .Values.api.serviceAccountIssuer }}
-    service-account-issuer: "{{ .Values.api.serviceAccountIssuer }}"
-    service-account-jwks-uri: "{{ .Values.api.serviceAccountIssuer }}/openid/v1/jwks"
+    - name: service-account-issuer
+      value: "{{ .Values.api.serviceAccountIssuer }}"
+    - name: service-account-jwks-uri
+      value: "{{ .Values.api.serviceAccountIssuer }}/openid/v1/jwks"
     {{- end }}
-    {{- if .Values.api.awsIamAuth.enabled }}
+    {{- if .Values.api.awsIamAuth }}
-    authentication-token-webhook-config-file: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
-    authentication-token-webhook-cache-ttl: 3600s
+    - name: authentication-token-webhook-config-file
+      value: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
+    - name: authentication-token-webhook-cache-ttl
+      value: 3600s
+    - name: authentication-token-webhook-version
+      value: v1
     {{- end }}
-    feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
-    enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit,ExtendedResourceToleration
+    - name: feature-gates
+      value: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
+    - name: authorization-config
+      value: /etc/kubernetes/apiserver/authz-config.yaml
+    - name: enable-admission-plugins
+      value: DenyServiceExternalIPs,NodeRestriction,EventRateLimit,ExtendedResourceToleration
     {{- if .Values.global.highAvailable }}
-    goaway-chance: ".001"
+    - name: goaway-chance
+      value: ".001"
     {{- end }}
-    logging-format: json
+    - name: logging-format
+      value: json
     {{- with .Values.api.extraArgs }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
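If you want to double-check the re-rendered v1beta4 documents before an upgrade, kubeadm can validate the assembled config (assuming your kubeadm build ships the validate subcommand; the path is where render_kubeadm writes it):

    kubeadm config validate --config ${HOSTFS}/etc/kubernetes/kubeadm.yaml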
@@ -1,4 +1,4 @@
-apiVersion: kubeadm.k8s.io/v1beta3
+apiVersion: kubeadm.k8s.io/v1beta4
 kind: InitConfiguration
 localAPIEndpoint:
   advertiseAddress: {{ .Values.listenAddress }}
@@ -17,10 +17,13 @@ nodeRegistration:
     - Swap
     - KubeletVersion
   kubeletExtraArgs:
-    node-labels: {{ .Values.nodeLabels | quote }}
+    - name: node-labels
+      value: {{ .Values.nodeLabels | quote }}
     {{- with .Values.providerID }}
-    provider-id: {{ . }}
+    - name: provider-id
+      value: {{ . }}
     {{- end }}
     {{- if ne .Values.listenAddress "0.0.0.0" }}
-    node-ip: {{ .Values.listenAddress }}
+    - name: node-ip
+      value: {{ .Values.listenAddress }}
     {{- end }}

charts/kubeadm/templates/UpgradeConfiguration.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
+apiVersion: kubeadm.k8s.io/v1beta4
+kind: UpgradeConfiguration
+apply:
+  forceUpgrade: true
+  {{- with .Values.patches }}
+  patches:
+    directory: {{ . }}
+  {{- end }}
+  ignorePreflightErrors:
+    - DirAvailable--var-lib-etcd
+    - DirAvailable--etc-kubernetes-manifests
+    - FileAvailable--etc-kubernetes-pki-ca.crt
+    - FileAvailable--etc-kubernetes-manifests-etcd.yaml
+    - Swap
+    - KubeletVersion
+  skipPhases: []
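As wired up in render_kubeadm above, this document is appended to kubeadm.yaml only for the upgrade phase, so the patches directory and preflight overrides now travel with the config instead of command-line flags:

    render_kubeadm upgrade                   # appends the rendered UpgradeConfiguration to ${HOSTFS}/etc/kubernetes/kubeadm.yaml
    _kubeadm upgrade apply $KUBE_VERSION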
(deleted file, 27 lines: the pre-generated aws-iam admin kubeconfig template)
@@ -1,27 +0,0 @@
-{{- if .Values.api.awsIamAuth.enabled }}
-apiVersion: v1
-kind: Config
-clusters:
-- cluster:
-    server: https://{{ .Values.api.endpoint }}
-  name: {{ .Values.global.clusterName }}
-contexts:
-- context:
-    cluster: {{ .Values.global.clusterName }}
-    user: kubernetes-admin
-  name: kubernetes-admin@{{ .Values.global.clusterName }}
-current-context: kubernetes-admin@{{ .Values.global.clusterName }}
-preferences: {}
-users:
-- name: kubernetes-admin
-  user:
-    exec:
-      apiVersion: client.authentication.k8s.io/v1beta1
-      command: aws-iam-authenticator
-      args:
-        - "token"
-        - "-i"
-        - "{{ .Values.global.clusterName }}"
-        - "-r"
-        - "{{ .Values.api.awsIamAuth.kubeAdminRole }}"
-{{- end }}

charts/kubeadm/templates/apiserver/authz-config.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthorizationConfiguration
+authorizers:
+  - type: Node
+    name: node
+  - type: RBAC
+    name: rbac
+#  - type: Webhook
+#    name: Example
+#    webhook:
+#      authorizedTTL: 300s
+#      unauthorizedTTL: 30s
+#      timeout: 3s
+#      subjectAccessReviewVersion: v1
+#      matchConditionSubjectAccessReviewVersion: v1
+#      failurePolicy: NoOpinion
+#      connectionInfo:
+#        type: KubeConfigFile
+#        kubeConfigFile: /etc/kubernetes/apiserver/example.yaml
+#      matchConditions:
+#      # only send resource requests to the webhook
+#      - expression: has(request.resourceAttributes)
+#      # Don't intercept requests from kube-system service accounts
+#      - expression: "!('system:serviceaccounts:kube-system' in request.groups)"
+#      ## Below expressions avoid issues with kubeadm init and other system components that should be authorized by Node and RBAC
+#      # Don't process node and bootstrap token requests with the webhook
+#      - expression: "!('system:nodes' in request.groups)"
+#      - expression: "!('system:bootstrappers' in request.groups)"
+#      - expression: "!('system:bootstrappers:kubeadm:default-node-token' in request.groups)"
+#      # Don't process kubeadm requests with the webhook
+#      - expression: "!('kubeadm:cluster-admins' in request.groups)"
+#      - expression: "!('system:masters' in request.groups)"
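After the apiserver static pod is re-rendered, the structured authorization config can be verified on a controller (illustrative check, standard kubeadm manifest path assumed):

    grep authorization-config /etc/kubernetes/manifests/kube-apiserver.yaml
    # expect: --authorization-config=/etc/kubernetes/apiserver/authz-config.yaml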
(deleted file, 19 lines: the aws-iam-authenticator API server webhook kubeconfig)
@@ -1,19 +0,0 @@
-{{- if .Values.api.awsIamAuth.enabled }}
-# clusters refers to the remote service.
-clusters:
-  - name: aws-iam-authenticator
-    cluster:
-      certificate-authority-data: "replaced at runtime"
-      server: https://localhost:21362/authenticate
-# users refers to the API Server's webhook configuration
-# (we don't need to authenticate the API server).
-users:
-  - name: apiserver
-# kubeconfig files require a context. Provide one for the API Server.
-current-context: webhook
-contexts:
-- name: webhook
-  context:
-    cluster: aws-iam-authenticator
-    user: apiserver
-{{- end }}
(deleted file, 46 lines: the iamidentitymappings.iamauthenticator.k8s.aws CRD)
@@ -1,46 +0,0 @@
-{{- if .Values.api.awsIamAuth.enabled }}
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: iamidentitymappings.iamauthenticator.k8s.aws
-spec:
-  group: iamauthenticator.k8s.aws
-  scope: Cluster
-  names:
-    plural: iamidentitymappings
-    singular: iamidentitymapping
-    kind: IAMIdentityMapping
-    categories:
-      - all
-  versions:
-    - name: v1alpha1
-      served: true
-      storage: true
-      schema:
-        openAPIV3Schema:
-          type: object
-          properties:
-            spec:
-              type: object
-              required:
-                - arn
-                - username
-              properties:
-                arn:
-                  type: string
-                username:
-                  type: string
-                groups:
-                  type: array
-                  items:
-                    type: string
-            status:
-              type: object
-              properties:
-                canonicalARN:
-                  type: string
-                userID:
-                  type: string
-      subresources:
-        status: {}
-{{- end }}
(deleted file, 155 lines: the aws-iam-authenticator RBAC, ServiceAccount, ConfigMap and DaemonSet)
@@ -1,155 +0,0 @@
-{{- if .Values.api.awsIamAuth.enabled }}
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: aws-iam-authenticator
-rules:
-  - apiGroups:
-      - iamauthenticator.k8s.aws
-    resources:
-      - iamidentitymappings
-    verbs:
-      - get
-      - list
-      - watch
-  - apiGroups:
-      - iamauthenticator.k8s.aws
-    resources:
-      - iamidentitymappings/status
-    verbs:
-      - patch
-      - update
-  - apiGroups:
-      - ""
-    resources:
-      - events
-    verbs:
-      - create
-      - update
-      - patch
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - configmaps
-    resourceNames:
-      - aws-auth
-    verbs:
-      - get
-
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: aws-iam-authenticator
-  namespace: kube-system
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: aws-iam-authenticator
-  namespace: kube-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: aws-iam-authenticator
-subjects:
-  - kind: ServiceAccount
-    name: aws-iam-authenticator
-    namespace: kube-system
-
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  namespace: kube-system
-  name: aws-iam-authenticator
-  labels:
-    k8s-app: aws-iam-authenticator
-data:
-  config.yaml: |
-    clusterID: {{ .Values.global.clusterName }}
-
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  namespace: kube-system
-  name: aws-iam-authenticator
-  labels:
-    k8s-app: aws-iam-authenticator
-spec:
-  selector:
-    matchLabels:
-      k8s-app: aws-iam-authenticator
-  updateStrategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        k8s-app: aws-iam-authenticator
-    spec:
-      securityContext:
-        seccompProfile:
-          type: RuntimeDefault
-
-      priorityClassName: system-cluster-critical
-
-      # use service account with access to
-      serviceAccountName: aws-iam-authenticator
-
-      # run on the host network (don't depend on CNI)
-      hostNetwork: true
-
-      # run on each controller
-      nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
-      tolerations:
-        - effect: NoSchedule
-          key: node-role.kubernetes.io/control-plane
-
-      containers:
-        - name: aws-iam-authenticator
-          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.27
-          args:
-            - server
-            - --backend-mode=CRD,MountedFile
-            - --config=/etc/aws-iam-authenticator/config.yaml
-            - --state-dir=/var/aws-iam-authenticator
-            - --kubeconfig-pregenerated=true
-
-          securityContext:
-            allowPrivilegeEscalation: false
-            capabilities:
-              drop:
-                - ALL
-
-          resources:
-            requests:
-              memory: 32Mi
-              cpu: 10m
-            limits:
-              memory: 64Mi
-              #cpu: 100m
-
-          volumeMounts:
-            - name: config
-              mountPath: /etc/aws-iam-authenticator/
-            - name: state
-              mountPath: /var/aws-iam-authenticator/
-
-      volumes:
-        - name: config
-          configMap:
-            name: aws-iam-authenticator
-        - name: state
-          secret:
-            secretName: aws-iam-certs
-{{- end }}
(deleted file, 23 lines: the default IAMIdentityMapping resources)
@@ -1,23 +0,0 @@
-{{- if .Values.api.awsIamAuth.enabled }}
-apiVersion: iamauthenticator.k8s.aws/v1alpha1
-kind: IAMIdentityMapping
-metadata:
-  name: kubezero-worker-nodes
-spec:
-  arn: {{ .Values.api.awsIamAuth.workerNodeRole }}
-  username: system:node:{{ "{{" }}EC2PrivateDNSName{{ "}}" }}
-  groups:
-    - system:bootstrappers:kubeadm:default-node-token
----
-
-# Admin Role for remote access
-apiVersion: iamauthenticator.k8s.aws/v1alpha1
-kind: IAMIdentityMapping
-metadata:
-  name: kubernetes-admin
-spec:
-  arn: {{ .Values.api.awsIamAuth.kubeAdminRole }}
-  username: kubernetes-admin
-  groups:
-    - system:masters
-{{- end }}
@@ -20,10 +20,7 @@ api:
   oidcEndpoint: ""
   apiAudiences: "istio-ca"

-  awsIamAuth:
-    enabled: false
-    workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
-    kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
+  awsIamAuth: false

   falco:
     enabled: false
@@ -54,4 +54,4 @@ dependencies:
     version: 0.2.12
     repository: https://caas-team.github.io/helm-charts/
     condition: py-kube-downscaler.enabled
-kubeVersion: ">= 1.26.0"
+kubeVersion: ">= 1.30.0-0"
@@ -1,6 +1,6 @@
 # kubezero-addons

 (chart version badges)

 KubeZero umbrella chart for various optional cluster addons

@@ -18,13 +18,13 @@ Kubernetes: `>= 1.26.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.2 |
-| https://caas-team.github.io/helm-charts/ | py-kube-downscaler | 0.2.11 |
-| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.15.0 |
-| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.43.2 |
+| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.17.1 |
+| https://caas-team.github.io/helm-charts/ | py-kube-downscaler | 0.2.12 |
+| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.15.1 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.46.0 |
 | https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.17.0 |
 | https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
-| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.1 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.26.0 |
 | oci://public.ecr.aws/neuron | neuron-helm-chart | 1.1.1 |

 # MetalLB
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.22.1
+appVersion: 1.24.0
 description: A Helm chart for the AWS Node Termination Handler.
 home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -21,4 +21,4 @@ name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
 type: application
-version: 0.24.1
+version: 0.26.0
@@ -143,6 +143,8 @@ spec:
             {{- end }}
           - name: ENABLE_SPOT_INTERRUPTION_DRAINING
             value: {{ .Values.enableSpotInterruptionDraining | quote }}
+          - name: ENABLE_ASG_LIFECYCLE_DRAINING
+            value: {{ .Values.enableASGLifecycleDraining | quote }}
           - name: ENABLE_SCHEDULED_EVENT_DRAINING
             value: {{ .Values.enableScheduledEventDraining | quote }}
           - name: ENABLE_REBALANCE_MONITORING
@@ -143,6 +143,8 @@ spec:
             {{- end }}
           - name: ENABLE_SPOT_INTERRUPTION_DRAINING
             value: {{ .Values.enableSpotInterruptionDraining | quote }}
+          - name: ENABLE_ASG_LIFECYCLE_DRAINING
+            value: {{ .Values.enableASGLifecycleDraining | quote }}
           - name: ENABLE_SCHEDULED_EVENT_DRAINING
             value: {{ .Values.enableScheduledEventDraining | quote }}
           - name: ENABLE_REBALANCE_MONITORING
@@ -168,6 +168,10 @@ spec:
             value: {{ .Values.deleteSqsMsgIfNodeNotFound | quote }}
           - name: WORKERS
             value: {{ .Values.workers | quote }}
+          - name: HEARTBEAT_INTERVAL
+            value: {{ .Values.heartbeatInterval | quote }}
+          - name: HEARTBEAT_UNTIL
+            value: {{ .Values.heartbeatUntil | quote }}
           {{- with .Values.extraEnv }}
           {{- toYaml . | nindent 12 }}
           {{- end }}
@@ -270,6 +270,9 @@ metadataTries: 3
 # enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received. Only used in IMDS mode.
 enableSpotInterruptionDraining: true

+# enableASGLifecycleDraining If false, do not drain nodes when ASG target lifecycle state Terminated is received. Only used in IMDS mode.
+enableASGLifecycleDraining: true
+
 # enableScheduledEventDraining If false, do not drain nodes before the maintenance window starts for an EC2 instance scheduled event. Only used in IMDS mode.
 enableScheduledEventDraining: true
@@ -134,7 +134,7 @@ aws-node-termination-handler:
         value: "regional"

   enablePrometheusServer: false
-  podMonitor:
+  serviceMonitor:
     create: false

   jsonLogging: true
@@ -146,9 +146,6 @@ aws-node-termination-handler:
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""

-  rbac:
-    pspEnabled: false
-
 fuseDevicePlugin:
   enabled: false
   image:
@@ -206,7 +203,7 @@ cluster-autoscaler:

   image:
     repository: registry.k8s.io/autoscaling/cluster-autoscaler
-    tag: v1.30.2
+    tag: v1.31.1

   autoDiscovery:
     clusterName: ""
@@ -192,6 +192,8 @@ jenkins:
       annotations:
         container.apparmor.security.beta.kubernetes.io/jnlp: "unconfined"
         cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
+      garbageCollection:
+        enabled: true
       customJenkinsLabels:
         - podman-aws-trivy
       idleMinutes: 30

charts/kubezero-graph/templates/neo4j/secrets.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: neo4j-admin
+  labels:
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+type: Opaque
+data:
+  NEO4J_AUTH: {{ .Values.neo4j.neo4j.password | printf "neo4j/%v" | b64enc -}}
@@ -6,6 +6,9 @@ neo4j:
   neo4j:
     name: test-db

+    password: secret
+    passwordFromSecret: "neo4j-admin"
+
   volumes:
     data:
       mode: defaultStorageClass
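A minimal check that the generated secret matches what the upstream neo4j chart expects for passwordFromSecret (the value shown is just the illustrative default above):

    kubectl get secret neo4j-admin -o jsonpath='{.data.NEO4J_AUTH}' | base64 -d
    # -> neo4j/secret   (NEO4J_AUTH in the usual "<user>/<password>" form)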
@@ -51,7 +51,7 @@ data:
         {
           "name": "static_layer_0",
           "staticLayer": {
-            "overload.global_downstream_max_connections": 50000
+            "envoy.resource_monitors.downstream_connections": 50000
          }
        }
      ]
@@ -4,7 +4,6 @@ fluentd is deployed with the default values
 If the fluentd config is overriden and the metrics server removed
 this will fail.
 */}}
-{{- if .Values.testFramework.enabled }}
 {{ if empty .Values.service.ports }}
 apiVersion: v1
 kind: Pod
@@ -28,4 +27,3 @@ spec:
       wget '{{ include "fluentd.fullname" . }}:24231/metrics'
   restartPolicy: Never
 {{ end }}
-{{- end }}
@@ -13,9 +13,6 @@ image:
   pullPolicy: "IfNotPresent"
   tag: ""

-testFramework:
-  enabled: false
-
 ## Optional array of imagePullSecrets containing private registry credentials
 ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
 imagePullSecrets: []
|
@@ -9,36 +9,3 @@ diff -rtuN charts/fluentd.orig/templates/fluentd-configurations-cm.yaml charts/f
 +      {{- (tpl $value $) | nindent 4 }}
    {{- end }}
  {{- end }}
-
-diff -rtuN charts/fluentd.orig/templates/tests/test-connection.yaml charts/fluentd/templates/tests/test-connection.yaml
---- charts/fluentd.orig/templates/tests/test-connection.yaml 2024-04-08 11:00:03.030515998 +0000
-+++ charts/fluentd/templates/tests/test-connection.yaml 2024-04-08 11:03:16.254774985 +0000
-@@ -4,6 +4,7 @@
-   If the fluentd config is overriden and the metrics server removed
-   this will fail.
- */}}
-+{{- if .Values.testFramework.enabled }}
- {{ if empty .Values.service.ports }}
- apiVersion: v1
- kind: Pod
-@@ -26,4 +27,5 @@
-         while :; do nc -vz {{ include "fluentd.fullname" . }}:24231 && break; sleep 1; done
-         wget '{{ include "fluentd.fullname" . }}:24231/metrics'
-   restartPolicy: Never
--{{ end }}
-\ No newline at end of file
-+{{ end }}
-+{{- end }}
-diff -rtuN charts/fluentd.orig/values.yaml charts/fluentd/values.yaml
---- charts/fluentd.orig/values.yaml 2024-04-08 11:00:03.030515998 +0000
-+++ charts/fluentd/values.yaml 2024-04-08 11:00:03.040516045 +0000
-@@ -13,6 +13,9 @@
-   pullPolicy: "IfNotPresent"
-   tag: ""
-
-+testFramework:
-+  enabled: false
-+
- ## Optional array of imagePullSecrets containing private registry credentials
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- imagePullSecrets: []
|
@ -30,4 +30,4 @@ dependencies:
|
|||||||
version: 1.23.0
|
version: 1.23.0
|
||||||
repository: https://haproxytech.github.io/helm-charts
|
repository: https://haproxytech.github.io/helm-charts
|
||||||
condition: haproxy.enabled
|
condition: haproxy.enabled
|
||||||
kubeVersion: ">= 1.26.0"
|
kubeVersion: ">= 1.29.0-0"
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# kubezero-network
|
# kubezero-network
|
||||||
|
|
||||||
 
|
 
|
||||||
|
|
||||||
KubeZero umbrella chart for all things network
|
KubeZero umbrella chart for all things network
|
||||||
|
|
||||||
@ -14,13 +14,13 @@ KubeZero umbrella chart for all things network
|
|||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
Kubernetes: `>= 1.26.0`
|
Kubernetes: `>= 1.29.0-0`
|
||||||
|
|
||||||
| Repository | Name | Version |
|
| Repository | Name | Version |
|
||||||
|------------|------|---------|
|
|------------|------|---------|
|
||||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
|
||||||
| https://haproxytech.github.io/helm-charts | haproxy | 1.23.0 |
|
| https://haproxytech.github.io/helm-charts | haproxy | 1.23.0 |
|
||||||
| https://helm.cilium.io/ | cilium | 1.16.5 |
|
| https://helm.cilium.io/ | cilium | 1.16.6 |
|
||||||
| https://metallb.github.io/metallb | metallb | 0.14.9 |
|
| https://metallb.github.io/metallb | metallb | 0.14.9 |
|
||||||
|
|
||||||
## Values
|
## Values
|
||||||
|
@ -30,7 +30,8 @@ spec:
|
|||||||
image: {{ .Values.multus.image.repository }}:{{ .Values.multus.image.tag }}
|
image: {{ .Values.multus.image.repository }}:{{ .Values.multus.image.tag }}
|
||||||
# Always used cached images
|
# Always used cached images
|
||||||
imagePullPolicy: {{ .Values.multus.image.pullPolicy }}
|
imagePullPolicy: {{ .Values.multus.image.pullPolicy }}
|
||||||
command: ["/entrypoint.sh"]
|
#command: ["/entrypoint.sh"]
|
||||||
|
command: ["/thin_entrypoint"]
|
||||||
args:
|
args:
|
||||||
- "--multus-conf-file=/tmp/multus-conf/00-multus.conf"
|
- "--multus-conf-file=/tmp/multus-conf/00-multus.conf"
|
||||||
- "--rename-conf-file=false"
|
- "--rename-conf-file=false"
|
||||||
@ -39,10 +40,10 @@ spec:
|
|||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: "10m"
|
cpu: "10m"
|
||||||
# memory: "64Mi"
|
memory: "32Mi"
|
||||||
# limits:
|
limits:
|
||||||
# cpu: "100m"
|
# cpu: "100m"
|
||||||
# memory: "256Mi"
|
memory: "64Mi"
|
||||||
securityContext:
|
securityContext:
|
||||||
privileged: true
|
privileged: true
|
||||||
capabilities:
|
capabilities:
|
||||||
|
@ -29,8 +29,10 @@ Kubernetes: `>= 1.26.0`
|
|||||||
|
|
||||||
| Key | Type | Default | Description |
|
| Key | Type | Default | Description |
|
||||||
|-----|------|---------|-------------|
|
|-----|------|---------|-------------|
|
||||||
|
| data-prepper.config."data-prepper-config.yaml" | string | `"ssl: false\npeer_forwarder:\n ssl: false\n"` | |
|
||||||
| data-prepper.config."log4j2-rolling.properties" | string | `"status = error\ndest = err\nname = PropertiesConfig\n\nappender.console.type = Console\nappender.console.name = STDOUT\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{ISO8601} [%t] %-5p %40C - %m%n\n\nrootLogger.level = warn\nrootLogger.appenderRef.stdout.ref = STDOUT\n\nlogger.pipeline.name = org.opensearch.dataprepper.pipeline\nlogger.pipeline.level = info\n\nlogger.parser.name = org.opensearch.dataprepper.parser\nlogger.parser.level = info\n\nlogger.plugins.name = org.opensearch.dataprepper.plugins\nlogger.plugins.level = info\n"` | |
|
| data-prepper.config."log4j2-rolling.properties" | string | `"status = error\ndest = err\nname = PropertiesConfig\n\nappender.console.type = Console\nappender.console.name = STDOUT\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{ISO8601} [%t] %-5p %40C - %m%n\n\nrootLogger.level = warn\nrootLogger.appenderRef.stdout.ref = STDOUT\n\nlogger.pipeline.name = org.opensearch.dataprepper.pipeline\nlogger.pipeline.level = info\n\nlogger.parser.name = org.opensearch.dataprepper.parser\nlogger.parser.level = info\n\nlogger.plugins.name = org.opensearch.dataprepper.plugins\nlogger.plugins.level = info\n"` | |
|
||||||
| data-prepper.enabled | bool | `false` | |
|
| data-prepper.enabled | bool | `false` | |
|
||||||
|
| data-prepper.image.tag | string | `"2.10.1"` | |
|
||||||
| data-prepper.pipelineConfig.config.otel-service-map-pipeline.buffer.bounded_blocking | string | `nil` | |
|
| data-prepper.pipelineConfig.config.otel-service-map-pipeline.buffer.bounded_blocking | string | `nil` | |
|
||||||
| data-prepper.pipelineConfig.config.otel-service-map-pipeline.delay | int | `3000` | |
|
| data-prepper.pipelineConfig.config.otel-service-map-pipeline.delay | int | `3000` | |
|
||||||
| data-prepper.pipelineConfig.config.otel-service-map-pipeline.processor[0].service_map.window_duration | int | `180` | |
|
| data-prepper.pipelineConfig.config.otel-service-map-pipeline.processor[0].service_map.window_duration | int | `180` | |
|
||||||
@@ -72,7 +74,7 @@ Kubernetes: `>= 1.26.0`
 | fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n # Exclude ourselves to current error spam, https://github.com/fluent/fluent-bit/issues/5769\n # Todo: Rather limit / filter spam message than exclude all together -> ideally locally, next dataprepper\n Exclude_Path *logging-fluent-bit*\n multiline.parser cri\n Tag cri.*\n Skip_Long_Lines On\n Skip_Empty_Lines On\n DB /var/log/flb_kube.db\n DB.Sync Normal\n DB.locking true\n # Buffer_Max_Size 1M\n {{- with .Values.config.input }}\n Mem_Buf_Limit {{ .memBufLimit }}\n Refresh_Interval {{ .refreshInterval }}\n {{- end }}\n\n[INPUT]\n Name opentelemetry\n Tag otel\n"` | |
 | fluent-bit.config.logLevel | string | `"info"` | |
 | fluent-bit.config.output.host | string | `"telemetry-fluentd"` | |
-| fluent-bit.config.output.sharedKey | string | `"secretref+k8s://v1/Secret/kube-system/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
+| fluent-bit.config.output.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
 | fluent-bit.config.output.tls | bool | `false` | |
 | fluent-bit.config.output_otel.host | string | `"telemetry-opentelemetry-collector"` | |
 | fluent-bit.config.outputs | string | `"[OUTPUT]\n Match kube.*\n Name forward\n Host {{ .Values.config.output.host }}\n Port 24224\n Shared_Key {{ .Values.config.output.sharedKey }}\n tls {{ ternary \"on\" \"off\" .Values.config.output.tls }}\n Send_options true\n Require_ack_response true\n\n[OUTPUT]\n Name opentelemetry\n Match otel\n Host {{ .Values.config.output_otel.host }}\n Port 4318\n #Metrics_uri /v1/metrics\n Traces_uri /v1/traces\n #Logs_uri /v1/logs\n"` | |
@@ -133,7 +135,7 @@ Kubernetes: `>= 1.26.0`
 | fluentd.service.ports[1].containerPort | int | `9880` | |
 | fluentd.service.ports[1].name | string | `"http-fluentd"` | |
 | fluentd.service.ports[1].protocol | string | `"TCP"` | |
-| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kube-system/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
+| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
 | fluentd.volumeMounts[0].mountPath | string | `"/run/pki"` | |
 | fluentd.volumeMounts[0].name | string | `"trust-store"` | |
 | fluentd.volumeMounts[0].readOnly | bool | `true` | |
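
Note: the shared key for the fluent-bit -> fluentd forward connection is now resolved from the kubezero namespace instead of kube-system. A minimal sanity check that the referenced key exists where the new secretref points, assuming the secret has already been created or moved there (the ref path maps to namespace/name/key):

    kubectl -n kubezero get secret kubezero-secrets \
      -o jsonpath="{.data.telemetry\.fluentd\.source\.sharedKey}" | base64 -d
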
@@ -164,6 +166,7 @@ Kubernetes: `>= 1.26.0`
 | jaeger.storage.elasticsearch.scheme | string | `"https"` | |
 | jaeger.storage.elasticsearch.user | string | `"admin"` | |
 | jaeger.storage.type | string | `"elasticsearch"` | |
+| metrics.enabled | bool | `false` | |
 | opensearch.dashboard.enabled | bool | `false` | |
 | opensearch.dashboard.istio.enabled | bool | `false` | |
 | opensearch.dashboard.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
@@ -179,9 +182,6 @@ Kubernetes: `>= 1.26.0`
 | opentelemetry-collector.config.receivers.otlp.protocols.grpc.endpoint | string | `"${env:MY_POD_IP}:4317"` | |
 | opentelemetry-collector.config.receivers.otlp.protocols.http.endpoint | string | `"${env:MY_POD_IP}:4318"` | |
 | opentelemetry-collector.config.service.extensions[0] | string | `"health_check"` | |
-| opentelemetry-collector.config.service.extensions[1] | string | `"memory_ballast"` | |
-| opentelemetry-collector.config.service.pipelines.logs | string | `nil` | |
-| opentelemetry-collector.config.service.pipelines.metrics | string | `nil` | |
 | opentelemetry-collector.config.service.pipelines.traces.exporters[0] | string | `"otlp/jaeger"` | |
 | opentelemetry-collector.config.service.pipelines.traces.exporters[1] | string | `"otlp/data-prepper"` | |
 | opentelemetry-collector.config.service.pipelines.traces.processors[0] | string | `"memory_limiter"` | |
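Note: dropping memory_ballast follows upstream opentelemetry-collector, where the ballast extension has been deprecated in favour of the memory_limiter processor and GOMEMLIMIT. A rough check that nothing rendered by the umbrella chart still references it (chart path and release name are assumptions):

    # assumes `helm dependency build charts/kubezero-telemetry` has been run
    helm template telemetry charts/kubezero-telemetry 2>/dev/null | grep -q memory_ballast \
      && echo "memory_ballast still referenced" || echo "no memory_ballast references"
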
@@ -4,7 +4,6 @@ fluentd is deployed with the default values
 If the fluentd config is overriden and the metrics server removed
 this will fail.
 */}}
-{{- if .Values.testFramework.enabled }}
 {{ if empty .Values.service.ports }}
 apiVersion: v1
 kind: Pod
@@ -28,4 +27,3 @@ spec:
 wget '{{ include "fluentd.fullname" . }}:24231/metrics'
 restartPolicy: Never
 {{ end }}
-{{- end }}
@@ -13,9 +13,6 @@ image:
 pullPolicy: "IfNotPresent"
 tag: ""
 
-testFramework:
-  enabled: false
-
 ## Optional array of imagePullSecrets containing private registry credentials
 ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
 imagePullSecrets: []
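Note: with the local testFramework toggle dropped (template guard, values key, and the matching hunks removed from fluentd.patch below), the vendored fluentd chart falls back to upstream behaviour for the connection test. A quick way to see what now gets rendered for it; chart path and release name are assumptions:

    helm template telemetry charts/kubezero-telemetry \
      --show-only charts/fluentd/templates/tests/test-connection.yaml
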
@@ -9,36 +9,3 @@ diff -rtuN charts/fluentd.orig/templates/fluentd-configurations-cm.yaml charts/f
 + {{- (tpl $value $) | nindent 4 }}
 {{- end }}
 {{- end }}
-
-diff -rtuN charts/fluentd.orig/templates/tests/test-connection.yaml charts/fluentd/templates/tests/test-connection.yaml
---- charts/fluentd.orig/templates/tests/test-connection.yaml 2024-04-08 11:00:03.030515998 +0000
-+++ charts/fluentd/templates/tests/test-connection.yaml 2024-04-08 11:03:16.254774985 +0000
-@@ -4,6 +4,7 @@
- If the fluentd config is overriden and the metrics server removed
- this will fail.
- */}}
-+{{- if .Values.testFramework.enabled }}
- {{ if empty .Values.service.ports }}
- apiVersion: v1
- kind: Pod
-@@ -26,4 +27,5 @@
- while :; do nc -vz {{ include "fluentd.fullname" . }}:24231 && break; sleep 1; done
- wget '{{ include "fluentd.fullname" . }}:24231/metrics'
- restartPolicy: Never
--{{ end }}
-\ No newline at end of file
-+{{ end }}
-+{{- end }}
-diff -rtuN charts/fluentd.orig/values.yaml charts/fluentd/values.yaml
---- charts/fluentd.orig/values.yaml 2024-04-08 11:00:03.030515998 +0000
-+++ charts/fluentd/values.yaml 2024-04-08 11:00:03.040516045 +0000
-@@ -13,6 +13,9 @@
- pullPolicy: "IfNotPresent"
- tag: ""
-
-+testFramework:
-+  enabled: false
-+
- ## Optional array of imagePullSecrets containing private registry credentials
- ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- imagePullSecrets: []
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.31.4-alpha
+version: 1.31.5-alpha
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -25,6 +25,7 @@ spec:
 repoURL: {{ .Values.kubezero.repoURL }}
 targetRevision: {{ default .Values.kubezero.targetRevision ( index .Values $name "targetRevision" ) | quote }}
 helm:
+  skipTests: true
 valuesObject:
 {{- include (print $name "-values") $ | nindent 8 }}
 
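Note: spec.source.helm.skipTests keeps helm test hooks out of the manifests Argo CD renders for each child app; it needs an Argo CD release that already supports the field. A rough way to confirm it landed on a generated Application, with the app name and namespace being assumptions:

    kubectl -n argocd get application telemetry \
      -o jsonpath='{.spec.source.helm.skipTests}{"\n"}'
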
@@ -135,8 +135,8 @@ py-kube-downscaler:
 {{- toYaml . | nindent 2 }}
 {{- end }}
 
-# AWS only
 {{- if eq .Values.global.platform "aws" }}
+# AWS only
 aws-node-termination-handler:
 enabled: {{ default "true" (index .Values "addons" "aws-node-termination-handler" "enabled") }}
 
@@ -146,6 +146,8 @@ aws-node-termination-handler:
 
 {{- with .Values.metrics }}
 enablePrometheusServer: {{ .enabled }}
+serviceMonitor:
+  create: true
 {{- end }}
 
 queueURL: "https://sqs.{{ .Values.global.aws.region }}.amazonaws.com/{{ .Values.global.aws.accountId }}/{{ .Values.global.clusterName }}_Nth"
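Note: with metrics enabled, the NTH values now also request a ServiceMonitor from the chart so Prometheus can scrape the termination handler without a manual scrape config. A rough post-upgrade check; namespace and label selector are assumptions:

    kubectl -n kube-system get servicemonitors.monitoring.coreos.com \
      -l app.kubernetes.io/name=aws-node-termination-handler
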
@@ -187,8 +189,8 @@ aws-eks-asg-rolling-update-handler:
 - name: AWS_STS_REGIONAL_ENDPOINTS
 value: "regional"
 
-{{- with .Values.addons.awsNeuron }}
-awsNeuron:
+{{- with (index .Values "addons" "neuron-helm-chart") }}
+neuron-helm-chart:
 {{- toYaml . | nindent 2 }}
 {{- end }}
 
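Note: values are now only looked up under the new key, so any existing cluster override for the Neuron device plugin has to move from addons.awsNeuron to addons.neuron-helm-chart (the nested content itself is passed through unchanged via toYaml). A hypothetical migrated override:

    # hypothetical override file; the nested keys are examples, not required fields
    printf '%s\n' \
      'addons:' \
      '  neuron-helm-chart:   # was: awsNeuron' \
      '    enabled: true' \
      > values-neuron.yaml
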
@@ -1,5 +1,9 @@
 {{- define "istio-ingress-values" }}
 
+{{- if eq .Values.global.platform "aws" }}
+{{- end }}
+{{- $ingressLabel := "node.kubernetes.io/ingress.public" }}
+
 gateway:
 name: istio-ingressgateway
 
@@ -17,7 +17,7 @@ global:
 
 addons:
   enabled: true
-  targetRevision: 0.8.11
+  targetRevision: 0.8.13
   external-dns:
     enabled: false
   forseti:
@@ -36,7 +36,7 @@ addons:
 network:
   enabled: true
   retain: true
-  targetRevision: 0.5.6
+  targetRevision: 0.5.7
   cilium:
     cluster: {}
 
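Note: after the addons (0.8.13) and network (0.5.7) bumps, the App-of-Apps should repoint the child Applications at the new chart revisions. A rough verification, assuming the default Argo CD namespace and these child app names:

    kubectl -n argocd get applications.argoproj.io addons network \
      -o custom-columns=NAME:.metadata.name,TARGET:.spec.source.targetRevision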