Compare commits: main...renovate/k… (1 commit: 2541ce353a)
@@ -2,13 +2,7 @@
 
 # All things BEFORE the first controller / control plane upgrade
 pre_control_plane_upgrade_cluster() {
-  if [ "$PLATFORM" != "gke" ];then
-    # patch multus DS to ONLY run pods on 1.31 controllers
-    kubectl patch ds kube-multus-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
-
-    # patch kube-proxy DS to ONLY run pods on 1.31 controllers
-    kubectl patch ds kube-proxy -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
-  fi
+  echo
 }
 
 
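For context, the JSON payload of the dropped `kubectl patch` calls corresponds to this pod-template stanza, which pins the DaemonSet's pods to nodes still labelled with the 1.31 KubeZero version (a re-rendering of the patch shown above, not additional chart content):

```yaml
# Effect of the dropped patch on the DaemonSet pod template
spec:
  template:
    spec:
      nodeSelector:
        node.kubernetes.io/kubezero.version: v1.31.6
```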
@@ -22,20 +16,7 @@ post_control_plane_upgrade_cluster() {
 pre_cluster_upgrade_final() {
   set +e
 
-  if [ "$PLATFORM" != "gke" ];then
-    # cleanup multus
-    kubectl delete clusterrolebinding multus
-    kubectl delete clusterrole multus
-    kubectl delete serviceaccount multus -n kube-system
-    kubectl delete cm multus-cni-config -n kube-system
-    kubectl delete ds kube-multus-ds -n kube-system
-    kubectl delete NetworkAttachmentDefinition cilium
-    kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io
-
-    # remove kube-proxy
-    kubectl -n kube-system delete ds kube-proxy
-    kubectl -n kube-system delete cm kube-proxy
-  fi
+  echo
 
   set -e
 }
@@ -63,7 +63,7 @@ render_kubeadm() {
 
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster Kubelet; do
+  for f in Cluster KubeProxy Kubelet; do
     # echo "---" >> /etc/kubernetes/kubeadm.yaml
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done
@@ -169,7 +169,7 @@ kubeadm_upgrade() {
   else
     pre_cluster_upgrade_final
 
-    _kubeadm upgrade apply phase addon coredns $KUBE_VERSION
+    _kubeadm upgrade apply phase addon all $KUBE_VERSION
 
     post_cluster_upgrade_final
 
@@ -239,7 +239,7 @@ control_plane_node() {
   if [[ "$CMD" =~ ^(join)$ ]]; then
     # Delete any former self in case forseti did not delete yet
     kubectl delete node ${NODENAME} --wait=true || true
-    # Wait for all pods to be deleted otherwise we end up with stale pods
+    # Wait for all pods to be deleted otherwise we end up with stale pods eg. kube-proxy and all goes to ....
     kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}
 
     # get current running etcd pods for etcdctl commands
@@ -251,7 +251,7 @@ control_plane_node() {
     done
 
     # see if we are a former member and remove our former self if so
-    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
+    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
     [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
 
     # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
@@ -309,9 +309,8 @@ control_plane_node() {
   _kubeadm init phase mark-control-plane
   _kubeadm init phase kubelet-finalize all
 
-  # we skip kube-proxy
   if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
-    _kubeadm init phase addon coredns
+    _kubeadm init phase addon all
   fi
 
   post_kubeadm
@@ -3,7 +3,6 @@
 # Simulate well-known CRDs being available
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
 LOCAL_DEV=${LOCAL_DEV:-""}
-ENV_VALUES=""
 
 export HELM_SECRETS_BACKEND="vals"
 
@@ -81,19 +80,15 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }
 
 
 function ensure_kubezero_secret_key() {
-  local ns=$1
-  local secret=$2
-
-  local secret="$(kubectl get secret -n $ns $secret -o yaml)"
-  local key
-  local val
-
-  for key in $1; do
-    val=$(echo $secret | yq ".data.\"$key\"")
+  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
+  local key=""
+  local val=""
+
+  for key in $@; do
+    val=$(echo "$secret" | yq ".data.\"$key\"")
     if [ "$val" == "null" ]; then
-      set_kubezero_secret $key ""
+      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
     fi
   done
 }
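After this change, `ensure_kubezero_secret_key` no longer takes a namespace and secret name; it always reads `kubezero/kubezero-secrets` and treats all of its arguments as key names. A minimal usage sketch (the key names are hypothetical):

```bash
# Ensure each named key exists in kubezero/kubezero-secrets, creating it empty if missing
ensure_kubezero_secret_key some-module.ca.crt some-module.ca.key
```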
@@ -267,11 +262,6 @@ function _helm() {
 
     crds
 
-  elif [ $action == "dryrun" ]; then
-    cat $WORKDIR/values.yaml
-    render
-    cat $WORKDIR/helm.yaml
-
   elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml
@@ -10,14 +10,7 @@ def migrate(values):
 
     # 1.32
     try:
-        values["istio-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-ingress"]["gateway"]["service"]["ports"]
-        values["istio-ingress"]["gateway"]["service"].pop("ports")
-    except KeyError:
         pass
-
-    try:
-        values["istio-private-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-private-ingress"]["gateway"]["service"]["ports"]
-        values["istio-private-ingress"]["gateway"]["service"].pop("ports")
     except KeyError:
         pass
 
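The deleted migration renamed the gateway's `service.ports` list to `service.extraPorts` for both ingress charts. In values terms it performed roughly this transformation (the port entry is made up for illustration):

```yaml
# Before:
istio-ingress:
  gateway:
    service:
      ports:
        - name: example   # hypothetical entry
          port: 1234
# After: the same list under service.extraPorts, with service.ports removed.
```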
@@ -47,6 +47,7 @@ Kubernetes: `>= 1.32.0-0`
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
+- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 
 - https://github.com/awslabs/amazon-eks-ami
 
@@ -22,6 +22,7 @@
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
+- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 
 - https://github.com/awslabs/amazon-eks-ami
 
@@ -29,6 +29,12 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.
@@ -6,8 +6,6 @@ featureGates:
   ControlPlaneKubeletLocalMode: true
   NodeLocalCRISocket: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
-proxy:
-  disabled: true
 networking:
   podSubnet: 10.244.0.0/16
 etcd:
@@ -7,8 +7,6 @@ localAPIEndpoint:
 patches:
   directory: {{ . }}
 {{- end }}
-skipPhases:
-  - addon/kube-proxy
 nodeRegistration:
   criSocket: "unix:///run/containerd/containerd.sock"
   ignorePreflightErrors:
charts/kubeadm/templates/KubeProxyConfiguration.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+# kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
+metricsBindAddress: "0.0.0.0:10249"
+mode: "iptables"
+logging:
+  format: json
+iptables:
+  localhostNodePorts: false
+#nodePortAddresses: primary
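Together with the `for f in Cluster KubeProxy Kubelet` change in render_kubeadm() above, this template gets appended into `${HOSTFS}/etc/kubernetes/kubeadm.yaml`. kubeadm accepts a multi-document config file, so the assembled result looks roughly like this sketch (document bodies elided; kinds and apiVersions follow the templates and doc links in this diff):

```yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
# ...
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# ...
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# ...
```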
@@ -3,6 +3,12 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.
@@ -108,7 +114,7 @@ rules:
   # Get responses can be large; skip them.
   - level: Request
     verbs: ["get", "list", "watch"]
     resources:
       - group: "" # core
       - group: "admissionregistration.k8s.io"
       - group: "apiextensions.k8s.io"
@@ -131,7 +137,7 @@ rules:
       - "RequestReceived"
   # Default level for known APIs
   - level: RequestResponse
     resources:
       - group: "" # core
       - group: "admissionregistration.k8s.io"
       - group: "apiextensions.k8s.io"
|
@ -2,8 +2,8 @@ apiVersion: v2
|
|||||||
name: kubezero-addons
|
name: kubezero-addons
|
||||||
description: KubeZero umbrella chart for various optional cluster addons
|
description: KubeZero umbrella chart for various optional cluster addons
|
||||||
type: application
|
type: application
|
||||||
version: 0.8.15
|
version: 0.8.14
|
||||||
appVersion: v1.32
|
appVersion: v1.31
|
||||||
home: https://kubezero.com
|
home: https://kubezero.com
|
||||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||||
keywords:
|
keywords:
|
||||||
@@ -34,7 +34,7 @@ dependencies:
     repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
   - name: neuron-helm-chart
-    version: 1.1.2
+    version: 1.1.1
     # https://github.com/aws-neuron/neuron-helm-charts/tree/main/charts/neuron-helm-chart
     repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
     condition: neuron-helm-chart.enabled
@@ -43,7 +43,7 @@ dependencies:
     repository: https://bitnami-labs.github.io/sealed-secrets
     condition: sealed-secrets.enabled
   - name: aws-node-termination-handler
-    version: 0.27.1
+    version: 0.27.0
     repository: "oci://public.ecr.aws/aws-ec2/helm"
     condition: aws-node-termination-handler.enabled
   - name: aws-eks-asg-rolling-update-handler
@@ -54,4 +54,4 @@ dependencies:
     version: 0.3.2
     repository: https://caas-team.github.io/helm-charts/
     condition: py-kube-downscaler.enabled
-kubeVersion: ">= 1.31.0-0"
+kubeVersion: ">= 1.30.0-0"
@@ -1,6 +1,6 @@
 # kubezero-addons
 
-  
+  
 
 KubeZero umbrella chart for various optional cluster addons
 
@@ -14,7 +14,7 @@ KubeZero umbrella chart for various optional cluster addons
 
 ## Requirements
 
-Kubernetes: `>= 1.31.0-0`
+Kubernetes: `>= 1.30.0-0`
 
 | Repository | Name | Version |
 |------------|------|---------|
@@ -24,8 +24,8 @@ Kubernetes: `>= 1.31.0-0`
 | https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.46.6 |
 | https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.17.1 |
 | https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
-| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.27.1 |
-| oci://public.ecr.aws/neuron | neuron-helm-chart | 1.1.2 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.27.0 |
+| oci://public.ecr.aws/neuron | neuron-helm-chart | 1.1.1 |
 
 # MetalLB
 
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.25.1
+appVersion: 1.25.0
 description: A Helm chart for the AWS Node Termination Handler.
 home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -21,4 +21,4 @@ name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
 type: application
-version: 0.27.1
+version: 0.27.0
@@ -25,8 +25,8 @@ argo-events:
     # do NOT use -alpine tag as the entrypoint differs
     versions:
       - version: 2.10.11
-        natsImage: nats:2.11.4-scratch
-        metricsExporterImage: natsio/prometheus-nats-exporter:0.17.3
+        natsImage: nats:2.11.1-scratch
+        metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
         configReloaderImage: natsio/nats-server-config-reloader:0.14.1
         startCommand: /nats-server
 
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-auth
 description: KubeZero umbrella chart for all things Authentication and Identity management
 type: application
-version: 0.6.3
+version: 0.6.4
 appVersion: 26.0.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -18,6 +18,6 @@ dependencies:
     repository: https://cdn.zero-downtime.net/charts/
   - name: keycloak
     repository: "oci://registry-1.docker.io/bitnamicharts"
-    version: 24.7.1
+    version: 24.7.3
     condition: keycloak.enabled
 kubeVersion: ">= 1.30.0-0"
|
@ -289,7 +289,7 @@ trivy:
|
|||||||
#tag: 0.57.0
|
#tag: 0.57.0
|
||||||
persistence:
|
persistence:
|
||||||
enabled: true
|
enabled: true
|
||||||
size: 2Gi
|
size: 1Gi
|
||||||
rbac:
|
rbac:
|
||||||
create: false
|
create: false
|
||||||
|
|
||||||
|
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-falco
 description: Falco Container Security and Audit components
 type: application
-version: 0.1.3
+version: 0.1.2
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: falco
-    version: 5.0.0
+    version: 4.2.5
     repository: https://falcosecurity.github.io/charts
     condition: k8saudit.enabled
     alias: k8saudit
@@ -42,3 +42,17 @@ helm.sh/chart: {{ include "kubezero-lib.chart" . }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/part-of: kubezero
 {{- end -}}
+
+{{- /*
+kubezero-lib.util.merge will merge two YAML templates and output the result.
+This takes an array of three values:
+- the top context
+- the template name of the overrides (destination)
+- the template name of the base (source)
+*/ -}}
+{{- define "kubezero-lib.util.merge" -}}
+{{- $top := first . -}}
+{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
+{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
+{{- toYaml (merge $overrides $tpl) -}}
+{{- end -}}
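Per its doc comment, the added `kubezero-lib.util.merge` helper takes a list of three values: the caller's context plus two template names whose rendered YAML is merged, with the overrides winning. A call sketch with hypothetical template names:

```yaml
{{- include "kubezero-lib.util.merge" (list . "mymodule.overrides" "mymodule.defaults") }}
```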
@@ -12,7 +12,7 @@ kube-prometheus-stack:
   kubeStateMetrics:
     enabled: true
   kubeProxy:
-    enabled: false
+    enabled: true
 
   kubeEtcd:
     enabled: true
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-mq
 description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 type: application
-version: 0.3.12
+version: 0.3.11
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,7 +17,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
-    version: 1.3.7
+    version: 1.3.3
     repository: https://nats-io.github.io/k8s/helm/charts/
     condition: nats.enabled
   - name: rabbitmq
@@ -1,6 +1,6 @@
 # kubezero-mq
 
- 
+ 
 
 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 
@@ -20,7 +20,7 @@ Kubernetes: `>= 1.26.0`
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.7 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
 
 ## Values
 
@@ -32,8 +32,6 @@ Kubernetes: `>= 1.26.0`
 | nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | nats.mqtt.enabled | bool | `false` | |
 | nats.natsBox.enabled | bool | `false` | |
-| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".maxSkew | int | `1` | |
-| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".whenUnsatisfiable | string | `"DoNotSchedule"` | |
 | nats.promExporter.enabled | bool | `false` | |
 | nats.promExporter.podMonitor.enabled | bool | `false` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
@@ -1,7 +1,7 @@
 configmap: grafana-dashboards-nats
-condition: '.Values.nats.promExporter.podMonitor.enabled'
+condition: '.Values.nats.exporter.serviceMonitor.enabled'
 gzip: true
 # folder:
 dashboards:
   - name: nats
     url: https://grafana.com/api/dashboards/13707/revisions/1/download
|
@ -10,6 +10,7 @@ keywords:
|
|||||||
- multus
|
- multus
|
||||||
- cilium
|
- cilium
|
||||||
- aws-cni
|
- aws-cni
|
||||||
|
- metallb
|
||||||
maintainers:
|
maintainers:
|
||||||
- name: Stefan Reimer
|
- name: Stefan Reimer
|
||||||
email: stefan@zero-downtime.net
|
email: stefan@zero-downtime.net
|
||||||
@@ -21,6 +22,10 @@ dependencies:
     version: 1.17.4
     repository: https://helm.cilium.io/
     condition: cilium.enabled
+  - name: metallb
+    version: 0.14.9
+    repository: https://metallb.github.io/metallb
+    condition: metallb.enabled
   - name: haproxy
     version: 1.24.0
     repository: https://haproxytech.github.io/helm-charts
|
@ -1,6 +1,6 @@
|
|||||||
# kubezero-network
|
# kubezero-network
|
||||||
|
|
||||||
 
|
 
|
||||||
|
|
||||||
KubeZero umbrella chart for all things network
|
KubeZero umbrella chart for all things network
|
||||||
|
|
||||||
@@ -20,22 +20,20 @@ Kubernetes: `>= 1.30.0-0`
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
-| https://helm.cilium.io/ | cilium | 1.17.4 |
+| https://helm.cilium.io/ | cilium | 1.17.3 |
+| https://metallb.github.io/metallb | metallb | 0.14.9 |
 
 ## Values
 
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| cilium.bpf.preallocateMaps | bool | `true` | |
 | cilium.cgroup.autoMount.enabled | bool | `false` | |
 | cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` | |
 | cilium.cluster.id | int | `240` | |
 | cilium.cluster.name | string | `"default"` | |
 | cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
-| cilium.cni.exclusive | bool | `true` | |
-| cilium.cni.iptablesRemoveAWSRules | bool | `false` | |
+| cilium.cni.exclusive | bool | `false` | |
 | cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
-| cilium.dnsProxy.enableTransparentMode | bool | `true` | |
 | cilium.enabled | bool | `false` | |
 | cilium.envoy.enabled | bool | `false` | |
 | cilium.hubble.enabled | bool | `false` | |
@@ -47,9 +45,6 @@ Kubernetes: `>= 1.30.0-0`
 | cilium.hubble.ui.enabled | bool | `false` | |
 | cilium.image.useDigest | bool | `false` | |
 | cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
-| cilium.k8sServiceHost | string | `""` | |
-| cilium.k8sServicePort | int | `6443` | |
-| cilium.kubeProxyReplacement | bool | `true` | |
 | cilium.l7Proxy | bool | `false` | |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cilium.operator.prometheus.enabled | bool | `false` | |
|
|||||||
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
|
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
| cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
|
| cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
|
||||||
| cilium.operator.tolerations[2].effect | string | `"NoSchedule"` | |
|
|
||||||
| cilium.operator.tolerations[2].key | string | `"node.kubernetes.io/not-ready"` | |
|
|
||||||
| cilium.prometheus.enabled | bool | `false` | |
|
| cilium.prometheus.enabled | bool | `false` | |
|
||||||
| cilium.prometheus.port | int | `9091` | |
|
| cilium.prometheus.port | int | `9091` | |
|
||||||
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
|
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
|
||||||
| cilium.resources.requests.cpu | string | `"50m"` | |
|
| cilium.resources.limits.memory | string | `"1Gi"` | |
|
||||||
| cilium.resources.requests.memory | string | `"256Mi"` | |
|
| cilium.resources.requests.cpu | string | `"10m"` | |
|
||||||
|
| cilium.resources.requests.memory | string | `"160Mi"` | |
|
||||||
| cilium.routingMode | string | `"tunnel"` | |
|
| cilium.routingMode | string | `"tunnel"` | |
|
||||||
| cilium.sysctlfix.enabled | bool | `false` | |
|
| cilium.sysctlfix.enabled | bool | `false` | |
|
||||||
| cilium.tunnelProtocol | string | `"geneve"` | |
|
| cilium.tunnelProtocol | string | `"geneve"` | |
|
||||||
@ -113,6 +107,11 @@ Kubernetes: `>= 1.30.0-0`
|
|||||||
| haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
|
| haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
|
||||||
| haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
|
| haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
|
||||||
| haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
|
| haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
|
||||||
|
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
|
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
|
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
|
| metallb.enabled | bool | `false` | |
|
||||||
|
| metallb.ipAddressPools | list | `[]` | |
|
||||||
| multus.clusterNetwork | string | `"cilium"` | |
|
| multus.clusterNetwork | string | `"cilium"` | |
|
||||||
| multus.defaultNetworks | list | `[]` | |
|
| multus.defaultNetworks | list | `[]` | |
|
||||||
| multus.enabled | bool | `false` | |
|
| multus.enabled | bool | `false` | |
|
||||||
|
charts/kubezero-network/templates/metallb/config.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
+{{- if .Values.metallb.enabled }}
+apiVersion: metallb.io/v1beta1
+kind: L2Advertisement
+metadata:
+  name: l2advertisement1
+  namespace: kube-system
+spec:
+  ipAddressPools:
+  {{- range $key, $val := .Values.metallb.ipAddressPools }}
+  {{- if eq $val.protocol "layer2" }}
+  - {{ $val.name }}
+  {{- end }}
+  {{- end }}
+---
+
+{{- range $key, $val := .Values.metallb.ipAddressPools }}
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+  name: {{ $val.name }}
+  namespace: kube-system
+spec:
+  addresses:
+    {{- $val.addresses | toYaml | nindent 4 }}
+{{- end }}
+---
+{{- end }}
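The new template renders one IPAddressPool per entry of `metallb.ipAddressPools` plus a single L2Advertisement listing the layer2 pools. A values sketch that would drive it, modeled directly on the commented example in the values.yaml hunk below:

```yaml
metallb:
  enabled: true
  ipAddressPools:
    - name: my-ip-space
      protocol: layer2
      addresses:
        - 192.168.42.0/24
```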
@@ -1,3 +1,19 @@
+metallb:
+  enabled: false
+
+  controller:
+    tolerations:
+      - key: node-role.kubernetes.io/control-plane
+        effect: NoSchedule
+    nodeSelector:
+      node-role.kubernetes.io/control-plane: ""
+
+  ipAddressPools: []
+  #- name: my-ip-space
+  #  protocol: layer2
+  #  addresses:
+  #  - 192.168.42.0/24
+
 multus:
   enabled: false
   image:
@@ -17,18 +33,17 @@ cilium:
 
   resources:
     requests:
-      cpu: 50m
-      memory: 256Mi
-    # limits:
-    #   memory: 1Gi
+      cpu: 10m
+      memory: 160Mi
+    limits:
+      memory: 1Gi
       #   cpu: 4000m
 
   cni:
     binPath: "/usr/libexec/cni"
     logFile: /var/log/cilium-cni.log
     #-- Ensure this is false if multus is enabled
-    exclusive: true
-    iptablesRemoveAWSRules: false
+    exclusive: false
 
   cluster:
     # This should match the second octet of clusterPoolIPv4PodCIDRList
@@ -48,32 +63,13 @@ cilium:
     enabled: false
     #rollOutCiliumPods: true
 
-  kubeProxyReplacement: true
-  dnsProxy:
-    enableTransparentMode: true
-
-  # For LB support via L2announcement or BGP - on-prem only
-  # l2announcements:
-  #   enabled: true
-  # Not needed normally
-  # externalIPs:
-  #   enabled: true
-
-  k8sServiceHost: ""
-  k8sServicePort: 6443
-  # k8s:
-  #   # This has to be set to the DNS name of all API servers
-  #   # For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
-  #   apiServerURLs: ""
-
   cgroup:
     autoMount:
       enabled: false
     hostRoot: "/sys/fs/cgroup"
 
-  bpf:
-    preallocateMaps: true
   # we need biDirectional so use helm init-container
+  #bpf:
   #  autoMount:
   #    enabled: false
 
|
|||||||
- key: node-role.kubernetes.io/control-plane
|
- key: node-role.kubernetes.io/control-plane
|
||||||
effect: NoSchedule
|
effect: NoSchedule
|
||||||
# the operator removes the taints,
|
# the operator removes the taints,
|
||||||
# so we need to break chicken egg
|
# so we need to break chicken egg on single controller
|
||||||
- key: node.cilium.io/agent-not-ready
|
- key: node.cilium.io/agent-not-ready
|
||||||
effect: NoSchedule
|
effect: NoSchedule
|
||||||
- key: node.kubernetes.io/not-ready
|
|
||||||
effect: NoSchedule
|
|
||||||
|
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
node-role.kubernetes.io/control-plane: ""
|
node-role.kubernetes.io/control-plane: ""
|
||||||
|
@@ -1,20 +0,0 @@
-apiVersion: v2
-name: kubezero-policy
-description: KubeZero umbrella chart for Kyverno
-type: application
-version: 0.1.0
-appVersion: v1.14
-home: https://kubezero.com
-icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
-keywords:
-  - kubezero
-  - kyverno
-maintainers:
-  - name: Stefan Reimer
-    email: stefan@zero-downtime.net
-dependencies:
-  - name: kyverno
-    version: 3.4.2
-    repository: https://kyverno.github.io/kyverno/
-    condition: kyverno.enabled
-kubeVersion: ">= 1.30.0-0"
@@ -1,29 +0,0 @@
-# kubezero-policy
-
-  
-
-KubeZero umbrella chart for Kyverno
-
-**Homepage:** <https://kubezero.com>
-
-## Maintainers
-
-| Name | Email | Url |
-| ---- | ------ | --- |
-| Stefan Reimer | <stefan@zero-downtime.net> | |
-
-## Requirements
-
-Kubernetes: `>= 1.30.0-0`
-
-| Repository | Name | Version |
-|------------|------|---------|
-| https://kyverno.github.io/kyverno/ | kyverno | 3.4.2 |
-
-# Kyverno
-
-## Values
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| kyverno.enabled | bool | `false` | |
@@ -1,18 +0,0 @@
-{{ template "chart.header" . }}
-{{ template "chart.deprecationWarning" . }}
-
-{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
-
-{{ template "chart.description" . }}
-
-{{ template "chart.homepageLine" . }}
-
-{{ template "chart.maintainersSection" . }}
-
-{{ template "chart.sourcesSection" . }}
-
-{{ template "chart.requirementsSection" . }}
-
-# Kyverno
-
-{{ template "chart.valuesSection" . }}
@@ -1,9 +0,0 @@
-#!/bin/bash
-set -ex
-
-. ../../scripts/lib-update.sh
-
-login_ecr_public
-update_helm
-
-update_docs
@@ -1,2 +0,0 @@
-kyverno:
-  enabled: false
@@ -28,7 +28,7 @@ spec:
     helm:
       skipTests: true
       valuesObject:
-        {{- toYaml (merge (omit (index .Values $name) "enabled" "namespace" "retain" "targetRevision") (fromYaml (include (print $name "-values") $ ))) | nindent 8 }}
+        {{- include (print $name "-values") $ | nindent 8 }}
 
   destination:
     server: "https://kubernetes.default.svc"
@@ -67,7 +67,7 @@ gateway:
         gatewayProtocol: HTTPS
         tls:
           mode: SIMPLE
-  {{- with index .Values "istio-ingress" "gateway" "service" "extraPorts" }}
+  {{- with index .Values "istio-ingress" "gateway" "service" "ports" }}
   {{- toYaml . | nindent 4 }}
   {{- end }}
 
@@ -93,6 +93,7 @@ certificates:
     {{- toYaml $cert.dnsNames | nindent 4 }}
     {{- end }}
 {{- end }}
+proxyProtocol: {{ default true (index .Values "istio-ingress" "proxyProtocol") }}
 {{- with (index .Values "istio-ingress" "hardening") }}
 hardening:
   {{- toYaml . | nindent 2 }}
@@ -64,7 +64,7 @@ gateway:
         gatewayProtocol: HTTPS
         tls:
           mode: SIMPLE
-  {{- with index .Values "istio-private-ingress" "gateway" "service" "extraPorts" }}
+  {{- with index .Values "istio-private-ingress" "gateway" "service" "ports" }}
   {{- toYaml . | nindent 4 }}
   {{- end }}
 
@@ -88,6 +88,7 @@ certificates:
   dnsNames:
     {{- toYaml $cert.dnsNames | nindent 4 }}
   {{- end }}
+proxyProtocol: {{ default true (index .Values "istio-private-ingress" "proxyProtocol") }}
 {{- with (index .Values "istio-private-ingress" "hardening") }}
 hardening:
   {{- toYaml . | nindent 2 }}
@@ -1,22 +1,30 @@
 {{- define "network-values" }}
-cilium:
-  k8sServiceHost: {{ .Values.global.apiServerUrl }}
+multus:
+  enabled: true
+  clusterNetwork: "cilium"
+
+  # {{- if eq .Values.global.platform "aws" }}
+  # image:
+  #   pullPolicy: Never
+  # {{- end }}
+
+cilium:
+  enabled: true
 
   # {{- if eq .Values.global.platform "aws" }}
   # image:
   #   pullPolicy: Never
   # {{- end }}
-  # k8s:
-  #   apiServerURLs: "https://{{ .Values.global.apiServerUrl }}"
 
   cluster:
     name: {{ .Values.global.clusterName }}
-    {{- with ((.Values.network.cilium).cluster).id }}
+    {{- with .Values.network.cilium.cluster.id }}
+    id: {{ . }}
     ipam:
       operator:
         clusterPoolIPv4PodCIDRList:
           - 10.{{ . }}.0.0/16
     {{- end }}
 
   prometheus:
     enabled: {{ .Values.metrics.enabled }}
@@ -32,6 +40,11 @@ cilium:
   serviceMonitor:
     enabled: {{ .Values.metrics.enabled }}
 
+{{- with .Values.network.metallb }}
+metallb:
+  {{- toYaml . | nindent 2 }}
+{{- end }}
+
 {{- with .Values.network.haproxy }}
 haproxy:
   {{- toYaml . | nindent 2 }}
|
|||||||
|
|
||||||
|
|
||||||
{{- define "network-argo" }}
|
{{- define "network-argo" }}
|
||||||
|
# Metallb
|
||||||
|
ignoreDifferences:
|
||||||
|
- group: apiextensions.k8s.io
|
||||||
|
kind: CustomResourceDefinition
|
||||||
|
jsonPointers:
|
||||||
|
- /spec/conversion/webhook/clientConfig/caBundle
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{ include "kubezero-app.app" . }}
|
{{ include "kubezero-app.app" . }}
|
||||||
|
@@ -1,9 +0,0 @@
-{{- define "policy-values" }}
-kyverno:
-  test: true
-{{- end }}
-
-{{- define "policy-argo" }}
-{{- end }}
-
-{{ include "kubezero-app.app" . }}
@@ -61,10 +61,8 @@ opensearch:
   {{- toYaml . | nindent 2 }}
   {{- end }}
 
-  # Disabled until upstream made up their mind
-  # https://github.com/opensearch-project/technical-steering/issues/35
-  # serviceMonitor:
-  #   enabled: {{ .Values.metrics.enabled }}
+  serviceMonitor:
+    enabled: {{ .Values.metrics.enabled }}
 {{- end }}
 
 {{- if index .Values "telemetry" "opensearch-dashboards" }}
@@ -73,10 +71,8 @@ opensearch-dashboards:
   {{- toYaml . | nindent 2 }}
   {{- end }}
 
-  # Disabled until upstream made up their mind
-  # https://github.com/opensearch-project/technical-steering/issues/35
-  # serviceMonitor:
-  #   enabled: {{ .Values.metrics.enabled }}
+  serviceMonitor:
+    enabled: {{ .Values.metrics.enabled }}
 {{- end }}
 
 {{- end }}
@@ -1,6 +1,5 @@
 global:
   clusterName: zdt-trial-cluster
-  apiServerUrl: localhost:6443
 
   # platform: aws (kubeadm, default), gke, or nocloud
   platform: "aws"
|
|||||||
aws-eks-asg-rolling-update-handler:
|
aws-eks-asg-rolling-update-handler:
|
||||||
enabled: false
|
enabled: false
|
||||||
|
|
||||||
policy:
|
|
||||||
enabled: false
|
|
||||||
targetRevision: 0.1.0
|
|
||||||
|
|
||||||
network:
|
network:
|
||||||
enabled: true
|
enabled: true
|
||||||
retain: true
|
retain: true
|
||||||
targetRevision: 0.5.9
|
targetRevision: 0.5.8
|
||||||
cilium:
|
cilium:
|
||||||
enabled: true
|
cluster: {}
|
||||||
|
|
||||||
cert-manager:
|
cert-manager:
|
||||||
enabled: false
|
enabled: false
|
||||||
@@ -65,13 +60,13 @@ storage:
 istio:
   enabled: false
   namespace: istio-system
-  targetRevision: 0.24.6
+  targetRevision: 0.24.3
 
 istio-ingress:
   enabled: false
   chart: kubezero-istio-gateway
   namespace: istio-ingress
-  targetRevision: 0.24.6
+  targetRevision: 0.24.3
   gateway:
     service: {}
 
|
|||||||
enabled: false
|
enabled: false
|
||||||
chart: kubezero-istio-gateway
|
chart: kubezero-istio-gateway
|
||||||
namespace: istio-ingress
|
namespace: istio-ingress
|
||||||
targetRevision: 0.24.6
|
targetRevision: 0.24.3
|
||||||
gateway:
|
gateway:
|
||||||
service: {}
|
service: {}
|
||||||
|
|
||||||