Compare commits


34 Commits

SHA1 Message Date
5ee3f2ffa6 chore(deps): update nats docker tag to v2.11.4 2025-06-03 03:01:27 +00:00
5a16422f75 Merge pull request 'chore(deps): update helm release falco to v5' (#120) from renovate/kubezero-falco-major-kubezero-falco-dependencies into main
Reviewed-on: #120
2025-06-02 17:48:45 +00:00
84081514c6 chore(deps): update helm release falco to v5 2025-06-02 17:48:45 +00:00
12fd6df3d8 Merge pull request 'chore(deps): update kubezero-addons-dependencies' (#87) from renovate/kubezero-addons-kubezero-addons-dependencies into main
Reviewed-on: #87
2025-06-02 17:48:39 +00:00
71d8919cc5 chore(deps): update kubezero-addons-dependencies 2025-06-02 17:48:39 +00:00
58986e1d5b feat: remove metallb, minor control-plane fix 2025-06-02 17:47:10 +00:00
257bedf284 chore: mq version bump, minor helm tweaks 2025-06-02 14:07:53 +00:00
6a002155a7 Merge pull request 'chore(deps): update natsio/prometheus-nats-exporter docker tag to v0.17.3' (#82) from renovate/natsio-prometheus-nats-exporter-0.x into main
Reviewed-on: #82
2025-06-02 13:49:47 +00:00
fb9865ef2c chore(deps): update natsio/prometheus-nats-exporter docker tag to v0.17.3 2025-06-02 13:49:47 +00:00
f01df7954d Merge pull request 'chore(deps): update helm release nats to v1.3.7' (#90) from renovate/kubezero-mq-kubezero-mq-dependencies into main
Reviewed-on: #90
2025-06-02 13:49:22 +00:00
e6493e9961 chore(deps): update helm release nats to v1.3.7 2025-06-02 13:49:22 +00:00
9e87f92d45 feat: merge kubezero and module values by default 2025-06-01 17:27:13 +00:00
49fa7b3c42 feat: ensure kube-proxy gets phased out during 1.32 upgrade 2025-05-30 17:55:14 +00:00
1538ea0d45 Merge pull request 'remove-kube-proxy' (#118) from remove-kube-proxy into main
Reviewed-on: #118
2025-05-30 12:36:56 +00:00
f72ef007f2 fix: more upgrade fixes 2025-05-30 12:36:56 +00:00
87e7f5fe20 fix: ensure dnsproxy is set, clean default values 2025-05-30 12:36:56 +00:00
7527e085ea feat: remove kube-proxy, enable cilium 2025-05-30 12:36:56 +00:00
7612d257aa fix: various typos, remove unnecessary LifeCycle for cilium-agent DS 2025-05-30 11:25:01 +00:00
c16a233864 ci: remove duplicated func 2025-05-30 11:14:07 +00:00
33307fccce Merge pull request 'disable-multus' (#117) from disable-multus into main
Reviewed-on: #117
2025-05-30 11:09:36 +00:00
09a2ead705 feat: add post-upgrade multus cleanup 2025-05-30 11:06:51 +00:00
7863202ca7 feat: disable multus by default 2025-05-29 23:52:18 +00:00
d3036ad1ac fix: minor tweaks 2025-05-29 17:19:51 +00:00
04ca35c676 Merge pull request 'chore(deps): update helm release cilium to v1.17.4' (#111) from renovate/kubezero-network-kubezero-network-dependencies into main
Reviewed-on: #111
2025-05-28 17:20:56 +00:00
55e22e7f6d chore(deps): update helm release cilium to v1.17.4 2025-05-28 17:20:56 +00:00
9e9ae3e8b8 Feat: tune ArgoCD sync options 2025-05-24 14:10:26 +00:00
ccdf4652cf feat: upgrade ArgoCD to V3 2025-05-23 17:46:34 +00:00
1b37ad37e7 Merge pull request 'chore(deps): update helm release argo-cd to v8' (#85) from renovate/kubezero-argo-major-kubezero-argo-dependencies into main
Reviewed-on: #85
2025-05-23 16:28:58 +00:00
94e3458364 chore(deps): update helm release argo-cd to v8 2025-05-23 16:28:58 +00:00
e7673a1e59 Feat: first working Opensearch V3 2025-05-23 14:08:35 +00:00
405eea7119 feat: add opensearch helm charts 2025-05-23 12:22:37 +00:00
1c1ff749af Merge pull request 'chore(deps): update kubezero-telemetry-dependencies' (#103) from renovate/kubezero-telemetry-kubezero-telemetry-dependencies into main
Reviewed-on: #103
2025-05-23 12:19:12 +00:00
cc6650becb chore(deps): update kubezero-telemetry-dependencies 2025-05-23 12:19:12 +00:00
4184cef843 chore: adjust control plane requests 2025-05-23 12:16:34 +00:00
163 changed files with 867 additions and 4647 deletions

View File

@@ -2,7 +2,13 @@
 # All things BEFORE the first controller / control plane upgrade
 pre_control_plane_upgrade_cluster() {
-  echo
+  if [ "$PLATFORM" != "gke" ];then
+    # patch multus DS to ONLY run pods on 1.31 controllers
+    kubectl patch ds kube-multus-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
+
+    # patch kube-proxy DS to ONLY run pods on 1.31 controllers
+    kubectl patch ds kube-proxy -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
+  fi
 }

@@ -16,7 +22,20 @@ post_control_plane_upgrade_cluster() {
 pre_cluster_upgrade_final() {
   set +e
-  echo
+  if [ "$PLATFORM" != "gke" ];then
+    # cleanup multus
+    kubectl delete clusterrolebinding multus
+    kubectl delete clusterrole multus
+    kubectl delete serviceaccount multus -n kube-system
+    kubectl delete cm multus-cni-config -n kube-system
+    kubectl delete ds kube-multus-ds -n kube-system
+    kubectl delete NetworkAttachmentDefinition cilium
+    kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io
+
+    # remove kube-proxy
+    kubectl -n kube-system delete ds kube-proxy
+    kubectl -n kube-system delete cm kube-proxy
+  fi
   set -e
 }
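The two patches above pin the multus and kube-proxy DaemonSets to the outgoing 1.31 controllers while the new control plane is rolled, and the final phase deletes them entirely. A minimal verification sketch (not part of the diff; the label value is taken from the patch payload above):

```bash
# Both DaemonSets should carry the 1.31 pin in their pod template
# until the final-phase cleanup removes them altogether.
for ds in kube-multus-ds kube-proxy; do
  kubectl -n kube-system get ds "$ds" \
    -o jsonpath='{.metadata.name}{"\t"}{.spec.template.spec.nodeSelector}{"\n"}'
done
```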

View File

@@ -63,7 +63,7 @@ render_kubeadm() {
   # Assemble kubeadm config
   cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster KubeProxy Kubelet; do
+  for f in Cluster Kubelet; do
     # echo "---" >> /etc/kubernetes/kubeadm.yaml
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done

@@ -169,7 +169,7 @@ kubeadm_upgrade() {
   else
     pre_cluster_upgrade_final

-    _kubeadm upgrade apply phase addon all $KUBE_VERSION
+    _kubeadm upgrade apply phase addon coredns $KUBE_VERSION

     post_cluster_upgrade_final

@@ -239,7 +239,7 @@ control_plane_node() {
   if [[ "$CMD" =~ ^(join)$ ]]; then
     # Delete any former self in case forseti did not delete yet
     kubectl delete node ${NODENAME} --wait=true || true
-    # Wait for all pods to be deleted otherwise we end up with stale pods eg. kube-proxy and all goes to ....
+    # Wait for all pods to be deleted otherwise we end up with stale pods
     kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}

     # get current running etcd pods for etcdctl commands

@@ -251,7 +251,7 @@ control_plane_node() {
     done

     # see if we are a former member and remove our former self if so
-    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
+    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
     [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints

     # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades

@@ -309,8 +309,9 @@ control_plane_node() {
     _kubeadm init phase mark-control-plane
     _kubeadm init phase kubelet-finalize all

+    # we skip kube-proxy
     if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
-      _kubeadm init phase addon all
+      _kubeadm init phase addon coredns
     fi

     post_kubeadm

View File

@@ -3,6 +3,7 @@
 # Simulate well-known CRDs being available
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"

 LOCAL_DEV=${LOCAL_DEV:-""}
+ENV_VALUES=""

 export HELM_SECRETS_BACKEND="vals"

@@ -80,15 +81,19 @@ function get_kubezero_secret() {
   get_secret_val kubezero kubezero-secrets "$1"
 }

-function ensure_kubezero_secret_key() {
-  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
-  local key=""
-  local val=""
-
-  for key in $@; do
-    val=$(echo "$secret" | yq ".data.\"$key\"")
+function ensure_kubezero_secret_key() {
+  local ns=$1
+  local secret=$2
+
+  local secret="$(kubectl get secret -n $ns $secret -o yaml)"
+  local key
+  local val
+
+  for key in $1; do
+    val=$(echo $secret | yq ".data.\"$key\"")
     if [ "$val" == "null" ]; then
-      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
+      set_kubezero_secret $key ""
     fi
   done
 }

@@ -210,7 +215,7 @@ for manifest in yaml.safe_load_all(sys.stdin):

 # helm template | kubectl apply -f -
 # confine to one namespace if possible
 function render() {
-  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
+  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $ENV_VALUES \
     | python3 -c '
 #!/usr/bin/python3
 import yaml

@@ -262,6 +267,11 @@ function _helm() {
     crds

+  elif [ $action == "dryrun" ]; then
+    cat $WORKDIR/values.yaml
+    render
+    cat $WORKDIR/helm.yaml
+
   elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml

View File

@@ -10,7 +10,14 @@ def migrate(values):

     # 1.32
     try:
+        values["istio-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-ingress"]["gateway"]["service"]["ports"]
+        values["istio-ingress"]["gateway"]["service"].pop("ports")
+    except KeyError:
         pass
+
+    try:
+        values["istio-private-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-private-ingress"]["gateway"]["service"]["ports"]
+        values["istio-private-ingress"]["gateway"]["service"].pop("ports")
     except KeyError:
         pass

View File

@@ -47,7 +47,6 @@ Kubernetes: `>= 1.32.0-0`
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
-- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 - https://github.com/awslabs/amazon-eks-ami

View File

@@ -22,7 +22,6 @@
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
-- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
 - https://github.com/awslabs/amazon-eks-ami

View File

@@ -29,12 +29,6 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
-  - level: None
-    users: ["system:kube-proxy"]
-    verbs: ["watch"]
-    resources:
-      - group: "" # core
-        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.

View File

@@ -6,6 +6,8 @@ featureGates:
   ControlPlaneKubeletLocalMode: true
   NodeLocalCRISocket: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
+proxy:
+  disabled: true
 networking:
   podSubnet: 10.244.0.0/16
 etcd:

View File

@@ -7,6 +7,8 @@ localAPIEndpoint:
 patches:
   directory: {{ . }}
 {{- end }}
+skipPhases:
+  - addon/kube-proxy
 nodeRegistration:
   criSocket: "unix:///run/containerd/containerd.sock"
   ignorePreflightErrors:
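Together with `proxy.disabled: true` in the ClusterConfiguration above, skipping the `addon/kube-proxy` phase means freshly bootstrapped or joined nodes never get kube-proxy installed. A hedged spot-check, assuming a cluster built from these templates:

```bash
# Neither the DaemonSet nor its ConfigMap should exist anymore;
# --ignore-not-found keeps the command quiet when they are (correctly) absent.
kubectl -n kube-system get ds/kube-proxy cm/kube-proxy --ignore-not-found
```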

View File

@@ -1,10 +0,0 @@
-apiVersion: kubeproxy.config.k8s.io/v1alpha1
-kind: KubeProxyConfiguration
-# kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
-metricsBindAddress: "0.0.0.0:10249"
-mode: "iptables"
-logging:
-  format: json
-iptables:
-  localhostNodePorts: false
-#nodePortAddresses: primary

View File

@@ -3,12 +3,6 @@ kind: Policy
 rules:
   # The following requests were manually identified as high-volume and low-risk,
   # so drop them.
-  - level: None
-    users: ["system:kube-proxy"]
-    verbs: ["watch"]
-    resources:
-      - group: "" # core
-        resources: ["endpoints", "services", "services/status"]
   - level: None
     # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
     # TODO(#46983): Change this to the ingress controller service account.

@@ -114,7 +108,7 @@ rules:
   # Get responses can be large; skip them.
   - level: Request
     verbs: ["get", "list", "watch"]
-    resources: 
+    resources:
       - group: "" # core
       - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"

@@ -137,7 +131,7 @@ rules:
       - "RequestReceived"
   # Default level for known APIs
   - level: RequestResponse
-    resources: 
+    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"

View File

@@ -4,5 +4,5 @@ spec:
     - name: kube-apiserver
       resources:
         requests:
-          cpu: 250m
-          memory: 1268Mi
+          cpu: 200m
+          memory: 1536Mi

View File

@@ -3,5 +3,5 @@ spec:
     - name: kube-controller-manager
       resources:
         requests:
-          cpu: 50m
-          memory: 192Mi
+          cpu: 30m
+          memory: 128Mi

View File

@@ -3,5 +3,5 @@ spec:
     - name: kube-scheduler
       resources:
         requests:
-          cpu: 50m
-          memory: 96Mi
+          cpu: 30m
+          memory: 64Mi

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.8.14
+version: 0.8.15
 appVersion: v1.31
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

@@ -34,7 +34,7 @@ dependencies:
     repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
   - name: neuron-helm-chart
-    version: 1.1.1
+    version: 1.1.2
     # https://github.com/aws-neuron/neuron-helm-charts/tree/main/charts/neuron-helm-chart
     repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
     condition: neuron-helm-chart.enabled

@@ -43,7 +43,7 @@ dependencies:
     repository: https://bitnami-labs.github.io/sealed-secrets
     condition: sealed-secrets.enabled
   - name: aws-node-termination-handler
-    version: 0.27.0
+    version: 0.27.1
     repository: "oci://public.ecr.aws/aws-ec2/helm"
     condition: aws-node-termination-handler.enabled
   - name: aws-eks-asg-rolling-update-handler

View File

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.3.3
+version: 0.4.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -22,7 +22,7 @@ dependencies:
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.9.0
+    version: 8.0.9
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-image-updater

View File

@@ -1,6 +1,6 @@
 # kubezero-argo

-![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square)
+![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square)

 KubeZero Argo - Events, Workflow, CD

@@ -18,7 +18,7 @@ Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 7.9.0 |
+| https://argoproj.github.io/argo-helm | argo-cd | 8.0.9 |
 | https://argoproj.github.io/argo-helm | argo-events | 2.4.15 |
 | https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.1 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |

@@ -28,17 +28,16 @@ Kubernetes: `>= 1.30.0-0`
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
 | argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
+| argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
 | argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n health.lua: |\n hs = {}\n hs.status = \"Progressing\"\n hs.message = \"\"\n if obj.status ~= nil then\n if obj.status.health ~= nil then\n hs.status = obj.status.health.status\n if obj.status.health.message ~= nil then\n hs.message = obj.status.health.message\n end\n end\n end\n return hs\n"` | |
-| argo-cd.configs.cm."server.rbac.log.enforce.enable" | string | `nil` | |
 | argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
-| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
+| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.32 - Release notes"` | |
 | argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
 | argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
-| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.31"` | |
+| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.32"` | |
 | argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
 | argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
 | argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
-| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
 | argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
 | argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
 | argo-cd.configs.params."server.insecure" | bool | `true` | |

@@ -54,7 +53,7 @@ Kubernetes: `>= 1.30.0-0`
 | argo-cd.dex.enabled | bool | `false` | |
 | argo-cd.enabled | bool | `false` | |
 | argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
-| argo-cd.global.image.tag | string | `"v2.14.9-1"` | |
+| argo-cd.global.image.tag | string | `"v3.0.3"` | |
 | argo-cd.global.logging.format | string | `"json"` | |
 | argo-cd.global.networkPolicy.create | bool | `true` | |
 | argo-cd.istio.enabled | bool | `false` | |

View File

@@ -4,6 +4,6 @@ condition: 'index .Values "argo-cd" "controller" "metrics" "enabled"'
 folder: KubeZero
 dashboards:
   - name: ArgoCD
-    url: https://grafana.com/api/dashboards/14584/revisions/1/download
+    url: https://raw.githubusercontent.com/argoproj/argo-cd/refs/heads/master/examples/dashboard.json
     tags:
       - ArgoCD

File diff suppressed because one or more lines are too long

View File

@@ -25,9 +25,8 @@ spec:
     automated:
       prune: true
     syncOptions:
-      - ServerSideApply=true
       - ApplyOutOfSyncOnly=true
   info:
     - name: "Source:"
-      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.31/"
+      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.32/"
 {{- end }}

View File

@@ -25,8 +25,8 @@ argo-events:
       # do NOT use -alpine tag as the entrypoint differs
       versions:
         - version: 2.10.11
-          natsImage: nats:2.11.1-scratch
-          metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
+          natsImage: nats:2.11.4-scratch
+          metricsExporterImage: natsio/prometheus-nats-exporter:0.17.3
           configReloaderImage: natsio/nats-server-config-reloader:0.14.1
           startCommand: /nats-server

@@ -38,7 +38,7 @@ argo-cd:
       format: json
     image:
       repository: public.ecr.aws/zero-downtime/zdt-argocd
-      tag: v2.14.9-1
+      tag: v3.0.3
     networkPolicy:
       create: true

@@ -49,8 +49,8 @@ argo-cd:
        .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }

     cm:
-      ui.bannercontent: "KubeZero v1.31 - Release notes"
-      ui.bannerurl: "https://kubezero.com/releases/v1.31"
+      ui.bannercontent: "KubeZero v1.32 - Release notes"
+      ui.bannerurl: "https://kubezero.com/releases/v1.32"
       ui.bannerpermanent: "true"
       ui.bannerposition: "bottom"

@@ -59,9 +59,9 @@ argo-cd:
       timeout.reconciliation: 300s
+      application.resourceTrackingMethod: annotation
       installationID: "KubeZero-ArgoCD"
       application.instanceLabelKey: Null
-      server.rbac.log.enforce.enable: Null

       resource.customizations: |
         argoproj.io/Application:

@@ -89,7 +89,6 @@ argo-cd:
       extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
     params:
-      controller.resource.health.persist: "false"
       controller.diff.server.side: "true"
       controller.sync.timeout.seconds: 1800

View File

@@ -289,7 +289,7 @@ trivy:
     #tag: 0.57.0
   persistence:
     enabled: true
-    size: 1Gi
+    size: 2Gi
   rbac:
     create: false

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-falco
 description: Falco Container Security and Audit components
 type: application
-version: 0.1.2
+version: 0.1.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -16,7 +16,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: falco
-    version: 4.2.5
+    version: 5.0.0
     repository: https://falcosecurity.github.io/charts
     condition: k8saudit.enabled
     alias: k8saudit

View File

@@ -42,17 +42,3 @@ helm.sh/chart: {{ include "kubezero-lib.chart" . }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/part-of: kubezero
 {{- end -}}
-
-{{- /*
-kubezero-lib.util.merge will merge two YAML templates and output the result.
-This takes an array of three values:
-- the top context
-- the template name of the overrides (destination)
-- the template name of the base (source)
-*/ -}}
-{{- define "kubezero-lib.util.merge" -}}
-{{- $top := first . -}}
-{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
-{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
-{{- toYaml (merge $overrides $tpl) -}}
-{{- end -}}

View File

@@ -12,7 +12,7 @@ kube-prometheus-stack:
   kubeStateMetrics:
     enabled: true

   kubeProxy:
-    enabled: true
+    enabled: false
   kubeEtcd:
     enabled: true

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-mq
 description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
 type: application
-version: 0.3.11
+version: 0.3.12
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -17,7 +17,7 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: nats
-    version: 1.3.3
+    version: 1.3.7
     repository: https://nats-io.github.io/k8s/helm/charts/
     condition: nats.enabled
   - name: rabbitmq

View File

@@ -1,6 +1,6 @@
 # kubezero-mq

-![Version: 0.3.11](https://img.shields.io/badge/Version-0.3.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.3.12](https://img.shields.io/badge/Version-0.3.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for MQ systems like NATS, RabbitMQ

@@ -20,7 +20,7 @@ Kubernetes: `>= 1.26.0`
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.7 |

 ## Values

@@ -32,6 +32,8 @@ Kubernetes: `>= 1.26.0`
 | nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | nats.mqtt.enabled | bool | `false` | |
 | nats.natsBox.enabled | bool | `false` | |
+| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".maxSkew | int | `1` | |
+| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".whenUnsatisfiable | string | `"DoNotSchedule"` | |
 | nats.promExporter.enabled | bool | `false` | |
 | nats.promExporter.podMonitor.enabled | bool | `false` | |
 | rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |

View File

@@ -1,7 +1,7 @@
 configmap: grafana-dashboards-nats
-condition: '.Values.nats.exporter.serviceMonitor.enabled'
+condition: '.Values.nats.promExporter.podMonitor.enabled'
 gzip: true
 # folder:
 dashboards:
   - name: nats
     url: https://grafana.com/api/dashboards/13707/revisions/1/download

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-network
 description: KubeZero umbrella chart for all things network
 type: application
-version: 0.5.8
+version: 0.5.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -10,7 +10,6 @@ keywords:
   - multus
   - cilium
   - aws-cni
-  - metallb
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net

@@ -19,13 +18,9 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
-    version: 1.17.3
+    version: 1.17.4
     repository: https://helm.cilium.io/
     condition: cilium.enabled
-  - name: metallb
-    version: 0.14.9
-    repository: https://metallb.github.io/metallb
-    condition: metallb.enabled
   - name: haproxy
     version: 1.24.0
     repository: https://haproxytech.github.io/helm-charts

View File

@@ -1,6 +1,6 @@
 # kubezero-network

-![Version: 0.5.8](https://img.shields.io/badge/Version-0.5.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.9](https://img.shields.io/badge/Version-0.5.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for all things network

@@ -20,20 +20,22 @@ Kubernetes: `>= 1.30.0-0`
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
-| https://helm.cilium.io/ | cilium | 1.17.3 |
-| https://metallb.github.io/metallb | metallb | 0.14.9 |
+| https://helm.cilium.io/ | cilium | 1.17.4 |

 ## Values

 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
+| cilium.bpf.preallocateMaps | bool | `true` | |
 | cilium.cgroup.autoMount.enabled | bool | `false` | |
 | cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` | |
 | cilium.cluster.id | int | `240` | |
 | cilium.cluster.name | string | `"default"` | |
 | cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
-| cilium.cni.exclusive | bool | `false` | |
+| cilium.cni.exclusive | bool | `true` | |
+| cilium.cni.iptablesRemoveAWSRules | bool | `false` | |
 | cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
+| cilium.dnsProxy.enableTransparentMode | bool | `true` | |
 | cilium.enabled | bool | `false` | |
 | cilium.envoy.enabled | bool | `false` | |
 | cilium.hubble.enabled | bool | `false` | |

@@ -45,6 +47,9 @@ Kubernetes: `>= 1.30.0-0`
 | cilium.hubble.ui.enabled | bool | `false` | |
 | cilium.image.useDigest | bool | `false` | |
 | cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
+| cilium.k8sServiceHost | string | `""` | |
+| cilium.k8sServicePort | int | `6443` | |
+| cilium.kubeProxyReplacement | bool | `true` | |
 | cilium.l7Proxy | bool | `false` | |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cilium.operator.prometheus.enabled | bool | `false` | |

@@ -54,12 +59,13 @@ Kubernetes: `>= 1.30.0-0`
 | cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
 | cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
+| cilium.operator.tolerations[2].effect | string | `"NoSchedule"` | |
+| cilium.operator.tolerations[2].key | string | `"node.kubernetes.io/not-ready"` | |
 | cilium.prometheus.enabled | bool | `false` | |
 | cilium.prometheus.port | int | `9091` | |
 | cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
-| cilium.resources.limits.memory | string | `"1Gi"` | |
-| cilium.resources.requests.cpu | string | `"10m"` | |
-| cilium.resources.requests.memory | string | `"160Mi"` | |
+| cilium.resources.requests.cpu | string | `"50m"` | |
+| cilium.resources.requests.memory | string | `"256Mi"` | |
 | cilium.routingMode | string | `"tunnel"` | |
 | cilium.sysctlfix.enabled | bool | `false` | |
 | cilium.tunnelProtocol | string | `"geneve"` | |

@@ -107,11 +113,6 @@ Kubernetes: `>= 1.30.0-0`
 | haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
 | haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
 | haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
-| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
-| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
-| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
-| metallb.enabled | bool | `false` | |
-| metallb.ipAddressPools | list | `[]` | |
 | multus.clusterNetwork | string | `"cilium"` | |
 | multus.defaultNetworks | list | `[]` | |
 | multus.enabled | bool | `false` | |

View File

@@ -1,27 +0,0 @@
-{{- if .Values.metallb.enabled }}
-apiVersion: metallb.io/v1beta1
-kind: L2Advertisement
-metadata:
-  name: l2advertisement1
-  namespace: kube-system
-spec:
-  ipAddressPools:
-  {{- range $key, $val := .Values.metallb.ipAddressPools }}
-  {{- if eq $val.protocol "layer2" }}
-    - {{ $val.name }}
-  {{- end }}
-  {{- end }}
----
-{{- range $key, $val := .Values.metallb.ipAddressPools }}
-apiVersion: metallb.io/v1beta1
-kind: IPAddressPool
-metadata:
-  name: {{ $val.name }}
-  namespace: kube-system
-spec:
-  addresses:
-  {{- $val.addresses | toYaml | nindent 4 }}
-{{- end }}
----
-{{- end }}

View File

@@ -1,19 +1,3 @@
-metallb:
-  enabled: false
-
-  controller:
-    tolerations:
-    - key: node-role.kubernetes.io/control-plane
-      effect: NoSchedule
-    nodeSelector:
-      node-role.kubernetes.io/control-plane: ""
-
-  ipAddressPools: []
-  #- name: my-ip-space
-  #  protocol: layer2
-  #  addresses:
-  #  - 192.168.42.0/24
-
 multus:
   enabled: false
   image:

@@ -33,17 +17,18 @@ cilium:
   resources:
     requests:
-      cpu: 10m
-      memory: 160Mi
-    limits:
-      memory: 1Gi
+      cpu: 50m
+      memory: 256Mi
+    # limits:
+    #   memory: 1Gi
       # cpu: 4000m

   cni:
     binPath: "/usr/libexec/cni"
     logFile: /var/log/cilium-cni.log
     #-- Ensure this is false if multus is enabled
-    exclusive: false
+    exclusive: true
+    iptablesRemoveAWSRules: false

   cluster:
     # This should match the second octet of clusterPoolIPv4PodCIDRList

@@ -63,13 +48,32 @@ cilium:
     enabled: false
     #rollOutCiliumPods: true

+  kubeProxyReplacement: true
+
+  dnsProxy:
+    enableTransparentMode: true
+
+  # For LB support via L2announcement or BGP - on-prem only
+  # l2announcements:
+  #   enabled: true
+
+  # Not needed normally
+  # externalIPs:
+  #   enabled: true
+
+  k8sServiceHost: ""
+  k8sServicePort: 6443
+  # k8s:
+  #   # This has to be set to the DNS name of all API servers
+  #   # For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
+  #   apiServerURLs: ""
+
   cgroup:
     autoMount:
       enabled: false
     hostRoot: "/sys/fs/cgroup"

+  bpf:
+    preallocateMaps: true
+
   # we need biDirectional so use helm init-container
-  #bpf:
   #  autoMount:
   #    enabled: false

@@ -91,9 +95,11 @@ cilium:
       - key: node-role.kubernetes.io/control-plane
         effect: NoSchedule
       # the operator removes the taints,
-      # so we need to break chicken egg on single controller
+      # so we need to break chicken egg
       - key: node.cilium.io/agent-not-ready
        effect: NoSchedule
+      - key: node.kubernetes.io/not-ready
+        effect: NoSchedule
     nodeSelector:
       node-role.kubernetes.io/control-plane: ""
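With metallb dropped and `kubeProxyReplacement: true`, Cilium takes over Service handling from kube-proxy. A hedged post-upgrade check, assuming the agent DaemonSet is named `cilium` in `kube-system` as in the upstream chart:

```bash
# cilium-dbg is the CLI shipped inside recent Cilium agent images;
# the summary should report KubeProxyReplacement: True.
kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium-dbg status | grep -i kubeproxyreplacement
```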

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-telemetry
 description: KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
 type: application
-version: 0.4.1
+version: 0.5.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -19,23 +19,31 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: opentelemetry-collector
-    version: 0.108.0
+    version: 0.125.0
     repository: https://open-telemetry.github.io/opentelemetry-helm-charts
     condition: opentelemetry-collector.enabled
+  - name: opensearch
+    version: 3.0.0
+    repository: https://opensearch-project.github.io/helm-charts/
+    condition: opensearch.enabled
+  - name: opensearch-dashboards
+    version: 3.0.0
+    repository: https://opensearch-project.github.io/helm-charts/
+    condition: opensearch-dashboards.enabled
   - name: data-prepper
-    version: 0.1.0
+    version: 0.3.1
     repository: https://opensearch-project.github.io/helm-charts/
     condition: data-prepper.enabled
   - name: jaeger
-    version: 3.3.1
+    version: 3.4.1
     repository: https://jaegertracing.github.io/helm-charts
     condition: jaeger.enabled
   - name: fluentd
-    version: 0.5.2
+    version: 0.5.3
     repository: https://fluent.github.io/helm-charts
     condition: fluentd.enabled
   - name: fluent-bit
-    version: 0.47.10
+    version: 0.49.0
     repository: https://fluent.github.io/helm-charts
     condition: fluent-bit.enabled
-kubeVersion: ">= 1.26.0"
+kubeVersion: ">= 1.30.0-0"

View File

@@ -1,6 +1,6 @@
 # kubezero-telemetry

-![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.4.2](https://img.shields.io/badge/Version-0.4.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.

@@ -14,16 +14,18 @@ KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
 ## Requirements

-Kubernetes: `>= 1.26.0`
+Kubernetes: `>= 1.30.0-0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
-| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
-| https://jaegertracing.github.io/helm-charts | jaeger | 3.3.1 |
-| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.108.0 |
-| https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.1.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://fluent.github.io/helm-charts | fluent-bit | 0.49.0 |
+| https://fluent.github.io/helm-charts | fluentd | 0.5.3 |
+| https://jaegertracing.github.io/helm-charts | jaeger | 3.4.1 |
+| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.125.0 |
+| https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.3.1 |
+| https://opensearch-project.github.io/helm-charts/ | opensearch | 3.0.0 |
+| https://opensearch-project.github.io/helm-charts/ | opensearch-dashboards | 3.0.0 |

 ## Values

@@ -135,7 +137,7 @@ Kubernetes: `>= 1.26.0`
 | fluentd.service.ports[1].containerPort | int | `9880` | |
 | fluentd.service.ports[1].name | string | `"http-fluentd"` | |
 | fluentd.service.ports[1].protocol | string | `"TCP"` | |
-| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
+| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey?inCluster"` | |
 | fluentd.volumeMounts[0].mountPath | string | `"/run/pki"` | |
 | fluentd.volumeMounts[0].name | string | `"trust-store"` | |
 | fluentd.volumeMounts[0].readOnly | bool | `true` | |

@@ -167,13 +169,27 @@ Kubernetes: `>= 1.26.0`
 | jaeger.storage.elasticsearch.user | string | `"admin"` | |
 | jaeger.storage.type | string | `"elasticsearch"` | |
 | metrics.enabled | bool | `false` | |
-| opensearch.dashboard.enabled | bool | `false` | |
-| opensearch.dashboard.istio.enabled | bool | `false` | |
-| opensearch.dashboard.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
-| opensearch.dashboard.istio.url | string | `"telemetry-dashboard.example.com"` | |
-| opensearch.nodeSets | list | `[]` | |
-| opensearch.prometheus | bool | `false` | |
-| opensearch.version | string | `"2.17.0"` | |
+| opensearch-dashboards.enabled | bool | `false` | |
+| opensearch-dashboards.istio.enabled | bool | `false` | |
+| opensearch-dashboards.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
+| opensearch-dashboards.istio.url | string | `"telemetry-dashboard.example.com"` | |
+| opensearch-dashboards.resources.limits.cpu | string | `nil` | |
+| opensearch-dashboards.resources.limits.memory | string | `"512M"` | |
+| opensearch-dashboards.resources.requests.cpu | string | `"100m"` | |
+| opensearch-dashboards.resources.requests.memory | string | `"512M"` | |
+| opensearch-dashboards.serviceMonitor.enabled | bool | `false` | |
+| opensearch-dashboards.serviceMonitor.interval | string | `"30s"` | |
+| opensearch.config."opensearch.yml" | string | `"cluster.name: opensearch-cluster\nnetwork.host: 0.0.0.0\ndiscovery.type: single-node\n"` | |
+| opensearch.enabled | bool | `false` | |
+| opensearch.maxUnavailable | int | `0` | |
+| opensearch.opensearchJavaOpts | string | `"-Xmx1024M -Xms1024M"` | |
+| opensearch.persistence.size | string | `"8Gi"` | |
+| opensearch.resources.limits.memory | string | `"2Gi"` | |
+| opensearch.resources.requests.cpu | string | `"500m"` | |
+| opensearch.resources.requests.memory | string | `"2Gi"` | |
+| opensearch.serviceMonitor.enabled | bool | `false` | |
+| opensearch.serviceMonitor.interval | string | `"30s"` | |
+| opensearch.singleNode | bool | `true` | |
 | opentelemetry-collector.config.exporters.otlp/data-prepper.endpoint | string | `"telemetry-data-prepper:21890"` | |
 | opentelemetry-collector.config.exporters.otlp/data-prepper.tls.insecure | bool | `true` | |
 | opentelemetry-collector.config.exporters.otlp/jaeger.endpoint | string | `"telemetry-jaeger-collector:4317"` | |

View File

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: v1.16.2
+appVersion: v1.17.1
 description: A Helm chart for Kubernetes
 home: https://www.fluentd.org/
 icon: https://www.fluentd.org/images/miscellany/fluentd-logo_2x.png

@@ -12,4 +12,4 @@ name: fluentd
 sources:
   - https://github.com/fluent/fluentd/
   - https://github.com/fluent/fluentd-kubernetes-daemonset
-version: 0.5.2
+version: 0.5.3

View File

@@ -1,5 +1,5 @@
 {{- define "fluentd.pod" -}}
-{{- $defaultTag := printf "%s-debian-%s-1.0" (.Chart.AppVersion) (.Values.variant) -}}
+{{- $defaultTag := printf "%s-debian-%s-1.2" (.Chart.AppVersion) (.Values.variant) -}}
 {{- with .Values.imagePullSecrets }}
 imagePullSecrets:
   {{- toYaml . | nindent 2 }}

View File

@@ -21,7 +21,7 @@
 .idea/
 *.tmproj
 .vscode/
-
+examples/
 # Ignore unittest
 tests/
 */__snapshot__/*

View File

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 0.111.0
+appVersion: 0.126.0
 description: OpenTelemetry Collector Helm chart for Kubernetes
 home: https://opentelemetry.io/
 icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png

@@ -12,4 +12,4 @@ sources:
   - https://github.com/open-telemetry/opentelemetry-collector
   - https://github.com/open-telemetry/opentelemetry-collector-contrib
 type: application
-version: 0.108.0
+version: 0.125.0

View File

@ -19,7 +19,7 @@ helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm
To install the chart with the release name my-opentelemetry-collector, run the following command: To install the chart with the release name my-opentelemetry-collector, run the following command:
```console ```console
helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="otel/opentelemetry-collector-k8s" --set command.name="otelcol-k8s" helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
``` ```
Where the `mode` value needs to be set to one of `daemonset`, `deployment` or `statefulset`. Where the `mode` value needs to be set to one of `daemonset`, `deployment` or `statefulset`.
@ -35,7 +35,7 @@ See [UPGRADING.md](UPGRADING.md).
OpenTelemetry Collector recommends to bind receivers' servers to addresses that limit connections to authorized users. OpenTelemetry Collector recommends to bind receivers' servers to addresses that limit connections to authorized users.
For this reason, by default the chart binds all the Collector's endpoints to the pod's IP. For this reason, by default the chart binds all the Collector's endpoints to the pod's IP.
More info is available in the [Security Best Practices docummentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) More info is available in the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks)
Some care must be taken when using `hostNetwork: true`, as then OpenTelemetry Collector will listen on all the addresses in the host network namespace. Some care must be taken when using `hostNetwork: true`, as then OpenTelemetry Collector will listen on all the addresses in the host network namespace.
@ -106,6 +106,29 @@ to read the files where Kubernetes container runtime writes all containers' cons
#### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion" #### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion"
#### Log collection for a subset of pods or containers
The `logsCollection` preset ingests the logs of all Kubernetes containers by default.
This is achieved by using an include path of `/var/log/pods/*/*/*.log` for the `filelog` receiver.
To limit collection to a subset of pods or containers, the `filelog`
receiver's `include` list can be overridden by supplying explicit configuration.
For example, the following configuration only ingests logs for pods within the namespace `example-namespace`:
```yaml
mode: daemonset
presets:
logsCollection:
enabled: true
config:
receivers:
filelog:
include:
- /var/log/pods/example-namespace_*/*/*.log
```
The container logs pipeline uses the `debug` exporter by default.
Paired with the default `filelog` receiver that receives all containers' console output,
it is easy to accidentally feed the exported logs back into the receiver.
@ -149,6 +172,10 @@ This feature is disabled by default. It has the following requirements:
- It requires the [Kubernetes Attributes processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
#### :memo: Note: Changing or supplementing `k8sattributes` scopes
In order to minimize the collector's privileges, the [Kubernetes RBAC Rules](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) that are applied to the collector as part of this chart are the minimum required for the `presets.kubernetesAttributes` preset to work. If additional configuration scopes are desired outside of the preset, you must apply the corresponding RBAC rules to grant the collector access.
To enable this feature, set the `presets.kubernetesAttributes.enabled` property to `true`.
Here is an example `values.yaml`:
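The example itself falls outside the hunk shown here; a minimal sketch of what such a `values.yaml` could look like (illustrative only, not the README's original example) is:
```yaml
# Illustrative sketch: enables the kubernetesAttributes preset named above.
mode: deployment
image:
  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"
presets:
  kubernetesAttributes:
    enabled: true
```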


@ -4,6 +4,14 @@ These upgrade guidelines only contain instructions for version upgrades which re
If the version you want to upgrade to is not listed here, then there is nothing to do for you.
Just upgrade and enjoy.
## 0.121.0 to 0.122.0
In the v0.123.1 Collector release we stopped pushing images to Dockerhub due to how their new rate limit changes affected our CI. If you're using `otel/opentelemetry-collector-k8s` for the image you should switch to `ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s`. See https://github.com/open-telemetry/community/issues/2641 for more details.
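In chart values, that switch amounts to overriding the image repository, as the updated example values files below do, e.g.:
```yaml
# Override matching the repository change applied throughout the examples in this diff.
image:
  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
```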
## 0.110.0 to 0.110.1 or 0.110.2
We broke the selector labels in `0.110.0`, which causes `helm upgrades` to fail. Do not attempt to upgrade from `0.110.0` to either `0.110.1` or `0.110.2`. Go straight to `0.110.3` instead.
## 0.97.2 to 0.98.0
> [!WARNING]


@ -1,7 +1,7 @@
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -4,7 +4,7 @@ global:
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"
@ -19,6 +19,10 @@ resources:
podLabels:
  testLabel: "{{ .Values.global.test }}"
additionalLabels:
  testLabel: "{{ .Values.global.test }}"
  someLabel: "someValue"
ingress:
  enabled: true
  hosts:


@ -1,7 +1,7 @@
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: statefulset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -0,0 +1,21 @@
mode: deployment
image:
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
resources:
limits:
cpu: 100m
memory: 200M
internalTelemetryViaOTLP:
endpoint: "http://localhost:4318"
headers:
- name: "x-opentelemetry-customer"
value: "a value"
traces:
enabled: true
metrics:
enabled: true
logs:
enabled: true


@ -1,7 +1,7 @@
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -0,0 +1,15 @@
mode: deployment
image:
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
resources:
limits:
cpu: 100m
memory: 200M
config:
service:
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -0,0 +1,17 @@
mode: deployment
image:
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
resources:
limits:
cpu: 100m
memory: 200M
config:
service:
telemetry:
metrics:
address: 0.0.0.0:8888
resource:
"k8s.namespace.name": "default"


@ -1,7 +1,7 @@
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: deployment
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: daemonset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,7 +1,7 @@
mode: statefulset
image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"


@ -1,17 +0,0 @@
# Examples of chart configuration
Here is a collection of common configurations for the OpenTelemetry collector. Each folder contains an example `values.yaml` and the resulting configurations that are generated by the opentelemetry-collector helm charts.
- [Daemonset only](daemonset-only)
- [Deployment only](deployment-only)
- [Daemonset and deployment](daemonset-and-deployment)
- [Log collection, including collector logs](daemonset-collector-logs)
- [Add component (hostmetrics)](daemonset-hostmetrics)
The manifests are rendered using the `helm template` command and the specific example folder's values.yaml.
Examples are generated by (from root of the repo):
```sh
make generate-examples CHARTS=opentelemetry-collector
```


@ -1,41 +0,0 @@
---
# Source: opentelemetry-collector/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["daemonsets", "deployments", "replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch", "list"]


@ -1,21 +0,0 @@
---
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: example-opentelemetry-collector
subjects:
- kind: ServiceAccount
name: example-opentelemetry-collector
namespace: default


@ -1,68 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
processors:
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.deployment.name
- k8s.statefulset.name
- k8s.daemonset.name
- k8s.cronjob.name
- k8s.job.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
receivers:
k8s_cluster:
collection_interval: 10s
k8sobjects:
objects:
- exclude_watch_type:
- DELETED
group: events.k8s.io
mode: watch
name: events
service:
pipelines:
logs:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8sobjects
metrics:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8s_cluster


@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 360fc84164ca26f5a57ecb44cbcec02ca473b09fc86dba876f71c9fa3617f656
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "3276MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 2
memory: 4Gi
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false


@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster


@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm


@ -1,34 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 1
resources:
limits:
cpu: 2
memory: 4Gi
presets:
clusterMetrics:
enabled: true
kubernetesAttributes:
enabled: true
kubernetesEvents:
enabled: true
alternateConfig:
exporters:
debug: {}
service:
pipelines:
logs:
exporters:
- debug
metrics:
exporters:
- debug


@ -1,34 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
config:
exporters:
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
service:
pipelines:
logs:
exporters:
- otlp
- debug
metrics:
exporters:
- otlp
- debug
traces:
exporters:
- otlp
- debug
resources:
limits:
cpu: 100m
memory: 200M


@ -1,13 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M


@ -1,93 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -1,104 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: a2d0d31bd929305e52879f78f502d56ad49d9ef9396838490646e9034d2243de
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
hostNetwork: false


@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false


@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster


@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm


@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
filelog:
exclude: []
include:
- /var/log/pods/*/*/*.log
include_file_name: false
include_file_path: true
operators:
- id: container-parser
max_log_size: 102400
type: container
retry_on_failure:
enabled: true
start_at: end
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- filelog
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -1,110 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 5237e54a1cdaad762876da10a5bab6f686506211aaa2c70b901a74fec8b82140
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: varlogpods
mountPath: /var/log/pods
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: varlogpods
hostPath:
path: /var/log/pods
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
hostNetwork: false


@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm


@ -1,12 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
logsCollection:
enabled: true
includeCollectorLogs: true


@ -1,133 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
hostmetrics:
collection_interval: 10s
root_path: /hostfs
scrapers:
cpu: null
disk: null
filesystem:
exclude_fs_types:
fs_types:
- autofs
- binfmt_misc
- bpf
- cgroup2
- configfs
- debugfs
- devpts
- devtmpfs
- fusectl
- hugetlbfs
- iso9660
- mqueue
- nsfs
- overlay
- proc
- procfs
- pstore
- rpc_pipefs
- securityfs
- selinuxfs
- squashfs
- sysfs
- tracefs
match_type: strict
exclude_mount_points:
match_type: regexp
mount_points:
- /dev/*
- /proc/*
- /sys/*
- /run/k3s/containerd/*
- /var/lib/docker/*
- /var/lib/kubelet/*
- /snap/*
load: null
memory: null
network: null
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
- hostmetrics
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -1,105 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 98dea268c8a8fe987e082a4e85801387f2b60fefc281f9b1edd1080f0af62574
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: hostfs
mountPath: /hostfs
readOnly: true
mountPropagation: HostToContainer
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: hostfs
hostPath:
path: /
hostNetwork: false


@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm


@ -1,12 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
hostMetrics:
enabled: true


@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -1,119 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
lifecycle:
preStop:
exec:
command:
- /test/sleep
- "5"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- mountPath: /test
name: test
initContainers:
- args:
- /bin/sleep
- /test/sleep
command:
- cp
image: 'busybox:latest'
name: test
volumeMounts:
- mountPath: /test
name: test
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- emptyDir: {}
name: test
hostNetwork: false


@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm


@ -1,37 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
global:
image: busybox:latest
initContainers:
- name: test
command:
- cp
args:
- /bin/sleep
- /test/sleep
image: "{{ .Values.global.image }}"
volumeMounts:
- name: test
mountPath: /test
extraVolumes:
- name: test
emptyDir: {}
extraVolumeMounts:
- name: test
mountPath: /test
lifecycleHooks:
preStop:
exec:
command:
- /test/sleep
- "5"


@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -1,98 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
hostNetwork: false


@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm


@ -1,7 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"


@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888


@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "3276MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 2
memory: 4Gi
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false


@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster


@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm


@ -1,14 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 3
resources:
limits:
cpu: 2
memory: 4Gi


@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
service:
extensions:
- health_check
pipelines:
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

Some files were not shown because too many files have changed in this diff.