Compare commits

..

10 Commits

179 changed files with 8254 additions and 3193 deletions

View File

@@ -61,6 +61,8 @@ function cert-manager-post() {
 # ArgoCD #
 ###########
 function argocd-pre() {
+  kubectl delete job argo-argocd-redis-secret-init -n argocd || true
+
   for f in $CLUSTER/secrets/argocd-*.yaml; do
     kubectl apply -f $f
   done
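Note: a Job's pod template is immutable, so re-running `kubectl apply` against the existing `argo-argocd-redis-secret-init` Job would fail; deleting it up front (with `|| true` so a missing Job doesn't abort the script) keeps the pre-hook idempotent. The generic pattern, with placeholder names:

```sh
# delete-then-apply for immutable Job specs; my-init-job is hypothetical
kubectl delete job my-init-job -n argocd --ignore-not-found
kubectl apply -f my-init-job.yaml
```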

View File

@@ -129,6 +129,8 @@ kubeadm_upgrade() {
   ### Remove with 1.31
   # migrate kubezero CM to kubezero NS
   # migrate ArgoCD app from values to valuesObject
+  create_ns kubezero
+
   if [ "$ARGOCD" == "True" ]; then
     kubectl get app kubezero -n argocd -o yaml > $WORKDIR/kubezero-argo-app.yaml
     if [ "$(yq '(.spec.source.helm | has "values")' $WORKDIR/kubezero-argo-app.yaml)" == "true" ]; then
@@ -137,11 +139,12 @@ kubeadm_upgrade() {
       kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/spec/source/helm/values"}]'
       kubectl delete cm kubezero-values -n kube-system > /dev/null || true
+      kubectl create configmap -n kubezero kubezero-values || true
     fi
   else
     kubectl get cm kubezero-values -n kubezero > /dev/null || \
-      { create_ns kubezero; kubectl get cm kubezero-values -n kube-system -o yaml | \
+      { kubectl get cm kubezero-values -n kube-system -o yaml | \
         sed 's/^  namespace: kube-system/  namespace: kubezero/' | \
         kubectl create -f - && \
         kubectl delete cm kubezero-values -n kube-system ; }
@@ -157,16 +160,18 @@ kubeadm_upgrade() {
   # Update kubezero-values CM
   kubectl get cm -n kubezero kubezero-values -o=yaml | \
     yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
-    kubectl apply --server-side --force-conflicts -f -
-
-  # update argo app
-  export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
-  kubectl get application kubezero -n argocd -o yaml | \
-    yq '.spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
-    kubectl apply --server-side --force-conflicts -f -
-
-  # finally remove annotation to allow argo to sync again
-  kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
+    kubectl replace -f -
+
+  if [ "$ARGOCD" == "True" ]; then
+    # update argo app
+    export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
+    kubectl get application kubezero -n argocd -o yaml | \
+      yq '.spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
+      kubectl apply --server-side --force-conflicts -f -
+
+    # finally remove annotation to allow argo to sync again
+    kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]' || true
+  fi
 
   # Local node upgrade
   render_kubeadm upgrade
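Note: the values update now pipes through `kubectl replace` (a full-object swap) instead of server-side apply, and every ArgoCD mutation is gated on `$ARGOCD` so clusters without Argo skip that branch entirely. The ConfigMap round-trip in isolation, as in the hunk above:

```sh
# read the CM, splice the new values file into one key via yq's load_str,
# then write the whole object back atomically
kubectl get cm -n kubezero kubezero-values -o yaml | \
  yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
  kubectl replace -f -
```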

View File

@@ -267,6 +267,8 @@ EOF
 function control_plane_upgrade() {
   TASKS="$1"
+
+  [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
 
   echo "Deploy cluster admin task: $TASKS"
   cat <<EOF | kubectl apply -f -
 apiVersion: v1
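Note: the new guard just defaults `KUBE_VERSION` when the caller didn't export one; the parameter-expansion form below is equivalent:

```sh
# same effect as: [ -z "$KUBE_VERSION" ] && KUBE_VERSION="latest"
KUBE_VERSION="${KUBE_VERSION:-latest}"
```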

View File

@@ -19,9 +19,6 @@ echo "Checking that all pods in kube-system are running ..."
 [ "$ARGOCD" == "True" ] && disable_argo
 
-# Preload cilium images to running nodes
-all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3"
-
 control_plane_upgrade kubeadm_upgrade
 
 echo "Control plane upgraded, <Return> to continue"
@@ -35,6 +32,10 @@ read -r
 #
 # upgrade modules
 #
+
+# Preload cilium images to running nodes
+all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3"
+
 control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
 
 echo "Checking that all pods in kube-system are running ..."
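Note: the preload now runs after the control-plane upgrade but right before the network module is applied, so each node already has the cilium image cached and the CNI DaemonSet rollout doesn't stall on registry pulls. To spot-check a node after the preload (same `chroot /host` convention as the hook):

```sh
# on a node: confirm the image is in the CRI cache
chroot /host crictl images | grep 'quay.io/cilium/cilium'
```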

View File

@@ -5,3 +5,6 @@ metricsBindAddress: "0.0.0.0:10249"
 mode: "iptables"
 logging:
   format: json
+iptables:
+  localhostNodePorts: false
+  #nodePortAddresses: primary
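Note: `localhostNodePorts: false` stops kube-proxy from exposing NodePorts on 127.0.0.1, which otherwise requires the `route_localnet` sysctl and has a history of security issues; `nodePortAddresses` is left commented as a possible follow-up. To check what the running kube-proxy actually loaded (ConfigMap name assumed):

```sh
kubectl get cm kube-proxy -n kube-system -o yaml | grep -A 2 'iptables:'
```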

View File

@@ -117,7 +117,7 @@ spec:
       containers:
         - name: aws-iam-authenticator
-          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.22
+          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.27
           args:
             - server
             - --backend-mode=CRD,MountedFile

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-logging
 description: KubeZero Umbrella Chart for complete EFK stack
 type: application
-version: 0.8.12
+version: 0.8.13
 appVersion: 1.6.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -24,7 +24,7 @@ dependencies:
     repository: https://fluent.github.io/helm-charts
     condition: fluentd.enabled
   - name: fluent-bit
-    version: 0.46.2
+    version: 0.47.10
     repository: https://fluent.github.io/helm-charts
     condition: fluent-bit.enabled
 kubeVersion: ">= 1.26.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-logging
 
-![Version: 0.8.12](https://img.shields.io/badge/Version-0.8.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
+![Version: 0.8.13](https://img.shields.io/badge/Version-0.8.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
 
 KubeZero Umbrella Chart for complete EFK stack
@@ -19,7 +19,7 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://fluent.github.io/helm-charts | fluent-bit | 0.46.2 |
+| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
 | https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
 
 ## Changes from upstream

View File

@@ -1,9 +1,9 @@
 annotations:
   artifacthub.io/changes: |
     - kind: changed
-      description: "Updated _Fluent Bit_ OCI image to [v3.0.2](https://github.com/fluent/fluent-bit/releases/tag/v3.0.2)."
+      description: "Updated Fluent Bit OCI image to v3.1.9"
 apiVersion: v1
-appVersion: 3.0.2
+appVersion: 3.1.9
 description: Fast and lightweight log processor and forwarder or Linux, OSX and BSD
   family operating systems.
 home: https://fluentbit.io/
@@ -24,4 +24,4 @@ maintainers:
 name: fluent-bit
 sources:
   - https://github.com/fluent/fluent-bit/
-version: 0.46.2
+version: 0.47.10

View File

@@ -5,3 +5,4 @@ logLevel: debug
 
 dashboards:
   enabled: true
+  deterministicUid: true

View File

@@ -1559,7 +1559,7 @@
   },
   "timezone": "",
   "title": "{{ include "fluent-bit.fullname" . }}",
-  "uid": "d557c8f6-cac1-445f-8ade-4c351a9076b1",
+  "uid": {{ ternary (printf "\"%s\"" (sha1sum (printf "%s-%s" .Release.Namespace (include "fluent-bit.fullname" .)))) "null" .Values.dashboards.deterministicUid }},
   "version": 7,
   "weekStart": ""
 }
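Note: with `dashboards.deterministicUid` enabled, the dashboard UID becomes the SHA-1 of `<namespace>-<fullname>` instead of a hard-coded UUID, so multiple releases no longer collide on the same Grafana UID while each release keeps a stable one across re-installs; when disabled it renders `null` and Grafana assigns a UID. The digest can be reproduced outside Helm (example values assumed):

```sh
# mirrors: printf "%s-%s" .Release.Namespace (include "fluent-bit.fullname" .) | sha1sum
printf '%s-%s' logging logging-fluent-bit | sha1sum | cut -d' ' -f1
```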

View File

@@ -119,7 +119,11 @@ containers:
   {{- end }}
 {{- end }}
 {{- if .Values.extraContainers }}
-  {{- toYaml .Values.extraContainers | nindent 2 }}
+  {{- if kindIs "string" .Values.extraContainers }}
+    {{- tpl .Values.extraContainers $ | nindent 2 }}
+  {{- else }}
+    {{- toYaml .Values.extraContainers | nindent 2 }}
+  {{- end -}}
 {{- end }}
 volumes:
   - name: config
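Note: `extraContainers` now accepts either a YAML array (rendered verbatim with `toYaml`) or a string (run through `tpl`, so it can reference chart values and capabilities). A render check against the upstream chart, assuming the repo alias from the chart README:

```sh
# string mode lets the image tag track the target cluster's version
cat > /tmp/extra.yaml <<'EOF'
extraContainers: |-
  - name: kubectl-version
    image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
    command: ['kubectl', 'version']
EOF
helm template fluent-bit fluent/fluent-bit -f /tmp/extra.yaml | grep -A 3 'name: kubectl-version'
```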

View File

@@ -13,6 +13,7 @@ rules:
       - pods
 {{- if .Values.rbac.nodeAccess }}
       - nodes
+      - nodes/metrics
       - nodes/proxy
 {{- end }}
 {{- if .Values.rbac.eventsAccess }}
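Note: `nodes/metrics` is the node subresource behind the kubelet's `/metrics` endpoints when they are reached through the API server; `nodes/proxy` alone doesn't cover it. A quick RBAC check (namespace and service-account names assumed):

```sh
kubectl auth can-i get nodes --subresource=metrics \
  --as=system:serviceaccount:logging:logging-fluent-bit
```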

View File

@@ -17,6 +17,9 @@ spec:
   {{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
   clusterIP: {{ .Values.service.clusterIP }}
   {{- end }}
+  {{- if .Values.service.externalIPs }}
+  externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }}
+  {{- end }}
   {{- if (eq .Values.kind "DaemonSet") }}
   {{- with .Values.service.internalTrafficPolicy }}
   internalTrafficPolicy: {{ . }}
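Note: this exposes plain `Service.spec.externalIPs`; traffic for any listed IP that arrives at a node is forwarded to the service ports, and the block renders only when the list is non-empty. Render check against the upstream chart (repo alias assumed):

```sh
helm template fluent-bit fluent/fluent-bit --set 'service.externalIPs[0]=2.2.2.2' \
  | grep -A 2 'externalIPs:'
```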

View File

@@ -101,6 +101,10 @@ service:
   #   prometheus.io/path: "/api/v1/metrics/prometheus"
   #   prometheus.io/port: "2020"
   #   prometheus.io/scrape: "true"
+
+  externalIPs: []
+  # externalIPs:
+  #   - 2.2.2.2
 
 serviceMonitor:
   enabled: false
@@ -178,6 +182,7 @@ dashboards:
   labelValue: 1
   annotations: {}
   namespace: ""
+  deterministicUid: false
 
 lifecycle: {}
   # preStop:
@@ -314,11 +319,21 @@ envWithTpl: []
 
 envFrom: []
 
+# This supports either a structured array or a templatable string
 extraContainers: []
+
+# Array mode
+# extraContainers:
 #   - name: do-something
 #     image: busybox
 #     command: ['do', 'something']
+
+# String mode
+# extraContainers: |-
+#   - name: do-something
+#     image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
+#     command: ['kubectl', 'version']
 
 flush: 1
 
 metricsPort: 2020

View File

@@ -9,7 +9,7 @@ FLUENT_BIT_VERSION=$(yq eval '.dependencies[] | select(.name=="fluent-bit") | .version' Chart.yaml)
 FLUENTD_VERSION=$(yq eval '.dependencies[] | select(.name=="fluentd") | .version' Chart.yaml)
 
 # fluent-bit
-patch_chart fluent-bit
+# patch_chart fluent-bit
 
 # FluentD
 patch_chart fluentd

View File

@@ -165,11 +165,13 @@ Kubernetes: `>= 1.26.0`
 | kube-prometheus-stack.prometheus.prometheusSpec.logFormat | string | `"json"` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues | bool | `false` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.portName | string | `"http-prometheus"` | |
+| kube-prometheus-stack.prometheus.prometheusSpec.probeSelectorNilUsesHelmValues | bool | `false` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.memory | string | `"4Gi"` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.cpu | string | `"500m"` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.memory | string | `"2Gi"` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.retention | string | `"8d"` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues | bool | `false` | |
+| kube-prometheus-stack.prometheus.prometheusSpec.scrapeConfigSelectorNilUsesHelmValues | bool | `false` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `false` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
 | kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |

View File

@@ -18,7 +18,7 @@
         "subdir": "contrib/mixin"
       }
     },
-    "version": "55de68d18c63fde8747f2cd9f7c2ff242346f756",
+    "version": "18eb5c6881d43064f4559034bf12c3ef6ce89e4b",
     "sum": "IXI3LQIT9NmTPJAk8WLUJd5+qZfcGpeNCyWIK7oEpws="
   },
   {
@@ -88,7 +88,7 @@
         "subdir": "grafana-builder"
       }
     },
-    "version": "3805ed5082bb7db66cd75badb57a673c965aa24c",
+    "version": "b9761545ddeac33ffe25bd87121076eab258c6f1",
     "sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="
   },
   {
@@ -128,7 +128,7 @@
         "subdir": "jsonnet/kube-state-metrics"
       }
     },
-    "version": "9652c29fe6ac08a1cb76112061f7d1010319d634",
+    "version": "45b7e20ee3cbecd6c5c2960a581fd5a931bd7826",
     "sum": "lO7jUSzAIy8Yk9pOWJIWgPRhubkWzVh56W6wtYfbVH4="
   },
   {
@@ -138,7 +138,7 @@
         "subdir": "jsonnet/kube-state-metrics-mixin"
       }
     },
-    "version": "9652c29fe6ac08a1cb76112061f7d1010319d634",
+    "version": "45b7e20ee3cbecd6c5c2960a581fd5a931bd7826",
     "sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
   },
   {
@@ -148,8 +148,8 @@
         "subdir": "jsonnet/kube-prometheus"
       }
     },
-    "version": "a030693b39a019a7475f6fb918abd37cb6d9d1ba",
-    "sum": "NKlS33HxtOMyC2GWUlb9W2F4WBLMqBJMWNFRxVIMA/Y="
+    "version": "69d9636b64192418d64912c032f5437361e88ea5",
+    "sum": "W4HnSyscMMutOCaDyjNZy1XXcdhRPibYuV1yVgqxXm0="
   },
   {
     "source": {
@@ -158,7 +158,7 @@
         "subdir": "jsonnet/mixin"
       }
     },
-    "version": "94031224b1c186ff6671ec9f447716f748ac9d31",
+    "version": "d2599cfe67beb97b9208e79422457b0f7cde3c4a",
     "sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
     "name": "prometheus-operator-mixin"
   },
@@ -169,8 +169,8 @@
         "subdir": "jsonnet/prometheus-operator"
       }
     },
-    "version": "94031224b1c186ff6671ec9f447716f748ac9d31",
-    "sum": "5G3bgQK3WFueFwjZDcTaI16cyf19rqxK+H1oGsiIgx0="
+    "version": "d2599cfe67beb97b9208e79422457b0f7cde3c4a",
+    "sum": "qZwYjsYpalWEkkS0cytnksmRz8/NkMlmytI3G29s5kA="
   },
   {
     "source": {
@@ -179,7 +179,7 @@
         "subdir": "doc/alertmanager-mixin"
       }
     },
-    "version": "4fcb18ee41ecaff4ee1816ad363dc63fdf3583b9",
+    "version": "d04ef60a1675db13ffd42613bc74aec298cbe67a",
     "sum": "IpF46ZXsm+0wJJAPtAre8+yxTNZA57mBqGpBP/r7/kw=",
     "name": "alertmanager"
   },
@@ -190,7 +190,7 @@
         "subdir": "docs/node-mixin"
       }
     },
-    "version": "e6a9cfbdcdaa21bf9676c6cd37bef8160227f423",
+    "version": "f35a592dd65d6b3488426a789b328c8ca5b2bc22",
     "sum": "cQCW+1N0Xae5yXecCWDK2oAlN0luBS/5GrwBYSlaFms="
   },
   {
@@ -200,7 +200,7 @@
         "subdir": "documentation/prometheus-mixin"
       }
     },
-    "version": "6b36a5592a8f7e51f9801e5c8944ad9ca185e5cc",
+    "version": "f131cdd4c5471deeda4db376d2f2b804e386dd96",
     "sum": "dYLcLzGH4yF3qB7OGC/7z4nqeTNjv42L7Q3BENU8XJI=",
     "name": "prometheus"
   },
@@ -222,7 +222,7 @@
         "subdir": "mixin"
       }
     },
-    "version": "7d95913c50e8999bce12089b582d33896b8475e1",
+    "version": "a31af1da03a9fb7586794ce1a94671050617cced",
     "sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=",
     "name": "thanos-mixin"
   }

View File

@@ -3,13 +3,13 @@ gzip: true
 folder: Kubernetes
 dashboards:
 - name: coreDNS
-  url: https://grafana.com/api/dashboards/12539/revisions/5/download
+  url: https://grafana.com/api/dashboards/15762/revisions/18/download
   tags: ['kubernetes', 'DNS']
 - name: etcd
-  url: https://grafana.com/api/dashboards/15308/revisions/1/download
+  url: https://grafana.com/api/dashboards/21473/revisions/3/download
   tags: ['kubernetes', 'etcd']
 - name: node
-  url: https://grafana.com/api/dashboards/1860/revisions/27/download
+  url: https://grafana.com/api/dashboards/1860/revisions/37/download
   tags: ['kubernetes']
 # cd dashboards; for f in *.json; do echo "- name: ${f%%.json}" >> ../dashboards.yaml; echo "  url: file://dashboards/$f" >> ../dashboards.yaml; done; cd -
 - name: apiserver

View File

@@ -6,6 +6,7 @@ dashboards:
   url: file://zdt/home.json
 - name: crio
   url: file://zdt/crio.json
+  #url: https://raw.githubusercontent.com/cri-o/cri-o/refs/heads/main/contrib/metrics-exporter/dashboard.json
   tags: ['kubernetes']
 - name: docker-registry
   url: file://zdt/docker-registry.json

View File

@@ -101,11 +101,11 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "rate(container_runtime_crio_operations{instance=~\"$node\"}[5m])",
+          "expr": "rate(container_runtime_crio_operations_total{instance=~\"$node\"}[5m])",
           "format": "time_series",
           "instant": false,
           "interval": "",
-          "legendFormat": "{{instance}} - {{operation_type}}",
+          "legendFormat": "{{instance}} - {{operation}}",
           "refId": "A"
         }
       ],
@@ -202,11 +202,11 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "rate(container_runtime_crio_operations_errors{instance=~\"$node\"}[5m])",
+          "expr": "rate(container_runtime_crio_operations_errors_total{instance=~\"$node\"}[5m])",
           "format": "time_series",
           "instant": false,
           "interval": "",
-          "legendFormat": "{{instance}} - {{operation_type}}",
+          "legendFormat": "{{instance}} - {{operation}}",
           "refId": "A"
         }
       ],
@@ -303,11 +303,11 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": " rate(container_runtime_crio_operations_latency_microseconds_count{instance=~\"$node\"}[5m])",
+          "expr": " rate(container_runtime_crio_operations_latency_seconds_total_count{instance=~\"$node\"}[5m])",
           "format": "time_series",
           "instant": false,
           "interval": "",
-          "legendFormat": "{{instance}} - {{operation_type}}",
+          "legendFormat": "{{instance}} - {{operation}}",
           "refId": "A"
         }
       ],
@@ -333,7 +333,7 @@
       "yaxes": [
         {
           "$$hashKey": "object:57",
-          "format": "µs",
+          "format": "s",
           "label": null,
           "logBase": 1,
           "max": null,
@@ -365,7 +365,7 @@
       "allValue": null,
       "current": {},
       "datasource": "${DS_PROMETHEUS}",
-      "definition": "container_runtime_crio_operations",
+      "definition": "container_runtime_crio_operations_total",
       "hide": 0,
       "includeAll": false,
       "index": -1,
@@ -373,7 +373,7 @@
       "multi": true,
       "name": "node",
       "options": [],
-      "query": "container_runtime_crio_operations",
+      "query": "container_runtime_crio_operations_total",
       "refresh": 1,
       "regex": "/.*instance=\"([^\"]*).*/",
       "skipUrlSync": false,

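Note: these dashboard edits track CRI-O's metric renames: counters carry the Prometheus `_total` suffix, the `operation_type` label is now `operation`, and operation latency is reported in seconds instead of microseconds (hence the y-axis change from `µs` to `s`). A spot check on a node, assuming CRI-O's default metrics port of 9090:

```sh
curl -s http://127.0.0.1:9090/metrics | grep -m 3 '^container_runtime_crio_operations_total'
```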
File diff suppressed because one or more lines are too long

View File

@@ -12,6 +12,6 @@ binaryData:
  home.json.gz:
H4sIAAAAAAAC/9VVS2/UMBC+8yuMDwikdnezj1b0VrVCVLwKFJBoq8obTxJrHTvYTndb1P+O7byczULFDS6J55vxeB6fxz+fIISJENIQw6TQ+Aj9tJAFOdPGSpdeQjXqNcuScXMmrDLa61BKDNGyVDFYBT5XMgeTQalxYAOCLLnTG1VCgGeM7kBZLMWJ5FI5hypdkueTPTSNIvtZLPZQ9CJ0LUjuDz7uckHP0DEHZXohmLvC21Gis6UkiuJa9+D/1/b74MwxUGa2osWpAHNGLSJKzitEkSK7kJIbVlh84kHmTKYv/ZozsXJVvbz2YkEEcN3Wtalqv3qNd69JGHB6IkXC0rY51RZISMmNb9lDkKG8BaVsQatTm+xaf6nVnUvd95VZcRb4WLsE5gGwaZKr5TsnD3z7xA9aURZ9TnnQNtWAcNTCTy+/n16gtzKV188zYwp9NB7HVIzuQcl9KtfCsBxGtuZjojUYPV6VS3DKMbd79nVOON8/mI8KkaIrfE/NFX5xJb4Bjy35kJHojbX/bu3ROzCKxRo9RW0cQTK59PTDOVErdyweJFbwMmXiKyhtE3Kmh6PFaNb6wC7QV5by281z+OeMJWaoMJ5a+LUNNfBTs9PAxlRR1DH8S0SJFltMiaaPMGW2mykt47Zuye/Jk0hOQfkrGJ6Rk82ZgdwZz0L8RwnKnY/DbutMrl8DoUykejh0nPYTxJah/O4rgzXQ3TafgajYVSMhXA+Uhii1a6ch6W+q/SjDmp1DCp02o0wPieTGnJ/k/y+ZesAf2DT/ezYB0C+Kuzo142c4epigsBltcj6cCm0H3hID2qD3sNYosWMA+aFzat2gC7brfgtrWbfEfqunQUGiQLua4NmkaiXWcQY56VgxPaxgc8fr9qpVZdnjh50fecHtKyjS4YPePXEux06fVPMLC7nej7I6ZmxkjeHetoLFK1DdZvuCUxA9wjcJ3TA78NUtCZ49q1wEZI0mgTALhSjv1otgHYXCbBJqsm49DdZR/dBfN3nZJG46Vlw+ekro+CB0HJ4ynYcC7daHNIx3O5Zm6neF7RX7XgqvXyq51lZZK2ryOa45qjmmofY5waW/FPjg4wdIFy/f3VfobUul+ZOHX/6tTqj9CQAA H4sIAAAAAAAC/9VVS2/UMBC+8yuMDwikdnezj1b0VrVCVLwKFJBoq8obTxJrHTvYTndb1P+O7byczULFDS6J55vxeB6fxz+fIISJENIQw6TQ+Aj9tJAFOdPGSpdeQjXqNcuScXMmrDLa61BKDNGyVDFYBT5XMgeTQalxYAOCLLnTG1VCgGeM7kBZLMWJ5FI5hypdkueTPTSNIvtZLPZQ9CJ0LUjuDz7uckHP0DEHZXohmLvC21Gis6UkiuJa9+D/1/b74MwxUGa2osWpAHNGLSJKzitEkSK7kJIbVlh84kHmTKYv/ZozsXJVvbz2YkEEcN3Wtalqv3qNd69JGHB6IkXC0rY51RZISMmNb9lDkKG8BaVsQatTm+xaf6nVnUvd95VZcRb4WLsE5gGwaZKr5TsnD3z7xA9aURZ9TnnQNtWAcNTCTy+/n16gtzKV188zYwp9NB7HVIzuQcl9KtfCsBxGtuZjojUYPV6VS3DKMbd79nVOON8/mI8KkaIrfE/NFX5xJb4Bjy35kJHojbX/bu3ROzCKxRo9RW0cQTK59PTDOVErdyweJFbwMmXiKyhtE3Kmh6PFaNb6wC7QV5by281z+OeMJWaoMJ5a+LUNNfBTs9PAxlRR1DH8S0SJFltMiaaPMGW2mykt47Zuye/Jk0hOQfkrGJ6Rk82ZgdwZz0L8RwnKnY/DbutMrl8DoUykejh0nPYTxJah/O4rgzXQ3TafgajYVSMhXA+Uhii1a6ch6W+q/SjDmp1DCp02o0wPieTGnJ/k/y+ZesAf2DT/ezYB0C+Kuzo142c4epigsBltcj6cCm0H3hID2qD3sNYosWMA+aFzat2gC7brfgtrWbfEfqunQUGiQLua4NmkaiXWcQY56VgxPaxgc8fr9qpVZdnjh50fecHtKyjS4YPePXEux06fVPMLC7nej7I6ZmxkjeHetoLFK1DdZvuCUxA9wjcJ3TA78NUtCZ49q1wEZI0mgTALhSjv1otgHYXCbBJqsm49DdZR/dBfN3nZJG46Vlw+ekro+CB0HJ4ynYcC7daHNIx3O5Zm6neF7RX7XgqvXyq51lZZK2ryOa45qjmmofY5waW/FPjg4wdIFy/f3VfobUul+ZOHX/6tTqj9CQAA
  crio.json.gz:
H4sIAAAAAAAC/+1abW/bNhD+3l8hCMXQFknqlzpNC+xD1qYvaNpkcdoPazKDls4SF4pUKSqJZ3i/fUfqjZKYtilWdMP8xRbvKPLuePfcA1CrO57nz2aUp7nK/KfeRxx73sr8ooaTBFDqP5/Ojk+O3h6cvjp4P/W3KjUjc2BafyxFAiqGPGuUIWSBpKmiguspjUItU7NoSBTJRC4DaHQpyyPKX4danzoWLfTvSrOsbc2ENf6ebxUuSfiUUwkOp6r9I0kWhJNmcRo6xVUQXnYVlyCz0rvdncc749KILfd2KeEYrP5maezcyhZbG31+D1dI6Y3B5P0wurYc7gx2Bt0AE86FIvpwdYQLQ3xGM1XHuzEPNXfvxiSL38BSLyjmf0Cgng4fj+v9cMo8p0y91jsOLanlkdtSnAOczJnWK5mDJY9p6JDSQPBnggmpF5TRnNwbbHmj4RB/JpMtb3jfXroK0X7jrveTt89AqpYJzQFk8VwQGfqlbm3+z++UZ9atimcnr7ePvLegJA0y73n9tpkLIVUdz/yIgzLlwXPGConOlVMhmKIpygdGSFtTqAJJyi2Hk70no90n493xaFDMZZRfmDIpTtbkqaNsCKMkM4EzJ76uU2VOjGRBWAZN9aMrh8AjFes9By05uKZ/4ZhvhpMFZcxOGiPAAgopcFUFxCgiScNj0eRrkSQ4fGKd5BWO96zxtb0EjnUGD6rDrZfGVAuBT0FSh3PmMEbNkONxXBLWdoNBBDxs24Yhj/h+dupOb3IZ9YVBLmXhd8uGshoOklQt3YXyG0jR1yTk2rFUQrlDKmkUq6mz5rJYXPWlCkuKORbC2OQmjHp+L9SYrrWyJbyiYZFttRStPwZ5YvYeN7CHZXEs8BTeCmOrETQHIdI2qtX5eVjXSc+mFDBzuSIR9A4/1TvpbMyzVhYYeT9X8PBCkGDAacGE8i1NCkQZe7XdHflz7HVBVRxW68hMSh4hnGPyQ1PmhTIlAbiqNFMkuOjZlilIUwgxDn03FZERWBSijf8azK7TAnGJgnsIwYrgMnImc65oAjOsbTETaYlT2YpytIEH8PNfZ/5d7fCZv/44Sc5tdNa1LmRSBMWsUnjbnlKs5KiIm0rRKscX9fKr2qL12tv2Vqva1pnG/vW6/bqERUFi9v1avC6fmvirGAlKLFjYORftywvEQAvDa/kJRGV6dl6YxnSh+m8oAx1lozmqA2zRsbp1rOyCJRJCRyELqTqAaKp1VjVAykN6ScMcY9qrEiwDjilX4lOrfi1OZhOfa3JNO5U4z4OLItFsR3Wxl9WsY+Fo4Z3ZbkSqkcdR40tyDZ/JbxfDmTy+KVtxe6k6GVeS6bahWiGiX0gGbWJUY3NvegHOPbHlcJORW7fwZe9f7kuvupb95DEN1dFxjPwQLmujW+yty7Z/MBP6BwjP3i0Jz3j3GxjPeMN4Noxnw3i+kvHM0F4ElA3x+c7Exzswcd7wnw3/2fCf/yv/2b0l/9nd+wb+82jDfzb8Z8N/fO8rCRDDaTxYzhIaSJEBvhBms0Dg7A0p+t6k6LCI/YYV/VBWdJYPBvNJtuFF/xFeVN+GZkEMCflQ35mORoVYLVl5LSgvijs97BhNuvgX+RwkBwXmArtYS0GSIhRSHn3NxSph7INOz34uN7xgtb7lhWoIC8pp1Vu+iNx+/87VhgLKA5aHsM9cnR+RAfQRb9uXvvV3BVNgmGMQeu9E2EJyP8kRnvqlWt3Xthpmq8U3OIniTznI5a19RDjXSN25qZbYI7Qn/sOdB3W/OvPvffz9zD9/cH/nwUN7jeyCpu8lmy554IiKA1AxccxBZ79WNvttbc85LXNPLmG18N5S5BmcFgsZg264wjZQWqfmouhMGPGr7WEF0tg+Spnfei2lCNOyebmM5KzqvDaS+hYQ+sOBNRjbg2HSPE+s56E9GA9sjdVKRtbzsLy0b3v6pzDcxy/Lt9VBC1lefGExeRM/OqHRy7LQL4mkmsU7Po9oNmi+shhOjKBJLePlnfXfQp2KQqEjAAA= 
H4sIAAAAAAAC/+1aW2/bNhR+768QhGJoiyS17DpNC+wha9OuaNpkcdqHNZlBS8cSF5pUKSqJZ3i/fYfUjbKYLilWdMP04ljnUOS5fudD6NU9z/OnU8rTXGX+c+8TPnveynyihpMFoNR/OZkenxy9Ozj9+eDDxN+q1IzMgGn9sRQLUAnkWaOMIAslTRUVXC9pFGqZmk0jokgmchlCo0tZHlP+JtL61LFpoX9fmmUdaxas8fN8q3BJwuecSnA4VZ0fSzInnDSb08gproLwelNxCTIrvdvdebozKo3Ych+XEo7B6h6WJs6jbLF10JfPcIWU3hhM3g2j68hgZ7Az2Aww4VwoopOrI1wY4jOaqTrejXmouX8/IVnyFpZ6QzH7HUL1PHg6qs/DJbOcMvVGnxhYUssjt6W4BjiZMa1XMgdLntDIIaWh4C8EE1JvKOMZeTDY8oZBgB/j8ZYXPLS3rkK037jr/eDtM5CqZUKTgCyZCSIjv9Stzd/ze2XONrvixcmb7SPvHShJw8x7Wb9t1kJE1YZnfsxBmfbgOWOFRNfKqRBM0RTlAyOkrSVUgSTlkcF479lw99lodzQcFGsZ5RemTYrMmjp1tA1hlGQmcCbj67pUZsRI5oRl0HQ/unIIPFaJPnPQkoNr+d+k+WY4mVPG7KIxAmygiAJXVUCMIpY0OhZNvRZFgo/PrExe4fOe9Xxtb4HPuoIHVXLrrbHUIuATkNThnEnGsHnkmI5LwtpuMIiBR23bMOQx389O3eVNLuOuMMylLPxu2VB2w8EiVUt3o/wKUnQ1C3Lt2GpBuUMqaZyoibPnskRcdaUKW4o5NsLY5CaMen0n1FiutbIlvKJRUW21FK0/Bnlizh41sIdtcSwwC++EsdUImkSItI1qdX0e1n3SsSkFrFyuSAyd5Kf6JF2NedaqAiPv1gomLwIJBpzmTCjf0qRAlLFX270hf4mzLqyawxodmSnJI4RzLH5o2rxQpiQEV5dmioQXHdsyBWkKEcah66YiMgaLQrTxX4PZdVogLlHwACFYEdxGTmXOFV3AFHtbTEVa4lQ2NbWxohwt4SH8+OeZf1+7feavP40X5zZG644XclGExuxV+NxeUuzk6IubGtJqylf19qvaovXa2/ZWq9ri9br9poR5wWL2/Vq8Lr81CVAJMpREsGgjMdqNVwiCFojX8hOIy/rceGGS0LnqvqEMdpST5qiOsMXH6tmxsjuWSIgcnSyk2kBE067TagJSHtFLGuUYzk6bYB9wrLkSoFoNbJEym/lck2u60YqzPLwoKs12VHd72c46Fo4ZvrHaDUk19DiafEmu4QsF7qI446c3FSoeL9VGsZVsum2oVoj4J5JBmxnV4NxZXqBzR2w53FTk1h182fuX+9LprmW3eMxEdYwcIz+Ey9roFn3bpNvfmQr9A4xn746MZ7T7FZRn1FOenvL0lOe2lAftRUDpmc+3Zj7egQl0T4B6AtQToP8rAdq9IwHa3fsKAvSkJ0A9AeoJkO/dkgExXMbDJdIYXBuVVGgaClzeE6JvSIgOi7j3jOj7MqKeDf1H2FB9C5qFCSzIx/qudDgsxGrJyutAeVHc5eGcaArFv8hnIDkoMBfXxV4KFikCIOXxbS5UCWMfdWF2q7hhA6v1HS9SI5hTTquJcst/0vvdG1cbCigPWR7BPnNNfUQG0Inetq98618VTIBhpUHkvRdRC8T9RY7w1G3V6ra2NSxb473BSRR/zkEuv9JTBHWN1xu31RKHhPbHf7zzqB5YZ/6DT7+d+eePHu48emzvkV3Q9INkkyUPHbFxwCoWkUl69ktlud/WdlzUMvfiElyLGFiKPIPTYiNj0A3X2AZQ6zKdF/MJ4361HVRQjUOklPmt11KKYC2bl8tITqvRa+OpP7ZYfzCwHkb2Q7Bovo+t74H9MBrYGmugDK3vQXlx3/b0D2HYj1+2cmuOFrK8+JXF+G3y5ITGr8umvySSah7v+IlEc0DzS4tgbARNaRkv763/AgepQqelIwAA
  docker-registry.json.gz:
H4sIAAAAAAAC/+1dW3PbNhZ+76/gMN2dpKu6IiXF0s70wXHqJjNx4o2dviReDURCEtYUyZKgY9Xj/74HAC8ACeriSIrkcsZjmwCIy8E533dAHIL3PxiGORwSP0xobP7b+AzXhnHPf0OOj2YYUs3Xl0P4ufj44fy3qze/fbo0W1kJD42wlxbR5bs4diISUhL4rFSRQechr9pFFMVBEjm4yAu9ZEL8ty7LD6NghukUJ3E5/33auYuiBC/wAL+vW2JgEf4zIRHWDC1rfxKhMfJRUTlxtcmZKH4vZ9ziKE5H1zuyj6y0Ey19cyHyQV7VxsKptik5WWmofdR+RENTjOgMhdWm3pQz1mtMN4mkdvr86sRtcpAx8Scejimi1SYvNXk1TeZahHw/gBugCFMj0QfTIzHNlaroGeSMEuLRt6w6q1WkShLSjxzKYB+NPJZPowRL6VPialKJE/ingRdErMJoMkLP2y3Dtiz41eu1DOuFXHU2/pNiLMY/jRMPR1TpQjGh8XQUoMg107wH/vf6h3QuTOwSWuqtOfEx5UbrJ54nUpgGXwWBR0kI6W2eSJQihOIIpfBg9brHdmdg9XsvB1x4IGb/hhuvmAo+3xpjdgLPQ2GMWc1j5MW5nKAHxL0IiokTAi1Nzle4trtSwl3W2fR6zq4zSajqZtsFLOW9uy5gjlBPGDP2YaCecRokPgw5bhmnF59axjmeBdHcQL5rnBEPG69TvAwi4xImKq4CZhR81RuFg5wpviIzHCRUknAmoCB6hZybSQTtV8XE8/9AXoL1WbGk65D2zB4MnO5LWXO4BtqdY9A8e9AyuqCM7aP+QNHCZ263izoo06lCSkuto8wjYMERNSgM1gjGBpQ1AGgcHEt3jINohmiKTlwuZ1Dve5BeoRwomWBVNWboLhOD1ZZVYEb8LENOjqfB17LI2GRNgXSmgee+Y/wYLypxjqIbzAXMDKmiZLUK3Ckp8DL9tfT62y0umV7eIq+sO6oNplIKQ0DSK6GSli5d1Zii7xIY3TJ5GjQwKL6jkp4YaZbUaanbNZVFyJ8sqcwuKqtoIMz7a1DCiwCkEKuTbzJp8IzzgAMxmITvY4cC3ChlrljLJeGFQUzH5E71fdLEs8Cnl+QvXqVltf8hFYhw9SaeVn8PF8A5oGC94MdgAOxG1kNVRFR03Xz/y0kpI8hvWCC9OAQlBj0pGdOYeJ5KUR2gJ8vqw6/+gAGE1VcAYszaqdoKq1muR1RjA8RYg45SgWyOFX3nhAX1JLOyL4qiCaYLBIfvwij1ZhjIDDn8DBn8DGMM2uDG96D5v34xf4Q/X8yWQXwo4juYJWX/fzEfjJ+YYrVVERdAlVYYERyrRTLTPEMO5VKwlWwPT7DvnuX1qDeD3ghX+mTRFOZ4FJdkkxHYJYdchqNVTtK5XNzq6tWVZy9W14D5DOavK2pqZuVLlZUXzBYP6HayCTaVfbO1yXTw8rjTthWirE/6du68Arb0k9kIR4w7+TgNtk6C+YO/Dia3MrAV2gnaEdEnSp69Enl2VyNP62XDnnvBnr1HsGfvkNjzGUajfmewLlk+wz3ktLGGIrX28u0MGeEJrIyjOVBkEKEJHnJ8GVJYdXr3elZsMRaBpI8Cg4Amv50frYX8eH9vsDaNh4etMOUVG6xxyoE1HVT8WM7s7SFngrV/d860xsd9x1IIsj5pN5xpPJ8SGr/4u1PnYEXqtBvqbKhzJ9TZswY9B69NnW3UayPrIKjzDXkytAlDaahzq9TZH7THpbVlfdKuqHNG4oY6rVWXnc1D24Y7d7TstLtu115/2Vm5bW+58xyQ56mQJxsLoOnT5c8NPa497M3PC7ELARMcExf7NNtE/hSD5ms3QLEzmlNJgZfT6GHz6IprULvh0YZH923zsxTms/be55YYFpD3ebb9GaXAM5xx4BlycKmh2YcXB7nbuQRbnzCTIo+g+DTjvftCh0YoqqA5DxR7h/0J5TFVbSUd64qvyXUnt5j5c0YCisKjpOJ5TPGMBU+J8J84ZLNEfCPdiD+SCJBwg7KWE8pxiVD6S2Jpuno+6RTjF0qqtgSSnfgn8ZU+yo9NSCXRSaIIxlfNYA8SqonEryZKsCBzMPMXNdzMdaSWkRkYxeXnDyzxK3GpElWnI84KlShQbIYY1MKnaFJ1w0J2V4RcksQy3Yv0qpaByFwcYY5bYy+QbFPAzQdQKlAFXOofALmDdcoM9u3cVFoBPQxD7L4TyK/mrQWrEaI4x1YnTLKYkoXLl4fPvdn1i62DK7MzJebjsSCrxESKWLyyu8DSL6dkTKsZKSpnaMB6VUbkPMr0XtZ9FGFXYxPsmZJq1lzxhxmuE98lt8RNUA6oDy1N5Lgcnn2H7kgJWkaJcyO0QB4OM9TUAKgq2cyfK5XWW3BuqZ+vK12co7tFPqj0dE2d1yyaX+0Aywgmr1CMK6tcAUOV4gKIKslVD6Xeta17Avjd+llR73l1wjnCa1CVp7/Dt3mnlajq/WbgP0hEwQ44+eZekXD8jBg8HMa93AFsGUiYZ2wE8I/RM0C8Rr6w2gIt91ekZbuh5YaWv42W11ry7Iaa5SXKCvTcWnPEt8Lu92jAGRLVj/fV93FHataIjUeyskfiYofMkBfr2HjBc8zGaWmcFq3T8j7fcA5C7Btj9mqVm79aFW/DGSligZd4I93GG2m8kUe9dsJ0eTh2a2l42wT8gdnS2ev9eBYgOqO+Mxk37Ls99t0TSmuot7L3vYO3n626LdT2aq8/f0wDNMBXphFx1nmveb88CxbO9ubq6iKLYXuUL9Ffc7/Bsmuk/3IdX6KiZdyZKKcu8SbKxYVJlFN36k90Gn9iPX/CwcQTi/08bmpKaTjMVHo3Gw9L46WmyHc98OI3EDK1EY+Dm70mqqrxNB6z87BtljZLr1k3zsQBruMZ2xL/5zEMYUq3z7v9FXnX6je826zjHx+lzNmWwFqe63XOu9tb1x8c1771fz6TjL5h3YZ1G9bdJeuy5QF7fp5hlvrm1lbYt/IEvXbZu1X6rTDt9vlXf15Rs/DdwcJ39ReGdrfw3dSLQhth4vzJnfpWkfERxNjQcUPHDR1vnI63F7hu15xi2v32TentEGrE1gCX2gNwd8u1T3uxqwx7I1RLfCfCAARVpgUaBD8zD3KPk1kt4dpAuI8/M5HVo9G/9fe9gZJFpxeRsopfIEJF0pLw+BtH34fCT8Qo3gF1+87829jbbti7Ye792wtHkVuqmCVdINcl/qSqHizzY/pWuFy5+gZ4tcLiqINRdzxW4EnccukgYXzxn8oMZLlTPEuNBaAoDDwwyQ/8LVb1SPi7MPAFi7aPehojiEPsgKxnVUtjbsaZApMllFzBEdlYZL7G78i+RaD4V1Oge/7Y7zSdR4WYRPzc4j3vhScAa6h79Ye3tfulQzcRR9jnnCaw6z59qAqsNvKCESM0qw2MVkdl5a8zrLJ6XHFVqNvCTGkAVEJ6fdJ4zrr6Yik16ECVpb4B8QQA7rP6U5hTBqh8i+LupIIJi15pvnvFhSwiPcvUl2amb+AqWfNqM7XxTwuQVA+XWrDUIXq
dCEOP0HyqtYA0F0N7lYKWiRIamOVcvVTmFak0GLq/GFp5HLpJEO0fIoiCue0DiH4KvQC5xipYCj1usLTB0gZLvzOWVp6DHW8MSu0DdUeHCYexAwPUouMNrja42uDqvq3zN4er3UME1hnyyVh8xOC7o+p52pdVcDXrdwOqDag2oLp3C/8Nomr7AFHVAVmD1e0DqJ6KrqyCqWmvG0j9W0Fq/tlgUCy2j8uq7bQFLJgxQy70R/7BYWHpZkznXvrZ3ehGlKRoUliVeZNAD3xcHH2Rh5Jn19jhZ/WLpimehYCJArOXfrAYeV52vG8Z3/MQjvuHNb9lnH6zWF53E9/xEhef6I8P1+5KmrMEDEZTPDv4lv3lwQ1y20GYfay5QCpIBluNGJqKf9iJPlD582Ux+coRq8Wc2krqBPPzan85+qmYqWHeN0Cw55//+8W8/teLo59+MRcHwsHE89mI/5N111RzK+NiafrCKTqIgUsZSYyvREVaWHkiuoHCcGtacV8zzz/mF6WTb1dQHfEF0UxTGj3ZlZ5kAUePVpYkVL7+WjfvVs28S8FPBwETpS/B8zCcnGbyU7aDrz9bWYBPdpC2eMO/uC0kQKHF+iCT1TBz2WS/0uxJrrXVli468oVVHA1m9qT/Lfmi05ZzpDAkW/rfcoVDeJ2NgQWaSXqxtBW54pdyxXIrdle+kM5UP3bl/mZ9UcT3V8AD9MzUY8hc1NcBk6rxUfYQzIR7/+Zp8Opy8r8++Uuk3uaeiG398PB/dY9XRg2EAAA= H4sIAAAAAAAC/+1dW3PbNhZ+76/gMN2dpKu6IiXF0s70wXHqJjNx4o2dviReDURCEtYUyZKgY9Xj/74HAC8ACeriSIrkcsZjmwCIy8E533dAHIL3PxiGORwSP0xobP7b+AzXhnHPf0OOj2YYUs3Xl0P4ufj44fy3qze/fbo0W1kJD42wlxbR5bs4diISUhL4rFSRQechr9pFFMVBEjm4yAu9ZEL8ty7LD6NghukUJ3E5/33auYuiBC/wAL+vW2JgEf4zIRHWDC1rfxKhMfJRUTlxtcmZKH4vZ9ziKE5H1zuyj6y0Ey19cyHyQV7VxsKptik5WWmofdR+RENTjOgMhdWm3pQz1mtMN4mkdvr86sRtcpAx8Scejimi1SYvNXk1TeZahHw/gBugCFMj0QfTIzHNlaroGeSMEuLRt6w6q1WkShLSjxzKYB+NPJZPowRL6VPialKJE/ingRdErMJoMkLP2y3Dtiz41eu1DOuFXHU2/pNiLMY/jRMPR1TpQjGh8XQUoMg107wH/vf6h3QuTOwSWuqtOfEx5UbrJ54nUpgGXwWBR0kI6W2eSJQihOIIpfBg9brHdmdg9XsvB1x4IGb/hhuvmAo+3xpjdgLPQ2GMWc1j5MW5nKAHxL0IiokTAi1Nzle4trtSwl3W2fR6zq4zSajqZtsFLOW9uy5gjlBPGDP2YaCecRokPgw5bhmnF59axjmeBdHcQL5rnBEPG69TvAwi4xImKq4CZhR81RuFg5wpviIzHCRUknAmoCB6hZybSQTtV8XE8/9AXoL1WbGk65D2zB4MnO5LWXO4BtqdY9A8e9AyuqCM7aP+QNHCZ263izoo06lCSkuto8wjYMERNSgM1gjGBpQ1AGgcHEt3jINohmiKTlwuZ1Dve5BeoRwomWBVNWboLhOD1ZZVYEb8LENOjqfB17LI2GRNgXSmgee+Y/wYLypxjqIbzAXMDKmiZLUK3Ckp8DL9tfT62y0umV7eIq+sO6oNplIKQ0DSK6GSli5d1Zii7xIY3TJ5GjQwKL6jkp4YaZbUaanbNZVFyJ8sqcwuKqtoIMz7a1DCiwCkEKuTbzJp8IzzgAMxmITvY4cC3ChlrljLJeGFQUzH5E71fdLEs8Cnl+QvXqVltf8hFYhw9SaeVn8PF8A5oGC94MdgAOxG1kNVRFR03Xz/y0kpI8hvWCC9OAQlBj0pGdOYeJ5KUR2gJ8vqw6/+gAGE1VcAYszaqdoKq1muR1RjA8RYg45SgWyOFX3nhAX1JLOyL4qiCaYLBIfvwij1ZhjIDDn8DBn8DGMM2uDG96D5v34xf4Q/X8yWQXwo4juYJWX/fzEfjJ+YYrVVERdAlVYYERyrRTLTPEMO5VKwlWwPT7DvnuX1qDeD3ghX+mTRFOZ4FJdkkxHYJYdchqNVTtK5XNzq6tWVZy9W14D5DOavK2pqZuVLlZUXzBYP6HayCTaVfbO1yXTw8rjTthWirE/6du68Arb0k9kIR4w7+TgNtk6C+YO/Dia3MrAV2gnaEdEnSp69Enl2VyNP62XDnnvBnr1HsGfvkNjzGUajfmewLlk+wz3ktLGGIrX28u0MGeEJrIyjOVBkEKEJHnJ8GVJYdXr3elZsMRaBpI8Cg4Amv50frYX8eH9vsDaNh4etMOUVG6xxyoE1HVT8WM7s7SFngrV/d860xsd9x1IIsj5pN5xpPJ8SGr/4u1PnYEXqtBvqbKhzJ9TZswY9B69NnW3UayPrIKjzDXkytAlDaahzq9TZH7THpbVlfdKuqHNG4oY6rVWXnc1D24Y7d7TstLtu115/2Vm5bW+58xyQ56mQJxsLoOnT5c8NPa497M3PC7ELARMcExf7NNtE/hSD5ms3QLEzmlNJgZfT6GHz6IprULvh0YZH923zsxTms/be55YYFpD3ebb9GaXAM5xx4BlycKmh2YcXB7nbuQRbnzCTIo+g+DTjvftCh0YoqqA5DxR7h/0J5TFVbSUd64qvyXUnt5j5c0YCisKjpOJ5TPGMBU+J8J84ZLNEfCPdiD+SCJBwg7KWE8pxiVD6S2Jpuno+6RTjF0qqtgSSnfgn8ZU+yo9NSCXRSaIIxlfNYA8SqonEryZKsCBzMPMXNdzMdaSWkRkYxeXnDyzxK3GpElWnI84KlShQbIYY1MKnaFJ1w0J2V4RcksQy3Yv0qpaByFwcYY5bYy+QbFPAzQdQKlAFXOofALmDdcoM9u3cVFoBPQxD7L4TyK/mrQWrEaI4x1YnTLKYkoXLl4fPvdn1i62DK7MzJebjsSCrxESKWLyyu8DSL6dkTKsZKSpnaMB6VUbkPMr0XtZ9FGFXYxPsmZJq1lzxhxmuE98lt8RNUA6oDy1N5Lgcnn2H7kgJWkaJcyO0QB4OM9TUAKgq2cyfK5XWW3BuqZ+vK12co7tFPqj0dE2d1yyaX+0Aywgmr1CMK6tcAUOV4gKIKslVD6Xeta17Avjd+llR73l1wjnCa1CVp7/Dt3mnlajq/WbgP0hEwQ44+eZekXD8jBg8HMa93AFsGUiYZ2wE8I/RM0C8Rr6w2gIt91ekZbuh5YaWv42W11ry7Iaa5SXKCvTcWnPEt8Lu92jAGRLVj/fV93FHataIjUeyskfiYofMkBfr2HjBc8zGaWmcFq3T8j7fcA5C7Btj9mqVm79aFW/DGSligZd4I93GG2m8kUe9dsJ0eTh2a2l42wT8gdnS2ev9eBYgOqO+Mxk37Ls99t0TSmuot7L3vYO3n626LdT2aq8/f0wDNMBXphFx1nmveb88CxbO9ubq6iKLY
XuUL9Ffc7/Bsmuk/3IdX6KiZdyZKKcu8SbKxYVJlFN36k90Gn9iPX/CwcQTi/08bmpKaTjMVHo3Gw9L46WmyHc98OI3EDK1EY+Dm70mqqrxNB6z87BtljZLr1k3zsQBruMZ2xL/5zEMYUq3z7v9FXnX6je826zjHx+lzNmWwFqe63XOu9tb1x8c1771fz6TjL5h3YZ1G9bdJeuy5QF7fp5hlvrm1lbYt/IEvXbZu1X6rTDt9vlXf15Rs/DdwcJ39ReGdrfw3dSLQhth4vzJnfpWkfERxNjQcUPHDR1vnI63F7hu15xi2v32TentEGrE1gCX2gNwd8u1T3uxqwx7I1RLfCfCAARVpgUaBD8zD3KPk1kt4dpAuI8/M5HVo9G/9fe9gZJFpxeRsopfIEJF0pLw+BtH34fCT8Qo3gF1+87829jbbti7Ye792wtHkVuqmCVdINcl/qSqHizzY/pWuFy5+gZ4tcLiqINRdzxW4EnccukgYXzxn8oMZLlTPEuNBaAoDDwwyQ/8LVb1SPi7MPAFi7aPehojiEPsgKxnVUtjbsaZApMllFzBEdlYZL7G78i+RaD4V1Oge/7Y7zSdR4WYRPzc4j3vhScAa6h79Ye3tfulQzcRR9jnnCaw6z59qAqsNvKCESM0qw2MVkdl5a8zrLJ6XHFVqNvCTGkAVEJ6fdJ4zrr6Yik16ECVpb4B8QQA7rP6U5hTBqh8i+LupIIJi15pvnvFhSwiPcvUl2amb+AqWfNqM7XxTwuQVA+XWrDUIXqdCEOP0HyqtYA0F0N7lYKWiRIamOVcvVTmFak0GLq/GFp5HLpJEO0fIoiCue0DiH4KvQC5xipYCj1usLTB0gZLvzOWVp6DHW8MSu0DdUeHCYexAwPUouMNrja42uDqvq3zN4er3UME1hnyyVh8xOC7o+p52pdVcDXrdwOqDag2oLp3C/8Nomr7AFHVAVmD1e0DqJ6KrqyCqWmvG0j9W0Fq/tlgUCy2j8uq7bQFLJgxQy70R/7BYWHpZkznXvrZ3ehGlKRoUliVeZNAD3xcHH2Rh5Jn19jhZ/WLpimehYCJArOXfrAYeV52vG8Z3/MQjvuHNb9lnH6zWF53E9/xEhef6I8P1+5KmrMEDEZTPDv4lv3lwQ1y20GYfay5QCpIBluNGJqKf9iJPlD582Ux+coRq8Wc2krqBPPzan85+qmYqWHeN0Cw55//+8W8/teLo59+MRcHwsHE89mI/5N111RzK+NiafrCKTqIgUsZSYyvREVaWHkiuoHCcGtacV8zzz/mF6WTb1dQHfEF0UxTGj3ZlZ5kAUePVpYkVL7+WjfvVs28S8FPBwETpS/B8zCcnGbyU7aDrz9bWYBPdpC2eMO/uC0kQKHF+iCT1TBz2WS/0uxJrrXVli468oVVHA1m9qT/Lfmi05ZzpDAkW/rfcoVDeJ2NgQWaSXqxtBW54pdyxXIrdle+kM5UP3bl/mZ9UcT3V8AD9MzUY8hc1NcBk6rxUfYQzIR7/+Zp8Opy8r8++Uuk3uaeiG398PB/dY9XRg2EAAA=

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-telemetry
 description: KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
 type: application
-version: 0.4.0
+version: 0.4.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -19,7 +19,7 @@ dependencies:
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: opentelemetry-collector
-    version: 0.97.1
+    version: 0.108.0
     repository: https://open-telemetry.github.io/opentelemetry-helm-charts
     condition: opentelemetry-collector.enabled
   - name: data-prepper
@@ -27,7 +27,7 @@ dependencies:
     repository: https://opensearch-project.github.io/helm-charts/
     condition: data-prepper.enabled
   - name: jaeger
-    version: 3.1.1
+    version: 3.3.1
     repository: https://jaegertracing.github.io/helm-charts
     condition: jaeger.enabled
   - name: fluentd
@@ -35,7 +35,7 @@ dependencies:
     repository: https://fluent.github.io/helm-charts
     condition: fluentd.enabled
   - name: fluent-bit
-    version: 0.47.1
+    version: 0.47.10
     repository: https://fluent.github.io/helm-charts
     condition: fluent-bit.enabled
 kubeVersion: ">= 1.26.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-telemetry
 
-![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
@@ -19,10 +19,10 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://fluent.github.io/helm-charts | fluent-bit | 0.47.1 |
+| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
 | https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
-| https://jaegertracing.github.io/helm-charts | jaeger | 3.1.1 |
+| https://jaegertracing.github.io/helm-charts | jaeger | 3.3.1 |
-| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.97.1 |
+| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.108.0 |
 | https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.1.0 |
 
 ## Values
@@ -170,19 +170,14 @@ Kubernetes: `>= 1.26.0`
 | opensearch.dashboard.istio.url | string | `"telemetry-dashboard.example.com"` | |
 | opensearch.nodeSets | list | `[]` | |
 | opensearch.prometheus | bool | `false` | |
-| opensearch.version | string | `"2.16.0"` | |
+| opensearch.version | string | `"2.17.0"` | |
 | opentelemetry-collector.config.exporters.otlp/data-prepper.endpoint | string | `"telemetry-data-prepper:21890"` | |
 | opentelemetry-collector.config.exporters.otlp/data-prepper.tls.insecure | bool | `true` | |
 | opentelemetry-collector.config.exporters.otlp/jaeger.endpoint | string | `"telemetry-jaeger-collector:4317"` | |
 | opentelemetry-collector.config.exporters.otlp/jaeger.tls.insecure | bool | `true` | |
 | opentelemetry-collector.config.extensions.health_check.endpoint | string | `"${env:MY_POD_IP}:13133"` | |
-| opentelemetry-collector.config.extensions.memory_ballast | object | `{}` | |
-| opentelemetry-collector.config.processors.batch | object | `{}` | |
-| opentelemetry-collector.config.processors.memory_limiter | string | `nil` | |
-| opentelemetry-collector.config.receivers.jaeger | string | `nil` | |
 | opentelemetry-collector.config.receivers.otlp.protocols.grpc.endpoint | string | `"${env:MY_POD_IP}:4317"` | |
 | opentelemetry-collector.config.receivers.otlp.protocols.http.endpoint | string | `"${env:MY_POD_IP}:4318"` | |
-| opentelemetry-collector.config.receivers.zipkin | string | `nil` | |
 | opentelemetry-collector.config.service.extensions[0] | string | `"health_check"` | |
 | opentelemetry-collector.config.service.extensions[1] | string | `"memory_ballast"` | |
 | opentelemetry-collector.config.service.pipelines.logs | string | `nil` | |

View File

@@ -1,27 +0,0 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Updated Fluent Bit OCI image to v3.1.1."
apiVersion: v1
appVersion: 3.1.1
description: Fast and lightweight log processor and forwarder or Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/fluentd/fluentbit/icon/fluentbit-icon-color.svg
keywords:
- logging
- fluent-bit
- fluentd
maintainers:
- email: eduardo@calyptia.com
name: edsiper
- email: naseem@transit.app
name: naseemkullah
- email: towmeykaw@gmail.com
name: Towmeykaw
- email: steve.hipwell@gmail.com
name: stevehipwell
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.47.1

View File

@@ -1,57 +0,0 @@
# Fluent Bit Helm chart
[Fluent Bit](https://fluentbit.io) is a fast and lightweight log processor and forwarder or Linux, OSX and BSD family operating systems.
## Installation
To add the `fluent` helm repo, run:
```sh
helm repo add fluent https://fluent.github.io/helm-charts
```
To install a release named `fluent-bit`, run:
```sh
helm install fluent-bit fluent/fluent-bit
```
## Chart values
```sh
helm show values fluent/fluent-bit
```
## Using Lua scripts
Fluent Bit allows us to build filter to modify the incoming records using custom [Lua scripts.](https://docs.fluentbit.io/manual/pipeline/filters/lua)
### How to use Lua scripts with this Chart
First, you should add your Lua scripts to `luaScripts` in values.yaml, for example:
```yaml
luaScripts:
filter_example.lua: |
function filter_name(tag, timestamp, record)
-- put your lua code here.
end
```
After that, the Lua scripts will be ready to be used as filters. So next step is to add your Fluent bit [filter](https://docs.fluentbit.io/manual/concepts/data-pipeline/filter) to `config.filters` in values.yaml, for example:
```yaml
config:
filters: |
[FILTER]
Name lua
Match <your-tag>
script /fluent-bit/scripts/filter_example.lua
call filter_name
```
Under the hood, the chart will:
- Create a configmap using `luaScripts`.
- Add a volumeMounts for each Lua scripts using the path `/fluent-bit/scripts/<script>`.
- Add the Lua script's configmap as volume to the pod.
### Note
Remember to set the `script` attribute in the filter using `/fluent-bit/scripts/`, otherwise the file will not be found by fluent bit.

View File

@@ -1,7 +0,0 @@
testFramework:
enabled: true
logLevel: debug
dashboards:
enabled: true

View File

@@ -1,6 +0,0 @@
Get Fluent Bit build information by running these commands:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "fluent-bit.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 2020:2020
curl http://127.0.0.1:2020

View File

@@ -1,138 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "fluent-bit.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "fluent-bit.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "fluent-bit.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "fluent-bit.labels" -}}
helm.sh/chart: {{ include "fluent-bit.chart" . }}
{{ include "fluent-bit.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "fluent-bit.selectorLabels" -}}
app.kubernetes.io/name: {{ include "fluent-bit.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "fluent-bit.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "fluent-bit.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Fluent-bit image with tag/digest
*/}}
{{- define "fluent-bit.image" -}}
{{- $tag := ternary "" (printf ":%s" (toString .tag)) (or (empty .tag) (eq "-" (toString .tag))) -}}
{{- $digest := ternary "" (printf "@%s" .digest) (empty .digest) -}}
{{- printf "%s%s%s" .repository $tag $digest -}}
{{- end -}}
{{/*
Ingress ApiVersion according k8s version
*/}}
{{- define "fluent-bit.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1
{{- else if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") (semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1beta1
{{- else -}}
extensions/v1beta1
{{- end }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "fluent-bit.ingress.isStable" -}}
{{- eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports ingressClassName.
*/}}
{{- define "fluent-bit.ingress.supportsIngressClassName" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "fluent-bit.ingress.supportsPathType" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Pdb apiVersion according k8s version and capabilities
*/}}
{{- define "fluent-bit.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion) -}}
policy/v1
{{- else -}}
policy/v1beta1
{{- end }}
{{- end -}}
{{/*
HPA ApiVersion according k8s version
Check legacy first so helm template / kustomize will default to latest version
*/}}
{{- define "fluent-bit.hpa.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "autoscaling/v2beta2") (semverCompare "<1.23-0" .Capabilities.KubeVersion.GitVersion) -}}
autoscaling/v2beta2
{{- else -}}
autoscaling/v2
{{- end -}}
{{- end -}}
{{/*
Create the name of OpenShift SecurityContextConstraints to use
*/}}
{{- define "fluent-bit.openShiftSccName" -}}
{{- if not .Values.openShift.securityContextConstraints.create -}}
{{- printf "%s" .Values.openShift.securityContextConstraints.existingName -}}
{{- else -}}
{{- printf "%s" (default (include "fluent-bit.fullname" .) .Values.openShift.securityContextConstraints.name) -}}
{{- end -}}
{{- end -}}

View File

@@ -1,155 +0,0 @@
{{- define "fluent-bit.pod" -}}
serviceAccountName: {{ include "fluent-bit.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
hostNetwork: {{ .Values.hostNetwork }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.hostAliases }}
hostAliases:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 2 }}
{{- else }}
{{- toYaml . | nindent 2 }}
{{- end -}}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
{{- end }}
image: {{ include "fluent-bit.image" (merge .Values.image (dict "tag" (default .Chart.AppVersion .Values.image.tag))) | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or .Values.env .Values.envWithTpl }}
env:
{{- with .Values.env }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- range $item := .Values.envWithTpl }}
- name: {{ $item.name }}
value: {{ tpl $item.value $ | quote }}
{{- end }}
{{- end }}
{{- if .Values.envFrom }}
envFrom:
{{- toYaml .Values.envFrom | nindent 6 }}
{{- end }}
{{- with .Values.command }}
command:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if or .Values.args .Values.hotReload.enabled }}
args:
{{- toYaml .Values.args | nindent 6 }}
{{- if .Values.hotReload.enabled }}
- --enable-hot-reload
{{- end }}
{{- end}}
ports:
- name: http
containerPort: {{ .Values.metricsPort }}
protocol: TCP
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
containerPort: {{ .containerPort }}
protocol: {{ .protocol }}
{{- end }}
{{- end }}
{{- with .Values.lifecycle }}
lifecycle:
{{- toYaml . | nindent 6 }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 6 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 6 }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/conf
{{- if or .Values.luaScripts .Values.hotReload.enabled }}
- name: luascripts
mountPath: /fluent-bit/scripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.hotReload.enabled }}
- name: reloader
image: {{ include "fluent-bit.image" .Values.hotReload.image }}
args:
- {{ printf "-webhook-url=http://localhost:%s/api/v2/reload" (toString .Values.metricsPort) }}
- -volume-dir=/watch/config
- -volume-dir=/watch/scripts
volumeMounts:
- name: config
mountPath: /watch/config
- name: luascripts
mountPath: /watch/scripts
{{- with .Values.hotReload.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.extraContainers }}
{{- if kindIs "string" .Values.extraContainers }}
{{- tpl .Values.extraContainers $ | nindent 2 }}
{{- else }}
{{- toYaml .Values.extraContainers | nindent 2 }}
{{- end -}}
{{- end }}
volumes:
- name: config
configMap:
name: {{ default (include "fluent-bit.fullname" .) .Values.existingConfigMap }}
{{- if or .Values.luaScripts .Values.hotReload.enabled }}
- name: luascripts
configMap:
name: {{ include "fluent-bit.fullname" . }}-luascripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumes | nindent 2 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 2 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end -}}

View File

@@ -1,45 +0,0 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
{{- if .Values.rbac.nodeAccess }}
- nodes
- nodes/proxy
{{- end }}
{{- if .Values.rbac.eventsAccess }}
- events
{{- end }}
verbs:
- get
- list
- watch
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ include "fluent-bit.fullname" . }}
verbs:
- use
{{- end }}
{{- if .Values.openShift.enabled }}
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- {{ include "fluent-bit.openShiftSccName" . }}
verbs:
- use
{{- end }}
{{- end -}}

View File

@@ -1,16 +0,0 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "fluent-bit.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "fluent-bit.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@ -1,21 +0,0 @@
{{- if .Values.dashboards.enabled -}}
{{- range $path, $_ := .Files.Glob "dashboards/*.json" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" $ }}-dashboard-{{ trimSuffix ".json" (base $path) }}
namespace: {{ default $.Release.Namespace $.Values.dashboards.namespace }}
{{- with $.Values.dashboards.annotations }}
annotations:
{{- toYaml . | nindent 4 -}}
{{- end }}
labels:
{{- include "fluent-bit.labels" $ | nindent 4 }}
{{ $.Values.dashboards.labelKey }}: {{ $.Values.dashboards.labelValue | quote }}
data:
{{ include "fluent-bit.fullname" $ }}-{{ base $path }}: |
{{- tpl ($.Files.Get $path) $ | nindent 4 }}
---
{{- end }}
{{- end -}}

View File

@ -1,13 +0,0 @@
{{- if or .Values.luaScripts .Values.hotReload.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}-luascripts
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
{{ range $key, $value := .Values.luaScripts }}
{{ $key }}: {{ $value | quote }}
{{ end }}
{{- end -}}

View File

@ -1,25 +0,0 @@
{{- if not .Values.existingConfigMap -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
custom_parsers.conf: |
{{- (tpl .Values.config.customParsers $) | nindent 4 }}
fluent-bit.conf: |
{{- (tpl .Values.config.service $) | nindent 4 }}
{{- (tpl .Values.config.inputs $) | nindent 4 }}
{{- (tpl .Values.config.filters $) | nindent 4 }}
{{- (tpl .Values.config.outputs $) | nindent 4 }}
{{- range $key, $val := .Values.config.upstream }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- range $key, $val := .Values.config.extraFiles }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -1,48 +0,0 @@
{{- if eq .Values.kind "DaemonSet" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or (not .Values.hotReload.enabled) .Values.podAnnotations }}
annotations:
{{- if not .Values.hotReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.luaScripts }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- end }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@ -1,51 +0,0 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
{{- with .Values.updateStrategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or (not .Values.hotReload.enabled) .Values.podAnnotations }}
annotations:
{{- if not .Values.hotReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.luaScripts }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- end }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@ -1,65 +0,0 @@
{{- $ingressApiIsStable := eq (include "fluent-bit.ingress.isStable" .) "true" -}}
{{- $ingressSupportsIngressClassName := eq (include "fluent-bit.ingress.supportsIngressClassName" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "fluent-bit.ingress.supportsPathType" .) "true" -}}
{{- $fullName := include "fluent-bit.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and ( eq .Values.kind "Deployment" ) .Values.ingress.enabled }}
apiVersion: {{ include "fluent-bit.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- range $key, $value := . }}
{{ printf "%s: %s" $key ((tpl $value $) | quote) }}
{{- end }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
{{- with .secretName }}
secretName: {{ . }}
{{- end }}
{{- end }}
{{- end }}
rules:
{{- range concat .Values.ingress.hosts .Values.ingress.extraHosts }}
- host: {{ .host | quote }}
http:
paths:
- path: /
{{- if $ingressSupportsPathType }}
pathType: Prefix
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
{{- if .port }}
number: {{ .port }}
{{- else }}
number: {{ $svcPort }}
{{- end }}
{{- else }}
serviceName: {{ $fullName }}
{{- if .port }}
servicePort: {{ .port }}
{{- else }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,23 +0,0 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: {{ include "fluent-bit.fullname" . | quote }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
spec:
policyTypes:
- "Ingress"
podSelector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
ingress:
{{- with .Values.networkPolicy.ingress }}
- from:
{{- with .from }}{{- . | toYaml | nindent 8 }}{{- else }} []{{- end }}
ports:
- protocol: "TCP"
port: {{ $.Values.service.port }}
{{- end }}
{{- end }}

View File

@ -1,21 +0,0 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.podDisruptionBudget.enabled }}
apiVersion: {{ include "fluent-bit.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.podDisruptionBudget.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ default $.Release.Namespace .Values.prometheusRule.namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- if .Values.prometheusRule.additionalLabels }}
{{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }}
{{- end }}
spec:
{{- if .Values.prometheusRule.rules }}
groups:
- name: {{ template "fluent-bit.name" . }}
rules: {{- toYaml .Values.prometheusRule.rules | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,42 +0,0 @@
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.podSecurityPolicy.annotations }}
annotations:
{{- toYaml .Values.podSecurityPolicy.annotations | nindent 4 }}
{{- end }}
spec:
privileged: false
# Required to prevent escalations to root.
allowPrivilegeEscalation: false
# This is redundant with non-root + disallow privilege escalation,
# but we can provide it for defense in depth.
requiredDropCapabilities:
- ALL
volumes:
- '*'
hostNetwork: {{ .Values.hostNetwork }}
hostIPC: false
hostPID: false
runAsUser:
# TODO: Require the container to run without root privileges.
rule: 'RunAsAny'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false
{{- end }}

View File

@ -1,41 +0,0 @@
{{- if and .Values.openShift.enabled .Values.openShift.securityContextConstraints.create }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: {{ include "fluent-bit.openShiftSccName" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.openShift.securityContextConstraints.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
allowPrivilegedContainer: true
allowPrivilegeEscalation: true
allowHostDirVolumePlugin: true
defaultAllowPrivilegeEscalation: false
# forbid host namespaces
allowHostNetwork: false
allowHostIPC: false
allowHostPorts: false
allowHostPID: false
allowedCapabilities: []
forbiddenSysctls:
- "*"
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- hostPath
- persistentVolumeClaim
- projected
- secret
{{- end }}

View File

@ -1,57 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }}
{{- end }}
{{- if (eq .Values.kind "DaemonSet") }}
{{- with .Values.service.internalTrafficPolicy }}
internalTrafficPolicy: {{ . }}
{{- end }}
{{- end }}
{{- if (eq .Values.service.type "LoadBalancer")}}
{{- with .Values.service.loadBalancerClass}}
loadBalancerClass: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges}}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if and (eq .Values.service.type "NodePort") (.Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
targetPort: {{ .name }}
protocol: {{ .protocol }}
port: {{ .port }}
{{- if and (eq $.Values.service.type "NodePort") (.nodePort) }}
nodePort: {{ .nodePort }}
{{- end }}
{{- end }}
{{- end }}
selector:
{{- include "fluent-bit.selectorLabels" . | nindent 4 }}

View File

@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "fluent-bit.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -1,51 +0,0 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "fluent-bit.fullname" . }}
namespace: {{ default .Release.Namespace .Values.serviceMonitor.namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.selector }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
jobLabel: app.kubernetes.io/instance
endpoints:
- port: http
path: {{ default "/api/v2/metrics/prometheus" .Values.serviceMonitor.path }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 8 }}
{{- else }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.serviceMonitor.scheme }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- end }}
{{- if .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.serviceMonitor.tlsConfig | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.additionalEndpoints }}
{{- toYaml . | nindent 4 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -1,26 +0,0 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "fluent-bit.fullname" . }}-test-connection"
namespace: {{ default .Release.Namespace .Values.testFramework.namespace }}
labels:
helm.sh/chart: {{ include "fluent-bit.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
annotations:
helm.sh/hook: test
helm.sh/hook-delete-policy: hook-succeeded
spec:
containers:
- name: wget
image: {{ include "fluent-bit.image" .Values.testFramework.image | quote }}
imagePullPolicy: {{ .Values.testFramework.image.pullPolicy }}
command: ["sh"]
args: ["-c", "wget -O- {{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}"]
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}
{{- end }}
restartPolicy: Never
{{- end }}

View File

@ -1,39 +0,0 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.autoscaling.vpa.enabled }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.autoscaling.vpa.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
resourcePolicy:
containerPolicies:
- containerName: {{ .Chart.Name }}
{{- with .Values.autoscaling.vpa.controlledResources }}
controlledResources:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.maxAllowed }}
maxAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.minAllowed }}
minAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: {{ .Values.kind }}
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.autoscaling.vpa.updatePolicy }}
updatePolicy:
{{- with .Values.autoscaling.vpa.updatePolicy.updateMode }}
updateMode: {{ . }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,511 +0,0 @@
# Default values for fluent-bit.
# kind -- DaemonSet or Deployment
kind: DaemonSet
# replicaCount -- Only applicable if kind=Deployment
replicaCount: 1
image:
repository: cr.fluentbit.io/fluent/fluent-bit
# Overrides the image tag whose default is {{ .Chart.AppVersion }}
# Set to "-" to not use the default value
tag:
digest:
pullPolicy: IfNotPresent
testFramework:
enabled: true
namespace:
image:
repository: busybox
pullPolicy: Always
tag: latest
digest:
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: true
annotations: {}
name:
rbac:
create: true
nodeAccess: false
eventsAccess: false
# Configure podsecuritypolicy
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
# from Kubernetes 1.25, PSP is deprecated
# See: https://kubernetes.io/blog/2022/08/23/kubernetes-v1-25-release/#pod-security-changes
# We automatically disable PSP if Kubernetes version is 1.25 or higher
podSecurityPolicy:
create: false
annotations: {}
# OpenShift-specific configuration
openShift:
enabled: false
securityContextConstraints:
# Create SCC for Fluent Bit and allow it to be used
create: true
name: ""
annotations: {}
# Use an existing SCC in the cluster, rather than creating a new one
existingName: ""
podSecurityContext: {}
# fsGroup: 2000
hostNetwork: false
dnsPolicy: ClusterFirst
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "2"
# - name: edns0
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "foo.local"
# - "bar.local"
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 2020
internalTrafficPolicy:
loadBalancerClass:
loadBalancerSourceRanges: []
labels: {}
# nodePort: 30020
# clusterIP: 172.16.10.1
annotations: {}
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
# prometheus.io/scrape: "true"
externalIPs: []
# externalIPs:
# - 2.2.2.2
serviceMonitor:
enabled: false
# namespace: monitoring
# interval: 10s
# scrapeTimeout: 10s
# selector:
# prometheus: my-prometheus
# ## metric relabel configs to apply to samples before ingestion.
# ##
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# ## relabel configs to apply to samples after ingestion.
# ##
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
# scheme: ""
# tlsConfig: {}
## Bear in mind that if you want to collect metrics from a different port,
## you will need to configure the new ports in the extraPorts property.
additionalEndpoints: []
# - port: metrics
# path: /metrics
# interval: 10s
# scrapeTimeout: 10s
# scheme: ""
# tlsConfig: {}
# # metric relabel configs to apply to samples before ingestion.
# #
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# # relabel configs to apply to samples after ingestion.
# #
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
prometheusRule:
enabled: false
# namespace: ""
# additionalLabels: {}
# rules:
# - alert: NoOutputBytesProcessed
# expr: rate(fluentbit_output_proc_bytes_total[5m]) == 0
# annotations:
# message: |
# Fluent Bit instance {{ $labels.instance }}'s output plugin {{ $labels.name }} has not processed any
# bytes for at least 15 minutes.
# summary: No Output Bytes Processed
# for: 15m
# labels:
# severity: critical
dashboards:
enabled: false
labelKey: grafana_dashboard
labelValue: 1
annotations: {}
namespace: ""
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "sleep 20"]
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /api/v1/health
port: http
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## only available if kind is Deployment
ingress:
enabled: false
ingressClassName: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts: []
# - host: fluent-bit.example.tld
extraHosts: []
# - host: fluent-bit-extra.example.tld
## specify extraPort number
# port: 5170
tls: []
# - secretName: fluent-bit-example-tld
# hosts:
# - fluent-bit.example.tld
## only available if kind is Deployment
autoscaling:
vpa:
enabled: false
annotations: {}
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
updatePolicy:
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 75
# targetMemoryUtilizationPercentage: 75
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics
customRules: []
# - type: Pods
# pods:
# metric:
# name: packets-per-second
# target:
# type: AverageValue
# averageValue: 1k
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
behavior: {}
# scaleDown:
# policies:
# - type: Pods
# value: 4
# periodSeconds: 60
# - type: Percent
# value: 10
# periodSeconds: 60
## only available if kind is Deployment
podDisruptionBudget:
enabled: false
annotations: {}
maxUnavailable: "30%"
nodeSelector: {}
tolerations: []
affinity: {}
labels: {}
annotations: {}
podAnnotations: {}
podLabels: {}
## How long (in seconds) a pod needs to be stable before progressing the deployment
##
minReadySeconds:
## How long (in seconds) a pod may take to exit (useful with lifecycle hooks to ensure lb deregistration is done)
##
terminationGracePeriodSeconds:
priorityClassName: ""
env: []
# - name: FOO
# value: "bar"
# The envWithTpl array below has the same usage as "env", but is using the tpl function to support templatable string.
# This can be useful when you want to pass dynamic values to the Chart using the helm argument "--set <variable>=<value>"
# https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function
envWithTpl: []
# - name: FOO_2
# value: "{{ .Values.foo2 }}"
#
# foo2: bar2
envFrom: []
# This supports either a structured array or a templatable string
extraContainers: []
# Array mode
# extraContainers:
# - name: do-something
# image: busybox
# command: ['do', 'something']
# String mode
# extraContainers: |-
# - name: do-something
# image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
# command: ['kubectl', 'version']
flush: 1
metricsPort: 2020
extraPorts: []
# - port: 5170
# containerPort: 5170
# protocol: TCP
# name: tcp
# nodePort: 30517
extraVolumes: []
extraVolumeMounts: []
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxUnavailable: 1
# Make use of a pre-defined configmap instead of the one templated here
existingConfigMap: ""
networkPolicy:
enabled: false
# ingress:
# from: []
luaScripts: {}
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File /fluent-bit/etc/parsers.conf
Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port {{ .Values.metricsPort }}
Health_Check On
## https://docs.fluentbit.io/manual/pipeline/inputs
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
multiline.parser docker, cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.*
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Read_From_Tail On
## https://docs.fluentbit.io/manual/pipeline/filters
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
## https://docs.fluentbit.io/manual/pipeline/outputs
outputs: |
[OUTPUT]
Name es
Match kube.*
Host elasticsearch-master
Logstash_Format On
Retry_Limit False
[OUTPUT]
Name es
Match host.*
Host elasticsearch-master
Logstash_Format On
Logstash_Prefix node
Retry_Limit False
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers
## This configuration is deprecated, please use `extraFiles` instead.
upstream: {}
## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: |
[PARSER]
Name docker_no_time
Format json
Time_Keep Off
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
# This allows adding more files with arbitrary filenames to /fluent-bit/etc/conf by providing key/value pairs.
# The key becomes the filename, the value becomes the file content.
extraFiles: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
# example.conf: |
# [OUTPUT]
# Name example
# Match foo.*
# Host bar
# The config volume is mounted by default, either to the existingConfigMap value, or the default of "fluent-bit.fullname"
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/conf
daemonSetVolumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: etcmachineid
hostPath:
path: /etc/machine-id
type: File
daemonSetVolumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
command:
- /fluent-bit/bin/fluent-bit
args:
- --workdir=/fluent-bit/etc
- --config=/fluent-bit/etc/conf/fluent-bit.conf
# This supports either a structured array or a templatable string
initContainers: []
# Array mode
# initContainers:
# - name: do-something
# image: bitnami/kubectl:1.22
# command: ['kubectl', 'version']
# String mode
# initContainers: |-
# - name: do-something
# image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
# command: ['kubectl', 'version']
logLevel: info
hotReload:
enabled: false
image:
repository: ghcr.io/jimmidyson/configmap-reload
tag: v0.11.1
digest:
pullPolicy: IfNotPresent
resources: {}

View File

@ -21,3 +21,7 @@
.idea/
*.tmproj
.vscode/
# Ignore unittest
tests/
*/__snapshot__/*

View File

@ -0,0 +1,12 @@
# Collector Chart Contributing Guide
All changes to the chart require a bump to the version in `Chart.yaml`. See the [Contributing Guide](https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/CONTRIBUTING.md#versioning) for our versioning requirements.
Once the chart version is bumped, the examples must be regenerated. You can regenerate examples by running `make generate-examples CHARTS=opentelemetry-collector`.
## Bumping Default Collector Version
1. Increase the minor version of the chart by one and set the patch version to zero.
2. Update the chart's `appVersion` to match the new collector version. This version will be used as the image tag by default.
3. Review the corresponding release notes in [Collector Core](https://github.com/open-telemetry/opentelemetry-collector/releases), [Collector Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases), and [Collector Releases](https://github.com/open-telemetry/opentelemetry-collector-releases/releases). If any changes affect the helm charts, adjust the helm chart accordingly.
4. Run `make generate-examples CHARTS=opentelemetry-collector`.
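For example, a default-version bump might look like the following; the version numbers and repository layout are illustrative:
```console
# bump the chart version and appVersion in Chart.yaml (values are examples)
yq -i '.version = "0.109.0" | .appVersion = "0.112.0"' charts/opentelemetry-collector/Chart.yaml
# regenerate the examples
make generate-examples CHARTS=opentelemetry-collector
```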

View File

@ -0,0 +1,15 @@
apiVersion: v2
appVersion: 0.111.0
description: OpenTelemetry Collector Helm chart for Kubernetes
home: https://opentelemetry.io/
icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png
maintainers:
- name: dmitryax
- name: jaronoff97
- name: TylerHelmuth
name: opentelemetry-collector
sources:
- https://github.com/open-telemetry/opentelemetry-collector
- https://github.com/open-telemetry/opentelemetry-collector-contrib
type: application
version: 0.108.0

View File

@ -0,0 +1,251 @@
# OpenTelemetry Collector Helm Chart
The Helm chart installs the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector)
in a Kubernetes cluster.
## Prerequisites
- Kubernetes 1.24+
- Helm 3.9+
## Installing the Chart
Add OpenTelemetry Helm repository:
```console
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
```
To install the chart with the release name my-opentelemetry-collector, run the following command:
```console
helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="otel/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
```
Where the `mode` value needs to be set to one of `daemonset`, `deployment` or `statefulset`.
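For example, to run the collector as an agent on every node:
```console
helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=daemonset --set image.repository="otel/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
```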
For an in-depth walk through getting started in Kubernetes using this helm chart, see [OpenTelemetry Kubernetes Getting Started](https://opentelemetry.io/docs/kubernetes/getting-started/).
## Upgrading
See [UPGRADING.md](UPGRADING.md).
## Security Considerations
The OpenTelemetry Collector recommends binding receivers' servers to addresses that limit connections to authorized users.
For this reason, by default the chart binds all the Collector's endpoints to the pod's IP.
More info is available in the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks).
Some care must be taken when using `hostNetwork: true`, as then OpenTelemetry Collector will listen on all the addresses in the host network namespace.
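If you do need a receiver reachable beyond the pod's IP, you can override its endpoint through `config`; this is a sketch of rebinding the OTLP gRPC receiver, not a recommendation:
```yaml
config:
  receivers:
    otlp:
      protocols:
        grpc:
          # binds to all interfaces; only do this with appropriate safeguards
          endpoint: 0.0.0.0:4317
```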
## Configuration
### Default configuration
By default this chart deploys an OpenTelemetry Collector with three pipelines (logs, metrics and traces)
and the debug exporter enabled. The collector can be installed as a daemonset (agent), deployment, or statefulset.
*Example*: Install collector as a deployment.
```yaml
mode: deployment
```
By default the collector has the following receivers enabled:
- **metrics**: OTLP and prometheus. Prometheus is configured only for scraping collector's own metrics.
- **traces**: OTLP, zipkin and jaeger (thrift and grpc).
- **logs**: OTLP (to enable container logs, see [Configuration for Kubernetes container logs](#configuration-for-kubernetes-container-logs)).
### Basic Top Level Configuration
The Collector's configuration is set via the `config` section. Default components can be removed with `null`. Remember that lists in helm are not merged, so if you want to modify any default list you must specify all items, including any default items you want to keep.
*Example*: Disable metrics and logs pipelines and non-otlp receivers:
```yaml
config:
receivers:
jaeger: null
prometheus: null
zipkin: null
service:
pipelines:
traces:
receivers:
- otlp
metrics: null
logs: null
```
The chart also provides several presets, detailed below, to help configure important Kubernetes components. For more details on each component, see [Kubernetes Collector Components](https://opentelemetry.io/docs/kubernetes/collector/components/).
### Configuration for Kubernetes Container Logs
The collector can be used to collect logs sent to standard output by Kubernetes containers.
This feature is disabled by default. It has the following requirements:
- It needs the agent collector to be deployed.
- It requires the [Filelog receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver) to be included in the collector, such as the [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.logsCollection.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
```
This feature works by adding a `filelog` receiver to the `logs` pipeline. This receiver is preconfigured
to read the files where the Kubernetes container runtime writes all containers' console output.
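As a rough sketch (an assumed shape, not the chart's exact rendered output), the preset merges something like this into the collector config:
```yaml
config:
  receivers:
    filelog:
      include:
        - /var/log/pods/*/*/*.log
      # the preset excludes the collector's own logs by default (see the warning below)
      exclude:
        - /var/log/pods/*_opentelemetry-collector*/*/*.log
      start_at: end
  service:
    pipelines:
      logs:
        receivers:
          - otlp
          - filelog
```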
#### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion"
The container logs pipeline uses the `debug` exporter by default.
Paired with the default `filelog` receiver that receives all containers' console output,
it is easy to accidentally feed the exported logs back into the receiver.
Also note that using the `--verbosity=detailed` option for the `debug` exporter causes it to output
multiple lines per single received log, which when looped, would amplify the logs exponentially.
To prevent the looping, the default configuration of the receiver excludes logs from the collector's containers.
If you want to include the collector's logs, make sure to replace the `debug` exporter
with an exporter that does not send logs to collector's standard output.
Here's an example `values.yaml` file that replaces the default `debug` exporter on the `logs` pipeline
with an `otlphttp` exporter that sends the container logs to the `https://example.com:55681` endpoint.
It also clears the `filelog` receiver's `exclude` property so that the collector's logs are included in the pipeline.
```yaml
mode: daemonset
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
config:
exporters:
otlphttp:
endpoint: https://example.com:55681
service:
pipelines:
logs:
exporters:
- otlphttp
```
### Configuration for Kubernetes Attributes Processor
The collector can be configured to add Kubernetes metadata, such as pod name and namespace name, as resource attributes to incoming logs, metrics and traces.
This feature is disabled by default. It has the following requirements:
- It requires the [Kubernetes Attributes processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.kubernetesAttributes.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
kubernetesAttributes:
enabled: true
# You can also configure the preset to add all of the associated pod's labels and annotations to your telemetry.
# The label/annotation name will become the resource attribute's key.
extractAllPodLabels: true
extractAllPodAnnotations: true
```
### Configuration for Retrieving Kubelet Metrics
The collector can be configured to collect node, pod, and container metrics from the API server on a kubelet.
This feature is disabled by default. It has the following requirements:
- It requires the [Kubeletstats receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.kubeletMetrics.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
kubeletMetrics:
enabled: true
```
### Configuration for Kubernetes Cluster Metrics
The collector can be configured to collect cluster-level metrics from the Kubernetes API server. A single instance of this receiver can be used to monitor a cluster.
This feature is disabled by default. It has the following requirements:
- It requires the [Kubernetes Cluster receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
- It requires statefulset or deployment mode with a single replica.
To enable this feature, set the `presets.clusterMetrics.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: deployment
replicaCount: 1
presets:
clusterMetrics:
enabled: true
```
### Configuration for Retrieving Kubernetes Events
The collector can be configured to collect Kubernetes events.
This feature is disabled by default. It has the following requirements:
- It requires [Kubernetes Objects receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.kubernetesEvents.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: deployment
replicaCount: 1
presets:
kubernetesEvents:
enabled: true
```
### Configuration for Host Metrics
The collector can be configured to collect host metrics for Kubernetes nodes.
This feature is disabled by default. It has the following requirements:
- It requires [Host Metrics receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.hostMetrics.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
hostMetrics:
enabled: true
```
## CRDs
At this time, Prometheus CRDs are supported but other CRDs are not.
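For example, assuming the chart's values expose a `serviceMonitor` toggle (check values.yaml) and the Prometheus Operator CRDs are already installed in the cluster, scraping might be enabled like this:
```yaml
mode: deployment
image:
  repository: "otel/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"
serviceMonitor:
  enabled: true
```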
### Other configuration options
The [values.yaml](./values.yaml) file contains information about all other configuration
options for this chart.
For more examples see [Examples](examples).

View File

@ -0,0 +1,419 @@
# Upgrade guidelines
These upgrade guidelines only contain instructions for version upgrades which require manual modifications on the user's side.
If the version you want to upgrade to is not listed here, then there is nothing to do for you.
Just upgrade and enjoy.
## 0.97.2 to 0.98.0
> [!WARNING]
> Critical content demanding immediate user attention due to potential risks.
The deprecated memory ballast extension has been removed from the default config. If you depend on this component you must manually configure `config.extensions` and `config.service.extensions` to include the memory ballast extension. Setting `useGOMEMLIMIT` to `false` will no longer keep the memory ballast extension in the rendered collector config.
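A minimal sketch of keeping the extension, assuming the upstream `memory_ballast` configuration keys; the other entries under `service.extensions` must match whatever extensions your rendered config actually enables:
```yaml
config:
  extensions:
    memory_ballast:
      # size the ballast relative to available memory
      size_in_percentage: 40
  service:
    extensions:
      - health_check
      - memory_ballast
```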
## 0.88.0 to 0.89.0
> [!WARNING]
> Critical content demanding immediate user attention due to potential risks.
As part of working towards using the [OpenTelemetry Collector Kubernetes Distro](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) by default, the chart now requires users to explicitly set an image repository. If you are already explicitly setting an image repository this breaking change does not affect you.
If you are using an OpenTelemetry Community distribution of the Collector we recommend you use `otel/opentelemetry-collector-k8s`, but carefully review the [components included in this distribution](https://github.com/open-telemetry/opentelemetry-collector-releases/blob/main/distributions/otelcol-k8s/manifest.yaml) to make sure it includes all the components you use in your configuration. In the future this distribution will become the default image used for the chart.
You can use the OpenTelemetry Collector Kubernetes Distro by adding these lines to your values.yaml:
```yaml
image:
repository: "otel/opentelemetry-collector-k8s"
```
If you want to stick with using the Contrib distribution, add these lines to your values.yaml:
```yaml
image:
repository: "otel/opentelemetry-collector-contrib"
```
For more details see [#1135](https://github.com/open-telemetry/opentelemetry-helm-charts/issues/1135).
## 0.84.0 to 0.85.0
The `loggingexporter` has been removed from the default configuration. Use the `debugexporter` instead.
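If your values explicitly referenced the logging exporter, a minimal migration sketch looks like the following; the pipeline shown is illustrative, and remember that lists are not merged, so list every component you keep:
```yaml
config:
  exporters:
    # replace references to the logging exporter with debug
    debug:
      verbosity: basic
  service:
    pipelines:
      traces:
        exporters:
          - debug
```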
## 0.78.2 to 0.78.3
[Update Health Check Extension's endpoints to use Pod IP Instead of 0.0.0.0](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/1012)
The [Collector's security guidelines were updated](https://github.com/open-telemetry/opentelemetry-collector/pull/6959) to include containerized environments when discussing safeguards against denial of service attacks.
To be in compliance with the Collector's security best practices the chart has been updated to use the Collector's pod IP in place of `0.0.0.0`.
The chart will continue to allow complete configuration of the Collector via the `config` field in the values.yaml. If the pod IP does not suit your needs you can use `config` to set something different.
See the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) for more details.
## 0.75.1 to 0.76.0
Enable the `useGOMEMLIMIT` feature flag by default. This means by default the chart now does not use the Memory Ballast Extension, and any custom configuration applied to the Memory Ballast Extension is ignored.
**If you're still interested in using the Memory Ballast Extension set this back to false.**
## 0.69.3 to 0.70.0
The following deprecated fields have been removed. Please use the new values (a migration sketch follows the list):
- `extraConfigMapMounts` -> `extraVolumes`
- `extraHostPathMounts` -> `extraVolumes`
- `secretMounts` -> `extraVolumes`
- `containerLogs` -> `presets.logsCollection`
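For example, a configMap that was previously mounted through `extraConfigMapMounts` can be expressed with `extraVolumes` plus `extraVolumeMounts` (all names below are illustrative):
```yaml
extraVolumes:
  - name: my-config
    configMap:
      name: my-configmap
extraVolumeMounts:
  - name: my-config
    mountPath: /etc/my-config
    readOnly: true
```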
## 0.69.0 to 0.69.1 & 0.69.2
The `loggingexporter` was replaced with the `debugexporter`. This ended up being an accidental breaking change for any user that depended on the default logging exporter config when explicitly listing the logging exporter in an exporter list.
When using versions `0.69.1` or `0.69.2` you should explicitly list the debug exporter instead of the logging exporter. Your other option is to skip these versions and use `0.69.3` or newer, which includes the logging exporter configuration.
**The logging exporter will be removed in a future version.** We highly recommend switching to the debug exporter.
## 0.67 to 0.68
The `presets.kubernetesEvents` preset now excludes `DELETED` watch types so that a log is not ingested when Kubernetes deletes an event.
The intention behind this change is to clean up the data ingested by the preset, as the `DELETED` update for a Kubernetes Event is
uninteresting. If you want to keep ingesting `DELETED` updates for Kubernetes Events you will need to configure the `k8sobjectsreceiver` manually.
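A sketch of such a manual configuration, using the `k8sobjects` receiver's documented `objects` form; not setting any exclusion for `DELETED` keeps those updates (values here are illustrative):
```yaml
config:
  receivers:
    k8sobjects:
      objects:
        # watch events and keep all watch types, including DELETED
        - name: events
          mode: watch
```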
## 0.62 to 0.63
The `kubernetesAttributes` preset now respects order of processors in logs, metrics and traces pipelines.
This might implicitly break your pipelines if you relied on the `k8sAttributes` processor being rendered as the first processor but also explicitly listed it elsewhere in the signal's pipeline.
## 0.55.2 to 0.56
The `tpl` function has been added to references of pod labels and ingress hosts. This adds the ability to make chart
values reusable by referencing global values. If you are currently using any `{{ }}` syntax in pod labels or ingress hosts it will now be rendered. To escape existing instances of `{{ }}`, use ``` {{` <original content> `}} ```.
```yaml
global:
region: us-east-1
environment: stage
# Tests `tpl` function reference used in pod labels and
# ingress.hosts[*]
podLabels:
environment: "{{ .Values.global.environment }}"
ingress:
enabled: true
hosts:
- host: "otlp-collector-{{ .Values.global.region }}-{{ .Values.global.environment }}-example.dev"
paths:
- path: /
pathType: Prefix
port: 4318
```
Note that only global Helm values can be referenced as the Helm Chart schema currently does not allow `additionalValues`.
## 0.55.0 to 0.55.1
As of v0.55.1, the Collector chart uses `${env:ENV}` style syntax when getting environment variables, and the `${env:ENV}` syntax is not supported before collector 0.71. If you upgrade the collector chart to v0.55.1, you need to make sure your collector version is newer than 0.71 (the default is v0.76.1).
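For illustration, a config snippet using the new syntax; the exporter and environment variable name are hypothetical:
```yaml
config:
  exporters:
    otlphttp:
      # resolved from the environment at collector startup
      endpoint: ${env:OTLP_ENDPOINT}
```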
## 0.53.1 to 0.54.0
As of v0.54.0 of the Collector chart, the default resource limits are removed. If you want to keep the old values you can use the following configuration:
```yaml
resources:
limits:
# CPU units are in fractions of 1000; memory in powers of 2
cpu: 250m
memory: 512Mi
```
See [issue 644](https://github.com/open-telemetry/opentelemetry-helm-charts/issues/644) for more information.
## 0.46.0 to 0.47.0
[Update Collector Endpoints to use Pod IP Instead of 0.0.0.0](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/603)
The [Collector's security guidelines were updated](https://github.com/open-telemetry/opentelemetry-collector/pull/6959) to include containerized environments when discussing safeguards against denial of service attacks.
To be in compliance with the Collector's security best practices the chart has been updated to use the Collector's pod IP in place of `0.0.0.0`.
The chart will continue to allow complete configuration of the Collector via the `config` field in the values.yaml. If the pod IP does not suit your needs you can use `config` to set something different.
See the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) for more details.
The new default of binding to the pod IP, rather than `0.0.0.0`, will cause `kubectl port-forward` to fail. If port-forwarding is desired, the following `value.yaml` snippet will allow the Collector bind to `127.0.0.1` inside the pod, in addition to the pod's IP:
```yaml
config:
receivers:
jaeger/local:
protocols:
grpc:
endpoint: 127.0.0.1:14250
thrift_compact:
endpoint: 127.0.0.1:6831
thrift_http:
endpoint: 127.0.0.1:14268
otlp/local:
protocols:
grpc:
endpoint: 127.0.0.1:4317
http:
endpoint: 127.0.0.1:4318
zipkin/local:
endpoint: 127.0.0.1:9411
service:
pipelines:
traces:
receivers:
- otlp
- otlp/local
- jaeger
- jaeger/local
- zipkin
- zipkin/local
```
## 0.40.7 to 0.41.0
[Require Kubernetes version 1.23 or later](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/541)
If you enable use of a _HorizontalPodAutoscaler_ for the collector when running in the "deployment" mode by way of `.Values.autoscaling.enabled`, the manifest now uses the "autoscaling/v2" API group version, which [is available only as recently as Kubernetes version 1.23](https://kubernetes.io/blog/2021/12/07/kubernetes-1-23-release-announcement/#horizontalpodautoscaler-v2-graduates-to-ga). As [all previous versions of this API group are deprecated and removed as of Kubernetes version 1.26](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#horizontalpodautoscaler-v126), we don't offer support for Kubernetes versions older than 1.23.
## 0.34.0 to 0.34.1
[config supports templating](TBD)
The chart now supports templating in `.Values.config`. If you are currently using any `{{ }}` syntax in `.Values.yaml` it will now be rendered. To escape existing instances of `{{ }}`, use ``` {{` <original content> `}} ```. For example, `{{ REDACTED_EMAIL }}` becomes ``` {{` {{ REDACTED_EMAIL }} `}} ```.
## 0.28.0 to 0.29.0
[Reduce requested resources](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/273)
Resource `limits` have been reduced. Upgrades/installs of chart 0.29.0 will now use fewer resources. In order to set the resources back to what they were, you will need to override the `resources` section in the `values.yaml`.
*Example*:
```yaml
resources:
limits:
cpu: 1
memory: 2Gi
```
## 0.23.1 to 0.24.0
[Remove containerLogs in favor of presets.logsCollection]()
The ability to enable logs collection from the collector has been moved from `containerLogs.enabled` to `presets.logsCollection.enabled`. If you are currently using `containerLogs.enabled`, you should instead use the preset:
```yaml
presets:
logsCollection:
enabled: true
```
If you are using `containerLogs.enabled` and also enabling collection of the collector logs you can use `includeCollectorLogs`
```yaml
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
```
You no longer need to update `config.service.pipelines.logs` to include the filelog receiver yourself as the preset will automatically update the logs pipeline to include the filelog receiver.
The filelog's preset configuration can modified by `config.receivers`, but preset configuration cannot be removed. If you need to remove any filelog receiver configuration generated by the preset you should not use the preset. Instead, configure the filelog receiver manually in `config.receivers` and set any other necessary fields in the values.yaml to modify k8s as needed.
See the [daemonset-collector-logs example](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-collector/examples/daemonset-collector-logs) to see an example of the preset in action.
## 0.18.0 to 0.19.0
[Remove agentCollector and standaloneCollector settings](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/216)
The `agentCollector` and `standaloneCollector` config sections have been removed. Upgrades/installs of chart 0.19.0 will fail if `agentCollector` or `standaloneCollector` are in the values.yaml. See the [Migrate to mode](#migrate-to-mode) steps for instructions on how to replace `agentCollector` and `standaloneCollector` with `mode`.
## 0.13.0 to 0.14.0
[Remove two-deployment mode](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/159)
The ability to install both the agent and standalone collectors simultaneously with the chart has been removed. Installs/upgrades where both `.Values.agentCollector.enabled` and `.Values.standaloneCollector.enabled` are true will fail. `agentCollector` and `standaloneCollector` have also been deprecated, but backward compatibility has been maintained.
### To run both a deployment and daemonset
Install a deployment version of the collector. This is done by setting `.Values.mode` to `deployment`
```yaml
mode: deployment
```
Next, install a daemonset version of the collector that is configured to send traffic to the previously installed deployment. This is done by setting `.Values.mode` to `daemonset` and updating `.Values.config` so that data is exported to the deployment.
```yaml
mode: daemonset
config:
exporters:
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
service:
pipelines:
logs:
exporters:
- otlp
- logging
metrics:
exporters:
- otlp
- logging
traces:
exporters:
- otlp
- logging
```
See the [daemonset-and-deployment](examples/daemonset-and-deployment) example to see the rendered config.
### Migrate to `mode`:
The `agentCollector` and `standaloneCollector` sections in values.yaml have been deprecated. Instead there is a new field, `mode`, that determines if the collector is being installed as a daemonset or deployment.
```yaml
# Valid values are "daemonset" and "deployment".
# If set, agentCollector and standaloneCollector are ignored.
mode: <daemonset|deployment>
```
The following fields have also been added to the root level to replace the deprecated `agentCollector` and `standaloneCollector` settings.
```yaml
containerLogs:
enabled: false
resources:
limits:
cpu: 1
memory: 2Gi
podAnnotations: {}
podLabels: {}
# Host networking requested for this pod. Use the host's network namespace.
hostNetwork: false
# only used with deployment mode
replicaCount: 1
annotations: {}
```
When using `mode`, these settings should be used instead of their counterparts in `agentCollector` and `standaloneCollector`.
Set `mode` to `daemonset` if `agentCollector` was being used. Move all `agentCollector` settings to the corresponding root-level setting. If `agentCollector.configOverride` was being used, merge the settings with `.Values.config`.
Example agentCollector values.yaml:
```yaml
agentCollector:
resources:
limits:
cpu: 3
memory: 6Gi
configOverride:
receivers:
hostmetrics:
scrapers:
cpu:
disk:
filesystem:
service:
pipelines:
metrics:
receivers: [otlp, prometheus, hostmetrics]
```
Example mode values.yaml:
```yaml
mode: daemonset
resources:
limits:
cpu: 3
memory: 6Gi
config:
receivers:
hostmetrics:
scrapers:
cpu:
disk:
filesystem:
service:
pipelines:
metrics:
receivers: [otlp, prometheus, hostmetrics]
```
Set `mode` to `deployment` if `standaloneCollector` was being used. Move all `standaloneCollector` settings to the corresponding root-level setting. If `standaloneCollector.configOverride` was being used, merge the settings with `.Values.config`.
Example standaloneCollector values.yaml:
```yaml
standaloneCollector:
enabled: true
replicaCount: 2
configOverride:
receivers:
podman_stats:
endpoint: unix://run/podman/podman.sock
timeout: 10s
collection_interval: 10s
service:
pipelines:
metrics:
receivers: [otlp, prometheus, podman_stats]
```
Example mode values.yaml:
```yaml
mode: deployment
replicaCount: 2
config:
receivers:
podman_stats:
endpoint: unix://run/podman/podman.sock
timeout: 10s
collection_interval: 10s
service:
pipelines:
metrics:
receivers: [otlp, prometheus, podman_stats]
```
Default configuration in `.Values.config` can now be removed with `null`. When changing a pipeline, you must explicitly list all the components that are in the pipeline, including any default components.
*Example*: Disable metrics and logging pipelines and non-otlp receivers:
```yaml
config:
receivers:
jaeger: null
prometheus: null
zipkin: null
service:
pipelines:
traces:
receivers:
- otlp
metrics: null
logs: null
```

View File

@ -0,0 +1,9 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
useGOMEMLIMIT: true

View File

@ -0,0 +1,27 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
clusterRole:
create: true
name: "testing-clusterrole"
rules:
- apiGroups:
- ''
resources:
- 'pods'
- 'nodes'
verbs:
- 'get'
- 'list'
- 'watch'
clusterRoleBinding:
name: "testing-clusterrolebinding"
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,26 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
config:
receivers:
jaeger: null
otlp: null
zipkin: null
hostmetrics:
scrapers:
cpu:
disk:
filesystem:
service:
pipelines:
metrics:
receivers:
- prometheus
- hostmetrics
traces: null
logs: null

View File

@ -0,0 +1,12 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,29 @@
global:
test: templated-value
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
# Tests `tpl` function reference used in pod labels and
# ingress.hosts[*]
podLabels:
testLabel: "{{ .Values.global.test }}"
ingress:
enabled: true
hosts:
- host: "{{ .Values.global.test }}"
paths:
- path: /
pathType: Prefix
port: 4318

View File

@ -0,0 +1,21 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
ports:
jaeger-compact:
enabled: false
jaeger-thrift:
enabled: false
jaeger-grpc:
enabled: false
zipkin:
enabled: false
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,15 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 10
behavior: {}
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

View File

@ -0,0 +1,15 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 10
behavior: {}
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

View File

@ -0,0 +1,49 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
ingress:
enabled: true
ingressClassName: nginx
annotations:
test.io/collector: default
hosts:
- host: defaultcollector.example.com
paths:
- path: /
pathType: Prefix
port: 4318
additionalIngresses:
- name: additional-basic
hosts:
- host: additional-basic.example.com
paths:
- path: /
pathType: Prefix
port: 4318
- name: additional-advanced
ingressClassName: nginx
annotations:
test.io/ingress: additional-advanced
hosts:
- host: additional-advanced.example.com
paths:
- path: /
pathType: Exact
port: 4318
tls:
- secretName: somesecret
hosts:
- additional-advanced.example.com

View File

@ -0,0 +1,37 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
networkPolicy:
enabled: true
allowIngressFrom:
- namespaceSelector: {}
- ipBlock:
cidr: 127.0.0.1/32
extraIngressRules:
- ports:
- port: metrics
protocol: TCP
from:
- ipBlock:
cidr: 127.0.0.1/32
egressRules:
- to:
- podSelector:
matchLabels:
app: jaeger
ports:
- port: 4317
protocol: TCP

View File

@ -0,0 +1,15 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
networkPolicy:
enabled: true

View File

@ -0,0 +1,16 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
clusterMetrics:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
hostMetrics:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
kubernetesEvents:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
kubeletMetrics:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
kubernetesAttributes:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,17 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,37 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
livenessProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 2
terminationGracePeriodSeconds: 40
httpGet:
port: 8989
path: /healthz
readinessProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
successThreshold: 2
failureThreshold: 2
httpGet:
port: 8989
path: /healthz
startupProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 10
terminationGracePeriodSeconds: 40
httpGet:
port: 8989
path: /healthz

View File

@ -0,0 +1,13 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 2
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,17 @@
# Examples of chart configuration
Here is a collection of common configurations for the OpenTelemetry Collector. Each folder contains an example `values.yaml` and the manifests that the opentelemetry-collector Helm chart generates from it.
- [Daemonset only](daemonset-only)
- [Deployment only](deployment-only)
- [Daemonset and deployment](daemonset-and-deployment)
- [Log collection, including collector logs](daemonset-collector-logs)
- [Add component (hostmetrics)](daemonset-hostmetrics)
The manifests are rendered with the `helm template` command using each example folder's `values.yaml`. Examples are generated from the root of the repo with:
```sh
make generate-examples CHARTS=opentelemetry-collector
```
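To render a single example by hand, something like the following should work (a minimal sketch: the `open-telemetry` repo alias and the output directory are assumptions, and `make generate-examples` remains the canonical path):
```sh
# Add the upstream chart repo (skip if already added)
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts

# Render one example folder's values against the chart;
# the release name "example" matches the rendered manifests above
helm template example open-telemetry/opentelemetry-collector \
  --namespace default \
  --values daemonset-only/values.yaml \
  --output-dir daemonset-only/rendered
```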

View File

@ -0,0 +1,41 @@
---
# Source: opentelemetry-collector/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["daemonsets", "deployments", "replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch", "list"]

View File

@ -0,0 +1,21 @@
---
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: example-opentelemetry-collector
subjects:
- kind: ServiceAccount
name: example-opentelemetry-collector
namespace: default

View File

@ -0,0 +1,68 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
processors:
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.deployment.name
- k8s.statefulset.name
- k8s.daemonset.name
- k8s.cronjob.name
- k8s.job.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
receivers:
k8s_cluster:
collection_interval: 10s
k8sobjects:
objects:
- exclude_watch_type:
- DELETED
group: events.k8s.io
mode: watch
name: events
service:
pipelines:
logs:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8sobjects
metrics:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8s_cluster

View File

@ -0,0 +1,100 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 360fc84164ca26f5a57ecb44cbcec02ca473b09fc86dba876f71c9fa3617f656
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "3276MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 2
memory: 4Gi
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -0,0 +1,49 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,34 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 1
resources:
limits:
cpu: 2
memory: 4Gi
presets:
clusterMetrics:
enabled: true
kubernetesAttributes:
enabled: true
kubernetesEvents:
enabled: true
alternateConfig:
exporters:
debug: {}
service:
pipelines:
logs:
exporters:
- debug
metrics:
exporters:
- debug

View File

@ -0,0 +1,34 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
config:
exporters:
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
service:
pipelines:
logs:
exporters:
- otlp
- debug
metrics:
exporters:
- otlp
- debug
traces:
exporters:
- otlp
- debug
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,13 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,93 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,86 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,104 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: a2d0d31bd929305e52879f78f502d56ad49d9ef9396838490646e9034d2243de
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -0,0 +1,100 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -0,0 +1,49 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,100 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
filelog:
exclude: []
include:
- /var/log/pods/*/*/*.log
include_file_name: false
include_file_path: true
operators:
- id: container-parser
max_log_size: 102400
type: container
retry_on_failure:
enabled: true
start_at: end
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- filelog
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,110 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 5237e54a1cdaad762876da10a5bab6f686506211aaa2c70b901a74fec8b82140
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: varlogpods
mountPath: /var/log/pods
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: varlogpods
hostPath:
path: /var/log/pods
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
hostNetwork: false

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,12 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
logsCollection:
enabled: true
includeCollectorLogs: true

View File

@ -0,0 +1,133 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
hostmetrics:
collection_interval: 10s
root_path: /hostfs
scrapers:
cpu: null
disk: null
filesystem:
exclude_fs_types:
fs_types:
- autofs
- binfmt_misc
- bpf
- cgroup2
- configfs
- debugfs
- devpts
- devtmpfs
- fusectl
- hugetlbfs
- iso9660
- mqueue
- nsfs
- overlay
- proc
- procfs
- pstore
- rpc_pipefs
- securityfs
- selinuxfs
- squashfs
- sysfs
- tracefs
match_type: strict
exclude_mount_points:
match_type: regexp
mount_points:
- /dev/*
- /proc/*
- /sys/*
- /run/k3s/containerd/*
- /var/lib/docker/*
- /var/lib/kubelet/*
- /snap/*
load: null
memory: null
network: null
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
- hostmetrics
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,105 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 98dea268c8a8fe987e082a4e85801387f2b60fefc281f9b1edd1080f0af62574
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: hostfs
mountPath: /hostfs
readOnly: true
mountPropagation: HostToContainer
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: hostfs
hostPath:
path: /
hostNetwork: false

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,12 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
hostMetrics:
enabled: true

View File

@ -0,0 +1,86 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

Some files were not shown because too many files have changed in this diff.