feat: latest telemetry updates, logging version bump

Stefan Reimer 2024-10-28 14:15:54 +00:00
parent 5ff5e42cc2
commit b8f1991095
164 changed files with 8184 additions and 3140 deletions

View File

@ -117,7 +117,7 @@ spec:
containers:
- name: aws-iam-authenticator
-image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.22
+image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.27
args:
- server
- --backend-mode=CRD,MountedFile

View File

@ -1,6 +1,6 @@
# kubezero-logging
-![Version: 0.8.12](https://img.shields.io/badge/Version-0.8.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
+![Version: 0.8.13](https://img.shields.io/badge/Version-0.8.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@ -19,7 +19,7 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://fluent.github.io/helm-charts | fluent-bit | 0.46.2 |
+| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
## Changes from upstream

View File

@ -1,9 +1,9 @@
annotations:
artifacthub.io/changes: |
- kind: changed
-description: "Updated _Fluent Bit_ OCI image to [v3.0.2](https://github.com/fluent/fluent-bit/releases/tag/v3.0.2)."
+description: "Updated Fluent Bit OCI image to v3.1.9"
apiVersion: v1
-appVersion: 3.0.2
+appVersion: 3.1.9
description: Fast and lightweight log processor and forwarder or Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
@ -24,4 +24,4 @@ maintainers:
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
-version: 0.46.2
+version: 0.47.10

View File

@ -5,3 +5,4 @@ logLevel: debug
dashboards:
enabled: true
+deterministicUid: true

View File

@ -1559,7 +1559,7 @@
},
"timezone": "",
"title": "{{ include "fluent-bit.fullname" . }}",
-"uid": "d557c8f6-cac1-445f-8ade-4c351a9076b1",
+"uid": {{ ternary (printf "\"%s\"" (sha1sum (printf "%s-%s" .Release.Namespace (include "fluent-bit.fullname" .)))) "null" .Values.dashboards.deterministicUid }},
"version": 7,
"weekStart": ""
}
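Read together with the new `dashboards.deterministicUid` value further down, this template change swaps the hard-coded Grafana dashboard UID for a sha1 derived from the release namespace and fullname, so the UID stays stable across reinstalls. A minimal values snippet to opt in (a sketch, assuming the flag is set on the fluent-bit subchart values) would look like:

```yaml
# Hypothetical user values: opt in to a stable, namespace-derived dashboard UID
dashboards:
  enabled: true
  deterministicUid: true   # chart default remains false
```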

View File

@ -119,7 +119,11 @@ containers:
{{- end }}
{{- end }}
{{- if .Values.extraContainers }}
+{{- if kindIs "string" .Values.extraContainers }}
+{{- tpl .Values.extraContainers $ | nindent 2 }}
+{{- else }}
{{- toYaml .Values.extraContainers | nindent 2 }}
+{{- end -}}
{{- end }}
volumes:
- name: config

View File

@ -13,6 +13,7 @@ rules:
- pods
{{- if .Values.rbac.nodeAccess }}
- nodes
+- nodes/metrics
- nodes/proxy
{{- end }}
{{- if .Values.rbac.eventsAccess }}

View File

@ -17,6 +17,9 @@ spec:
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
+{{- if .Values.service.externalIPs }}
+externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }}
+{{- end }}
{{- if (eq .Values.kind "DaemonSet") }}
{{- with .Values.service.internalTrafficPolicy }}
internalTrafficPolicy: {{ . }}

View File

@ -101,6 +101,10 @@ service:
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
# prometheus.io/scrape: "true"
+externalIPs: []
+# externalIPs:
+# - 2.2.2.2
serviceMonitor:
enabled: false
@ -178,6 +182,7 @@ dashboards:
labelValue: 1
annotations: {}
namespace: ""
+deterministicUid: false
lifecycle: {}
# preStop:
@ -314,11 +319,21 @@ envWithTpl: []
envFrom: []
+# This supports either a structured array or a templatable string
extraContainers: []
+# Array mode
+# extraContainers:
# - name: do-something
# image: busybox
# command: ['do', 'something']
+# String mode
+# extraContainers: |-
+# - name: do-something
+# image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
+# command: ['kubectl', 'version']
flush: 1
metricsPort: 2020

View File

@ -9,7 +9,7 @@ FLUENT_BIT_VERSION=$(yq eval '.dependencies[] | select(.name=="fluent-bit") | .v
FLUENTD_VERSION=$(yq eval '.dependencies[] | select(.name=="fluentd") | .version' Chart.yaml)
# fluent-bit
-patch_chart fluent-bit
+# patch_chart fluent-bit
# FluentD
patch_chart fluentd

View File

@ -1,6 +1,6 @@
# kubezero-telemetry
-![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
@ -19,10 +19,10 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://fluent.github.io/helm-charts | fluent-bit | 0.47.1 |
+| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
-| https://jaegertracing.github.io/helm-charts | jaeger | 3.1.1 |
+| https://jaegertracing.github.io/helm-charts | jaeger | 3.3.1 |
-| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.97.1 |
+| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.108.0 |
| https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.1.0 |
## Values
@ -170,19 +170,14 @@ Kubernetes: `>= 1.26.0`
| opensearch.dashboard.istio.url | string | `"telemetry-dashboard.example.com"` | |
| opensearch.nodeSets | list | `[]` | |
| opensearch.prometheus | bool | `false` | |
-| opensearch.version | string | `"2.16.0"` | |
+| opensearch.version | string | `"2.17.0"` | |
| opentelemetry-collector.config.exporters.otlp/data-prepper.endpoint | string | `"telemetry-data-prepper:21890"` | |
| opentelemetry-collector.config.exporters.otlp/data-prepper.tls.insecure | bool | `true` | |
| opentelemetry-collector.config.exporters.otlp/jaeger.endpoint | string | `"telemetry-jaeger-collector:4317"` | |
| opentelemetry-collector.config.exporters.otlp/jaeger.tls.insecure | bool | `true` | |
| opentelemetry-collector.config.extensions.health_check.endpoint | string | `"${env:MY_POD_IP}:13133"` | |
-| opentelemetry-collector.config.extensions.memory_ballast | object | `{}` | |
-| opentelemetry-collector.config.processors.batch | object | `{}` | |
-| opentelemetry-collector.config.processors.memory_limiter | string | `nil` | |
-| opentelemetry-collector.config.receivers.jaeger | string | `nil` | |
| opentelemetry-collector.config.receivers.otlp.protocols.grpc.endpoint | string | `"${env:MY_POD_IP}:4317"` | |
| opentelemetry-collector.config.receivers.otlp.protocols.http.endpoint | string | `"${env:MY_POD_IP}:4318"` | |
-| opentelemetry-collector.config.receivers.zipkin | string | `nil` | |
| opentelemetry-collector.config.service.extensions[0] | string | `"health_check"` | |
| opentelemetry-collector.config.service.extensions[1] | string | `"memory_ballast"` | |
| opentelemetry-collector.config.service.pipelines.logs | string | `nil` | |

View File

@ -1,27 +0,0 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Updated Fluent Bit OCI image to v3.1.1."
apiVersion: v1
appVersion: 3.1.1
description: Fast and lightweight log processor and forwarder or Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/fluentd/fluentbit/icon/fluentbit-icon-color.svg
keywords:
- logging
- fluent-bit
- fluentd
maintainers:
- email: eduardo@calyptia.com
name: edsiper
- email: naseem@transit.app
name: naseemkullah
- email: towmeykaw@gmail.com
name: Towmeykaw
- email: steve.hipwell@gmail.com
name: stevehipwell
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.47.1

View File

@ -1,57 +0,0 @@
# Fluent Bit Helm chart
[Fluent Bit](https://fluentbit.io) is a fast and lightweight log processor and forwarder for Linux, OSX and BSD family operating systems.
## Installation
To add the `fluent` helm repo, run:
```sh
helm repo add fluent https://fluent.github.io/helm-charts
```
To install a release named `fluent-bit`, run:
```sh
helm install fluent-bit fluent/fluent-bit
```
## Chart values
```sh
helm show values fluent/fluent-bit
```
## Using Lua scripts
Fluent Bit allows us to build filters to modify incoming records using custom [Lua scripts](https://docs.fluentbit.io/manual/pipeline/filters/lua).
### How to use Lua scripts with this Chart
First, you should add your Lua scripts to `luaScripts` in values.yaml, for example:
```yaml
luaScripts:
filter_example.lua: |
function filter_name(tag, timestamp, record)
-- put your lua code here.
end
```
After that, the Lua scripts will be ready to be used as filters. So the next step is to add your Fluent Bit [filter](https://docs.fluentbit.io/manual/concepts/data-pipeline/filter) to `config.filters` in values.yaml, for example:
```yaml
config:
filters: |
[FILTER]
Name lua
Match <your-tag>
script /fluent-bit/scripts/filter_example.lua
call filter_name
```
Under the hood, the chart will:
- Create a configmap using `luaScripts`.
- Add a volumeMount for each Lua script using the path `/fluent-bit/scripts/<script>`.
- Add the Lua script's configmap as volume to the pod.
### Note
Remember to set the `script` attribute in the filter using `/fluent-bit/scripts/`, otherwise the file will not be found by fluent bit.

View File

@ -1,7 +0,0 @@
testFramework:
enabled: true
logLevel: debug
dashboards:
enabled: true

View File

@ -1,6 +0,0 @@
Get Fluent Bit build information by running these commands:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "fluent-bit.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 2020:2020
curl http://127.0.0.1:2020

View File

@ -1,138 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "fluent-bit.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "fluent-bit.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "fluent-bit.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "fluent-bit.labels" -}}
helm.sh/chart: {{ include "fluent-bit.chart" . }}
{{ include "fluent-bit.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "fluent-bit.selectorLabels" -}}
app.kubernetes.io/name: {{ include "fluent-bit.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "fluent-bit.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "fluent-bit.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Fluent-bit image with tag/digest
*/}}
{{- define "fluent-bit.image" -}}
{{- $tag := ternary "" (printf ":%s" (toString .tag)) (or (empty .tag) (eq "-" (toString .tag))) -}}
{{- $digest := ternary "" (printf "@%s" .digest) (empty .digest) -}}
{{- printf "%s%s%s" .repository $tag $digest -}}
{{- end -}}
{{/*
Ingress ApiVersion according k8s version
*/}}
{{- define "fluent-bit.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1
{{- else if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") (semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1beta1
{{- else -}}
extensions/v1beta1
{{- end }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "fluent-bit.ingress.isStable" -}}
{{- eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports ingressClassName.
*/}}
{{- define "fluent-bit.ingress.supportsIngressClassName" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "fluent-bit.ingress.supportsPathType" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Pdb apiVersion according k8s version and capabilities
*/}}
{{- define "fluent-bit.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion) -}}
policy/v1
{{- else -}}
policy/v1beta1
{{- end }}
{{- end -}}
{{/*
HPA ApiVersion according k8s version
Check legacy first so helm template / kustomize will default to latest version
*/}}
{{- define "fluent-bit.hpa.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "autoscaling/v2beta2") (semverCompare "<1.23-0" .Capabilities.KubeVersion.GitVersion) -}}
autoscaling/v2beta2
{{- else -}}
autoscaling/v2
{{- end -}}
{{- end -}}
{{/*
Create the name of OpenShift SecurityContextConstraints to use
*/}}
{{- define "fluent-bit.openShiftSccName" -}}
{{- if not .Values.openShift.securityContextConstraints.create -}}
{{- printf "%s" .Values.openShift.securityContextConstraints.existingName -}}
{{- else -}}
{{- printf "%s" (default (include "fluent-bit.fullname" .) .Values.openShift.securityContextConstraints.name) -}}
{{- end -}}
{{- end -}}

View File

@ -1,155 +0,0 @@
{{- define "fluent-bit.pod" -}}
serviceAccountName: {{ include "fluent-bit.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
hostNetwork: {{ .Values.hostNetwork }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.hostAliases }}
hostAliases:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 2 }}
{{- else }}
{{- toYaml . | nindent 2 }}
{{- end -}}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
{{- end }}
image: {{ include "fluent-bit.image" (merge .Values.image (dict "tag" (default .Chart.AppVersion .Values.image.tag))) | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or .Values.env .Values.envWithTpl }}
env:
{{- with .Values.env }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- range $item := .Values.envWithTpl }}
- name: {{ $item.name }}
value: {{ tpl $item.value $ | quote }}
{{- end }}
{{- end }}
{{- if .Values.envFrom }}
envFrom:
{{- toYaml .Values.envFrom | nindent 6 }}
{{- end }}
{{- with .Values.command }}
command:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if or .Values.args .Values.hotReload.enabled }}
args:
{{- toYaml .Values.args | nindent 6 }}
{{- if .Values.hotReload.enabled }}
- --enable-hot-reload
{{- end }}
{{- end}}
ports:
- name: http
containerPort: {{ .Values.metricsPort }}
protocol: TCP
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
containerPort: {{ .containerPort }}
protocol: {{ .protocol }}
{{- end }}
{{- end }}
{{- with .Values.lifecycle }}
lifecycle:
{{- toYaml . | nindent 6 }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 6 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 6 }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/conf
{{- if or .Values.luaScripts .Values.hotReload.enabled }}
- name: luascripts
mountPath: /fluent-bit/scripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.hotReload.enabled }}
- name: reloader
image: {{ include "fluent-bit.image" .Values.hotReload.image }}
args:
- {{ printf "-webhook-url=http://localhost:%s/api/v2/reload" (toString .Values.metricsPort) }}
- -volume-dir=/watch/config
- -volume-dir=/watch/scripts
volumeMounts:
- name: config
mountPath: /watch/config
- name: luascripts
mountPath: /watch/scripts
{{- with .Values.hotReload.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.extraContainers }}
{{- if kindIs "string" .Values.extraContainers }}
{{- tpl .Values.extraContainers $ | nindent 2 }}
{{- else }}
{{- toYaml .Values.extraContainers | nindent 2 }}
{{- end -}}
{{- end }}
volumes:
- name: config
configMap:
name: {{ default (include "fluent-bit.fullname" .) .Values.existingConfigMap }}
{{- if or .Values.luaScripts .Values.hotReload.enabled }}
- name: luascripts
configMap:
name: {{ include "fluent-bit.fullname" . }}-luascripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumes | nindent 2 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 2 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end -}}

View File

@ -1,45 +0,0 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
{{- if .Values.rbac.nodeAccess }}
- nodes
- nodes/proxy
{{- end }}
{{- if .Values.rbac.eventsAccess }}
- events
{{- end }}
verbs:
- get
- list
- watch
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ include "fluent-bit.fullname" . }}
verbs:
- use
{{- end }}
{{- if .Values.openShift.enabled }}
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- {{ include "fluent-bit.openShiftSccName" . }}
verbs:
- use
{{- end }}
{{- end -}}

View File

@ -1,16 +0,0 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "fluent-bit.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "fluent-bit.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@ -1,21 +0,0 @@
{{- if .Values.dashboards.enabled -}}
{{- range $path, $_ := .Files.Glob "dashboards/*.json" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" $ }}-dashboard-{{ trimSuffix ".json" (base $path) }}
namespace: {{ default $.Release.Namespace $.Values.dashboards.namespace }}
{{- with $.Values.dashboards.annotations }}
annotations:
{{- toYaml . | nindent 4 -}}
{{- end }}
labels:
{{- include "fluent-bit.labels" $ | nindent 4 }}
{{ $.Values.dashboards.labelKey }}: {{ $.Values.dashboards.labelValue | quote }}
data:
{{ include "fluent-bit.fullname" $ }}-{{ base $path }}: |
{{- tpl ($.Files.Get $path) $ | nindent 4 }}
---
{{- end }}
{{- end -}}

View File

@ -1,13 +0,0 @@
{{- if or .Values.luaScripts .Values.hotReload.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}-luascripts
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
{{ range $key, $value := .Values.luaScripts }}
{{ $key }}: {{ $value | quote }}
{{ end }}
{{- end -}}

View File

@ -1,25 +0,0 @@
{{- if not .Values.existingConfigMap -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
custom_parsers.conf: |
{{- (tpl .Values.config.customParsers $) | nindent 4 }}
fluent-bit.conf: |
{{- (tpl .Values.config.service $) | nindent 4 }}
{{- (tpl .Values.config.inputs $) | nindent 4 }}
{{- (tpl .Values.config.filters $) | nindent 4 }}
{{- (tpl .Values.config.outputs $) | nindent 4 }}
{{- range $key, $val := .Values.config.upstream }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- range $key, $val := .Values.config.extraFiles }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -1,48 +0,0 @@
{{- if eq .Values.kind "DaemonSet" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or (not .Values.hotReload.enabled) .Values.podAnnotations }}
annotations:
{{- if not .Values.hotReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.luaScripts }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- end }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@ -1,51 +0,0 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
{{- with .Values.updateStrategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or (not .Values.hotReload.enabled) .Values.podAnnotations }}
annotations:
{{- if not .Values.hotReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.luaScripts }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- end }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@ -1,65 +0,0 @@
{{- $ingressApiIsStable := eq (include "fluent-bit.ingress.isStable" .) "true" -}}
{{- $ingressSupportsIngressClassName := eq (include "fluent-bit.ingress.supportsIngressClassName" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "fluent-bit.ingress.supportsPathType" .) "true" -}}
{{- $fullName := include "fluent-bit.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and ( eq .Values.kind "Deployment" ) .Values.ingress.enabled }}
apiVersion: {{ include "fluent-bit.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- range $key, $value := . }}
{{ printf "%s: %s" $key ((tpl $value $) | quote) }}
{{- end }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
{{- with .secretName }}
secretName: {{ . }}
{{- end }}
{{- end }}
{{- end }}
rules:
{{- range concat .Values.ingress.hosts .Values.ingress.extraHosts }}
- host: {{ .host | quote }}
http:
paths:
- path: /
{{- if $ingressSupportsPathType }}
pathType: Prefix
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
{{- if .port }}
number: {{ .port }}
{{- else }}
number: {{ $svcPort }}
{{- end }}
{{- else }}
serviceName: {{ $fullName }}
{{- if .port }}
servicePort: {{ .port }}
{{- else }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,23 +0,0 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: {{ include "fluent-bit.fullname" . | quote }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
spec:
policyTypes:
- "Ingress"
podSelector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
ingress:
{{- with .Values.networkPolicy.ingress }}
- from:
{{- with .from }}{{- . | toYaml | nindent 8 }}{{- else }} []{{- end }}
ports:
- protocol: "TCP"
port: {{ $.Values.service.port }}
{{- end }}
{{- end }}

View File

@ -1,21 +0,0 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.podDisruptionBudget.enabled }}
apiVersion: {{ include "fluent-bit.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.podDisruptionBudget.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ default $.Release.Namespace .Values.prometheusRule.namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- if .Values.prometheusRule.additionalLabels }}
{{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }}
{{- end }}
spec:
{{- if .Values.prometheusRule.rules }}
groups:
- name: {{ template "fluent-bit.name" . }}
rules: {{- toYaml .Values.prometheusRule.rules | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -1,42 +0,0 @@
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.podSecurityPolicy.annotations }}
annotations:
{{- toYaml .Values.podSecurityPolicy.annotations | nindent 4 }}
{{- end }}
spec:
privileged: false
# Required to prevent escalations to root.
allowPrivilegeEscalation: false
# This is redundant with non-root + disallow privilege escalation,
# but we can provide it for defense in depth.
requiredDropCapabilities:
- ALL
volumes:
- '*'
hostNetwork: {{ .Values.hostNetwork }}
hostIPC: false
hostPID: false
runAsUser:
# TODO: Require the container to run without root privileges.
rule: 'RunAsAny'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false
{{- end }}

View File

@ -1,41 +0,0 @@
{{- if and .Values.openShift.enabled .Values.openShift.securityContextConstraints.create }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: {{ include "fluent-bit.openShiftSccName" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.openShift.securityContextConstraints.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
allowPrivilegedContainer: true
allowPrivilegeEscalation: true
allowHostDirVolumePlugin: true
defaultAllowPrivilegeEscalation: false
# forbid host namespaces
allowHostNetwork: false
allowHostIPC: false
allowHostPorts: false
allowHostPID: false
allowedCapabilities: []
forbiddenSysctls:
- "*"
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- hostPath
- persistentVolumeClaim
- projected
- secret
{{- end }}

View File

@ -1,57 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }}
{{- end }}
{{- if (eq .Values.kind "DaemonSet") }}
{{- with .Values.service.internalTrafficPolicy }}
internalTrafficPolicy: {{ . }}
{{- end }}
{{- end }}
{{- if (eq .Values.service.type "LoadBalancer")}}
{{- with .Values.service.loadBalancerClass}}
loadBalancerClass: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges}}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if and (eq .Values.service.type "NodePort") (.Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
targetPort: {{ .name }}
protocol: {{ .protocol }}
port: {{ .port }}
{{- if and (eq $.Values.service.type "NodePort") (.nodePort) }}
nodePort: {{ .nodePort }}
{{- end }}
{{- end }}
{{- end }}
selector:
{{- include "fluent-bit.selectorLabels" . | nindent 4 }}

View File

@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "fluent-bit.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@ -1,51 +0,0 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "fluent-bit.fullname" . }}
namespace: {{ default .Release.Namespace .Values.serviceMonitor.namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.selector }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
jobLabel: app.kubernetes.io/instance
endpoints:
- port: http
path: {{ default "/api/v2/metrics/prometheus" .Values.serviceMonitor.path }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 8 }}
{{- else }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.serviceMonitor.scheme }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- end }}
{{- if .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.serviceMonitor.tlsConfig | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.additionalEndpoints }}
{{- toYaml . | nindent 4 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -1,26 +0,0 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "fluent-bit.fullname" . }}-test-connection"
namespace: {{ default .Release.Namespace .Values.testFramework.namespace }}
labels:
helm.sh/chart: {{ include "fluent-bit.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
annotations:
helm.sh/hook: test
helm.sh/hook-delete-policy: hook-succeeded
spec:
containers:
- name: wget
image: {{ include "fluent-bit.image" .Values.testFramework.image | quote }}
imagePullPolicy: {{ .Values.testFramework.image.pullPolicy }}
command: ["sh"]
args: ["-c", "wget -O- {{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}"]
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}
{{- end }}
restartPolicy: Never
{{- end }}

View File

@ -1,39 +0,0 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.autoscaling.vpa.enabled }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.autoscaling.vpa.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
resourcePolicy:
containerPolicies:
- containerName: {{ .Chart.Name }}
{{- with .Values.autoscaling.vpa.controlledResources }}
controlledResources:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.maxAllowed }}
maxAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.minAllowed }}
minAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: {{ .Values.kind }}
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.autoscaling.vpa.updatePolicy }}
updatePolicy:
{{- with .Values.autoscaling.vpa.updatePolicy.updateMode }}
updateMode: {{ . }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,511 +0,0 @@
# Default values for fluent-bit.
# kind -- DaemonSet or Deployment
kind: DaemonSet
# replicaCount -- Only applicable if kind=Deployment
replicaCount: 1
image:
repository: cr.fluentbit.io/fluent/fluent-bit
# Overrides the image tag whose default is {{ .Chart.AppVersion }}
# Set to "-" to not use the default value
tag:
digest:
pullPolicy: IfNotPresent
testFramework:
enabled: true
namespace:
image:
repository: busybox
pullPolicy: Always
tag: latest
digest:
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: true
annotations: {}
name:
rbac:
create: true
nodeAccess: false
eventsAccess: false
# Configure podsecuritypolicy
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
# from Kubernetes 1.25, PSP is deprecated
# See: https://kubernetes.io/blog/2022/08/23/kubernetes-v1-25-release/#pod-security-changes
# We automatically disable PSP if Kubernetes version is 1.25 or higher
podSecurityPolicy:
create: false
annotations: {}
# OpenShift-specific configuration
openShift:
enabled: false
securityContextConstraints:
# Create SCC for Fluent-bit and allow use it
create: true
name: ""
annotations: {}
# Use existing SCC in cluster, rather then create new one
existingName: ""
podSecurityContext: {}
# fsGroup: 2000
hostNetwork: false
dnsPolicy: ClusterFirst
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "2"
# - name: edns0
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "foo.local"
# - "bar.local"
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 2020
internalTrafficPolicy:
loadBalancerClass:
loadBalancerSourceRanges: []
labels: {}
# nodePort: 30020
# clusterIP: 172.16.10.1
annotations: {}
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
# prometheus.io/scrape: "true"
externalIPs: []
# externalIPs:
# - 2.2.2.2
serviceMonitor:
enabled: false
# namespace: monitoring
# interval: 10s
# scrapeTimeout: 10s
# selector:
# prometheus: my-prometheus
# ## metric relabel configs to apply to samples before ingestion.
# ##
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# ## relabel configs to apply to samples after ingestion.
# ##
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
# scheme: ""
# tlsConfig: {}
## Bear in mind if you want to collect metrics from a different port
## you will need to configure the new ports on the extraPorts property.
additionalEndpoints: []
# - port: metrics
# path: /metrics
# interval: 10s
# scrapeTimeout: 10s
# scheme: ""
# tlsConfig: {}
# # metric relabel configs to apply to samples before ingestion.
# #
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# # relabel configs to apply to samples after ingestion.
# #
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
prometheusRule:
enabled: false
# namespace: ""
# additionalLabels: {}
# rules:
# - alert: NoOutputBytesProcessed
# expr: rate(fluentbit_output_proc_bytes_total[5m]) == 0
# annotations:
# message: |
# Fluent Bit instance {{ $labels.instance }}'s output plugin {{ $labels.name }} has not processed any
# bytes for at least 15 minutes.
# summary: No Output Bytes Processed
# for: 15m
# labels:
# severity: critical
dashboards:
enabled: false
labelKey: grafana_dashboard
labelValue: 1
annotations: {}
namespace: ""
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "sleep 20"]
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /api/v1/health
port: http
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## only available if kind is Deployment
ingress:
enabled: false
ingressClassName: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts: []
# - host: fluent-bit.example.tld
extraHosts: []
# - host: fluent-bit-extra.example.tld
## specify extraPort number
# port: 5170
tls: []
# - secretName: fluent-bit-example-tld
# hosts:
# - fluent-bit.example.tld
## only available if kind is Deployment
autoscaling:
vpa:
enabled: false
annotations: {}
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
updatePolicy:
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 75
# targetMemoryUtilizationPercentage: 75
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics
customRules: []
# - type: Pods
# pods:
# metric:
# name: packets-per-second
# target:
# type: AverageValue
# averageValue: 1k
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
behavior: {}
# scaleDown:
# policies:
# - type: Pods
# value: 4
# periodSeconds: 60
# - type: Percent
# value: 10
# periodSeconds: 60
## only available if kind is Deployment
podDisruptionBudget:
enabled: false
annotations: {}
maxUnavailable: "30%"
nodeSelector: {}
tolerations: []
affinity: {}
labels: {}
annotations: {}
podAnnotations: {}
podLabels: {}
## How long (in seconds) a pods needs to be stable before progressing the deployment
##
minReadySeconds:
## How long (in seconds) a pod may take to exit (useful with lifecycle hooks to ensure lb deregistration is done)
##
terminationGracePeriodSeconds:
priorityClassName: ""
env: []
# - name: FOO
# value: "bar"
# The envWithTpl array below has the same usage as "env", but is using the tpl function to support templatable string.
# This can be useful when you want to pass dynamic values to the Chart using the helm argument "--set <variable>=<value>"
# https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function
envWithTpl: []
# - name: FOO_2
# value: "{{ .Values.foo2 }}"
#
# foo2: bar2
envFrom: []
# This supports either a structured array or a templatable string
extraContainers: []
# Array mode
# extraContainers:
# - name: do-something
# image: busybox
# command: ['do', 'something']
# String mode
# extraContainers: |-
# - name: do-something
# image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
# command: ['kubectl', 'version']
flush: 1
metricsPort: 2020
extraPorts: []
# - port: 5170
# containerPort: 5170
# protocol: TCP
# name: tcp
# nodePort: 30517
extraVolumes: []
extraVolumeMounts: []
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxUnavailable: 1
# Make use of a pre-defined configmap instead of the one templated here
existingConfigMap: ""
networkPolicy:
enabled: false
# ingress:
# from: []
luaScripts: {}
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File /fluent-bit/etc/parsers.conf
Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port {{ .Values.metricsPort }}
Health_Check On
## https://docs.fluentbit.io/manual/pipeline/inputs
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
multiline.parser docker, cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.*
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Read_From_Tail On
## https://docs.fluentbit.io/manual/pipeline/filters
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
## https://docs.fluentbit.io/manual/pipeline/outputs
outputs: |
[OUTPUT]
Name es
Match kube.*
Host elasticsearch-master
Logstash_Format On
Retry_Limit False
[OUTPUT]
Name es
Match host.*
Host elasticsearch-master
Logstash_Format On
Logstash_Prefix node
Retry_Limit False
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers
## This configuration is deprecated, please use `extraFiles` instead.
upstream: {}
## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: |
[PARSER]
Name docker_no_time
Format json
Time_Keep Off
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
# This allows adding more files with arbitrary filenames to /fluent-bit/etc/conf by providing key/value pairs.
# The key becomes the filename, the value becomes the file content.
extraFiles: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
# example.conf: |
# [OUTPUT]
# Name example
# Match foo.*
# Host bar
# The config volume is mounted by default, either to the existingConfigMap value, or the default of "fluent-bit.fullname"
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/conf
daemonSetVolumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: etcmachineid
hostPath:
path: /etc/machine-id
type: File
daemonSetVolumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
command:
- /fluent-bit/bin/fluent-bit
args:
- --workdir=/fluent-bit/etc
- --config=/fluent-bit/etc/conf/fluent-bit.conf
# This supports either a structured array or a templatable string
initContainers: []
# Array mode
# initContainers:
# - name: do-something
# image: bitnami/kubectl:1.22
# command: ['kubectl', 'version']
# String mode
# initContainers: |-
# - name: do-something
# image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
# command: ['kubectl', 'version']
logLevel: info
hotReload:
enabled: false
image:
repository: ghcr.io/jimmidyson/configmap-reload
tag: v0.11.1
digest:
pullPolicy: IfNotPresent
resources: {}

View File

@ -21,3 +21,7 @@
.idea/
*.tmproj
.vscode/
+# Ignore unittest
+tests/
+*/__snapshot__/*

View File

@ -0,0 +1,12 @@
# Collector Chart Contributing Guide
All changes to the chart require a bump to the version in `Chart.yaml`. See the [Contributing Guide](https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/CONTRIBUTING.md#versioning) for our versioning requirements.
Once the chart version is bumped, the examples must be regenerated. You can regenerate examples by running `make generate-examples CHARTS=opentelemetry-collector`.
## Bumping Default Collector Version
1. Increase the minor version of the chart by one and set the patch version to zero.
2. Update the chart's `appVersion` to match the new collector version. This version will be used as the image tag by default (steps 1 and 2 are sketched after this list).
3. Review the corresponding release notes in [Collector Core](https://github.com/open-telemetry/opentelemetry-collector/releases), [Collector Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases), and [Collector Releases](https://github.com/open-telemetry/opentelemetry-collector-releases/releases). If any changes affect the helm charts, adjust the helm chart accordingly.
4. Run `make generate-examples CHARTS=opentelemetry-collector`.
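For illustration, assuming a hypothetical new collector release `0.112.0`, steps 1 and 2 would change `Chart.yaml` roughly like this:
```yaml
# Illustrative sketch only; the new version numbers are assumed, not from a real release.
apiVersion: v2
name: opentelemetry-collector
appVersion: 0.112.0  # step 2: match the new collector version (assumed here)
version: 0.109.0     # step 1: was 0.108.0 -> bump minor by one, reset patch to zero
```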

View File

@ -0,0 +1,15 @@
apiVersion: v2
appVersion: 0.111.0
description: OpenTelemetry Collector Helm chart for Kubernetes
home: https://opentelemetry.io/
icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png
maintainers:
- name: dmitryax
- name: jaronoff97
- name: TylerHelmuth
name: opentelemetry-collector
sources:
- https://github.com/open-telemetry/opentelemetry-collector
- https://github.com/open-telemetry/opentelemetry-collector-contrib
type: application
version: 0.108.0

View File

@ -0,0 +1,251 @@
# OpenTelemetry Collector Helm Chart
The Helm chart installs the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector)
in a Kubernetes cluster.
## Prerequisites
- Kubernetes 1.24+
- Helm 3.9+
## Installing the Chart
Add OpenTelemetry Helm repository:
```console
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
```
To install the chart with the release name my-opentelemetry-collector, run the following command:
```console
helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="otel/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
```
Where the `mode` value needs to be set to one of `daemonset`, `deployment` or `statefulset`.
For an in-depth walk through getting started in Kubernetes using this helm chart, see [OpenTelemetry Kubernetes Getting Started](https://opentelemetry.io/docs/kubernetes/getting-started/).
## Upgrading
See [UPGRADING.md](UPGRADING.md).
## Security Considerations
The OpenTelemetry Collector recommends binding receivers' servers to addresses that limit connections to authorized users.
For this reason, the chart binds all of the Collector's endpoints to the pod's IP by default.
More info is available in the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks).
Some care must be taken when using `hostNetwork: true`, since the Collector will then listen on all the addresses in the host network namespace.
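As a rough sketch of what that default looks like, the receiver endpoints in the rendered configuration are bound to the pod IP (injected via an environment variable) rather than `0.0.0.0`; the OTLP endpoints below mirror the defaults shown elsewhere in this commit:

```yaml
# Sketch of the default pattern: endpoints listen on the pod IP, not 0.0.0.0.
config:
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: ${env:MY_POD_IP}:4317
        http:
          endpoint: ${env:MY_POD_IP}:4318
```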
## Configuration
### Default configuration
By default this chart deploys an OpenTelemetry Collector with three pipelines (logs, metrics and traces)
and the debug exporter enabled. The collector can be installed as a daemonset (agent), a deployment, or a statefulset.
*Example*: Install collector as a deployment.
```yaml
mode: deployment
```
By default the collector has the following receivers enabled:
- **metrics**: OTLP and Prometheus. Prometheus is configured only for scraping the collector's own metrics.
- **traces**: OTLP, zipkin and jaeger (thrift and grpc).
- **logs**: OTLP (to enable container logs, see [Configuration for Kubernetes container logs](#configuration-for-kubernetes-container-logs)).
### Basic Top Level Configuration
The Collector's configuration is set via the `config` section. Default components can be removed with `null`. Remember that lists in Helm are not merged, so if you want to modify any default list you must specify all items, including any default items you want to keep.
*Example*: Disable metrics and logs pipelines and non-otlp receivers:
```yaml
config:
receivers:
jaeger: null
prometheus: null
zipkin: null
service:
pipelines:
traces:
receivers:
- otlp
metrics: null
logs: null
```
The chart also provides several presets, detailed below, to help configure important Kubernetes components. For more details on each component, see [Kubernetes Collector Components](https://opentelemetry.io/docs/kubernetes/collector/components/).
### Configuration for Kubernetes Container Logs
The collector can be used to collect logs sent to standard output by Kubernetes containers.
This feature is disabled by default. It has the following requirements:
- It needs the collector to be deployed as an agent (daemonset).
- It requires the [Filelog receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver) to be included in the collector, such as the [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.logsCollection.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
```
This feature works by adding a `filelog` receiver to the `logs` pipeline. This receiver is preconfigured
to read the files where the Kubernetes container runtime writes all containers' console output.
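A minimal sketch of what the preset wires up is shown below; the field names come from the contrib `filelog` receiver, but the chart's actual generated configuration (paths, operators, exclusions) is more elaborate:

```yaml
# Rough sketch, not the chart's literal output: read container log files
# from the node and feed them into the logs pipeline.
config:
  receivers:
    filelog:
      include:
        - /var/log/pods/*/*/*.log
      start_at: end   # assumption; the preset's default may differ
  service:
    pipelines:
      logs:
        receivers:
          - otlp
          - filelog
```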
#### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion"
The container logs pipeline uses the `debug` exporter by default.
Paired with the default `filelog` receiver that receives all containers' console output,
it is easy to accidentally feed the exported logs back into the receiver.
Also note that using the `--verbosity=detailed` option for the `debug` exporter causes it to output
multiple lines per single received log, which when looped, would amplify the logs exponentially.
To prevent the looping, the default configuration of the receiver excludes logs from the collector's containers.
If you want to include the collector's logs, make sure to replace the `debug` exporter
with an exporter that does not send logs to collector's standard output.
Here's an example `values.yaml` file that replaces the default `debug` exporter on the `logs` pipeline
with an `otlphttp` exporter that sends the container logs to `https://example.com:55681` endpoint.
It also clears the `filelog` receiver's `exclude` property, for collector logs to be included in the pipeline.
```yaml
mode: daemonset
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
config:
exporters:
otlphttp:
endpoint: https://example.com:55681
service:
pipelines:
logs:
exporters:
- otlphttp
```
### Configuration for Kubernetes Attributes Processor
The collector can be configured to add Kubernetes metadata, such as pod name and namespace name, as resource attributes to incoming logs, metrics and traces.
This feature is disabled by default. It has the following requirements:
- It requires the [Kubernetes Attributes processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) to be included in the collector, such as the [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.kubernetesAttributes.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
kubernetesAttributes:
enabled: true
    # You can also configure the preset to add all of the associated pod's labels and annotations to your telemetry.
# The label/annotation name will become the resource attribute's key.
extractAllPodLabels: true
extractAllPodAnnotations: true
```
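For reference, the preset renders a `k8sattributes` processor into the pipelines roughly like this (taken from the chart's generated example config; the exact metadata list may vary by version):
```yaml
processors:
  k8sattributes:
    extract:
      metadata:
        - k8s.namespace.name
        - k8s.deployment.name
        - k8s.node.name
        - k8s.pod.name
        - k8s.pod.uid
        - k8s.pod.start_time
    passthrough: false
    pod_association:
      - sources:
          - from: resource_attribute
            name: k8s.pod.ip
      - sources:
          - from: resource_attribute
            name: k8s.pod.uid
      - sources:
          - from: connection
```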
### Configuration for Retrieving Kubelet Metrics
The collector can be configured to collect node, pod, and container metrics from the API server on a kubelet.
This feature is disabled by default. It has the following requirements:
- It requires the [Kubeletstats receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver) to be included in the collector, such as the [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.kubeletMetrics.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
kubeletMetrics:
enabled: true
```
### Configuration for Kubernetes Cluster Metrics
The collector can be configured to collect cluster-level metrics from the Kubernetes API server. A single instance of this receiver can be used to monitor a cluster.
This feature is disabled by default. It has the following requirements:
- It requires the [Kubernetes Cluster receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver) to be included in the collector, such as the [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
- It requires statefulset or deployment mode with a single replica.
To enable this feature, set the `presets.clusterMetrics.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: deployment
replicaCount: 1
presets:
clusterMetrics:
enabled: true
```
### Configuration for Retrieving Kubernetes Events
The collector can be configured to collect Kubernetes events.
This feature is disabled by default. It has the following requirements:
- It requires the [Kubernetes Objects receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver) to be included in the collector, such as the [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.kubernetesEvents.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: deployment
replicaCount: 1
presets:
kubernetesEvents:
enabled: true
```
### Configuration for Host Metrics
The collector can be configured to collect host metrics for Kubernetes nodes.
This feature is disabled by default. It has the following requirements:
- It requires the [Host Metrics receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver) to be included in the collector, such as the [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
To enable this feature, set the `presets.hostMetrics.enabled` property to `true`.
Here is an example `values.yaml`:
```yaml
mode: daemonset
presets:
hostMetrics:
enabled: true
```
## CRDs
At this time, Prometheus CRDs are supported but other CRDs are not.
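If the Prometheus Operator CRDs are already installed in the cluster, the chart's monitoring values can be used to create those resources. A minimal sketch (assuming the chart's `serviceMonitor`/`podMonitor` values; check [values.yaml](./values.yaml) for the exact fields):
```yaml
# Requires the Prometheus Operator CRDs (ServiceMonitor/PodMonitor) in the cluster
serviceMonitor:
  enabled: true
podMonitor:
  enabled: true
```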
### Other configuration options
The [values.yaml](./values.yaml) file contains information about all other configuration
options for this chart.
For more examples see [Examples](examples).

View File

@ -0,0 +1,419 @@
# Upgrade guidelines
These upgrade guidelines only contain instructions for version upgrades which require manual modifications on the user's side.
If the version you want to upgrade to is not listed here, then there is nothing to do for you.
Just upgrade and enjoy.
## 0.97.2 to 0.98.0
> [!WARNING]
> Critical content demanding immediate user attention due to potential risks.
The deprecated memory ballast extension has been removed from the default config. If you depend on this component you must manually configure `config.extensions` and `config.service.extensions` to include the memory ballast extension. Setting `useGOMEMLIMIT` to `false` will no longer keep the memory ballast extension in the rendered collector config.
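A minimal sketch of re-adding the ballast manually (the 40% size is only an illustrative value):
```yaml
config:
  extensions:
    memory_ballast:
      size_in_percentage: 40
  service:
    extensions:
      - health_check
      - memory_ballast
```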
## 0.88.0 to 0.89.0
> [!WARNING]
> Critical content demanding immediate user attention due to potential risks.
As part of working towards using the [OpenTelemetry Collector Kubernetes Distro](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) by default, the chart now requires users to explicitly set an image repository. If you are already explicitly setting an image repository this breaking change does not affect you.
If you are using an OpenTelemetry Community distribution of the Collector we recommend you use `otel/opentelemetry-collector-k8s`, but carefully review the [components included in this distribution](https://github.com/open-telemetry/opentelemetry-collector-releases/blob/main/distributions/otelcol-k8s/manifest.yaml) to make sure it includes all the components you use in your configuration. In the future this distribution will become the default image used for the chart.
You can use the OpenTelemetry Collector Kubernetes Distro by adding these lines to your values.yaml:
```yaml
image:
repository: "otel/opentelemetry-collector-k8s"
```
If you want to stick with using the Contrib distribution, add these lines to your values.yaml:
```yaml
image:
repository: "otel/opentelemetry-collector-contrib"
```
For more details see [#1135](https://github.com/open-telemetry/opentelemetry-helm-charts/issues/1135).
## 0.84.0 to 0.85.0
The `loggingexporter` has been removed from the default configuration. Use the `debugexporter` instead.
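If you still list the logging exporter explicitly in your values, a minimal sketch of the switch (the `traces` pipeline here is only an example):
```yaml
config:
  exporters:
    debug: {}
  service:
    pipelines:
      traces:
        exporters:
          - debug
```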
## 0.78.2 to 0.78.3
[Update Health Check Extension's endpoints to use Pod IP Instead of 0.0.0.0](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/1012)
The [Collector's security guidelines were updated](https://github.com/open-telemetry/opentelemetry-collector/pull/6959) to include containerized environments when discussing safeguards against denial of service attacks.
To be in compliance with the Collector's security best practices the chart has been updated to use the Collector's pod IP in place of `0.0.0.0`.
The chart will continue to allow complete configuration of the Collector via the `config` field in the values.yaml. If the pod IP does not suit your needs you can use `config` to set something different.
See the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) for more details.
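If you do need the old behaviour, a minimal sketch that points the Health Check extension back at all interfaces (weigh this against the security guidance above):
```yaml
config:
  extensions:
    health_check:
      endpoint: 0.0.0.0:13133
```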
## 0.75.1 to 0.76.0
Enable the `useGOMEMLIMIT` feature flag by default. This means that by default the chart no longer uses the Memory Ballast Extension, and any custom configuration applied to the Memory Ballast Extension is ignored.
**If you're still interested in using the Memory Ballast Extension, set this back to false.**
## 0.69.3 to 0.70.0
The following deprecated fields have been removed. Please use the new values (an `extraVolumes` migration is sketched after this list):
- `extraConfigMapMounts` -> `extraVolumes`
- `extraHostPathMounts` -> `extraVolumes`
- `secretMounts` -> `extraVolumes`
- `containerLogs` -> `presets.logsCollection`
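A minimal sketch of migrating a removed `extraConfigMapMounts` entry to `extraVolumes`/`extraVolumeMounts` (the ConfigMap name and mount path are only illustrative):
```yaml
extraVolumes:
  - name: my-config
    configMap:
      name: my-configmap
extraVolumeMounts:
  - name: my-config
    mountPath: /etc/myconfig
    readOnly: true
```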
## 0.69.0 to 0.69.1 & 0.69.2
The `loggingexporter` was replaced with the `debugexporter`. This ended up being an accidental breaking change for any user that depended on the default logging exporter config when explicitly listing the logging exporter in an exporter list.
When using versions `0.69.1` or `0.69.2` you should explicitly list the debugging exporter instead of the logging exporter. Your other option is to skip these versions and use `0.69.3` or newer, which includes the logging exporter configuration.
**The logging exporter will be removed in a future version.** We highly recommend switching to the debug exporter.
## 0.67 to 0.68
The `presets.kubernetesEvents` preset now excludes `DELETED` watch types so that a log is not ingested when Kubernetes deletes an event.
The intention behind this change is to clean up the data ingested by the preset, as the `DELETED` update for a Kubernetes Event is
uninteresting. If you want to keep ingesting `DELETED` updates for Kubernetes Events you will need to configure the `k8sobjects` receiver manually, as sketched below.
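A minimal sketch of configuring the receiver manually so that `DELETED` watch events are kept (this mirrors the preset's generated config, minus the exclusion):
```yaml
config:
  receivers:
    k8sobjects:
      objects:
        - name: events
          mode: watch
          group: events.k8s.io
```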
## 0.62 to 0.63
The `kubernetesAttributes` preset now respects the order of processors in the logs, metrics and traces pipelines.
This might implicitly break your pipelines if you relied on the `k8sattributes` processor being rendered as the first processor but also explicitly listed it elsewhere in a signal's pipeline, as in the example below.
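For example, with the preset enabled and the processor listed explicitly, its position in the list is now preserved instead of being forced to the front (`batch` stands in for any other processor):
```yaml
config:
  service:
    pipelines:
      traces:
        processors:
          - batch
          - k8sattributes   # now rendered in this position
```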
## 0.55.2 to 0.56
The `tpl` function has been added to references of pod labels and ingress hosts. This makes chart values more reusable by
referencing global values. If you are currently using any `{{ }}` syntax in pod labels or ingress hosts it will now be rendered. To escape existing instances of `{{ }}`, use {{` <original content> `}}.
```yaml
global:
region: us-east-1
environment: stage
# Tests `tpl` function reference used in pod labels and
# ingress.hosts[*]
podLabels:
environment: "{{ .Values.global.environment }}"
ingress:
enabled: true
hosts:
- host: "otlp-collector-{{ .Values.global.region }}-{{ .Values.global.environment }}-example.dev"
paths:
- path: /
pathType: Prefix
port: 4318
```
Note that only global Helm values can be referenced as the Helm Chart schema currently does not allow `additionalValues`.
## 0.55.0 to 0.55.1
As of v0.55.1 the Collector chart uses the `${env:ENV}` style syntax when reading environment variables, and that `${env:ENV}` syntax is not supported by collector versions older than 0.71. If you upgrade the collector chart to v0.55.1, make sure your collector version is 0.71 or later (the default is v0.76.1).
## 0.53.1 to 0.54.0
As of v0.54.0 of the Collector chart, the default resource limits are removed. If you want to keep the old values you can use the following configuration:
```yaml
resources:
limits:
# CPU units are in fractions of 1000; memory in powers of 2
cpu: 250m
memory: 512Mi
```
See [the 644 issue](https://github.com/open-telemetry/opentelemetry-helm-charts/issues/644) for more information.
## 0.46.0 to 0.47.0
[Update Collector Endpoints to use Pod IP Instead of 0.0.0.0](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/603)
The [Collector's security guidelines were updated](https://github.com/open-telemetry/opentelemetry-collector/pull/6959) to include containerized environments when discussing safeguards against denial of service attacks.
To be in compliance with the Collector's security best practices the chart has been updated to use the Collector's pod IP in place of `0.0.0.0`.
The chart will continue to allow complete configuration of the Collector via the `config` field in the values.yaml. If the pod IP does not suit your needs you can use `config` to set something different.
See the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) for more details.
The new default of binding to the pod IP, rather than `0.0.0.0`, will cause `kubectl port-forward` to fail. If port-forwarding is desired, the following `values.yaml` snippet will allow the Collector to bind to `127.0.0.1` inside the pod, in addition to the pod's IP:
```yaml
config:
receivers:
jaeger/local:
protocols:
grpc:
endpoint: 127.0.0.1:14250
thrift_compact:
endpoint: 127.0.0.1:6831
thrift_http:
endpoint: 127.0.0.1:14268
otlp/local:
protocols:
grpc:
endpoint: 127.0.0.1:4317
http:
endpoint: 127.0.0.1:4318
zipkin/local:
endpoint: 127.0.0.1:9411
service:
pipelines:
traces:
receivers:
- otlp
- otlp/local
- jaeger
- jaeger/local
- zipkin
- zipkin/local
```
## 0.40.7 to 0.41.0
[Require Kubernetes version 1.23 or later](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/541)
If you enable use of a _HorizontalPodAutoscaler_ for the collector when running in the "deployment" mode by way of `.Values.autoscaling.enabled`, the manifest now uses the "autoscaling/v2" API group version, which [is available only as recently as Kubernetes version 1.23](https://kubernetes.io/blog/2021/12/07/kubernetes-1-23-release-announcement/#horizontalpodautoscaler-v2-graduates-to-ga). As [all previous versions of this API group are deprecated and removed as of Kubernetes version 1.26](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#horizontalpodautoscaler-v126), we don't offer support for Kubernetes versions older than 1.23.
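If you rely on this, a minimal sketch of the autoscaling values that render an `autoscaling/v2` HorizontalPodAutoscaler (mirroring the chart's example values):
```yaml
mode: deployment
autoscaling:
  enabled: true
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
  targetMemoryUtilizationPercentage: 80
```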
## 0.34.0 to 0.34.0
[config supports templating](TBD)
The chart now supports templating in `.Values.config`. If you are currently using any `{{ }}` syntax in your `values.yaml` it will now be rendered. To escape existing instances of `{{ }}`, use ``` {{` <original content> `}} ```. For example, `{{ REDACTED_EMAIL }}` becomes ``` {{` {{ REDACTED_EMAIL }} `}} ```.
## 0.28.0 to 0.29.0
[Reduce requested resources](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/273)
Resource `limits` have been reduced. Upgrades/installs of chart 0.29.0 will now use fewer resources. In order to set the resources back to what they were, you will need to override the `resources` section in the `values.yaml`.
*Example*:
```yaml
resources:
limits:
cpu: 1
memory: 2Gi
```
## 0.23.1 to 0.24.0
[Remove containerLogs in favor of presets.logsCollection]()
The ability to enable logs collection from the collector has been moved from `containerLogs.enabled` to `presets.logsCollection.enabled`. If you are currently using `containerLogs.enabled`, you should instead use the preset:
```yaml
presets:
logsCollection:
enabled: true
```
If you are using `containerLogs.enabled` and also enabling collection of the collector logs you can use `includeCollectorLogs`
```yaml
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
```
You no longer need to update `config.service.pipelines.logs` to include the filelog receiver yourself as the preset will automatically update the logs pipeline to include the filelog receiver.
The filelog preset's configuration can be modified via `config.receivers`, but preset configuration cannot be removed. If you need to remove any filelog receiver configuration generated by the preset you should not use the preset. Instead, configure the filelog receiver manually in `config.receivers` and set any other necessary fields in the values.yaml to adjust the Kubernetes resources as needed.
See the [daemonset-collector-logs example](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-collector/examples/daemonset-collector-logs) to see an example of the preset in action.
## 0.18.0 to 0.19.0
[Remove agentCollector and standaloneCollector settings](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/216)
The `agentCollector` and `standaloneCollector` config sections have been removed. Upgrades/installs of chart 0.19.0 will fail if `agentCollector` or `standaloneCollector` are in the values.yaml. See the [Migrate to mode](#migrate-to-mode) steps for instructions on how to replace `agentCollector` and `standaloneCollector` with `mode`.
## 0.13.0 to 0.14.0
[Remove two-deployment mode](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/159)
The ability to install both the agent and standalone collectors simultaneously with the chart has been removed. Installs/upgrades where both `.Values.agentCollector.enabled` and `.Values.standaloneCollector.enabled` are true will fail. `agentCollector` and `standaloneCollector` have also been deprecated, but backward compatibility has been maintained.
### To run both a deployment and daemonset
Install a deployment version of the collector. This is done by setting `.Values.mode` to `deployment`
```yaml
mode: deployment
```
Next, install a daemonset version of the collector that is configured to send traffic to the previously installed deployment. This is done by setting `.Values.mode` to `daemonset` and updating `.Values.config` so that data is exported to the deployment.
```yaml
mode: daemonset
config:
exporters:
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
service:
pipelines:
logs:
exporters:
- otlp
- logging
metrics:
exporters:
- otlp
- logging
traces:
exporters:
- otlp
- logging
```
See the [daemonset-and-deployment](examples/daemonset-and-deployment) example to see the rendered config.
### Migrate to `mode`:
The `agentCollector` and `standaloneCollector` sections in values.yaml have been deprecated. Instead there is a new field, `mode`, that determines if the collector is being installed as a daemonset or deployment.
```yaml
# Valid values are "daemonset" and "deployment".
# If set, agentCollector and standaloneCollector are ignored.
mode: <daemonset|deployment>
```
The following fields have also been added at the root level to replace the deprecated `agentCollector` and `standaloneCollector` settings.
```yaml
containerLogs:
enabled: false
resources:
limits:
cpu: 1
memory: 2Gi
podAnnotations: {}
podLabels: {}
# Host networking requested for this pod. Use the host's network namespace.
hostNetwork: false
# only used with deployment mode
replicaCount: 1
annotations: {}
```
When using `mode`, these settings should be used instead of their counterparts in `agentCollector` and `standaloneCollector`.
Set `mode` to `daemonset` if `agentCollector` was being used. Move all `agentCollector` settings to the corresponding root-level setting. If `agentCollector.configOverride` was being used, merge the settings with `.Values.config`.
Example agentCollector values.yaml:
```yaml
agentCollector:
resources:
limits:
cpu: 3
memory: 6Gi
configOverride:
receivers:
hostmetrics:
scrapers:
cpu:
disk:
filesystem:
service:
pipelines:
metrics:
receivers: [otlp, prometheus, hostmetrics]
```
Example mode values.yaml:
```yaml
mode: daemonset
resources:
limits:
cpu: 3
memory: 6Gi
config:
receivers:
hostmetrics:
scrapers:
cpu:
disk:
filesystem:
service:
pipelines:
metrics:
receivers: [otlp, prometheus, hostmetrics]
```
Set `mode` to `deployment` if `standaloneCollector` was being used. Move all `standaloneCollector` settings to the corresponding root-level setting. If `standaloneCollector.configOverride` was being used, merge the settings with `.Values.config`.
Example standaloneCollector values.yaml:
```yaml
standaloneCollector:
enabled: true
replicaCount: 2
configOverride:
receivers:
podman_stats:
endpoint: unix://run/podman/podman.sock
timeout: 10s
collection_interval: 10s
service:
pipelines:
metrics:
receivers: [otlp, prometheus, podman_stats]
```
Example mode values.yaml:
```yaml
mode: deployment
replicaCount: 2
config:
  receivers:
podman_stats:
endpoint: unix://run/podman/podman.sock
timeout: 10s
collection_interval: 10s
service:
pipelines:
metrics:
receivers: [otlp, prometheus, podman_stats]
```
Default configuration in `.Values.config` can now be removed with `null`. When changing a pipeline, you must explicitly list all the components that are in the pipeline, including any default components.
*Example*: Disable metrics and logging pipelines and non-otlp receivers:
```yaml
config:
receivers:
jaeger: null
prometheus: null
zipkin: null
service:
pipelines:
traces:
receivers:
- otlp
metrics: null
logs: null
```

View File

@ -0,0 +1,9 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
useGOMEMLIMIT: true

View File

@ -0,0 +1,27 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
clusterRole:
create: true
name: "testing-clusterrole"
rules:
- apiGroups:
- ''
resources:
- 'pods'
- 'nodes'
verbs:
- 'get'
- 'list'
- 'watch'
clusterRoleBinding:
name: "testing-clusterrolebinding"
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,26 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
config:
receivers:
jaeger: null
otlp: null
zipkin: null
hostmetrics:
scrapers:
cpu:
disk:
filesystem:
service:
pipelines:
metrics:
receivers:
- prometheus
- hostmetrics
traces: null
logs: null

View File

@ -0,0 +1,12 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,29 @@
global:
test: templated-value
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
# Tests `tpl` function reference used in pod labels and
# ingress.hosts[*]
podLabels:
testLabel: "{{ .Values.global.test }}"
ingress:
enabled: true
hosts:
- host: "{{ .Values.global.test }}"
paths:
- path: /
pathType: Prefix
port: 4318

View File

@ -0,0 +1,21 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
ports:
jaeger-compact:
enabled: false
jaeger-thrift:
enabled: false
jaeger-grpc:
enabled: false
zipkin:
enabled: false
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,15 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 10
behavior: {}
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

View File

@ -0,0 +1,15 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 10
behavior: {}
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

View File

@ -0,0 +1,49 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
ingress:
enabled: true
ingressClassName: nginx
annotations:
test.io/collector: default
hosts:
- host: defaultcollector.example.com
paths:
- path: /
pathType: Prefix
port: 4318
additionalIngresses:
- name: additional-basic
hosts:
- host: additional-basic.example.com
paths:
- path: /
pathType: Prefix
port: 4318
- name: additional-advanced
ingressClassName: nginx
annotations:
test.io/ingress: additional-advanced
hosts:
- host: additional-advanced.example.com
paths:
- path: /
pathType: Exact
port: 4318
tls:
- secretName: somesecret
hosts:
- additional-advanced.example.com

View File

@ -0,0 +1,37 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
networkPolicy:
enabled: true
allowIngressFrom:
- namespaceSelector: {}
- ipBlock:
cidr: 127.0.0.1/32
extraIngressRules:
- ports:
- port: metrics
protocol: TCP
from:
- ipBlock:
cidr: 127.0.0.1/32
egressRules:
- to:
- podSelector:
matchLabels:
app: jaeger
ports:
- port: 4317
protocol: TCP

View File

@ -0,0 +1,15 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M
networkPolicy:
enabled: true

View File

@ -0,0 +1,16 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
clusterMetrics:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
hostMetrics:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
kubernetesEvents:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
kubeletMetrics:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,16 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
kubernetesAttributes:
enabled: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,17 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
logsCollection:
enabled: true
includeCollectorLogs: true
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,37 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
livenessProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 2
terminationGracePeriodSeconds: 40
httpGet:
port: 8989
path: /healthz
readinessProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
successThreshold: 2
failureThreshold: 2
httpGet:
port: 8989
path: /healthz
startupProbe:
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 10
terminationGracePeriodSeconds: 40
httpGet:
port: 8989
path: /healthz

View File

@ -0,0 +1,13 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 2
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,17 @@
# Examples of chart configuration
Here is a collection of common configurations for the OpenTelemetry collector. Each folder contains an example `values.yaml` and the resulting configurations that are generated by the opentelemetry-collector helm charts.
- [Daemonset only](daemonset-only)
- [Deployment only](deployment-only)
- [Daemonset and deployment](daemonset-and-deployment)
- [Log collection, including collector logs](daemonset-collector-logs)
- [Add component (hostmetrics)](daemonset-hostmetrics)
The manifests are rendered using the `helm template` command and the specific example folder's values.yaml.
Examples are generated by (from root of the repo):
```sh
make generate-examples CHARTS=opentelemetry-collector
```

View File

@ -0,0 +1,41 @@
---
# Source: opentelemetry-collector/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["daemonsets", "deployments", "replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch", "list"]

View File

@ -0,0 +1,21 @@
---
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: example-opentelemetry-collector
subjects:
- kind: ServiceAccount
name: example-opentelemetry-collector
namespace: default

View File

@ -0,0 +1,68 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
processors:
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.deployment.name
- k8s.statefulset.name
- k8s.daemonset.name
- k8s.cronjob.name
- k8s.job.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
receivers:
k8s_cluster:
collection_interval: 10s
k8sobjects:
objects:
- exclude_watch_type:
- DELETED
group: events.k8s.io
mode: watch
name: events
service:
pipelines:
logs:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8sobjects
metrics:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8s_cluster

View File

@ -0,0 +1,100 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 360fc84164ca26f5a57ecb44cbcec02ca473b09fc86dba876f71c9fa3617f656
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "3276MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 2
memory: 4Gi
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -0,0 +1,49 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,34 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 1
resources:
limits:
cpu: 2
memory: 4Gi
presets:
clusterMetrics:
enabled: true
kubernetesAttributes:
enabled: true
kubernetesEvents:
enabled: true
alternateConfig:
exporters:
debug: {}
service:
pipelines:
logs:
exporters:
- debug
metrics:
exporters:
- debug

View File

@ -0,0 +1,34 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
config:
exporters:
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
service:
pipelines:
logs:
exporters:
- otlp
- debug
metrics:
exporters:
- otlp
- debug
traces:
exporters:
- otlp
- debug
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,13 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -0,0 +1,93 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,86 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,104 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: a2d0d31bd929305e52879f78f502d56ad49d9ef9396838490646e9034d2243de
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -0,0 +1,100 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -0,0 +1,49 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,100 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
filelog:
exclude: []
include:
- /var/log/pods/*/*/*.log
include_file_name: false
include_file_path: true
operators:
- id: container-parser
max_log_size: 102400
type: container
retry_on_failure:
enabled: true
start_at: end
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- filelog
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,110 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 5237e54a1cdaad762876da10a5bab6f686506211aaa2c70b901a74fec8b82140
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: varlogpods
mountPath: /var/log/pods
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: varlogpods
hostPath:
path: /var/log/pods
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
hostNetwork: false

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,12 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
logsCollection:
enabled: true
includeCollectorLogs: true

View File

@ -0,0 +1,133 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
hostmetrics:
collection_interval: 10s
root_path: /hostfs
scrapers:
cpu: null
disk: null
filesystem:
exclude_fs_types:
fs_types:
- autofs
- binfmt_misc
- bpf
- cgroup2
- configfs
- debugfs
- devpts
- devtmpfs
- fusectl
- hugetlbfs
- iso9660
- mqueue
- nsfs
- overlay
- proc
- procfs
- pstore
- rpc_pipefs
- securityfs
- selinuxfs
- squashfs
- sysfs
- tracefs
match_type: strict
exclude_mount_points:
match_type: regexp
mount_points:
- /dev/*
- /proc/*
- /sys/*
- /run/k3s/containerd/*
- /var/lib/docker/*
- /var/lib/kubelet/*
- /snap/*
load: null
memory: null
network: null
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
- hostmetrics
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,105 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 98dea268c8a8fe987e082a4e85801387f2b60fefc281f9b1edd1080f0af62574
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: hostfs
mountPath: /hostfs
readOnly: true
mountPropagation: HostToContainer
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: hostfs
hostPath:
path: /
hostNetwork: false

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,12 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
hostMetrics:
enabled: true

View File

@ -0,0 +1,86 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,119 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
lifecycle:
preStop:
exec:
command:
- /test/sleep
- "5"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- mountPath: /test
name: test
initContainers:
- args:
- /bin/sleep
- /test/sleep
command:
- cp
image: 'busybox:latest'
name: test
volumeMounts:
- mountPath: /test
name: test
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- emptyDir: {}
name: test
hostNetwork: false

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,37 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
global:
image: busybox:latest
initContainers:
- name: test
command:
- cp
args:
- /bin/sleep
- /test/sleep
image: "{{ .Values.global.image }}"
volumeMounts:
- name: test
mountPath: /test
extraVolumes:
- name: test
emptyDir: {}
extraVolumeMounts:
- name: test
mountPath: /test
lifecycleHooks:
preStop:
exec:
command:
- /test/sleep
- "5"

View File

@ -0,0 +1,86 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888
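
The prometheus receiver above only scrapes the collector's own telemetry endpoint on port 8888. Extra scrape jobs would be merged into the same receiver; a hedged values-override sketch, assuming the chart deep-merges a config key into the rendered relay.yaml (as the upstream opentelemetry-collector chart does) and using an illustrative target:

config:
  receivers:
    prometheus:
      config:
        scrape_configs:
          - job_name: my-app                  # illustrative job name
            scrape_interval: 30s
            static_configs:
              - targets:
                  - my-app.default.svc:9090   # hypothetical metrics endpoint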

View File

@ -0,0 +1,98 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
hostNetwork: false
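
Because the agent runs as a DaemonSet with hostPorts, workloads on the same node can reach it through the node IP. A minimal sketch of an application container pointing the standard OTLP exporter environment variable at the node-local agent (the NODE_IP helper variable is illustrative; OTEL_EXPORTER_OTLP_ENDPOINT is the standard OpenTelemetry SDK variable):

env:
  - name: NODE_IP                       # illustrative helper variable
    valueFrom:
      fieldRef:
        fieldPath: status.hostIP
  - name: OTEL_EXPORTER_OTLP_ENDPOINT   # standard OpenTelemetry SDK variable
    value: "http://$(NODE_IP):4317"     # gRPC OTLP hostPort exposed above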

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -0,0 +1,86 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,100 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "3276MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 2
memory: 4Gi
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -0,0 +1,49 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster
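
In deployment mode the collectors sit behind the ClusterIP Service above, so applications export to the Service name rather than a node IP. A minimal sketch, reusing the Service name, namespace, and OTLP HTTP port from the manifest:

env:
  - name: OTEL_EXPORTER_OTLP_ENDPOINT   # standard OpenTelemetry SDK variable
    value: "http://example-opentelemetry-collector.default.svc:4318"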

View File

@ -0,0 +1,13 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -0,0 +1,14 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 3
resources:
limits:
cpu: 2
memory: 4Gi
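
With these limits the rendered Deployment above sets GOMEMLIMIT to "3276MiB", which matches roughly 80% of the 4Gi memory limit (4096 MiB * 0.8 = 3276.8). A hedged sketch of that arithmetic in template form, assuming the chart applies such a heuristic (the expression is illustrative, not the chart's actual helper):

# 4Gi = 4096 MiB; 80% of 4096 = 3276.8, truncated to 3276
- name: GOMEMLIMIT
  value: {{ printf "%dMiB" (int (mulf 4096 0.8)) | quote }}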

View File

@ -0,0 +1,49 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
service:
extensions:
- health_check
pipelines:
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,82 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 09d236d9b1f430ef0e0c20f6667eb2f1d17c9f550a8648c0bc86ecf703089397
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

Some files were not shown because too many files have changed in this diff.