Compare commits


8 Commits

131 changed files with 712 additions and 4459 deletions

View File

@ -4,5 +4,5 @@ spec:
- name: kube-apiserver
resources:
requests:
cpu: 250m
memory: 1268Mi
cpu: 200m
memory: 1536Mi

View File

@ -3,5 +3,5 @@ spec:
- name: kube-controller-manager
resources:
requests:
cpu: 50m
memory: 192Mi
cpu: 30m
memory: 128Mi

View File

@ -3,5 +3,5 @@ spec:
- name: kube-scheduler
resources:
requests:
cpu: 50m
memory: 96Mi
cpu: 30m
memory: 64Mi

View File

@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.3.3
version: 0.3.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -22,7 +22,7 @@ dependencies:
repository: https://argoproj.github.io/argo-helm
condition: argo-events.enabled
- name: argo-cd
version: 7.9.0
version: 8.0.9
repository: https://argoproj.github.io/argo-helm
condition: argo-cd.enabled
- name: argocd-image-updater

View File

@ -30,7 +30,7 @@ dependencies:
repository: https://aquasecurity.github.io/helm-charts/
condition: trivy.enabled
- name: renovate
version: 40.25.0
version: 40.26.1
repository: https://docs.renovatebot.com/helm-charts
condition: renovate.enabled
kubeVersion: ">= 1.25.0"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-telemetry
description: KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
type: application
version: 0.4.1
version: 0.5.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -19,23 +19,31 @@ dependencies:
version: 0.2.1
repository: https://cdn.zero-downtime.net/charts/
- name: opentelemetry-collector
version: 0.108.0
version: 0.125.0
repository: https://open-telemetry.github.io/opentelemetry-helm-charts
condition: opentelemetry-collector.enabled
- name: opensearch
version: 3.0.0
repository: https://opensearch-project.github.io/helm-charts/
condition: opensearch.enabled
- name: opensearch-dashboards
version: 3.0.0
repository: https://opensearch-project.github.io/helm-charts/
condition: opensearch-dashboards.enabled
- name: data-prepper
version: 0.1.0
version: 0.3.1
repository: https://opensearch-project.github.io/helm-charts/
condition: data-prepper.enabled
- name: jaeger
version: 3.3.1
version: 3.4.1
repository: https://jaegertracing.github.io/helm-charts
condition: jaeger.enabled
- name: fluentd
version: 0.5.2
version: 0.5.3
repository: https://fluent.github.io/helm-charts
condition: fluentd.enabled
- name: fluent-bit
version: 0.47.10
version: 0.49.0
repository: https://fluent.github.io/helm-charts
condition: fluent-bit.enabled
kubeVersion: ">= 1.26.0"
kubeVersion: ">= 1.30.0-0"

View File

@ -1,6 +1,6 @@
# kubezero-telemetry
![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.4.2](https://img.shields.io/badge/Version-0.4.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
@ -14,16 +14,18 @@ KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
## Requirements
Kubernetes: `>= 1.26.0`
Kubernetes: `>= 1.30.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
| https://jaegertracing.github.io/helm-charts | jaeger | 3.3.1 |
| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.108.0 |
| https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.1.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://fluent.github.io/helm-charts | fluent-bit | 0.49.0 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.3 |
| https://jaegertracing.github.io/helm-charts | jaeger | 3.4.1 |
| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.125.0 |
| https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.3.1 |
| https://opensearch-project.github.io/helm-charts/ | opensearch | 3.0.0 |
| https://opensearch-project.github.io/helm-charts/ | opensearch-dashboards | 3.0.0 |
## Values
@ -135,7 +137,7 @@ Kubernetes: `>= 1.26.0`
| fluentd.service.ports[1].containerPort | int | `9880` | |
| fluentd.service.ports[1].name | string | `"http-fluentd"` | |
| fluentd.service.ports[1].protocol | string | `"TCP"` | |
| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey?inCluster"` | |
| fluentd.volumeMounts[0].mountPath | string | `"/run/pki"` | |
| fluentd.volumeMounts[0].name | string | `"trust-store"` | |
| fluentd.volumeMounts[0].readOnly | bool | `true` | |
@ -167,13 +169,27 @@ Kubernetes: `>= 1.26.0`
| jaeger.storage.elasticsearch.user | string | `"admin"` | |
| jaeger.storage.type | string | `"elasticsearch"` | |
| metrics.enabled | bool | `false` | |
| opensearch.dashboard.enabled | bool | `false` | |
| opensearch.dashboard.istio.enabled | bool | `false` | |
| opensearch.dashboard.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| opensearch.dashboard.istio.url | string | `"telemetry-dashboard.example.com"` | |
| opensearch.nodeSets | list | `[]` | |
| opensearch.prometheus | bool | `false` | |
| opensearch.version | string | `"2.17.0"` | |
| opensearch-dashboards.enabled | bool | `false` | |
| opensearch-dashboards.istio.enabled | bool | `false` | |
| opensearch-dashboards.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| opensearch-dashboards.istio.url | string | `"telemetry-dashboard.example.com"` | |
| opensearch-dashboards.resources.limits.cpu | string | `nil` | |
| opensearch-dashboards.resources.limits.memory | string | `"512M"` | |
| opensearch-dashboards.resources.requests.cpu | string | `"100m"` | |
| opensearch-dashboards.resources.requests.memory | string | `"512M"` | |
| opensearch-dashboards.serviceMonitor.enabled | bool | `false` | |
| opensearch-dashboards.serviceMonitor.interval | string | `"30s"` | |
| opensearch.config."opensearch.yml" | string | `"cluster.name: opensearch-cluster\nnetwork.host: 0.0.0.0\ndiscovery.type: single-node\n"` | |
| opensearch.enabled | bool | `false` | |
| opensearch.maxUnavailable | int | `0` | |
| opensearch.opensearchJavaOpts | string | `"-Xmx1024M -Xms1024M"` | |
| opensearch.persistence.size | string | `"8Gi"` | |
| opensearch.resources.limits.memory | string | `"2Gi"` | |
| opensearch.resources.requests.cpu | string | `"500m"` | |
| opensearch.resources.requests.memory | string | `"2Gi"` | |
| opensearch.serviceMonitor.enabled | bool | `false` | |
| opensearch.serviceMonitor.interval | string | `"30s"` | |
| opensearch.singleNode | bool | `true` | |
| opentelemetry-collector.config.exporters.otlp/data-prepper.endpoint | string | `"telemetry-data-prepper:21890"` | |
| opentelemetry-collector.config.exporters.otlp/data-prepper.tls.insecure | bool | `true` | |
| opentelemetry-collector.config.exporters.otlp/jaeger.endpoint | string | `"telemetry-jaeger-collector:4317"` | |

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: v1.16.2
appVersion: v1.17.1
description: A Helm chart for Kubernetes
home: https://www.fluentd.org/
icon: https://www.fluentd.org/images/miscellany/fluentd-logo_2x.png
@ -12,4 +12,4 @@ name: fluentd
sources:
- https://github.com/fluent/fluentd/
- https://github.com/fluent/fluentd-kubernetes-daemonset
version: 0.5.2
version: 0.5.3

View File

@ -1,5 +1,5 @@
{{- define "fluentd.pod" -}}
{{- $defaultTag := printf "%s-debian-%s-1.0" (.Chart.AppVersion) (.Values.variant) -}}
{{- $defaultTag := printf "%s-debian-%s-1.2" (.Chart.AppVersion) (.Values.variant) -}}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}

View File

@ -21,7 +21,7 @@
.idea/
*.tmproj
.vscode/
examples/
# Ignore unittest
tests/
*/__snapshot__/*

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 0.111.0
appVersion: 0.126.0
description: OpenTelemetry Collector Helm chart for Kubernetes
home: https://opentelemetry.io/
icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png
@ -12,4 +12,4 @@ sources:
- https://github.com/open-telemetry/opentelemetry-collector
- https://github.com/open-telemetry/opentelemetry-collector-contrib
type: application
version: 0.108.0
version: 0.125.0

View File

@ -19,7 +19,7 @@ helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm
To install the chart with the release name my-opentelemetry-collector, run the following command:
```console
helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="otel/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
```
Where the `mode` value needs to be set to one of `daemonset`, `deployment`, or `statefulset`.
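For example, to run the collector as a node agent:
```console
helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=daemonset --set image.repository="ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
```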
@ -35,7 +35,7 @@ See [UPGRADING.md](UPGRADING.md).
OpenTelemetry Collector recommends binding receivers' servers to addresses that limit connections to authorized users.
For this reason, by default the chart binds all the Collector's endpoints to the pod's IP.
More info is available in the [Security Best Practices docummentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks)
More info is available in the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks)
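Concretely, the chart's rendered configurations bind receiver endpoints to the pod IP, for example:
```yaml
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: ${env:MY_POD_IP}:4317
      http:
        endpoint: ${env:MY_POD_IP}:4318
```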
Some care must be taken when using `hostNetwork: true`, since the OpenTelemetry Collector will then listen on all addresses in the host network namespace.
@ -106,6 +106,29 @@ to read the files where Kubernetes container runtime writes all containers' cons
#### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion"
#### Log collection for a subset of pods or containers
The `logsCollection` preset by default ingests the logs of all Kubernetes containers.
This is achieved by using an include path of `/var/log/pods/*/*/*.log` for the `filelog` receiver.
To limit ingestion to a subset of pods or containers, the `filelog`
receiver's `include` list can be overridden by supplying explicit configuration.
For example, the following configuration imports logs only for pods within the `example-namespace` namespace:
```yaml
mode: daemonset
presets:
logsCollection:
enabled: true
config:
receivers:
filelog:
include:
- /var/log/pods/example-namespace_*/*/*.log
```
The container logs pipeline uses the `debug` exporter by default.
Paired with the default `filelog` receiver that receives all containers' console output,
it is easy to accidentally feed the exported logs back into the receiver.
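One illustrative way to break the loop (assuming a downstream collector Service named `example-opentelemetry-collector`, as in the chart's example manifests) is to route the logs pipeline to an `otlp` exporter instead of `debug`:
```yaml
mode: daemonset
presets:
  logsCollection:
    enabled: true
config:
  exporters:
    otlp:
      # hypothetical downstream collector endpoint
      endpoint: example-opentelemetry-collector:4317
      tls:
        insecure: true
  service:
    pipelines:
      logs:
        exporters:
          - otlp
```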
@ -149,6 +172,10 @@ This feature is disabled by default. It has the following requirements:
- It requires the [Kubernetes Attributes processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.
#### :memo: Note: Changing or supplementing `k8sattributes` scopes
To minimize the collector's privileges, the [Kubernetes RBAC rules](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) applied to the collector as part of this chart are the minimum required for the `presets.kubernetesAttributes` preset to work. If additional configuration scopes are desired outside of the preset, you must apply the corresponding RBAC rules to grant the collector access.
To enable this feature, set the `presets.kubernetesAttributes.enabled` property to `true`.
Here is an example `values.yaml`:
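A minimal sketch, with the `deployment` mode chosen only for illustration:
```yaml
mode: deployment
presets:
  kubernetesAttributes:
    enabled: true
```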

View File

@ -4,6 +4,14 @@ These upgrade guidelines only contain instructions for version upgrades which re
If the version you want to upgrade to is not listed here, there is nothing for you to do.
Just upgrade and enjoy.
## 0.121.0 to 0.122.0
In the v0.123.1 Collector release we stopped pushing images to Docker Hub due to how their new rate-limit changes affected our CI. If you're using `otel/opentelemetry-collector-k8s` for the image, you should switch to `ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s`. See https://github.com/open-telemetry/community/issues/2641 for more details.
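For example, mirroring the chart's own example values files:
```yaml
image:
  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
  name: "otelcol-k8s"
```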
## 0.110.0 to 0.110.1 or 0.110.2
We broke the selector labels in `0.110.0`, which causes `helm upgrade` to fail. Do not attempt to upgrade from `0.110.0` to either `0.110.1` or `0.110.2`. Go straight to `0.110.3` instead.
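For example, with an assumed release name `my-opentelemetry-collector`:
```console
helm upgrade my-opentelemetry-collector open-telemetry/opentelemetry-collector --version 0.110.3
```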
## 0.97.2 to 0.98.0
> [!WARNING]

View File

@ -1,7 +1,7 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -4,7 +4,7 @@ global:
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
@ -19,6 +19,10 @@ resources:
podLabels:
testLabel: "{{ .Values.global.test }}"
additionalLabels:
testLabel: "{{ .Values.global.test }}"
someLabel: "someValue"
ingress:
enabled: true
hosts:

View File

@ -1,7 +1,7 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -0,0 +1,21 @@
mode: deployment
image:
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
resources:
limits:
cpu: 100m
memory: 200M
internalTelemetryViaOTLP:
endpoint: "http://localhost:4318"
headers:
- name: "x-opentelemetry-customer"
value: "a value"
traces:
enabled: true
metrics:
enabled: true
logs:
enabled: true

View File

@ -1,7 +1,7 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -0,0 +1,15 @@
mode: deployment
image:
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
resources:
limits:
cpu: 100m
memory: 200M
config:
service:
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -0,0 +1,17 @@
mode: deployment
image:
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
resources:
limits:
cpu: 100m
memory: 200M
config:
service:
telemetry:
metrics:
address: 0.0.0.0:8888
resource:
"k8s.namespace.name": "default"

View File

@ -1,7 +1,7 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,7 +1,7 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,17 +0,0 @@
# Examples of chart configuration
Here is a collection of common configurations for the OpenTelemetry collector. Each folder contains an example `values.yaml` and the resulting configurations that are generated by the opentelemetry-collector helm charts.
- [Daemonset only](daemonset-only)
- [Deployment only](deployment-only)
- [Daemonset and deployment](daemonset-and-deployment)
- [Log collection, including collector logs](daemonset-collector-logs)
- [Add component (hostmetrics)](daemonset-hostmetrics)
The manifests are rendered using the `helm template` command and the specific example folder's values.yaml.
Examples are generated by (from root of the repo):
```sh
make generate-examples CHARTS=opentelemetry-collector
```

View File

@ -1,41 +0,0 @@
---
# Source: opentelemetry-collector/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ]
verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["daemonsets", "deployments", "replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
resources: ["jobs", "cronjobs"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch", "list"]

View File

@ -1,21 +0,0 @@
---
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: example-opentelemetry-collector
subjects:
- kind: ServiceAccount
name: example-opentelemetry-collector
namespace: default

View File

@ -1,68 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
processors:
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.deployment.name
- k8s.statefulset.name
- k8s.daemonset.name
- k8s.cronjob.name
- k8s.job.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
receivers:
k8s_cluster:
collection_interval: 10s
k8sobjects:
objects:
- exclude_watch_type:
- DELETED
group: events.k8s.io
mode: watch
name: events
service:
pipelines:
logs:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8sobjects
metrics:
exporters:
- debug
processors:
- k8sattributes
receivers:
- k8s_cluster

View File

@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 360fc84164ca26f5a57ecb44cbcec02ca473b09fc86dba876f71c9fa3617f656
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "3276MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 2
memory: 4Gi
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,34 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 1
resources:
limits:
cpu: 2
memory: 4Gi
presets:
clusterMetrics:
enabled: true
kubernetesAttributes:
enabled: true
kubernetesEvents:
enabled: true
alternateConfig:
exporters:
debug: {}
service:
pipelines:
logs:
exporters:
- debug
metrics:
exporters:
- debug

View File

@ -1,34 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
config:
exporters:
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
service:
pipelines:
logs:
exporters:
- otlp
- debug
metrics:
exporters:
- otlp
- debug
traces:
exporters:
- otlp
- debug
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -1,13 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -1,93 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
otlp:
endpoint: example-opentelemetry-collector:4317
tls:
insecure: true
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- otlp
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,104 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: a2d0d31bd929305e52879f78f502d56ad49d9ef9396838490646e9034d2243de
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
filelog:
exclude: []
include:
- /var/log/pods/*/*/*.log
include_file_name: false
include_file_path: true
operators:
- id: container-parser
max_log_size: 102400
type: container
retry_on_failure:
enabled: true
start_at: end
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- filelog
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,110 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 5237e54a1cdaad762876da10a5bab6f686506211aaa2c70b901a74fec8b82140
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: varlogpods
mountPath: /var/log/pods
readOnly: true
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: varlogpods
hostPath:
path: /var/log/pods
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
hostNetwork: false

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,12 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
logsCollection:
enabled: true
includeCollectorLogs: true

View File

@ -1,133 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
hostmetrics:
collection_interval: 10s
root_path: /hostfs
scrapers:
cpu: null
disk: null
filesystem:
exclude_fs_types:
fs_types:
- autofs
- binfmt_misc
- bpf
- cgroup2
- configfs
- debugfs
- devpts
- devtmpfs
- fusectl
- hugetlbfs
- iso9660
- mqueue
- nsfs
- overlay
- proc
- procfs
- pstore
- rpc_pipefs
- securityfs
- selinuxfs
- squashfs
- sysfs
- tracefs
match_type: strict
exclude_mount_points:
match_type: regexp
mount_points:
- /dev/*
- /proc/*
- /sys/*
- /run/k3s/containerd/*
- /var/lib/docker/*
- /var/lib/kubelet/*
- /snap/*
load: null
memory: null
network: null
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
- hostmetrics
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,105 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 98dea268c8a8fe987e082a4e85801387f2b60fefc281f9b1edd1080f0af62574
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- name: hostfs
mountPath: /hostfs
readOnly: true
mountPropagation: HostToContainer
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- name: hostfs
hostPath:
path: /
hostNetwork: false

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,12 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
hostMetrics:
enabled: true

View File

@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,119 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
lifecycle:
preStop:
exec:
command:
- /test/sleep
- "5"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- mountPath: /test
name: test
initContainers:
- args:
- /bin/sleep
- /test/sleep
command:
- cp
image: 'busybox:latest'
name: test
volumeMounts:
- mountPath: /test
name: test
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
- emptyDir: {}
name: test
hostNetwork: false

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,37 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
global:
image: busybox:latest
initContainers:
- name: test
command:
- cp
args:
- /bin/sleep
- /test/sleep
image: "{{ .Values.global.image }}"
volumeMounts:
- name: test
mountPath: /test
extraVolumes:
- name: test
emptyDir: {}
extraVolumeMounts:
- name: test
mountPath: /test
lifecycleHooks:
preStop:
exec:
command:
- /test/sleep
- "5"

View File

@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,98 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: example-opentelemetry-collector-agent
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: agent-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
hostPort: 6831
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
hostPort: 14250
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
hostPort: 14268
- name: otlp
containerPort: 4317
protocol: TCP
hostPort: 4317
- name: otlp-http
containerPort: 4318
protocol: TCP
hostPort: 4318
- name: zipkin
containerPort: 9411
protocol: TCP
hostPort: 9411
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-agent
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,7 +0,0 @@
mode: daemonset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"

View File

@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "3276MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 2
memory: 4Gi
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster
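
Workloads reach the collector through this Service; appProtocol: grpc is a hint for L7-aware proxies and load balancers. A hedged sketch of an application-side OTLP exporter pointing at it (the DNS name follows from the Service metadata above; the TLS setting is an assumption, since the example configures none):

exporters:
  otlp:
    endpoint: example-opentelemetry-collector.default.svc:4317
    tls:
      insecure: true   # assumed: the example terminates no TLS

HTTP-based clients would instead target port 4318, e.g. http://example-opentelemetry-collector.default.svc:4318/v1/traces.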

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,14 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 3
resources:
limits:
cpu: 2
memory: 4Gi
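
These are the values that render the preceding three-replica Deployment. Deployment mode is stateless, so scaling is purely horizontal; instead of a fixed replicaCount, the chart can also be handed an HPA (the autoscaling.* keys are assumed chart options here, sketched with illustrative thresholds):

replicaCount: 3
# alternative: let an HPA manage the count
autoscaling:
  enabled: true
  minReplicas: 3
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80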

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
service:
extensions:
- health_check
pipelines:
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,82 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 09d236d9b1f430ef0e0c20f6667eb2f1d17c9f550a8648c0bc86ecf703089397
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,33 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,30 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
ports:
jaeger-compact:
enabled: false
jaeger-thrift:
enabled: false
jaeger-grpc:
enabled: false
zipkin:
enabled: false
config:
receivers:
jaeger: null
prometheus: null
zipkin: null
service:
pipelines:
traces:
receivers:
- otlp
metrics: null
logs: null
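
These values produced the preceding OTLP-only manifests. Setting a default key to null is the Helm idiom for deleting it during the values merge: the jaeger/prometheus/zipkin receivers disappear from the rendered config, the metrics and logs pipelines are dropped, and the matching container/Service ports are switched off separately under ports. A hedged sketch of a narrower variant that removes only zipkin:

ports:
  zipkin:
    enabled: false      # drop the container and Service port
config:
  receivers:
    zipkin: null        # delete the default receiver during merge
  service:
    pipelines:
      traces:
        receivers:
          - otlp
          - jaeger      # keep the remaining defaults explicitly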

View File

@ -1,28 +0,0 @@
mode: deployment
resources:
limits:
cpu: 100m
memory: 200M
configMap:
create: false
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
extraArgs: ["--config=/conf/config.yaml"]
extraVolumes:
- name: custom-otelcol-configmap
configMap:
name: custom-otel-collector-config
items:
- key: config
path: config.yaml
defaultMode: 420
extraVolumeMounts:
- name: custom-otelcol-configmap
mountPath: /conf/config.yaml
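
With configMap.create: false the chart renders no ConfigMap at all; the custom-otel-collector-config referenced above must already exist before the pod starts. A minimal sketch of such a ConfigMap (the config body is illustrative, not taken from the chart). Note that the rendered Deployment below still probes port 13133, so the custom config has to keep the health_check extension:

apiVersion: v1
kind: ConfigMap
metadata:
  name: custom-otel-collector-config
  namespace: default
data:
  config: |
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    receivers:
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
    exporters:
      debug: {}
    service:
      extensions: [health_check]
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [debug]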

View File

@ -1,101 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/config.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf/config.yaml
name: custom-otelcol-configmap
volumes:
- configMap:
defaultMode: 420
items:
- key: config
path: config.yaml
name: custom-otel-collector-config
name: custom-otelcol-configmap
hostNetwork: false

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,23 +0,0 @@
---
# Source: opentelemetry-collector/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups: [""]
resources: ["pods", "namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get", "list", "watch"]
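
The k8sattributes processor works by watching the API server, which is why it needs get/watch/list on pods and namespaces (and on replicasets, to resolve a pod back to its owning Deployment name). On large clusters the watch can be narrowed to the local node when the collector runs as a per-node agent; a hedged sketch (KUBE_NODE_NAME is an assumed env var, typically injected via a fieldRef on spec.nodeName):

processors:
  k8sattributes:
    filter:
      node_from_env_var: KUBE_NODE_NAME   # only watch pods on this node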

View File

@ -1,21 +0,0 @@
---
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: example-opentelemetry-collector
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: example-opentelemetry-collector
subjects:
- kind: ServiceAccount
name: example-opentelemetry-collector
namespace: default

View File

@ -1,112 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
k8sattributes:
extract:
metadata:
- k8s.namespace.name
- k8s.deployment.name
- k8s.statefulset.name
- k8s.daemonset.name
- k8s.cronjob.name
- k8s.job.name
- k8s.node.name
- k8s.pod.name
- k8s.pod.uid
- k8s.pod.start_time
passthrough: false
pod_association:
- sources:
- from: resource_attribute
name: k8s.pod.ip
- sources:
- from: resource_attribute
name: k8s.pod.uid
- sources:
- from: connection
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- k8sattributes
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- resource
- k8sattributes
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888
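
One caveat in this rendered config: the traces pipeline lists a resource processor, but processors: defines only batch, k8sattributes, and memory_limiter. A collector config that references an undefined component fails validation at startup, so this example also needs a definition merged in, for instance (key, value, and action are illustrative):

processors:
  resource:
    attributes:
      - key: deployment.environment
        value: example     # illustrative
        action: upsert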

View File

@ -1,94 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 63dd878669e1f465c60f65786dc6091bd23f761e6332c1eb2c14dc626f85a246
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: standalone-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
internalTrafficPolicy: Cluster

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,21 +0,0 @@
mode: deployment
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
presets:
kubernetesAttributes:
enabled: true
config:
service:
pipelines:
traces:
processors:
- resource
- k8sattributes
- batch
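
This preset is what generated the whole k8sattributes bundle above: enabling presets.kubernetesAttributes injects the processor defaults into the config and renders the matching ClusterRole/ClusterRoleBinding, while the pipeline override re-orders the processors (and, as noted above, references a resource processor it never defines). The preset takes further switches; a hedged sketch with assumed chart options:

presets:
  kubernetesAttributes:
    enabled: true
    extractAllPodLabels: true        # assumed option: copy pod labels into resource attributes
    extractAllPodAnnotations: false  # assumed option: same for annotations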

View File

@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-statefulset.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-statefulset
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: statefulset-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: statefulset-collector
internalTrafficPolicy: Cluster

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,101 +0,0 @@
---
# Source: opentelemetry-collector/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
serviceName: example-opentelemetry-collector
podManagementPolicy: Parallel
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: statefulset-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 1d41134c1cf3e69cb683b501ecc5e5a8ef07a5fe44edec2851e0fd923cac34a6
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: statefulset-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-statefulset
items:
- key: relay
path: relay.yaml
hostNetwork: false

View File

@ -1,14 +0,0 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 2
resources:
limits:
cpu: 100m
memory: 200M

View File

@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-statefulset.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector-statefulset
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
component: statefulset-collector
spec:
type: ClusterIP
ports:
- name: jaeger-compact
port: 6831
targetPort: 6831
protocol: UDP
- name: jaeger-grpc
port: 14250
targetPort: 14250
protocol: TCP
- name: jaeger-thrift
port: 14268
targetPort: 14268
protocol: TCP
- name: otlp
port: 4317
targetPort: 4317
protocol: TCP
appProtocol: grpc
- name: otlp-http
port: 4318
targetPort: 4318
protocol: TCP
- name: zipkin
port: 9411
targetPort: 9411
protocol: TCP
selector:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: statefulset-collector
internalTrafficPolicy: Cluster

View File

@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm

View File

@ -1,126 +0,0 @@
---
# Source: opentelemetry-collector/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
serviceName: example-opentelemetry-collector
podManagementPolicy: Parallel
replicas: 2
persistentVolumeClaimRetentionPolicy:
whenDeleted: Delete
whenScaled: Delete
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: statefulset-collector
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: 1d41134c1cf3e69cb683b501ecc5e5a8ef07a5fe44edec2851e0fd923cac34a6
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: statefulset-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
- mountPath: /var/lib/storage/queue
name: queue
initContainers:
- command:
- sh
- -c
- 'chown -R 10001: /var/lib/storage/queue'
image: busybox:latest
name: init-fs
volumeMounts:
- mountPath: /var/lib/storage/queue
name: queue
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector-statefulset
items:
- key: relay
path: relay.yaml
hostNetwork: false
volumeClaimTemplates:
- metadata:
name: queue
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: standard

View File

@ -1,46 +0,0 @@
mode: statefulset
image:
repository: "otel/opentelemetry-collector-k8s"
command:
name: "otelcol-k8s"
replicaCount: 2
resources:
limits:
cpu: 100m
memory: 200M
statefulset:
persistentVolumeClaimRetentionPolicy:
enabled: true
whenDeleted: Delete
whenScaled: Delete
volumeClaimTemplates:
- metadata:
name: queue
spec:
storageClassName: standard
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "1Gi"
extraVolumeMounts:
- name: queue
mountPath: /var/lib/storage/queue
initContainers:
- name: init-fs
image: busybox:latest
command:
- sh
- "-c"
- "chown -R 10001: /var/lib/storage/queue"
volumeMounts:
- name: queue
mountPath: /var/lib/storage/queue
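
The queue volume above is created, mounted, and chown'd to uid 10001 (the distribution's non-root user) by the init container, yet nothing in the rendered relay config actually writes to it: persisting an exporter queue requires the file_storage extension plus a sending_queue that points at it. A hedged sketch of the missing wiring, assuming the otelcol-k8s distribution bundles file_storage and that an upstream backend exists:

config:
  extensions:
    file_storage:
      directory: /var/lib/storage/queue
  exporters:
    otlphttp:
      endpoint: https://otlp-backend.example.com   # illustrative backend
      sending_queue:
        enabled: true
        storage: file_storage                      # persist the queue on the PVC
  service:
    extensions:
      - health_check
      - file_storage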

View File

@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
data:
relay: |
exporters:
debug: {}
extensions:
health_check:
endpoint: ${env:MY_POD_IP}:13133
processors:
batch: {}
memory_limiter:
check_interval: 5s
limit_percentage: 80
spike_limit_percentage: 25
receivers:
jaeger:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:14250
thrift_compact:
endpoint: ${env:MY_POD_IP}:6831
thrift_http:
endpoint: ${env:MY_POD_IP}:14268
otlp:
protocols:
grpc:
endpoint: ${env:MY_POD_IP}:4317
http:
endpoint: ${env:MY_POD_IP}:4318
prometheus:
config:
scrape_configs:
- job_name: opentelemetry-collector
scrape_interval: 10s
static_configs:
- targets:
- ${env:MY_POD_IP}:8888
zipkin:
endpoint: ${env:MY_POD_IP}:9411
service:
extensions:
- health_check
pipelines:
logs:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
metrics:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- prometheus
traces:
exporters:
- debug
processors:
- memory_limiter
- batch
receivers:
- otlp
- jaeger
- zipkin
telemetry:
metrics:
address: ${env:MY_POD_IP}:8888

View File

@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: example-opentelemetry-collector
namespace: default
labels:
helm.sh/chart: opentelemetry-collector-0.108.0
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
app.kubernetes.io/version: "0.111.0"
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
strategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d
labels:
app.kubernetes.io/name: opentelemetry-collector
app.kubernetes.io/instance: example
component: standalone-collector
spec:
serviceAccountName: example-opentelemetry-collector
securityContext:
{}
containers:
- name: opentelemetry-collector
command:
- /otelcol-k8s
args:
- --config=/conf/relay.yaml
securityContext:
{}
image: "otel/opentelemetry-collector-k8s:0.111.0"
imagePullPolicy: IfNotPresent
ports:
- name: jaeger-compact
containerPort: 6831
protocol: UDP
- name: jaeger-grpc
containerPort: 14250
protocol: TCP
- name: jaeger-thrift
containerPort: 14268
protocol: TCP
- name: otlp
containerPort: 4317
protocol: TCP
- name: otlp-http
containerPort: 4318
protocol: TCP
- name: zipkin
containerPort: 9411
protocol: TCP
env:
- name: MY_POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: GOMEMLIMIT
value: "152MiB"
livenessProbe:
httpGet:
path: /
port: 13133
readinessProbe:
httpGet:
path: /
port: 13133
resources:
limits:
cpu: 100m
memory: 200M
volumeMounts:
- mountPath: /conf
name: opentelemetry-collector-configmap
volumes:
- name: opentelemetry-collector-configmap
configMap:
name: example-opentelemetry-collector
items:
- key: relay
path: relay.yaml
hostNetwork: false

Some files were not shown because too many files have changed in this diff.