Compare commits
34 commits: f44e89d848 ... 5ee3f2ffa6

SHA1:
5ee3f2ffa6
5a16422f75
84081514c6
12fd6df3d8
71d8919cc5
58986e1d5b
257bedf284
6a002155a7
fb9865ef2c
f01df7954d
e6493e9961
9e87f92d45
49fa7b3c42
1538ea0d45
f72ef007f2
87e7f5fe20
7527e085ea
7612d257aa
c16a233864
33307fccce
09a2ead705
7863202ca7
d3036ad1ac
04ca35c676
55e22e7f6d
9e9ae3e8b8
ccdf4652cf
1b37ad37e7
94e3458364
e7673a1e59
405eea7119
1c1ff749af
cc6650becb
4184cef843
@@ -2,7 +2,13 @@

# All things BEFORE the first controller / control plane upgrade
pre_control_plane_upgrade_cluster() {
  echo
  if [ "$PLATFORM" != "gke" ];then
    # patch multus DS to ONLY run pods on 1.31 controllers
    kubectl patch ds kube-multus-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true

    # patch kube-proxy DS to ONLY run pods on 1.31 controllers
    kubectl patch ds kube-proxy -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": {"node.kubernetes.io/kubezero.version": "v1.31.6"}}}}}' || true
  fi
}

@@ -16,7 +22,20 @@ post_control_plane_upgrade_cluster() {

pre_cluster_upgrade_final() {
  set +e

  echo
  if [ "$PLATFORM" != "gke" ];then
    # cleanup multus
    kubectl delete clusterrolebinding multus
    kubectl delete clusterrole multus
    kubectl delete serviceaccount multus -n kube-system
    kubectl delete cm multus-cni-config -n kube-system
    kubectl delete ds kube-multus-ds -n kube-system
    kubectl delete NetworkAttachmentDefinition cilium
    kubectl delete crd network-attachment-definitions.k8s.cni.cncf.io

    # remove kube-proxy
    kubectl -n kube-system delete ds kube-proxy
    kubectl -n kube-system delete cm kube-proxy
  fi

  set -e
}
@@ -63,7 +63,7 @@ render_kubeadm() {

  # Assemble kubeadm config
  cat /dev/null > ${HOSTFS}/etc/kubernetes/kubeadm.yaml
-  for f in Cluster KubeProxy Kubelet; do
+  for f in Cluster Kubelet; do
    # echo "---" >> /etc/kubernetes/kubeadm.yaml
    cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
  done
@@ -169,7 +169,7 @@ kubeadm_upgrade() {
  else
    pre_cluster_upgrade_final

-    _kubeadm upgrade apply phase addon all $KUBE_VERSION
+    _kubeadm upgrade apply phase addon coredns $KUBE_VERSION

    post_cluster_upgrade_final
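With kube-proxy dropped, the final upgrade step applies only the CoreDNS addon phase, since `addon all` would re-install the kube-proxy DaemonSet. A minimal sketch of the equivalent raw kubeadm call (`_kubeadm` is assumed to be the script's thin wrapper around kubeadm):

```bash
# Apply only the CoreDNS addon during the upgrade; skipping "addon all"
# prevents kubeadm from re-creating kube-proxy.
kubeadm upgrade apply phase addon coredns "$KUBE_VERSION"
```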
@@ -239,7 +239,7 @@ control_plane_node() {
  if [[ "$CMD" =~ ^(join)$ ]]; then
    # Delete any former self in case forseti did not delete yet
    kubectl delete node ${NODENAME} --wait=true || true
-    # Wait for all pods to be deleted otherwise we end up with stale pods eg. kube-proxy and all goes to ....
+    # Wait for all pods to be deleted otherwise we end up with stale pods
    kubectl delete pods -n kube-system --field-selector spec.nodeName=${NODENAME}

    # get current running etcd pods for etcdctl commands
@@ -251,7 +251,7 @@ control_plane_node() {
    done

    # see if we are a former member and remove our former self if so
-    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
+    MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
    [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints

    # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
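The added `|| true` matters under `set -e`: on a fresh join, grep finds no matching member and exits non-zero, which would otherwise abort the whole join. The lookup in isolation, as a sketch (ETCD_NODENAME, etcd_endpoints and the retry helper are set by the surrounding function):

```bash
# grep exits 1 when this node was never an etcd member; without `|| true`
# that kills the script under `set -e`. An empty MY_ID simply skips removal.
MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints \
        | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//' || true)
[ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
```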
@@ -309,8 +309,9 @@ control_plane_node() {
    _kubeadm init phase mark-control-plane
    _kubeadm init phase kubelet-finalize all

+    # we skip kube-proxy
    if [[ "$CMD" =~ ^(bootstrap|restore)$ ]]; then
-      _kubeadm init phase addon all
+      _kubeadm init phase addon coredns
    fi

    post_kubeadm
@@ -3,6 +3,7 @@

# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
LOCAL_DEV=${LOCAL_DEV:-""}
+ENV_VALUES=""

export HELM_SECRETS_BACKEND="vals"
@@ -80,15 +81,19 @@ function get_kubezero_secret() {
  get_secret_val kubezero kubezero-secrets "$1"
}

-function ensure_kubezero_secret_key() {
-  local secret="$(kubectl get secret -n kubezero kubezero-secrets -o yaml)"
-  local key=""
-  local val=""
-
-  for key in $@; do
-    val=$(echo "$secret" | yq ".data.\"$key\"")
+function ensure_kubezero_secret_key() {
+  local ns=$1
+  local secret=$2
+
+  local secret="$(kubectl get secret -n $ns $secret -o yaml)"
+  local key
+  local val
+
+  for key in $1; do
+    val=$(echo $secret | yq ".data.\"$key\"")
    if [ "$val" == "null" ]; then
-      kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"\" }}"
+      set_kubezero_secret $key ""
    fi
  done
}
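Callers now pass the namespace and secret name explicitly instead of relying on the hard-coded kubezero/kubezero-secrets pair, and missing keys are created through set_kubezero_secret (assumed to be the writer counterpart defined elsewhere in this library). A hedged usage sketch with a hypothetical key path:

```bash
# Ensure a key exists (created empty if missing) in the given secret.
ensure_kubezero_secret_key kubezero kubezero-secrets "telemetry.fluentd.source.sharedKey"
```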
@@ -210,7 +215,7 @@ for manifest in yaml.safe_load_all(sys.stdin):

# helm template | kubectl apply -f -
# confine to one namespace if possible
function render() {
-  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
+  helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-tests --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $ENV_VALUES \
  | python3 -c '
#!/usr/bin/python3
import yaml
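The render call swaps the pass-through `$@` for `$ENV_VALUES`, the variable initialized at the top of the file, so extra value files are assembled centrally rather than forwarded per call. A minimal standalone sketch under stated assumptions (the chart path, module name and override file are hypothetical; API_VERSIONS, KUBE_VERSION and WORKDIR as defined above):

```bash
# ENV_VALUES would typically hold extra "-f <file>" arguments collected
# by the calling script; it is empty by default.
ENV_VALUES="-f /tmp/env-overrides.yaml"
helm secrets --evaluate-templates template ./charts/kubezero-network \
  -n kube-system --name-template network --skip-tests --skip-crds \
  -f "$WORKDIR/values.yaml" $API_VERSIONS --kube-version "$KUBE_VERSION" $ENV_VALUES
```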
@@ -262,6 +267,11 @@ function _helm() {

    crds

+  elif [ $action == "dryrun" ]; then
+    cat $WORKDIR/values.yaml
+    render
+    cat $WORKDIR/helm.yaml
+
  elif [ $action == "apply" -o $action == "replace" ]; then
    echo "using values to $action of module $module: "
    cat $WORKDIR/values.yaml
@@ -10,7 +10,14 @@ def migrate(values):

    # 1.32
    try:
        values["istio-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-ingress"]["gateway"]["service"]["ports"]
        values["istio-ingress"]["gateway"]["service"].pop("ports")
    except KeyError:
        pass

    try:
        values["istio-private-ingress"]["gateway"]["service"]["extraPorts"] = values["istio-private-ingress"]["gateway"]["service"]["ports"]
        values["istio-private-ingress"]["gateway"]["service"].pop("ports")
    except KeyError:
        pass
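The same 1.32 rename expressed as a hedged yq sketch, for anyone migrating a values file by hand (the in-repo migration is the Python above, which also guards missing keys with try/except):

```bash
# Move the istio-ingress gateway service "ports" list to "extraPorts"
# in place. Only run this if the ports key actually exists; the Python
# version swallows KeyError instead.
yq -i '."istio-ingress".gateway.service.extraPorts = ."istio-ingress".gateway.service.ports
       | del(."istio-ingress".gateway.service.ports)' values.yaml
```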
@@ -47,7 +47,6 @@ Kubernetes: `>= 1.32.0-0`

- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration

- https://github.com/awslabs/amazon-eks-ami

@@ -22,7 +22,6 @@

- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration

- https://github.com/awslabs/amazon-eks-ami
@@ -29,12 +29,6 @@ kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
-  - level: None
-    users: ["system:kube-proxy"]
-    verbs: ["watch"]
-    resources:
-      - group: "" # core
-        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
@@ -6,6 +6,8 @@ featureGates:
  ControlPlaneKubeletLocalMode: true
  NodeLocalCRISocket: true
controlPlaneEndpoint: {{ .Values.api.endpoint }}
+proxy:
+  disabled: true
networking:
  podSubnet: 10.244.0.0/16
etcd:
@@ -7,6 +7,8 @@ localAPIEndpoint:
patches:
  directory: {{ . }}
{{- end }}
+skipPhases:
+  - addon/kube-proxy
nodeRegistration:
  criSocket: "unix:///run/containerd/containerd.sock"
  ignorePreflightErrors:
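The skipPhases list in the InitConfiguration keeps kubeadm from ever installing kube-proxy on new nodes. For a one-off bootstrap without the config file, the same effect is available on the CLI (a sketch, not how this repo drives kubeadm):

```bash
# Equivalent CLI form: skip the kube-proxy addon phase at init time.
kubeadm init --skip-phases=addon/kube-proxy
```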
@@ -1,10 +0,0 @@
-apiVersion: kubeproxy.config.k8s.io/v1alpha1
-kind: KubeProxyConfiguration
-# kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
-metricsBindAddress: "0.0.0.0:10249"
-mode: "iptables"
-logging:
-  format: json
-iptables:
-  localhostNodePorts: false
-#nodePortAddresses: primary
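With the KubeProxyConfiguration template deleted, service load-balancing falls entirely to Cilium (`kubeProxyReplacement: true` in the network chart further down). A hedged post-upgrade check (assumes the agent DaemonSet is named cilium in kube-system, as the upstream chart deploys it):

```bash
# KubeProxyReplacement should report True on every agent once kube-proxy
# is gone; grep the status line from one agent pod.
kubectl -n kube-system exec ds/cilium -- cilium status | grep KubeProxyReplacement
```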
@@ -3,12 +3,6 @@ kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
-  - level: None
-    users: ["system:kube-proxy"]
-    verbs: ["watch"]
-    resources:
-      - group: "" # core
-        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.

@@ -114,7 +108,7 @@ rules:
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
-    resources:
+    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"

@@ -137,7 +131,7 @@ rules:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
-    resources:
+    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
@@ -4,5 +4,5 @@ spec:
    - name: kube-apiserver
      resources:
        requests:
-          cpu: 250m
-          memory: 1268Mi
+          cpu: 200m
+          memory: 1536Mi

@@ -3,5 +3,5 @@ spec:
    - name: kube-controller-manager
      resources:
        requests:
-          cpu: 50m
-          memory: 192Mi
+          cpu: 30m
+          memory: 128Mi

@@ -3,5 +3,5 @@ spec:
    - name: kube-scheduler
      resources:
        requests:
-          cpu: 50m
-          memory: 96Mi
+          cpu: 30m
+          memory: 64Mi
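These three snippets are kubeadm patch files for the static pods; kubeadm applies them via the `patches.directory` wired up in the InitConfiguration above. A sketch of the expected on-disk layout (file names follow kubeadm's `target[suffix][+patchtype].ext` convention; the exact directory and suffixes here are assumptions, not confirmed by this diff):

```bash
# kubeadm picks up one patch file per static-pod target from the patches dir.
ls /etc/kubernetes/patches/
# kube-apiserver+merge.yaml  kube-controller-manager+merge.yaml  kube-scheduler+merge.yaml
```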
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
-version: 0.8.14
+version: 0.8.15
appVersion: v1.31
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

@@ -34,7 +34,7 @@ dependencies:
    repository: https://nvidia.github.io/k8s-device-plugin
    condition: nvidia-device-plugin.enabled
  - name: neuron-helm-chart
-    version: 1.1.1
+    version: 1.1.2
    # https://github.com/aws-neuron/neuron-helm-charts/tree/main/charts/neuron-helm-chart
    repository: oci://public.ecr.aws/neuron #/neuron-helm-chart
    condition: neuron-helm-chart.enabled

@@ -43,7 +43,7 @@ dependencies:
    repository: https://bitnami-labs.github.io/sealed-secrets
    condition: sealed-secrets.enabled
  - name: aws-node-termination-handler
-    version: 0.27.0
+    version: 0.27.1
    repository: "oci://public.ecr.aws/aws-ec2/helm"
    condition: aws-node-termination-handler.enabled
  - name: aws-eks-asg-rolling-update-handler
@@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
-version: 0.3.3
+version: 0.4.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -22,7 +22,7 @@ dependencies:
    repository: https://argoproj.github.io/argo-helm
    condition: argo-events.enabled
  - name: argo-cd
-    version: 7.9.0
+    version: 8.0.9
    repository: https://argoproj.github.io/argo-helm
    condition: argo-cd.enabled
  - name: argocd-image-updater
@@ -1,6 +1,6 @@
# kubezero-argo

-
+

KubeZero Argo - Events, Workflow, CD

@@ -18,7 +18,7 @@ Kubernetes: `>= 1.30.0-0`

| Repository | Name | Version |
|------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 7.9.0 |
+| https://argoproj.github.io/argo-helm | argo-cd | 8.0.9 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.15 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.12.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |

@@ -28,17 +28,16 @@ Kubernetes: `>= 1.30.0-0`

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.configs.cm."application.instanceLabelKey" | string | `nil` | |
| argo-cd.configs.cm."application.resourceTrackingMethod" | string | `"annotation"` | |
| argo-cd.configs.cm."resource.customizations" | string | `"argoproj.io/Application:\n health.lua: |\n hs = {}\n hs.status = \"Progressing\"\n hs.message = \"\"\n if obj.status ~= nil then\n if obj.status.health ~= nil then\n hs.status = obj.status.health.status\n if obj.status.health.message ~= nil then\n hs.message = obj.status.health.message\n end\n end\n end\n return hs\n"` | |
| argo-cd.configs.cm."server.rbac.log.enforce.enable" | string | `nil` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
-| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.31 - Release notes"` | |
+| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.32 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
-| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.31"` | |
+| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.32"` | |
| argo-cd.configs.cm.installationID | string | `"KubeZero-ArgoCD"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
| argo-cd.configs.params."controller.resource.health.persist" | string | `"false"` | |
| argo-cd.configs.params."controller.sync.timeout.seconds" | int | `1800` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |

@@ -54,7 +53,7 @@ Kubernetes: `>= 1.30.0-0`
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
-| argo-cd.global.image.tag | string | `"v2.14.9-1"` | |
+| argo-cd.global.image.tag | string | `"v3.0.3"` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.global.networkPolicy.create | bool | `true` | |
| argo-cd.istio.enabled | bool | `false` | |
@@ -4,6 +4,6 @@ condition: 'index .Values "argo-cd" "controller" "metrics" "enabled"'
folder: KubeZero
dashboards:
  - name: ArgoCD
-    url: https://grafana.com/api/dashboards/14584/revisions/1/download
+    url: https://raw.githubusercontent.com/argoproj/argo-cd/refs/heads/master/examples/dashboard.json
    tags:
      - ArgoCD
File diff suppressed because one or more lines are too long
@@ -25,9 +25,8 @@ spec:
    automated:
      prune: true
  syncOptions:
    - ServerSideApply=true
    - ApplyOutOfSyncOnly=true
  info:
    - name: "Source:"
-      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.31/"
+      value: "https://git.zero-downtime.net/ZeroDownTime/KubeZero/src/branch/release/v1.32/"
{{- end }}
@@ -26,7 +26,7 @@ argo-events:
      versions:
        - version: 2.10.11
          natsImage: nats:2.11.4-scratch
-          metricsExporterImage: natsio/prometheus-nats-exporter:0.17.2
+          metricsExporterImage: natsio/prometheus-nats-exporter:0.17.3
          configReloaderImage: natsio/nats-server-config-reloader:0.14.1
          startCommand: /nats-server

@@ -38,7 +38,7 @@ argo-cd:
      format: json
    image:
      repository: public.ecr.aws/zero-downtime/zdt-argocd
-      tag: v2.14.9-1
+      tag: v3.0.3
    networkPolicy:
      create: true

@@ -49,8 +49,8 @@ argo-cd:
      .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }

    cm:
-      ui.bannercontent: "KubeZero v1.31 - Release notes"
-      ui.bannerurl: "https://kubezero.com/releases/v1.31"
+      ui.bannercontent: "KubeZero v1.32 - Release notes"
+      ui.bannerurl: "https://kubezero.com/releases/v1.32"
      ui.bannerpermanent: "true"
      ui.bannerposition: "bottom"

@@ -59,9 +59,9 @@ argo-cd:

      timeout.reconciliation: 300s

      application.resourceTrackingMethod: annotation
      installationID: "KubeZero-ArgoCD"
      application.instanceLabelKey: Null
      server.rbac.log.enforce.enable: Null

      resource.customizations: |
        argoproj.io/Application:

@@ -89,7 +89,6 @@ argo-cd:
      extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="

    params:
      controller.resource.health.persist: "false"
      controller.diff.server.side: "true"
      controller.sync.timeout.seconds: 1800
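The chart bump to argo-cd 8.x brings the Argo CD v3 image (v2.14.9-1 to v3.0.3). A hedged smoke check after syncing (the label follows the upstream chart's conventions; the argocd namespace is an assumption about where the module deploys):

```bash
# Confirm the server is actually running the v3 image after the upgrade.
kubectl -n argocd get deploy -l app.kubernetes.io/name=argocd-server \
  -o jsonpath='{.items[0].spec.template.spec.containers[0].image}{"\n"}'
```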
@@ -289,7 +289,7 @@ trivy:
    #tag: 0.57.0
    persistence:
      enabled: true
-      size: 1Gi
+      size: 2Gi
  rbac:
    create: false
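Doubling the trivy cache volume only takes effect on existing installs if the PVC can be expanded. A hedged sketch (namespace and PVC name are hypothetical placeholders; expansion requires allowVolumeExpansion on the StorageClass, otherwise the claim must be recreated):

```bash
# Grow the existing trivy cache claim to match the new chart default.
kubectl -n harbor patch pvc data-harbor-trivy-0 \
  -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'
```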
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-falco
description: Falco Container Security and Audit components
type: application
-version: 0.1.2
+version: 0.1.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -16,7 +16,7 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: falco
-    version: 4.2.5
+    version: 5.0.0
    repository: https://falcosecurity.github.io/charts
    condition: k8saudit.enabled
    alias: k8saudit
@@ -42,17 +42,3 @@ helm.sh/chart: {{ include "kubezero-lib.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}
-
-{{- /*
-kubezero-lib.util.merge will merge two YAML templates and output the result.
-This takes an array of three values:
-- the top context
-- the template name of the overrides (destination)
-- the template name of the base (source)
-*/ -}}
-{{- define "kubezero-lib.util.merge" -}}
-{{- $top := first . -}}
-{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
-{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
-{{- toYaml (merge $overrides $tpl) -}}
-{{- end -}}
@@ -12,7 +12,7 @@ kube-prometheus-stack:
  kubeStateMetrics:
    enabled: true
  kubeProxy:
-    enabled: true
+    enabled: false

  kubeEtcd:
    enabled: true
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
-version: 0.3.11
+version: 0.3.12
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -17,7 +17,7 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: nats
-    version: 1.3.3
+    version: 1.3.7
    repository: https://nats-io.github.io/k8s/helm/charts/
    condition: nats.enabled
  - name: rabbitmq
@@ -1,6 +1,6 @@
# kubezero-mq

-  
+  

KubeZero umbrella chart for MQ systems like NATS, RabbitMQ

@@ -20,7 +20,7 @@ Kubernetes: `>= 1.26.0`
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://charts.bitnami.com/bitnami | rabbitmq | 14.7.0 |
-| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.3 |
+| https://nats-io.github.io/k8s/helm/charts/ | nats | 1.3.7 |

## Values

@@ -32,6 +32,8 @@ Kubernetes: `>= 1.26.0`
| nats.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| nats.mqtt.enabled | bool | `false` | |
| nats.natsBox.enabled | bool | `false` | |
+| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".maxSkew | int | `1` | |
+| nats.podTemplate.topologySpreadConstraints."kubernetes.io/hostname".whenUnsatisfiable | string | `"DoNotSchedule"` | |
| nats.promExporter.enabled | bool | `false` | |
| nats.promExporter.podMonitor.enabled | bool | `false` | |
| rabbitmq.auth.existingErlangSecret | string | `"rabbitmq"` | |
@@ -1,7 +1,7 @@
configmap: grafana-dashboards-nats
-condition: '.Values.nats.exporter.serviceMonitor.enabled'
+condition: '.Values.nats.promExporter.podMonitor.enabled'
gzip: true
-# folder:
+# folder:
dashboards:
  - name: nats
    url: https://grafana.com/api/dashboards/13707/revisions/1/download
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network
description: KubeZero umbrella chart for all things network
type: application
-version: 0.5.8
+version: 0.5.9
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -10,7 +10,6 @@ keywords:
  - multus
  - cilium
  - aws-cni
-  - metallb
maintainers:
  - name: Stefan Reimer
    email: stefan@zero-downtime.net

@@ -19,13 +18,9 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: cilium
-    version: 1.17.3
+    version: 1.17.4
    repository: https://helm.cilium.io/
    condition: cilium.enabled
-  - name: metallb
-    version: 0.14.9
-    repository: https://metallb.github.io/metallb
-    condition: metallb.enabled
  - name: haproxy
    version: 1.24.0
    repository: https://haproxytech.github.io/helm-charts
@@ -1,6 +1,6 @@
# kubezero-network

-  
+  

KubeZero umbrella chart for all things network

@@ -20,20 +20,22 @@ Kubernetes: `>= 1.30.0-0`
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
| https://haproxytech.github.io/helm-charts | haproxy | 1.24.0 |
-| https://helm.cilium.io/ | cilium | 1.17.3 |
-| https://metallb.github.io/metallb | metallb | 0.14.9 |
+| https://helm.cilium.io/ | cilium | 1.17.4 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
+| cilium.bpf.preallocateMaps | bool | `true` | |
| cilium.cgroup.autoMount.enabled | bool | `false` | |
| cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` | |
| cilium.cluster.id | int | `240` | |
| cilium.cluster.name | string | `"default"` | |
| cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
-| cilium.cni.exclusive | bool | `false` | |
+| cilium.cni.exclusive | bool | `true` | |
| cilium.cni.iptablesRemoveAWSRules | bool | `false` | |
| cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
+| cilium.dnsProxy.enableTransparentMode | bool | `true` | |
| cilium.enabled | bool | `false` | |
| cilium.envoy.enabled | bool | `false` | |
| cilium.hubble.enabled | bool | `false` | |

@@ -45,6 +47,9 @@ Kubernetes: `>= 1.30.0-0`
| cilium.hubble.ui.enabled | bool | `false` | |
| cilium.image.useDigest | bool | `false` | |
| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
+| cilium.k8sServiceHost | string | `""` | |
+| cilium.k8sServicePort | int | `6443` | |
+| cilium.kubeProxyReplacement | bool | `true` | |
| cilium.l7Proxy | bool | `false` | |
| cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cilium.operator.prometheus.enabled | bool | `false` | |

@@ -54,12 +59,13 @@ Kubernetes: `>= 1.30.0-0`
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
+| cilium.operator.tolerations[2].effect | string | `"NoSchedule"` | |
+| cilium.operator.tolerations[2].key | string | `"node.kubernetes.io/not-ready"` | |
| cilium.prometheus.enabled | bool | `false` | |
| cilium.prometheus.port | int | `9091` | |
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
-| cilium.resources.limits.memory | string | `"1Gi"` | |
-| cilium.resources.requests.cpu | string | `"10m"` | |
-| cilium.resources.requests.memory | string | `"160Mi"` | |
+| cilium.resources.requests.cpu | string | `"50m"` | |
+| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.routingMode | string | `"tunnel"` | |
| cilium.sysctlfix.enabled | bool | `false` | |
| cilium.tunnelProtocol | string | `"geneve"` | |

@@ -107,11 +113,6 @@ Kubernetes: `>= 1.30.0-0`
| haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
| haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
| haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
-| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
-| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
-| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
-| metallb.enabled | bool | `false` | |
-| metallb.ipAddressPools | list | `[]` | |
| multus.clusterNetwork | string | `"cilium"` | |
| multus.defaultNetworks | list | `[]` | |
| multus.enabled | bool | `false` | |
@@ -1,27 +0,0 @@
-{{- if .Values.metallb.enabled }}
-apiVersion: metallb.io/v1beta1
-kind: L2Advertisement
-metadata:
-  name: l2advertisement1
-  namespace: kube-system
-spec:
-  ipAddressPools:
-  {{- range $key, $val := .Values.metallb.ipAddressPools }}
-  {{- if eq $val.protocol "layer2" }}
-    - {{ $val.name }}
-  {{- end }}
-  {{- end }}
----
-
-{{- range $key, $val := .Values.metallb.ipAddressPools }}
-apiVersion: metallb.io/v1beta1
-kind: IPAddressPool
-metadata:
-  name: {{ $val.name }}
-  namespace: kube-system
-spec:
-  addresses:
-  {{- $val.addresses | toYaml | nindent 4 }}
-{{- end }}
----
-{{- end }}
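Dropping this template means the chart no longer renders IPAddressPool/L2Advertisement objects; clusters that still run MetalLB must manage them directly. A hedged sketch reusing the commented example values that are removed further down:

```bash
# Recreate the pool/advertisement pair outside the chart, e.g.:
kubectl apply -f - <<'EOF'
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: my-ip-space
  namespace: kube-system
spec:
  addresses:
    - 192.168.42.0/24
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2advertisement1
  namespace: kube-system
spec:
  ipAddressPools:
    - my-ip-space
EOF
```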
@@ -1,19 +1,3 @@
-metallb:
-  enabled: false
-
-  controller:
-    tolerations:
-      - key: node-role.kubernetes.io/control-plane
-        effect: NoSchedule
-    nodeSelector:
-      node-role.kubernetes.io/control-plane: ""
-
-  ipAddressPools: []
-  #- name: my-ip-space
-  #  protocol: layer2
-  #  addresses:
-  #  - 192.168.42.0/24
-
multus:
  enabled: false
  image:

@@ -33,17 +17,18 @@ cilium:

  resources:
    requests:
-      cpu: 10m
-      memory: 160Mi
-    limits:
-      memory: 1Gi
-      # cpu: 4000m
+      cpu: 50m
+      memory: 256Mi
+    # limits:
+    #   memory: 1Gi
+    #   cpu: 4000m

  cni:
    binPath: "/usr/libexec/cni"
    logFile: /var/log/cilium-cni.log
-    #-- Ensure this is false if multus is enabled
-    exclusive: false
+    exclusive: true
    iptablesRemoveAWSRules: false

  cluster:
    # This should match the second octet of clusterPoolIPv4PodCIDRList

@@ -63,13 +48,32 @@ cilium:
    enabled: false
    #rollOutCiliumPods: true

+  kubeProxyReplacement: true
+  dnsProxy:
+    enableTransparentMode: true

+  # For LB support via L2announcement or BGP - on-prem only
+  # l2announcements:
+  #   enabled: true
+  # Not needed normally
+  # externalIPs:
+  #   enabled: true

+  k8sServiceHost: ""
+  k8sServicePort: 6443
+  # k8s:
+  #   # This has to be set to the DNS name of all API servers
+  #   # For example "https://192.168.0.1:6443 https://192.168.0.2:6443"
+  #   apiServerURLs: ""

  cgroup:
    autoMount:
      enabled: false
    hostRoot: "/sys/fs/cgroup"

+  bpf:
+    preallocateMaps: true
+  # we need biDirectional so use helm init-container
  #bpf:
  #  autoMount:
  #    enabled: false

@@ -91,9 +95,11 @@ cilium:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
      # the operator removes the taints,
-      # so we need to break chicken egg on single controller
+      # so we need to break chicken egg
      - key: node.cilium.io/agent-not-ready
        effect: NoSchedule
+      - key: node.kubernetes.io/not-ready
+        effect: NoSchedule

    nodeSelector:
      node-role.kubernetes.io/control-plane: ""
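Flipping `cni.exclusive` to true lets Cilium delete competing CNI configs now that multus no longer owns the chain. A hedged node-level check (the conf directory is the kubelet default; the conflist file name follows Cilium's usual naming):

```bash
# After the rollout only the Cilium conflist should remain on each node.
ls /etc/cni/net.d/
# expected: 05-cilium.conflist (multus-era files pruned)
```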
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-telemetry
description: KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
type: application
-version: 0.4.1
+version: 0.5.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -19,23 +19,31 @@ dependencies:
    version: 0.2.1
    repository: https://cdn.zero-downtime.net/charts/
  - name: opentelemetry-collector
-    version: 0.108.0
+    version: 0.125.0
    repository: https://open-telemetry.github.io/opentelemetry-helm-charts
    condition: opentelemetry-collector.enabled
+  - name: opensearch
+    version: 3.0.0
+    repository: https://opensearch-project.github.io/helm-charts/
+    condition: opensearch.enabled
+  - name: opensearch-dashboards
+    version: 3.0.0
+    repository: https://opensearch-project.github.io/helm-charts/
+    condition: opensearch-dashboards.enabled
  - name: data-prepper
-    version: 0.1.0
+    version: 0.3.1
    repository: https://opensearch-project.github.io/helm-charts/
    condition: data-prepper.enabled
  - name: jaeger
-    version: 3.3.1
+    version: 3.4.1
    repository: https://jaegertracing.github.io/helm-charts
    condition: jaeger.enabled
  - name: fluentd
-    version: 0.5.2
+    version: 0.5.3
    repository: https://fluent.github.io/helm-charts
    condition: fluentd.enabled
  - name: fluent-bit
-    version: 0.47.10
+    version: 0.49.0
    repository: https://fluent.github.io/helm-charts
    condition: fluent-bit.enabled
-kubeVersion: ">= 1.26.0"
+kubeVersion: ">= 1.30.0-0"
@@ -1,6 +1,6 @@
# kubezero-telemetry

-  
+  

KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.

@@ -14,16 +14,18 @@ KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.

## Requirements

-Kubernetes: `>= 1.26.0`
+Kubernetes: `>= 1.30.0-0`

| Repository | Name | Version |
|------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
-| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
-| https://jaegertracing.github.io/helm-charts | jaeger | 3.3.1 |
-| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.108.0 |
-| https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.1.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://fluent.github.io/helm-charts | fluent-bit | 0.49.0 |
+| https://fluent.github.io/helm-charts | fluentd | 0.5.3 |
+| https://jaegertracing.github.io/helm-charts | jaeger | 3.4.1 |
+| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.125.0 |
+| https://opensearch-project.github.io/helm-charts/ | data-prepper | 0.3.1 |
+| https://opensearch-project.github.io/helm-charts/ | opensearch | 3.0.0 |
+| https://opensearch-project.github.io/helm-charts/ | opensearch-dashboards | 3.0.0 |

## Values

@@ -135,7 +137,7 @@ Kubernetes: `>= 1.26.0`
| fluentd.service.ports[1].containerPort | int | `9880` | |
| fluentd.service.ports[1].name | string | `"http-fluentd"` | |
| fluentd.service.ports[1].protocol | string | `"TCP"` | |
-| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
+| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kubezero/kubezero-secrets/telemetry.fluentd.source.sharedKey?inCluster"` | |
| fluentd.volumeMounts[0].mountPath | string | `"/run/pki"` | |
| fluentd.volumeMounts[0].name | string | `"trust-store"` | |
| fluentd.volumeMounts[0].readOnly | bool | `true` | |

@@ -167,13 +169,27 @@ Kubernetes: `>= 1.26.0`
| jaeger.storage.elasticsearch.user | string | `"admin"` | |
| jaeger.storage.type | string | `"elasticsearch"` | |
| metrics.enabled | bool | `false` | |
-| opensearch.dashboard.enabled | bool | `false` | |
-| opensearch.dashboard.istio.enabled | bool | `false` | |
-| opensearch.dashboard.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
-| opensearch.dashboard.istio.url | string | `"telemetry-dashboard.example.com"` | |
-| opensearch.nodeSets | list | `[]` | |
-| opensearch.prometheus | bool | `false` | |
-| opensearch.version | string | `"2.17.0"` | |
+| opensearch-dashboards.enabled | bool | `false` | |
+| opensearch-dashboards.istio.enabled | bool | `false` | |
+| opensearch-dashboards.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
+| opensearch-dashboards.istio.url | string | `"telemetry-dashboard.example.com"` | |
+| opensearch-dashboards.resources.limits.cpu | string | `nil` | |
+| opensearch-dashboards.resources.limits.memory | string | `"512M"` | |
+| opensearch-dashboards.resources.requests.cpu | string | `"100m"` | |
+| opensearch-dashboards.resources.requests.memory | string | `"512M"` | |
+| opensearch-dashboards.serviceMonitor.enabled | bool | `false` | |
+| opensearch-dashboards.serviceMonitor.interval | string | `"30s"` | |
+| opensearch.config."opensearch.yml" | string | `"cluster.name: opensearch-cluster\nnetwork.host: 0.0.0.0\ndiscovery.type: single-node\n"` | |
+| opensearch.enabled | bool | `false` | |
+| opensearch.maxUnavailable | int | `0` | |
+| opensearch.opensearchJavaOpts | string | `"-Xmx1024M -Xms1024M"` | |
+| opensearch.persistence.size | string | `"8Gi"` | |
+| opensearch.resources.limits.memory | string | `"2Gi"` | |
+| opensearch.resources.requests.cpu | string | `"500m"` | |
+| opensearch.resources.requests.memory | string | `"2Gi"` | |
+| opensearch.serviceMonitor.enabled | bool | `false` | |
+| opensearch.serviceMonitor.interval | string | `"30s"` | |
+| opensearch.singleNode | bool | `true` | |
| opentelemetry-collector.config.exporters.otlp/data-prepper.endpoint | string | `"telemetry-data-prepper:21890"` | |
| opentelemetry-collector.config.exporters.otlp/data-prepper.tls.insecure | bool | `true` | |
| opentelemetry-collector.config.exporters.otlp/jaeger.endpoint | string | `"telemetry-jaeger-collector:4317"` | |
@@ -1,5 +1,5 @@
apiVersion: v2
-appVersion: v1.16.2
+appVersion: v1.17.1
description: A Helm chart for Kubernetes
home: https://www.fluentd.org/
icon: https://www.fluentd.org/images/miscellany/fluentd-logo_2x.png

@@ -12,4 +12,4 @@ name: fluentd
sources:
  - https://github.com/fluent/fluentd/
  - https://github.com/fluent/fluentd-kubernetes-daemonset
-version: 0.5.2
+version: 0.5.3

@@ -1,5 +1,5 @@
{{- define "fluentd.pod" -}}
-{{- $defaultTag := printf "%s-debian-%s-1.0" (.Chart.AppVersion) (.Values.variant) -}}
+{{- $defaultTag := printf "%s-debian-%s-1.2" (.Chart.AppVersion) (.Values.variant) -}}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
  {{- toYaml . | nindent 2 }}
@@ -21,7 +21,7 @@
.idea/
*.tmproj
.vscode/

examples/
# Ignore unittest
tests/
*/__snapshot__/*
@@ -1,5 +1,5 @@
apiVersion: v2
-appVersion: 0.111.0
+appVersion: 0.126.0
description: OpenTelemetry Collector Helm chart for Kubernetes
home: https://opentelemetry.io/
icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png

@@ -12,4 +12,4 @@ sources:
  - https://github.com/open-telemetry/opentelemetry-collector
  - https://github.com/open-telemetry/opentelemetry-collector-contrib
type: application
-version: 0.108.0
+version: 0.125.0
@@ -19,7 +19,7 @@ helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm
To install the chart with the release name my-opentelemetry-collector, run the following command:

```console
-helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="otel/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
+helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode=<value> --set image.repository="ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s" --set command.name="otelcol-k8s"
```

Where the `mode` value needs to be set to one of `daemonset`, `deployment` or `statefulset`.

@@ -35,7 +35,7 @@ See [UPGRADING.md](UPGRADING.md).
OpenTelemetry Collector recommends to bind receivers' servers to addresses that limit connections to authorized users.
For this reason, by default the chart binds all the Collector's endpoints to the pod's IP.

-More info is available in the [Security Best Practices docummentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks)
+More info is available in the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks)

Some care must be taken when using `hostNetwork: true`, as then OpenTelemetry Collector will listen on all the addresses in the host network namespace.

@@ -106,6 +106,29 @@ to read the files where Kubernetes container runtime writes all containers' cons

#### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion"

+#### Log collection for a subset of pods or containers
+
+The `logsCollection` preset will by default ingest the logs of all kubernetes containers.
+This is achieved by using an include path of `/var/log/pods/*/*/*.log` for the `filelog` receiver.
+
+To limit the import to a certain subset of pods or containers, the `filelog`
+receivers `include` list can be overwritten by supplying explicit configuration.
+
+E.g. the following configuration would only import logs for pods within the namespace `example-namespace`:
+
+```yaml
+mode: daemonset
+
+presets:
+  logsCollection:
+    enabled: true
+config:
+  receivers:
+    filelog:
+      include:
+        - /var/log/pods/example-namespace_*/*/*.log
+```
+
The container logs pipeline uses the `debug` exporter by default.
Paired with the default `filelog` receiver that receives all containers' console output,
it is easy to accidentally feed the exported logs back into the receiver.

@@ -149,6 +172,10 @@ This feature is disabled by default. It has the following requirements:

- It requires the [Kubernetes Attributes processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image.

+#### :memo: Note: Changing or supplementing `k8sattributes` scopes
+
+In order to minimize the collector's privileges, the [Kubernetes RBAC Rules](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) that are applied to the collector as part of this chart are the minimum required for the `presets.kubernetesAttributes` preset to work. If additional configuration scopes are desired outside of the preset you must apply the corresponding RBAC rules to grant the collector access.
+
To enable this feature, set the `presets.kubernetesAttributes.enabled` property to `true`.
Here is an example `values.yaml`:
@@ -4,6 +4,14 @@ These upgrade guidelines only contain instructions for version upgrades which re
If the version you want to upgrade to is not listed here, then there is nothing to do for you.
Just upgrade and enjoy.

+## 0.121.0 to 0.122.0
+
+In the v0.123.1 Collector release we stopped pushing images to Dockerhub due to how their new rate limit changes affected our CI. If you're using `otel/opentelemetry-collector-k8s` for the image you should switch to `ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s`. See https://github.com/open-telemetry/community/issues/2641 for more details.
+
+## 0.110.0 to 0.110.1 or 0.110.2
+
+We broke the selector labels in `0.110.0`, which causes `helm upgrades` to fail. Do not attempt to upgrade from `0.110.0` to either `0.110.1` or `0.110.2`. Go straight to `0.110.3` instead.
+
## 0.97.2 to 0.98.0

> [!WARNING]
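For consumers tracking this vendored chart, the Dockerhub deprecation is a values-only change. A hedged one-liner for migrating a values file (yq v4 syntax):

```bash
# Point the image at the ghcr.io mirror announced above.
yq -i '.image.repository = "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"' values.yaml
```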
@@ -1,7 +1,7 @@
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -4,7 +4,7 @@ global:
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -19,6 +19,10 @@ resources:
podLabels:
  testLabel: "{{ .Values.global.test }}"

+additionalLabels:
+  testLabel: "{{ .Values.global.test }}"
+  someLabel: "someValue"
+
ingress:
  enabled: true
  hosts:
@@ -1,7 +1,7 @@
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: statefulset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"
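The same repository swap repeats across every example values file in this section; mechanically it is a single substitution, roughly (a sketch — examples/ is the chart's example folder referenced in the deleted README below):

```bash
# Rewrite the deprecated Dockerhub repository across all example values.
grep -rl '"otel/opentelemetry-collector-k8s"' examples/ | xargs sed -i \
  's|"otel/opentelemetry-collector-k8s"|"ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"|g'
```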
@@ -0,0 +1,21 @@
+mode: deployment
+
+image:
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
+
+resources:
+  limits:
+    cpu: 100m
+    memory: 200M
+
+internalTelemetryViaOTLP:
+  endpoint: "http://localhost:4318"
+  headers:
+    - name: "x-opentelemetry-customer"
+      value: "a value"
+  traces:
+    enabled: true
+  metrics:
+    enabled: true
+  logs:
+    enabled: true
@@ -1,7 +1,7 @@
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"
@@ -0,0 +1,15 @@
+mode: deployment
+
+image:
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
+
+resources:
+  limits:
+    cpu: 100m
+    memory: 200M
+
+config:
+  service:
+    telemetry:
+      metrics:
+        address: ${env:MY_POD_IP}:8888
@@ -0,0 +1,17 @@
+mode: deployment
+
+image:
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"
+
+resources:
+  limits:
+    cpu: 100m
+    memory: 200M
+
+config:
+  service:
+    telemetry:
+      metrics:
+        address: 0.0.0.0:8888
+      resource:
+        "k8s.namespace.name": "default"
@@ -1,7 +1,7 @@
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: deployment

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: daemonset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

@@ -1,7 +1,7 @@
mode: statefulset

image:
-  repository: "otel/opentelemetry-collector-k8s"
+  repository: "ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"
@@ -1,17 +0,0 @@
-# Examples of chart configuration
-
-Here is a collection of common configurations for the OpenTelemetry collector. Each folder contains an example `values.yaml` and the resulting configurations that are generated by the opentelemetry-collector helm charts.
-
-- [Daemonset only](daemonset-only)
-- [Deployment only](deployment-only)
-- [Daemonset and deployment](daemonset-and-deployment)
-- [Log collection, including collector logs](daemonset-collector-logs)
-- [Add component (hostmetrics)](daemonset-hostmetrics)
-
-The manifests are rendered using the `helm template` command and the specific example folder's values.yaml.
-
-Examples are generated by (from root of the repo):
-
-```sh
-make generate-examples CHARTS=opentelemetry-collector
-```
@ -1,41 +0,0 @@
|
||||
---
|
||||
# Source: opentelemetry-collector/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: example-opentelemetry-collector
|
||||
labels:
|
||||
helm.sh/chart: opentelemetry-collector-0.108.0
|
||||
app.kubernetes.io/name: opentelemetry-collector
|
||||
app.kubernetes.io/instance: example
|
||||
app.kubernetes.io/version: "0.111.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods", "namespaces"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["replicasets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["extensions"]
|
||||
resources: ["replicasets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["extensions"]
|
||||
resources: ["daemonsets", "deployments", "replicasets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs", "cronjobs"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["autoscaling"]
|
||||
resources: ["horizontalpodautoscalers"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["events.k8s.io"]
|
||||
resources: ["events"]
|
||||
verbs: ["watch", "list"]
|
@ -1,21 +0,0 @@
|
||||
---
|
||||
# Source: opentelemetry-collector/templates/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: example-opentelemetry-collector
|
||||
labels:
|
||||
helm.sh/chart: opentelemetry-collector-0.108.0
|
||||
app.kubernetes.io/name: opentelemetry-collector
|
||||
app.kubernetes.io/instance: example
|
||||
app.kubernetes.io/version: "0.111.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: example-opentelemetry-collector
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: example-opentelemetry-collector
|
||||
namespace: default
|
@ -1,68 +0,0 @@
|
||||
---
|
||||
# Source: opentelemetry-collector/templates/configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: example-opentelemetry-collector
|
||||
namespace: default
|
||||
labels:
|
||||
helm.sh/chart: opentelemetry-collector-0.108.0
|
||||
app.kubernetes.io/name: opentelemetry-collector
|
||||
app.kubernetes.io/instance: example
|
||||
app.kubernetes.io/version: "0.111.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
|
||||
data:
|
||||
relay: |
|
||||
exporters:
|
||||
debug: {}
|
||||
processors:
|
||||
k8sattributes:
|
||||
extract:
|
||||
metadata:
|
||||
- k8s.namespace.name
|
||||
- k8s.deployment.name
|
||||
- k8s.statefulset.name
|
||||
- k8s.daemonset.name
|
||||
- k8s.cronjob.name
|
||||
- k8s.job.name
|
||||
- k8s.node.name
|
||||
- k8s.pod.name
|
||||
- k8s.pod.uid
|
||||
- k8s.pod.start_time
|
||||
passthrough: false
|
||||
pod_association:
|
||||
- sources:
|
||||
- from: resource_attribute
|
||||
name: k8s.pod.ip
|
||||
- sources:
|
||||
- from: resource_attribute
|
||||
name: k8s.pod.uid
|
||||
- sources:
|
||||
- from: connection
|
||||
receivers:
|
||||
k8s_cluster:
|
||||
collection_interval: 10s
|
||||
k8sobjects:
|
||||
objects:
|
||||
- exclude_watch_type:
|
||||
- DELETED
|
||||
group: events.k8s.io
|
||||
mode: watch
|
||||
name: events
|
||||
service:
|
||||
pipelines:
|
||||
logs:
|
||||
exporters:
|
||||
- debug
|
||||
processors:
|
||||
- k8sattributes
|
||||
receivers:
|
||||
- k8sobjects
|
||||
metrics:
|
||||
exporters:
|
||||
- debug
|
||||
processors:
|
||||
- k8sattributes
|
||||
receivers:
|
||||
- k8s_cluster
|
@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: standalone-collector
  strategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: 360fc84164ca26f5a57ecb44cbcec02ca473b09fc86dba876f71c9fa3617f656

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: standalone-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
            - name: otlp
              containerPort: 4317
              protocol: TCP
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
            - name: zipkin
              containerPort: 9411
              protocol: TCP
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: GOMEMLIMIT
              value: "3276MiB"
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          resources:
            limits:
              cpu: 2
              memory: 4Gi
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector
            items:
              - key: relay
                path: relay.yaml
      hostNetwork: false
@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

    component: standalone-collector
spec:
  type: ClusterIP
  ports:

    - name: jaeger-compact
      port: 6831
      targetPort: 6831
      protocol: UDP
    - name: jaeger-grpc
      port: 14250
      targetPort: 14250
      protocol: TCP
    - name: jaeger-thrift
      port: 14268
      targetPort: 14268
      protocol: TCP
    - name: otlp
      port: 4317
      targetPort: 4317
      protocol: TCP
      appProtocol: grpc
    - name: otlp-http
      port: 4318
      targetPort: 4318
      protocol: TCP
    - name: zipkin
      port: 9411
      targetPort: 9411
      protocol: TCP
  selector:
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    component: standalone-collector
  internalTrafficPolicy: Cluster
@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm
@ -1,34 +0,0 @@
mode: deployment

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

replicaCount: 1

resources:
  limits:
    cpu: 2
    memory: 4Gi

presets:
  clusterMetrics:
    enabled: true
  kubernetesAttributes:
    enabled: true
  kubernetesEvents:
    enabled: true

alternateConfig:
  exporters:
    debug: {}
  service:
    pipelines:
      logs:
        exporters:
        - debug
      metrics:
        exporters:
        - debug
@ -1,34 +0,0 @@
mode: daemonset

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

config:
  exporters:
    otlp:
      endpoint: example-opentelemetry-collector:4317
      tls:
        insecure: true
  service:
    pipelines:
      logs:
        exporters:
        - otlp
        - debug
      metrics:
        exporters:
        - otlp
        - debug
      traces:
        exporters:
        - otlp
        - debug

resources:
  limits:
    cpu: 100m
    memory: 200M
@ -1,13 +0,0 @@
mode: deployment

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

resources:
  limits:
    cpu: 100m
    memory: 200M
@ -1,93 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
      otlp:
        endpoint: example-opentelemetry-collector:4317
        tls:
          insecure: true
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      jaeger:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
            - targets:
              - ${env:MY_POD_IP}:8888
      zipkin:
        endpoint: ${env:MY_POD_IP}:9411
    service:
      extensions:
      - health_check
      pipelines:
        logs:
          exporters:
          - otlp
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
        metrics:
          exporters:
          - otlp
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - prometheus
        traces:
          exporters:
          - otlp
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - jaeger
          - zipkin
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888
@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      jaeger:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
            - targets:
              - ${env:MY_POD_IP}:8888
      zipkin:
        endpoint: ${env:MY_POD_IP}:9411
    service:
      extensions:
      - health_check
      pipelines:
        logs:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
        metrics:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - prometheus
        traces:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - jaeger
          - zipkin
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888
@ -1,104 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: agent-collector
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: a2d0d31bd929305e52879f78f502d56ad49d9ef9396838490646e9034d2243de

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: agent-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
              hostPort: 6831
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
              hostPort: 14250
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
              hostPort: 14268
            - name: otlp
              containerPort: 4317
              protocol: TCP
              hostPort: 4317
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
              hostPort: 4318
            - name: zipkin
              containerPort: 9411
              protocol: TCP
              hostPort: 9411
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: GOMEMLIMIT
              value: "152MiB"
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          resources:
            limits:
              cpu: 100m
              memory: 200M
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector-agent
            items:
              - key: relay
                path: relay.yaml
      hostNetwork: false
@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: standalone-collector
  strategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: standalone-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
            - name: otlp
              containerPort: 4317
              protocol: TCP
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
            - name: zipkin
              containerPort: 9411
              protocol: TCP
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: GOMEMLIMIT
              value: "152MiB"
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          resources:
            limits:
              cpu: 100m
              memory: 200M
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector
            items:
              - key: relay
                path: relay.yaml
      hostNetwork: false
@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

    component: standalone-collector
spec:
  type: ClusterIP
  ports:

    - name: jaeger-compact
      port: 6831
      targetPort: 6831
      protocol: UDP
    - name: jaeger-grpc
      port: 14250
      targetPort: 14250
      protocol: TCP
    - name: jaeger-thrift
      port: 14268
      targetPort: 14268
      protocol: TCP
    - name: otlp
      port: 4317
      targetPort: 4317
      protocol: TCP
      appProtocol: grpc
    - name: otlp-http
      port: 4318
      targetPort: 4318
      protocol: TCP
    - name: zipkin
      port: 9411
      targetPort: 9411
      protocol: TCP
  selector:
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    component: standalone-collector
  internalTrafficPolicy: Cluster
@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm
@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      filelog:
        exclude: []
        include:
        - /var/log/pods/*/*/*.log
        include_file_name: false
        include_file_path: true
        operators:
        - id: container-parser
          max_log_size: 102400
          type: container
        retry_on_failure:
          enabled: true
        start_at: end
      jaeger:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
            - targets:
              - ${env:MY_POD_IP}:8888
      zipkin:
        endpoint: ${env:MY_POD_IP}:9411
    service:
      extensions:
      - health_check
      pipelines:
        logs:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - filelog
        metrics:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - prometheus
        traces:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - jaeger
          - zipkin
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888
@ -1,110 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: agent-collector
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: 5237e54a1cdaad762876da10a5bab6f686506211aaa2c70b901a74fec8b82140

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: agent-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
              hostPort: 6831
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
              hostPort: 14250
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
              hostPort: 14268
            - name: otlp
              containerPort: 4317
              protocol: TCP
              hostPort: 4317
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
              hostPort: 4318
            - name: zipkin
              containerPort: 9411
              protocol: TCP
              hostPort: 9411
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
            - name: varlogpods
              mountPath: /var/log/pods
              readOnly: true
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector-agent
            items:
              - key: relay
                path: relay.yaml
        - name: varlogpods
          hostPath:
            path: /var/log/pods
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
      hostNetwork: false
@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm
@ -1,12 +0,0 @@
mode: daemonset

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

presets:
  logsCollection:
    enabled: true
    includeCollectorLogs: true
@ -1,133 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      hostmetrics:
        collection_interval: 10s
        root_path: /hostfs
        scrapers:
          cpu: null
          disk: null
          filesystem:
            exclude_fs_types:
              fs_types:
              - autofs
              - binfmt_misc
              - bpf
              - cgroup2
              - configfs
              - debugfs
              - devpts
              - devtmpfs
              - fusectl
              - hugetlbfs
              - iso9660
              - mqueue
              - nsfs
              - overlay
              - proc
              - procfs
              - pstore
              - rpc_pipefs
              - securityfs
              - selinuxfs
              - squashfs
              - sysfs
              - tracefs
              match_type: strict
            exclude_mount_points:
              match_type: regexp
              mount_points:
              - /dev/*
              - /proc/*
              - /sys/*
              - /run/k3s/containerd/*
              - /var/lib/docker/*
              - /var/lib/kubelet/*
              - /snap/*
          load: null
          memory: null
          network: null
      jaeger:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
            - targets:
              - ${env:MY_POD_IP}:8888
      zipkin:
        endpoint: ${env:MY_POD_IP}:9411
    service:
      extensions:
      - health_check
      pipelines:
        logs:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
        metrics:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - prometheus
          - hostmetrics
        traces:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - jaeger
          - zipkin
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888
@ -1,105 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: agent-collector
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: 98dea268c8a8fe987e082a4e85801387f2b60fefc281f9b1edd1080f0af62574

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: agent-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
              hostPort: 6831
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
              hostPort: 14250
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
              hostPort: 14268
            - name: otlp
              containerPort: 4317
              protocol: TCP
              hostPort: 4317
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
              hostPort: 4318
            - name: zipkin
              containerPort: 9411
              protocol: TCP
              hostPort: 9411
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
            - name: hostfs
              mountPath: /hostfs
              readOnly: true
              mountPropagation: HostToContainer
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector-agent
            items:
              - key: relay
                path: relay.yaml
        - name: hostfs
          hostPath:
            path: /
      hostNetwork: false
@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm
@ -1,12 +0,0 @@
mode: daemonset

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

presets:
  hostMetrics:
    enabled: true
@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      jaeger:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
            - targets:
              - ${env:MY_POD_IP}:8888
      zipkin:
        endpoint: ${env:MY_POD_IP}:9411
    service:
      extensions:
      - health_check
      pipelines:
        logs:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
        metrics:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - prometheus
        traces:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - jaeger
          - zipkin
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888
@ -1,119 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: agent-collector
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: agent-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
              hostPort: 6831
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
              hostPort: 14250
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
              hostPort: 14268
            - name: otlp
              containerPort: 4317
              protocol: TCP
              hostPort: 4317
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
              hostPort: 4318
            - name: zipkin
              containerPort: 9411
              protocol: TCP
              hostPort: 9411
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
          lifecycle:
            preStop:
              exec:
                command:
                - /test/sleep
                - "5"
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
            - mountPath: /test
              name: test
      initContainers:
        - args:
          - /bin/sleep
          - /test/sleep
          command:
          - cp
          image: 'busybox:latest'
          name: test
          volumeMounts:
          - mountPath: /test
            name: test
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector-agent
            items:
              - key: relay
                path: relay.yaml
        - emptyDir: {}
          name: test
      hostNetwork: false
@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm
@ -1,37 +0,0 @@
mode: daemonset

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

global:
  image: busybox:latest
initContainers:
  - name: test
    command:
      - cp
    args:
      - /bin/sleep
      - /test/sleep
    image: "{{ .Values.global.image }}"
    volumeMounts:
      - name: test
        mountPath: /test

extraVolumes:
  - name: test
    emptyDir: {}

extraVolumeMounts:
  - name: test
    mountPath: /test

lifecycleHooks:
  preStop:
    exec:
      command:
        - /test/sleep
        - "5"
@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap-agent.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      jaeger:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
            - targets:
              - ${env:MY_POD_IP}:8888
      zipkin:
        endpoint: ${env:MY_POD_IP}:9411
    service:
      extensions:
      - health_check
      pipelines:
        logs:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
        metrics:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - prometheus
        traces:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - jaeger
          - zipkin
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888
@ -1,98 +0,0 @@
---
# Source: opentelemetry-collector/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-opentelemetry-collector-agent
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: agent-collector
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: c81f167b09355b9a9b3d7e1a4f994dfb1ef88379ded1a4b902cd4a2ace196a79

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: agent-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
              hostPort: 6831
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
              hostPort: 14250
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
              hostPort: 14268
            - name: otlp
              containerPort: 4317
              protocol: TCP
              hostPort: 4317
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
              hostPort: 4318
            - name: zipkin
              containerPort: 9411
              protocol: TCP
              hostPort: 9411
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector-agent
            items:
              - key: relay
                path: relay.yaml
      hostNetwork: false
@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm
@ -1,7 +0,0 @@
mode: daemonset

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"
@ -1,86 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      jaeger:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:14250
          thrift_compact:
            endpoint: ${env:MY_POD_IP}:6831
          thrift_http:
            endpoint: ${env:MY_POD_IP}:14268
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
      prometheus:
        config:
          scrape_configs:
          - job_name: opentelemetry-collector
            scrape_interval: 10s
            static_configs:
            - targets:
              - ${env:MY_POD_IP}:8888
      zipkin:
        endpoint: ${env:MY_POD_IP}:9411
    service:
      extensions:
      - health_check
      pipelines:
        logs:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
        metrics:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - prometheus
        traces:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
          - jaeger
          - zipkin
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888
@ -1,100 +0,0 @@
---
# Source: opentelemetry-collector/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

spec:
  replicas: 3
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/name: opentelemetry-collector
      app.kubernetes.io/instance: example
      component: standalone-collector
  strategy:
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: f67e5b63bd16a7b09fc73afd9b6d71dfbeeb2afc8471c0b0f005233ef45df91d

      labels:
        app.kubernetes.io/name: opentelemetry-collector
        app.kubernetes.io/instance: example
        component: standalone-collector

    spec:

      serviceAccountName: example-opentelemetry-collector
      securityContext:
        {}
      containers:
        - name: opentelemetry-collector
          command:
            - /otelcol-k8s
          args:
            - --config=/conf/relay.yaml
          securityContext:
            {}
          image: "otel/opentelemetry-collector-k8s:0.111.0"
          imagePullPolicy: IfNotPresent
          ports:

            - name: jaeger-compact
              containerPort: 6831
              protocol: UDP
            - name: jaeger-grpc
              containerPort: 14250
              protocol: TCP
            - name: jaeger-thrift
              containerPort: 14268
              protocol: TCP
            - name: otlp
              containerPort: 4317
              protocol: TCP
            - name: otlp-http
              containerPort: 4318
              protocol: TCP
            - name: zipkin
              containerPort: 9411
              protocol: TCP
          env:
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: GOMEMLIMIT
              value: "3276MiB"
          livenessProbe:
            httpGet:
              path: /
              port: 13133
          readinessProbe:
            httpGet:
              path: /
              port: 13133
          resources:
            limits:
              cpu: 2
              memory: 4Gi
          volumeMounts:
            - mountPath: /conf
              name: opentelemetry-collector-configmap
      volumes:
        - name: opentelemetry-collector-configmap
          configMap:
            name: example-opentelemetry-collector
            items:
              - key: relay
                path: relay.yaml
      hostNetwork: false
@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

    component: standalone-collector
spec:
  type: ClusterIP
  ports:

    - name: jaeger-compact
      port: 6831
      targetPort: 6831
      protocol: UDP
    - name: jaeger-grpc
      port: 14250
      targetPort: 14250
      protocol: TCP
    - name: jaeger-thrift
      port: 14268
      targetPort: 14268
      protocol: TCP
    - name: otlp
      port: 4317
      targetPort: 4317
      protocol: TCP
      appProtocol: grpc
    - name: otlp-http
      port: 4318
      targetPort: 4318
      protocol: TCP
    - name: zipkin
      port: 9411
      targetPort: 9411
      protocol: TCP
  selector:
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    component: standalone-collector
  internalTrafficPolicy: Cluster
@ -1,13 +0,0 @@
---
# Source: opentelemetry-collector/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm
@ -1,14 +0,0 @@
mode: deployment

image:
  repository: "otel/opentelemetry-collector-k8s"

command:
  name: "otelcol-k8s"

replicaCount: 3

resources:
  limits:
    cpu: 2
    memory: 4Gi
@ -1,49 +0,0 @@
---
# Source: opentelemetry-collector/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-opentelemetry-collector
  namespace: default
  labels:
    helm.sh/chart: opentelemetry-collector-0.108.0
    app.kubernetes.io/name: opentelemetry-collector
    app.kubernetes.io/instance: example
    app.kubernetes.io/version: "0.111.0"
    app.kubernetes.io/managed-by: Helm

data:
  relay: |
    exporters:
      debug: {}
    extensions:
      health_check:
        endpoint: ${env:MY_POD_IP}:13133
    processors:
      batch: {}
      memory_limiter:
        check_interval: 5s
        limit_percentage: 80
        spike_limit_percentage: 25
    receivers:
      otlp:
        protocols:
          grpc:
            endpoint: ${env:MY_POD_IP}:4317
          http:
            endpoint: ${env:MY_POD_IP}:4318
    service:
      extensions:
      - health_check
      pipelines:
        traces:
          exporters:
          - debug
          processors:
          - memory_limiter
          - batch
          receivers:
          - otlp
      telemetry:
        metrics:
          address: ${env:MY_POD_IP}:8888