# KubeZero/charts/kubezero-logging/values.yaml
# use this for backwards compatibility
# fullnameOverride: ""

# Version for ElasticSearch and Kibana have to match so we define it at top-level.
# Quoted so YAML never re-types it (e.g. a future "7.20" would otherwise parse as float 7.2).
version: "7.17.3"

elastic_password: ""  # super_secret_elastic_password
# ElasticSearch cluster settings (consumed by the ECK operator templates)
es:
  # List of ES node sets; empty by default — uncomment and adapt the example to enable a cluster
  nodeSets: []
  #- count: 2
  #  storage:
  #    size: 16Gi
  #    class: local-sc-xfs
  #    zone: us-west-2a
  #  nodeAffinity:
  #    key: node label name
  #    value: node label value

  # Optional S3 snapshot repository for ES backups
  s3Snapshot:
    enabled: false
    iamrole: ""  # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots

  # Enable Prometheus metrics for the ES cluster
  # NOTE(review): placed under `es` based on chart convention — confirm against the chart templates
  prometheus: false
# Kibana deployment settings
kibana:
  count: 1
  #servicename: kibana.example.com
  #nodeAffinity:
  #  key: node label name
  #  value: node label value

# Istio ingress exposure for Kibana
istio:
  enabled: false
  gateway: "istio-system/ingressgateway"
  url: ""  # kibana.example.com
# fluentd aggregator (concentrator) — receives forwarded logs from fluent-bit
# and ships them into ElasticSearch
fluentd:
  enabled: false
  image:
    repository: public.ecr.aws/zero-downtime/fluentd-concenter
    tag: v1.16.5
  istio:
    enabled: false
  kind: Deployment
  replicaCount: 1

  #plugins:
  #- fluent-plugin-s3

  service:
    ports:
      - name: tcp-forward
        protocol: TCP
        containerPort: 24224
      - name: http-fluentd
        protocol: TCP
        containerPort: 9880

  metrics:
    serviceMonitor:
      enabled: false
      additionalLabels:
        release: metrics

  dashboards:
    enabled: false

  podSecurityPolicy:
    enabled: false

  # No need for docker nor /var/log — this instance only receives via forward/http
  mountVarLogDirectory: false
  mountDockerContainersDirectory: false

  resources:
    requests:
      cpu: 200m
      memory: 256Mi
    limits:
      memory: 512Mi

  output:
    # Default should be "logging-kubezero-logging-es-http" if fullnameOverride is NOT used
    host: logging-es-http

  # Shared secret for the fluent forward protocol (must match fluent-bit.config.output.sharedKey)
  shared_key: "cloudbender"
  # OUTPUT_USER: elastic
  # OUTPUT_SSL_VERIFY: "false"

  # Same here the secret names change if fullnameOverride is not used !!
  env:
    - name: OUTPUT_PASSWORD
      valueFrom:
        secretKeyRef:
          name: logging-es-elastic-user
          key: elastic

  configMapConfigs:
    - fluentd-prometheus-conf

  # fluentd pipeline, templated by Helm ({{ ... }} expressions resolve against these values)
  fileConfigs:
    00_system.conf: |-
      <system>
        root_dir /fluentd/log
        log_level info
        ignore_repeated_log_interval 60s
        ignore_same_log_interval 60s
        workers 1
      </system>
    01_sources.conf: |-
      <source>
        @type http
        @label @KUBERNETES
        port 9880
        bind 0.0.0.0
        keepalive_timeout 30
      </source>

      <source>
        @type forward
        @label @KUBERNETES
        port 24224
        bind 0.0.0.0
        # skip_invalid_event true
        send_keepalive_packet true
        <security>
          self_hostname "#{ENV['HOSTNAME']}"
          shared_key {{ .Values.shared_key }}
        </security>
      </source>
    02_filters.conf: |-
      <label @KUBERNETES>
        # prevent log feedback loops eg. ES has issues etc.
        # discard logs from our own pods
        <match kube.logging.fluentd>
          @type relabel
          @label @FLUENT_LOG
        </match>

        # Exclude current fluent-bit multiline noise
        <filter kube.logging.fluent-bit>
          @type grep
          <exclude>
            key log
            pattern /could not append content to multiline context/
          </exclude>
        </filter>

        # Generate Hash ID to break endless loop for already ingested events during retries
        <filter **>
          @type elasticsearch_genid
          use_entire_record true
        </filter>

        # Route through DISPATCH for Prometheus metrics
        <match **>
          @type relabel
          @label @DISPATCH
        </match>
      </label>
    04_outputs.conf: |-
      <label @OUTPUT>
        <match **>
          @id out_es
          @type elasticsearch
          # @log_level debug
          include_tag_key true
          id_key _hash
          remove_keys _hash
          write_operation create
          # KubeZero pipeline incl. GeoIP etc.
          pipeline fluentd
          hosts "{{ .Values.output.host }}"
          port 9200
          scheme http
          user elastic
          password "#{ENV['OUTPUT_PASSWORD']}"

          log_es_400_reason
          logstash_format true
          reconnect_on_error true
          reload_on_failure true
          request_timeout 300s
          slow_flush_log_threshold 55.0

          #with_transporter_log true
          verify_es_version_at_startup false
          default_elasticsearch_version 7
          suppress_type_name true

          # Retry failed bulk requests
          # https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types
          unrecoverable_error_types ["out_of_memory_error"]
          bulk_message_request_threshold 1048576

          <buffer>
            @type file
            flush_mode interval
            flush_thread_count 2
            flush_interval 10s
            chunk_limit_size 2MB
            total_limit_size 1GB
            flush_at_shutdown true
            retry_type exponential_backoff
            retry_timeout 6h
            overflow_action drop_oldest_chunk
            disable_chunk_backup true
          </buffer>
        </match>
      </label>
# fluent-bit node agent — tails container logs on every node and forwards
# them to the fluentd aggregator above
fluent-bit:
  enabled: false

  #image:
  #  repository: public.ecr.aws/zero-downtime/fluent-bit
  #  tag: 2.0.10

  testFramework:
    enabled: false

  serviceMonitor:
    enabled: false
    selector:
      release: metrics

  #rbac:
  #  nodeAccess: true
  #hostNetwork: true
  #dnsPolicy: ClusterFirstWithHostNet

  # Run on all node groups, incl. control-plane and tainted worker groups
  tolerations:
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule
    - key: kubezero-workergroup
      effect: NoSchedule
      operator: Exists
    - key: nvidia.com/gpu
      effect: NoSchedule
      operator: Exists
    - key: aws.amazon.com/neuron
      effect: NoSchedule
      operator: Exists

  resources:
    requests:
      cpu: 20m
      memory: 48Mi
    limits:
      memory: 128Mi

  config:
    output:
      # Forward target and shared secret — must match fluentd.shared_key above
      host: logging-fluentd
      sharedKey: cloudbender
      tls: false

    input:
      memBufLimit: 16MB
      refreshInterval: 5

    logLevel: info
    flushInterval: 5

    #extraRecords:
    #  source.clustername: MyKubeCluster

    # Raw fluent-bit sections, templated by Helm against the values in this block
    service: |
      [SERVICE]
          Flush {{ .Values.config.flushInterval }}
          Daemon Off
          Log_Level {{ .Values.config.logLevel }}
          Parsers_File parsers.conf
          Parsers_File custom_parsers.conf
          HTTP_Server On
          HTTP_Listen 0.0.0.0
          HTTP_Port {{ .Values.service.port }}
          Health_Check On

    inputs: |
      [INPUT]
          Name tail
          Path /var/log/containers/*.log
          # Exclude ourselves to current error spam, https://github.com/fluent/fluent-bit/issues/5769
          Exclude_Path *logging-fluent-bit*
          multiline.parser cri
          Tag cri.*
          Skip_Long_Lines On
          Skip_Empty_Lines On
          DB /var/log/flb_kube.db
          DB.Sync Normal
          DB.locking true
          # Buffer_Max_Size 1M
      {{- with .Values.config.input }}
          Mem_Buf_Limit {{ default "16MB" .memBufLimit }}
          Refresh_Interval {{ default 5 .refreshInterval }}
      {{- end }}

    filters: |
      [FILTER]
          Name parser
          Match cri.*
          Parser cri-log
          Key_Name log

      [FILTER]
          Name kubernetes
          Match cri.*
          Merge_Log On
          Merge_Log_Key kube
          Kube_Tag_Prefix cri.var.log.containers.
          Keep_Log Off
          K8S-Logging.Parser Off
          K8S-Logging.Exclude Off
          Kube_Meta_Cache_TTL 3600s
          Buffer_Size 0
          #Use_Kubelet true

      {{- if index .Values "config" "extraRecords" }}
      [FILTER]
          Name record_modifier
          Match cri.*
      {{- range $k,$v := index .Values "config" "extraRecords" }}
          Record {{ $k }} {{ $v }}
      {{- end }}
      {{- end }}

      [FILTER]
          Name rewrite_tag
          Match cri.*
          Emitter_Name kube_tag_rewriter
          Rule $kubernetes['pod_id'] .* kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false

      [FILTER]
          Name lua
          Match kube.*
          script /fluent-bit/scripts/kubezero.lua
          call nest_k8s_ns

    outputs: |
      [OUTPUT]
          Match *
          Name forward
          Host {{ .Values.config.output.host }}
          Port 24224
          Shared_Key {{ .Values.config.output.sharedKey }}
          tls {{ ternary "on" "off" .Values.config.output.tls }}
          Send_options true
          Require_ack_response true

    customParsers: |
      [PARSER]
          Name cri-log
          Format regex
          Regex ^(?<time>.+) (?<stream>stdout|stderr) (?<logtag>F|P) (?<log>.*)$
          Time_Key time
          Time_Format %Y-%m-%dT%H:%M:%S.%L%z

  # Nests each record under its namespace name to keep ES field mappings per-namespace
  luaScripts:
    kubezero.lua: |
      function nest_k8s_ns(tag, timestamp, record)
        if not record['kubernetes']['namespace_name'] then
          return 0, 0, 0
        end
        new_record = {}
        for key, val in pairs(record) do
          if key == 'kube' then
            new_record[key] = {}
            new_record[key][record['kubernetes']['namespace_name']] = record[key]
          else
            new_record[key] = record[key]
          end
        end
        return 1, timestamp, new_record
      end

  daemonSetVolumes:
    - name: varlog
      hostPath:
        path: /var/log
    - name: newlog
      hostPath:
        path: /var/lib/containers/logs

  daemonSetVolumeMounts:
    - name: varlog
      mountPath: /var/log
    - name: newlog
      mountPath: /var/lib/containers/logs