# KubeZero/charts/kubezero-logging/values.yaml

# use this for backwards compatibility
# fullnameOverride: ""
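# e.g. with the override set (hypothetical value):
# fullnameOverride: logging
# the generated resource names match the short ones referenced below
# (logging-es-http, logging-es-elastic-user, logging-fluentd-secret)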

eck-operator:
  enabled: false
  installCRDs: false
  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
  nodeSelector:
    node-role.kubernetes.io/master: ""

# The versions of Elasticsearch and Kibana have to match, so we define it at top-level
version: 7.11.1

elastic_password: ""  # super_secret_elastic_password

es:
  nodeSets: []
  #- count: 2
  #  storage:
  #    size: 16Gi
  #    class: local-sc-xfs
  #  zone: us-west-2a
  s3Snapshot:
    enabled: false
    iamrole: ""  # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
  prometheus: false

kibana:
  count: 1
  #servicename: kibana.example.com
  istio:
    enabled: false
    gateway: "istio-system/ingressgateway"
    url: ""  # kibana.example.com

fluentd:
  enabled: false
  image:
    repository: quay.io/fluentd_elasticsearch/fluentd
    tag: v2.9.0
  istio:
    enabled: false

  # broken as of 2.5.1 ;-(
  # useStatefulSet: true

  replicaCount: 2

  plugins:
    enabled: false
    pluginsList:
    #- fluent-plugin-detect-exceptions
    #- fluent-plugin-s3
    #- fluent-plugin-grok-parser

  #persistence:
  #  enabled: true
  #  storageClass: "ebs-sc-gp2-xfs"
  #  accessMode: ReadWriteOnce
  #  size: 4Gi

  service:
    ports:
      - name: tcp-forward
        protocol: TCP
        containerPort: 24224
      - name: http-fluentd
        protocol: TCP
        containerPort: 9880

  metrics:
    enabled: false
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: metrics

  output:
    # Default should be "logging-kubezero-logging-es-http" if fullnameOverride is NOT used
    host: logging-es-http

  shared_key: "cloudbender"

  env:
    OUTPUT_USER: elastic
    OUTPUT_SSL_VERIFY: "false"

  # Same here: the secret names change if fullnameOverride is not used!
  extraEnvVars:
    - name: OUTPUT_PASSWORD
      valueFrom:
        secretKeyRef:
          name: logging-es-elastic-user
          key: elastic
    - name: FLUENTD_SHARED_KEY
      valueFrom:
        secretKeyRef:
          name: logging-fluentd-secret
          key: shared_key

  configMaps:
    general.conf: |
      <label @FLUENT_LOG>
        <match **>
          @type null
        </match>
      </label>
      <source>
        @type http
        port 9880
        bind 0.0.0.0
        keepalive_timeout 30
      </source>
      <source>
        @type monitor_agent
        bind 0.0.0.0
        port 24220
        tag fluentd.monitor.metrics
      </source>
    forward-input.conf: |
      <source>
        @type forward
        port 24224
        bind 0.0.0.0
        skip_invalid_event true
        send_keepalive_packet true
        <security>
          self_hostname "#{ENV['HOSTNAME']}"
          shared_key "#{ENV['FLUENTD_SHARED_KEY']}"
        </security>
      </source>
    output.conf: |
      <match **>
        @id elasticsearch
        @type elasticsearch
        @log_level info
        include_tag_key true
        id_key id
        remove_keys id

        # KubeZero pipeline incl. GeoIP etc.
        pipeline fluentd
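        # Note: the "fluentd" ingest pipeline must already exist in
        # Elasticsearch. A minimal sketch of creating one (the processor
        # and field name are assumptions, not the actual KubeZero pipeline):
        #   PUT _ingest/pipeline/fluentd
        #   { "processors": [ { "geoip": { "field": "source_ip" } } ] }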

        hosts "#{ENV['OUTPUT_HOST']}"
        port "#{ENV['OUTPUT_PORT']}"
        scheme "#{ENV['OUTPUT_SCHEME']}"
        ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
        ssl_verify "#{ENV['OUTPUT_SSL_VERIFY']}"
        user "#{ENV['OUTPUT_USER']}"
        password "#{ENV['OUTPUT_PASSWORD']}"

        log_es_400_reason
        logstash_format true
        reconnect_on_error true
        reload_on_failure true
        request_timeout 60s
        suppress_type_name true

        <buffer tag>
          @type file_single
          path /var/log/fluentd-buffers/kubernetes.system.buffer
          chunk_limit_size 16MB
          total_limit_size 4GB
          flush_mode interval
          flush_thread_count 4
          flush_interval 5s
          flush_at_shutdown true
          retry_type exponential_backoff
          retry_timeout 60m
          overflow_action drop_oldest_chunk
          disable_chunk_backup true
        </buffer>
      </match>
    filter.conf: |
      <filter disabled.kube.**>
        @type parser
        key_name message
        remove_key_name_field true
        reserve_data true
        reserve_time true
        # inject_key_prefix message_json.
        emit_invalid_record_to_error false
        <parse>
          @type json
        </parse>
      </filter>

fluent-bit:
  enabled: false

  serviceMonitor:
    enabled: true
    selector:
      release: metrics

  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule

  config:
    output:
      host: logging-fluentd
      sharedKey: cloudbender
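      # must match fluentd.shared_key above, otherwise the secure forward
      # handshake between fluent-bit and fluentd fails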
      tls: false

    input:
      memBufLimit: 16MB
      refreshInterval: 10

    logLevel: warn
    flushInterval: 1

    #extraRecords:
    #  source.clustername: MyKubeCluster

    service: |
      [SERVICE]
        Flush {{ .Values.config.flushInterval }}
        Daemon Off
        Log_Level {{ .Values.config.logLevel }}
        Parsers_File parsers.conf
        Parsers_File custom_parsers.conf
        HTTP_Server On
        HTTP_Listen 0.0.0.0
        HTTP_Port {{ .Values.service.port }}
    inputs: |
      [INPUT]
        Name tail
        Path /var/log/containers/*.log
        Parser cri-log
        Tag cri.*
        Skip_Long_Lines On
        DB /var/log/flb_kube.db
        DB.Sync Normal
      {{- with .Values.config.input }}
        Mem_Buf_Limit {{ default "16MB" .memBufLimit }}
        Refresh_Interval {{ default 10 .refreshInterval }}
      {{- end }}
    filters: |
      [FILTER]
        Name lua
        Match cri.*
        script /fluent-bit/scripts/kubezero.lua
        call reassemble_cri_logs

      [FILTER]
        Name kubernetes
        Match cri.*
        Merge_Log On
        Merge_Log_Key kube
        Kube_Tag_Prefix cri.var.log.containers.
        Keep_Log Off
        K8S-Logging.Parser Off
        K8S-Logging.Exclude Off
        #Use_Kubelet true
        #Kubelet_Port 10250

      {{- if index .Values "config" "extraRecords" }}
      [FILTER]
        Name record_modifier
        Match cri.*
      {{- range $k,$v := index .Values "config" "extraRecords" }}
        Record {{ $k }} {{ $v }}
      {{- end }}
      {{- end }}

      [FILTER]
        Name rewrite_tag
        Match cri.*
        Emitter_Name kube_tag_rewriter
        Rule $logtag F kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false
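        # Rule syntax: $KEY REGEX NEW_TAG KEEP. Records whose logtag field is
        # "F" (a complete line) are re-tagged kube.<namespace>.<container>;
        # KEEP false drops the original cri.* record.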

      [FILTER]
        Name lua
        Match kube.*
        script /fluent-bit/scripts/kubezero.lua
        call nest_k8s_ns
    outputs: |
      [OUTPUT]
        Match *
        Name forward
        Host {{ .Values.config.output.host }}
        Port 24224
        Shared_Key {{ .Values.config.output.sharedKey }}
        tls {{ ternary "on" "off" .Values.config.output.tls }}
        Send_options true
        Require_ack_response true
    customParsers: |
      [PARSER]
        Name cri-log
        Format regex
        Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$
        Time_Key time
        Time_Format %Y-%m-%dT%H:%M:%S.%L%z
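        # Example CRI runtime log line this parser matches (message is hypothetical):
        #   2021-02-22T20:34:45.123456789+00:00 stdout P some long partial line
        # where logtag "P" marks a partial chunk and "F" the final chunk.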

  luaScripts:
    kubezero.lua: |
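      -- CRI runtimes split long log lines into chunks: the logtag field is
      -- "P" for a partial chunk and "F" for the final one. reassemble_cri_logs
      -- buffers "P" chunks per tag and emits the joined line with the final chunk.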
      local reassemble_state = {}

      function reassemble_cri_logs(tag, timestamp, record)
        local reassemble_key = tag
        if record.logtag == 'P' then
          -- note: parentheses are required, `or` binds more loosely than `..`
          reassemble_state[reassemble_key] = (reassemble_state[reassemble_key] or "") .. record.log
          return -1, 0, 0
        end
        record.log = (reassemble_state[reassemble_key] or "") .. (record.log or "")
        reassemble_state[reassemble_key] = nil
        return 1, timestamp, record
      end
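
      -- Nest the application log merged under "kube" (see Merge_Log_Key above)
      -- one level deeper, keyed by the namespace name (kube.<namespace>.<fields>),
      -- so fields from different namespaces land under distinct keys.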
      function nest_k8s_ns(tag, timestamp, record)
        -- guard against records without kubernetes metadata
        if not record['kubernetes'] or not record['kubernetes']['namespace_name'] then
          return 0, 0, 0
        end
        local new_record = {}
        for key, val in pairs(record) do
          if key == 'kube' then
            new_record[key] = {}
            new_record[key][record['kubernetes']['namespace_name']] = val
          else
            new_record[key] = val
          end
        end
        return 1, timestamp, new_record
      end