# use this for backwards compatibility
# fullnameOverride: ""

# Versions for Elasticsearch and Kibana have to match, so we define it at the top level
version: 7.8.1

elastic_password: ""  # super_secret_elastic_password

es:
  nodeSets: []
  #- count: 2
  #  storage:
  #    size: 16Gi
  #    class: local-sc-xfs
  #  zone: us-west-2a

  s3Snapshot:
    enabled: false
    iamrole: ""  # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots

  prometheus: false

kibana:
  count: 1
  #servicename: kibana.example.com
  istio:
    enabled: false
    gateway: "istio-system/ingressgateway"
    url: ""  # kibana.example.com

fluentd:
  enabled: false
  image:
    repository: quay.io/fluentd_elasticsearch/fluentd
    tag: v2.9.0
  istio:
    enabled: false

  # broken as of 2.5.1 ;-(
  # useStatefulSet: true
  replicaCount: 2

  plugins:
    enabled: false
    pluginsList:
    #- fluent-plugin-detect-exceptions
    #- fluent-plugin-s3
    #- fluent-plugin-grok-parser

  #persistence:
  #  enabled: true
  #  storageClass: "ebs-sc-gp2-xfs"
  #  accessMode: ReadWriteOnce
  #  size: 4Gi

  service:
    ports:
    - name: tcp-forward
      protocol: TCP
      containerPort: 24224
    - name: http-fluentd
      protocol: TCP
      containerPort: 9880
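    # tcp-forward 24224 has to match the forward <source> port in forward-input.conf below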

  metrics:
    enabled: false
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: metrics
      namespace: monitoring

  output:
    # Default should be "logging-kubezero-logging-es-http" if fullnameOverride is NOT used
    host: logging-es-http
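    # e.g. without fullnameOverride, per the comment above:
    # host: logging-kubezero-logging-es-http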
    shared_key: "cloudbender"

  env:
    OUTPUT_USER: elastic
    OUTPUT_SSL_VERIFY: "false"

  # Same here: the secret names change if fullnameOverride is not used!!
  extraEnvVars:
  - name: OUTPUT_PASSWORD
    valueFrom:
      secretKeyRef:
        name: logging-es-elastic-user
        key: elastic
  - name: FLUENTD_SHARED_KEY
    valueFrom:
      secretKeyRef:
        name: logging-fluentd-secret
        key: shared_key
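  # Assumption: without fullnameOverride the secret names above are presumably
  # prefixed like the ES host, e.g. "logging-kubezero-logging-es-elastic-user"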

  extraVolumes:
  - name: fluentd-certs
    secret:
      secretName: fluentd-certificate

  extraVolumeMounts:
  - name: fluentd-certs
    mountPath: /mnt/fluentd-certs
    readOnly: true

  configMaps:
    forward-input.conf: |
      <source>
        @type forward
        port 24224
        bind 0.0.0.0
        skip_invalid_event true
        # Only for TCP, not TLS
        # send_keepalive_packet true
        <transport tls>
          cert_path /mnt/fluentd-certs/tls.crt
          private_key_path /mnt/fluentd-certs/tls.key
        </transport>
        <security>
          self_hostname "#{ENV['HOSTNAME']}"
          shared_key "#{ENV['FLUENTD_SHARED_KEY']}"
        </security>
      </source>
    output.conf: |
      <match **>
        @id elasticsearch
        @type elasticsearch
        @log_level info
        include_tag_key true
        id_key id
        remove_keys id
        # KubeZero pipeline incl. GeoIP etc.
        # pipeline fluentd
        host "#{ENV['OUTPUT_HOST']}"
        port "#{ENV['OUTPUT_PORT']}"
        scheme "#{ENV['OUTPUT_SCHEME']}"
        ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
        ssl_verify "#{ENV['OUTPUT_SSL_VERIFY']}"
        user "#{ENV['OUTPUT_USER']}"
        password "#{ENV['OUTPUT_PASSWORD']}"
        log_es_400_reason true
        logstash_format true
        reconnect_on_error true
        # reload_on_failure true
        request_timeout 15s
        suppress_type_name true

        <buffer tag>
          @type file_single
          path /var/log/fluentd-buffers/kubernetes.system.buffer
          flush_mode interval
          flush_thread_count 2
          flush_interval 30s
          flush_at_shutdown true
          retry_type exponential_backoff
          retry_timeout 60m
          overflow_action drop_oldest_chunk
        </buffer>
      </match>

    filter.conf: |
      <filter kube.**>
        @type parser
        key_name message
        remove_key_name_field true
        reserve_data true
        emit_invalid_record_to_error false
        <parse>
          @type json
        </parse>
      </filter>
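      # The parser filter above lifts JSON-formatted container log messages into
      # top-level fields while keeping the original record (reserve_data true)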

      # <filter auth system.auth>
      #   @type parser
      #   key_name message
      #   reserve_data true
      #   reserve_time true
      #   <parse>
      #     @type grok
      #
      #     # SSH
      #     <grok>
      #       pattern %{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user )?%{DATA:system.auth.user} from %{IPORHOST:system.auth.ip} port %{NUMBER:system.auth.port} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?
      #     </grok>
      #     <grok>
      #       pattern %{DATA:system.auth.ssh.event} user %{DATA:system.auth.user} from %{IPORHOST:system.auth.ip}
      #     </grok>
      #
      #     # sudo
      #     <grok>
      #       pattern \s*%{DATA:system.auth.user} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}
      #     </grok>
      #
      #     # Users
      #     <grok>
      #       pattern new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}
      #     </grok>
      #     <grok>
      #       pattern new user: name=%{DATA:system.auth.useradd.name}, UID=%{NUMBER:system.auth.useradd.uid}, GID=%{NUMBER:system.auth.useradd.gid}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$
      #     </grok>
      #
      #     <grok>
      #       pattern %{GREEDYDATA:message}
      #     </grok>
      #   </parse>
      # </filter>
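      # e.g. the first SSH pattern above would match a journal line like
      # "Accepted publickey for ubuntu from 10.0.1.2 port 54321 ssh2: RSA SHA256:..." (hypothetical sample)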

fluent-bit:
  enabled: false

  test:
    enabled: false

  config:
    outputs: |
      [OUTPUT]
          Match *
          Name forward
          Host logging-fluentd
          Port 24224
          tls on
          tls.verify off
          Shared_Key cloudbender
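          # Shared_Key and Port have to match fluentd's output.shared_key and forward <source> above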
    inputs: |
      [INPUT]
          Name tail
          Path /var/log/containers/*.log
          Parser cri
          Tag kube.*
          Mem_Buf_Limit 8MB
          Skip_Long_Lines On
          Refresh_Interval 10
          Exclude_Path *.gz,*.zip
          DB /var/log/flb_kube.db
          DB.Sync Normal

      [INPUT]
          Name tail
          Path /var/log/kubernetes/audit.log
          Parser json
          Tag audit.api-server
          Mem_Buf_Limit 8MB
          Skip_Long_Lines On
          DB /var/log/flb_kube_audit.db
          DB.Sync Normal
    filters: |
      [FILTER]
          Name lua
          Match kube.*
          script /fluent-bit/etc/functions.lua
          call reassemble_cri_logs

      [FILTER]
          Name kubernetes
          Match kube.*
          Merge_Log On
          Keep_Log Off
          K8S-Logging.Parser On
          K8S-Logging.Exclude On

      [FILTER]
          Name lua
          Match kube.*
          script /fluent-bit/etc/functions.lua
          call dedot

    service: |
      [SERVICE]
          Flush 5
          Daemon Off
          Log_Level warn
          Parsers_File parsers.conf
          Parsers_File custom_parsers.conf
          HTTP_Server On
          HTTP_Listen 0.0.0.0
          HTTP_Port 2020
    lua: |
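      -- Replace dots in kubernetes label/annotation keys with underscores
      -- (e.g. "app.kubernetes.io/name" becomes "app_kubernetes_io/name") so
      -- they do not clash with Elasticsearch's dotted-field mappings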
      function dedot(tag, timestamp, record)
        if record["kubernetes"] == nil then
          return 0, 0, 0
        end
        dedot_keys(record["kubernetes"]["annotations"])
        dedot_keys(record["kubernetes"]["labels"])
        return 1, timestamp, record
      end

      function dedot_keys(map)
        if map == nil then
          return
        end
        local new_map = {}
        local changed_keys = {}
        for k, v in pairs(map) do
          local dedotted = string.gsub(k, "%.", "_")
          if dedotted ~= k then
            new_map[dedotted] = v
            changed_keys[k] = true
          end
        end
        for k in pairs(changed_keys) do
          map[k] = nil
        end
        for k, v in pairs(new_map) do
          map[k] = v
        end
      end

      local reassemble_state = {}

      function reassemble_cri_logs(tag, timestamp, record)
        -- IMPORTANT: reassemble_key must be unique for each parser stream,
        -- otherwise entries from different sources will get mixed up.
        -- Either make sure that your parser tags satisfy this or construct
        -- reassemble_key some other way
        local reassemble_key = tag
        -- if partial line, accumulate
        -- (parentheses are required: ".." binds tighter than "or" in Lua)
        if record.logtag == 'P' then
          reassemble_state[reassemble_key] = (reassemble_state[reassemble_key] or "") .. record.message
          return -1, 0, 0
        end
        -- otherwise it's a full line, concatenate with accumulated partial lines if any
        record.message = (reassemble_state[reassemble_key] or "") .. (record.message or "")
        reassemble_state[reassemble_key] = nil
        return 1, timestamp, record
      end
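      -- fluent-bit Lua filter return codes: -1 drops the record, 0 keeps it
      -- unchanged, 1 replaces it with the returned timestamp and record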

  serviceMonitor:
    enabled: true
    namespace: monitoring
    selector:
      release: metrics

  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule