# use this for backwards compatibility
# fullnameOverride: ""
# Elasticsearch and Kibana versions have to match, so we define it once at the top level
version: 7.8.1
elastic_password: "" # super_secret_elastic_password
es:
nodeSets: []
#- count: 2
# storage:
# size: 16Gi
# class: local-sc-xfs
# zone: us-west-2a
s3Snapshot:
enabled: false
iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
prometheus: false
kibana:
count: 1
#servicename: kibana.example.com
istio:
enabled: false
gateway: "istio-system/ingressgateway"
url: "" # kibana.example.com
fluentd:
enabled: false
image:
repository: quay.io/fluentd_elasticsearch/fluentd
tag: v2.9.0
istio:
enabled: false
# broken as of 2.5.1 ;-(
# useStatefulSet: true
replicaCount: 2
plugins:
enabled: false
pluginsList:
#- fluent-plugin-detect-exceptions
#- fluent-plugin-s3
#- fluent-plugin-grok-parser
#persistence:
# enabled: true
# storageClass: "ebs-sc-gp2-xfs"
# accessMode: ReadWriteOnce
# size: 4Gi
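# 24224 receives forwarded logs from fluent-bit, 9880 is fluentd's default HTTP input port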
service:
ports:
- name: tcp-forward
protocol: TCP
containerPort: 24224
- name: http-fluentd
protocol: TCP
containerPort: 9880
metrics:
enabled: false
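# labels and namespace have to match what the Prometheus operator's ServiceMonitor selector expects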
serviceMonitor:
enabled: true
additionalLabels:
release: metrics
namespace: monitoring
output:
host: logging-es-http
shared_key: "cloudbender"
env:
OUTPUT_USER: elastic
OUTPUT_SSL_VERIFY: "false"
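# The elastic user password and the forward shared key are read from existing secrets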
extraEnvVars:
- name: OUTPUT_PASSWORD
valueFrom:
secretKeyRef:
name: logging-es-elastic-user
key: elastic
- name: FLUENTD_SHARED_KEY
valueFrom:
secretKeyRef:
name: logging-fluentd-secret
key: shared_key
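# TLS key pair for the forward input, mounted from the fluentd-certificate secret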
extraVolumes:
- name: fluentd-certs
secret:
secretName: fluentd-certificate
extraVolumeMounts:
- name: fluentd-certs
mountPath: /mnt/fluentd-certs
readOnly: true
configMaps:
forward-input.conf: |
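# A forward <source> with TLS and shared-key auth might look like the sketch below
# (assumption only, left commented out - verify against the chart templates):
# <source>
#   @type forward
#   port 24224
#   bind 0.0.0.0
#   <security>
#     self_hostname "#{ENV['HOSTNAME']}"
#     shared_key "#{ENV['FLUENTD_SHARED_KEY']}"
#   </security>
#   <transport tls>
#     cert_path /mnt/fluentd-certs/tls.crt
#     private_key_path /mnt/fluentd-certs/tls.key
#   </transport>
# </source>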
output.conf: |
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
id_key id
remove_keys id
# KubeZero pipeline incl. GeoIP etc.
pipeline fluentd
host "#{ENV['OUTPUT_HOST']}"
port "#{ENV['OUTPUT_PORT']}"
scheme "#{ENV['OUTPUT_SCHEME']}"
ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
ssl_verify "#{ENV['OUTPUT_SSL_VERIFY']}"
user "#{ENV['OUTPUT_USER']}"
password "#{ENV['OUTPUT_PASSWORD']}"
log_es_400_reason
logstash_format true
reconnect_on_error true
# reload_on_failure true
request_timeout 15s
suppress_type_name true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
flush_thread_count 2
flush_interval 30s
flush_at_shutdown true
retry_type exponential_backoff
retry_timeout 60m
chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
overflow_action drop_oldest_chunk
</buffer>
</match>
# filter.conf: |
#   <filter ...>
#     @type parser
#     key_name message
#     reserve_data true
#     reserve_time true
#     <parse>
#       @type grok
#       # SSH
#       <grok>
#         pattern %{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user )?%{DATA:system.auth.user} from %{IPORHOST:system.auth.ip} port %{NUMBER:system.auth.port} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?
#       </grok>
#       <grok>
#         pattern %{DATA:system.auth.ssh.event} user %{DATA:system.auth.user} from %{IPORHOST:system.auth.ip}
#       </grok>
#       # sudo
#       <grok>
#         pattern \s*%{DATA:system.auth.user} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}
#       </grok>
#       # Users
#       <grok>
#         pattern new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}
#       </grok>
#       <grok>
#         pattern new user: name=%{DATA:system.auth.useradd.name}, UID=%{NUMBER:system.auth.useradd.uid}, GID=%{NUMBER:system.auth.useradd.gid}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$
#       </grok>
#       # catch-all
#       <grok>
#         pattern %{GREEDYDATA:message}
#       </grok>
#     </parse>
#   </filter>
fluent-bit:
enabled: false
test:
enabled: false
config:
outputs: |
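# Forward all records to the fluentd aggregator over TLS, authenticated with the shared key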
[OUTPUT]
Match *
Name forward
Host logging-fluentd
Port 24224
tls on
tls.verify off
Shared_Key cloudbender
inputs: |
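# Tail container logs in CRI format and persist file offsets in an on-disk DB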
[INPUT]
Name tail
Path /var/log/containers/*.log
Parser cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
Refresh_Interval 10
DB /var/log/flb_kube.db
DB.Sync Normal
filters: |
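# Enrich records with Kubernetes metadata, then de-dot label/annotation keys via the Lua helper below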
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
[FILTER]
Name lua
Match kube.*
script /fluent-bit/etc/functions.lua
call dedot
service: |
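# Flush every 5s and expose the built-in HTTP server on port 2020 for metrics scraping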
[SERVICE]
Flush 5
Daemon Off
Log_Level warn
Parsers_File parsers.conf
Parsers_File custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port 2020
lua: |
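-- Replace dots in Kubernetes label and annotation keys with underscores
-- to avoid Elasticsearch field mapping conflicts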
function dedot(tag, timestamp, record)
if record["kubernetes"] == nil then
return 0, 0, 0
end
dedot_keys(record["kubernetes"]["annotations"])
dedot_keys(record["kubernetes"]["labels"])
return 1, timestamp, record
end
function dedot_keys(map)
if map == nil then
return
end
local new_map = {}
local changed_keys = {}
for k, v in pairs(map) do
local dedotted = string.gsub(k, "%.", "_")
if dedotted ~= k then
new_map[dedotted] = v
changed_keys[k] = true
end
end
for k in pairs(changed_keys) do
map[k] = nil
end
for k, v in pairs(new_map) do
map[k] = v
end
end
serviceMonitor:
enabled: true
namespace: monitoring
selector:
release: metrics
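# Run fluent-bit on (tainted) master nodes as well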
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule