# use this for backwards compatibility
# fullnameOverride: ""
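# Illustrative only, not a chart default: a short override such as
# fullnameOverride: logging
# yields the "logging-..." resource names referenced further below
# (see the fluentd.output.host and extraEnvVars comments)
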
# Versions for ElasticSearch and Kibana have to match, so we define it at top-level
version: 7.8.1

elastic_password: "" # super_secret_elastic_password

es:
  nodeSets: []
  #- count: 2
  #  storage:
  #    size: 16Gi
  #    class: local-sc-xfs
  #    zone: us-west-2a
  s3Snapshot:
    enabled: false
    iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots

prometheus: false

kibana:
  count: 1
  #servicename: kibana.example.com
  istio:
    enabled: false
    gateway: "istio-system/ingressgateway"
    url: "" # kibana.example.com

fluentd:
  enabled: false
  image:
    repository: quay.io/fluentd_elasticsearch/fluentd
    tag: v3.0.4
  istio:
    enabled: false

  # broken as of 2.5.1 ;-(
  # useStatefulSet: true
  replicaCount: 2

  plugins:
    enabled: false
    pluginsList:
    #- fluent-plugin-detect-exceptions
    #- fluent-plugin-s3
    #- fluent-plugin-grok-parser

  #persistence:
  #  enabled: true
  #  storageClass: "ebs-sc-gp2-xfs"
  #  accessMode: ReadWriteOnce
  #  size: 4Gi

  service:
    ports:
    - name: tcp-forward
      protocol: TCP
      containerPort: 24224
    - name: http-fluentd
      protocol: TCP
      containerPort: 9880
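    # tcp-forward (24224) is the forward <source> configured in
    # forward-input.conf below; the fluent-bit [OUTPUT] at the end of
    # this file ships logs to it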

  metrics:
    enabled: false
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: metrics
      namespace: monitoring

  output:
    # Default should be "logging-kubezero-logging-es-http" if fullnameOverride is NOT used
    host: logging-es-http
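    # host: logging-kubezero-logging-es-http   # drop-in value if fullnameOverride stays unset, per the comment above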

  shared_key: "cloudbender"

  env:
    OUTPUT_USER: elastic
    OUTPUT_SSL_VERIFY: "false"

  # Same here: the secret names change if fullnameOverride is not used !!
  extraEnvVars:
  - name: OUTPUT_PASSWORD
    valueFrom:
      secretKeyRef:
        name: logging-es-elastic-user
        key: elastic
  - name: FLUENTD_SHARED_KEY
    valueFrom:
      secretKeyRef:
        name: logging-fluentd-secret
        key: shared_key
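  # Illustrative, following the naming pattern from the output.host comment:
  # without fullnameOverride, expect something like
  # "logging-kubezero-logging-es-elastic-user" instead of "logging-es-elastic-user"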

  extraVolumes:
  - name: fluentd-certs
    secret:
      secretName: fluentd-certificate
  extraVolumeMounts:
  - name: fluentd-certs
    mountPath: /mnt/fluentd-certs
    readOnly: true

  configMaps:
    forward-input.conf: |
      <source>
        @type forward
        port 24224
        bind 0.0.0.0
        skip_invalid_event true
        <transport tls>
          cert_path /mnt/fluentd-certs/tls.crt
          private_key_path /mnt/fluentd-certs/tls.key
        </transport>
        <security>
          self_hostname "#{ENV['HOSTNAME']}"
          shared_key "#{ENV['FLUENTD_SHARED_KEY']}"
        </security>
      </source>
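      # The <security> block above enforces shared-key authentication:
      # clients must present the same key, which fluentd reads from
      # FLUENTD_SHARED_KEY (extraEnvVars above) and which has to match
      # fluent-bit's Shared_Key at the bottom of this file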

    output.conf: |
      <match **>
        @id elasticsearch
        @type elasticsearch
        @log_level info
        include_tag_key true
        id_key id
        remove_keys id

        # KubeZero pipeline incl. GeoIP etc.
        # Freaking ES jams under load and all is lost ...
        # pipeline fluentd

        host "#{ENV['OUTPUT_HOST']}"
        port "#{ENV['OUTPUT_PORT']}"
        scheme "#{ENV['OUTPUT_SCHEME']}"
        ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
        ssl_verify "#{ENV['OUTPUT_SSL_VERIFY']}"
        user "#{ENV['OUTPUT_USER']}"
        password "#{ENV['OUTPUT_PASSWORD']}"

        log_es_400_reason true
        logstash_format true
        reconnect_on_error true
        # reload_on_failure true
        request_timeout 15s
        suppress_type_name true

        <buffer>
          @type file
          path /var/log/fluentd-buffers/kubernetes.system.buffer
          flush_mode interval
          flush_thread_count 2
          flush_interval 30s
          flush_at_shutdown true
          retry_type exponential_backoff
          retry_timeout 60m
          chunk_limit_size 16M
          overflow_action drop_oldest_chunk
        </buffer>
      </match>

    filter.conf: |
      <filter kube.**>
        @type parser
        key_name message
        remove_key_name_field true
        reserve_data true
        emit_invalid_record_to_error false
        <parse>
          @type json
        </parse>
      </filter>

      # <filter auth system.auth>
      #   @type parser
      #   key_name message
      #   reserve_data true
      #   reserve_time true
      #   <parse>
      #     @type grok
      #
      #     # SSH
      #     <grok>
      #       pattern %{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user )?%{DATA:system.auth.user} from %{IPORHOST:system.auth.ip} port %{NUMBER:system.auth.port} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?
      #     </grok>
      #     <grok>
      #       pattern %{DATA:system.auth.ssh.event} user %{DATA:system.auth.user} from %{IPORHOST:system.auth.ip}
      #     </grok>
      #
      #     # sudo
      #     <grok>
      #       pattern \s*%{DATA:system.auth.user} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}
      #     </grok>
      #
      #     # Users
      #     <grok>
      #       pattern new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}
      #     </grok>
      #     <grok>
      #       pattern new user: name=%{DATA:system.auth.useradd.name}, UID=%{NUMBER:system.auth.useradd.uid}, GID=%{NUMBER:system.auth.useradd.gid}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$
      #     </grok>
      #
      #     <grok>
      #       pattern %{GREEDYDATA:message}
      #     </grok>
      #   </parse>
      # </filter>
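      # enabling the grok filter above additionally requires
      # fluent-plugin-grok-parser from plugins.pluginsList further up
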
fluent-bit:
  enabled: false
  test:
    enabled: false

  config:
    outputs: |
      [OUTPUT]
          Match *
          Name forward
          Host logging-fluentd
          Port 24224
          tls on
          tls.verify off
          Shared_Key cloudbender
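          # must stay in sync with fluentd.shared_key above ("cloudbender"),
          # which the forward <source> in forward-input.conf verifies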

    inputs: |
      [INPUT]
          Name tail
          Path /var/log/containers/*.log
          Parser cri
          Tag kube.*
          Mem_Buf_Limit 5MB
          Skip_Long_Lines On
          Refresh_Interval 10
          DB /var/log/flb_kube.db
          DB.Sync Normal
      [INPUT]
          Name tail
          Path /var/log/kubernetes/audit.log
          Parser json
          Tag audit.api-server
          Mem_Buf_Limit 5MB
          Skip_Long_Lines On
          Refresh_Interval 60
          DB /var/log/flb_kube_audit.db
          DB.Sync Normal

    filters: |
      [FILTER]
          Name lua
          Match kube.*
          script /fluent-bit/etc/functions.lua
          call reassemble_cri_logs
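          # reassemble_cri_logs and dedot (below) are defined in the "lua"
          # section at the end of this file, which is assumed to be mounted
          # by the chart as /fluent-bit/etc/functions.lua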

      [FILTER]
          Name kubernetes
          Match kube.*
          Merge_Log On
          Keep_Log Off
          K8S-Logging.Parser On
          K8S-Logging.Exclude On

      [FILTER]
          Name lua
          Match kube.*
          script /fluent-bit/etc/functions.lua
          call dedot

    service: |
      [SERVICE]
          Flush 5
          Daemon Off
          Log_Level warn
          Parsers_File parsers.conf
          Parsers_File custom_parsers.conf
          HTTP_Server On
          HTTP_Listen 0.0.0.0
          HTTP_Port 2020

    lua: |
      -- dedot: replace dots in kubernetes label/annotation keys with
      -- underscores, since dotted keys expand into nested fields in
      -- ElasticSearch and cause mapping conflicts
      function dedot(tag, timestamp, record)
        if record["kubernetes"] == nil then
          return 0, 0, 0
        end
        dedot_keys(record["kubernetes"]["annotations"])
        dedot_keys(record["kubernetes"]["labels"])
        return 1, timestamp, record
      end

      function dedot_keys(map)
        if map == nil then
          return
        end
        local new_map = {}
        local changed_keys = {}
        for k, v in pairs(map) do
          local dedotted = string.gsub(k, "%.", "_")
          if dedotted ~= k then
            new_map[dedotted] = v
            changed_keys[k] = true
          end
        end
        for k in pairs(changed_keys) do
          map[k] = nil
        end
        for k, v in pairs(new_map) do
          map[k] = v
        end
      end

      local reassemble_state = {}

      function reassemble_cri_logs(tag, timestamp, record)
        -- IMPORTANT: reassemble_key must be unique for each parser stream
        -- otherwise entries from different sources will get mixed up.
        -- Either make sure that your parser tags satisfy this or construct
        -- reassemble_key some other way
        local reassemble_key = tag
        -- if partial line, accumulate
        -- (the parentheses matter: ".." binds tighter than "or", so without
        -- them an already-buffered partial line would never be appended to)
        if record.logtag == 'P' then
          reassemble_state[reassemble_key] = (reassemble_state[reassemble_key] or "") .. record.message
          return -1, 0, 0
        end
        -- otherwise it's a full line, concatenate with accumulated partial lines if any
        record.message = (reassemble_state[reassemble_key] or "") .. (record.message or "")
        reassemble_state[reassemble_key] = nil
        return 1, timestamp, record
      end

  serviceMonitor:
    enabled: true
    namespace: monitoring
    selector:
      release: metrics

  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule