---
# KubeZero logging stack values: Elasticsearch/Kibana (ECK), fluentd aggregator,
# fluent-bit node collectors.

# use this for backwards compatibility
# fullnameOverride: ""

# Version for ElasticSearch and Kibana have to match so we define it at top-level
version: 7.8.1

elastic_password: ""  # super_secret_elastic_password

es:
  nodeSets: []
  #- count: 2
  #  storage:
  #    size: 16Gi
  #    class: local-sc-xfs
  #    zone: us-west-2a

  s3Snapshot:
    enabled: false
    iamrole: ""  # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots

  prometheus: false

kibana:
  count: 1
  #servicename: kibana.example.com
  istio:
    enabled: false
    gateway: "istio-system/ingressgateway"
    url: ""  # kibana.example.com

fluentd:
  enabled: false
  image:
    repository: quay.io/fluentd_elasticsearch/fluentd
    tag: v2.9.0
  istio:
    enabled: false

  # broken as of 2.5.1 ;-(
  # useStatefulSet: true
  replicaCount: 2

  plugins:
    enabled: false
    pluginsList: []
    #- fluent-plugin-detect-exceptions
    #- fluent-plugin-s3
    #- fluent-plugin-grok-parser

  #persistence:
  #  enabled: true
  #  storageClass: "ebs-sc-gp2-xfs"
  #  accessMode: ReadWriteOnce
  #  size: 4Gi

  service:
    ports:
      - name: tcp-forward
        protocol: TCP
        containerPort: 24224
      - name: http-fluentd
        protocol: TCP
        containerPort: 9880

  metrics:
    enabled: false
    serviceMonitor:
      enabled: true
      additionalLabels:
        release: metrics
      namespace: monitoring

  output:
    # Default should be "logging-kubezero-logging-es-http" if fullnameOverride is NOT used
    host: logging-es-http

  shared_key: "cloudbender"

  env:
    OUTPUT_USER: elastic
    OUTPUT_SSL_VERIFY: "false"

  # Same here the secret names change if fullnameOverride is not used !!
  extraEnvVars:
    - name: OUTPUT_PASSWORD
      valueFrom:
        secretKeyRef:
          name: logging-es-elastic-user
          key: elastic
    - name: FLUENTD_SHARED_KEY
      valueFrom:
        secretKeyRef:
          name: logging-fluentd-secret
          key: shared_key

  # Embedded fluentd configuration. NOTE(review): the <...> directive tags were
  # missing from the mangled source and have been reconstructed from standard
  # fluentd syntax — confirm against the deployed chart.
  configMaps:
    general.conf: |
      <source>
        @type http
        port 9880
        bind 0.0.0.0
        keepalive_timeout 30
      </source>
      <source>
        @type monitor_agent
        bind 0.0.0.0
        port 24220
        tag fluentd.monitor.metrics
      </source>
    forward-input.conf: |
      <source>
        @type forward
        port 24224
        bind 0.0.0.0
        skip_invalid_event true
        send_keepalive_packet true
        <security>
          self_hostname "#{ENV['HOSTNAME']}"
          shared_key "#{ENV['FLUENTD_SHARED_KEY']}"
        </security>
      </source>
    output.conf: |
      <match **>
        @id elasticsearch
        @type elasticsearch
        @log_level info
        include_tag_key true
        id_key id
        remove_keys id

        # KubeZero pipeline incl. GeoIP etc.
        # pipeline fluentd

        host "#{ENV['OUTPUT_HOST']}"
        port "#{ENV['OUTPUT_PORT']}"
        scheme "#{ENV['OUTPUT_SCHEME']}"
        ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
        ssl_verify "#{ENV['OUTPUT_SSL_VERIFY']}"
        user "#{ENV['OUTPUT_USER']}"
        password "#{ENV['OUTPUT_PASSWORD']}"

        log_es_400_reason
        logstash_format true
        reconnect_on_error true
        # reload_on_failure true
        request_timeout 15s
        suppress_type_name true

        <buffer>
          @type file_single
          path /var/log/fluentd-buffers/kubernetes.system.buffer
          flush_mode interval
          flush_thread_count 2
          flush_interval 30s
          flush_at_shutdown true
          retry_type exponential_backoff
          retry_timeout 60m
          overflow_action drop_oldest_chunk
        </buffer>
      </match>
    filter.conf: |
      # NOTE(review): the <filter> match pattern could not be recovered from the
      # source — "**" is a placeholder; confirm the intended scope.
      <filter **>
        @type parser
        key_name message
        remove_key_name_field true
        reserve_data true
        # inject_key_prefix message_json.
        emit_invalid_record_to_error false
        <parse>
          @type json
        </parse>
      </filter>

fluent-bit:
  enabled: false

  test:
    enabled: false

  config:
    outputs: |
      [OUTPUT]
          Match *
          Name forward
          Host logging-fluentd
          Port 24224
    inputs: |
      [INPUT]
          Name tail
          Path /var/log/containers/*.log
          Parser cri
          Tag kube.*
          Mem_Buf_Limit 16MB
          Skip_Long_Lines On
          Refresh_Interval 10
          Exclude_Path *.gz,*.zip
          DB /var/log/flb_kube.db
          DB.Sync Normal
      [INPUT]
          Name tail
          Path /var/log/kubernetes/audit.log
          Parser json
          Tag audit.api-server
          Mem_Buf_Limit 8MB
          Skip_Long_Lines On
          DB /var/log/flb_kube_audit.db
          DB.Sync Normal
    filters: |
      [FILTER]
          Name lua
          Match kube.*
          script /fluent-bit/etc/functions.lua
          call reassemble_cri_logs
      [FILTER]
          Name kubernetes
          Match kube.*
          Merge_Log On
          Keep_Log Off
          K8S-Logging.Parser On
          K8S-Logging.Exclude On
      [FILTER]
          Name lua
          Match kube.*
          script /fluent-bit/etc/functions.lua
          call dedot
    service: |
      [SERVICE]
          Flush 5
          Daemon Off
          Log_Level warn
          Parsers_File parsers.conf
          Parsers_File custom_parsers.conf
          HTTP_Server On
          HTTP_Listen 0.0.0.0
          HTTP_Port 2020
    lua: |
      -- Replaces dots in kubernetes annotation/label keys with underscores so
      -- Elasticsearch does not interpret them as nested fields.
      function dedot(tag, timestamp, record)
          if record["kubernetes"] == nil then
              return 0, 0, 0
          end
          dedot_keys(record["kubernetes"]["annotations"])
          dedot_keys(record["kubernetes"]["labels"])
          return 1, timestamp, record
      end

      function dedot_keys(map)
          if map == nil then
              return
          end
          local new_map = {}
          local changed_keys = {}
          for k, v in pairs(map) do
              local dedotted = string.gsub(k, "%.", "_")
              if dedotted ~= k then
                  new_map[dedotted] = v
                  changed_keys[k] = true
              end
          end
          for k in pairs(changed_keys) do
              map[k] = nil
          end
          for k, v in pairs(new_map) do
              map[k] = v
          end
      end

      local reassemble_state = {}

      -- Re-joins CRI partial log lines (logtag == 'P') into a single record.
      function reassemble_cri_logs(tag, timestamp, record)
          -- IMPORTANT: reassemble_key must be unique for each parser stream
          -- otherwise entries from different sources will get mixed up.
          -- Either make sure that your parser tags satisfy this or construct
          -- reassemble_key some other way
          local reassemble_key = tag
          -- if partial line, accumulate
          -- (parentheses required: Lua's ".." binds tighter than "or", so the
          -- unparenthesized form dropped fragments once state existed)
          if record.logtag == 'P' then
              reassemble_state[reassemble_key] = (reassemble_state[reassemble_key] or "") .. record.message
              return -1, 0, 0
          end
          -- otherwise it's a full line, concatenate with accumulated partial lines if any
          record.message = (reassemble_state[reassemble_key] or "") .. (record.message or "")
          reassemble_state[reassemble_key] = nil
          return 1, timestamp, record
      end

  serviceMonitor:
    enabled: true
    namespace: monitoring
    selector:
      release: metrics

  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule