Compare commits

...

13 Commits

63 changed files with 3897 additions and 149 deletions

View File

@@ -6,6 +6,8 @@ set -x
ARTIFACTS=($(echo $1 | tr "," "\n"))
ACTION=${2:-apply}
LOCAL_DEV=1
#VERSION="latest"
KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
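The new `KUBE_VERSION` line pins chart rendering to whatever the live cluster reports; it feeds the `--kube-version` flag used by the `helm template` calls in the library script below. A quick check of what that jq filter returns (the version string is only an example):

```sh
kubectl version -o json | jq -r .serverVersion.gitVersion
# e.g. v1.28.9
```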

View File

@@ -3,6 +3,8 @@
# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget"
export HELM_SECRETS_BACKEND="vals"
#VERSION="latest"
VERSION="v1.28"
@@ -19,7 +21,11 @@ function wait_for() {
function chart_location() {
echo "$1 --repo https://cdn.zero-downtime.net/charts"
if [ -n "$LOCAL_DEV" ]; then
echo $CHARTS/$1
else
echo "$1 --repo https://cdn.zero-downtime.net/charts"
fi
}
@@ -105,8 +111,8 @@ function delete_ns() {
# Extract crds via helm calls and apply delta=crds only
function _crds() {
helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds --set ${module}.installCRDs=false -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-no-crds.yaml
helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds --set ${module}.installCRDs=true -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-crds.yaml
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds --set ${module}.installCRDs=false -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-no-crds.yaml
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds --set ${module}.installCRDs=true -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-crds.yaml
diff -e $WORKDIR/helm-no-crds.yaml $WORKDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $WORKDIR/crds.yaml
# Only apply if there are actually any crds
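The CRD delta works by rendering the chart twice, once without and once with CRDs, and letting `diff -e` (ed-script output) carry only the added documents; `head -n-1` drops the trailing `.` and `tail -n+2` drops the leading append command. A toy sketch of that ed-script shape (file contents are made up):

```sh
printf 'a\nb\n'        > helm-no-crds.yaml
printf 'a\nb\nc\nd\n'  > helm-crds.yaml

diff -e helm-no-crds.yaml helm-crds.yaml
# 2a
# c
# d
# .
# head -n-1 | tail -n+2 then leaves only "c" and "d",
# i.e. the documents that exist solely in the CRD-enabled render.
```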
@@ -120,7 +126,7 @@ function _crds() {
# helm template | kubectl apply -f -
# confine to one namespace if possible
function render() {
helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
| python3 -c '
#!/usr/bin/python3
import yaml
@@ -169,9 +175,6 @@ function _helm() {
render
kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
# Try again without server-side, review with 1.26, required for cert-manager during 1.25
[ $rc -ne 0 ] && kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?
# Optional post hook
declare -F ${module}-post && ${module}-post
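The switch from plain `helm template` to `helm secrets --evaluate-templates template` means the helm-secrets plugin (with the `vals` backend set at the top of this script) resolves `ref+`/`secretref+` secret references while rendering, instead of passing them through literally. A minimal sketch of the pattern, with a made-up chart and value:

```sh
export HELM_SECRETS_BACKEND=vals

# values.yaml (hypothetical):
#   adminPassword: "ref+file://secrets.yaml#/test"

helm secrets --evaluate-templates template myrelease ./mychart -f values.yaml
# renders with adminPassword already replaced by the value behind the ref+ URI
```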

View File

@@ -1,6 +1,6 @@
# kubezero-argo
![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square)
![Version: 0.2.2](https://img.shields.io/badge/Version-0.2.2-informational?style=flat-square)
KubeZero Argo - Events, Workflow, CD
@@ -18,23 +18,22 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 6.7.10 |
| https://argoproj.github.io/argo-helm | argo-cd | 6.9.2 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.4 |
| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.0 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.9.6 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.10.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.applicationSet.enabled | bool | `false` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.27 - Release notes"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.28 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.27"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.28"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
@@ -50,13 +49,37 @@ Kubernetes: `>= 1.26.0`
| argo-cd.controller.resources.requests.memory | string | `"512Mi"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
| argo-cd.global.image.tag | string | `"v2.11.0"` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.istio.enabled | bool | `false` | |
| argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.repoServer.clusterRoleRules.enabled | bool | `true` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].apiGroups[0] | string | `""` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].resources[0] | string | `"secrets"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[0] | string | `"get"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[1] | string | `"watch"` | |
| argo-cd.repoServer.clusterRoleRules.rules[0].verbs[2] | string | `"list"` | |
| argo-cd.repoServer.initContainers[0].command[0] | string | `"/usr/local/bin/sa2kubeconfig.sh"` | |
| argo-cd.repoServer.initContainers[0].command[1] | string | `"/home/argocd/.kube/config"` | |
| argo-cd.repoServer.initContainers[0].image | string | `"public.ecr.aws/zero-downtime/zdt-argocd:v2.11.0"` | |
| argo-cd.repoServer.initContainers[0].imagePullPolicy | string | `"IfNotPresent"` | |
| argo-cd.repoServer.initContainers[0].name | string | `"create-kubeconfig"` | |
| argo-cd.repoServer.initContainers[0].securityContext.allowPrivilegeEscalation | bool | `false` | |
| argo-cd.repoServer.initContainers[0].securityContext.capabilities.drop[0] | string | `"ALL"` | |
| argo-cd.repoServer.initContainers[0].securityContext.readOnlyRootFilesystem | bool | `true` | |
| argo-cd.repoServer.initContainers[0].securityContext.runAsNonRoot | bool | `true` | |
| argo-cd.repoServer.initContainers[0].securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
| argo-cd.repoServer.initContainers[0].volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
| argo-cd.repoServer.initContainers[0].volumeMounts[0].name | string | `"kubeconfigs"` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.repoServer.volumeMounts[0].mountPath | string | `"/home/argocd/.kube"` | |
| argo-cd.repoServer.volumeMounts[0].name | string | `"kubeconfigs"` | |
| argo-cd.repoServer.volumes[0].emptyDir | object | `{}` | |
| argo-cd.repoServer.volumes[0].name | string | `"kubeconfigs"` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
@@ -87,6 +110,7 @@ Kubernetes: `>= 1.26.0`
| argocd-image-updater.sshConfig.config | string | `"Host *\n PubkeyAcceptedAlgorithms +ssh-rsa\n HostkeyAlgorithms +ssh-rsa\n"` | |
## Resources
- https://github.com/argoproj/argoproj/blob/main/docs/end_user_threat_model.pdf
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

View File

@@ -16,6 +16,7 @@
{{ template "chart.valuesSection" . }}
## Resources
- https://github.com/argoproj/argoproj/blob/main/docs/end_user_threat_model.pdf
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

View File

@@ -0,0 +1,22 @@
# KubeZero secrets
#
test: supergeheim
secrets:
- name: argocd-secret
optional: false
data:
admin.password: test
admin.passwordMtime: now
server.secretkey: boohoo
- name: zero-downtime-gitea
optional: true
data:
name: zero-downtime-gitea
type: git
url: ssh://git@git.zero-downtime.net/quark/kube-grandnagus.git
sshPrivateKey: |
boohooKey
metadata:
labels:
argocd.argoproj.io/secret-type: repository
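This new secrets file is what the `ref+file://secrets.yaml#/test` comment further down in the argo-cd values points at: vals opens the YAML file and follows the `#/test` fragment as a path to the value. A small way to check the resolution by hand, assuming `vals` is installed and run from the directory containing `secrets.yaml`:

```sh
echo 'password: ref+file://secrets.yaml#/test' | vals eval -f -
# password: supergeheim
```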

View File

@@ -36,19 +36,16 @@ argocd-apps:
projects: {}
applications: {}
argo-cd:
enabled: false
#configs:
# secret:
# `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
global:
logging:
format: json
# image:
# tag: v2.1.6
image:
repository: public.ecr.aws/zero-downtime/zdt-argocd
tag: v2.11.0
configs:
styles: |
@@ -94,6 +91,10 @@ argo-cd:
secret:
createSecret: false
# `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPassword: "ref+file://secrets.yaml#/test"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
ssh:
extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="
@@ -125,6 +126,41 @@ argo-cd:
serviceMonitor:
enabled: true
volumes:
- name: kubeconfigs
emptyDir: {}
volumeMounts:
- mountPath: /home/argocd/.kube
name: kubeconfigs
# Allow vals to read internal secrets across all namespaces
clusterRoleRules:
enabled: true
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "watch", "list"]
initContainers:
- name: create-kubeconfig
image: public.ecr.aws/zero-downtime/zdt-argocd:v2.11.0
imagePullPolicy: IfNotPresent
command:
- /usr/local/bin/sa2kubeconfig.sh
- /home/argocd/.kube/config
volumeMounts:
- mountPath: /home/argocd/.kube
name: kubeconfigs
securityContext:
runAsNonRoot: true
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
seccompProfile:
type: RuntimeDefault
capabilities:
drop:
- ALL
server:
# Rename former https port to grpc, works with istio + insecure
service:
@@ -141,9 +177,6 @@ argo-cd:
dex:
enabled: false
applicationSet:
enabled: false
notifications:
enabled: false
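Taken together, the new repoServer pieces (the `kubeconfigs` emptyDir, the `create-kubeconfig` init container running `sa2kubeconfig.sh`, and the cluster-wide `get`/`watch`/`list` on Secrets) are there so that vals can resolve in-cluster `secretref+k8s://...` references while the repo server renders charts. Roughly the equivalent manual lookup for the sharedKey reference used by the telemetry chart (kubectl shown purely for illustration):

```sh
# secretref+k8s://v1/Secret/kube-system/kubezero-secrets/telemetry.fluentd.source.sharedKey
kubectl get secret kubezero-secrets -n kube-system \
  -o jsonpath='{.data.telemetry\.fluentd\.source\.sharedKey}' | base64 -d
```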

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-lib
description: KubeZero helm library - common helm functions and blocks
type: library
version: 0.1.6
version: 0.2.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -43,3 +43,16 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}
{{- /*
kubezero-lib.util.merge will merge two YAML templates and output the result.
This takes an array of three values:
- the top context
- the template name of the overrides (destination)
- the template name of the base (source)
*/}}
{{- define "kubezero-lib.util.merge" -}}
{{- $top := first . -}}
{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
{{- $tpl := fromYaml (include (index . 2) $top) | default (dict ) -}}
{{- toYaml (merge $overrides $tpl) -}}
{{- end -}}
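A minimal sketch of how a chart could call the new `kubezero-lib.util.merge` helper; the `mychart.*` template names and the Service content are made up, while the call shape (top context plus two template names) follows the comment above:

```yaml
{{- define "mychart.service.base" -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-svc
spec:
  type: ClusterIP
  ports:
    - port: 80
{{- end -}}

{{- define "mychart.service.overrides" -}}
spec:
  type: NodePort
{{- end -}}

{{- /* keys from the overrides template win over the base template */ -}}
{{ include "kubezero-lib.util.merge" (list . "mychart.service.overrides" "mychart.service.base") }}
```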

View File

@@ -240,7 +240,14 @@ fluent-bit:
#dnsPolicy: ClusterFirstWithHostNet
tolerations:
- effect: NoSchedule
- key: kubezero-workergroup
effect: NoSchedule
operator: Exists
- key: nvidia.com/gpu
effect: NoSchedule
operator: Exists
- key: aws.amazon.com/neuron
effect: NoSchedule
operator: Exists
resources:

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network
description: KubeZero umbrella chart for all things network
type: application
version: 0.5.1
version: 0.5.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -19,11 +19,15 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: cilium
version: 1.15.3
version: 1.15.5
repository: https://helm.cilium.io/
condition: cilium.enabled
- name: metallb
version: 0.14.4
version: 0.14.5
repository: https://metallb.github.io/metallb
condition: metallb.enabled
- name: haproxy
version: 1.22.0
repository: https://haproxytech.github.io/helm-charts
condition: haproxy.enabled
kubeVersion: ">= 1.26.0"

View File

@@ -1,6 +1,6 @@
# kubezero-network
![Version: 0.5.1](https://img.shields.io/badge/Version-0.5.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.5.2](https://img.shields.io/badge/Version-0.5.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things network
@@ -19,8 +19,9 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://helm.cilium.io/ | cilium | 1.15.3 |
| https://metallb.github.io/metallb | metallb | 0.14.4 |
| https://haproxytech.github.io/helm-charts | haproxy | 1.22.0 |
| https://helm.cilium.io/ | cilium | 1.15.5 |
| https://metallb.github.io/metallb | metallb | 0.14.5 |
## Values
@@ -50,6 +51,8 @@ Kubernetes: `>= 1.26.0`
| cilium.operator.replicas | int | `1` | |
| cilium.operator.tolerations[0].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[1].key | string | `"node.cilium.io/agent-not-ready"` | |
| cilium.prometheus.enabled | bool | `false` | |
| cilium.prometheus.port | int | `9091` | |
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
@@ -57,6 +60,7 @@ Kubernetes: `>= 1.26.0`
| cilium.resources.requests.cpu | string | `"10m"` | |
| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.tunnelProtocol | string | `"geneve"` | |
| haproxy.enabled | bool | `false` | |
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |

View File

@@ -71,6 +71,7 @@ cilium:
enabled: false
hostRoot: "/sys/fs/cgroup"
routingMode: tunnel
tunnelProtocol: geneve
prometheus:
@@ -109,3 +110,6 @@ cilium:
group: cert-manager.io
kind: ClusterIssuer
name: kubezero-local-ca-issuer
haproxy:
enabled: false

View File

@@ -9,8 +9,9 @@ metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
volumeBindingMode: WaitForFirstConsumer
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
allowVolumeExpansion: true
parameters:
storage: lvm
vgpattern: {{ default "openebs.*" ( index .Values "lvm-localpv" "storageClass" "vgpattern") }}

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-telemetry
description: KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
type: application
version: 0.2.4
version: 0.3.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -18,15 +18,19 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: opentelemetry-collector
version: 0.91.0
version: 0.92.0
repository: https://open-telemetry.github.io/opentelemetry-helm-charts
condition: opentelemetry-collector.enabled
- name: jaeger
version: 3.0.7
version: 3.0.8
repository: https://jaegertracing.github.io/helm-charts
condition: jaeger.enabled
- name: fluentd
version: 0.5.2
repository: https://fluent.github.io/helm-charts
condition: fluentd.enabled
- name: fluent-bit
version: 0.46.2
repository: https://fluent.github.io/helm-charts
condition: fluent-bit.enabled
kubeVersion: ">= 1.26.0"

View File

@@ -1,6 +1,6 @@
# kubezero-telemetry
![Version: 0.2.3](https://img.shields.io/badge/Version-0.2.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for OpenTelemetry, Jaeger etc.
@@ -19,15 +19,89 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://fluent.github.io/helm-charts | fluent-bit | 0.46.2 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
| https://jaegertracing.github.io/helm-charts | jaeger | 3.0.3 |
| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.89.0 |
| https://jaegertracing.github.io/helm-charts | jaeger | 3.0.8 |
| https://open-telemetry.github.io/opentelemetry-helm-charts | opentelemetry-collector | 0.92.0 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| fluent-bit.config.customParsers | string | `"[PARSER]\n Name cri-log\n Format regex\n Regex ^(?<time>.+) (?<stream>stdout|stderr) (?<logtag>F|P) (?<log>.*)$\n Time_Key time\n Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n"` | |
| fluent-bit.config.filters | string | `"[FILTER]\n Name parser\n Match cri.*\n Parser cri-log\n Key_Name log\n\n[FILTER]\n Name kubernetes\n Match cri.*\n Merge_Log On\n Merge_Log_Key kube\n Kube_Tag_Prefix cri.var.log.containers.\n Keep_Log Off\n K8S-Logging.Parser Off\n K8S-Logging.Exclude Off\n Kube_Meta_Cache_TTL 3600s\n Buffer_Size 0\n #Use_Kubelet true\n\n{{- if index .Values \"config\" \"extraRecords\" }}\n\n[FILTER]\n Name record_modifier\n Match cri.*\n {{- range $k,$v := index .Values \"config\" \"extraRecords\" }}\n Record {{ $k }} {{ $v }}\n {{- end }}\n{{- end }}\n\n[FILTER]\n Name rewrite_tag\n Match cri.*\n Emitter_Name kube_tag_rewriter\n Rule $kubernetes['pod_id'] .* kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false\n\n[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/scripts/kubezero.lua\n call nest_k8s_ns\n"` | |
| fluent-bit.config.flushInterval | int | `5` | |
| fluent-bit.config.input.memBufLimit | string | `"16MB"` | |
| fluent-bit.config.input.refreshInterval | int | `5` | |
| fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n # Exclude ourselves to current error spam, https://github.com/fluent/fluent-bit/issues/5769\n Exclude_Path *logging-fluent-bit*\n multiline.parser cri\n Tag cri.*\n Skip_Long_Lines On\n Skip_Empty_Lines On\n DB /var/log/flb_kube.db\n DB.Sync Normal\n DB.locking true\n # Buffer_Max_Size 1M\n {{- with .Values.config.input }}\n Mem_Buf_Limit {{ default \"16MB\" .memBufLimit }}\n Refresh_Interval {{ default 5 .refreshInterval }}\n {{- end }}\n"` | |
| fluent-bit.config.logLevel | string | `"info"` | |
| fluent-bit.config.output.host | string | `"telemetry-fluentd"` | |
| fluent-bit.config.output.sharedKey | string | `"secretref+k8s://v1/Secret/kube-system/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
| fluent-bit.config.output.tls | bool | `false` | |
| fluent-bit.config.outputs | string | `"[OUTPUT]\n Match *\n Name forward\n Host {{ .Values.config.output.host }}\n Port 24224\n Shared_Key {{ .Values.config.output.sharedKey }}\n tls {{ ternary \"on\" \"off\" .Values.config.output.tls }}\n Send_options true\n Require_ack_response true\n"` | |
| fluent-bit.config.service | string | `"[SERVICE]\n Flush {{ .Values.config.flushInterval }}\n Daemon Off\n Log_Level {{ .Values.config.logLevel }}\n Parsers_File parsers.conf\n Parsers_File custom_parsers.conf\n HTTP_Server On\n HTTP_Listen 0.0.0.0\n HTTP_Port {{ .Values.service.port }}\n Health_Check On\n"` | |
| fluent-bit.daemonSetVolumeMounts[0].mountPath | string | `"/var/log"` | |
| fluent-bit.daemonSetVolumeMounts[0].name | string | `"varlog"` | |
| fluent-bit.daemonSetVolumeMounts[1].mountPath | string | `"/var/lib/containers/logs"` | |
| fluent-bit.daemonSetVolumeMounts[1].name | string | `"newlog"` | |
| fluent-bit.daemonSetVolumes[0].hostPath.path | string | `"/var/log"` | |
| fluent-bit.daemonSetVolumes[0].name | string | `"varlog"` | |
| fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/var/lib/containers/logs"` | |
| fluent-bit.daemonSetVolumes[1].name | string | `"newlog"` | |
| fluent-bit.enabled | bool | `false` | |
| fluent-bit.luaScripts."kubezero.lua" | string | `"function nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` | |
| fluent-bit.resources.limits.memory | string | `"128Mi"` | |
| fluent-bit.resources.requests.cpu | string | `"20m"` | |
| fluent-bit.resources.requests.memory | string | `"48Mi"` | |
| fluent-bit.serviceMonitor.enabled | bool | `false` | |
| fluent-bit.testFramework.enabled | bool | `false` | |
| fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[0].operator | string | `"Exists"` | |
| fluentd.configMapConfigs[0] | string | `"fluentd-prometheus-conf"` | |
| fluentd.dashboards.enabled | bool | `false` | |
| fluentd.enabled | bool | `false` | |
| fluentd.fileConfigs."00_system.conf" | string | `"<system>\n root_dir /fluentd/log\n log_level info\n ignore_repeated_log_interval 60s\n ignore_same_log_interval 60s\n workers 1\n</system>"` | |
| fluentd.fileConfigs."01_sources.conf" | string | `"<source>\n @type http\n @label @KUBERNETES\n port 9880\n bind 0.0.0.0\n keepalive_timeout 30\n</source>\n\n<source>\n @type forward\n @label @KUBERNETES\n port 24224\n bind 0.0.0.0\n # skip_invalid_event true\n send_keepalive_packet true\n <security>\n self_hostname \"telemetry-fluentd\"\n shared_key {{ .Values.source.sharedKey }}\n </security>\n</source>"` | |
| fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops, discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n # Exclude current fluent-bit multiline noise\n # Still relevant ??\n <filter kube.logging.fluent-bit>\n @type grep\n <exclude>\n key log\n pattern /could not append content to multiline context/\n </exclude>\n </filter>\n\n # Generate Hash ID to break endless loop for already ingested events during retries\n <filter **>\n @type opensearch_genid\n use_entire_record true\n </filter>\n\n # Route through DISPATCH for Prometheus metrics\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | |
| fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id out_os\n @type opensearch\n # @log_level debug\n include_tag_key true\n\n id_key _hash\n remove_keys _hash\n write_operation create\n\n # we have oj in the fluentd-concenter image\n prefer_oj_serializer true\n\n # KubeZero pipeline incl. GeoIP etc.\n #pipeline fluentd\n\n http_backend typhoeus\n ca_file /run/pki/ca.crt\n\n port 9200\n scheme https\n hosts {{ .Values.output.host }}\n user {{ .Values.output.user }}\n password {{ .Values.output.password }}\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 300s\n #sniffer_class_name Fluent::Plugin::OpenSearchSimpleSniffer\n\n #with_transporter_log true\n\n verify_es_version_at_startup false\n default_opensearch_version 2\n #suppress_type_name true\n\n # Retry failed bulk requests\n # https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types\n unrecoverable_error_types [\"out_of_memory_error\"]\n bulk_message_request_threshold 1048576\n\n <buffer>\n @type file\n\n flush_mode interval\n flush_thread_count 2\n flush_interval 10s\n\n chunk_limit_size 2MB\n total_limit_size 1GB\n\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 6h\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | |
| fluentd.image.repository | string | `"public.ecr.aws/zero-downtime/fluentd-concenter"` | |
| fluentd.image.tag | string | `"v1.16.5-1-g09dc31c"` | |
| fluentd.istio.enabled | bool | `false` | |
| fluentd.kind | string | `"StatefulSet"` | |
| fluentd.metrics.serviceMonitor.enabled | bool | `false` | |
| fluentd.mountDockerContainersDirectory | bool | `false` | |
| fluentd.mountVarLogDirectory | bool | `false` | |
| fluentd.output.host | string | `"telemetry"` | |
| fluentd.output.password | string | `"admin"` | |
| fluentd.output.user | string | `"admin"` | |
| fluentd.persistence.enabled | bool | `true` | |
| fluentd.persistence.size | string | `"1Gi"` | |
| fluentd.persistence.storageClass | string | `""` | |
| fluentd.rbac.create | bool | `false` | |
| fluentd.replicaCount | int | `1` | |
| fluentd.resources.limits.memory | string | `"512Mi"` | |
| fluentd.resources.requests.cpu | string | `"200m"` | |
| fluentd.resources.requests.memory | string | `"256Mi"` | |
| fluentd.securityContext.capabilities.drop[0] | string | `"ALL"` | |
| fluentd.securityContext.runAsNonRoot | bool | `true` | |
| fluentd.securityContext.runAsUser | int | `100` | |
| fluentd.service.ports[0].containerPort | int | `24224` | |
| fluentd.service.ports[0].name | string | `"tcp-forward"` | |
| fluentd.service.ports[0].protocol | string | `"TCP"` | |
| fluentd.service.ports[1].containerPort | int | `9880` | |
| fluentd.service.ports[1].name | string | `"http-fluentd"` | |
| fluentd.service.ports[1].protocol | string | `"TCP"` | |
| fluentd.source.sharedKey | string | `"secretref+k8s://v1/Secret/kube-system/kubezero-secrets/telemetry.fluentd.source.sharedKey"` | |
| fluentd.volumeMounts[0].mountPath | string | `"/run/pki"` | |
| fluentd.volumeMounts[0].name | string | `"trust-store"` | |
| fluentd.volumeMounts[0].readOnly | bool | `true` | |
| fluentd.volumes[0].name | string | `"trust-store"` | |
| fluentd.volumes[0].secret.items[0].key | string | `"tls.crt"` | |
| fluentd.volumes[0].secret.items[0].path | string | `"ca.crt"` | |
| fluentd.volumes[0].secret.secretName | string | `"telemetry-nodes-http-tls"` | |
| jaeger.agent.enabled | bool | `false` | |
| jaeger.collector.extraEnv[0].name | string | `"ES_TAGS_AS_FIELDS_ALL"` | |
| jaeger.collector.extraEnv[0].value | string | `"true"` | |
| jaeger.collector.service.otlp.grpc.name | string | `"otlp-grpc"` | |
| jaeger.collector.service.otlp.grpc.port | int | `4317` | |
| jaeger.collector.service.otlp.http.name | string | `"otlp-http"` | |
@@ -54,9 +128,10 @@ Kubernetes: `>= 1.26.0`
| opensearch.dashboard.istio.url | string | `"telemetry-dashboard.example.com"` | |
| opensearch.nodeSets | list | `[]` | |
| opensearch.prometheus | bool | `false` | |
| opensearch.version | string | `"2.13.0"` | |
| opensearch.version | string | `"2.14.0"` | |
| opentelemetry-collector.enabled | bool | `false` | |
| opentelemetry-collector.mode | string | `"deployment"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
## Resources
- https://opensearch.org/docs/latest/dashboards/branding/#condensed-header

View File

@@ -0,0 +1,20 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
{{ template "chart.maintainersSection" . }}
{{ template "chart.sourcesSection" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
## Resources
- https://opensearch.org/docs/latest/dashboards/branding/#condensed-header

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,27 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Updated _Fluent Bit_ OCI image to [v3.0.2](https://github.com/fluent/fluent-bit/releases/tag/v3.0.2)."
apiVersion: v1
appVersion: 3.0.2
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/fluentd/fluentbit/icon/fluentbit-icon-color.svg
keywords:
- logging
- fluent-bit
- fluentd
maintainers:
- email: eduardo@calyptia.com
name: edsiper
- email: naseem@transit.app
name: naseemkullah
- email: towmeykaw@gmail.com
name: Towmeykaw
- email: steve.hipwell@gmail.com
name: stevehipwell
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.46.2

View File

@@ -0,0 +1,57 @@
# Fluent Bit Helm chart
[Fluent Bit](https://fluentbit.io) is a fast and lightweight log processor and forwarder for Linux, OSX and BSD family operating systems.
## Installation
To add the `fluent` helm repo, run:
```sh
helm repo add fluent https://fluent.github.io/helm-charts
```
To install a release named `fluent-bit`, run:
```sh
helm install fluent-bit fluent/fluent-bit
```
## Chart values
```sh
helm show values fluent/fluent-bit
```
## Using Lua scripts
Fluent Bit allows us to build filters that modify the incoming records using custom [Lua scripts](https://docs.fluentbit.io/manual/pipeline/filters/lua).
### How to use Lua scripts with this Chart
First, you should add your Lua scripts to `luaScripts` in values.yaml, for example:
```yaml
luaScripts:
filter_example.lua: |
function filter_name(tag, timestamp, record)
-- put your lua code here.
end
```
After that, the Lua scripts are ready to be used as filters. The next step is to add your Fluent Bit [filter](https://docs.fluentbit.io/manual/concepts/data-pipeline/filter) to `config.filters` in values.yaml, for example:
```yaml
config:
filters: |
[FILTER]
Name lua
Match <your-tag>
script /fluent-bit/scripts/filter_example.lua
call filter_name
```
Under the hood, the chart will:
- Create a ConfigMap from `luaScripts`.
- Add a volume mount so each Lua script is available at `/fluent-bit/scripts/<script>`.
- Add the Lua scripts' ConfigMap as a volume to the pod, as sketched below.
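Roughly, the pod spec pieces the chart renders for this (names assume a release called `fluent-bit`) look like:

```yaml
# the ConfigMap <release>-luascripts holds the scripts; the pod mounts it once
volumeMounts:
  - name: luascripts
    mountPath: /fluent-bit/scripts
volumes:
  - name: luascripts
    configMap:
      name: fluent-bit-luascripts
```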
### Note
Remember to set the `script` attribute in the filter using the `/fluent-bit/scripts/` path, otherwise Fluent Bit will not find the file.

View File

@@ -0,0 +1,7 @@
testFramework:
enabled: true
logLevel: debug
dashboards:
enabled: true

File diff suppressed because it is too large

View File

@@ -0,0 +1,6 @@
Get Fluent Bit build information by running these commands:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "fluent-bit.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 2020:2020
curl http://127.0.0.1:2020

View File

@@ -0,0 +1,138 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "fluent-bit.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "fluent-bit.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "fluent-bit.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "fluent-bit.labels" -}}
helm.sh/chart: {{ include "fluent-bit.chart" . }}
{{ include "fluent-bit.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "fluent-bit.selectorLabels" -}}
app.kubernetes.io/name: {{ include "fluent-bit.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "fluent-bit.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "fluent-bit.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Fluent-bit image with tag/digest
*/}}
{{- define "fluent-bit.image" -}}
{{- $tag := ternary "" (printf ":%s" (toString .tag)) (or (empty .tag) (eq "-" (toString .tag))) -}}
{{- $digest := ternary "" (printf "@%s" .digest) (empty .digest) -}}
{{- printf "%s%s%s" .repository $tag $digest -}}
{{- end -}}
{{/*
Ingress ApiVersion according k8s version
*/}}
{{- define "fluent-bit.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1
{{- else if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") (semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion) -}}
networking.k8s.io/v1beta1
{{- else -}}
extensions/v1beta1
{{- end }}
{{- end }}
{{/*
Return if ingress is stable.
*/}}
{{- define "fluent-bit.ingress.isStable" -}}
{{- eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}
{{/*
Return if ingress supports ingressClassName.
*/}}
{{- define "fluent-bit.ingress.supportsIngressClassName" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Return if ingress supports pathType.
*/}}
{{- define "fluent-bit.ingress.supportsPathType" -}}
{{- or (eq (include "fluent-bit.ingress.isStable" .) "true") (and (eq (include "fluent-bit.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}}
{{- end -}}
{{/*
Pdb apiVersion according k8s version and capabilities
*/}}
{{- define "fluent-bit.pdb.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion) -}}
policy/v1
{{- else -}}
policy/v1beta1
{{- end }}
{{- end -}}
{{/*
HPA ApiVersion according k8s version
Check legacy first so helm template / kustomize will default to latest version
*/}}
{{- define "fluent-bit.hpa.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "autoscaling/v2beta2") (semverCompare "<1.23-0" .Capabilities.KubeVersion.GitVersion) -}}
autoscaling/v2beta2
{{- else -}}
autoscaling/v2
{{- end -}}
{{- end -}}
{{/*
Create the name of OpenShift SecurityContextConstraints to use
*/}}
{{- define "fluent-bit.openShiftSccName" -}}
{{- if not .Values.openShift.securityContextConstraints.create -}}
{{- printf "%s" .Values.openShift.securityContextConstraints.existingName -}}
{{- else -}}
{{- printf "%s" (default (include "fluent-bit.fullname" .) .Values.openShift.securityContextConstraints.name) -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,151 @@
{{- define "fluent-bit.pod" -}}
serviceAccountName: {{ include "fluent-bit.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
hostNetwork: {{ .Values.hostNetwork }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.hostAliases }}
hostAliases:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 2 }}
{{- else }}
{{- toYaml . | nindent 2 }}
{{- end -}}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
{{- end }}
image: {{ include "fluent-bit.image" (merge .Values.image (dict "tag" (default .Chart.AppVersion .Values.image.tag))) | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or .Values.env .Values.envWithTpl }}
env:
{{- with .Values.env }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- range $item := .Values.envWithTpl }}
- name: {{ $item.name }}
value: {{ tpl $item.value $ | quote }}
{{- end }}
{{- end }}
{{- if .Values.envFrom }}
envFrom:
{{- toYaml .Values.envFrom | nindent 6 }}
{{- end }}
{{- with .Values.command }}
command:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- if or .Values.args .Values.hotReload.enabled }}
args:
{{- toYaml .Values.args | nindent 6 }}
{{- if .Values.hotReload.enabled }}
- --enable-hot-reload
{{- end }}
{{- end}}
ports:
- name: http
containerPort: {{ .Values.metricsPort }}
protocol: TCP
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
containerPort: {{ .containerPort }}
protocol: {{ .protocol }}
{{- end }}
{{- end }}
{{- with .Values.lifecycle }}
lifecycle:
{{- toYaml . | nindent 6 }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 6 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 6 }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/conf
{{- if or .Values.luaScripts .Values.hotReload.enabled }}
- name: luascripts
mountPath: /fluent-bit/scripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 6 }}
{{- end }}
{{- if .Values.hotReload.enabled }}
- name: reloader
image: {{ include "fluent-bit.image" .Values.hotReload.image }}
args:
- {{ printf "-webhook-url=http://localhost:%s/api/v2/reload" (toString .Values.metricsPort) }}
- -volume-dir=/watch/config
- -volume-dir=/watch/scripts
volumeMounts:
- name: config
mountPath: /watch/config
- name: luascripts
mountPath: /watch/scripts
{{- with .Values.hotReload.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.extraContainers }}
{{- toYaml .Values.extraContainers | nindent 2 }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ default (include "fluent-bit.fullname" .) .Values.existingConfigMap }}
{{- if or .Values.luaScripts .Values.hotReload.enabled }}
- name: luascripts
configMap:
name: {{ include "fluent-bit.fullname" . }}-luascripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumes | nindent 2 }}
{{- end }}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 2 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,45 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
{{- if .Values.rbac.nodeAccess }}
- nodes
- nodes/proxy
{{- end }}
{{- if .Values.rbac.eventsAccess }}
- events
{{- end }}
verbs:
- get
- list
- watch
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) }}
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ include "fluent-bit.fullname" . }}
verbs:
- use
{{- end }}
{{- if .Values.openShift.enabled }}
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- {{ include "fluent-bit.openShiftSccName" . }}
verbs:
- use
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "fluent-bit.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "fluent-bit.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}

View File

@@ -0,0 +1,21 @@
{{- if .Values.dashboards.enabled -}}
{{- range $path, $_ := .Files.Glob "dashboards/*.json" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" $ }}-dashboard-{{ trimSuffix ".json" (base $path) }}
namespace: {{ default $.Release.Namespace $.Values.dashboards.namespace }}
{{- with $.Values.dashboards.annotations }}
annotations:
{{- toYaml . | nindent 4 -}}
{{- end }}
labels:
{{- include "fluent-bit.labels" $ | nindent 4 }}
{{ $.Values.dashboards.labelKey }}: {{ $.Values.dashboards.labelValue | quote }}
data:
{{ include "fluent-bit.fullname" $ }}-{{ base $path }}: |
{{- tpl ($.Files.Get $path) $ | nindent 4 }}
---
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,13 @@
{{- if or .Values.luaScripts .Values.hotReload.enabled -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}-luascripts
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
{{ range $key, $value := .Values.luaScripts }}
{{ $key }}: {{ $value | quote }}
{{ end }}
{{- end -}}

View File

@@ -0,0 +1,25 @@
{{- if not .Values.existingConfigMap -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
custom_parsers.conf: |
{{- (tpl .Values.config.customParsers $) | nindent 4 }}
fluent-bit.conf: |
{{- (tpl .Values.config.service $) | nindent 4 }}
{{- (tpl .Values.config.inputs $) | nindent 4 }}
{{- (tpl .Values.config.filters $) | nindent 4 }}
{{- (tpl .Values.config.outputs $) | nindent 4 }}
{{- range $key, $val := .Values.config.upstream }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- range $key, $val := .Values.config.extraFiles }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,48 @@
{{- if eq .Values.kind "DaemonSet" }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.updateStrategy }}
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or (not .Values.hotReload.enabled) .Values.podAnnotations }}
annotations:
{{- if not .Values.hotReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.luaScripts }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- end }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@@ -0,0 +1,51 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
{{- with .Values.updateStrategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
labels:
{{- include "fluent-bit.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or (not .Values.hotReload.enabled) .Values.podAnnotations }}
annotations:
{{- if not .Values.hotReload.enabled }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.luaScripts }}
checksum/luascripts: {{ include (print $.Template.BasePath "/configmap-luascripts.yaml") . | sha256sum }}
{{- end }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- include "fluent-bit.pod" . | nindent 6 }}
{{- end }}

View File

@@ -0,0 +1,40 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.autoscaling.enabled }}
apiVersion: {{ include "fluent-bit.hpa.apiVersion" . }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
spec:
{{- if .Values.autoscaling.behavior }}
behavior:
{{- toYaml .Values.autoscaling.behavior | nindent 4 }}
{{- end }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "fluent-bit.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.customRules -}}
{{- toYaml .Values.autoscaling.customRules | nindent 4}}
{{- end -}}
{{- end }}

View File

@@ -0,0 +1,65 @@
{{- $ingressApiIsStable := eq (include "fluent-bit.ingress.isStable" .) "true" -}}
{{- $ingressSupportsIngressClassName := eq (include "fluent-bit.ingress.supportsIngressClassName" .) "true" -}}
{{- $ingressSupportsPathType := eq (include "fluent-bit.ingress.supportsPathType" .) "true" -}}
{{- $fullName := include "fluent-bit.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and ( eq .Values.kind "Deployment" ) .Values.ingress.enabled }}
apiVersion: {{ include "fluent-bit.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- range $key, $value := . }}
{{ printf "%s: %s" $key ((tpl $value $) | quote) }}
{{- end }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end -}}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
{{- with .secretName }}
secretName: {{ . }}
{{- end }}
{{- end }}
{{- end }}
rules:
{{- range concat .Values.ingress.hosts .Values.ingress.extraHosts }}
- host: {{ .host | quote }}
http:
paths:
- path: /
{{- if $ingressSupportsPathType }}
pathType: Prefix
{{- end }}
backend:
{{- if $ingressApiIsStable }}
service:
name: {{ $fullName }}
port:
{{- if .port }}
number: {{ .port }}
{{- else }}
number: {{ $svcPort }}
{{- end }}
{{- else }}
serviceName: {{ $fullName }}
{{- if .port }}
servicePort: {{ .port }}
{{- else }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,23 @@
{{- if .Values.networkPolicy.enabled }}
apiVersion: "networking.k8s.io/v1"
kind: "NetworkPolicy"
metadata:
name: {{ include "fluent-bit.fullname" . | quote }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
spec:
policyTypes:
- "Ingress"
podSelector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
ingress:
{{- with .Values.networkPolicy.ingress }}
- from:
{{- with .from }}{{- . | toYaml | nindent 8 }}{{- else }} []{{- end }}
ports:
- protocol: "TCP"
port: {{ $.Values.service.port }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,21 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.podDisruptionBudget.enabled }}
apiVersion: {{ include "fluent-bit.pdb.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.podDisruptionBudget.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,18 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.prometheusRule.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ default $.Release.Namespace .Values.prometheusRule.namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- if .Values.prometheusRule.additionalLabels }}
{{- toYaml .Values.prometheusRule.additionalLabels | nindent 4 }}
{{- end }}
spec:
{{- if .Values.prometheusRule.rules }}
groups:
- name: {{ template "fluent-bit.name" . }}
rules: {{- toYaml .Values.prometheusRule.rules | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,42 @@
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.podSecurityPolicy.annotations }}
annotations:
{{- toYaml .Values.podSecurityPolicy.annotations | nindent 4 }}
{{- end }}
spec:
privileged: false
# Required to prevent escalations to root.
allowPrivilegeEscalation: false
# This is redundant with non-root + disallow privilege escalation,
# but we can provide it for defense in depth.
requiredDropCapabilities:
- ALL
volumes:
- '*'
hostNetwork: {{ .Values.hostNetwork }}
hostIPC: false
hostPID: false
runAsUser:
# TODO: Require the container to run without root privileges.
rule: 'RunAsAny'
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
# Forbid adding the root group.
- min: 1
max: 65535
readOnlyRootFilesystem: false
{{- end }}

View File

@@ -0,0 +1,41 @@
{{- if and .Values.openShift.enabled .Values.openShift.securityContextConstraints.create }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: {{ include "fluent-bit.openShiftSccName" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.openShift.securityContextConstraints.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
allowPrivilegedContainer: true
allowPrivilegeEscalation: true
allowHostDirVolumePlugin: true
defaultAllowPrivilegeEscalation: false
# forbid host namespaces
allowHostNetwork: false
allowHostIPC: false
allowHostPorts: false
allowHostPID: false
allowedCapabilities: []
forbiddenSysctls:
- "*"
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- downwardAPI
- emptyDir
- hostPath
- persistentVolumeClaim
- projected
- secret
{{- end }}

View File

@@ -0,0 +1,54 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if (eq .Values.kind "DaemonSet") }}
{{- with .Values.service.internalTrafficPolicy }}
internalTrafficPolicy: {{ . }}
{{- end }}
{{- end }}
{{- if (eq .Values.service.type "LoadBalancer")}}
{{- with .Values.service.loadBalancerClass}}
loadBalancerClass: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges}}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if and (eq .Values.service.type "NodePort") (.Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if .Values.extraPorts }}
{{- range .Values.extraPorts }}
- name: {{ .name }}
targetPort: {{ .name }}
protocol: {{ .protocol }}
port: {{ .port }}
{{- if and (eq $.Values.service.type "NodePort") (.nodePort) }}
nodePort: {{ .nodePort }}
{{- end }}
{{- end }}
{{- end }}
selector:
{{- include "fluent-bit.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "fluent-bit.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,51 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "fluent-bit.fullname" . }}
namespace: {{ default .Release.Namespace .Values.serviceMonitor.namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.selector }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
jobLabel: app.kubernetes.io/instance
endpoints:
- port: http
path: {{ default "/api/v2/metrics/prometheus" .Values.serviceMonitor.path }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- if kindIs "string" . }}
{{- tpl . $ | nindent 8 }}
{{- else }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.serviceMonitor.scheme }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- end }}
{{- if .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.serviceMonitor.tlsConfig | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.additionalEndpoints }}
{{- toYaml . | nindent 4 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -0,0 +1,26 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "fluent-bit.fullname" . }}-test-connection"
namespace: {{ default .Release.Namespace .Values.testFramework.namespace }}
labels:
helm.sh/chart: {{ include "fluent-bit.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
annotations:
helm.sh/hook: test
helm.sh/hook-delete-policy: hook-succeeded
spec:
containers:
- name: wget
image: {{ include "fluent-bit.image" .Values.testFramework.image | quote }}
imagePullPolicy: {{ .Values.testFramework.image.pullPolicy }}
command: ["sh"]
args: ["-c", "wget -O- {{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}"]
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}
{{- end }}
restartPolicy: Never
{{- end }}

View File

@ -0,0 +1,39 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.autoscaling.vpa.enabled }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ include "fluent-bit.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.autoscaling.vpa.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
resourcePolicy:
containerPolicies:
- containerName: {{ .Chart.Name }}
{{- with .Values.autoscaling.vpa.controlledResources }}
controlledResources:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.maxAllowed }}
maxAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.minAllowed }}
minAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: {{ .Values.kind }}
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.autoscaling.vpa.updatePolicy }}
updatePolicy:
{{- with .Values.autoscaling.vpa.updatePolicy.updateMode }}
updateMode: {{ . }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,497 @@
# Default values for fluent-bit.
# kind -- DaemonSet or Deployment
kind: DaemonSet
# replicaCount -- Only applicable if kind=Deployment
replicaCount: 1
image:
repository: cr.fluentbit.io/fluent/fluent-bit
# Overrides the image tag whose default is {{ .Chart.AppVersion }}
# Set to "-" to not use the default value
tag:
digest:
pullPolicy: IfNotPresent
testFramework:
enabled: true
namespace:
image:
repository: busybox
pullPolicy: Always
tag: latest
digest:
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: true
annotations: {}
name:
rbac:
create: true
nodeAccess: false
eventsAccess: false
# Configure podsecuritypolicy
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
# from Kubernetes 1.25, PSP is deprecated
# See: https://kubernetes.io/blog/2022/08/23/kubernetes-v1-25-release/#pod-security-changes
# We automatically disable PSP if Kubernetes version is 1.25 or higher
podSecurityPolicy:
create: false
annotations: {}
# OpenShift-specific configuration
openShift:
enabled: false
securityContextConstraints:
# Create SCC for Fluent-bit and allow it to be used
create: true
name: ""
annotations: {}
# Use an existing SCC in the cluster, rather than creating a new one
existingName: ""
podSecurityContext: {}
# fsGroup: 2000
hostNetwork: false
dnsPolicy: ClusterFirst
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "2"
# - name: edns0
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "foo.local"
# - "bar.local"
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 2020
internalTrafficPolicy:
loadBalancerClass:
loadBalancerSourceRanges: []
labels: {}
# nodePort: 30020
# clusterIP: 172.16.10.1
annotations: {}
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
# prometheus.io/scrape: "true"
serviceMonitor:
enabled: false
# namespace: monitoring
# interval: 10s
# scrapeTimeout: 10s
# selector:
# prometheus: my-prometheus
# ## metric relabel configs to apply to samples before ingestion.
# ##
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# ## relabel configs to apply to samples after ingestion.
# ##
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
# scheme: ""
# tlsConfig: {}
## Bear in mind if you want to collect metrics from a different port
## you will need to configure the new ports on the extraPorts property.
additionalEndpoints: []
# - port: metrics
# path: /metrics
# interval: 10s
# scrapeTimeout: 10s
# scheme: ""
# tlsConfig: {}
# # metric relabel configs to apply to samples before ingestion.
# #
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# # relabel configs to apply to samples after ingestion.
# #
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
prometheusRule:
enabled: false
# namespace: ""
# additionalLabels: {}
# rules:
# - alert: NoOutputBytesProcessed
# expr: rate(fluentbit_output_proc_bytes_total[5m]) == 0
# annotations:
# message: |
# Fluent Bit instance {{ $labels.instance }}'s output plugin {{ $labels.name }} has not processed any
# bytes for at least 15 minutes.
# summary: No Output Bytes Processed
# for: 15m
# labels:
# severity: critical
dashboards:
enabled: false
labelKey: grafana_dashboard
labelValue: 1
annotations: {}
namespace: ""
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "sleep 20"]
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /api/v1/health
port: http
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## only available if kind is Deployment
ingress:
enabled: false
ingressClassName: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts: []
# - host: fluent-bit.example.tld
extraHosts: []
# - host: fluent-bit-extra.example.tld
## specify extraPort number
# port: 5170
tls: []
# - secretName: fluent-bit-example-tld
# hosts:
# - fluent-bit.example.tld
## only available if kind is Deployment
autoscaling:
vpa:
enabled: false
annotations: {}
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
updatePolicy:
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 75
# targetMemoryUtilizationPercentage: 75
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics
customRules: []
# - type: Pods
# pods:
# metric:
# name: packets-per-second
# target:
# type: AverageValue
# averageValue: 1k
## see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-configurable-scaling-behavior
behavior: {}
# scaleDown:
# policies:
# - type: Pods
# value: 4
# periodSeconds: 60
# - type: Percent
# value: 10
# periodSeconds: 60
## only available if kind is Deployment
podDisruptionBudget:
enabled: false
annotations: {}
maxUnavailable: "30%"
nodeSelector: {}
tolerations: []
affinity: {}
labels: {}
annotations: {}
podAnnotations: {}
podLabels: {}
## How long (in seconds) a pod needs to be stable before progressing the deployment
##
minReadySeconds:
## How long (in seconds) a pod may take to exit (useful with lifecycle hooks to ensure lb deregistration is done)
##
terminationGracePeriodSeconds:
priorityClassName: ""
env: []
# - name: FOO
# value: "bar"
# The envWithTpl array below has the same usage as "env", but is using the tpl function to support templatable string.
# This can be useful when you want to pass dynamic values to the Chart using the helm argument "--set <variable>=<value>"
# https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function
envWithTpl: []
# - name: FOO_2
# value: "{{ .Values.foo2 }}"
#
# foo2: bar2
envFrom: []
extraContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
flush: 1
metricsPort: 2020
extraPorts: []
# - port: 5170
# containerPort: 5170
# protocol: TCP
# name: tcp
# nodePort: 30517
extraVolumes: []
extraVolumeMounts: []
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxUnavailable: 1
# Make use of a pre-defined configmap instead of the one templated here
existingConfigMap: ""
networkPolicy:
enabled: false
# ingress:
# from: []
luaScripts: {}
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File /fluent-bit/etc/parsers.conf
Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port {{ .Values.metricsPort }}
Health_Check On
## https://docs.fluentbit.io/manual/pipeline/inputs
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
multiline.parser docker, cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.*
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Read_From_Tail On
## https://docs.fluentbit.io/manual/pipeline/filters
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
## https://docs.fluentbit.io/manual/pipeline/outputs
outputs: |
[OUTPUT]
Name es
Match kube.*
Host elasticsearch-master
Logstash_Format On
Retry_Limit False
[OUTPUT]
Name es
Match host.*
Host elasticsearch-master
Logstash_Format On
Logstash_Prefix node
Retry_Limit False
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers
## This configuration is deprecated, please use `extraFiles` instead.
upstream: {}
## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: |
[PARSER]
Name docker_no_time
Format json
Time_Keep Off
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
# This allows adding more files with arbitrary filenames to /fluent-bit/etc/conf by providing key/value pairs.
# The key becomes the filename, the value becomes the file content.
extraFiles: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
# example.conf: |
# [OUTPUT]
# Name example
# Match foo.*
# Host bar
# The config volume is mounted by default, either to the existingConfigMap value, or the default of "fluent-bit.fullname"
volumeMounts:
- name: config
mountPath: /fluent-bit/etc/conf
daemonSetVolumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: etcmachineid
hostPath:
path: /etc/machine-id
type: File
daemonSetVolumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: etcmachineid
mountPath: /etc/machine-id
readOnly: true
command:
- /fluent-bit/bin/fluent-bit
args:
- --workdir=/fluent-bit/etc
- --config=/fluent-bit/etc/conf/fluent-bit.conf
# This supports either a structured array or a templatable string
initContainers: []
# Array mode
# initContainers:
# - name: do-something
# image: bitnami/kubectl:1.22
# command: ['kubectl', 'version']
# String mode
# initContainers: |-
# - name: do-something
# image: bitnami/kubectl:{{ .Capabilities.KubeVersion.Major }}.{{ .Capabilities.KubeVersion.Minor }}
# command: ['kubectl', 'version']
logLevel: info
hotReload:
enabled: false
image:
repository: ghcr.io/jimmidyson/configmap-reload
tag: v0.11.1
digest:
pullPolicy: IfNotPresent
resources: {}
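Several of the options above (replicaCount, ingress, autoscaling, podDisruptionBudget) only take effect when kind is switched from DaemonSet to Deployment. A minimal, illustrative override combining them (the hostname is a placeholder):

```yaml
kind: Deployment
replicaCount: 2
ingress:
  enabled: true
  hosts:
    - host: fluent-bit.example.tld
podDisruptionBudget:
  enabled: true
  maxUnavailable: "30%"
```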

View File

@ -9,7 +9,7 @@ metadata:
data:
{{- range $key, $value := .Values.fileConfigs }}
{{$key }}: |-
{{- $value | nindent 4 }}
{{- (tpl $value $) | nindent 4 }}
{{- end }}
{{- end }}
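Because the values are now passed through `tpl`, entries under `fileConfigs` can reference other chart values before they land in the ConfigMap. A minimal sketch of that pattern (the `source.sharedKey` key simply mirrors the usage appearing later in this diff; treat the whole snippet as illustrative):

```yaml
source:
  sharedKey: "my-forward-key"
fileConfigs:
  01_sources.conf: |-
    <source>
      @type forward
      port 24224
      <security>
        self_hostname "fluentd"
        shared_key {{ .Values.source.sharedKey }}
      </security>
    </source>
```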

View File

@ -4,6 +4,7 @@ fluentd is deployed with the default values
If the fluentd config is overriden and the metrics server removed
this will fail.
*/}}
{{- if .Values.testFramework.enabled }}
{{ if empty .Values.service.ports }}
apiVersion: v1
kind: Pod
@ -26,4 +27,5 @@ spec:
while :; do nc -vz {{ include "fluentd.fullname" . }}:24231 && break; sleep 1; done
wget '{{ include "fluentd.fullname" . }}:24231/metrics'
restartPolicy: Never
{{ end }}
{{ end }}
{{- end }}

View File

@ -13,6 +13,9 @@ image:
pullPolicy: "IfNotPresent"
tag: ""
testFramework:
enabled: false
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
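Since the new flag defaults to false, the connection-test Pod above is skipped unless it is explicitly enabled, e.g. via an override like:

```yaml
testFramework:
  enabled: true
```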

View File

@ -1,6 +1,7 @@
configmap: grafana-dashboards
gzip: true
folder: Telemetry
#condition: '.Values.telemetry.metrics.enabled'
dashboards:
- name: jaeger
url: https://grafana.com/api/dashboards/10001/revisions/2/download

View File

@ -0,0 +1,44 @@
diff -rtuN charts/fluentd.orig/templates/fluentd-configurations-cm.yaml charts/fluentd/templates/fluentd-configurations-cm.yaml
--- charts/fluentd.orig/templates/fluentd-configurations-cm.yaml 2024-04-08 11:00:03.030515998 +0000
+++ charts/fluentd/templates/fluentd-configurations-cm.yaml 2024-04-08 11:00:03.040516045 +0000
@@ -9,7 +9,7 @@
data:
{{- range $key, $value := .Values.fileConfigs }}
{{$key }}: |-
- {{- $value | nindent 4 }}
+ {{- (tpl $value $) | nindent 4 }}
{{- end }}
{{- end }}
diff -rtuN charts/fluentd.orig/templates/tests/test-connection.yaml charts/fluentd/templates/tests/test-connection.yaml
--- charts/fluentd.orig/templates/tests/test-connection.yaml 2024-04-08 11:00:03.030515998 +0000
+++ charts/fluentd/templates/tests/test-connection.yaml 2024-04-08 11:03:16.254774985 +0000
@@ -4,6 +4,7 @@
If the fluentd config is overriden and the metrics server removed
this will fail.
*/}}
+{{- if .Values.testFramework.enabled }}
{{ if empty .Values.service.ports }}
apiVersion: v1
kind: Pod
@@ -26,4 +27,5 @@
while :; do nc -vz {{ include "fluentd.fullname" . }}:24231 && break; sleep 1; done
wget '{{ include "fluentd.fullname" . }}:24231/metrics'
restartPolicy: Never
-{{ end }}
\ No newline at end of file
+{{ end }}
+{{- end }}
diff -rtuN charts/fluentd.orig/values.yaml charts/fluentd/values.yaml
--- charts/fluentd.orig/values.yaml 2024-04-08 11:00:03.030515998 +0000
+++ charts/fluentd/values.yaml 2024-04-08 11:00:03.040516045 +0000
@@ -13,6 +13,9 @@
pullPolicy: "IfNotPresent"
tag: ""
+testFramework:
+ enabled: false
+
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []

View File

@ -1,3 +1,4 @@
{{- if .Values.opensearch.nodeSets }}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
@ -68,3 +69,4 @@ spec:
commonName: {{ template "kubezero-lib.fullname" . }}-admin
privateKey:
encoding: PKCS8
{{- end }}

View File

@ -1,5 +1,5 @@
#pluginsList: ["repository-s3","https://github.com/aiven/prometheus-exporter-plugin-for-opensearch/releases/download/2.11.1.0/prometheus-exporter-2.11.1.0.zip"]
{{- if .Values.opensearch.nodeSets }}
#pluginsList: ["repository-s3","https://github.com/aiven/prometheus-exporter-plugin-for-opensearch/releases/download/2.11.1.0/prometheus-exporter-2.11.1.0.zip"]
apiVersion: opensearch.opster.io/v1
kind: OpenSearchCluster
metadata:
@ -21,7 +21,6 @@ spec:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
fsGroup: 1000
securityContext:
allowPrivilegeEscalation: false
privileged: false
@ -38,6 +37,18 @@ spec:
limits:
memory: "1Gi"
#cpu: "200m"
podSecurityContext:
runAsNonRoot: true
fsGroup: 1000
securityContext:
capabilities:
drop:
- ALL
privileged: false
additionalConfig:
opensearchDashboards.branding.useExpandedHeader: "false"
opensearchDashboards.branding.applicationTitle: "KubeZero Dashboards"
opensearchDashboards.branding.mark.defaultUrl: "https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png"
{{- end }}
nodePools:
{{- range .Values.opensearch.nodeSets }}
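The Certificate above, this OpenSearchCluster, and the ISMPolicy below are all gated on `.Values.opensearch.nodeSets`, so none of them render until at least one node set is defined. A minimal sketch reusing the structure of the defaults removed elsewhere in this diff (storage class and zone are placeholders):

```yaml
opensearch:
  nodeSets:
    - name: default
      replicas: 2
      storage:
        size: 16Gi
        class: gp3        # placeholder storage class
      zone: us-west-2a    # placeholder zone
```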

View File

@ -0,0 +1,34 @@
{{- if .Values.opensearch.nodeSets }}
apiVersion: opensearch.opster.io/v1
kind: OpenSearchISMPolicy
metadata:
name: {{ template "kubezero-lib.fullname" . }}-ism-default-retention
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | nindent 4 }}
spec:
opensearchCluster:
name: {{ template "kubezero-lib.fullname" . }}
description: Default KubeZero retention policy
defaultState: hot
states:
- name: hot
actions:
- replicaCount:
numberOfReplicas: 1
transitions:
- stateName: warm
conditions:
minIndexAge: "7d"
- name: warm
actions:
- replicaCount:
numberOfReplicas: 0
transitions:
- stateName: delete
conditions:
minIndexAge: "32d"
- name: delete
actions:
- delete: {}
{{- end }}

View File

@ -6,11 +6,11 @@ set -ex
#login_ecr_public
update_helm
#FLUENT_BIT_VERSION=$(yq eval '.dependencies[] | select(.name=="fluent-bit") | .version' Chart.yaml)
FLUENT_BIT_VERSION=$(yq eval '.dependencies[] | select(.name=="fluent-bit") | .version' Chart.yaml)
FLUENTD_VERSION=$(yq eval '.dependencies[] | select(.name=="fluentd") | .version' Chart.yaml)
# fluent-bit
#patch_chart fluent-bit
patch_chart fluent-bit
# FluentD
patch_chart fluentd

View File

@ -1,79 +0,0 @@
opentelemetry-collector:
enabled: false
mode: deployment
jaeger:
enabled: false
agent:
enabled: false
collector:
service:
otlp:
grpc:
name: otlp-grpc
port: 4317
http:
name: otlp-http
port: 4318
serviceMonitor:
enabled: false
# https://www.jaegertracing.io/docs/1.53/deployment/#collector
storage:
type: elasticsearch
elasticsearch:
scheme: https
host: telemetry
user: admin
password: admin
cmdlineParams:
es.tls.enabled: ""
es.tls.skip-host-verify: ""
provisionDataStore:
cassandra: false
elasticsearch: false
query:
agentSidecar:
enabled: false
serviceMonitor:
enabled: false
istio:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: jaeger.example.com
opensearch:
version: 2.11.1
prometheus: false
# custom cluster settings
#settings:
# index.number_of_shards: 1
nodeSets:
- name: default
replicas: 2
storage:
size: 16Gi
class: my-fancy-SSDs
zone: us-west-2a
resources:
limits:
#cpu: 1
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
dashboard:
enabled: false
istio:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: telemetry-dashboard.example.com

View File

@ -52,7 +52,7 @@ jaeger:
url: jaeger.example.com
opensearch:
version: 2.13.0
version: 2.14.0
prometheus: false
# custom cluster settings
@ -80,3 +80,382 @@ opensearch:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: telemetry-dashboard.example.com
# New logging pipeline
fluentd:
enabled: false
image:
repository: public.ecr.aws/zero-downtime/fluentd-concenter
tag: v1.16.5-1-g09dc31c
istio:
enabled: false
kind: StatefulSet
replicaCount: 1
#plugins:
#- fluent-plugin-s3
source:
sharedKey: secretref+k8s://v1/Secret/kube-system/kubezero-secrets/telemetry.fluentd.source.sharedKey # "cloudbender"
output:
# Defaults to OpenSearch in same namespace
host: telemetry
user: admin
password: admin
service:
ports:
- name: tcp-forward
protocol: TCP
containerPort: 24224
- name: http-fluentd
protocol: TCP
containerPort: 9880
metrics:
serviceMonitor:
enabled: false
dashboards:
enabled: false
# No need for docker nor /var/log
mountVarLogDirectory: false
mountDockerContainersDirectory: false
# no rbac required until we need WebIdentity auth, e.g. for S3
rbac:
create: false
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
memory: 512Mi
persistence:
enabled: true
storageClass: ""
size: 1Gi
volumes:
- name: trust-store
secret:
secretName: telemetry-nodes-http-tls
items:
- key: tls.crt
path: ca.crt
volumeMounts:
- name: trust-store
mountPath: "/run/pki"
readOnly: true
securityContext:
capabilities:
drop:
- ALL
#readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 100
configMapConfigs:
- fluentd-prometheus-conf
fileConfigs:
00_system.conf: |-
<system>
root_dir /fluentd/log
log_level info
ignore_repeated_log_interval 60s
ignore_same_log_interval 60s
workers 1
</system>
01_sources.conf: |-
<source>
@type http
@label @KUBERNETES
port 9880
bind 0.0.0.0
keepalive_timeout 30
</source>
<source>
@type forward
@label @KUBERNETES
port 24224
bind 0.0.0.0
# skip_invalid_event true
send_keepalive_packet true
<security>
self_hostname "telemetry-fluentd"
shared_key {{ .Values.source.sharedKey }}
</security>
</source>
02_filters.conf: |-
<label @KUBERNETES>
# prevent log feedback loops, discard logs from our own pods
<match kube.logging.fluentd>
@type relabel
@label @FLUENT_LOG
</match>
# Exclude current fluent-bit multiline noise
# Still relevant ??
<filter kube.logging.fluent-bit>
@type grep
<exclude>
key log
pattern /could not append content to multiline context/
</exclude>
</filter>
# Generate Hash ID to break endless loop for already ingested events during retries
<filter **>
@type opensearch_genid
use_entire_record true
</filter>
# Route through DISPATCH for Prometheus metrics
<match **>
@type relabel
@label @DISPATCH
</match>
</label>
04_outputs.conf: |-
<label @OUTPUT>
<match **>
@id out_os
@type opensearch
# @log_level debug
include_tag_key true
id_key _hash
remove_keys _hash
write_operation create
# we have oj in the fluentd-concenter image
prefer_oj_serializer true
# KubeZero pipeline incl. GeoIP etc.
#pipeline fluentd
http_backend typhoeus
ca_file /run/pki/ca.crt
port 9200
scheme https
hosts {{ .Values.output.host }}
user {{ .Values.output.user }}
password {{ .Values.output.password }}
log_es_400_reason
logstash_format true
reconnect_on_error true
reload_on_failure true
request_timeout 300s
#sniffer_class_name Fluent::Plugin::OpenSearchSimpleSniffer
#with_transporter_log true
verify_es_version_at_startup false
default_opensearch_version 2
#suppress_type_name true
# Retry failed bulk requests
# https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types
unrecoverable_error_types ["out_of_memory_error"]
bulk_message_request_threshold 1048576
<buffer>
@type file
flush_mode interval
flush_thread_count 2
flush_interval 10s
chunk_limit_size 2MB
total_limit_size 1GB
flush_at_shutdown true
retry_type exponential_backoff
retry_timeout 6h
overflow_action drop_oldest_chunk
disable_chunk_backup true
</buffer>
</match>
</label>
fluent-bit:
enabled: false
#image:
#repository: public.ecr.aws/zero-downtime/fluent-bit
#tag: 2.0.10
testFramework:
enabled: false
serviceMonitor:
enabled: false
#rbac:
# nodeAccess: true
#hostNetwork: true
#dnsPolicy: ClusterFirstWithHostNet
tolerations:
- effect: NoSchedule
operator: Exists
resources:
requests:
cpu: 20m
memory: 48Mi
limits:
memory: 128Mi
config:
output:
host: telemetry-fluentd
sharedKey: secretref+k8s://v1/Secret/kube-system/kubezero-secrets/telemetry.fluentd.source.sharedKey
tls: false
input:
memBufLimit: 16MB
refreshInterval: 5
logLevel: info
flushInterval: 5
#extraRecords:
# source.clustername: MyKubeCluster
service: |
[SERVICE]
Flush {{ .Values.config.flushInterval }}
Daemon Off
Log_Level {{ .Values.config.logLevel }}
Parsers_File parsers.conf
Parsers_File custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port {{ .Values.service.port }}
Health_Check On
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
# Exclude ourselves due to current error spam, https://github.com/fluent/fluent-bit/issues/5769
Exclude_Path *logging-fluent-bit*
multiline.parser cri
Tag cri.*
Skip_Long_Lines On
Skip_Empty_Lines On
DB /var/log/flb_kube.db
DB.Sync Normal
DB.locking true
# Buffer_Max_Size 1M
{{- with .Values.config.input }}
Mem_Buf_Limit {{ default "16MB" .memBufLimit }}
Refresh_Interval {{ default 5 .refreshInterval }}
{{- end }}
filters: |
[FILTER]
Name parser
Match cri.*
Parser cri-log
Key_Name log
[FILTER]
Name kubernetes
Match cri.*
Merge_Log On
Merge_Log_Key kube
Kube_Tag_Prefix cri.var.log.containers.
Keep_Log Off
K8S-Logging.Parser Off
K8S-Logging.Exclude Off
Kube_Meta_Cache_TTL 3600s
Buffer_Size 0
#Use_Kubelet true
{{- if index .Values "config" "extraRecords" }}
[FILTER]
Name record_modifier
Match cri.*
{{- range $k,$v := index .Values "config" "extraRecords" }}
Record {{ $k }} {{ $v }}
{{- end }}
{{- end }}
[FILTER]
Name rewrite_tag
Match cri.*
Emitter_Name kube_tag_rewriter
Rule $kubernetes['pod_id'] .* kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false
[FILTER]
Name lua
Match kube.*
script /fluent-bit/scripts/kubezero.lua
call nest_k8s_ns
outputs: |
[OUTPUT]
Match *
Name forward
Host {{ .Values.config.output.host }}
Port 24224
Shared_Key {{ .Values.config.output.sharedKey }}
tls {{ ternary "on" "off" .Values.config.output.tls }}
Send_options true
Require_ack_response true
customParsers: |
[PARSER]
Name cri-log
Format regex
Regex ^(?<time>.+) (?<stream>stdout|stderr) (?<logtag>F|P) (?<log>.*)$
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L%z
luaScripts:
kubezero.lua: |
function nest_k8s_ns(tag, timestamp, record)
if not record['kubernetes']['namespace_name'] then
return 0, 0, 0
end
new_record = {}
for key, val in pairs(record) do
if key == 'kube' then
new_record[key] = {}
new_record[key][record['kubernetes']['namespace_name']] = record[key]
else
new_record[key] = record[key]
end
end
return 1, timestamp, new_record
end
daemonSetVolumes:
- name: varlog
hostPath:
path: /var/log
- name: newlog
hostPath:
path: /var/lib/containers/logs
daemonSetVolumeMounts:
- name: varlog
mountPath: /var/log
- name: newlog
mountPath: /var/lib/containers/logs
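Both `sharedKey` values above are `secretref+k8s://` references that are resolved from an in-cluster Secret when the chart is rendered. A hypothetical pre-flight check, assuming the last path segment maps to the key name inside the Secret (adjust if the key layout differs):

```bash
# Illustrative only: verify the Secret and key referenced by the
# secretref+k8s:// URIs above exist and decode cleanly.
kubectl get secret kubezero-secrets -n kube-system \
  -o jsonpath='{.data.telemetry\.fluentd\.source\.sharedKey}' | base64 -d && echo
```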

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
version: 1.28.9
version: 1.28.9-1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,19 +0,0 @@
#!/bin/bash
# Istio operator resources first
kubectl delete Istiooperators kubezero-istio -n istio-system
kubectl delete Istiooperators kubezero-istio-private-ingress -n istio-system
# Istio operator itself
kubectl delete deployment istio-operator -n istio-operator
kubectl delete ns istio-operator
# Remove policy pod
kubectl delete deployment istio-policy -n istio-system
# Remove old gateways
kubectl delete gateways ingressgateway -n istio-system
kubectl delete gateways private-ingressgateway -n istio-system
# Remove old shared public cert
kubectl delete certificate public-ingress-cert -n istio-system

View File

@ -31,11 +31,16 @@ cilium:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
{{- with index .Values "network" "metallb" }}
{{- with .Values.network.metallb }}
metallb:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.network.haproxy }}
haproxy:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}

View File

@ -1,5 +1,26 @@
{{- define "telemetry-values" }}
{{- if index .Values "telemetry" "fluent-bit" }}
fluent-bit:
{{- with index .Values.telemetry "fluent-bit" }}
{{- toYaml . | nindent 2 }}
{{- end }}
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
{{- end }}
{{- if .Values.telemetry.fluentd }}
fluentd:
{{- with .Values.telemetry.fluentd }}
{{- toYaml . | nindent 2 }}
{{- end }}
metrics:
serviceMonitor:
enabled: {{ .Values.metrics.enabled }}
{{- end }}
{{- if .Values.telemetry.jaeger }}
jaeger:
{{- with .Values.telemetry.jaeger }}

View File

@ -85,7 +85,7 @@ falco:
telemetry:
enabled: false
namespace: telemetry
targetRevision: 0.2.2
targetRevision: 0.3.0
operators:
enabled: false

View File

@ -1,4 +1,5 @@
# KubeZero 1.28
![aws_architecture](docs/images/logo-v1.28.png)
## What's new - Major themes
- all KubeZero and support AMIs based on Alpine 3.19.1

View File

@ -0,0 +1,3 @@
#!/bin/bash
kubectl get secret -n kube-system -l sealedsecrets.bitnami.com/sealed-secrets-key -o yaml

scripts/git-pre-receive-hook.sh Normal file → Executable file
View File