Patch 2.19.5 #45

Merged
stefan merged 19 commits from master into stable 2021-05-17 10:08:39 +00:00
124 changed files with 6126 additions and 444 deletions

.versionrc Normal file

@@ -0,0 +1,2 @@
# template: "/tmp/doesntexist"
linkReferences: true


@@ -29,6 +29,7 @@ Installs the Istio control plane
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
- https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration


@@ -5,6 +5,7 @@ Common set of labels
helm.sh/chart: {{ include "kubezero-lib.chart" . }}
app.kubernetes.io/name: {{ include "kubezero-lib.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: kubezero
{{- end -}}
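For context, downstream KubeZero charts consume this label set through a single include. A minimal sketch of that usage follows; the ConfigMap is illustrative and the helper name `kubezero-lib.labels` is an assumption inferred from the `kubezero-lib.*` includes above:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: example                                          # illustrative name
      labels:
        {{- include "kubezero-lib.labels" . | nindent 4 }}   # assumed helper name
    data: {}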


@@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
-version: 0.6.4
-appVersion: 1.4.1
+version: 0.6.5
+appVersion: 1.5.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -19,8 +19,8 @@ dependencies:
    version: ">= 0.1.3"
    repository: https://zero-down-time.github.io/kubezero/
  - name: eck-operator
-   version: 1.4.1
-   repository: https://helm.elastic.co
+   version: 1.5.0
+   # repository: https://helm.elastic.co
    condition: eck-operator.enabled
  - name: fluentd
    version: 0.2.2


@@ -1,6 +1,6 @@
# kubezero-logging
-![Version: 0.6.2](https://img.shields.io/badge/Version-0.6.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.4.1](https://img.shields.io/badge/AppVersion-1.4.1-informational?style=flat-square)
+![Version: 0.6.5](https://img.shields.io/badge/Version-0.6.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.5.0](https://img.shields.io/badge/AppVersion-1.5.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@@ -14,13 +14,13 @@ KubeZero Umbrella Chart for complete EFK stack
## Requirements
-Kubernetes: `>= 1.16.0`
+Kubernetes: `>= 1.18.0`
| Repository | Name | Version |
|------------|------|---------|
-| | fluent-bit | 0.12.3 |
+| | fluent-bit | 0.15.4 |
| | fluentd | 0.2.2 |
-| https://helm.elastic.co | eck-operator | 1.4.1 |
+| https://helm.elastic.co | eck-operator | 1.5.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Changes from upstream
@@ -70,21 +70,31 @@ Kubernetes: `>= 1.16.0`
| fluent-bit.config.customParsers | string | `"[PARSER]\n Name cri-log\n Format regex\n Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$\n Time_Key time\n Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n"` | |
| fluent-bit.config.filters | string | `"[FILTER]\n Name lua\n Match cri.*\n script /fluent-bit/scripts/kubezero.lua\n call reassemble_cri_logs\n\n[FILTER]\n Name kubernetes\n Match cri.*\n Merge_Log On\n Merge_Log_Key kube\n Kube_Tag_Prefix cri.var.log.containers.\n Keep_Log Off\n K8S-Logging.Parser Off\n K8S-Logging.Exclude Off\n #Use_Kubelet true\n #Kubelet_Port 10250\n\n{{- if index .Values \"config\" \"extraRecords\" }}\n\n[FILTER]\n Name record_modifier\n Match cri.*\n {{- range $k,$v := index .Values \"config\" \"extraRecords\" }}\n Record {{ $k }} {{ $v }}\n {{- end }}\n{{- end }}\n\n[FILTER]\n Name rewrite_tag\n Match cri.*\n Emitter_Name kube_tag_rewriter\n Rule logtag F kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false\n\n[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/scripts/kubezero.lua\n call nest_k8s_ns\n"` | |
| fluent-bit.config.flushInterval | int | `5` | |
-| fluent-bit.config.input.memBufLimit | string | `"16MB"` | |
+| fluent-bit.config.input.memBufLimit | string | `"4MB"` | |
| fluent-bit.config.input.refreshInterval | int | `10` | |
-| fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n Parser cri-log\n Tag cri.*\n Skip_Long_Lines On\n DB /var/log/flb_kube.db\n DB.Sync Normal\n {{- with .Values.config.input }}\n Mem_Buf_Limit {{ default \"16MB\" .memBufLimit }}\n Refresh_Interval {{ default 10 .refreshInterval }}\n {{- end }}\n"` | |
+| fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n Parser cri-log\n Tag cri.*\n Skip_Long_Lines On\n DB /var/log/flb_kube.db\n DB.Sync Normal\n {{- with .Values.config.input }}\n Mem_Buf_Limit {{ default \"4MB\" .memBufLimit }}\n Refresh_Interval {{ default 10 .refreshInterval }}\n {{- end }}\n"` | |
| fluent-bit.config.logLevel | string | `"warn"` | |
| fluent-bit.config.output.host | string | `"logging-fluentd"` | |
| fluent-bit.config.output.sharedKey | string | `"cloudbender"` | |
| fluent-bit.config.output.tls | bool | `false` | |
| fluent-bit.config.outputs | string | `"[OUTPUT]\n Match *\n Name forward\n Host {{ .Values.config.output.host }}\n Port 24224\n Shared_Key {{ .Values.config.output.sharedKey }}\n tls {{ ternary \"on\" \"off\" .Values.config.output.tls }}\n Send_options true\n Require_ack_response true\n"` | |
| fluent-bit.config.service | string | `"[SERVICE]\n Flush {{ .Values.config.flushInterval }}\n Daemon Off\n Log_Level {{ .Values.config.logLevel }}\n Parsers_File parsers.conf\n Parsers_File custom_parsers.conf\n HTTP_Server On\n HTTP_Listen 0.0.0.0\n HTTP_Port {{ .Values.service.port }}\n"` | |
+| fluent-bit.daemonSetVolumeMounts[0].mountPath | string | `"/var/log"` | |
+| fluent-bit.daemonSetVolumeMounts[0].name | string | `"varlog"` | |
+| fluent-bit.daemonSetVolumeMounts[1].mountPath | string | `"/etc/machine-id"` | |
+| fluent-bit.daemonSetVolumeMounts[1].name | string | `"etcmachineid"` | |
+| fluent-bit.daemonSetVolumeMounts[1].readOnly | bool | `true` | |
+| fluent-bit.daemonSetVolumes[0].hostPath.path | string | `"/var/log"` | |
+| fluent-bit.daemonSetVolumes[0].name | string | `"varlog"` | |
+| fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/etc/machine-id"` | |
+| fluent-bit.daemonSetVolumes[1].hostPath.type | string | `"File"` | |
+| fluent-bit.daemonSetVolumes[1].name | string | `"etcmachineid"` | |
| fluent-bit.enabled | bool | `false` | |
| fluent-bit.luaScripts."kubezero.lua" | string | `"local reassemble_state = {}\n\nfunction reassemble_cri_logs(tag, timestamp, record)\n local reassemble_key = tag\n if record.logtag == 'P' then\n reassemble_state[reassemble_key] = reassemble_state[reassemble_key] or \"\" .. record.log\n return -1, 0, 0\n end\n record.log = reassemble_state[reassemble_key] or \"\" .. (record.log or \"\")\n reassemble_state[reassemble_key] = nil\n return 1, timestamp, record\nend\n\nfunction nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` | |
| fluent-bit.resources.limits.memory | string | `"64Mi"` | |
| fluent-bit.resources.requests.cpu | string | `"20m"` | |
-| fluent-bit.resources.requests.memory | string | `"16Mi"` | |
-| fluent-bit.serviceMonitor.enabled | bool | `true` | |
+| fluent-bit.resources.requests.memory | string | `"32Mi"` | |
+| fluent-bit.serviceMonitor.enabled | bool | `false` | |
| fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | |
| fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
@@ -98,7 +108,7 @@ Kubernetes: `>= 1.16.0`
| fluentd.fileConfigs."00_system.conf" | string | `"<system>\n workers 2\n</system>"` | |
| fluentd.fileConfigs."01_sources.conf" | string | `"<source>\n @type http\n @label @KUBERNETES\n port 9880\n bind 0.0.0.0\n keepalive_timeout 30\n</source>\n\n<source>\n @type forward\n @label @KUBERNETES\n port 24224\n bind 0.0.0.0\n # skip_invalid_event true\n send_keepalive_packet true\n <security>\n self_hostname \"#{ENV['HOSTNAME']}\"\n shared_key {{ .Values.shared_key }}\n </security>\n</source>"` | |
| fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops eg. ES has issues etc.\n # discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | |
| fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id elasticsearch\n @type elasticsearch\n @log_level info\n include_tag_key true\n id_key id\n remove_keys id\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n hosts \"{{ .Values.output.host }}\"\n port 9200\n scheme http\n user elastic\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 60s\n suppress_type_name true\n\n <buffer tag>\n @type file_single\n path /var/log/fluentd-buffers/kubernetes.system.buffer\n chunk_limit_size 8MB\n total_limit_size 4GB\n flush_mode interval\n flush_thread_count 2\n flush_interval 10s\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 300m\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | |
| fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id elasticsearch\n @type elasticsearch\n @log_level info\n include_tag_key true\n id_key id\n remove_keys id\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n hosts \"{{ .Values.output.host }}\"\n port 9200\n scheme http\n user elastic\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 120s\n suppress_type_name true\n bulk_message_request_threshold 2097152\n\n <buffer tag>\n @type file_single\n path /var/log/fluentd-buffers/kubernetes.system.buffer\n chunk_limit_size 8MB\n total_limit_size 4GB\n flush_mode interval\n flush_thread_count 8\n flush_interval 10s\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 300m\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | |
| fluentd.image.repository | string | `"fluent/fluentd-kubernetes-daemonset"` | |
| fluentd.image.tag | string | `"v1.12-debian-elasticsearch7-1"` | |
| fluentd.istio.enabled | bool | `false` | |
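Putting the table above to use: a minimal override file for this chart might look like the following sketch (file name illustrative; keys taken from the fluent-bit.* rows above):

    # logging-values.yaml (illustrative)
    fluent-bit:
      enabled: true                # chart default is false per the table
      config:
        input:
          memBufLimit: "16MB"      # default is 4MB per the table
        output:
          host: logging-fluentd
          tls: true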


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,20 @@
apiVersion: v2
appVersion: 1.5.0
description: 'A Helm chart for deploying the Elastic Cloud on Kubernetes (ECK) operator: the official Kubernetes operator for orchestrating Elasticsearch, Kibana, APM Server, Enterprise Search, and Beats.'
home: https://github.com/elastic/cloud-on-k8s
icon: https://helm.elastic.co/icons/eck.png
keywords:
- Elasticsearch
- Kibana
- APM Server
- Beats
- Enterprise Search
- Elastic Stack
- Operator
kubeVersion: '>=1.12.0-0'
maintainers:
- email: eck@elastic.co
name: Elastic
name: eck-operator
type: application
version: 1.5.0


@@ -0,0 +1,20 @@
# ECK Operator Helm Chart
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/elastic)](https://artifacthub.io/packages/helm/elastic/eck-operator)
A Helm chart to install the ECK Operator: the official Kubernetes operator from Elastic to orchestrate Elasticsearch, Kibana, APM Server, Enterprise Search, and Beats on Kubernetes.
For more information about the ECK Operator, see:
- [Documentation](https://www.elastic.co/guide/en/cloud-on-k8s/current/index.html)
- [GitHub repo](https://github.com/elastic/cloud-on-k8s)
## Requirements
- Supported Kubernetes versions are listed in the documentation: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s_supported_versions.html
- Helm >= 3.0.0
## Usage
Refer to the documentation at https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-install-helm.html
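As a quick usage sketch (release name and namespace are illustrative; the repository URL is the one listed in the Requirements above):

    helm repo add elastic https://helm.elastic.co
    helm repo update
    helm install elastic-operator elastic/eck-operator \
      --namespace elastic-system --create-namespace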

File diff suppressed because it is too large


@@ -0,0 +1,6 @@
managedNamespaces: []
createClusterScopedResources: true
webhook:
enabled: true


@@ -0,0 +1,11 @@
managedNamespaces: []
createClusterScopedResources: true
webhook:
enabled: true
podAnnotations:
sidecar.istio.io/inject: "true"
traffic.sidecar.istio.io/includeInboundPorts: "*"
traffic.sidecar.istio.io/excludeInboundPorts: "9443"


@@ -0,0 +1,12 @@
managedNamespaces: ["elastic-system"]
createClusterScopedResources: false
config:
# no RBAC access to cluster-wide storage classes, hence disable storage class validation
validateStorageClass: false
installCRDs: false
webhook:
enabled: false


@@ -0,0 +1,18 @@
managedNamespaces: ["team-a", "team-b"]
createClusterScopedResources: true
refs:
enforceRBAC: true
webhook:
enabled: true
namespaceSelector:
matchExpressions:
- key: "eck.k8s.elastic.co/tenant"
operator: In
values: ["team-a", "team-b"]
softMultiTenancy:
enabled: true


@@ -0,0 +1,2 @@
1. Inspect the operator logs by running the following command:
kubectl logs -n {{ .Release.Namespace }} sts/{{ .Release.Name }}


@@ -0,0 +1,333 @@
{{/*
Expand the name of the chart.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "eck-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "eck-operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "eck-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "eck-operator.labels" -}}
{{- include "eck-operator.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
helm.sh/chart: {{ include "eck-operator.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "eck-operator.selectorLabels" -}}
{{- if .Values.internal.manifestGen }}
control-plane: elastic-operator
{{- else }}
app.kubernetes.io/name: {{ include "eck-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "eck-operator.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "eck-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Determine effective Kubernetes version
*/}}
{{- define "eck-operator.effectiveKubeVersion" -}}
{{- if .Values.internal.manifestGen -}}
{{- semver .Values.internal.kubeVersion -}}
{{- else -}}
{{- .Capabilities.KubeVersion.Version -}}
{{- end -}}
{{- end -}}
{{/*
Determine the name for the webhook
*/}}
{{- define "eck-operator.webhookName" -}}
{{- if .Values.internal.manifestGen -}}
elastic-webhook.k8s.elastic.co
{{- else -}}
{{- $name := include "eck-operator.name" . -}}
{{ printf "%s.%s.k8s.elastic.co" $name .Release.Namespace }}
{{- end -}}
{{- end -}}
{{/*
Determine the name for the webhook secret
*/}}
{{- define "eck-operator.webhookSecretName" -}}
{{- if .Values.internal.manifestGen -}}
elastic-webhook-server-cert
{{- else -}}
{{- $name := include "eck-operator.name" . -}}
{{ printf "%s-webhook-cert" $name | trunc 63 }}
{{- end -}}
{{- end -}}
{{/*
Determine the name for the webhook service
*/}}
{{- define "eck-operator.webhookServiceName" -}}
{{- if .Values.internal.manifestGen -}}
elastic-webhook-server
{{- else -}}
{{- $name := include "eck-operator.name" . -}}
{{ printf "%s-webhook" $name | trunc 63 }}
{{- end -}}
{{- end -}}
{{/*
Add the webhook sideEffects field on supported Kubernetes versions
*/}}
{{- define "eck-operator.webhookSideEffects" -}}
{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
{{- $kubeVersionSupported := semverCompare ">=1.13.0-0" $kubeVersion -}}
{{- if $kubeVersionSupported }}
sideEffects: "None"
{{- end }}
{{- end }}
{{/*
Use v1 of ValidatingWebhookConfiguration on supported Kubernetes versions
*/}}
{{- define "eck-operator.webhookAPIVersion" -}}
{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
{{- $kubeVersionSupported := semverCompare ">=1.16.0-0" $kubeVersion -}}
{{- if $kubeVersionSupported -}}
admissionregistration.k8s.io/v1
{{- else -}}
admissionregistration.k8s.io/v1beta1
{{- end -}}
{{- end }}
{{/*
Define admissionReviewVersions based on Kubernetes version
*/}}
{{- define "eck-operator.webhookAdmissionReviewVersions" -}}
{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
{{- $kubeVersionSupported := semverCompare ">=1.16.0-0" $kubeVersion -}}
{{- if $kubeVersionSupported }}
admissionReviewVersions: [v1beta1]
{{- end }}
{{- end }}
{{/*
Define webhook match policy based on Kubernetes version
*/}}
{{- define "eck-operator.webhookMatchPolicy" -}}
{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
{{- $kubeVersionSupported := semverCompare ">=1.16.0-0" $kubeVersion -}}
{{- if $kubeVersionSupported }}
matchPolicy: Exact
{{- end }}
{{- end }}
{{/*
RBAC permissions
*/}}
{{- define "eck-operator.rbacRules" -}}
- apiGroups:
- "authorization.k8s.io"
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- ""
resources:
- pods
- endpoints
- events
- persistentvolumeclaims
- secrets
- services
- configmaps
- serviceaccounts
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- apps
resources:
- deployments
- statefulsets
- daemonsets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- elasticsearch.k8s.elastic.co
resources:
- elasticsearches
- elasticsearches/status
- elasticsearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
- enterpriselicenses
- enterpriselicenses/status
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- kibana.k8s.elastic.co
resources:
- kibanas
- kibanas/status
- kibanas/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- apm.k8s.elastic.co
resources:
- apmservers
- apmservers/status
- apmservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- enterprisesearch.k8s.elastic.co
resources:
- enterprisesearches
- enterprisesearches/status
- enterprisesearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- beat.k8s.elastic.co
resources:
- beats
- beats/status
- beats/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- agent.k8s.elastic.co
resources:
- agents
- agents/status
- agents/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
{{- end -}}
{{/*
RBAC permissions on non-namespaced resources
*/}}
{{- define "eck-operator.clusterWideRbacRules" -}}
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
{{- end -}}


@@ -0,0 +1,63 @@
{{- if .Values.createClusterScopedResources -}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "eck-operator.fullname" . }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
rules:
{{ template "eck-operator.rbacRules" . | toYaml | indent 2 }}
{{ template "eck-operator.clusterWideRbacRules" . | toYaml | indent 2 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "{{ include "eck-operator.name" . }}-view"
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- include "eck-operator.labels" . | nindent 4 }}
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["kibana.k8s.elastic.co"]
resources: ["kibanas"]
verbs: ["get", "list", "watch"]
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
resources: ["enterprisesearches"]
verbs: ["get", "list", "watch"]
- apiGroups: ["beat.k8s.elastic.co"]
resources: ["beats"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "{{ include "eck-operator.name" . }}-edit"
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- include "eck-operator.labels" . | nindent 4 }}
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["kibana.k8s.elastic.co"]
resources: ["kibanas"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
resources: ["enterprisesearches"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["beat.k8s.elastic.co"]
resources: ["beats"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
{{- end -}}


@@ -0,0 +1,43 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "eck-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
data:
eck.yaml: |-
log-verbosity: {{ int .Values.config.logVerbosity }}
metrics-port: {{ int .Values.config.metricsPort }}
container-registry: {{ .Values.config.containerRegistry }}
max-concurrent-reconciles: {{ int .Values.config.maxConcurrentReconciles }}
ca-cert-validity: {{ .Values.config.caValidity }}
ca-cert-rotate-before: {{ .Values.config.caRotateBefore }}
cert-validity: {{ .Values.config.certificatesValidity }}
cert-rotate-before: {{ .Values.config.certificatesRotateBefore }}
set-default-security-context: {{ .Values.config.setDefaultSecurityContext }}
kube-client-timeout: {{ .Values.config.kubeClientTimeout }}
elasticsearch-client-timeout: {{ .Values.config.elasticsearchClientTimeout }}
disable-telemetry: {{ .Values.telemetry.disabled }}
{{- if .Values.telemetry.interval }}
telemetry-interval: {{ .Values.telemetry.interval }}
{{- end }}
validate-storage-class: {{ .Values.config.validateStorageClass }}
{{- if .Values.tracing.enabled }}
enable-tracing: true
{{- end }}
{{- if .Values.refs.enforceRBAC }}
enforce-rbac-on-refs: true
{{- end }}
enable-webhook: {{ .Values.webhook.enabled }}
{{- if .Values.webhook.enabled }}
webhook-name: {{ include "eck-operator.webhookName" . }}
{{- if not .Values.webhook.manageCerts }}
manage-webhook-certs: false
webhook-cert-dir: {{ .Values.webhook.certsDir }}
{{- end }}
{{- end }}
{{- if .Values.managedNamespaces }}
namespaces: [{{ join "," .Values.managedNamespaces }}]
{{- end }}
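With the chart defaults shown in values.yaml further down, this template renders roughly the following eck.yaml (a sketch; <namespace> stands for the release namespace):

    eck.yaml: |-
      log-verbosity: 0
      metrics-port: 0
      container-registry: docker.elastic.co
      max-concurrent-reconciles: 3
      ca-cert-validity: 8760h
      ca-cert-rotate-before: 24h
      cert-validity: 8760h
      cert-rotate-before: 24h
      set-default-security-context: true
      kube-client-timeout: 60s
      elasticsearch-client-timeout: 180s
      disable-telemetry: false
      validate-storage-class: true
      enable-webhook: true
      webhook-name: elastic-operator.<namespace>.k8s.elastic.co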


@@ -0,0 +1,13 @@
{{- if .Values.softMultiTenancy.enabled -}}
{{- range .Values.managedNamespaces }}
{{- $namespace := . }}
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
eck.k8s.elastic.co/tenant: {{ $namespace }}
{{- end -}}
{{- end -}}


@@ -0,0 +1,228 @@
{{- if .Values.softMultiTenancy.enabled -}}
{{- $fullName := include "eck-operator.fullname" . -}}
{{- $name := include "eck-operator.name" . -}}
{{- range .Values.managedNamespaces -}}
{{- $namespace := . }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: "{{ $name }}-elasticsearch"
namespace: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
spec:
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
egress:
# Transport port
- ports:
- port: 9300
to:
# Elasticsearch within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
# DNS
- ports:
- port: 53
protocol: UDP
to: []
ingress:
# HTTP Port
- ports:
- port: 9200
from:
# Operator
- namespaceSelector:
matchLabels:
name: "{{ $.Release.Namespace }}"
podSelector:
matchLabels:
{{- include "eck-operator.selectorLabels" $ | nindent 14 }}
# Within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
# Transport port
- ports:
- port: 9300
from:
# Within namespace (from other Elasticsearch nodes)
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: "{{ $name }}-kibana"
namespace: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
spec:
podSelector:
matchLabels:
common.k8s.elastic.co/type: "kibana"
egress:
# Elasticsearch HTTP port
- ports:
- port: 9200
to:
# Elasticsearch within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
# DNS
- ports:
- port: 53
protocol: UDP
to: []
ingress:
# HTTP Port
- ports:
- port: 5601
from:
# Within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: "{{ $name }}-apm-server"
namespace: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
spec:
podSelector:
matchLabels:
common.k8s.elastic.co/type: "apm-server"
egress:
# Elasticsearch HTTP port
- ports:
- port: 9200
to:
# Elasticsearch within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
# Kibana HTTP port
- ports:
- port: 5601
to:
# Kibana within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "kibana"
# DNS
- ports:
- port: 53
protocol: UDP
to: []
ingress:
# HTTP Port
- ports:
- port: 8200
from:
# Within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: "{{ $name }}-enterprise-search"
namespace: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
spec:
podSelector:
matchLabels:
common.k8s.elastic.co/type: "enterprise-search"
egress:
# Elasticsearch HTTP port
- ports:
- port: 9200
to:
# Elasticsearch within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
# DNS
- ports:
- port: 53
protocol: UDP
to: []
ingress:
# HTTP Port
- ports:
- port: 3002
from:
# Within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: "{{ $name }}-beats"
namespace: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
spec:
podSelector:
matchLabels:
common.k8s.elastic.co/type: "beat"
egress:
# Elasticsearch HTTP port
- ports:
- port: 9200
to:
# Elasticsearch within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
# Kibana HTTP port
- ports:
- port: 5601
to:
# Kibana within namespace
- namespaceSelector:
matchLabels:
eck.k8s.elastic.co/tenant: {{ $namespace }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "kibana"
# DNS
- ports:
- port: 53
protocol: UDP
to: []
{{- end }}
{{- end -}}


@@ -0,0 +1,10 @@
{{- if (and .Values.internal.manifestGen .Values.internal.createOperatorNamespace) -}}
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Release.Namespace }}
labels:
name: {{ .Release.Namespace }}
{{- include "eck-operator.labels" $ | nindent 4 }}
{{- end -}}


@@ -0,0 +1,59 @@
{{- if .Values.softMultiTenancy.enabled -}}
{{- $kubeAPIServerIP := (required "kubeAPIServerIP is required" .Values.kubeAPIServerIP) -}}
{{- $metricsPort := int .Values.config.metricsPort -}}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "eck-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "eck-operator.selectorLabels" . | nindent 6 }}
egress:
# DNS
- ports:
- port: 53
protocol: UDP
to: []
# API server
- ports:
- port: 443
to:
- ipBlock:
cidr: "{{ $kubeAPIServerIP }}/32"
# Elasticsearch
- ports:
- port: 9200
to:
- namespaceSelector:
matchExpressions:
- key: "eck.k8s.elastic.co/tenant"
operator: In
values:
{{- range .Values.managedNamespaces }}
- {{ . }}
{{- end }}
podSelector:
matchLabels:
common.k8s.elastic.co/type: "elasticsearch"
{{- if or .Values.webhook.enabled (gt $metricsPort 0) }}
ingress:
{{- if .Values.webhook.enabled }}
- ports:
- port: 9443
from:
- ipBlock:
cidr: "{{ $kubeAPIServerIP }}/32"
{{- end }}
{{- if gt $metricsPort 0 }}
# Metrics
- ports:
- port: {{ $metricsPort }}
from: []
{{- end }}
{{- end }}
{{- end -}}


@@ -0,0 +1,80 @@
{{- $operatorNSIsManaged := has .Release.Namespace .Values.managedNamespaces -}}
{{- $fullName := include "eck-operator.fullname" . -}}
{{- $svcAccount := include "eck-operator.serviceAccountName" . }}
{{- if not .Values.createClusterScopedResources }}
{{- range .Values.managedNamespaces }}
{{- $namespace := . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: "{{ $fullName }}"
namespace: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
rules:
{{ template "eck-operator.rbacRules" $ | toYaml | indent 2 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ $fullName }}"
namespace: {{ $namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ $fullName }}"
subjects:
- kind: ServiceAccount
name: {{ $svcAccount }}
namespace: {{ $.Release.Namespace }}
{{- end }} {{- /* end of range over managed namespaces */}}
{{- /* If createClusterScopedResources is false and operator namespace is not in the managed namespaces list, create additional role binding */}}
{{- if not $operatorNSIsManaged }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $fullName }}
namespace: {{ $.Release.Namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
rules:
{{ template "eck-operator.rbacRules" $ | toYaml | indent 2 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "{{ $fullName }}"
namespace: {{ $.Release.Namespace }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: "{{ $fullName }}"
subjects:
- kind: ServiceAccount
name: {{ $svcAccount }}
namespace: {{ $.Release.Namespace }}
{{- end }} {{- /* end of operator role binding if operator namespace is not managed */}}
{{- else }} {{- /* we can create cluster-scoped resources so just create a cluster role binding */}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $fullName }}
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $fullName }}
subjects:
- kind: ServiceAccount
name: {{ $svcAccount }}
namespace: {{ $.Release.Namespace }}
{{- end }}


@@ -0,0 +1,14 @@
{{- if .Values.serviceAccount.create }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "eck-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -0,0 +1,118 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "eck-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "eck-operator.selectorLabels" . | nindent 6 }}
serviceName: {{ include "eck-operator.fullname" . }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
annotations:
# Rename the fields "error" to "error.message" and "source" to "event.source"
# This is to avoid a conflict with the ECS "error" and "source" documents.
"co.elastic.logs/raw": "[{\"type\":\"container\",\"json.keys_under_root\":true,\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]"
"checksum/config": {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "eck-operator.selectorLabels" . | nindent 8 }}
spec:
terminationGracePeriodSeconds: 10
serviceAccountName: {{ include "eck-operator.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: manager
args:
- "manager"
- "--config=/conf/eck.yaml"
- "--distribution-channel={{ .Values.telemetry.distributionChannel }}"
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
{{- if .Values.webhook.enabled }}
- name: WEBHOOK_SECRET
value: {{ include "eck-operator.webhookSecretName" . }}
{{- end }}
{{- with .Values.env }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.tracing.enabled -}}
{{- range $name, $value := .Values.tracing.config }}
- name: {{ $name }}
value: {{ $value }}
{{- end }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.webhook.enabled }}
ports:
- containerPort: 9443
name: https-webhook
protocol: TCP
{{- end }}
volumeMounts:
- mountPath: "/conf"
name: conf
readOnly: true
{{- if .Values.webhook.enabled }}
- mountPath: {{ .Values.webhook.certsDir }}
name: cert
readOnly: true
{{- end }}
{{- with .Values.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
volumes:
- name: conf
configMap:
name: {{ include "eck-operator.fullname" . }}
{{- if .Values.webhook.enabled }}
- name: cert
secret:
defaultMode: 420
secretName: {{ include "eck-operator.webhookSecretName" . }}
{{- end }}
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{- end }}


@@ -0,0 +1,23 @@
{{- if .Values.softMultiTenancy.enabled -}}
{{- if has .Release.Namespace .Values.managedNamespaces -}}
{{- fail "Operator namespace cannot be in managed namespaces when soft multi-tenancy is enabled" -}}
{{- end -}}
{{- if empty .Values.managedNamespaces -}}
{{- fail "Managed namespaces must be defined when soft multi-tenancy is enabled" -}}
{{- end -}}
{{- if empty .Values.kubeAPIServerIP -}}
{{- fail "Soft multi-tenancy requires kubeAPIServerIP to be defined" -}}
{{- end -}}
{{- end -}}
{{- if (not .Values.createClusterScopedResources) -}}
{{- if .Values.webhook.enabled -}}
{{- fail "Webhook cannot be enabled when cluster-scoped resource creation is disabled" -}}
{{- end -}}
{{- if .Values.config.validateStorageClass -}}
{{- fail "Storage class validation cannot be enabled when cluster-scoped resource creation is disabled" -}}
{{- end -}}
{{- end -}}
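These guards make invalid value combinations fail at render time rather than at runtime; for illustration (chart path, release name, and error wording abbreviated):

    helm template eck ./charts/eck-operator \
      --set createClusterScopedResources=false \
      --set webhook.enabled=true
    # Error: ... Webhook cannot be enabled when cluster-scoped resource creation is disabled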


@@ -0,0 +1,329 @@
{{- if .Values.webhook.enabled -}}
---
apiVersion: {{ include "eck-operator.webhookAPIVersion" $ }}
kind: ValidatingWebhookConfiguration
metadata:
name: {{ include "eck-operator.webhookName" . }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
{{- if .Values.webhook.certManagerCert }}
annotations:
cert-manager.io/inject-ca-from: "{{ .Release.Namespace }}/{{ .Values.webhook.certManagerCert }}"
{{- end }}
webhooks:
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-agent-k8s-elastic-co-v1alpha1-agent
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-agent-validation-v1alpha1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- agent.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- agents
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-apm-k8s-elastic-co-v1-apmserver
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-apm-validation-v1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- apm.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- apmservers
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-apm-k8s-elastic-co-v1beta1-apmserver
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-apm-validation-v1beta1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- apm.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- apmservers
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-beat-k8s-elastic-co-v1beta1-beat
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-beat-validation-v1beta1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- beat.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- beats
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-enterprisesearch-k8s-elastic-co-v1-enterprisesearch
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-ent-validation-v1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- enterprisesearch.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- enterprisesearches
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-enterprisesearch-k8s-elastic-co-v1beta1-enterprisesearch
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-ent-validation-v1beta1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- enterprisesearch.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- enterprisesearches
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-elasticsearch-k8s-elastic-co-v1-elasticsearch
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-es-validation-v1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- elasticsearch.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- elasticsearches
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-elasticsearch-k8s-elastic-co-v1beta1-elasticsearch
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-es-validation-v1beta1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- elasticsearch.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- elasticsearches
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-kibana-k8s-elastic-co-v1-kibana
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-kb-validation-v1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- kibana.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- kibanas
- clientConfig:
caBundle: {{ .Values.webhook.caBundle }}
service:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
path: /validate-kibana-k8s-elastic-co-v1beta1-kibana
failurePolicy: {{ .Values.webhook.failurePolicy }}
{{- with .Values.webhook.namespaceSelector }}
namespaceSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.webhook.objectSelector }}
objectSelector:
{{- toYaml . | nindent 4 }}
{{- end }}
name: elastic-kb-validation-v1beta1.k8s.elastic.co
{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
rules:
- apiGroups:
- kibana.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- kibanas
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "eck-operator.webhookServiceName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
spec:
ports:
- name: https
port: 443
targetPort: 9443
selector:
{{- include "eck-operator.selectorLabels" . | nindent 4 }}
{{- if .Values.webhook.manageCerts }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "eck-operator.webhookSecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
{{- end }}
{{- end -}}


@@ -0,0 +1,177 @@
# nameOverride is the short name for the deployment. Leave empty to let Helm generate a name using chart values.
nameOverride: "elastic-operator"
# fullnameOverride is the full name for the deployment. Leave empty to let Helm generate a name using chart values.
fullnameOverride: "elastic-operator"
# managedNamespaces is the set of namespaces that the operator manages. Leave empty to manage all namespaces.
managedNamespaces: []
# installCRDs determines whether Custom Resource Definitions (CRD) are installed by the chart.
# Note that CRDs are global resources and require cluster admin privileges to install.
# If you are sharing a cluster with other users who may want to install ECK on their own namespaces, setting this to true can have unintended consequences.
# 1. Upgrades will overwrite the global CRDs and could disrupt the other users of ECK who may be running a different version.
# 2. Uninstalling the chart will delete the CRDs and potentially cause Elastic resources deployed by other users to be removed as well.
installCRDs: true
# replicaCount is the number of operator pods to run.
replicaCount: 1
image:
# repository is the container image prefixed by the registry name.
repository: docker.elastic.co/eck/eck-operator
# pullPolicy is the container image pull policy.
pullPolicy: IfNotPresent
# tag is the container image tag. If not defined, defaults to chart appVersion.
tag: null
# imagePullSecrets defines the secrets to use when pulling the operator container image.
imagePullSecrets: []
# resources define the container resource limits for the operator.
resources:
limits:
cpu: 1
memory: 512Mi
requests:
cpu: 100m
memory: 150Mi
# podAnnotations define the annotations that should be added to the operator pod.
podAnnotations: {}
# podSecurityContext defines the pod security context for the operator pod.
podSecurityContext:
runAsNonRoot: true
# securityContext defines the security context of the operator container.
securityContext: {}
# nodeSelector defines the node selector for the operator pod.
nodeSelector: {}
# tolerations defines the node tolerations for the operator pod.
tolerations: []
# affinity defines the node affinity rules for the operator pod.
affinity: {}
# additional environment variables for the operator container.
env: []
# additional volume mounts for the operator container.
volumeMounts: []
# additional volumes to add to the operator pod.
volumes: []
# createClusterScopedResources determines whether cluster-scoped resources (ClusterRoles, ClusterRoleBindings) should be created.
createClusterScopedResources: true
serviceAccount:
# create specifies whether a service account should be created for the operator.
create: true
# annotations to add to the service account
annotations: {}
# name of the service account to use. If not set and create is true, a name is generated using the fullname template.
name: ""
tracing:
# enabled specifies whether APM tracing is enabled for the operator.
enabled: false
# config is a map of APM Server configuration variables that should be set in the environment.
config:
ELASTIC_APM_SERVER_URL: http://localhost:8200
ELASTIC_APM_SERVER_TIMEOUT: 30s
refs:
# enforceRBAC specifies whether RBAC should be enforced for cross-namespace associations between resources.
enforceRBAC: false
webhook:
# enabled determines whether the webhook is installed.
enabled: true
# caBundle is the PEM-encoded CA trust bundle for the webhook certificate. Only required if manageCerts is false and certManagerCert is null.
caBundle: Cg==
# certManagerCert is the name of the cert-manager certificate to use with the webhook.
certManagerCert: null
# certsDir is the directory to mount the certificates.
certsDir: "/tmp/k8s-webhook-server/serving-certs"
# failurePolicy of the webhook.
failurePolicy: Ignore
# manageCerts determines whether the operator manages the webhook certificates automatically.
manageCerts: true
# namespaceSelector corresponds to the namespaceSelector property of the webhook.
# Setting this restricts the webhook to act only on objects submitted to namespaces that match the selector.
namespaceSelector: {}
# objectSelector corresponds to the objectSelector property of the webhook.
# Setting this restricts the webhook to act only on objects that match the selector.
objectSelector: {}
softMultiTenancy:
# enabled determines whether the operator is installed with soft multi-tenancy extensions.
# This requires network policies to be enabled on the Kubernetes cluster.
enabled: false
# kubeAPIServerIP is required when softMultiTenancy is enabled.
kubeAPIServerIP: null
telemetry:
# disabled determines whether the operator periodically updates ECK telemetry data for Kibana to consume.
disabled: false
# distributionChannel denotes which distribution channel was used to install the operator.
distributionChannel: "helm"
# config values for the operator.
config:
# logVerbosity defines the logging level. Valid values are as follows:
# -2: Errors only
# -1: Errors and warnings
# 0: Errors, warnings, and information
# number greater than 0: Errors, warnings, information, and debug details.
logVerbosity: "0"
# metricsPort defines the port to expose operator metrics. Set to 0 to disable metrics reporting.
metricsPort: "0"
# containerRegistry to use for pulling Elasticsearch and other application container images.
containerRegistry: docker.elastic.co
# maxConcurrentReconciles is the number of concurrent reconciliation operations to perform per controller.
maxConcurrentReconciles: "3"
# caValidity defines the validity period of the CA certificates generated by the operator.
caValidity: 8760h
# caRotateBefore defines when to rotate a CA certificate that is due to expire.
caRotateBefore: 24h
# certificatesValidity defines the validity period of certificates generated by the operator.
certificatesValidity: 8760h
# certificatesRotateBefore defines when to rotate a certificate that is due to expire.
certificatesRotateBefore: 24h
# setDefaultSecurityContext determines whether a default security context is set on application containers created by the operator.
setDefaultSecurityContext: true
# kubeClientTimeout sets the request timeout for Kubernetes API calls made by the operator.
kubeClientTimeout: 60s
# elasticsearchClientTimeout sets the request timeout for Elasticsearch API calls made by the operator.
elasticsearchClientTimeout: 180s
# validateStorageClass specifies whether storage classes volume expansion support should be verified.
# Can be disabled if cluster-wide storage class RBAC access is not available.
validateStorageClass: true
# Internal use only
internal:
# manifestGen specifies whether the chart is running under manifest generator.
# This is used for tasks specific to generating the all-in-one.yaml file.
manifestGen: false
# createOperatorNamespace defines whether the operator namespace manifest should be generated when in manifestGen mode.
# Usually we do want that to happen (e.g. all-in-one.yaml) but sometimes we don't (e.g. E2E tests).
createOperatorNamespace: true
# kubeVersion is the effective Kubernetes version we target when generating the all-in-one.yaml.
kubeVersion: 1.12.0
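Tying these values together: the restricted, single-namespace profile shown earlier in this diff corresponds to an override file roughly like this sketch (file name illustrative):

    # restricted-values.yaml (illustrative)
    installCRDs: false
    createClusterScopedResources: false
    managedNamespaces: ["elastic-system"]
    config:
      validateStorageClass: false
    webhook:
      enabled: false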


@@ -24,7 +24,7 @@ spec:
{{- if .processors }}
node.processors: {{ .processors }}
{{- end }}
-  indices.memory.index_buffer_size: "30%"
+  indices.memory.index_buffer_size: "20%"
podTemplate:
{{- if $.Values.es.s3Snapshot.iamrole }}
metadata:
@@ -72,7 +72,7 @@ spec:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
-  - key: failure-domain.beta.kubernetes.io/zone
+  - key: topology.kubernetes.io/zone
operator: In
values:
- {{ .zone }}


@@ -1,8 +1,19 @@
#!/bin/bash
ECK_VERSION=1.5.0
FLUENT_BIT_VERSION=0.15.4
FLUENTD_VERSION=0.2.2
# fix ECK CRD handling to adhere to proper Helm v3 support, which also fixes ArgoCD applying updates on upgrades
helm repo list | grep elastic -qc || { helm repo add elastic https://helm.elastic.co; helm repo update; }
rm -rf charts/eck-operator && helm pull elastic/eck-operator --untar --untardir charts --version $ECK_VERSION
mkdir charts/eck-operator/crds
helm template charts/eck-operator/charts/eck-operator-crds --name-template logging > charts/eck-operator/crds/all-crds.yaml
rm -rf charts/eck-operator/charts
yq d charts/eck-operator/Chart.yaml dependencies -i
# Fluent Bit
rm -rf charts/fluent-bit
curl -L -s -o - https://github.com/fluent/helm-charts/releases/download/fluent-bit-${FLUENT_BIT_VERSION}/fluent-bit-${FLUENT_BIT_VERSION}.tgz | tar xfz - -C charts
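Note that `yq d ... -i` above is yq v3 syntax. On the Go-based yq v4 the equivalent in-place delete would be the following (an assumption for readers on v4, not part of this patch):

    yq -i 'del(.dependencies)' charts/eck-operator/Chart.yaml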


@@ -119,6 +119,8 @@ fluentd:
fileConfigs:
00_system.conf: |-
<system>
+  root_dir /var/log/fluentd
+  # log_level debug
workers 2
</system>
01_sources.conf: |-
@@ -161,7 +163,7 @@ fluentd:
04_outputs.conf: |-
<label @OUTPUT>
<match **>
-  @id elasticsearch
+  @id out_es
@type elasticsearch
@log_level info
include_tag_key true
@@ -183,18 +185,20 @@ fluentd:
reload_on_failure true
request_timeout 60s
suppress_type_name true
+  slow_flush_log_threshold 40.0
+  # bulk_message_request_threshold 2097152
<buffer tag>
@type file_single
path /var/log/fluentd-buffers/kubernetes.system.buffer
chunk_limit_size 8MB
chunk_limit_size 16MB
total_limit_size 4GB
flush_mode interval
flush_thread_count 2
flush_interval 10s
flush_at_shutdown true
retry_type exponential_backoff
retry_timeout 300m
retry_timeout 2h
flush_thread_interval 30s
overflow_action drop_oldest_chunk
disable_chunk_backup true
</buffer>
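With `file_single` buffers capped at 4GB and `drop_oldest_chunk` on overflow, the on-disk backlog is worth a periodic look; an illustrative check, with pod name and namespace assumed:

```console
# pod name and namespace are placeholders
kubectl -n logging exec fluentd-0 -- du -sh /var/log/fluentd-buffers
```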

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for prometheus-operator
type: application
version: 0.4.0
version: 0.4.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,11 +16,11 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: kube-prometheus-stack
version: 15.2.0
version: 15.4.4
# Switch back to upstream once all alerts are fixed eg. etcd gpcr
# repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter
version: 2.12.1
version: 2.12.3
repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-adapter.enabled
kubeVersion: ">= 1.18.0"

View File

@ -1,6 +1,6 @@
# kubezero-metrics
![Version: 0.3.4](https://img.shields.io/badge/Version-0.3.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for prometheus-operator
@ -18,8 +18,8 @@ Kubernetes: `>= 1.18.0`
| Repository | Name | Version |
|------------|------|---------|
| | kube-prometheus-stack | 14.3.0 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 2.12.1 |
| | kube-prometheus-stack | 15.4.4 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 2.12.3 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Values
@ -45,15 +45,32 @@ Kubernetes: `>= 1.18.0`
| kube-prometheus-stack.alertmanager.enabled | bool | `false` | |
| kube-prometheus-stack.coreDns.enabled | bool | `true` | |
| kube-prometheus-stack.defaultRules.create | bool | `true` | |
| kube-prometheus-stack.global.rbac.pspEnabled | bool | `false` | |
| kube-prometheus-stack.grafana."grafana.ini"."auth.anonymous".enabled | bool | `true` | |
| kube-prometheus-stack.grafana."grafana.ini".alerting.enabled | bool | `false` | |
| kube-prometheus-stack.grafana."grafana.ini".analytics.check_for_updates | bool | `false` | |
| kube-prometheus-stack.grafana."grafana.ini".dashboards.default_home_dashboard_path | string | `"/tmp/dashboards/zdt-home.json"` | |
| kube-prometheus-stack.grafana."grafana.ini".dashboards.min_refresh_interval | string | `"30s"` | |
| kube-prometheus-stack.grafana."grafana.ini".date_formats.default_timezone | string | `"UTC"` | |
| kube-prometheus-stack.grafana."grafana.ini".security.cookie_secure | bool | `true` | |
| kube-prometheus-stack.grafana."grafana.ini".security.disable_gravatar | bool | `true` | |
| kube-prometheus-stack.grafana."grafana.ini".security.strict_transport_security | bool | `true` | |
| kube-prometheus-stack.grafana."grafana.ini".server.enable_gzip | bool | `true` | |
| kube-prometheus-stack.grafana.defaultDashboardsEnabled | bool | `false` | |
| kube-prometheus-stack.grafana.enabled | bool | `true` | |
| kube-prometheus-stack.grafana.extraContainerVolumes[0].configMap.defaultMode | int | `511` | |
| kube-prometheus-stack.grafana.extraContainerVolumes[0].configMap.name | string | `"script-configmap"` | |
| kube-prometheus-stack.grafana.extraContainerVolumes[0].name | string | `"script-volume"` | |
| kube-prometheus-stack.grafana.initChownData.enabled | bool | `false` | |
| kube-prometheus-stack.grafana.persistence.enabled | bool | `true` | |
| kube-prometheus-stack.grafana.persistence.size | string | `"4Gi"` | |
| kube-prometheus-stack.grafana.persistence.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| kube-prometheus-stack.grafana.plugins[0] | string | `"grafana-piechart-panel"` | |
| kube-prometheus-stack.grafana.rbac.pspEnabled | bool | `false` | |
| kube-prometheus-stack.grafana.service.portName | string | `"http-grafana"` | |
| kube-prometheus-stack.grafana.sidecar.dashboards.provider.foldersFromFilesStructure | bool | `true` | |
| kube-prometheus-stack.grafana.sidecar.dashboards.searchNamespace | string | `"ALL"` | |
| kube-prometheus-stack.grafana.sidecar.image.tag | string | `"1.12.0"` | |
| kube-prometheus-stack.grafana.testFramework.enabled | bool | `false` | |
| kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| kube-prometheus-stack.kube-state-metrics.podSecurityPolicy.enabled | bool | `false` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
@ -82,26 +99,27 @@ Kubernetes: `>= 1.18.0`
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].separator | string | `";"` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].sourceLabels[0] | string | `"__meta_kubernetes_pod_node_name"` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].targetLabel | string | `"node"` | |
| kube-prometheus-stack.prometheus-node-exporter.rbac.pspEnabled | bool | `false` | |
| kube-prometheus-stack.prometheus-node-exporter.resources.requests.cpu | string | `"20m"` | |
| kube-prometheus-stack.prometheus-node-exporter.resources.requests.memory | string | `"16Mi"` | |
| kube-prometheus-stack.prometheus.enabled | bool | `true` | |
| kube-prometheus-stack.prometheus.prometheusSpec.logFormat | string | `"json"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues | bool | `false` | |
| kube-prometheus-stack.prometheus.prometheusSpec.portName | string | `"http-prometheus"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.memory | string | `"3Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.cpu | string | `"500m"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.memory | string | `"512Mi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.retention | string | `"8d"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `false` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName | string | `"ebs-sc-gp3-xfs"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.walCompression | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | |
| kube-prometheus-stack.prometheusOperator.namespaces.additional[0] | string | `"kube-system"` | |
| kube-prometheus-stack.prometheusOperator.namespaces.additional[1] | string | `"logging"` | |
| kube-prometheus-stack.prometheusOperator.namespaces.releaseNamespace | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| kube-prometheus-stack.prometheusOperator.resources.limits.memory | string | `"64Mi"` | |
| kube-prometheus-stack.prometheusOperator.resources.requests.cpu | string | `"20m"` | |

View File

@ -10,7 +10,7 @@ appVersion: 0.47.0
dependencies:
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: https://kubernetes.github.io/kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 2.13.*
- condition: nodeExporter.enabled
name: prometheus-node-exporter
@ -44,4 +44,4 @@ sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 15.2.0
version: 15.4.4

View File

@ -122,6 +122,12 @@ kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheu
### From 11.x to 12.x
Version 12 upgrades prometheus-operator from 0.43.x to 0.44.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating:
```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.44/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
```
The chart was migrated to support only helm v3 and later.
### From 10.x to 11.x

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 7.5.3
appVersion: 7.5.5
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
@ -19,4 +19,4 @@ name: grafana
sources:
- https://github.com/grafana/grafana
type: application
version: 6.8.0
version: 6.8.3

View File

@ -158,13 +158,16 @@ This version requires Helm >= 3.1.0.
| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.dashboards.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` |
| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.datasources.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` |
| `sidecar.notifiers.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |

View File

@ -96,7 +96,7 @@ initContainers:
- name: FOLDER
value: "/etc/grafana/provisioning/datasources"
- name: RESOURCE
value: "both"
value: {{ quote .Values.sidecar.datasources.resource }}
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
@ -131,7 +131,7 @@ initContainers:
- name: FOLDER
value: "/etc/grafana/provisioning/notifiers"
- name: RESOURCE
value: "both"
value: {{ quote .Values.sidecar.notifiers.resource }}
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
@ -180,7 +180,7 @@ containers:
- name: FOLDER
value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}"
- name: RESOURCE
value: "both"
value: {{ quote .Values.sidecar.dashboards.resource }}
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"

View File

@ -6,6 +6,10 @@ metadata:
namespace: {{ template "grafana.namespace" . }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
type: Opaque
data:
{{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}

View File

@ -53,7 +53,7 @@ livenessProbe:
image:
repository: grafana/grafana
tag: 7.5.3
tag: 7.5.5
sha: ""
pullPolicy: IfNotPresent
@ -615,6 +615,8 @@ sidecar:
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# search in configmap, secret or both
resource: both
# If specified, the sidecar will look for an annotation with this name to create a folder and put graphs there.
# You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
folderAnnotation: null
@ -644,6 +646,8 @@ sidecar:
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# search in configmap, secret or both
resource: both
notifiers:
enabled: false
# label that the configmaps with notifiers are marked with
@ -652,6 +656,8 @@ sidecar:
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# search in configmap, secret or both
resource: both
## Override the deployment namespace
##
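The new `resource` knob, wired into the sidecar env vars above, lets each sidecar be restricted to a single source kind instead of the previously hard-coded `both`; a minimal values sketch:

```yaml
# sketch: watch only ConfigMaps for dashboards, only Secrets for datasources
sidecar:
  dashboards:
    resource: configmap
  datasources:
    resource: secret
```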

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -172,7 +172,7 @@ resources: {}
kubeTargetVersionOverride: ""
# Enable self metrics configuration for service and Service Monitor
# Default values for telemetry configuration can be overriden
# Default values for telemetry configuration can be overridden
selfMonitor:
enabled: false
# telemetryHost: 0.0.0.0

View File

@ -49,7 +49,7 @@ The longest name that gets created adds an extra 37 characters, so truncation s
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: "{{ .Chart.Version }}"
app.kubernetes.io/part-of: {{ template "kube-prometheus-stack.name" . }}
app.kubernetes.io/part-of: {{ template "kube-prometheus-stack.name" . }}
chart: {{ template "kube-prometheus-stack.chartref" . }}
release: {{ $.Release.Name | quote }}
heritage: {{ $.Release.Service | quote }}
@ -94,4 +94,25 @@ Allow the release namespace to be overridden for multi-namespace deployments in
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/* Allow KubeVersion to be overridden. */}}
{{- define "kube-prometheus-stack.ingress.kubeVersion" -}}
{{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}}
{{- end -}}
{{/* Get Ingress API Version */}}
{{- define "kube-prometheus-stack.ingress.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "kube-prometheus-stack.ingress.kubeVersion" .)) -}}
{{- print "networking.k8s.io/v1" -}}
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/* Check Ingress stability */}}
{{- define "kube-prometheus-stack.ingress.isStable" -}}
{{- eq (include "kube-prometheus-stack.ingress.apiVersion" .) "networking.k8s.io/v1" -}}
{{- end -}}

View File

@ -4,13 +4,8 @@
{{- $servicePort := .Values.alertmanager.service.port -}}
{{- $routePrefix := list .Values.alertmanager.alertmanagerSpec.routePrefix }}
{{- $paths := .Values.alertmanager.ingress.paths | default $routePrefix -}}
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" -}}
apiVersion: networking.k8s.io/v1
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}}
apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }}
kind: Ingress
metadata:
name: {{ $serviceName }}
@ -26,7 +21,7 @@ metadata:
{{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
{{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }}
{{- if $apiIsStable }}
{{- if .Values.alertmanager.ingress.ingressClassName }}
ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }}
{{- end }}
@ -43,7 +38,7 @@ spec:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ $serviceName }}
port:
@ -63,7 +58,7 @@ spec:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ $serviceName }}
port:

View File

@ -3,6 +3,7 @@
{{- $count := .Values.alertmanager.alertmanagerSpec.replicas | int -}}
{{- $servicePort := .Values.alertmanager.service.port -}}
{{- $ingressValues := .Values.alertmanager.ingressPerReplica -}}
{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}}
apiVersion: v1
kind: List
metadata:
@ -11,13 +12,7 @@ metadata:
items:
{{ range $i, $e := until $count }}
- kind: Ingress
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
apiVersion: networking.k8s.io/v1
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }}
metadata:
name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }}
namespace: {{ template "kube-prometheus-stack.namespace" $ }}
@ -32,7 +27,7 @@ items:
{{ toYaml $ingressValues.annotations | indent 8 }}
{{- end }}
spec:
{{- if or ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") ($.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }}
{{- if $apiIsStable }}
{{- if $ingressValues.ingressClassName }}
ingressClassName: {{ $ingressValues.ingressClassName }}
{{- end }}
@ -47,7 +42,7 @@ items:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }}
port:

View File

@ -13,6 +13,8 @@ metadata:
annotations:
{{ toYaml .Values.alertmanager.serviceAccount.annotations | indent 4 }}
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.global.imagePullSecrets | indent 2 }}
{{- end }}
{{- end }}

View File

@ -150,5 +150,6 @@ spec:
- {{ .Values.kubelet.namespace }}
selector:
matchLabels:
app.kubernetes.io/managed-by: prometheus-operator
k8s-app: kubelet
{{- end}}

View File

@ -10,6 +10,8 @@ metadata:
labels:
app: {{ template "kube-prometheus-stack.name" $ }}-admission
{{- include "kube-prometheus-stack.labels" $ | indent 4 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.global.imagePullSecrets | indent 2 }}
{{- end }}
{{- end }}

View File

@ -9,6 +9,8 @@ metadata:
app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-prometheus-operator
app.kubernetes.io/component: prometheus-operator
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.global.imagePullSecrets | indent 2 }}
{{- end }}
{{- end }}

View File

@ -35,4 +35,4 @@ rules:
- "prometheus"
- "kubernetes-apps"
- "etcd"
{{- end }}
{{- end }}

View File

@ -1,6 +1,9 @@
{{- if or .Values.additionalPrometheusRules .Values.additionalPrometheusRulesMap}}
apiVersion: v1
kind: List
metadata:
name: {{ include "kube-prometheus-stack.fullname" $ }}-additional-prometheus-rules
namespace: {{ template "kube-prometheus-stack.namespace" . }}
items:
{{- if .Values.additionalPrometheusRulesMap }}
{{- range $prometheusRuleName, $prometheusRule := .Values.additionalPrometheusRulesMap }}

View File

@ -4,13 +4,8 @@
{{- $servicePort := .Values.prometheus.service.port -}}
{{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix -}}
{{- $paths := .Values.prometheus.ingress.paths | default $routePrefix -}}
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" -}}
apiVersion: networking.k8s.io/v1
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}}
apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }}
kind: Ingress
metadata:
{{- if .Values.prometheus.ingress.annotations }}
@ -26,7 +21,7 @@ metadata:
{{ toYaml .Values.prometheus.ingress.labels | indent 4 }}
{{- end }}
spec:
{{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }}
{{- if $apiIsStable }}
{{- if .Values.prometheus.ingress.ingressClassName }}
ingressClassName: {{ .Values.prometheus.ingress.ingressClassName }}
{{- end }}
@ -43,7 +38,7 @@ spec:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ $serviceName }}
port:
@ -63,7 +58,7 @@ spec:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ $serviceName }}
port:

View File

@ -4,13 +4,8 @@
{{- $thanosPort := .Values.prometheus.thanosIngress.servicePort -}}
{{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix }}
{{- $paths := .Values.prometheus.thanosIngress.paths | default $routePrefix -}}
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" -}}
apiVersion: networking.k8s.io/v1
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}}
apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }}
kind: Ingress
metadata:
{{- if .Values.prometheus.thanosIngress.annotations }}
@ -25,7 +20,7 @@ metadata:
{{ toYaml .Values.prometheus.thanosIngress.labels | indent 4 }}
{{- end }}
spec:
{{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }}
{{- if $apiIsStable }}
{{- if .Values.prometheus.thanosIngress.ingressClassName }}
ingressClassName: {{ .Values.prometheus.thanosIngress.ingressClassName }}
{{- end }}
@ -42,7 +37,7 @@ spec:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ $serviceName }}
port:
@ -62,7 +57,7 @@ spec:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ $serviceName }}
port:

View File

@ -3,6 +3,7 @@
{{- $count := .Values.prometheus.prometheusSpec.replicas | int -}}
{{- $servicePort := .Values.prometheus.servicePerReplica.port -}}
{{- $ingressValues := .Values.prometheus.ingressPerReplica -}}
{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}}
apiVersion: v1
kind: List
metadata:
@ -11,13 +12,7 @@ metadata:
items:
{{ range $i, $e := until $count }}
- kind: Ingress
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
apiVersion: networking.k8s.io/v1
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }}
metadata:
name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }}
namespace: {{ template "kube-prometheus-stack.namespace" $ }}
@ -32,7 +27,7 @@ items:
{{ toYaml $ingressValues.annotations | indent 8 }}
{{- end }}
spec:
{{- if or ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") ($.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }}
{{- if $apiIsStable }}
{{- if $ingressValues.ingressClassName }}
ingressClassName: {{ $ingressValues.ingressClassName }}
{{- end }}
@ -47,7 +42,7 @@ items:
pathType: {{ $pathType }}
{{- end }}
backend:
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
{{- if $apiIsStable }}
service:
name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }}
port:

View File

@ -159,7 +159,7 @@ spec:
{{- if (or .Values.prometheus.prometheusSpec.remoteRead .Values.prometheus.prometheusSpec.additionalRemoteRead) }}
remoteRead:
{{- if .Values.prometheus.prometheusSpec.remoteRead }}
{{ toYaml .Values.prometheus.prometheusSpec.remoteRead | indent 4 }}
{{ tpl (toYaml .Values.prometheus.prometheusSpec.remoteRead | indent 4) . }}
{{- end }}
{{- if .Values.prometheus.prometheusSpec.additionalRemoteRead }}
{{ toYaml .Values.prometheus.prometheusSpec.additionalRemoteRead | indent 4 }}
@ -168,7 +168,7 @@ spec:
{{- if (or .Values.prometheus.prometheusSpec.remoteWrite .Values.prometheus.prometheusSpec.additionalRemoteWrite) }}
remoteWrite:
{{- if .Values.prometheus.prometheusSpec.remoteWrite }}
{{ toYaml .Values.prometheus.prometheusSpec.remoteWrite | indent 4 }}
{{ tpl (toYaml .Values.prometheus.prometheusSpec.remoteWrite | indent 4) . }}
{{- end }}
{{- if .Values.prometheus.prometheusSpec.additionalRemoteWrite }}
{{ toYaml .Values.prometheus.prometheusSpec.additionalRemoteWrite | indent 4 }}
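Routing these values through `tpl` means `remoteRead`/`remoteWrite` entries may now contain Go-template expressions that resolve against other values; a minimal sketch, where `global.domain` is a hypothetical value:

```yaml
prometheus:
  prometheusSpec:
    remoteWrite:
      - url: "https://metrics.{{ .Values.global.domain }}/api/v1/write"
```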

View File

@ -1,5 +1,5 @@
{{- /*
Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/v3.4.0/op-guide/etcd3_alert.rules.yml
Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/en/docs/v3.4/op-guide/etcd3_alert.rules.yml
Do not change in-place! In order to change this file, first read the following link:
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}

View File

@ -26,7 +26,7 @@ spec:
rules:
- expr: |-
sum by (cluster, namespace, pod, container) (
rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!="", container!="POD"}[5m])
rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m])
) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (
1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""})
)
@ -56,27 +56,27 @@ spec:
)
record: node_namespace_pod_container:container_memory_swap
- expr: |-
sum by (namespace) (
sum by (namespace, pod) (
max by (namespace, pod, container) (
kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"}
) * on(namespace, pod) group_left() max by (namespace, pod) (
kube_pod_status_phase{phase=~"Pending|Running"} == 1
)
)
)
record: namespace:kube_pod_container_resource_requests_memory_bytes:sum
- expr: |-
sum by (namespace) (
sum by (namespace, pod) (
max by (namespace, pod, container) (
kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"}
) * on(namespace, pod) group_left() max by (namespace, pod) (
sum by (namespace, cluster) (
sum by (namespace, pod, cluster) (
max by (namespace, pod, container, cluster) (
kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"}
) * on(namespace, pod, cluster) group_left() max by (namespace, pod) (
kube_pod_status_phase{phase=~"Pending|Running"} == 1
)
)
)
record: namespace:kube_pod_container_resource_requests_cpu_cores:sum
record: namespace_memory:kube_pod_container_resource_requests:sum
- expr: |-
sum by (namespace, cluster) (
sum by (namespace, pod, cluster) (
max by (namespace, pod, container, cluster) (
kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"}
) * on(namespace, pod, cluster) group_left() max by (namespace, pod) (
kube_pod_status_phase{phase=~"Pending|Running"} == 1
)
)
)
record: namespace_cpu:kube_pod_container_resource_requests:sum
- expr: |-
max by (cluster, namespace, workload, pod) (
label_replace(

View File

@ -82,7 +82,7 @@ spec:
!=
kube_deployment_status_replicas_available{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}
) and (
changes(kube_deployment_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m])
changes(kube_deployment_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[10m])
==
0
)
@ -103,7 +103,7 @@ spec:
!=
kube_statefulset_status_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}
) and (
changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m])
changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[10m])
==
0
)
@ -273,7 +273,7 @@ spec:
<
kube_hpa_spec_max_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"})
and
changes(kube_hpa_status_current_replicas[15m]) == 0
changes(kube_hpa_status_current_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[15m]) == 0
for: 15m
labels:
severity: warning

View File

@ -30,11 +30,11 @@ spec:
runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuovercommit
summary: Cluster has overcommitted CPU resource requests.
expr: |-
sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum{})
sum(namespace_cpu:kube_pod_container_resource_requests:sum{})
/
sum(kube_node_status_allocatable_cpu_cores)
sum(kube_node_status_allocatable{resource="cpu"})
>
(count(kube_node_status_allocatable_cpu_cores)-1) / count(kube_node_status_allocatable_cpu_cores)
((count(kube_node_status_allocatable{resource="cpu"}) > 1) - 1) / count(kube_node_status_allocatable{resource="cpu"})
for: 5m
labels:
severity: warning
@ -47,13 +47,13 @@ spec:
runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememoryovercommit
summary: Cluster has overcommitted memory resource requests.
expr: |-
sum(namespace:kube_pod_container_resource_requests_memory_bytes:sum{})
sum(namespace_memory:kube_pod_container_resource_requests:sum{})
/
sum(kube_node_status_allocatable_memory_bytes)
sum(kube_node_status_allocatable{resource="memory"})
>
(count(kube_node_status_allocatable_memory_bytes)-1)
((count(kube_node_status_allocatable{resource="memory"}) > 1) - 1)
/
count(kube_node_status_allocatable_memory_bytes)
count(kube_node_status_allocatable{resource="memory"})
for: 5m
labels:
severity: warning
@ -68,7 +68,7 @@ spec:
expr: |-
sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="cpu"})
/
sum(kube_node_status_allocatable_cpu_cores)
sum(kube_node_status_allocatable{resource="cpu"})
> 1.5
for: 5m
labels:
@ -84,7 +84,7 @@ spec:
expr: |-
sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="memory"})
/
sum(kube_node_status_allocatable_memory_bytes{job="kube-state-metrics"})
sum(kube_node_status_allocatable{resource="memory",job="kube-state-metrics"})
> 1.5
for: 5m
labels:

View File

@ -51,7 +51,7 @@ spec:
description: An aggregated API {{`{{`}} $labels.name {{`}}`}}/{{`{{`}} $labels.namespace {{`}}`}} has reported errors. It has appeared unavailable {{`{{`}} $value | humanize {{`}}`}} times averaged over the past 10m.
runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-aggregatedapierrors
summary: An aggregated API has reported errors.
expr: sum by(name, namespace)(increase(aggregator_unavailable_apiservice_count[10m])) > 4
expr: sum by(name, namespace)(increase(aggregator_unavailable_apiservice_total[10m])) > 4
labels:
severity: warning
{{- if .Values.defaultRules.additionalRuleLabels }}

View File

@ -59,7 +59,7 @@ spec:
)
/
max by(node) (
kube_node_status_capacity_pods{job="kube-state-metrics"} != 1
kube_node_status_capacity{job="kube-state-metrics",resource="pods"} != 1
) > 0.95
for: 15m
labels:

View File

@ -29,7 +29,7 @@ spec:
description: There are {{`{{`}} $value {{`}}`}} different semantic versions of Kubernetes components running.
runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeversionmismatch
summary: Different semantic versions of Kubernetes components running.
expr: count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*).*"))) > 1
expr: count(count by (git_version) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"git_version","$1","git_version","(v[0-9]*.[0-9]*).*"))) > 1
for: 15m
labels:
severity: warning

View File

@ -34,7 +34,7 @@ spec:
count by (cluster, node) (sum by (node, cpu) (
node_cpu_seconds_total{job="node-exporter"}
* on (namespace, pod) group_left(node)
node_namespace_pod:kube_pod_info:
topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:)
))
record: node:node_num_cpu:sum
- expr: |-

View File

@ -1,5 +1,5 @@
{{- /*
Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/v3.4.0/op-guide/etcd3_alert.rules.yml
Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/en/docs/v3.4/op-guide/etcd3_alert.rules.yml
Do not change in-place! In order to change this file, first read the following link:
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack
*/ -}}

View File

@ -0,0 +1,28 @@
{{- if and .Values.prometheus.enabled .Values.prometheus.thanosServiceExternal.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-external
namespace: {{ template "kube-prometheus-stack.namespace" . }}
labels:
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
{{- if .Values.prometheus.thanosServiceExternal.labels }}
{{ toYaml .Values.prometheus.thanosServiceExternal.labels | indent 4 }}
{{- end }}
{{- if .Values.prometheus.thanosServiceExternal.annotations }}
annotations:
{{ toYaml .Values.prometheus.thanosServiceExternal.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.prometheus.thanosServiceExternal.type }}
ports:
- name: {{ .Values.prometheus.thanosServiceExternal.portName }}
port: {{ .Values.prometheus.thanosServiceExternal.port }}
targetPort: {{ .Values.prometheus.thanosServiceExternal.targetPort }}
{{- if eq .Values.prometheus.thanosServiceExternal.type "NodePort" }}
nodePort: {{ .Values.prometheus.thanosServiceExternal.nodePort }}
{{- end }}
selector:
app: prometheus
prometheus: {{ template "kube-prometheus-stack.fullname" . }}-prometheus
{{- end }}
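The template above only renders when explicitly enabled; a minimal values sketch to expose the Thanos sidecar outside the cluster:

```yaml
prometheus:
  thanosServiceExternal:
    enabled: true
    type: LoadBalancer   # or NodePort, together with nodePort: 30901
```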

View File

@ -13,6 +13,8 @@ metadata:
annotations:
{{ toYaml .Values.prometheus.serviceAccount.annotations | indent 4 }}
{{- end }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.global.imagePullSecrets | indent 2 }}
{{- end }}
{{- end }}

View File

@ -14,6 +14,10 @@ namespaceOverride: ""
##
kubeTargetVersionOverride: ""
## Allow kubeVersion to be overridden while creating the ingress
##
kubeVersionOverride: ""
## Provide a name to substitute for the full names of resources
##
fullnameOverride: ""
@ -1607,6 +1611,24 @@ prometheus:
##
nodePort: 30901
# Service for external access to sidecar
# Enabling this creates a service to expose thanos-sidecar outside the cluster.
thanosServiceExternal:
enabled: false
annotations: {}
labels: {}
portName: grpc
port: 10901
targetPort: "grpc"
## Service type
##
type: LoadBalancer
## Port to expose on each node
##
nodePort: 30901
## Configuration for Prometheus service
##
service:
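The new `kubeVersionOverride` knob is mainly useful when rendering offline, where `.Capabilities.KubeVersion` reflects the Helm client default rather than the target cluster; an illustrative invocation (chart path assumed):

```console
helm template charts/kube-prometheus-stack --set kubeVersionOverride=1.19.0
```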

View File

@ -7,6 +7,9 @@ dashboards:
tags: ['kubernetes', 'DNS']
- name: etcd
url: https://grafana.com/api/dashboards/3070/revisions/3/download
tags: ['kubernetes', 'etcd']
- name: node
url: https://grafana.com/api/dashboards/1860/revisions/23/download
tags: ['kubernetes']
# cd kube-mixin; for f in *.json; do echo "- name: ${f%%.json}" >> ../dashboards.yaml; echo " url: file://kube-mixin/$f" >> ../dashboards.yaml; done; cd -
- name: apiserver

View File

@ -130,7 +130,7 @@
"type": "timepicker"
},
"timezone": "browser",
"title": "Home Copy",
"title": "ZeroDownTime Home",
"uid": "6QOeg59Mz",
"version": 4
}

View File

@ -34,11 +34,17 @@ with open(config_file, 'r') as yaml_contents:
config = yaml.safe_load(yaml_contents.read())
configmap = ''
if 'condition' in config:
configmap = '''{{- if %(condition)s }}
# use index function to make go template happy if '-' in names
if '-' in config['condition']:
tokens = config['condition'].split('.')
configmap = '''{{- if index .Values %(condition)s }}
''' % {'condition': ' '.join(f'"{w}"' for w in tokens[2:])}
else:
configmap = '''{{- if %(condition)s }}
''' % config
else:
configmap = ''
# Base configmap for KubeZero
configmap += '''apiVersion: v1
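For subchart keys containing `-`, a plain `.Values.fluent-bit.enabled` would not parse as a Go-template path, so the generator rewrites the guard via `index`; for an illustrative condition `.Values.fluent-bit.enabled` the emitted header becomes:

```
{{- if index .Values "fluent-bit" "enabled" }}
```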

File diff suppressed because one or more lines are too long

View File

@ -8,4 +8,4 @@ metadata:
{{ include "kubezero-lib.labels" . | indent 4 }}
binaryData:
home.json.gz:
H4sIAAAAAAAC/9VVS2/UMBC+8yuMD6hI7e5mH63ghlohKkAUWopEWyFvPEmsdexgO93dVv3v2M7L2RQqbnDZ9XwzHs/jy8z9M4QwEUIaYpgUGr9G9xayIGfaWOnKS6hGvWZZMm5OhVVG+x1KiSFalioGq8BnSuZgMig1DmxAkCV3eqNKCPCM0UdQFktxLLlUzqFKl2Rvso+mUWR/Fot9FL0MXQuS+4ffdLmgF+gNB2V6IZht4e0o0dlSEkVxrXvw/zf298GZY6DM7ESLUwHmlFpElJxXiCJFdiElN6yw+MSDzJlMX/kzZ2Llqnp148WCCOC6rWtT1X71Gu9ekzDg9FiKhKVtc6orkJCSG9+yhyBDeQtK2YJWrzbZtf5SqzuTuu8rs+Is8LF2CcwDYNMkV8tbJw98+8QPW1EWfU550DbVgHDUws+vvp9coA8ylTd7mTGFfj0ex1SM7kDJAyrXwrAcRrbmY6I1GD1elUtwyjG3dw50Tjg/OJyPCpGia3xHzTV+eS2+AY8t+ZCR6L21/27t0UcwisUaPUdtHEEyufT0wzlRK/csHiRW8DJl4hKUtgk506PRYjRrfWAX6FtL+d3mOfw8Y4kZKoynFn5nQw381Ow0sDFVFHUM/xJRosUOU6LpE0yZPc6UlnE7X8nvyZNITkH5TzB8IyebUwO5M56F+M8SlHsfh93WmVy/A0KZSPVw6DjtF4gtQ/n2ksEa6OM250BU7KqREK4HSkOUeuymIelvqv0kw5qbQwqdNKNMD4nkxpyf5P8vmXrAH9g0/3s2AdCvirs6NeNnOHqYoLAZbXI+nAptBz4QA9qgxE4AZMuJlnY+DXshYK3rPtjfah8oSBRoVwg8m1T9wzrOICcdFaZHFWy2vO6pWlWWPVLYoZEX3K4+kQ63eLfXXGKdPqmGFhZyfRBldczYyBrDvWsFi1egust2bVMQPZY3Cf1gdsqrWxLsOqtcBAyNJoEwC4Uo786L4ByFwmwSarLuPA3OUb3db5q8bBI/OipcPflK6PgwdBy+Mp2HAu3ORzSMdzeWZtR3he0V+04Kr18qudZWWSuCtYGOZbGt8NJ/Afjw8ydIF68+3lXobUuh+bOHXxBMSbrqCQAA
H4sIAAAAAAAC/9VVS28UORC+8yuMDwikZGZ6HonghogQEaBllyxIJBHytKu7rXHbvbY7MwnKf9+y++WZDkR7Wy4zrq/K5Xp8XfXjCSGUKaUdc0IrS1+RHwghKIV1KF0GibRo0KxrId25QmVyNKCcOWZ1bVJABf1kdAmugNrSyAYUW0uvd6aGCC8EfwAVqVZvtNTGOzT5mj2fHZF5kuDPanVEkhexa8XK8PDrIRfyjLyWYNxeCO62Cnac2WKtmeG01d2H/2v8vffmFLhwB9HSXIE754ioWsoGMawqLrSWTlSIzwIovMn8ZThLoTa+qpfXQayYAmn7unZV3a9e5z1oMgGSv9EqE3nfnOYKZKyWLrTsPspQ34AxWNDm1S673l+Ouk/a7vsqUFxEPrY+gWUE7LrkWvnWyyPfIfGTXtTVPqcCiE11oDy16NPLb2cX5IPO9fXzwrnKvppOU64md2D0Mddb5UQJE6z5lFkLzk439Rq8cirxzrEtmZTHJ8tJpXJyRe+4u6IvrtRXkCmSjzhN3qP9N7QnH8EZkVrylPRxRMmUOtCPlsxs/LN0lFgl61yoL2AsJuRNTyeryaL3QX2gb5Hyh83z+OdCZG6scIFa9B2GGvlp2elg55oo2hj+T0RJVgdMSeaPMGXxMFN6xh18JT8nT6YlBxM+wfiNku3OHZTeeBHj/9Rg/Ps07rYt9PYdMC5UbsdDx2v/ghQZKm+/CNgCf9jmMzCT+mpkTNqR0jFjHrrpWP6Taj/KsO7mmEJn3SizYyL5MRcm+e9Lpj3gF2xa/nc2AfC/jfR16sbPePQIxWE32ZVyPBX6DnxgDqwjGU4AguUka5xP414o2Nq2D/jb7AMDmQHrC0EXs6Z/1KYFlGygwvy0gd2tbHtqNo3lHilwaJSVxNWn8vEWH/aaT2zQZ83Qokpvj5OijZk63WJ071ol0g2Y4TKubQ5qj+VdQt8FTnlzw6Jdh8pVxNBkFgmLWEjK4byKzkksLGaxphjO8+ictNv9ussLk/g+UOHy0Vdixyex4/iV+TIW+HA+5XG8h7F0o34o7F6x77QK+rXRW4vKVtEyzm+1M6TpBVqSfofQOnwJ9OTPPyBfvfx416A3PZWWT+7/BfIgcBPyCQAA

View File

@ -1,6 +1,6 @@
#!/bin/bash
VERSION=15.2.0
VERSION=15.4.4
rm -rf charts/kube-prometheus-stack
curl -L -s -o - https://github.com/prometheus-community/helm-charts/releases/download/kube-prometheus-stack-${VERSION}/kube-prometheus-stack-${VERSION}.tgz | tar xfz - -C charts

View File

@ -0,0 +1,26 @@
apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.2.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- nats
- rabbitmq
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: nats
version: 0.8.3
#repository: https://nats-io.github.io/k8s/helm/charts/
condition: nats.enabled
- name: rabbitmq
version: 8.13.1
repository: https://charts.bitnami.com/bitnami
condition: rabbitmq.enabled
kubeVersion: ">= 1.18.0"

View File

@ -0,0 +1,58 @@
# kubezero-mq
![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 | | |
## Requirements
Kubernetes: `>= 1.18.0`
| Repository | Name | Version |
|------------|------|---------|
| | nats | 0.8.3 |
| https://charts.bitnami.com/bitnami | rabbitmq | 8.13.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| nats.enabled | bool | `false` | |
| nats.exporter.serviceMonitor.enabled | bool | `false` | |
| nats.nats.advertise | bool | `false` | |
| nats.nats.image | string | `"nats:2.2.1-alpine3.13"` | |
| nats.nats.jetstream.enabled | bool | `true` | |
| nats.natsbox.enabled | bool | `false` | |
| rabbitmq.auth.erlangCookie | string | `"randomlongerlangcookie"` | |
| rabbitmq.auth.password | string | `"supersecret"` | |
| rabbitmq.auth.tls.enabled | bool | `false` | |
| rabbitmq.auth.tls.existingSecret | string | `"rabbitmq-server-certificate"` | |
| rabbitmq.auth.tls.existingSecretFullChain | bool | `true` | |
| rabbitmq.auth.tls.failIfNoPeerCert | bool | `false` | |
| rabbitmq.clustering.forceBoot | bool | `true` | |
| rabbitmq.enabled | bool | `false` | |
| rabbitmq.hosts | list | `[]` | hostnames of rabbitmq services, used for Istio and TLS |
| rabbitmq.istio.enabled | bool | `false` | |
| rabbitmq.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| rabbitmq.metrics.enabled | bool | `false` | |
| rabbitmq.metrics.serviceMonitor.enabled | bool | `false` | |
| rabbitmq.pdb.create | bool | `true` | |
| rabbitmq.podAntiAffinityPreset | string | `""` | |
| rabbitmq.replicaCount | int | `1` | |
| rabbitmq.resources.requests.cpu | string | `"100m"` | |
| rabbitmq.resources.requests.memory | string | `"256Mi"` | |
| rabbitmq.topologySpreadConstraints | string | `"- maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels: {{- include \"common.labels.matchLabels\" . | nindent 6 }}\n- maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels: {{- include \"common.labels.matchLabels\" . | nindent 6 }}"` | |
## Resources
### NATS
- https://grafana.com/grafana/dashboards/13707

View File

@ -16,4 +16,6 @@
{{ template "chart.valuesSection" . }}
## Resources
### NATS
- https://grafana.com/grafana/dashboards/13707

View File

@ -0,0 +1,8 @@
configmap: grafana-dashboards-nats
condition: '.Values.nats.exporter.serviceMonitor.enabled'
gzip: true
# folder:
dashboards:
- name: nats
url: https://grafana.com/api/dashboards/13707/revisions/1/download
tags: ['NATS']

View File

@ -0,0 +1,8 @@
configmap: grafana-dashboards-rabbitmq
condition: '.Values.rabbitmq.metrics.enabled'
gzip: true
# folder:
dashboards:
- name: rabbitmq
url: https://grafana.com/api/dashboards/10991/revisions/11/download
tags: ['RabbitMQ']

View File

@ -0,0 +1,35 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: nats
namespace: argocd
spec:
project: default
source:
repoURL: 'https://github.com/zero-down-time/kubezero'
path: charts/kubezero-mq
targetRevision: master
helm:
values: |
nats:
enabled: true
nats:
jetstream:
memStorage:
enabled: true
size: 128Mi
fileStorage:
enabled: true
storageClassName: ebs-sc-gp3-xfs
exporter:
serviceMonitor:
enabled: true
destination:
server: 'https://kubernetes.default.svc'
namespace: nats
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,44 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: rabbitmq
namespace: argocd
spec:
project: default
source:
repoURL: 'https://github.com/zero-down-time/kubezero'
path: charts/kubezero-mq
targetRevision: master
helm:
values: |
rabbitmq:
enabled: true
replicaCount: 3
hosts:
- mq.example.com
auth:
password: blablabla
erlangCookie: changemeplease
tls:
enabled: true
# Some custom plugin to be installed at boot
communityPlugins: "https://github.com/rabbitmq/rabbitmq-delayed-message-exchange/releases/download/3.8.9/rabbitmq_delayed_message_exchange-3.8.9-0199d11c.ez"
extraPlugins: "rabbitmq_delayed_message_exchange"
# Enabled metrics
metrics:
enabled: true
serviceMonitor:
enabled: true
destination:
server: 'https://kubernetes.default.svc'
namespace: rabbitmq
syncPolicy:
automated:
prune: true
syncOptions:
- CreateNamespace=true

View File

@ -0,0 +1,13 @@
{{- if .Values.nats.exporter.serviceMonitor.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards-nats" | trunc 63 | trimSuffix "-" }}
  namespace: {{ .Release.Namespace }}
  labels:
    grafana_dashboard: "1"
{{ include "kubezero-lib.labels" . | indent 4 }}
binaryData:
  nats.json.gz: H4sIAAAAAAAC/+2cW2/bNhSA3/MrBD2tgBNYci7b3lI3XQssF9TuXobAoCVaIkKRGknFTgP/95GUbFEWlWZFFistXwyLh+Q5pM7lCx3p8cDz/NkMkbwQ3P/d+1tee96j/pQSAjIoW/13k9nV+XRyePPp+vJi+uHi88QfbPpgMIdYdSp7MJpBkcKC1z1iyCOGcoEo0ZPBBSiw8Oqu3ofp9MbzLkicU0REPVI85Fp/DATgtGARrGU5LhJEPsZKnlu0lvKragWGXbrDWn7eDsrVM/hPgRi0rH+jPwdErnE7NdJKEwbytG7c7NUfzeZ7yHi18ErzwK5DTrcABFi0NJsNPU2BoWl0FBwFT6uzbSnq3EzS3kab3uBoeDTc3WA9Kykw1lcCCQw37uJNIJPDvXeAp3MKWKxn3XUYa09vQZlXcOgtkUi9eYGwOETEdCo97GKVUybkQOlX1CNAcI8uFihCAHspxJkXpYCJcjW+AEntBFqtv10FFw+42jh2V3VHGfxCiW6dM7rkkJUCGCMB5rq7YAXUbSmK4ZgSwShWOhYA81LApQEwHjPK5TfEGjI1adsrI4oxyDk0u2qBRa9uTyFKUqHMDE+G+cqIEeXXtQZTi5YDjAAfU0yZ6vS4HpjCOWB814Qy3mvXsrvMU8aWMsYos869QFglm6DRmDDtY6bp6vakMqZTiuPA8D+rXK9Q2cqSOfglDE4HXjgcyo+zgTc8Cs/e+F1Dw69MHe5MPToeeEEQVh9y7vCNbwxtbrBeVHOliF/BpW3HMEwgsWyCvIMJOefTjp1WPe4TuyAqGINE2IUZWHUIELHdOO3odGkfI6gAuGPUPcCFTs1qXOdWYUS2nVqCJYpFKoXhruROu/5to1ndzRtVhS5prP03ooTASMC46b45lP5NZMaAVj/VhYyBGBVKx0lbZg8dueExZFB7zAJT0dQpEwyC/FrmQenw0GI8lwEtW0+bjQJEd1ZtXMA8h/GfiNgXIQBLoGjmh90cUcbrKtcmq/w6uwfsyyzKi52gUd5LZCaWN/Q9iISOirDVpfTj95RlQCesx0euE/8Mxet1e0aZWRiKnqGawUWJCudtmdoGKTputK+Nq+Ymq7T/XiY1S+hr2SRFC2EXVqWvqmXjm8/N+ysoxQLl7SjO+CfIKS6qimiPLlVG7PHFZQmUkqEttGYbIIiKrMBAoHvYnZGkRxPpY1VasDhMzTIGA2nRCqwQb6/MSAqdWh/ACj7DCRdbp6lis32nN6zaTtlKSJO3QFfVoO1nOt1Zh5UJzyqyL25neU+uRM7AerwOM0YOLKtzMNEvmBg5mHAw8fpgIoPZvmDCqrqPMHEJM8oeXhtP9IMX5g8CckcL+6GFg52w0OOnlXc3jjG2Ln89ke6uYpXbz7jcAclPxDRnL8M0thz5DKixDXNUs3+qGe2fahCZdRSel0Gbbv3P4Jtg+GKA81YZ6X0kjm0c27iTEEcN34UafnXU4KjhlVJDxpN9QoNdfb+YQf/2fykNddzwjdzQ83rruMFxwz644cRxg+OG18gNtBD7PW54woA+njdcF8KBgztwcODgwOG7gMPpDwQOTP1aNkExdGDhwELV9X2eSHTr7+uRhEMLdybxU/yfxjRltEjSXPq7+z+Nnx2AQve/p/8Bb9oO0w++Ce1807qN3wdvjnvwIEu5abIAP4dwgv/lgZanTHgG5IQvxjjjDksd4fSVDIYOb9zJSb/B4diBgwOH1wcOvJhv32Jgqdtp5/HZi52cfMXCfj3l0mmrYwvHFo4tHFt8E1v85tjCscUrZAtMl+pYgBcZZHs7l/iaFb06mphIY72x1VhHEI4gfrwfX8YYyQS686Ds9nVmKmi2XuMvytDyCV0eBhsHk2FRtanB6+2wHEV3OodWg2WYq3o82yQd06X8E+NFccHQuBiZF0H9kL1/YnwPzIvR0JQYr9gLje9BXK70drMGafCMVn82mHZ1aTEnPjUnNrWEx+ZFXbr8s9i0d2NLvX0wy1Wsk6TePoy40LVm2wsQIkv2xuTObtW+6/f9VZvp8yiFGfhr+0a+QJdG4xV95XWzOPsJgUJn6mB0Njw7WP8LyPVbsRZSAAA=
{{- end }}

File diff suppressed because one or more lines are too long
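The suppressed file is presumably `templates/rabbitmq/grafana-dashboards.yaml`, the RabbitMQ counterpart generated by `update.sh` below; its gzipped dashboard payload is too long to display. A hedged sketch of its shape, inferred from the NATS template above and the `dashboards-rabbitmq.yaml` config (the actual binaryData payload is omitted):

```yaml
{{- if .Values.rabbitmq.metrics.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards-rabbitmq" | trunc 63 | trimSuffix "-" }}
  namespace: {{ .Release.Namespace }}
  labels:
    grafana_dashboard: "1"
{{ include "kubezero-lib.labels" . | indent 4 }}
binaryData:
  rabbitmq.json.gz: <gzipped, base64-encoded dashboard JSON; omitted>
{{- end }}
```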

View File

@ -0,0 +1,35 @@
{{- if .Values.rabbitmq.istio.enabled }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: rabbit-amqp
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
  gateways:
  - {{ .Values.rabbitmq.istio.gateway }}
  hosts:
{{ toYaml .Values.rabbitmq.hosts | indent 2 }}
  tcp:
  - match:
    - port: 5672
    route:
    - destination:
        host: rabbitmq-headless
        port:
          number: 5672
  - match:
    - port: 5671
    route:
    - destination:
        host: rabbitmq-headless
        port:
          number: 5671
  http:
  - route:
    - destination:
        host: rabbitmq-headless
        port:
          number: 15672
{{- end }}
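The VirtualService above routes AMQP (5672) and AMQPS (5671) as plain TCP and the management UI (15672) as HTTP, all towards the `rabbitmq-headless` service, with gateway and hosts taken from the chart values. A minimal sketch of the values that enable it (`mq.example.com` is a placeholder):

```yaml
rabbitmq:
  hosts:
    - mq.example.com        # placeholder hostname
  istio:
    enabled: true
    gateway: istio-ingress/private-ingressgateway
```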

View File

@ -0,0 +1,16 @@
{{- if .Values.rabbitmq.auth.tls.enabled }}
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: rabbitmq-server-certificate
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
  secretName: rabbitmq-server-certificate
  issuerRef:
    name: letsencrypt-dns-prod
    kind: ClusterIssuer
  dnsNames:
{{ toYaml .Values.rabbitmq.hosts | indent 2 }}
{{- end }}
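When `rabbitmq.auth.tls.enabled` is set, cert-manager issues a certificate covering every entry in `rabbitmq.hosts` through the `letsencrypt-dns-prod` ClusterIssuer, and the resulting secret name matches the chart's `rabbitmq.auth.tls.existingSecret` default, so the broker picks it up without extra wiring. A minimal sketch, assuming the ClusterIssuer already exists in the cluster:

```yaml
rabbitmq:
  hosts:
    - mq.example.com   # placeholder hostname, becomes a dnsName on the certificate
  auth:
    tls:
      enabled: true
      failIfNoPeerCert: false
```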

15
charts/kubezero-mq/update.sh Executable file
View File

@ -0,0 +1,15 @@
#!/bin/bash
set -ex
### NATS
# get the latest chart until the upstream repo is fixed
rm -rf charts/nats && mkdir -p charts/nats
git clone --depth=1 https://github.com/nats-io/k8s.git
cp -r k8s/helm/charts/nats/* charts/nats/
rm -rf k8s
# Fetch dashboards
../kubezero-metrics/sync_grafana_dashboards.py dashboards-nats.yaml templates/nats/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboards-rabbitmq.yaml templates/rabbitmq/grafana-dashboards.yaml

View File

@ -0,0 +1,69 @@
# nats
nats:
  enabled: false
  nats:
    image: nats:2.2.1-alpine3.13
    advertise: false
    jetstream:
      enabled: true
  natsbox:
    enabled: false
  exporter:
    serviceMonitor:
      enabled: false

# rabbitmq
rabbitmq:
  enabled: false
  # rabbitmq.hosts -- hostnames of rabbitmq services, used for Istio and TLS
  hosts: []
  istio:
    enabled: false
    gateway: istio-ingress/private-ingressgateway
  auth:
    password: "supersecret"
    erlangCookie: "randomlongerlangcookie"
    tls:
      enabled: false
      failIfNoPeerCert: false
      existingSecret: rabbitmq-server-certificate
      existingSecretFullChain: true
  clustering:
    forceBoot: true
  resources:
    requests:
      memory: 256Mi
      cpu: 100m
  replicaCount: 1
  podAntiAffinityPreset: ""
  topologySpreadConstraints: |-
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
  pdb:
    create: true
  metrics:
    enabled: false
    serviceMonitor:
      enabled: false
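The `topologySpreadConstraints` value is a template string that the upstream Bitnami chart renders at install time. Assuming the `common.labels.matchLabels` helper emits the usual name/instance label pair, the constraints for a release named `rabbitmq` would render roughly as follows (illustrative only), spreading replicas across zones first and hosts second:

```yaml
- maxSkew: 1
  topologyKey: topology.kubernetes.io/zone
  whenUnsatisfiable: DoNotSchedule
  labelSelector:
    matchLabels:
      app.kubernetes.io/name: rabbitmq       # assumed helper output
      app.kubernetes.io/instance: rabbitmq
- maxSkew: 1
  topologyKey: kubernetes.io/hostname
  whenUnsatisfiable: DoNotSchedule
  labelSelector:
    matchLabels:
      app.kubernetes.io/name: rabbitmq
      app.kubernetes.io/instance: rabbitmq
```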

View File

@ -1,17 +0,0 @@
apiVersion: v2
name: kubezero-nats
description: KubeZero umbrella chart for NATS
type: application
version: 0.1.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- nats
maintainers:
- name: Quarky9
dependencies:
- name: nats
  version: 0.8.3
  #repository: https://nats-io.github.io/k8s/helm/charts/
kubeVersion: ">= 1.18.0"

View File

@ -1,24 +0,0 @@
# kubezero-nats
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for NATS
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 | | |
## Requirements
Kubernetes: `>= 1.18.0`
| Repository | Name | Version |
|------------|------|---------|
| | nats | 0.8.3 |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.5.0](https://github.com/norwoodj/helm-docs/releases/v1.5.0)

View File

@ -1,9 +0,0 @@
#!/bin/bash
set -ex
# get the latest chart until the upstream repo is fixed
rm -rf charts/nats && mkdir -p charts/nats
git clone --depth=1 https://github.com/nats-io/k8s.git
cp -r k8s/helm/charts/nats/* charts/nats/
rm -rf k8s

Some files were not shown because too many files have changed in this diff.