From abddab3899e9c63a48af3a9c70fce247d599b2d2 Mon Sep 17 00:00:00 2001 From: Stefan Reimer Date: Fri, 14 Apr 2023 10:44:57 +0000 Subject: [PATCH] Latest Prometheus stack --- charts/kubezero-metrics/Chart.yaml | 10 +- charts/kubezero-metrics/README.md | 10 +- .../charts/kube-prometheus-stack/Chart.yaml | 10 +- .../charts/kube-prometheus-stack/README.md | 38 ++ .../charts/grafana/Chart.yaml | 4 +- .../charts/grafana/README.md | 56 +- .../charts/grafana/templates/NOTES.txt | 3 +- .../charts/grafana/templates/_helpers.tpl | 12 +- .../charts/grafana/templates/_pod.tpl | 21 +- .../charts/grafana/templates/clusterrole.yaml | 2 +- .../grafana/templates/clusterrolebinding.yaml | 2 +- .../charts/grafana/templates/configmap.yaml | 9 + .../grafana/templates/headless-service.yaml | 5 +- .../charts/grafana/templates/hpa.yaml | 7 +- .../templates/image-renderer-deployment.yaml | 6 + .../grafana/templates/image-renderer-hpa.yaml | 47 ++ .../image-renderer-network-policy.yaml | 14 +- .../image-renderer-servicemonitor.yaml | 48 ++ .../charts/grafana/templates/role.yaml | 2 +- .../grafana/templates/servicemonitor.yaml | 4 + .../charts/grafana/values.yaml | 47 +- .../charts/kube-state-metrics/Chart.yaml | 4 +- .../charts/kube-state-metrics/README.md | 31 +- .../kube-state-metrics/templates/NOTES.txt | 13 + .../kube-state-metrics/templates/_helpers.tpl | 55 ++ .../templates/deployment.yaml | 110 +++- .../templates/networkpolicy.yaml | 43 ++ .../templates/rbac-configmap.yaml | 15 + .../kube-state-metrics/templates/role.yaml | 10 + .../templates/serviceaccount.yaml | 2 +- .../templates/servicemonitor.yaml | 8 + .../charts/kube-state-metrics/values.yaml | 72 ++- .../prometheus-node-exporter/Chart.yaml | 2 +- .../charts/prometheus-node-exporter/README.md | 19 + .../templates/NOTES.txt | 14 + .../templates/_helpers.tpl | 59 ++- .../templates/clusterrole.yaml | 20 + .../templates/clusterrolebinding.yaml | 20 + .../templates/daemonset.yaml | 69 ++- .../templates/podmonitor.yaml | 91 ++++ .../templates/rbac-configmap.yaml | 15 + .../templates/serviceaccount.yaml | 4 +- .../templates/servicemonitor.yaml | 4 + .../templates/verticalpodautoscaler.yaml | 4 +- .../prometheus-node-exporter/values.yaml | 147 +++++- .../crds/crd-alertmanagerconfigs.yaml | 10 +- .../crds/crd-alertmanagers.yaml | 433 ++++++++++++--- .../crds/crd-podmonitors.yaml | 4 +- .../crds/crd-probes.yaml | 4 +- .../crds/crd-prometheuses.yaml | 496 ++++++++++++++---- .../crds/crd-prometheusrules.yaml | 5 +- .../crds/crd-servicemonitors.yaml | 4 +- .../crds/crd-thanosrulers.yaml | 398 +++++++++++--- .../templates/_helpers.tpl | 27 +- .../templates/alertmanager/alertmanager.yaml | 4 + .../templates/alertmanager/secret.yaml | 6 +- .../alertmanager/servicemonitor.yaml | 4 + .../exporters/core-dns/servicemonitor.yaml | 1 + .../kube-api-server/servicemonitor.yaml | 3 +- .../servicemonitor.yaml | 1 + .../exporters/kube-dns/servicemonitor.yaml | 1 + .../exporters/kube-etcd/servicemonitor.yaml | 1 + .../exporters/kube-proxy/servicemonitor.yaml | 1 + .../kube-scheduler/servicemonitor.yaml | 1 + .../exporters/kubelet/servicemonitor.yaml | 1 + .../grafana/configmaps-datasources.yaml | 2 +- .../mutatingWebhookConfiguration.yaml | 6 +- .../validatingWebhookConfiguration.yaml | 6 +- .../prometheus-operator/clusterrole.yaml | 11 + .../prometheus-operator/deployment.yaml | 16 +- .../prometheus-operator/networkpolicy.yaml | 1 + .../prometheus-operator/servicemonitor.yaml | 1 + .../templates/prometheus/csi-secret.yaml | 4 +- .../templates/prometheus/networkpolicy.yaml | 32 
++ .../templates/prometheus/podmonitors.yaml | 1 + .../templates/prometheus/prometheus.yaml | 10 + .../templates/prometheus/servicemonitor.yaml | 4 + .../servicemonitorThanosSidecar.yaml | 4 + .../templates/prometheus/servicemonitors.yaml | 1 + .../templates/thanos-ruler/extrasecret.yaml | 4 +- .../templates/thanos-ruler/ingress.yaml | 4 +- .../thanos-ruler/podDisruptionBudget.yaml | 6 +- .../templates/thanos-ruler/ruler.yaml | 12 +- .../templates/thanos-ruler/service.yaml | 8 +- .../thanos-ruler/serviceaccount.yaml | 6 +- .../thanos-ruler/servicemonitor.yaml | 10 +- .../charts/kube-prometheus-stack/values.yaml | 357 ++++++++++++- charts/kubezero-metrics/jsonnet/build.sh | 4 +- .../dashboards/k8s-resources-cluster.json | 16 +- .../dashboards/k8s-resources-namespace.json | 16 +- .../jsonnet/dashboards/k8s-resources-pod.json | 20 +- .../jsonnet/dashboards/kubelet.json | 2 +- .../kubezero-metrics/jsonnet/jsonnetfile.json | 2 +- .../jsonnet/jsonnetfile.lock.json | 70 +-- .../jsonnet/rules/alertmanager-prometheusRule | 2 +- .../jsonnet/rules/etcd-mixin-prometheusRule | 32 +- .../rules/kube-state-metrics-prometheusRule | 2 +- .../jsonnet/rules/kubernetes-prometheusRule | 36 +- .../rules/node-exporter-prometheusRule | 30 +- .../rules/prometheus-operator-prometheusRule | 2 +- .../jsonnet/rules/prometheus-prometheusRule | 15 +- .../templates/grafana-dashboards-k8s.yaml | 8 +- .../templates/rules/etcd-mixin.yaml | 30 +- .../templates/rules/kubernetes.yaml | 42 +- .../templates/rules/node-exporter.yaml | 30 +- .../templates/rules/prometheus.yaml | 11 + charts/kubezero-metrics/zdt.patch | 2 +- charts/kubezero/README.md | 2 +- charts/kubezero/values.yaml | 2 +- 109 files changed, 2908 insertions(+), 567 deletions(-) create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-hpa.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-servicemonitor.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/networkpolicy.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/rbac-configmap.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrole.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/podmonitor.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/rbac-configmap.yaml create mode 100644 charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/networkpolicy.yaml diff --git a/charts/kubezero-metrics/Chart.yaml b/charts/kubezero-metrics/Chart.yaml index 7d1bb5f3..f9f7eb55 100644 --- a/charts/kubezero-metrics/Chart.yaml +++ b/charts/kubezero-metrics/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: kubezero-metrics description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations. 
type: application -version: 0.8.9 +version: 0.9.0 home: https://kubezero.com icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png keywords: @@ -19,16 +19,16 @@ dependencies: repository: https://cdn.zero-downtime.net/charts/ # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack - name: kube-prometheus-stack - version: 43.2.0 + version: 45.9.1 # Switch back to upstream once all alerts are fixed eg. etcd gpcr # repository: https://prometheus-community.github.io/helm-charts - name: prometheus-adapter - version: 3.5.0 + version: 4.1.1 repository: https://prometheus-community.github.io/helm-charts condition: prometheus-adapter.enabled - name: prometheus-pushgateway - version: 2.0.2 + version: 2.1.3 # Switch back to upstream once namespaces are supported repository: https://prometheus-community.github.io/helm-charts condition: prometheus-pushgateway.enabled -kubeVersion: ">= 1.24.0" +kubeVersion: ">= 1.25.0" diff --git a/charts/kubezero-metrics/README.md b/charts/kubezero-metrics/README.md index 82ea0428..0f880cd3 100644 --- a/charts/kubezero-metrics/README.md +++ b/charts/kubezero-metrics/README.md @@ -1,6 +1,6 @@ # kubezero-metrics -![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) +![Version: 0.9.0](https://img.shields.io/badge/Version-0.9.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations. @@ -14,14 +14,14 @@ KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all ## Requirements -Kubernetes: `>= 1.24.0` +Kubernetes: `>= 1.25.0` | Repository | Name | Version | |------------|------|---------| -| | kube-prometheus-stack | 43.2.0 | +| | kube-prometheus-stack | 45.9.1 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | -| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.5.0 | -| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.0.2 | +| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 4.1.1 | +| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.1.3 | ## Values diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/Chart.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/Chart.yaml index 8c4575f5..ecec7b99 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/Chart.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/Chart.yaml @@ -7,20 +7,20 @@ annotations: url: https://github.com/prometheus-operator/kube-prometheus artifacthub.io/operator: "true" apiVersion: v2 -appVersion: 0.61.1 +appVersion: v0.63.0 dependencies: - condition: kubeStateMetrics.enabled name: kube-state-metrics repository: https://prometheus-community.github.io/helm-charts - version: 4.24.* + version: 5.0.* - condition: nodeExporter.enabled name: prometheus-node-exporter repository: https://prometheus-community.github.io/helm-charts - version: 4.8.* + version: 4.14.* - condition: grafana.enabled name: grafana repository: https://grafana.github.io/helm-charts - version: 6.48.* + version: 6.51.* description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to 
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus @@ -52,4 +52,4 @@ sources: - https://github.com/prometheus-community/helm-charts - https://github.com/prometheus-operator/kube-prometheus type: application -version: 43.2.0 +version: 45.9.1 diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/README.md b/charts/kubezero-metrics/charts/kube-prometheus-stack/README.md index e32bba5c..ac4b0857 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/README.md +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/README.md @@ -80,6 +80,44 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. +### From 44.x to 45.x + +This version upgrades Prometheus-Operator to v0.63.0, Prometheus to v2.43.0 and Thanos to v0.30.2. + +Run these commands to update the CRDs before applying the upgrade. + +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 43.x to 44.x + +This version upgrades Prometheus-Operator to v0.62.0, Prometheus to v2.41.0 and Thanos to v0.30.1. + +Run these commands to update the CRDs before applying the upgrade. 
+ +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +If you have explicitly set `prometheusOperator.admissionWebhooks.failurePolicy`, this value is now always used even when `.prometheusOperator.admissionWebhooks.patch.enabled` is `true` (the default). + +The values for `prometheusOperator.image.tag` & `prometheusOperator.prometheusConfigReloader.image.tag` are now empty by default and the Chart.yaml `appVersion` field is used instead. + ### From 42.x to 43.x This version upgrades Prometheus-Operator to v0.61.1, Prometheus to v2.40.5 and Thanos to v0.29.0. diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/Chart.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/Chart.yaml index 849916ad..be7ac448 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/Chart.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 9.3.1 +appVersion: 9.3.8 description: The leading tool for querying and visualizing time series and metrics. home: https://grafana.net icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png @@ -19,4 +19,4 @@ name: grafana sources: - https://github.com/grafana/grafana type: application -version: 6.48.0 +version: 6.51.5 diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/README.md b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/README.md index d5210caa..cd8d3168 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/README.md +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/README.md @@ -146,7 +146,7 @@ This version requires Helm >= 3.1.0. 
| `podPortName` | Name of the grafana port on the pod | `grafana` | | `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | | `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` | -| `sidecar.image.tag` | Sidecar image tag | `1.19.2` | +| `sidecar.image.tag` | Sidecar image tag | `1.22.0` | | `sidecar.image.sha` | Sidecar image sha (optional) | `""` | | `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | | `sidecar.resources` | Sidecar resources | `{}` | @@ -220,7 +220,8 @@ This version requires Helm >= 3.1.0. | `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | | `rbac.extraRoleRules` | Additional rules to add to the Role | [] | | `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | -| `command` | Define command to be executed by grafana container at startup | `nil` | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `args` | Define additional args if command is used | `nil` | | `testFramework.enabled` | Whether to create test-related resources | `true` | | `testFramework.image` | `test-framework` image repository. | `bats/bats` | | `testFramework.tag` | `test-framework` image tag. | `v1.4.1` | @@ -276,11 +277,10 @@ This version requires Helm >= 3.1.0. | `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` | | `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's definition version below 1.13 doesn't have the enableServiceLinks option | `false` | - - ### Example ingress with path With grafana 6.3 and above + ```yaml grafana.ini: server: @@ -491,6 +491,51 @@ delete_notifiers: # default org_id: 1 ``` +## Provision alert rules, contact points, notification policies and notification templates + +There are two methods to provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +alerting: + team1-alert-rules.yaml: + file: alerting/team1/rules.yaml + team2-alert-rules.yaml: + file: alerting/team2/rules.yaml + team3-alert-rules.yaml: + file: alerting/team3/rules.yaml + notification-policies.yaml: + file: alerting/shared/notification-policies.yaml + notification-templates.yaml: + file: alerting/shared/notification-templates.yaml + contactpoints.yaml: + apiVersion: 1 + contactPoints: + - orgId: 1 + name: Slack channel + receivers: + - uid: default-receiver + type: slack + settings: + # Webhook URL to be filled in + url: "" + # We need to escape double curly braces for the tpl function. + text: '{{ `{{ template "default.message" . }}` }}' + title: '{{ `{{ template "default.title" . }}` }}' +``` + +There are two possibilities: + +* Inlining the file contents as described in the example `values.yaml` and the official [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). +* Importing a file using a relative path starting from the chart root directory. + +### Important notes on file provisioning + +* The chart supports importing YAML and JSON files. +* The filename must be unique, otherwise one volume mount will overwrite the other.
+* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. +* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. +* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. + ## How to serve Grafana with a path prefix (/grafana) In order to serve Grafana with a prefix (e.g., http://example.com/grafana), add the following to your values.yaml. @@ -598,6 +643,9 @@ grafana.ini: unified_alerting: enabled: true ha_peers: {{ Name }}-headless:9094 + ha_listen_address: ${POD_IP}:9094 + ha_advertise_address: ${POD_IP}:9094 + alerting: enabled: false ``` diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/NOTES.txt b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/NOTES.txt index f399f43f..d86419fe 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/NOTES.txt +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/NOTES.txt @@ -1,6 +1,7 @@ 1. Get your '{{ .Values.adminUser }}' user password by running: - kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo + kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo + 2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_helpers.tpl b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_helpers.tpl index cb41fcc3..8307e10a 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_helpers.tpl +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_helpers.tpl @@ -68,7 +68,7 @@ Common labels helm.sh/chart: {{ include "grafana.chart" . }} {{ include "grafana.selectorLabels" . }} {{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- with .Values.extraLabels }} @@ -91,7 +91,7 @@ Common labels helm.sh/chart: {{ include "grafana.chart" . }} {{ include "grafana.imageRenderer.selectorLabels" . }} {{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} @@ -145,10 +145,12 @@ Return the appropriate apiVersion for ingress. Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}} {{- define "grafana.hpa.apiVersion" -}} -{{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }} -{{- print "autoscaling/v2beta1" }} -{{- else }} +{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} {{- print "autoscaling/v2" }} +{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2beta2" }} +{{- else }} +{{- print "autoscaling/v2beta1" }} {{- end }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_pod.tpl b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_pod.tpl index 95ec1f8f..68a9b4b9 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_pod.tpl +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/_pod.tpl @@ -763,7 +763,13 @@ containers: {{- range .Values.command }} - {{ . | quote }} {{- end }} - {{- end}} + {{- end }} + {{- if .Values.args }} + args: + {{- range .Values.args }} + - {{ . | quote }} + {{- end }} + {{- end }} {{- with .Values.containerSecurityContext }} securityContext: {{- toYaml . | nindent 6 }} @@ -878,7 +884,17 @@ containers: - name: {{ .Values.podPortName }} containerPort: {{ .Values.service.targetPort }} protocol: TCP + - name: {{ .Values.gossipPortName }}-tcp + containerPort: 9094 + protocol: TCP + - name: {{ .Values.gossipPortName }}-udp + containerPort: 9094 + protocol: UDP env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - name: GF_SECURITY_ADMIN_USER valueFrom: @@ -1128,8 +1144,7 @@ volumes: path: {{ .hostPath }} {{- else if .csi }} csi: - data: - {{- toYaml .data | nindent 8 }} + {{- toYaml .data | nindent 6 }} {{- else }} emptyDir: {} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrole.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrole.yaml index 3396713a..c4ca270a 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrole.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrole.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingRole) }} kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrolebinding.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrolebinding.yaml index 48411fe8..b848e8c1 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrolebinding.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/clusterrolebinding.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/configmap.yaml 
b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/configmap.yaml index b0735a2b..00ab74e1 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/configmap.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/configmap.yaml @@ -1,4 +1,5 @@ {{- if .Values.createConfigmap }} +{{- $files := .Files }} {{- $root := . -}} apiVersion: v1 kind: ConfigMap @@ -53,9 +54,14 @@ data: {{- end }} {{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "file") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- else }} {{- $key | nindent 2 }}: | {{- tpl (toYaml $value | nindent 4) $root }} {{- end }} + {{- end }} {{- range $key, $value := .Values.dashboardProviders }} {{- $key | nindent 2 }}: | @@ -87,6 +93,9 @@ data: {{- end }} {{- if $value.bearerToken }} -H "Authorization: Bearer {{ $value.bearerToken }}" \ + {{- end }} + {{- if $value.basic }} + -H "Basic: {{ $value.basic }}" \ {{- end }} {{- if $value.gitlabToken }} -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/headless-service.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/headless-service.yaml index caaed5d5..3028589d 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/headless-service.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/headless-service.yaml @@ -17,7 +17,6 @@ spec: {{- include "grafana.selectorLabels" . | nindent 4 }} type: ClusterIP ports: - - protocol: TCP - port: 3000 - targetPort: {{ .Values.service.targetPort }} + - name: {{ .Values.gossipPortName }}-tcp + port: 9094 {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/hpa.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/hpa.yaml index f53dfc83..46bbcb49 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/hpa.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/hpa.yaml @@ -26,7 +26,7 @@ spec: - type: Resource resource: name: memory - {{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }} + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} {{- else }} target: @@ -38,7 +38,7 @@ spec: - type: Resource resource: name: cpu - {{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }} + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} {{- else }} target: @@ -46,4 +46,7 @@ spec: averageUtilization: {{ .Values.autoscaling.targetCPU }} {{- end }} {{- end }} + {{- if .Values.autoscaling.behavior }} + behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-deployment.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-deployment.yaml index b087179f..0c3d30c5 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-deployment.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-deployment.yaml @@ -15,7 +15,9 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: + {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} replicas: {{ .Values.imageRenderer.replicas }} + {{- end }} revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} selector: matchLabels: @@ -86,6 +88,10 @@ spec: env: - name: HTTP_PORT value: {{ .Values.imageRenderer.service.targetPort | quote }} + {{- if .Values.imageRenderer.serviceMonitor.enabled }} + - name: ENABLE_METRICS + value: "true" + {{- end }} {{- range $key, $value := .Values.imageRenderer.env }} - name: {{ $key | quote }} value: {{ $value | quote }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-hpa.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-hpa.yaml new file mode 100644 index 00000000..b0f0059b --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "grafana.fullname" . }}-image-renderer + minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} + maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} + metrics: + {{- if .Values.imageRenderer.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.behavior }} + behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-network-policy.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-network-policy.yaml index fb694451..d1a0eb31 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-network-policy.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-network-policy.yaml @@ -24,13 +24,16 @@ spec: from: - namespaceSelector: matchLabels: - name: {{ include "grafana.namespace" . }} - - podSelector: + kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} + podSelector: matchLabels: {{- include "grafana.selectorLabels" . | nindent 14 }} {{- with .Values.podLabels }} {{- toYaml . | nindent 14 }} {{- end }} + {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} + {{ toYaml . | nindent 8 }} + {{- end }} {{- end }} {{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} @@ -61,10 +64,13 @@ spec: protocol: TCP # talk only to grafana - ports: - - port: {{ .Values.service.port }} + - port: {{ .Values.service.targetPort }} protocol: TCP to: - - podSelector: + - namespaceSelector: + matchLabels: + name: {{ include "grafana.namespace" . }} + podSelector: matchLabels: {{- include "grafana.selectorLabels" . | nindent 14 }} {{- with .Values.podLabels }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-servicemonitor.yaml new file mode 100644 index 00000000..5d9f09d2 --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if .Values.imageRenderer.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + {{- if .Values.imageRenderer.serviceMonitor.namespace }} + namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.imageRenderer.service.portName }} + {{- with .Values.imageRenderer.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.imageRenderer.serviceMonitor.path }} + scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} + {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . 
| nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}-image-renderer" + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/role.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/role.yaml index ffdb16f6..df8ac9a7 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/role.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/role.yaml @@ -12,7 +12,7 @@ metadata: {{- end }} {{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} rules: - {{- if .Values.rbac.pspEnabled }} + {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} - apiGroups: ['extensions'] resources: ['podsecuritypolicies'] verbs: ['use'] diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/servicemonitor.yaml index 6575fb9a..a4e9f00b 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/templates/servicemonitor.yaml @@ -41,4 +41,8 @@ spec: namespaceSelector: matchNames: - {{ include "grafana.namespace" . }} + {{- with .Values.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/values.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/values.yaml index bc1834fb..b38d2204 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/values.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/grafana/values.yaml @@ -17,8 +17,8 @@ rbac: create: true ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) # useExistingRole: name-of-some-(cluster)role - pspEnabled: true - pspUseAppArmor: true + pspEnabled: false + pspUseAppArmor: false namespaced: false extraRoleRules: [] # - apiGroups: [] @@ -52,6 +52,7 @@ autoscaling: maxReplicas: 5 targetCPU: "60" targetMemory: "" + behavior: {} ## See `kubectl explain poddisruptionbudget.spec` for more ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ @@ -159,7 +160,7 @@ downloadDashboards: # podLabels: {} podPortName: grafana - +gossipPortName: gossip ## Deployment annotations # annotations: {} @@ -193,6 +194,7 @@ serviceMonitor: tlsConfig: {} scrapeTimeout: 30s relabelings: [] + targetLabels: [] extraExposePorts: [] # - name: keycloak @@ -382,6 +384,14 @@ admin: # - "sh" # - "/run.sh" +## Optionally define args if command is used +## Needed if using `hashicorp/envconsul` to manage secrets +## By default no arguments are set +# args: +# - "-secret" +# - "secret/grafana" +# - "./grafana" + ## Extra environment variables that will be passed onto deployment pods ## ## to provide grafana with access to CloudWatch on AWS EKS: @@ -663,6 +673,9 @@ dashboards: {} # local-dashboard-bitbucket: # url: https://example.com/repository/test-bitbucket.json # bearerToken: '' + # local-dashboard-azure: + # url: https://example.com/repository/test-azure.json + # basic: '' ## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. ## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. @@ -754,7 +767,7 @@ smtp: sidecar: image: repository: quay.io/kiwigrid/k8s-sidecar - tag: 1.21.0 + tag: 1.22.0 sha: "" imagePullPolicy: IfNotPresent resources: {} @@ -1008,6 +1021,13 @@ imageRenderer: # Enable the image-renderer deployment & service enabled: false replicas: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} image: # image-renderer Image repository repository: grafana/grafana-image-renderer @@ -1047,6 +1067,23 @@ imageRenderer: targetPort: 8081 # Adds the appProtocol field to the image-renderer service. This allows working with istio protocol selection.
Ex: "http" or "tcp" appProtocol: "" + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels + targetLabels: [] + # - targetLabel1 + # - targetLabel2 # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana grafanaProtocol: http # In case a sub_path is used this needs to be added to the image renderer callback @@ -1060,6 +1097,8 @@ imageRenderer: limitIngress: true # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods limitEgress: false + # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) + extraIngressSelectors: [] resources: {} # limits: # cpu: 100m diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/Chart.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/Chart.yaml index 29d6acc3..0691f093 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/Chart.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 2.7.0 +appVersion: 2.8.2 description: Install kube-state-metrics to generate and expose cluster-level metrics home: https://github.com/kubernetes/kube-state-metrics/ keywords: @@ -18,4 +18,4 @@ name: kube-state-metrics sources: - https://github.com/kubernetes/kube-state-metrics/ type: application -version: 4.24.0 +version: 5.0.1 diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/README.md b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/README.md index 7c2e1691..843be89e 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/README.md +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/README.md @@ -2,14 +2,15 @@ Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics). -## Get Repo Info - +## Get Repository Info + ```console helm repo add prometheus-community https://prometheus-community.github.io/helm-charts helm repo update ``` _See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + ## Install Chart @@ -43,20 +44,19 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen You can upgrade in-place: -1. [get repo info](#get-repo-info) -1. [upgrade](#upgrading-chart) your existing release name using the new chart repo - +1. [get repository info](#get-repository-info) +1. [upgrade](#upgrading-chart) your existing release name using the new chart repository ## Upgrading to v3.0.0 v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application-side. 
The upgraded chart now has the following changes: + * Dropped support for helm v2 (helm v3 or later is required) * collectors key was renamed to resources * namespace key was renamed to namespaces - ## Configuration See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments: @@ -65,4 +65,21 @@ See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_h helm show values prometheus-community/kube-state-metrics ``` -You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options. +### kube-rbac-proxy + +You can enable `kube-state-metrics` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy one RBAC proxy container per endpoint (metrics & telemetry). +To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kube-state-metrics-read +rules: + - apiGroups: [ "" ] + resources: ["services/kube-state-metrics"] + verbs: + - get +``` + +See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details. diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/NOTES.txt b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/NOTES.txt index 5a646e0c..3589c24e 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/NOTES.txt +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/NOTES.txt @@ -8,3 +8,16 @@ In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-s They are served either as plaintext or protobuf depending on the Accept header. They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. +{{- if .Values.kubeRBACProxy.enabled}} + +kube-rbac-proxy endpoint protection is enabled: +- Metrics endpoints are now HTTPS +- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions: +``` +rules: + - apiGroups: [ "" ] + resources: ["services/{{ template "kube-state-metrics.fullname" . }}"] + verbs: + - get +``` +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/_helpers.tpl b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/_helpers.tpl index 0d193fbc..a4358c87 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/_helpers.tpl +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/_helpers.tpl @@ -77,9 +77,13 @@ release: {{ .Release.Name }} Selector labels */}} {{- define "kube-state-metrics.selectorLabels" }} +{{- if .Values.selectorOverride }} +{{ toYaml .Values.selectorOverride }} +{{- else }} app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} +{{- end }} {{/* Sets default scrape limits for servicemonitor */}} {{- define "servicemonitor.scrapeLimits" -}} @@ -99,3 +103,54 @@ labelNameLengthLimit: {{ . }} labelValueLengthLimit: {{ .
}} {{- end }} {{- end -}} + +{{/* +Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "kube-state-metrics.imagePullSecrets" -}} +{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }} + {{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml . | trim }} + {{- else }} +- name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +The image to use for kube-state-metrics +*/}} +{{- define "kube-state-metrics.image" -}} +{{- if .Values.image.sha }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +The image to use for kubeRBACProxy +*/}} +{{- define "kubeRBACProxy.image" -}} +{{- if .Values.kubeRBACProxy.image.sha }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/deployment.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/deployment.yaml index e529d3fc..cb519a0c 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/deployment.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/deployment.yaml @@ -40,6 +40,8 @@ spec: priorityClassName: {{ .Values.priorityClassName }} {{- end }} containers: + {{- $httpPort := ternary 9090 (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled}} + {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}} - name: {{ template "kube-state-metrics.name" . 
}} {{- if .Values.autosharding.enabled }} env: @@ -56,9 +58,7 @@ spec: {{- if .Values.extraArgs }} {{- .Values.extraArgs | toYaml | nindent 8 }} {{- end }} - {{- if .Values.service.port }} - - --port={{ .Values.service.port | default 8080}} - {{- end }} + - --port={{ $httpPort }} {{- if .Values.collectors }} - --resources={{ .Values.collectors | join "," }} {{- end }} @@ -96,11 +96,16 @@ spec: {{- if .Values.kubeconfig.enabled }} - --kubeconfig=/opt/k8s/.kube/config {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - --telemetry-host=127.0.0.1 + - --telemetry-port={{ $telemetryPort }} + {{- else }} {{- if .Values.selfMonitor.telemetryHost }} - --telemetry-host={{ .Values.selfMonitor.telemetryHost }} {{- end }} {{- if .Values.selfMonitor.telemetryPort }} - - --telemetry-port={{ .Values.selfMonitor.telemetryPort | default 8081 }} + - --telemetry-port={{ $telemetryPort }} + {{- end }} {{- end }} {{- if or (.Values.kubeconfig.enabled) (.Values.volumeMounts) }} volumeMounts: @@ -114,28 +119,26 @@ spec: {{- end }} {{- end }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.image.sha }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}" - {{- else }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- end }} + image: {{ include "kube-state-metrics.image" . }} + {{- if eq .Values.kubeRBACProxy.enabled false }} ports: - containerPort: {{ .Values.service.port | default 8080}} name: "http" {{- if .Values.selfMonitor.enabled }} - - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + - containerPort: {{ $telemetryPort }} name: "metrics" {{- end }} + {{- end }} livenessProbe: httpGet: path: /healthz - port: {{ .Values.service.port | default 8080}} + port: {{ $httpPort }} initialDelaySeconds: 5 timeoutSeconds: 5 readinessProbe: httpGet: path: / - port: {{ .Values.service.port | default 8080}} + port: {{ $httpPort }} initialDelaySeconds: 5 timeoutSeconds: 5 {{- if .Values.resources }} @@ -146,9 +149,81 @@ spec: securityContext: {{ toYaml .Values.containerSecurityContext | indent 10 }} {{- end }} -{{- if .Values.imagePullSecrets }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy-http + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }} + {{- end }} + - --secure-listen-address=:{{ .Values.service.port | default 8080}} + - --upstream=http://127.0.0.1:{{ $httpPort }}/ + - --proxy-endpoints-port=8888 + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + image: {{ include "kubeRBACProxy.image" . 
}} + ports: + - containerPort: {{ .Values.service.port | default 8080}} + name: "http" + - containerPort: 8888 + name: "http-healthz" + readinessProbe: + httpGet: + scheme: HTTPS + port: 8888 + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: +{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }} +{{- end }} +{{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: +{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }} +{{- end }} + {{- if .Values.selfMonitor.enabled }} + - name: kube-rbac-proxy-telemetry + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }} + {{- end }} + - --secure-listen-address=:{{ .Values.selfMonitor.telemetryPort | default 8081 }} + - --upstream=http://127.0.0.1:{{ $telemetryPort }}/ + - --proxy-endpoints-port=8889 + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + image: {{ include "kubeRBACProxy.image" . }} + ports: + - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + name: "metrics" + - containerPort: 8889 + name: "metrics-healthz" + readinessProbe: + httpGet: + scheme: HTTPS + port: 8889 + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: +{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }} +{{- end }} +{{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: +{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }} +{{- end }} + {{- end }} + {{- end }} +{{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }} {{- end }} {{- if .Values.affinity }} affinity: @@ -166,13 +241,18 @@ spec: topologySpreadConstraints: {{ toYaml .Values.topologySpreadConstraints | indent 8 }} {{- end }} - {{- if or (.Values.kubeconfig.enabled) (.Values.volumes) }} + {{- if or (.Values.kubeconfig.enabled) (.Values.volumes) (.Values.kubeRBACProxy.enabled) }} volumes: {{- if .Values.kubeconfig.enabled}} - name: kubeconfig secret: secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig {{- end }} + {{- if .Values.kubeRBACProxy.enabled}} + - name: kube-rbac-proxy-config + configMap: + name: {{ template "kube-state-metrics.fullname" . }}-rbac-config + {{- end }} {{- if .Values.volumes }} {{ toYaml .Values.volumes | indent 8 }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/networkpolicy.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/networkpolicy.yaml new file mode 100644 index 00000000..40b4cbc2 --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/networkpolicy.yaml @@ -0,0 +1,43 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . 
}} + {{- if .Values.annotations }} + annotations: + {{ toYaml .Values.annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.networkPolicy.egress }} + ## Deny all egress by default + egress: + {{- toYaml .Values.networkPolicy.egress | nindent 4 }} + {{- end }} + ingress: + {{- if .Values.networkPolicy.ingress }} + {{- toYaml .Values.networkPolicy.ingress | nindent 4 }} + {{- else }} + ## Allow ingress on default ports by default + - ports: + - port: {{ .Values.service.port | default 8080 }} + protocol: TCP + {{- if .Values.selfMonitor.enabled }} + {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}} + - port: {{ $telemetryPort }} + protocol: TCP + {{- end }} + {{- end }} + podSelector: + {{- if .Values.networkPolicy.podSelector }} + {{- toYaml .Values.networkPolicy.podSelector | nindent 4 }} + {{- else }} + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + {{- end }} + policyTypes: + - Ingress + - Egress +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/rbac-configmap.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/rbac-configmap.yaml new file mode 100644 index 00000000..812c13b8 --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/rbac-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.kubeRBACProxy.enabled}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-rbac-config +data: + config-file.yaml: |+ + authorization: + resourceAttributes: + namespace: {{ template "kube-state-metrics.namespace" . }} + apiVersion: v1 + resource: services + subresource: {{ template "kube-state-metrics.fullname" . }} + name: {{ template "kube-state-metrics.fullname" . 
}} +{{- end }} \ No newline at end of file diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/role.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/role.yaml index 6474914f..24c057da 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/role.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/role.yaml @@ -189,6 +189,16 @@ rules: - verticalpodautoscalers verbs: ["list", "watch"] {{ end -}} +{{- if $.Values.kubeRBACProxy.enabled }} +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] +{{- end }} {{ if $.Values.rbac.extraRules }} {{ toYaml $.Values.rbac.extraRules }} {{ end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml index e1229eb9..a7ff4dd3 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -11,5 +11,5 @@ metadata: {{ toYaml .Values.serviceAccount.annotations | indent 4 }} {{- end }} imagePullSecrets: -{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} + {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} {{- end -}} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/servicemonitor.yaml index e93df4c4..adfa1058 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -11,6 +11,14 @@ metadata: {{- end }} spec: jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} + {{- with .Values.prometheus.monitor.targetLabels }} + targetLabels: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.monitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | trim | nindent 4 }} + {{- end }} {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | indent 2 }} selector: matchLabels: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/values.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/values.yaml index d32b1232..14f66f62 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/values.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/kube-state-metrics/values.yaml @@ -1,14 +1,33 @@ # Default values for kube-state-metrics. 
prometheusScrape: true image: - repository: registry.k8s.io/kube-state-metrics/kube-state-metrics - tag: v2.7.0 + registry: registry.k8s.io + repository: kube-state-metrics/kube-state-metrics + # If unset, uses v + .Chart.AppVersion + tag: "" sha: "" pullPolicy: IfNotPresent imagePullSecrets: [] # - name: "image-pull-secret" +global: + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + # + # Allow parent charts to override registry hostname + imageRegistry: "" + # If set to true, this will deploy kube-state-metrics as a StatefulSet and the data # will be automatically sharded across <.Values.replicas> pods using the built-in # autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding @@ -38,6 +57,9 @@ service: customLabels: {} # app: kube-state-metrics +## Override selector labels +selectorOverride: {} + ## Set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box releaseLabel: false @@ -60,6 +82,39 @@ rbac: # verbs: ["list", "watch"] extraRules: [] +# Configure kube-rbac-proxy. When enabled, creates one kube-rbac-proxy container per exposed HTTP endpoint (metrics and telemetry if enabled). +# The requests are served through the same service, but over HTTPS. +kubeRBACProxy: + enabled: false + image: + registry: quay.io + repository: brancz/kube-rbac-proxy + tag: v0.14.0 + sha: "" + pullPolicy: IfNotPresent + + # List of additional cli arguments to configure kube-rbac-proxy + # for example: --tls-cipher-suites, --log-file, etc. + # All the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage + extraArgs: [] + + ## Specify security settings for a Container + ## Allows overrides and additional options compared to (Pod) securityContext + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + containerSecurityContext: {} + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + serviceAccount: # Specifies whether a ServiceAccount should be created, requires rbac true create: true @@ -80,6 +135,8 @@ prometheus: additionalLabels: {} namespace: "" jobLabel: "" + targetLabels: [] + podTargetLabels: [] interval: "" ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
## @@ -126,6 +183,17 @@ podSecurityPolicy: additionalVolumes: [] +## Configure network policy for kube-state-metrics +networkPolicy: + enabled: false + # egress: + # - {} + # ingress: + # - {} + # podSelector: + # matchLabels: + # app.kubernetes.io/name: kube-state-metrics + securityContext: enabled: true runAsGroup: 65534 diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/Chart.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/Chart.yaml index 5b50a123..39101f6f 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/Chart.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/Chart.yaml @@ -15,4 +15,4 @@ name: prometheus-node-exporter sources: - https://github.com/prometheus/node_exporter/ type: application -version: 4.8.0 +version: 4.14.0 diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/README.md b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/README.md index 02de7b14..cad7b1ae 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/README.md +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/README.md @@ -75,3 +75,22 @@ See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_h ```console helm show values prometheus-community/prometheus-node-exporter ``` + +### kube-rbac-proxy + +You can enable `prometheus-node-exporter` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy an RBAC proxy container protecting the node-exporter endpoint. +To authorize access, authenticate your requests (via a `ServiceAccount` for example) with an attached `ClusterRole` such as: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus-node-exporter-read +rules: + - apiGroups: [ "" ] + resources: ["services/node-exporter-prometheus-node-exporter"] + verbs: + - get +``` + +See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details. diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/NOTES.txt b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/NOTES.txt index df05e3fb..db8584de 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/NOTES.txt +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/NOTES.txt @@ -13,3 +13,17 @@ echo "Visit http://127.0.0.1:9100 to use your application" kubectl port-forward --namespace {{ template "prometheus-node-exporter.namespace" . }} $POD_NAME 9100 {{- end }} + +{{- if .Values.kubeRBACProxy.enabled}} + +kube-rbac-proxy endpoint protection is enabled: +- Metrics endpoints are now HTTPS +- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions: +``` +rules: + - apiGroups: [ "" ] + resources: ["services/{{ template "prometheus-node-exporter.fullname" .
}}"] + verbs: + - get +``` +{{- end }} \ No newline at end of file diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/_helpers.tpl b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/_helpers.tpl index f5e2603e..f7b0db2d 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/_helpers.tpl +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/_helpers.tpl @@ -76,9 +76,17 @@ The image to use */}} {{- define "prometheus-node-exporter.image" -}} {{- if .Values.image.sha }} -{{- printf "%s:%s@%s" .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} {{- else }} -{{- printf "%s:%s" .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} {{- end }} {{- end }} @@ -126,3 +134,50 @@ labelNameLengthLimit: {{ . }} labelValueLengthLimit: {{ . }} {{- end }} {{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "prometheus-node-exporter.imagePullSecrets" -}} +{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }} + {{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml . | trim }} + {{- else }} +- name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Create the namespace name of the pod monitor +*/}} +{{- define "prometheus-node-exporter.podmonitor-namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- if .Values.prometheus.podMonitor.namespace }} +{{- .Values.prometheus.podMonitor.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} +{{- end }} + +{{/* Sets default scrape limits for podmonitor */}} +{{- define "podmonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . 
}} +{{- end }} +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrole.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrole.yaml new file mode 100644 index 00000000..1fd91150 --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrole.yaml @@ -0,0 +1,20 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +rules: + {{- if $.Values.kubeRBACProxy.enabled }} + - apiGroups: [ "authentication.k8s.io" ] + resources: + - tokenreviews + verbs: [ "create" ] + - apiGroups: [ "authorization.k8s.io" ] + resources: + - subjectaccessreviews + verbs: [ "create" ] + {{- end }} +{{- end -}} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..653305ad --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + name: {{ template "prometheus-node-exporter.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "prometheus-node-exporter.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "prometheus-node-exporter.serviceAccountName" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} +{{- end -}} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/daemonset.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/daemonset.yaml index 189b2d0b..2a6c92bf 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/daemonset.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/daemonset.yaml @@ -26,7 +26,7 @@ spec: labels: {{- include "prometheus-node-exporter.labels" . | nindent 8 }} spec: - automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} + automountServiceAccountToken: {{ ternary true false (or .Values.serviceAccount.automountServiceAccountToken .Values.kubeRBACProxy.enabled) }} {{- with .Values.securityContext }} securityContext: {{- toYaml . | nindent 8 }} @@ -40,6 +40,7 @@ spec: {{- end }} serviceAccountName: {{ include "prometheus-node-exporter.serviceAccountName" . }} containers: + {{- $servicePort := ternary 8100 .Values.service.port .Values.kubeRBACProxy.enabled }} - name: node-exporter image: {{ include "prometheus-node-exporter.image" . 
}} imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -48,8 +49,11 @@ spec: - --path.sysfs=/host/sys {{- if .Values.hostRootFsMount.enabled }} - --path.rootfs=/host/root + {{- if semverCompare ">=1.4.0" (default .Chart.AppVersion .Values.image.tag) }} + - --path.udev.data=/host/root/run/udev/data {{- end }} - - --web.listen-address=[$(HOST_IP)]:{{ .Values.service.port }} + {{- end }} + - --web.listen-address=[$(HOST_IP)]:{{ $servicePort }} {{- with .Values.extraArgs }} {{- toYaml . | nindent 12 }} {{- end }} @@ -71,10 +75,12 @@ spec: - name: {{ $key }} value: {{ $value | quote }} {{- end }} + {{- if eq .Values.kubeRBACProxy.enabled false }} ports: - name: {{ .Values.service.portName }} containerPort: {{ .Values.service.port }} protocol: TCP + {{- end }} livenessProbe: failureThreshold: {{ .Values.livenessProbe.failureThreshold }} httpGet: @@ -84,7 +90,7 @@ spec: value: {{ $header.value }} {{- end }} path: / - port: {{ .Values.service.port }} + port: {{ $servicePort }} scheme: {{ upper .Values.livenessProbe.httpGet.scheme }} initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.livenessProbe.periodSeconds }} @@ -99,7 +105,7 @@ spec: value: {{ $header.value }} {{- end }} path: / - port: {{ .Values.service.port }} + port: {{ $servicePort }} scheme: {{ upper .Values.readinessProbe.httpGet.scheme }} initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.readinessProbe.periodSeconds }} @@ -147,14 +153,14 @@ spec: {{- end }} {{- with .Values.sidecars }} {{- toYaml . | nindent 8 }} - {{- if or .Values.sidecarVolumeMount .Values.sidecarHostVolumeMounts }} + {{- if or $.Values.sidecarVolumeMount $.Values.sidecarHostVolumeMounts }} volumeMounts: - {{- range $_, $mount := .Values.sidecarVolumeMount }} + {{- range $_, $mount := $.Values.sidecarVolumeMount }} - name: {{ $mount.name }} mountPath: {{ $mount.mountPath }} readOnly: {{ $mount.readOnly }} {{- end }} - {{- range $_, $mount := .Values.sidecarHostVolumeMounts }} + {{- range $_, $mount := $.Values.sidecarHostVolumeMounts }} - name: {{ $mount.name }} mountPath: {{ $mount.mountPath }} readOnly: {{ $mount.readOnly }} @@ -164,9 +170,49 @@ spec: {{- end }} {{- end }} {{- end }} - {{- with .Values.imagePullSecrets }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 12 }} + {{- end }} + - --secure-listen-address=:{{ .Values.service.port}} + - --upstream=http://127.0.0.1:{{ $servicePort }}/ + - --proxy-endpoints-port=8888 + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + {{- if .Values.kubeRBACProxy.image.sha }} + image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}@sha256:{{ .Values.kubeRBACProxy.image.sha }}" + {{- else }} + image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}" + {{- end }} + ports: + - containerPort: {{ .Values.service.port}} + name: "http" + - containerPort: 8888 + name: "http-healthz" + readinessProbe: + httpGet: + scheme: HTTPS + port: 8888 + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if 
.Values.kubeRBACProxy.resources }} + resources: + {{ toYaml .Values.kubeRBACProxy.resources | nindent 12 }} + {{- end }} + {{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: + {{ toYaml .Values.kubeRBACProxy.containerSecurityContext | nindent 12 }} + {{- end }} + {{- end }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} imagePullSecrets: - {{ toYaml . | nindent 8 }} + {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }} {{- end }} hostNetwork: {{ .Values.hostNetwork }} hostPID: {{ .Values.hostPID }} @@ -223,3 +269,8 @@ spec: secret: secretName: {{ $mount.name }} {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy-config + configMap: + name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config + {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/podmonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/podmonitor.yaml new file mode 100644 index 00000000..f88da6a3 --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/podmonitor.yaml @@ -0,0 +1,91 @@ +{{- if .Values.prometheus.podMonitor.enabled }} +apiVersion: {{ .Values.prometheus.podMonitor.apiVersion | default "monitoring.coreos.com/v1" }} +kind: PodMonitor +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.podmonitor-namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.prometheus.podMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.podMonitor.jobLabel }} + {{- include "podmonitor.scrapeLimits" .Values.prometheus.podMonitor | nindent 2 }} + selector: + matchLabels: + {{- with .Values.prometheus.podMonitor.selectorOverride }} + {{- toYaml . | nindent 6 }} + {{- else }} + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "prometheus-node-exporter.namespace" . }} + {{- with .Values.prometheus.podMonitor.attachMetadata }} + attachMetadata: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + podMetricsEndpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.prometheus.podMonitor.scheme }} + scheme: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.path }} + path: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.basicAuth }} + basicAuth: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.bearerTokenSecret }} + bearerTokenSecret: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.authorization }} + authorization: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.oauth2 }} + oauth2: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.proxyUrl }} + proxyUrl: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.interval }} + interval: {{ . 
}} + {{- end }} + {{- with .Values.prometheus.podMonitor.honorTimestamps }} + honorTimestamps: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.honorLabels }} + honorLabels: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 8 }} + {{- end }} + enableHttp2: {{ default false .Values.prometheus.podMonitor.enableHttp2 }} + filterRunning: {{ default true .Values.prometheus.podMonitor.filterRunning }} + followRedirects: {{ default false .Values.prometheus.podMonitor.followRedirects }} + {{- with .Values.prometheus.podMonitor.params }} + params: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/rbac-configmap.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/rbac-configmap.yaml new file mode 100644 index 00000000..d8606b1c --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/rbac-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.kubeRBACProxy.enabled}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config +data: + config-file.yaml: |+ + authorization: + resourceAttributes: + namespace: {{ template "prometheus-node-exporter.namespace" . }} + apiVersion: v1 + resource: services + subresource: {{ template "prometheus-node-exporter.fullname" . }} + name: {{ template "prometheus-node-exporter.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml index b82630ca..5c3348c0 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml @@ -10,8 +10,8 @@ metadata: annotations: {{- toYaml . | nindent 4 }} {{- end }} -{{- with .Values.serviceAccount.imagePullSecrets }} +{{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }} imagePullSecrets: - {{- toYaml . 
| nindent 2 }} + {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} {{- end }} {{- end -}} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/servicemonitor.yaml index 073ce57a..bd8a9a6a 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/servicemonitor.yaml @@ -12,6 +12,10 @@ metadata: spec: jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | nindent 2 }} + {{- with .Values.prometheus.monitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} selector: matchLabels: {{- with .Values.prometheus.monitor.selectorOverride }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml index ae8295d9..a2962575 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml @@ -9,7 +9,7 @@ metadata: spec: resourcePolicy: containerPolicies: - - containerName: {{ include "prometheus-node-exporter.name" . }} + - containerName: node-exporter {{- with .Values.verticalPodAutoscaler.controlledResources }} controlledResources: {{ . }} {{- end }} @@ -24,7 +24,7 @@ spec: targetRef: apiVersion: apps/v1 kind: DaemonSet - name: {{ include "prometheus-node-exporter.fullname" . }} + name: {{ include "prometheus-node-exporter.fullname" . }} {{- if .Values.verticalPodAutoscaler.updatePolicy }} updatePolicy: {{- with .Values.verticalPodAutoscaler.updatePolicy.updateMode }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/values.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/values.yaml index f491bdfa..eb7dc6b1 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/values.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/charts/prometheus-node-exporter/values.yaml @@ -2,7 +2,8 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. image: - repository: quay.io/prometheus/node-exporter + registry: quay.io + repository: prometheus/node-exporter # Overrides the image tag whose default is {{ printf "v%s" .Chart.AppVersion }} tag: "" pullPolicy: IfNotPresent @@ -11,6 +12,56 @@ image: imagePullSecrets: [] # - name: "image-pull-secret" +global: + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). 
+ # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + # + # Allow parent charts to override registry hostname + imageRegistry: "" + +# Configure kube-rbac-proxy. When enabled, creates a kube-rbac-proxy to protect the node-exporter HTTP endpoint. +# The requests are served through the same service, but over HTTPS. +kubeRBACProxy: + enabled: false + image: + registry: quay.io + repository: brancz/kube-rbac-proxy + tag: v0.14.0 + sha: "" + pullPolicy: IfNotPresent + + # List of additional cli arguments to configure kube-rbac-proxy + # for example: --tls-cipher-suites, --log-file, etc. + # All the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage + extraArgs: [] + + ## Specify security settings for a Container + ## Allows overrides and additional options compared to (Pod) securityContext + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + containerSecurityContext: {} + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + service: type: ClusterIP port: 9100 @@ -34,6 +85,10 @@ prometheus: jobLabel: "" + # List of pod labels to add to node exporter metrics + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + podTargetLabels: [] + scheme: http basicAuth: {} bearerTokenFile: @@ -74,6 +129,96 @@ prometheus: ## labelValueLengthLimit: 0 + # PodMonitor defines monitoring for a set of pods. + # ref. https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.PodMonitor + # Using a PodMonitor may be preferred in some environments where there is a very large number + # of Node Exporter endpoints (1000+) behind a single service. + # The PodMonitor is disabled by default. When switching from ServiceMonitor to PodMonitor, + # the time series resulting from the configuration through PodMonitor may have different labels. + # For instance, the service label will no longer be present, which might + # affect PromQL queries selecting that label. + podMonitor: + enabled: false + # Namespace in which to deploy the pod monitor. Defaults to the release namespace. + namespace: "" + # Additional labels, e.g. setting a label for pod monitor selector as set in prometheus + additionalLabels: {} + # release: kube-prometheus-stack + # PodTargetLabels transfers labels of the Kubernetes Pod onto the target. + podTargetLabels: [] + # apiVersion defaults to monitoring.coreos.com/v1. + apiVersion: "" + # Override pod selector to select pod objects. + selectorOverride: {} + # Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above. + attachMetadata: + node: false + # The label to use to retrieve the job name from. Defaults to label app.kubernetes.io/name. + jobLabel: "" + + # Scheme/protocol to use for scraping. + scheme: "http" + # Path to scrape metrics at.
+ path: "/metrics" + + # BasicAuth allow an endpoint to authenticate over basic authentication. + # More info: https://prometheus.io/docs/operating/configuration/#endpoint + basicAuth: {} + # Secret to mount to read bearer token for scraping targets. + # The secret needs to be in the same namespace as the pod monitor and accessible by the Prometheus Operator. + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core + bearerTokenSecret: {} + # TLS configuration to use when scraping the endpoint. + tlsConfig: {} + # Authorization section for this endpoint. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.SafeAuthorization + authorization: {} + # OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.OAuth2 + oauth2: {} + + # ProxyURL eg http://proxyserver:2195. Directs scrapes through proxy to this endpoint. + proxyUrl: "" + # Interval at which endpoints should be scraped. If not specified Prometheus’ global scrape interval is used. + interval: "" + # Timeout after which the scrape is ended. If not specified, the Prometheus global scrape interval is used. + scrapeTimeout: "" + # HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + honorTimestamps: true + # HonorLabels chooses the metric’s labels on collisions with target labels. + honorLabels: true + # Whether to enable HTTP2. Default false. + enableHttp2: "" + # Drop pods that are not running. (Failed, Succeeded). + # Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + filterRunning: "" + # FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. Default false. + followRedirects: "" + # Optional HTTP URL parameters + params: {} + + # RelabelConfigs to apply to samples before scraping. Prometheus Operator automatically adds + # relabelings for a few standard Kubernetes fields. The original scrape job’s name + # is available via the __tmp_prometheus_job_name label. + # More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + relabelings: [] + # MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + + # SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + sampleLimit: 0 + # TargetLimit defines a limit on the number of scraped targets that will be accepted. + targetLimit: 0 + # Per-scrape limit on number of labels that will be accepted for a sample. + # Only valid in Prometheus versions 2.27.0 and newer. + labelLimit: 0 + # Per-scrape limit on length of labels name that will be accepted for a sample. + # Only valid in Prometheus versions 2.27.0 and newer. + labelNameLengthLimit: 0 + # Per-scrape limit on length of labels value that will be accepted for a sample. + # Only valid in Prometheus versions 2.27.0 and newer. 
+ labelValueLengthLimit: 0 + ## Customize the updateStrategy if set updateStrategy: type: RollingUpdate diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagerconfigs.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagerconfigs.yaml index 858e8dcd..fa0f9b18 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagerconfigs.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagerconfigs.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: alertmanagerconfigs.monitoring.coreos.com spec: @@ -4383,6 +4383,12 @@ spec: the resource's namespace. If present, it will be added to the generated Alertmanager configuration as a first-level route. properties: + activeTimeIntervals: + description: ActiveTimeIntervals is a list of MuteTimeInterval + names when this route should be active. + items: + type: string + type: array continue: description: Boolean indicating whether an alert should continue matching subsequent sibling nodes. It will always be overridden diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagers.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagers.yaml index b2bc5bc9..0701cccc 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagers.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-alertmanagers.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: alertmanagers.monitoring.coreos.com spec: @@ -29,6 +29,16 @@ spec: jsonPath: .spec.replicas name: Replicas type: integer + - description: The number of ready replicas + jsonPath: .status.availableReplicas + name: Ready + type: integer + - jsonPath: .status.conditions[?(@.type == 'Reconciled')].status + name: Reconciled + type: string + - jsonPath: .status.conditions[?(@.type == 'Available')].status + name: Available + type: string - jsonPath: .metadata.creationTimestamp name: Age type: date @@ -2215,6 +2225,27 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -2721,6 +2752,16 @@ spec: to ensure the Prometheus Operator knows what version of Alertmanager is being configured. type: string + imagePullPolicy: + description: Image pull policy for the 'alertmanager', 'init-config-reloader' + and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + for more details. + enum: + - "" + - Always + - Never + - IfNotPresent + type: string imagePullSecrets: description: An optional list of references to secrets in the same namespace to use for pulling prometheus and alertmanager images @@ -3500,6 +3541,27 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -3992,8 +4054,9 @@ spec: description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available - as soon as it is ready) This is an alpha field and requires enabling - StatefulSetMinReadySeconds feature gate. + as soon as it is ready) This is an alpha field from kubernetes 1.22 + until 1.24 which requires enabling the StatefulSetMinReadySeconds + feature gate. format: int32 type: integer nodeSelector: @@ -4049,6 +4112,26 @@ spec: resources: description: Define resources requests and limits for single Pods. properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4196,9 +4279,14 @@ spec: type: object supplementalGroups: description: A list of groups applied to the first process run - in each container, in addition to the container's primary GID. 
If - unspecified, no groups will be added to any container. Note - that this field cannot be set when spec.os.name is windows. + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. Note that this field cannot + be set when spec.os.name is windows. items: format: int64 type: integer @@ -4280,9 +4368,9 @@ spec: allows to remove any subPath usage in volume mounts.' type: boolean emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. - More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + description: 'EmptyDirVolumeSource to be used by the StatefulSet. + If specified, used in place of any volumeClaimTemplate. More + info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: medium: description: 'medium represents what type of storage medium @@ -4305,9 +4393,9 @@ spec: x-kubernetes-int-or-string: true type: object ephemeral: - description: 'EphemeralVolumeSource to be used by the Prometheus - StatefulSets. This is a beta field in k8s 1.21, for lower versions, - starting with k8s 1.19, it requires enabling the GenericEphemeralVolume + description: 'EphemeralVolumeSource to be used by the StatefulSet. + This is a beta field in k8s 1.21, for lower versions, starting + with k8s 1.19, it requires enabling the GenericEphemeralVolume feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' properties: volumeClaimTemplate: @@ -4354,9 +4442,12 @@ spec: provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data - source. If the AnyVolumeDataSource feature gate - is enabled, this field will always have the same - contents as the DataSourceRef field.' + source. When the AnyVolumeDataSource feature gate + is enabled, dataSource contents will be copied to + dataSourceRef, and dataSourceRef contents will be + copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -4381,27 +4472,33 @@ spec: dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed if - the type of the specified object matches some installed - volume populator or dynamic provisioner. This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will - be set to the same value automatically if one of - them is empty and the other is non-empty. 
There - are two important differences between DataSource - and DataSourceRef: * While DataSource only allows - two specific types of objects, DataSourceRef allows + volume is desired. This may be any object from a + non-empty API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the dataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t specified + in dataSourceRef, both fields (dataSource and dataSourceRef) + will be set to the same value automatically if one + of them is empty and the other is non-empty. When + namespace is specified in dataSourceRef, dataSource + isn''t set to the same value and must be empty. + There are three important differences between dataSource + and dataSourceRef: * While dataSource only allows + two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, + objects. * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is - specified. (Beta) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' + specified. * While dataSource only allows local + objects, dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the namespace + field of dataSourceRef requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource @@ -4418,11 +4515,21 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept the + reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -4431,6 +4538,29 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4524,7 +4654,10 @@ spec: type: object type: object volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. + description: A PVC spec to be used by the StatefulSet. The easiest + way to use a volume that cannot be automatically provisioned + (for whatever reason) is to use a label selector alongside manually + created PersistentVolumes. properties: apiVersion: description: 'APIVersion defines the versioned schema of this @@ -4584,9 +4717,12 @@ spec: * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the - contents of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have - the same contents as the DataSourceRef field.' + contents of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be + copied to dataSourceRef, and dataSourceRef contents + will be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, then + dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -4608,24 +4744,31 @@ spec: dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any local object from - a non-empty API group (non core object) or a PersistentVolumeClaim + volume is desired. This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource + This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set - to the same value automatically if one of them is empty - and the other is non-empty. There are two important - differences between DataSource and DataSourceRef: * - While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well as - PersistentVolumeClaim objects. * While DataSource ignores - disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Beta) Using this field requires the AnyVolumeDataSource + when namespace isn''t specified in dataSourceRef, both + fields (dataSource and dataSourceRef) will be set to + the same value automatically if one of them is empty + and the other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the same + value and must be empty. There are three important differences + between dataSource and dataSourceRef: * While dataSource + only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim + objects. * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, + and generates an error if a disallowed value is specified. 
+ * While dataSource only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) Using this + field requires the AnyVolumeDataSource feature gate + to be enabled. (Alpha) Using the namespace field of + dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: @@ -4640,11 +4783,20 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object + is required in the referent namespace to allow that + namespace's owner to accept the reference. See the + ReferenceGrant documentation for details. (Alpha) + This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -4653,6 +4805,29 @@ spec: must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used by + this container. \n This is an alpha field and requires + enabling the DynamicResourceAllocation feature gate. + \n This field is immutable." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the Pod + where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4996,8 +5171,8 @@ spec: are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor - policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: description: "NodeTaintsPolicy indicates how we will treat node @@ -5006,8 +5181,8 @@ spec: for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the - Ignore policy. This is a alpha-level feature enabled by the - NodeInclusionPolicyInPodTopologySpread feature flag." + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that @@ -5548,9 +5723,12 @@ spec: provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature - gate is enabled, this field will always have the - same contents as the DataSourceRef field.' + data source. 
When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -5576,27 +5754,35 @@ spec: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any - local object from a non-empty API group (non core - object) or a PersistentVolumeClaim object. When - this field is specified, volume binding will only - succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. This field will replace the functionality of the - DataSource field and as such if both fields are + dataSource field and as such if both fields are non-empty, they must have the same value. For - backwards compatibility, both fields (DataSource - and DataSourceRef) will be set to the same value + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While - DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well - as PersistentVolumeClaim objects. * While DataSource - ignores disallowed values (dropping them), DataSourceRef - preserves all values, and generates an error if - a disallowed value is specified. (Beta) Using - this field requires the AnyVolumeDataSource feature - gate to be enabled.' + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' properties: apiGroup: description: APIGroup is the group for the resource @@ -5613,11 +5799,21 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. 
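[Annotation] The `namespace` field added to `dataSourceRef` throughout these CRDs allows a volume to be populated from an object in another namespace, gated behind the alpha CrossNamespaceVolumeDataSource feature gate and requiring a ReferenceGrant in the referent namespace, as the new description states. A minimal sketch of how this could be wired for Alertmanager storage; the namespaces, snapshot name, and storage class are hypothetical, not part of this chart:

apiVersion: gateway.networking.k8s.io/v1beta1
kind: ReferenceGrant
metadata:
  name: allow-monitoring-pvcs        # hypothetical
  namespace: backup                  # namespace that owns the snapshot
spec:
  from:                              # who may reference objects here
    - group: ""
      kind: PersistentVolumeClaim
      namespace: monitoring
  to:                                # what they may reference
    - group: snapshot.storage.k8s.io
      kind: VolumeSnapshot
---
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
  name: main
  namespace: monitoring
spec:
  replicas: 3
  storage:
    volumeClaimTemplate:
      spec:
        storageClassName: csi-rbd            # hypothetical class
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 10Gi
        dataSourceRef:
          apiGroup: snapshot.storage.k8s.io
          kind: VolumeSnapshot
          name: am-backup                    # hypothetical snapshot
          namespace: backup                  # alpha: CrossNamespaceVolumeDataSource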
+ type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -5626,6 +5822,30 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6809,31 +7029,71 @@ spec: type: object status: description: 'Most recent observed status of the Alertmanager cluster. - Read-only. Not included when requesting from the apiserver, only from - the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + Read-only. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' properties: availableReplicas: description: Total number of available pods (ready for at least minReadySeconds) targeted by this Alertmanager cluster. format: int32 type: integer + conditions: + description: The current state of the Alertmanager object. + items: + description: Condition represents the state of the resources associated + with the Prometheus or Alertmanager resource. + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status property. + format: date-time + type: string + message: + description: Human-readable message indicating details for the + condition's last transition. + type: string + observedGeneration: + description: ObservedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if `.metadata.generation` + is currently 12, but the `.status.conditions[].observedGeneration` + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + type: integer + reason: + description: Reason for the condition's last transition. + type: string + status: + description: Status of the condition. + type: string + type: + description: Type of the condition being reported. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map paused: description: Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. type: boolean replicas: description: Total number of non-terminated pods targeted by this - Alertmanager cluster (their labels match the selector). + Alertmanager object (their labels match the selector). 
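[Annotation] Together with the `status` subresource enabled a few hunks below (`subresources: {}` becomes `subresources: status: {}`), the new `conditions` list-map lets the operator publish rollout state on each Alertmanager object. Purely illustrative of the schema added above; all values are made up, though `Available` and `Reconciled` are the condition types the operator reports upstream:

status:
  paused: false
  replicas: 3
  updatedReplicas: 3
  availableReplicas: 3
  unavailableReplicas: 0
  conditions:
    - type: Available
      status: "True"
      lastTransitionTime: "2023-04-14T10:44:57Z"
      observedGeneration: 12    # out of date if below metadata.generation
    - type: Reconciled
      status: "True"
      lastTransitionTime: "2023-04-14T10:44:57Z"
      observedGeneration: 12

With the subresource in place, controllers update status via the /status endpoint and a plain `kubectl apply` of the spec no longer clobbers it.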
format: int32 type: integer unavailableReplicas: description: Total number of unavailable pods targeted by this Alertmanager - cluster. + object. format: int32 type: integer updatedReplicas: description: Total number of non-terminated pods targeted by this - Alertmanager cluster that have the desired version spec. + Alertmanager object that have the desired version spec. format: int32 type: integer required: @@ -6848,4 +7108,5 @@ spec: type: object served: true storage: true - subresources: {} + subresources: + status: {} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-podmonitors.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-podmonitors.yaml index d39a5527..f3dfc428 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-podmonitors.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-podmonitors.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: podmonitors.monitoring.coreos.com spec: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-probes.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-probes.yaml index d82abd8f..a20c6ff0 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-probes.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-probes.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: probes.monitoring.coreos.com spec: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheuses.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheuses.yaml index 7da4e7bc..7666280f 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheuses.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheuses.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 argocd.argoproj.io/sync-options: Replace=true creationTimestamp: null name: prometheuses.monitoring.coreos.com @@ -1051,6 +1051,53 @@ spec: Bearer, Basic will cause an error type: string type: object + basicAuth: + description: 
BasicAuth allow an endpoint to authenticate + over basic authentication + properties: + password: + description: The secret in the service monitor namespace + that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: The secret in the service monitor namespace + that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object bearerTokenFile: description: BearerTokenFile to read from filesystem to use when authenticating to Alertmanager. @@ -2249,6 +2296,27 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -2910,6 +2978,16 @@ spec: to ensure the Prometheus Operator knows what version of Prometheus is being configured. type: string + imagePullPolicy: + description: Image pull policy for the 'prometheus', 'init-config-reloader' + and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + for more details. + enum: + - "" + - Always + - Never + - IfNotPresent + type: string imagePullSecrets: description: An optional list of references to secrets in the same namespace to use for pulling prometheus and alertmanager images @@ -3691,6 +3769,27 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
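[Annotation] The `basicAuth` block added above lets Prometheus authenticate against protected Alertmanager endpoints using credentials read from a Secret in the service monitor namespace. A minimal sketch; the Secret name and key names are illustrative:

apiVersion: v1
kind: Secret
metadata:
  name: am-basic-auth                # hypothetical
  namespace: monitoring
stringData:
  username: prometheus
  password: changeme
---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s
  namespace: monitoring
spec:
  alerting:
    alertmanagers:
      - namespace: monitoring
        name: alertmanager-main
        port: web
        basicAuth:
          username:
            name: am-basic-auth      # Secret above
            key: username
          password:
            name: am-basic-auth
            key: password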
+ properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4182,8 +4281,9 @@ spec: description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available - as soon as it is ready) This is an alpha field and requires enabling - StatefulSetMinReadySeconds feature gate. + as soon as it is ready) This is an alpha field from kubernetes 1.22 + until 1.24 which requires enabling the StatefulSetMinReadySeconds + feature gate. format: int32 type: integer nodeSelector: @@ -4281,9 +4381,15 @@ spec: type: object x-kubernetes-map-type: atomic podMonitorSelector: - description: '*Experimental* PodMonitors to be selected for target - discovery. *Deprecated:* if neither this nor serviceMonitorSelector - are specified, configuration is unmanaged.' + description: "*Experimental* PodMonitors to be selected for target + discovery. \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector` + and `spec.probeSelector` are null, the Prometheus configuration + is unmanaged. The Prometheus operator will ensure that the Prometheus + configuration's Secret exists, but it is the responsibility of the + user to provide the raw gzipped Prometheus configuration under the + `prometheus.yaml.gz` key. This behavior is deprecated and will be + removed in the next major version of the custom resource definition. + It is recommended to use `spec.additionalScrapeConfigs` instead." properties: matchExpressions: description: matchExpressions is a list of label selector requirements. @@ -4327,6 +4433,12 @@ spec: type: object type: object x-kubernetes-map-type: atomic + podTargetLabels: + description: PodTargetLabels are added to all Pod/ServiceMonitors' + podTargetLabels + items: + type: string + type: array portName: description: Port name used for the pods and governing service. This defaults to web @@ -4381,7 +4493,15 @@ spec: type: object x-kubernetes-map-type: atomic probeSelector: - description: '*Experimental* Probes to be selected for target discovery.' + description: "*Experimental* Probes to be selected for target discovery. + \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector` and + `spec.probeSelector` are null, the Prometheus configuration is unmanaged. + The Prometheus operator will ensure that the Prometheus configuration's + Secret exists, but it is the responsibility of the user to provide + the raw gzipped Prometheus configuration under the `prometheus.yaml.gz` + key. This behavior is deprecated and will be removed in the next + major version of the custom resource definition. It is recommended + to use `spec.additionalScrapeConfigs` instead." properties: matchExpressions: description: matchExpressions is a list of label selector requirements. @@ -4463,6 +4583,7 @@ spec: maxConcurrency: description: Number of concurrent queries that can be run at once. format: int32 + minimum: 1 type: integer maxSamples: description: Maximum number of samples a single query can load @@ -5372,6 +5493,26 @@ spec: resources: description: Define resources requests and limits for single Pods. 
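[Annotation] The reworded selector descriptions above spell out unmanaged mode precisely: only when `spec.serviceMonitorSelector`, `spec.podMonitorSelector` and `spec.probeSelector` are all null does the operator leave the raw gzipped configuration under `prometheus.yaml.gz` to the user, and that mode is deprecated in favour of `spec.additionalScrapeConfigs`. A sketch of the recommended route, with hypothetical names and target:

apiVersion: v1
kind: Secret
metadata:
  name: additional-scrape-configs    # hypothetical
  namespace: monitoring
stringData:
  prometheus-additional.yaml: |
    - job_name: external-blackbox    # raw Prometheus scrape config
      static_configs:
        - targets: ["192.0.2.10:9115"]
---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s
  namespace: monitoring
spec:
  serviceMonitorSelector: {}          # non-null keeps managed mode on
  additionalScrapeConfigs:
    name: additional-scrape-configs   # Secret above
    key: prometheus-additional.yaml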
properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -5651,9 +5792,14 @@ spec: type: object supplementalGroups: description: A list of groups applied to the first process run - in each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. Note - that this field cannot be set when spec.os.name is windows. + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. Note that this field cannot + be set when spec.os.name is windows. items: format: int64 type: integer @@ -5765,9 +5911,15 @@ spec: type: object x-kubernetes-map-type: atomic serviceMonitorSelector: - description: ServiceMonitors to be selected for target discovery. - *Deprecated:* if neither this nor podMonitorSelector are specified, - configuration is unmanaged. + description: "ServiceMonitors to be selected for target discovery. + \n If `spec.serviceMonitorSelector`, `spec.podMonitorSelector` and + `spec.probeSelector` are null, the Prometheus configuration is unmanaged. + The Prometheus operator will ensure that the Prometheus configuration's + Secret exists, but it is the responsibility of the user to provide + the raw gzipped Prometheus configuration under the `prometheus.yaml.gz` + key. This behavior is deprecated and will be removed in the next + major version of the custom resource definition. It is recommended + to use `spec.additionalScrapeConfigs` instead." properties: matchExpressions: description: matchExpressions is a list of label selector requirements. @@ -5838,9 +5990,9 @@ spec: allows to remove any subPath usage in volume mounts.' type: boolean emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. - More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + description: 'EmptyDirVolumeSource to be used by the StatefulSet. + If specified, used in place of any volumeClaimTemplate. More + info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: medium: description: 'medium represents what type of storage medium @@ -5863,9 +6015,9 @@ spec: x-kubernetes-int-or-string: true type: object ephemeral: - description: 'EphemeralVolumeSource to be used by the Prometheus - StatefulSets. This is a beta field in k8s 1.21, for lower versions, - starting with k8s 1.19, it requires enabling the GenericEphemeralVolume + description: 'EphemeralVolumeSource to be used by the StatefulSet. 
+ This is a beta field in k8s 1.21, for lower versions, starting + with k8s 1.19, it requires enabling the GenericEphemeralVolume feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' properties: volumeClaimTemplate: @@ -5912,9 +6064,12 @@ spec: provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data - source. If the AnyVolumeDataSource feature gate - is enabled, this field will always have the same - contents as the DataSourceRef field.' + source. When the AnyVolumeDataSource feature gate + is enabled, dataSource contents will be copied to + dataSourceRef, and dataSourceRef contents will be + copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -5939,27 +6094,33 @@ spec: dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed if - the type of the specified object matches some installed - volume populator or dynamic provisioner. This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will - be set to the same value automatically if one of - them is empty and the other is non-empty. There - are two important differences between DataSource - and DataSourceRef: * While DataSource only allows - two specific types of objects, DataSourceRef allows + volume is desired. This may be any object from a + non-empty API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the dataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t specified + in dataSourceRef, both fields (dataSource and dataSourceRef) + will be set to the same value automatically if one + of them is empty and the other is non-empty. When + namespace is specified in dataSourceRef, dataSource + isn''t set to the same value and must be empty. + There are three important differences between dataSource + and dataSourceRef: * While dataSource only allows + two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, + objects. * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is - specified. (Beta) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' + specified. * While dataSource only allows local + objects, dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. 
(Alpha) Using the namespace + field of dataSourceRef requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource @@ -5976,11 +6137,21 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept the + reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -5989,6 +6160,29 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6082,7 +6276,10 @@ spec: type: object type: object volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. + description: A PVC spec to be used by the StatefulSet. The easiest + way to use a volume that cannot be automatically provisioned + (for whatever reason) is to use a label selector alongside manually + created PersistentVolumes. properties: apiVersion: description: 'APIVersion defines the versioned schema of this @@ -6142,9 +6339,12 @@ spec: * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the - contents of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have - the same contents as the DataSourceRef field.' + contents of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be + copied to dataSourceRef, and dataSourceRef contents + will be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, then + dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -6166,24 +6366,31 @@ spec: dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any local object from - a non-empty API group (non core object) or a PersistentVolumeClaim + volume is desired. This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim object. 
When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource + This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set - to the same value automatically if one of them is empty - and the other is non-empty. There are two important - differences between DataSource and DataSourceRef: * - While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well as - PersistentVolumeClaim objects. * While DataSource ignores - disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Beta) Using this field requires the AnyVolumeDataSource + when namespace isn''t specified in dataSourceRef, both + fields (dataSource and dataSourceRef) will be set to + the same value automatically if one of them is empty + and the other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the same + value and must be empty. There are three important differences + between dataSource and dataSourceRef: * While dataSource + only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim + objects. * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + * While dataSource only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) Using this + field requires the AnyVolumeDataSource feature gate + to be enabled. (Alpha) Using the namespace field of + dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: @@ -6198,11 +6405,20 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object + is required in the referent namespace to allow that + namespace's owner to accept the reference. See the + ReferenceGrant documentation for details. (Alpha) + This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -6211,6 +6427,29 @@ spec: must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used by + this container. \n This is an alpha field and requires + enabling the DynamicResourceAllocation feature gate. + \n This field is immutable." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the Pod + where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6644,6 +6883,27 @@ spec: Thanos sidecar. If not provided, no requests/limits will be set properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6911,8 +7171,8 @@ spec: are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor - policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: description: "NodeTaintsPolicy indicates how we will treat node @@ -6921,8 +7181,8 @@ spec: for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the - Ignore policy. This is a alpha-level feature enabled by the - NodeInclusionPolicyInPodTopologySpread feature flag." + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that @@ -7477,9 +7737,12 @@ spec: provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature - gate is enabled, this field will always have the - same contents as the DataSourceRef field.' + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -7505,27 +7768,35 @@ spec: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any - local object from a non-empty API group (non core - object) or a PersistentVolumeClaim object. When - this field is specified, volume binding will only - succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. 
This field will replace the functionality of the - DataSource field and as such if both fields are + dataSource field and as such if both fields are non-empty, they must have the same value. For - backwards compatibility, both fields (DataSource - and DataSourceRef) will be set to the same value + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While - DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well - as PersistentVolumeClaim objects. * While DataSource - ignores disallowed values (dropping them), DataSourceRef - preserves all values, and generates an error if - a disallowed value is specified. (Beta) Using - this field requires the AnyVolumeDataSource feature - gate to be enabled.' + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' properties: apiGroup: description: APIGroup is the group for the resource @@ -7542,11 +7813,21 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -7555,6 +7836,30 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. 
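[Annotation] The `claims` hunk repeated under every `resources` schema in this patch mirrors core/v1 ResourceRequirements from Kubernetes 1.26: a container-level claim must name an entry in the pod's `spec.resourceClaims`, and the whole mechanism is alpha behind the DynamicResourceAllocation feature gate, as the description says. A generic illustration on a plain Pod, since the operator CRDs only surface the container-level half; claim and template names are hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: dra-demo                     # hypothetical
spec:
  resourceClaims:
    - name: gpu                      # referenced below by resources.claims
      source:
        resourceClaimTemplateName: gpu-template   # hypothetical ResourceClaimTemplate
  containers:
    - name: main
      image: registry.k8s.io/pause:3.9
      resources:
        claims:
          - name: gpu                # must match spec.resourceClaims[].name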
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -8589,6 +8894,13 @@ spec: a rolling update will be triggered. type: boolean type: object + maxConnections: + description: Defines the maximum number of simultaneous connections + A zero value means that Prometheus doesn't accept any incoming + connection. + format: int32 + minimum: 0 + type: integer pageTitle: description: The prometheus web page title type: string @@ -8755,8 +9067,8 @@ spec: conditions: description: The current state of the Prometheus deployment. items: - description: PrometheusCondition represents the state of the resources - associated with the Prometheus resource. + description: Condition represents the state of the resources associated + with the Prometheus or Alertmanager resource. properties: lastTransitionTime: description: lastTransitionTime is the time of the last update @@ -8769,8 +9081,8 @@ spec: type: string observedGeneration: description: ObservedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration + that the condition was set based upon. For instance, if `.metadata.generation` + is currently 12, but the `.status.conditions[].observedGeneration` is 9, the condition is out of date with respect to the current state of the instance. format: int64 @@ -8779,7 +9091,7 @@ spec: description: Reason for the condition's last transition. type: string status: - description: status of the condition. + description: Status of the condition. type: string type: description: Type of the condition being reported. diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheusrules.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheusrules.yaml index f139ffef..d8f2b40f 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheusrules.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-prometheusrules.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: prometheusrules.monitoring.coreos.com spec: @@ -57,7 +57,6 @@ spec: minLength: 1 type: string partial_response_strategy: - default: "" description: 'PartialResponseStrategy is only used by ThanosRuler and will be ignored by Prometheus instances. 
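[Annotation] The new `web.maxConnections` field above (int32, `minimum: 0`) caps simultaneous connections to the Prometheus web handler; per the description, zero means no incoming connection is accepted. For example:

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s
  namespace: monitoring
spec:
  web:
    maxConnections: 512              # illustrative value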
More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response' pattern: ^(?i)(abort|warn)?$ diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-servicemonitors.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-servicemonitors.yaml index 92ecc354..f814eb7c 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-servicemonitors.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-servicemonitors.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: servicemonitors.monitoring.coreos.com spec: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-thanosrulers.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-thanosrulers.yaml index 75597276..73c6cf02 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-thanosrulers.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/crds/crd-thanosrulers.yaml @@ -1,10 +1,10 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 + controller-gen.kubebuilder.io/version: v0.11.1 creationTimestamp: null name: thanosrulers.monitoring.coreos.com spec: @@ -54,6 +54,31 @@ spec: description: 'Specification of the desired behavior of the ThanosRuler cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' properties: + additionalArgs: + description: AdditionalArgs allows setting additional arguments for + the ThanosRuler container. It is intended for e.g. activating hidden + flags which are not supported by the dedicated configuration options + yet. The arguments are passed as-is to the ThanosRuler container + which may cause issues if they are invalid or not supported by the + given ThanosRuler version. In case of an argument conflict (e.g. + an argument which is already set by the operator itself) or when + providing an invalid argument the reconciliation will fail and an + error will be logged. + items: + description: Argument as part of the AdditionalArgs list. + properties: + name: + description: Name of the argument, e.g. "scrape.discovery-reload-interval". + minLength: 1 + type: string + value: + description: Argument value, e.g. 30s. Can be empty for name-only + arguments (e.g. --storage.tsdb.no-lockfile) + type: string + required: + - name + type: object + type: array affinity: description: If specified, the pod's scheduling constraints. properties: @@ -1713,6 +1738,27 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. 
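[Annotation] `additionalArgs` on ThanosRuler, added above, passes extra flags verbatim to the ThanosRuler container for activating hidden flags that have no dedicated field yet; a conflicting or invalid argument fails reconciliation, as the description warns. A sketch reusing the illustrative argument name and value from the field docs themselves; substitute a real hidden flag of your ThanosRuler version:

apiVersion: monitoring.coreos.com/v1
kind: ThanosRuler
metadata:
  name: ruler
  namespace: monitoring
spec:
  image: quay.io/thanos/thanos:v0.30.2          # illustrative tag
  queryEndpoints: ["http://thanos-query.monitoring.svc:9090"]
  additionalArgs:
    - name: scrape.discovery-reload-interval    # placeholder name from the field docs
      value: "30s"                              # can be empty for name-only flags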
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -2383,6 +2429,16 @@ spec: image: description: Thanos container image URL. type: string + imagePullPolicy: + description: Image pull policy for the 'thanos', 'init-config-reloader' + and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + for more details. + enum: + - "" + - Always + - Never + - IfNotPresent + type: string imagePullSecrets: description: An optional list of references to secrets in the same namespace to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod @@ -3161,6 +3217,27 @@ spec: description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -3660,8 +3737,9 @@ spec: description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available - as soon as it is ready) This is an alpha field and requires enabling - StatefulSetMinReadySeconds feature gate. + as soon as it is ready) This is an alpha field from kubernetes 1.22 + until 1.24 which requires enabling the StatefulSetMinReadySeconds + feature gate. format: int32 type: integer nodeSelector: @@ -3789,6 +3867,26 @@ spec: description: Resources defines the resource requirements for single Pods. If not provided, no requests/limits will be set properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. 
It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4016,9 +4114,14 @@ spec: type: object supplementalGroups: description: A list of groups applied to the first process run - in each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. Note - that this field cannot be set when spec.os.name is windows. + in each container, in addition to the container's primary GID, + the fsGroup (if specified), and group memberships defined in + the container image for the uid of the container process. If + unspecified, no additional groups are added to any container. + Note that group memberships defined in the container image for + the uid of the container process are still effective, even if + they are not included in this list. Note that this field cannot + be set when spec.os.name is windows. items: format: int64 type: integer @@ -4092,9 +4195,9 @@ spec: allows to remove any subPath usage in volume mounts.' type: boolean emptyDir: - description: 'EmptyDirVolumeSource to be used by the Prometheus - StatefulSets. If specified, used in place of any volumeClaimTemplate. - More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + description: 'EmptyDirVolumeSource to be used by the StatefulSet. + If specified, used in place of any volumeClaimTemplate. More + info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' properties: medium: description: 'medium represents what type of storage medium @@ -4117,9 +4220,9 @@ spec: x-kubernetes-int-or-string: true type: object ephemeral: - description: 'EphemeralVolumeSource to be used by the Prometheus - StatefulSets. This is a beta field in k8s 1.21, for lower versions, - starting with k8s 1.19, it requires enabling the GenericEphemeralVolume + description: 'EphemeralVolumeSource to be used by the StatefulSet. + This is a beta field in k8s 1.21, for lower versions, starting + with k8s 1.19, it requires enabling the GenericEphemeralVolume feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' properties: volumeClaimTemplate: @@ -4166,9 +4269,12 @@ spec: provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data - source. If the AnyVolumeDataSource feature gate - is enabled, this field will always have the same - contents as the DataSourceRef field.' + source. When the AnyVolumeDataSource feature gate + is enabled, dataSource contents will be copied to + dataSourceRef, and dataSourceRef contents will be + copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -4193,27 +4299,33 @@ spec: dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed if - the type of the specified object matches some installed - volume populator or dynamic provisioner. 
This field - will replace the functionality of the DataSource - field and as such if both fields are non-empty, - they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will - be set to the same value automatically if one of - them is empty and the other is non-empty. There - are two important differences between DataSource - and DataSourceRef: * While DataSource only allows - two specific types of objects, DataSourceRef allows + volume is desired. This may be any object from a + non-empty API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the dataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, when namespace isn''t specified + in dataSourceRef, both fields (dataSource and dataSourceRef) + will be set to the same value automatically if one + of them is empty and the other is non-empty. When + namespace is specified in dataSourceRef, dataSource + isn''t set to the same value and must be empty. + There are three important differences between dataSource + and dataSourceRef: * While dataSource only allows + two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim - objects. * While DataSource ignores disallowed values - (dropping them), DataSourceRef preserves all values, + objects. * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is - specified. (Beta) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' + specified. * While dataSource only allows local + objects, dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the namespace + field of dataSourceRef requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource @@ -4230,11 +4342,21 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept the + reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -4243,6 +4365,29 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. 
+ properties: + name: + description: Name must match the name of + one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes + that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4336,7 +4481,10 @@ spec: type: object type: object volumeClaimTemplate: - description: A PVC spec to be used by the Prometheus StatefulSets. + description: A PVC spec to be used by the StatefulSet. The easiest + way to use a volume that cannot be automatically provisioned + (for whatever reason) is to use a label selector alongside manually + created PersistentVolumes. properties: apiVersion: description: 'APIVersion defines the versioned schema of this @@ -4396,9 +4544,12 @@ spec: * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the - contents of the specified data source. If the AnyVolumeDataSource - feature gate is enabled, this field will always have - the same contents as the DataSourceRef field.' + contents of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents will be + copied to dataSourceRef, and dataSourceRef contents + will be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, then + dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -4420,24 +4571,31 @@ spec: dataSourceRef: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any local object from - a non-empty API group (non core object) or a PersistentVolumeClaim + volume is desired. This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. - This field will replace the functionality of the DataSource + This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, - both fields (DataSource and DataSourceRef) will be set - to the same value automatically if one of them is empty - and the other is non-empty. There are two important - differences between DataSource and DataSourceRef: * - While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well as - PersistentVolumeClaim objects. * While DataSource ignores - disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Beta) Using this field requires the AnyVolumeDataSource + when namespace isn''t specified in dataSourceRef, both + fields (dataSource and dataSourceRef) will be set to + the same value automatically if one of them is empty + and the other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the same + value and must be empty. There are three important differences + between dataSource and dataSourceRef: * While dataSource + only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim + objects. 
* While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + * While dataSource only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) Using this + field requires the AnyVolumeDataSource feature gate + to be enabled. (Alpha) Using the namespace field of + dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' properties: apiGroup: @@ -4452,11 +4610,20 @@ spec: name: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant object + is required in the referent namespace to allow that + namespace's owner to accept the reference. See the + ReferenceGrant documentation for details. (Alpha) + This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -4465,6 +4632,29 @@ spec: must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used by + this container. \n This is an alpha field and requires + enabling the DynamicResourceAllocation feature gate. + \n This field is immutable." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the Pod + where this field is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4802,8 +4992,8 @@ spec: are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor - policy. This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread - feature flag." + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." type: string nodeTaintsPolicy: description: "NodeTaintsPolicy indicates how we will treat node @@ -4812,8 +5002,8 @@ spec: for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the - Ignore policy. This is a alpha-level feature enabled by the - NodeInclusionPolicyInPodTopologySpread feature flag." + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." type: string topologyKey: description: TopologyKey is the key of node labels. Nodes that @@ -4877,6 +5067,9 @@ spec: file. When used alongside with TracingConfig, TracingConfigFile takes precedence. type: string + version: + description: Version of Thanos to be deployed. + type: string volumes: description: Volumes allows configuration of additional volumes on the output StatefulSet definition. 
Volumes specified will be appended @@ -5334,9 +5527,12 @@ spec: provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified - data source. If the AnyVolumeDataSource feature - gate is enabled, this field will always have the - same contents as the DataSourceRef field.' + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' properties: apiGroup: description: APIGroup is the group for the resource @@ -5362,27 +5558,35 @@ spec: description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any - local object from a non-empty API group (non core - object) or a PersistentVolumeClaim object. When - this field is specified, volume binding will only - succeed if the type of the specified object matches - some installed volume populator or dynamic provisioner. + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. This field will replace the functionality of the - DataSource field and as such if both fields are + dataSource field and as such if both fields are non-empty, they must have the same value. For - backwards compatibility, both fields (DataSource - and DataSourceRef) will be set to the same value + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value automatically if one of them is empty and the - other is non-empty. There are two important differences - between DataSource and DataSourceRef: * While - DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well - as PersistentVolumeClaim objects. * While DataSource - ignores disallowed values (dropping them), DataSourceRef - preserves all values, and generates an error if - a disallowed value is specified. (Beta) Using - this field requires the AnyVolumeDataSource feature - gate to be enabled.' + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' 
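The CRD descriptions above repeat, for each workload, that a cross-namespace dataSourceRef only binds when the owner of the referent namespace accepts it via a gateway.networking.k8s.io ReferenceGrant. A hedged sketch of that companion object, using hypothetical names and the v1beta1 API current at the time of writing:

    apiVersion: gateway.networking.k8s.io/v1beta1
    kind: ReferenceGrant
    metadata:
      name: allow-monitoring-claims     # hypothetical
      namespace: backups                # namespace that owns the VolumeSnapshot
    spec:
      from:
        - group: ""                     # core API group of the PersistentVolumeClaim
          kind: PersistentVolumeClaim
          namespace: monitoring         # namespace where the claim is created
      to:
        - group: snapshot.storage.k8s.io
          kind: VolumeSnapshot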
properties: apiGroup: description: APIGroup is the group for the resource @@ -5399,11 +5603,21 @@ spec: description: Name is the name of resource being referenced type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. + type: string required: - kind - name type: object - x-kubernetes-map-type: atomic resources: description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure @@ -5412,6 +5626,30 @@ spec: value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/_helpers.tpl b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/_helpers.tpl index b01fe6dd..9e3e4d67 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/_helpers.tpl +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/_helpers.tpl @@ -57,6 +57,12 @@ The longest name that gets created adds and extra 37 characters, so truncation s {{- printf "%s-thanos-ruler" (include "kube-prometheus-stack.fullname" .) -}} {{- end }} +{{/* Shortened name suffixed with thanos-ruler */}} +{{- define "kube-prometheus-stack.thanosRuler.name" -}} +{{- default (printf "%s-thanos-ruler" (include "kube-prometheus-stack.name" .)) .Values.thanosRuler.name -}} +{{- end }} + + {{/* Create chart name and version as used by the chart label. */}} {{- define "kube-prometheus-stack.chartref" -}} {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} @@ -106,7 +112,7 @@ heritage: {{ $.Release.Service | quote }} {{/* Create the name of thanosRuler service account to use */}} {{- define "kube-prometheus-stack.thanosRuler.serviceAccountName" -}} {{- if .Values.thanosRuler.serviceAccount.create -}} - {{ default (include "kube-prometheus-stack.thanosRuler.fullname" .) .Values.thanosRuler.serviceAccount.name }} + {{ default (include "kube-prometheus-stack.thanosRuler.name" .) 
.Values.thanosRuler.serviceAccount.name }} {{- else -}} {{ default "default" .Values.thanosRuler.serviceAccount.name }} {{- end -}} @@ -228,6 +234,25 @@ Use the prometheus-node-exporter namespace override for multi-namespace deployme {{- include "kube-prometheus-stack.kubeVersionDefaultValue" (list $values ">= 1.23-0" $insecure $secure $userValue) -}} {{- end -}} +{{/* Sets default scrape limits for servicemonitor */}} +{{- define "servicemonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . }} +{{- end }} +{{- end -}} + {{/* To help compatibility with other charts which use global.imagePullSecrets. Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/alertmanager.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/alertmanager.yaml index 986f0353..4786af0f 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/alertmanager.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/alertmanager.yaml @@ -77,6 +77,10 @@ spec: alertmanagerConfiguration: {{ toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfiguration | indent 4 }} {{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.alertmanagerConfigMatcherStrategy }} + alertmanagerConfigMatcherStrategy: +{{ toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfigMatcherStrategy | indent 4 }} +{{- end }} {{- if .Values.alertmanager.alertmanagerSpec.resources }} resources: {{ toYaml .Values.alertmanager.alertmanagerSpec.resources | indent 4 }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/secret.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/secret.yaml index 0a354a3b..c3549f89 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/secret.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/secret.yaml @@ -13,14 +13,16 @@ metadata: {{ include "kube-prometheus-stack.labels" . | indent 4 }} data: {{- if .Values.alertmanager.tplConfig }} -{{- if eq (typeOf .Values.alertmanager.config) "string" }} +{{- if .Values.alertmanager.stringConfig }} + alertmanager.yaml: {{ tpl (.Values.alertmanager.stringConfig) . | b64enc | quote }} +{{- else if eq (typeOf .Values.alertmanager.config) "string" }} alertmanager.yaml: {{ tpl (.Values.alertmanager.config) . | b64enc | quote }} {{- else }} alertmanager.yaml: {{ tpl (toYaml .Values.alertmanager.config) . 
| b64enc | quote }} {{- end }} {{- else }} alertmanager.yaml: {{ toYaml .Values.alertmanager.config | b64enc | quote }} -{{- end}} +{{- end }} {{- range $key, $val := .Values.alertmanager.templateFiles }} {{ $key }}: {{ $val | b64enc | quote }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/servicemonitor.yaml index 350ebe88..50aad644 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/alertmanager/servicemonitor.yaml @@ -7,7 +7,11 @@ metadata: labels: app: {{ template "kube-prometheus-stack.name" . }}-alertmanager {{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- with .Values.alertmanager.serviceMonitor.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} spec: + {{- include "servicemonitor.scrapeLimits" .Values.alertmanager.serviceMonitor | nindent 2 }} selector: matchLabels: app: {{ template "kube-prometheus-stack.name" . }}-alertmanager diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/core-dns/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/core-dns/servicemonitor.yaml index 8d501374..3eb0023d 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/core-dns/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/core-dns/servicemonitor.yaml @@ -12,6 +12,7 @@ metadata: {{ include "kube-prometheus-stack.labels" . | indent 4 }} spec: jobLabel: jobLabel + {{- include "servicemonitor.scrapeLimits" .Values.coreDns.serviceMonitor | nindent 2 }} selector: matchLabels: app: {{ template "kube-prometheus-stack.name" . }}-coredns diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-api-server/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-api-server/servicemonitor.yaml index 6f83c9bd..543ea752 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-api-server/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-api-server/servicemonitor.yaml @@ -11,13 +11,14 @@ metadata: {{- end }} {{ include "kube-prometheus-stack.labels" . 
| indent 4 }}
 spec:
+  {{- include "servicemonitor.scrapeLimits" .Values.kubeApiServer.serviceMonitor | nindent 2 }}
   endpoints:
   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
 {{- if .Values.kubeApiServer.serviceMonitor.interval }}
     interval: {{ .Values.kubeApiServer.serviceMonitor.interval }}
 {{- end }}
 {{- if .Values.kubeApiServer.serviceMonitor.proxyUrl }}
-    proxyUrl: {{ .Values.kubeApiServer.serviceMonitor.proxyUrl}}
+    proxyUrl: {{ .Values.kubeApiServer.serviceMonitor.proxyUrl }}
 {{- end }}
     port: https
     scheme: https
diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml
index 65556e13..6bf6287c 100644
--- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml
+++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-controller-manager/servicemonitor.yaml
@@ -12,6 +12,7 @@ metadata:
 {{ include "kube-prometheus-stack.labels" . | indent 4 }}
 spec:
   jobLabel: jobLabel
+  {{- include "servicemonitor.scrapeLimits" .Values.kubeControllerManager.serviceMonitor | nindent 2 }}
   selector:
     matchLabels:
       app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-dns/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-dns/servicemonitor.yaml
index 5c4afc91..81dc32cd 100644
--- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-dns/servicemonitor.yaml
+++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-dns/servicemonitor.yaml
@@ -12,6 +12,7 @@ metadata:
 {{ include "kube-prometheus-stack.labels" . | indent 4 }}
 spec:
   jobLabel: jobLabel
+  {{- include "servicemonitor.scrapeLimits" .Values.kubeDns.serviceMonitor | nindent 2 }}
   selector:
     matchLabels:
       app: {{ template "kube-prometheus-stack.name" . }}-kube-dns
diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-etcd/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-etcd/servicemonitor.yaml
index 8418c007..b4274dec 100644
--- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-etcd/servicemonitor.yaml
+++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-etcd/servicemonitor.yaml
@@ -12,6 +12,7 @@ metadata:
 {{ include "kube-prometheus-stack.labels" . | indent 4 }}
 spec:
   jobLabel: jobLabel
+  {{- include "servicemonitor.scrapeLimits" .Values.kubeEtcd.serviceMonitor | nindent 2 }}
   selector:
     matchLabels:
       app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd
diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-proxy/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-proxy/servicemonitor.yaml
index 329b37b9..218a0676 100644
--- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-proxy/servicemonitor.yaml
+++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-proxy/servicemonitor.yaml
@@ -12,6 +12,7 @@ metadata:
 {{ include "kube-prometheus-stack.labels" .
| indent 4 }} spec: jobLabel: jobLabel + {{- include "servicemonitor.scrapeLimits" .Values.kubeProxy.serviceMonitor | nindent 2 }} selector: matchLabels: app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-scheduler/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-scheduler/servicemonitor.yaml index 6d43bf6f..1a8e5d21 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-scheduler/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kube-scheduler/servicemonitor.yaml @@ -12,6 +12,7 @@ metadata: {{ include "kube-prometheus-stack.labels" . | indent 4 }} spec: jobLabel: jobLabel + {{- include "servicemonitor.scrapeLimits" .Values.kubeScheduler.serviceMonitor | nindent 2 }} selector: matchLabels: app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kubelet/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kubelet/servicemonitor.yaml index 52700bc7..dcb94feb 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kubelet/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/exporters/kubelet/servicemonitor.yaml @@ -11,6 +11,7 @@ metadata: {{- end }} {{- include "kube-prometheus-stack.labels" . | indent 4 }} spec: + {{- include "servicemonitor.scrapeLimits" .Values.kubelet.serviceMonitor | nindent 2 }} endpoints: {{- if .Values.kubelet.serviceMonitor.https }} - port: https-metrics diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/grafana/configmaps-datasources.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/grafana/configmaps-datasources.yaml index 755168a6..f7e613bd 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/grafana/configmaps-datasources.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/grafana/configmaps-datasources.yaml @@ -6,7 +6,7 @@ metadata: namespace: {{ template "kube-prometheus-stack-grafana.namespace" . 
}} {{- if .Values.grafana.sidecar.datasources.annotations }} annotations: -{{ toYaml .Values.grafana.sidecar.datasources.annotations | indent 4 }} + {{- toYaml .Values.grafana.sidecar.datasources.annotations | nindent 4 }} {{- end }} labels: {{ $.Values.grafana.sidecar.datasources.label }}: {{ $.Values.grafana.sidecar.datasources.labelValue | quote }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml index 7a12754e..2dfbc2bb 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml @@ -13,10 +13,12 @@ metadata: {{- include "kube-prometheus-stack.labels" $ | indent 4 }} webhooks: - name: prometheusrulemutate.monitoring.coreos.com - {{- if .Values.prometheusOperator.admissionWebhooks.patch.enabled }} + {{- if .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + failurePolicy: {{ .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + {{- else if .Values.prometheusOperator.admissionWebhooks.patch.enabled }} failurePolicy: Ignore {{- else }} - failurePolicy: {{ .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + failurePolicy: Fail {{- end }} rules: - apiGroups: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml index 8d4e7d3c..525823c3 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml @@ -13,10 +13,12 @@ metadata: {{- include "kube-prometheus-stack.labels" $ | indent 4 }} webhooks: - name: prometheusrulemutate.monitoring.coreos.com - {{- if .Values.prometheusOperator.admissionWebhooks.patch.enabled }} + {{- if .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + failurePolicy: {{ .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + {{- else if .Values.prometheusOperator.admissionWebhooks.patch.enabled }} failurePolicy: Ignore {{- else }} - failurePolicy: {{ .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + failurePolicy: Fail {{- end }} rules: - apiGroups: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/clusterrole.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/clusterrole.yaml index 300956a1..a6c5b29e 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/clusterrole.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/clusterrole.yaml @@ -11,6 +11,7 @@ rules: - monitoring.coreos.com resources: - alertmanagers + - alertmanagers/status - alertmanagers/finalizers - alertmanagerconfigs - prometheuses @@ -78,4 +79,14 @@ rules: - get - list - watch +{{- if .Capabilities.APIVersions.Has "discovery.k8s.io/v1/EndpointSlice" }} 
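The two webhook templates above now resolve failurePolicy in a fixed order: an explicitly set value wins, otherwise Ignore is used while the certgen patch job manages the certificates, and Fail is the final fallback. A minimal values sketch that pins the strict behaviour regardless of the patch job:

    prometheusOperator:
      admissionWebhooks:
        enabled: true
        failurePolicy: Fail   # explicit value overrides the Ignore default implied by patch.enabled
        patch:
          enabled: true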
+- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +{{- end }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/deployment.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/deployment.yaml index 97a78cda..360d6ec1 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/deployment.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/deployment.yaml @@ -40,11 +40,13 @@ spec: {{- end }} containers: - name: {{ template "kube-prometheus-stack.name" . }} - {{- $registry := .Values.global.imageRegistry | default .Values.prometheusOperator.image.registry -}} + {{- $configReloaderRegistry := .Values.global.imageRegistry | default .Values.prometheusOperator.prometheusConfigReloader.image.registry -}} + {{- $operatorRegistry := .Values.global.imageRegistry | default .Values.prometheusOperator.image.registry -}} + {{- $thanosRegistry := .Values.global.imageRegistry | default .Values.prometheusOperator.thanosImage.registry -}} {{- if .Values.prometheusOperator.image.sha }} - image: "{{ $registry }}/{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}@sha256:{{ .Values.prometheusOperator.image.sha }}" + image: "{{ $operatorRegistry }}/{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.prometheusOperator.image.sha }}" {{- else }} - image: "{{ $registry }}/{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}" + image: "{{ $operatorRegistry }}/{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag | default .Chart.AppVersion }}" {{- end }} imagePullPolicy: "{{ .Values.prometheusOperator.image.pullPolicy }}" args: @@ -80,9 +82,9 @@ spec: - --alertmanager-default-base-image={{ .Values.global.imageRegistry | default .Values.prometheusOperator.alertmanagerDefaultBaseImageRegistry }}/{{ .Values.prometheusOperator.alertmanagerDefaultBaseImage }} {{- end }} {{- if .Values.prometheusOperator.prometheusConfigReloader.image.sha }} - - --prometheus-config-reloader={{ $registry }}/{{ .Values.prometheusOperator.prometheusConfigReloader.image.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloader.image.tag }}@sha256:{{ .Values.prometheusOperator.prometheusConfigReloader.image.sha }} + - --prometheus-config-reloader={{ $configReloaderRegistry }}/{{ .Values.prometheusOperator.prometheusConfigReloader.image.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloader.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.prometheusOperator.prometheusConfigReloader.image.sha }} {{- else }} - - --prometheus-config-reloader={{ $registry }}/{{ .Values.prometheusOperator.prometheusConfigReloader.image.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloader.image.tag }} + - --prometheus-config-reloader={{ $configReloaderRegistry }}/{{ .Values.prometheusOperator.prometheusConfigReloader.image.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloader.image.tag | default .Chart.AppVersion }} {{- end }} - --config-reloader-cpu-request={{ .Values.prometheusOperator.prometheusConfigReloader.resources.requests.cpu }} - --config-reloader-cpu-limit={{ .Values.prometheusOperator.prometheusConfigReloader.resources.limits.cpu }} @@ -98,9 +100,9 @@ spec: - 
--prometheus-instance-namespaces={{ .Values.prometheusOperator.prometheusInstanceNamespaces | join "," }} {{- end }} {{- if .Values.prometheusOperator.thanosImage.sha }} - - --thanos-default-base-image={{ $registry }}/{{ .Values.prometheusOperator.thanosImage.repository }}:{{ .Values.prometheusOperator.thanosImage.tag }}@sha256:{{ .Values.prometheusOperator.thanosImage.sha }} + - --thanos-default-base-image={{ $thanosRegistry }}/{{ .Values.prometheusOperator.thanosImage.repository }}:{{ .Values.prometheusOperator.thanosImage.tag }}@sha256:{{ .Values.prometheusOperator.thanosImage.sha }} {{- else }} - - --thanos-default-base-image={{ $registry }}/{{ .Values.prometheusOperator.thanosImage.repository }}:{{ .Values.prometheusOperator.thanosImage.tag }} + - --thanos-default-base-image={{ $thanosRegistry }}/{{ .Values.prometheusOperator.thanosImage.repository }}:{{ .Values.prometheusOperator.thanosImage.tag }} {{- end }} {{- if .Values.prometheusOperator.thanosRulerInstanceNamespaces }} - --thanos-ruler-instance-namespaces={{ .Values.prometheusOperator.thanosRulerInstanceNamespaces | join "," }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/networkpolicy.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/networkpolicy.yaml index ebffa9c8..aeb99895 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/networkpolicy.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/networkpolicy.yaml @@ -6,6 +6,7 @@ metadata: namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: app: {{ template "kube-prometheus-stack.name" . }}-operator + {{- include "kube-prometheus-stack.labels" . | nindent 4 }} spec: egress: - {} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/servicemonitor.yaml index 987b0ec6..16e6e090 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus-operator/servicemonitor.yaml @@ -11,6 +11,7 @@ metadata: {{ toYaml . | indent 4 }} {{- end }} spec: + {{- include "servicemonitor.scrapeLimits" .Values.prometheusOperator.serviceMonitor | nindent 2 }} endpoints: {{- if .Values.prometheusOperator.tls.enabled }} - port: https diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/csi-secret.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/csi-secret.yaml index 89399cec..e05382f6 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/csi-secret.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/csi-secret.yaml @@ -1,4 +1,4 @@ -{{- if .Values.prometheus.prometheusSpec.thanos.secretProviderClass }} +{{- if and .Values.prometheus.prometheusSpec.thanos .Values.prometheus.prometheusSpec.thanos.secretProviderClass }} --- apiVersion: secrets-store.csi.x-k8s.io/v1alpha1 kind: SecretProviderClass @@ -9,4 +9,4 @@ metadata: app: {{ template "kube-prometheus-stack.name" . 
}}-prometheus spec: {{ toYaml .Values.prometheus.prometheusSpec.thanos.secretProviderClass | indent 2 }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/networkpolicy.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/networkpolicy.yaml new file mode 100644 index 00000000..7090440b --- /dev/null +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/networkpolicy.yaml @@ -0,0 +1,32 @@ +{{- if .Values.prometheus.networkPolicy.enabled }} +apiVersion: {{ template "kube-prometheus-stack.prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus + {{- include "kube-prometheus-stack.labels" . | nindent 4 }} + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + namespace: {{ template "kube-prometheus-stack.namespace" . }} +spec: + {{- if .Values.prometheus.networkPolicy.egress }} + ## Deny all egress by default + egress: + {{- toYaml .Values.prometheus.networkPolicy.egress | nindent 4 }} + {{- end }} + {{- if .Values.prometheus.networkPolicy.ingress }} + # Deny all ingress by default (prometheus scrapes itself using localhost) + ingress: + {{- toYaml .Values.prometheus.networkPolicy.ingress | nindent 4 }} + {{- end }} + policyTypes: + - Egress + - Ingress + podSelector: + {{- if .Values.prometheus.networkPolicy.podSelector }} + {{- toYaml .Values.prometheus.networkPolicy.podSelector | nindent 4 }} + {{- else }} + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [prometheus]} + - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . }}]} + {{- end }} +{{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/podmonitors.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/podmonitors.yaml index 95d568e1..4e748c23 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/podmonitors.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/podmonitors.yaml @@ -15,6 +15,7 @@ items: {{ toYaml .additionalLabels | indent 8 }} {{- end }} spec: + {{- include "servicemonitor.scrapeLimits" . 
| nindent 6 }} podMetricsEndpoints: {{ toYaml .podMetricsEndpoints | indent 8 }} {{- if .jobLabel }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/prometheus.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/prometheus.yaml index 5235f744..0d12a1aa 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/prometheus.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/prometheus.yaml @@ -117,6 +117,12 @@ spec: {{- if .Values.prometheus.prometheusSpec.retentionSize }} retentionSize: {{ .Values.prometheus.prometheusSpec.retentionSize | quote }} {{- end }} +{{- if .Values.prometheus.prometheusSpec.tsdb }} + tsdb: + {{- if .Values.prometheus.prometheusSpec.tsdb.outOfOrderTimeWindow }} + outOfOrderTimeWindow: {{ .Values.prometheus.prometheusSpec.tsdb.outOfOrderTimeWindow }} + {{- end }} +{{- end }} {{- if eq .Values.prometheus.prometheusSpec.walCompression false }} walCompression: false {{ else }} @@ -391,4 +397,8 @@ spec: minReadySeconds: {{ .Values.prometheus.prometheusSpec.minReadySeconds }} {{- end }} hostNetwork: {{ .Values.prometheus.prometheusSpec.hostNetwork }} +{{- if .Values.prometheus.prometheusSpec.hostAliases }} + hostAliases: +{{ toYaml .Values.prometheus.prometheusSpec.hostAliases | indent 4 }} +{{- end }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitor.yaml index 356c013f..56eed4bf 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitor.yaml @@ -7,7 +7,11 @@ metadata: labels: app: {{ template "kube-prometheus-stack.name" . }}-prometheus {{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- with .Values.prometheus.serviceMonitor.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} spec: + {{- include "servicemonitor.scrapeLimits" .Values.prometheus.serviceMonitor | nindent 2 }} selector: matchLabels: app: {{ template "kube-prometheus-stack.name" . }}-prometheus diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitorThanosSidecar.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitorThanosSidecar.yaml index f2644d98..5643099e 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitorThanosSidecar.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitorThanosSidecar.yaml @@ -7,7 +7,11 @@ metadata: labels: app: {{ template "kube-prometheus-stack.name" . }}-thanos-sidecar {{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- with .Values.prometheus.thanosServiceMonitor.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} spec: + {{- include "servicemonitor.scrapeLimits" .Values.prometheus.thanosServiceMonitor | nindent 2 }} selector: matchLabels: app: {{ template "kube-prometheus-stack.name" . 
}}-thanos-discovery diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitors.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitors.yaml index a78d1cd0..92a54123 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitors.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/prometheus/servicemonitors.yaml @@ -15,6 +15,7 @@ items: {{ toYaml .additionalLabels | indent 8 }} {{- end }} spec: + {{- include "servicemonitor.scrapeLimits" . | nindent 6 }} endpoints: {{ toYaml .endpoints | indent 8 }} {{- if .jobLabel }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/extrasecret.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/extrasecret.yaml index fe2ea5be..587fca2d 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/extrasecret.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/extrasecret.yaml @@ -1,5 +1,5 @@ {{- if .Values.thanosRuler.extraSecret.data -}} -{{- $secretName := printf "thanos-ruler-%s-extra" (include "kube-prometheus-stack.fullname" . ) -}} +{{- $secretName := printf "%s-extra" (include "kube-prometheus-stack.thanosRuler.name" . ) -}} apiVersion: v1 kind: Secret metadata: @@ -10,7 +10,7 @@ metadata: {{ toYaml .Values.thanosRuler.extraSecret.annotations | indent 4 }} {{- end }} labels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler + app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} app.kubernetes.io/component: thanos-ruler {{ include "kube-prometheus-stack.labels" . | indent 4 }} data: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ingress.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ingress.yaml index 2760805c..aed9db28 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ingress.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ingress.yaml @@ -1,6 +1,6 @@ {{- if and .Values.thanosRuler.enabled .Values.thanosRuler.ingress.enabled }} {{- $pathType := .Values.thanosRuler.ingress.pathType | default "ImplementationSpecific" }} -{{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "thanos-ruler" }} +{{- $serviceName := include "kube-prometheus-stack.thanosRuler.name" . }} {{- $servicePort := .Values.thanosRuler.service.port -}} {{- $routePrefix := list .Values.thanosRuler.thanosRulerSpec.routePrefix }} {{- $paths := .Values.thanosRuler.ingress.paths | default $routePrefix -}} @@ -16,7 +16,7 @@ metadata: {{ toYaml .Values.thanosRuler.ingress.annotations | indent 4 }} {{- end }} labels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler + app: {{ template "kube-prometheus-stack.thanosRuler.name" . 
}} {{- if .Values.thanosRuler.ingress.labels }} {{ toYaml .Values.thanosRuler.ingress.labels | indent 4 }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/podDisruptionBudget.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/podDisruptionBudget.yaml index d3d378d6..83e54edf 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/podDisruptionBudget.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/podDisruptionBudget.yaml @@ -2,10 +2,10 @@ apiVersion: {{ include "kube-prometheus-stack.pdb.apiVersion" . }} kind: PodDisruptionBudget metadata: - name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler + name: {{ template "kube-prometheus-stack.thanosRuler.name" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler + app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} {{ include "kube-prometheus-stack.labels" . | indent 4 }} spec: {{- if .Values.thanosRuler.podDisruptionBudget.minAvailable }} @@ -17,5 +17,5 @@ spec: selector: matchLabels: app.kubernetes.io/name: thanos-ruler - thanos-ruler: {{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler + thanos-ruler: {{ template "kube-prometheus-stack.thanosRuler.name" . }} {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ruler.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ruler.yaml index ebca08c4..cf678b54 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ruler.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/ruler.yaml @@ -2,11 +2,11 @@ apiVersion: monitoring.coreos.com/v1 kind: ThanosRuler metadata: - name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler + name: {{ template "kube-prometheus-stack.thanosRuler.name" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler -{{ include "kube-prometheus-stack.labels" . | indent 4 }} + app: {{ include "kube-prometheus-stack.thanosRuler.name" . }} +{{- include "kube-prometheus-stack.labels" . | indent 4 -}} {{- if .Values.thanosRuler.annotations }} annotations: {{ toYaml .Values.thanosRuler.annotations | indent 4 }} @@ -35,7 +35,7 @@ spec: {{- else if and .Values.thanosRuler.ingress.enabled .Values.thanosRuler.ingress.hosts }} externalPrefix: "http://{{ tpl (index .Values.thanosRuler.ingress.hosts 0) . }}{{ .Values.thanosRuler.thanosRulerSpec.routePrefix }}" {{- else }} - externalPrefix: http://{{ template "kube-prometheus-stack.fullname" . }}-thanosRuler.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.thanosRuler.service.port }} + externalPrefix: http://{{ template "kube-prometheus-stack.thanosRuler.name" . }}.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.thanosRuler.service.port }} {{- end }} {{- if .Values.thanosRuler.thanosRulerSpec.nodeSelector }} nodeSelector: @@ -126,7 +126,7 @@ spec: labelSelector: matchExpressions: - {key: app.kubernetes.io/name, operator: In, values: [thanos-ruler]} - - {key: thanos-ruler, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler]} + - {key: thanos-ruler, operator: In, values: [{{ template "kube-prometheus-stack.thanosRuler.name" . 
}}]} {{- else if eq .Values.thanosRuler.thanosRulerSpec.podAntiAffinity "soft" }} podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: @@ -136,7 +136,7 @@ spec: labelSelector: matchExpressions: - {key: app.kubernetes.io/name, operator: In, values: [thanos-ruler]} - - {key: thanos-ruler, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler]} + - {key: thanos-ruler, operator: In, values: [{{ template "kube-prometheus-stack.thanosRuler.name" . }}]} {{- end }} {{- if .Values.thanosRuler.thanosRulerSpec.tolerations }} tolerations: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/service.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/service.yaml index 093dbf7c..be0c8445 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/service.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/service.yaml @@ -2,12 +2,12 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler + name: {{ template "kube-prometheus-stack.thanosRuler.name" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler + app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} self-monitor: {{ .Values.thanosRuler.serviceMonitor.selfMonitor | quote }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- include "kube-prometheus-stack.labels" . | indent 4 -}} {{- if .Values.thanosRuler.service.labels }} {{ toYaml .Values.thanosRuler.service.labels | indent 4 }} {{- end }} @@ -48,6 +48,6 @@ spec: {{- end }} selector: app.kubernetes.io/name: thanos-ruler - thanos-ruler: {{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler + thanos-ruler: {{ template "kube-prometheus-stack.thanosRuler.name" . }} type: "{{ .Values.thanosRuler.service.type }}" {{- end }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/serviceaccount.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/serviceaccount.yaml index 0138c357..b58f1cd4 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/serviceaccount.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/serviceaccount.yaml @@ -5,10 +5,10 @@ metadata: name: {{ template "kube-prometheus-stack.thanosRuler.serviceAccountName" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler - app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler + app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} + app.kubernetes.io/name: {{ template "kube-prometheus-stack.thanosRuler.name" . }} app.kubernetes.io/component: thanos-ruler -{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- include "kube-prometheus-stack.labels" . 
| indent 4 -}} {{- if .Values.thanosRuler.serviceAccount.annotations }} annotations: {{ toYaml .Values.thanosRuler.serviceAccount.annotations | indent 4 }} diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/servicemonitor.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/servicemonitor.yaml index 4a05679b..7f72688a 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/servicemonitor.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/templates/thanos-ruler/servicemonitor.yaml @@ -2,15 +2,19 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-ruler + name: {{ template "kube-prometheus-stack.thanosRuler.name" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler + app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} {{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- with .Values.thanosRuler.serviceMonitor.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} spec: + {{- include "servicemonitor.scrapeLimits" .Values.thanosRuler.serviceMonitor | nindent 2 }} selector: matchLabels: - app: {{ template "kube-prometheus-stack.name" . }}-thanos-ruler + app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} release: {{ $.Release.Name | quote }} self-monitor: {{ .Values.thanosRuler.serviceMonitor.selfMonitor | quote }} namespaceSelector: diff --git a/charts/kubezero-metrics/charts/kube-prometheus-stack/values.yaml b/charts/kubezero-metrics/charts/kube-prometheus-stack/values.yaml index 4ad72b40..8a47b477 100644 --- a/charts/kubezero-metrics/charts/kube-prometheus-stack/values.yaml +++ b/charts/kubezero-metrics/charts/kube-prometheus-stack/values.yaml @@ -212,6 +212,13 @@ alertmanager: templates: - '/etc/alertmanager/config/*.tmpl' + ## Alertmanager configuration directives (as string type, preferred over the config hash map) + ## stringConfig will be used only, if tplConfig is true + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + stringConfig: "" + ## Pass the Alertmanager configuration directives through Helm's templating ## engine. If the Alertmanager configuration contains Alertmanager templates, ## they'll need to be properly escaped so that they are not interpreted by @@ -413,6 +420,30 @@ alertmanager: interval: "" selfMonitor: true + ## Additional labels + ## + additionalLabels: {} + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. 
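The secret template earlier in this patch prefers alertmanager.stringConfig over the config hash map whenever tplConfig is true, so an entire Alertmanager configuration can be passed through Helm's tpl as one string. A hedged sketch; the receiver name and webhook URL are hypothetical:

    alertmanager:
      tplConfig: true
      stringConfig: |
        route:
          receiver: default
          group_by: ['alertname']
        receivers:
          - name: default
            webhook_configs:
              - url: "http://alert-gw.{{ .Release.Namespace }}.svc:8080/hook"

Because the string goes through tpl, the {{ .Release.Namespace }} expression is rendered at install time instead of being copied into the secret verbatim.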
## proxyUrl: "" @@ -533,6 +564,13 @@ alertmanager: # alertmanagerConfiguration: # name: global-alertmanager-Configuration + ## Defines the strategy used by AlertmanagerConfig objects to match alerts. eg: + ## + alertmanagerConfigMatcherStrategy: {} + ## Example with use OnNamespace strategy + # alertmanagerConfigMatcherStrategy: + # type: OnNamespace + ## Define Log Format # Use logfmt (default) or json logging logFormat: logfmt @@ -902,6 +940,27 @@ kubeApiServer: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -955,6 +1014,26 @@ kubelet: ## interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -1141,6 +1220,26 @@ kubeControllerManager: ## interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -1195,6 +1294,26 @@ coreDns: ## interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. 
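Every serviceMonitor block in this file gains the same five limit knobs. The servicemonitor.scrapeLimits helper wraps each field in a `with` block, and 0 is falsey in Go templates, so the defaults render nothing and preserve the previous behaviour. A hedged example that only caps kubelet scrapes:

    kubelet:
      serviceMonitor:
        sampleLimit: 5000          # reject the scrape if it returns more samples
        targetLimit: 20            # cap the number of accepted targets
        labelLimit: 0              # 0 means the field is not rendered at all
        labelNameLengthLimit: 0
        labelValueLengthLimit: 0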
+ ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -1241,6 +1360,26 @@ kubeDns: ## interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -1325,6 +1464,27 @@ kubeEtcd: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -1388,6 +1548,27 @@ kubeScheduler: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -1452,6 +1633,26 @@ kubeProxy: ## interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. 
Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -1502,6 +1703,26 @@ kube-state-metrics: ## interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. ## scrapeTimeout: "" @@ -1565,6 +1786,26 @@ prometheus-node-exporter: ## interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used. ## scrapeTimeout: "" @@ -1615,7 +1856,7 @@ prometheusOperator: ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted ## rules from making their way into prometheus and potentially preventing the container from starting admissionWebhooks: - failurePolicy: Fail + failurePolicy: ## The default timeoutSeconds is 10 and the maximum value is 30. timeoutSeconds: 10 enabled: true @@ -1632,9 +1873,9 @@ patch: enabled: true image: - registry: k8s.gcr.io + registry: registry.k8s.io repository: ingress-nginx/kube-webhook-certgen - tag: v1.3.0 + tag: v20221220-controller-v1.5.1-58-g787ea74b6 sha: "" pullPolicy: IfNotPresent resources: {} @@ -1798,6 +2039,27 @@ prometheusOperator: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
+ ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. scrapeTimeout: "" selfMonitor: true @@ -1909,7 +2171,8 @@ prometheusOperator: image: registry: quay.io repository: prometheus-operator/prometheus-operator - tag: v0.61.1 + # if not set appVersion field from Chart.yaml is used + tag: "" sha: "" pullPolicy: IfNotPresent @@ -1935,7 +2198,8 @@ prometheusOperator: image: registry: quay.io repository: prometheus-operator/prometheus-config-reloader - tag: v0.61.1 + # if not set appVersion field from Chart.yaml is used + tag: "" sha: "" # resource config for prometheusConfigReloader @@ -1952,7 +2216,7 @@ prometheusOperator: thanosImage: registry: quay.io repository: thanos/thanos - tag: v0.29.0 + tag: v0.30.2 sha: "" ## Set a Field Selector to filter watched secrets @@ -1962,13 +2226,23 @@ prometheusOperator: ## Deploy a Prometheus instance ## prometheus: - enabled: true ## Annotations for Prometheus ## annotations: {} + ## Configure network policy for the prometheus + networkPolicy: + enabled: false + # egress: + # - {} + # ingress: + # - {} + # podSelector: + # matchLabels: + # app: prometheus + ## Service account for Prometheuses to use. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ## @@ -2020,6 +2294,10 @@ prometheus: enabled: false interval: "" + ## Additional labels + ## + additionalLabels: {} + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. scheme: "" @@ -2302,6 +2580,30 @@ prometheus: interval: "" selfMonitor: true + ## Additional labels + ## + additionalLabels: {} + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. scheme: "" @@ -2390,7 +2692,7 @@ prometheus: image: registry: quay.io repository: prometheus/prometheus - tag: v2.40.5 + tag: v2.42.0 sha: "" ## Tolerations for use with node taints @@ -2580,6 +2882,11 @@ prometheus: ## retentionSize: "" + ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration + ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb + tsdb: + outOfOrderTimeWindow: 0s + ## Enable compression of the write-ahead log using Snappy. ## walCompression: true @@ -2940,6 +3247,14 @@ prometheus: # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically. 
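# Example (hypothetical, illustrative only): set hostNetwork: true to attach the Prometheus pods to the node's network namespace, e.g. to scrape targets only reachable from the host # hostNetwork: true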
hostNetwork: false + # HostAlias holds the mapping between IP and hostnames that will be injected + # as an entry in the pod’s hosts file. + hostAliases: [] + # - ip: 10.10.0.100 + # hostnames: + # - a1.app.local + # - b1.app.local + additionalRulesForClusterRole: [] # - apiGroups: [ "" ] # resources: @@ -3186,6 +3501,30 @@ thanosRuler: interval: "" selfMonitor: true + ## Additional labels + ## + additionalLabels: {} + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + ## proxyUrl: URL of a proxy that should be used for scraping. ## proxyUrl: "" @@ -3232,7 +3571,7 @@ thanosRuler: image: registry: quay.io repository: thanos/thanos - tag: v0.29.0 + tag: v0.30.2 sha: "" ## Namespaces to be selected for PrometheusRules discovery. diff --git a/charts/kubezero-metrics/jsonnet/build.sh b/charts/kubezero-metrics/jsonnet/build.sh index 016ed714..1f55ec1b 100755 --- a/charts/kubezero-metrics/jsonnet/build.sh +++ b/charts/kubezero-metrics/jsonnet/build.sh @@ -9,6 +9,6 @@ which jb > /dev/null || { echo "Required jb ( json-bundler ) not found!"; exit 1 if [ -r jsonnetfile.lock.json ]; then jb update else - #jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main - jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.11 + jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main + #jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.11 fi diff --git a/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-cluster.json b/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-cluster.json index 0672aeaa..052d96ed 100644 --- a/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-cluster.json +++ b/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-cluster.json @@ -2231,7 +2231,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceil(sum by(namespace) (rate(container_fs_reads_total{job=\"kubelet\", container!=\"\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval])))", + "expr": "ceil(sum by(namespace) (rate(container_fs_reads_total{job=\"kubelet\", container!=\"\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{namespace}}", @@ -2310,7 +2310,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{job=\"kubelet\", container!=\"\", 
device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", + "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{job=\"kubelet\", container!=\"\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{namespace}}", @@ -2529,7 +2529,7 @@ ], "targets": [ { - "expr": "sum by(namespace) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", + "expr": "sum by(namespace) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2538,7 +2538,7 @@ "step": 10 }, { - "expr": "sum by(namespace) (rate(container_fs_writes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", + "expr": "sum by(namespace) (rate(container_fs_writes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2547,7 +2547,7 @@ "step": 10 }, { - "expr": "sum by(namespace) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", + "expr": "sum by(namespace) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2556,7 +2556,7 @@ "step": 10 }, { - "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", + "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2565,7 +2565,7 @@ "step": 10 }, { - "expr": "sum by(namespace) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", 
namespace!=\"\"}[$__rate_interval]))", + "expr": "sum by(namespace) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2574,7 +2574,7 @@ "step": 10 }, { - "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", + "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace!=\"\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, diff --git a/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-namespace.json b/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-namespace.json index ce678885..414c6508 100644 --- a/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-namespace.json +++ b/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-namespace.json @@ -1957,7 +1957,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceil(sum by(pod) (rate(container_fs_reads_total{container!=\"\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_total{container!=\"\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", + "expr": "ceil(sum by(pod) (rate(container_fs_reads_total{container!=\"\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_total{container!=\"\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{pod}}", @@ -2036,7 +2036,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{container!=\"\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{container!=\"\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{container!=\"\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{container!=\"\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{pod}}", @@ -2255,7 
+2255,7 @@ ], "targets": [ { - "expr": "sum by(pod) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2264,7 +2264,7 @@ "step": 10 }, { - "expr": "sum by(pod) (rate(container_fs_writes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_writes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2273,7 +2273,7 @@ "step": 10 }, { - "expr": "sum by(pod) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2282,7 +2282,7 @@ "step": 10 }, { - "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2291,7 +2291,7 @@ "step": 10 }, { - "expr": "sum by(pod) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -2300,7 +2300,7 @@ "step": 10 }, { - "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", 
device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, diff --git a/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-pod.json b/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-pod.json index b7a58c70..0bc81558 100644 --- a/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-pod.json +++ b/charts/kubezero-metrics/jsonnet/dashboards/k8s-resources-pod.json @@ -1461,7 +1461,7 @@ "steppedLine": false, "targets": [ { - "expr": "ceil(sum by(pod) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval])))", + "expr": "ceil(sum by(pod) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Reads", @@ -1469,7 +1469,7 @@ "step": 10 }, { - "expr": "ceil(sum by(pod) (rate(container_fs_writes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\",namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval])))", + "expr": "ceil(sum by(pod) (rate(container_fs_writes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\",namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Writes", @@ -1548,7 +1548,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Reads", @@ -1556,7 +1556,7 @@ "step": 10 }, { - "expr": "sum by(pod) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))", + "expr": "sum by(pod) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "Writes", @@ -1946,7 +1946,7 @@ ], "targets": [ { - "expr": "sum by(container) 
(rate(container_fs_reads_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", + "expr": "sum by(container) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -1955,7 +1955,7 @@ "step": 10 }, { - "expr": "sum by(container) (rate(container_fs_writes_total{job=\"kubelet\",device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", + "expr": "sum by(container) (rate(container_fs_writes_total{job=\"kubelet\",device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -1964,7 +1964,7 @@ "step": 10 }, { - "expr": "sum by(container) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", + "expr": "sum by(container) (rate(container_fs_reads_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]) + rate(container_fs_writes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -1973,7 +1973,7 @@ "step": 10 }, { - "expr": "sum by(container) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", + "expr": "sum by(container) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -1982,7 +1982,7 @@ "step": 10 }, { - "expr": "sum by(container) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", + "expr": "sum by(container) (rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, @@ -1991,7 +1991,7 @@ "step": 10 }, { - "expr": "sum by(container) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", 
container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", + "expr": "sum by(container) (rate(container_fs_reads_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]) + rate(container_fs_writes_bytes_total{job=\"kubelet\", device=~\"(/dev.+)|mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\", container!=\"\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[$__rate_interval]))", "format": "table", "instant": true, "intervalFactor": 2, diff --git a/charts/kubezero-metrics/jsonnet/dashboards/kubelet.json b/charts/kubezero-metrics/jsonnet/dashboards/kubelet.json index 324f71db..f0e9b1f1 100644 --- a/charts/kubezero-metrics/jsonnet/dashboards/kubelet.json +++ b/charts/kubezero-metrics/jsonnet/dashboards/kubelet.json @@ -694,7 +694,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_start_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\",instance=~\"$instance\"}[$__rate_interval])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_start_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\",instance=~\"$instance\"}[$__rate_interval])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{instance}} pod", diff --git a/charts/kubezero-metrics/jsonnet/jsonnetfile.json b/charts/kubezero-metrics/jsonnet/jsonnetfile.json index 2a66a20a..97f3b96f 100644 --- a/charts/kubezero-metrics/jsonnet/jsonnetfile.json +++ b/charts/kubezero-metrics/jsonnet/jsonnetfile.json @@ -8,7 +8,7 @@ "subdir": "jsonnet/kube-prometheus" } }, - "version": "release-0.11" + "version": "main" } ], "legacyImports": true diff --git a/charts/kubezero-metrics/jsonnet/jsonnetfile.lock.json b/charts/kubezero-metrics/jsonnet/jsonnetfile.lock.json index abb6d35a..8b632502 100644 --- a/charts/kubezero-metrics/jsonnet/jsonnetfile.lock.json +++ b/charts/kubezero-metrics/jsonnet/jsonnetfile.lock.json @@ -18,8 +18,8 @@ "subdir": "contrib/mixin" } }, - "version": "9e3966fbce6dccd2271b7ade588fefeb4ca7b247", - "sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc=" + "version": "22f3e50adafd9d4cf9dd29dd5837483a6417238c", + "sum": "QTzBqwjnM6cGGVBhOiVJyA+ZVTkmCTuH6C6YW7XKRFw=" }, { "source": { @@ -28,7 +28,7 @@ "subdir": "grafana-mixin" } }, - "version": "3eed09056849ab873b867b561b7ce580ef2c75ba", + "version": "1120f9e255760a3c104b57871fcb91801e934382", "sum": "MkjR7zCgq6MUZgjDzop574tFKoTX2OBr7DTwm1K+Ofs=" }, { @@ -38,9 +38,19 @@ "subdir": "grafonnet" } }, - "version": "30280196507e0fe6fa978a3e0eaca3a62844f817", + "version": "f0b70307b8e5f12236b277883d998af129a8211f", "sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc=" }, + { + "source": { + "git": { + "remote": "https://github.com/grafana/grafonnet-lib.git", + "subdir": "grafonnet-7.0" + } + }, + "version": "f0b70307b8e5f12236b277883d998af129a8211f", + "sum": "gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM=" + }, { "source": { "git": { @@ -48,7 +58,7 @@ "subdir": "grafana-builder" } }, - "version": "d68f9a6e0b1af7c4c4056dc2b43fb8f3bac01f43", + "version": "e0b90a4435817ad642d8d049e7dd975264cb960e", "sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0=" }, { @@ 
-58,18 +68,8 @@ "subdir": "" } }, - "version": "b8f44bb7be728423836bef0e904ec7166895a34b", - "sum": "LCgSosxceeYuoau5fYSPtE5eXOFe46DxexfkrctUv7c=" - }, - { - "source": { - "git": { - "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git", - "subdir": "lib/promgrafonnet" - } - }, - "version": "3c386687c1f8ceb6b79ff887c4a934e9cee1b90a", - "sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps=" + "version": "eed459199703c969afc318ea55b9361ae48180a7", + "sum": "iKDOR7+jXw3Rctog6Z1ofweIK5BLjuGeguIZjXLP8ls=" }, { "source": { @@ -78,8 +78,8 @@ "subdir": "jsonnet/kube-state-metrics" } }, - "version": "0567e1e1b981755e563d2244fa1659563f2cddbc", - "sum": "P0dCnbzyPScQGNXwXRcwiPkMLeTq0IPNbSTysDbySnM=" + "version": "e3d99ba7cf690b28ab2df9cf8d38c88afa630474", + "sum": "+dOzAK+fwsFf97uZpjcjTcEJEC1H8hh/j8f5uIQK/5g=" }, { "source": { @@ -88,7 +88,7 @@ "subdir": "jsonnet/kube-state-metrics-mixin" } }, - "version": "0567e1e1b981755e563d2244fa1659563f2cddbc", + "version": "e3d99ba7cf690b28ab2df9cf8d38c88afa630474", "sum": "u8gaydJoxEjzizQ8jY8xSjYgWooPmxw+wIWdDxifMAk=" }, { @@ -98,8 +98,8 @@ "subdir": "jsonnet/kube-prometheus" } }, - "version": "e3066575dc8be21f578f12887563bda3ee7a2eff", - "sum": "nNEMDrb5sQDOxJ20ITDvldyfIbbiGcVr8Bq46PH2ww8=" + "version": "a209d48272a0726260784bcb74dca5c8ab7d4591", + "sum": "iiIzgEw2EKncbZWzdPGrek+0b0VPwMa5plbW/R1hLPw=" }, { "source": { @@ -108,7 +108,7 @@ "subdir": "jsonnet/mixin" } }, - "version": "5db6996d3ca995e66301c53c33959fd64c3f6ae6", + "version": "1448496c52158c0c286a696605818a0f5de87892", "sum": "GQmaVFJwKMiD/P4n3N2LrAZVcwutriWrP8joclDtBYQ=", "name": "prometheus-operator-mixin" }, @@ -119,8 +119,8 @@ "subdir": "jsonnet/prometheus-operator" } }, - "version": "5db6996d3ca995e66301c53c33959fd64c3f6ae6", - "sum": "pUggCYwO/3Y/p6Vgryx8Y4KO3QkJ+GqimrZtn/luzzI=" + "version": "1448496c52158c0c286a696605818a0f5de87892", + "sum": "Ynpnbz195OTwY1DDpGRWlxmDI+tdwxjIXAphN9VIEkU=" }, { "source": { @@ -129,8 +129,8 @@ "subdir": "doc/alertmanager-mixin" } }, - "version": "14b01e6a34dd3155768c7e9bd5c4376055de9419", - "sum": "f3iZDUXQ/YWB5yDCY7VLD5bs442+3CdJgXJhJyWhNf8=", + "version": "9a8d1f976e12b325ec47b84987a78b7845738be6", + "sum": "PsK+V7oETCPKu2gLoPfqY0wwPKH9TzhNj6o2xezjjXc=", "name": "alertmanager" }, { @@ -140,8 +140,8 @@ "subdir": "docs/node-mixin" } }, - "version": "a2321e7b940ddcff26873612bccdf7cd4c42b6b6", - "sum": "MlWDAKGZ+JArozRKdKEvewHeWn8j2DNBzesJfLVd0dk=" + "version": "b87c6a8826d41a242182f798e3e5688c870a9b12", + "sum": "TwdaTm0Z++diiLyaKAAimmC6hBL7XbrJc0RHhBCpAdU=" }, { "source": { @@ -150,8 +150,8 @@ "subdir": "documentation/prometheus-mixin" } }, - "version": "d7e7b8e04b5ecdc1dd153534ba376a622b72741b", - "sum": "APXOIP3B3dZ3Tyh7L2UhyWR8Vbf5+9adTLz/ya7n6uU=", + "version": "136b48855a974ce16e3bf591f1452d41d55eefa9", + "sum": "LRx0tbMnoE1p8KEn+i81j2YsA5Sgt3itE5Y6jBf5eOQ=", "name": "prometheus" }, { @@ -161,8 +161,8 @@ "subdir": "config/crd/bases" } }, - "version": "3738a607a42a0c9566587a49cec7587cc92d61bd", - "sum": "GQ0GFKGdIWKx1b78VRs6jtC4SMqkBjT5jl65QUjPKK4=" + "version": "ffb5f03ca7a99a31be783472e3411df2c1d09ab7", + "sum": "bY/Pcrrbynguq8/HaI88cQ3B2hLv/xc+76QILY7IL+g=" }, { "source": { @@ -171,8 +171,8 @@ "subdir": "mixin" } }, - "version": "17c576472d80972bfd3705e1e0a08e6f8da8e04b", - "sum": "dBm9ML50quhu6dwTIgfNmVruMqfaUeQVCO/6EKtQLxE=", + "version": "f8d401d92c1c59b88a203b71e975395271444212", + "sum": "zSLNV/0bN4DcVKojzCqjmhfjtzTY4pDKZXqbAUzw5R0=", "name": "thanos-mixin" } ], diff --git 
a/charts/kubezero-metrics/jsonnet/rules/alertmanager-prometheusRule b/charts/kubezero-metrics/jsonnet/rules/alertmanager-prometheusRule index 1c675801..b4f76248 100644 --- a/charts/kubezero-metrics/jsonnet/rules/alertmanager-prometheusRule +++ b/charts/kubezero-metrics/jsonnet/rules/alertmanager-prometheusRule @@ -7,7 +7,7 @@ "app.kubernetes.io/instance": "main", "app.kubernetes.io/name": "alertmanager", "app.kubernetes.io/part-of": "kube-prometheus", - "app.kubernetes.io/version": "0.24.0", + "app.kubernetes.io/version": "0.25.0", "prometheus": "k8s", "role": "alert-rules" }, diff --git a/charts/kubezero-metrics/jsonnet/rules/etcd-mixin-prometheusRule b/charts/kubezero-metrics/jsonnet/rules/etcd-mixin-prometheusRule index 02e0ba7c..af7b8c61 100644 --- a/charts/kubezero-metrics/jsonnet/rules/etcd-mixin-prometheusRule +++ b/charts/kubezero-metrics/jsonnet/rules/etcd-mixin-prometheusRule @@ -64,10 +64,10 @@ { "alert": "etcdGRPCRequestsSlow", "annotations": { - "description": "etcd cluster \"{{ $labels.job }}\": gRPC requests to {{ $labels.grpc_method }} are taking {{ $value }}s on etcd instance {{ $labels.instance }}.", + "description": "etcd cluster \"{{ $labels.job }}\": 99th percentile of gRPC requests is {{ $value }}s on etcd instance {{ $labels.instance }} for {{ $labels.grpc_method }} method.", "summary": "etcd grpc requests are slow" }, - "expr": "histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~\".*etcd.*\", grpc_type=\"unary\"}[5m])) without(grpc_type))\n> 0.15\n", + "expr": "histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~\".*etcd.*\", grpc_method!=\"Defragment\", grpc_type=\"unary\"}[5m])) without(grpc_type))\n> 0.15\n", "for": "10m", "labels": { "severity": "critical" @@ -112,7 +112,8 @@ { "alert": "etcdHighFsyncDurations", "annotations": { - "message": "etcd cluster \"{{ $labels.job }}\": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}." + "description": "etcd cluster \"{{ $labels.job }}\": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.", + "summary": "etcd cluster 99th percentile fsync durations are too high." }, "expr": "histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~\".*etcd.*\"}[5m]))\n> 1\n", "for": "10m", @@ -133,11 +134,12 @@ } }, { - "alert": "etcdBackendQuotaLowSpace", + "alert": "etcdDatabaseQuotaLowSpace", "annotations": { - "message": "etcd cluster \"{{ $labels.job }}\": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full." + "description": "etcd cluster \"{{ $labels.job }}\": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.", + "summary": "etcd cluster database is running full." }, - "expr": "(etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100 > 95\n", + "expr": "(last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95\n", "for": "10m", "labels": { "severity": "critical" @@ -146,9 +148,23 @@ { "alert": "etcdExcessiveDatabaseGrowth", "annotations": { - "message": "etcd cluster \"{{ $labels.job }}\": Observed surge in etcd writes leading to 50% increase in database size over the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive." 
+ "description": "etcd cluster \"{{ $labels.job }}\": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive.", + "summary": "etcd cluster database growing very fast." }, - "expr": "increase(((etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100)[240m:1m]) > 50\n", + "expr": "predict_linear(etcd_mvcc_db_total_size_in_bytes[4h], 4*60*60) > etcd_server_quota_backend_bytes\n", + "for": "10m", + "labels": { + "severity": "warning" + } + }, + { + "alert": "etcdDatabaseHighFragmentationRatio", + "annotations": { + "description": "etcd cluster \"{{ $labels.job }}\": database size in use on instance {{ $labels.instance }} is {{ $value | humanizePercentage }} of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.", + "runbook_url": "https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation", + "summary": "etcd database size in use is less than 50% of the actual allocated storage." + }, + "expr": "(last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5 and etcd_mvcc_db_total_size_in_use_in_bytes > 104857600\n", "for": "10m", "labels": { "severity": "warning" diff --git a/charts/kubezero-metrics/jsonnet/rules/kube-state-metrics-prometheusRule b/charts/kubezero-metrics/jsonnet/rules/kube-state-metrics-prometheusRule index 2e33a787..4fad07b6 100644 --- a/charts/kubezero-metrics/jsonnet/rules/kube-state-metrics-prometheusRule +++ b/charts/kubezero-metrics/jsonnet/rules/kube-state-metrics-prometheusRule @@ -6,7 +6,7 @@ "app.kubernetes.io/component": "exporter", "app.kubernetes.io/name": "kube-state-metrics", "app.kubernetes.io/part-of": "kube-prometheus", - "app.kubernetes.io/version": "2.5.0", + "app.kubernetes.io/version": "2.8.2", "prometheus": "k8s", "role": "alert-rules" }, diff --git a/charts/kubezero-metrics/jsonnet/rules/kubernetes-prometheusRule b/charts/kubezero-metrics/jsonnet/rules/kubernetes-prometheusRule index 2e4562bf..0389247c 100644 --- a/charts/kubezero-metrics/jsonnet/rules/kubernetes-prometheusRule +++ b/charts/kubezero-metrics/jsonnet/rules/kubernetes-prometheusRule @@ -36,7 +36,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodnotready", "summary": "Pod has been in a non-ready state for more than 15 minutes." 
}, - "expr": "sum by (namespace, pod, cluster) (\n max by(namespace, pod, cluster) (\n kube_pod_status_phase{job=\"kube-state-metrics\", phase=~\"Pending|Unknown\"}\n ) * on(namespace, pod, cluster) group_left(owner_kind) topk by(namespace, pod, cluster) (\n 1, max by(namespace, pod, owner_kind, cluster) (kube_pod_owner{owner_kind!=\"Job\"})\n )\n) > 0\n", + "expr": "sum by (namespace, pod, cluster) (\n max by(namespace, pod, cluster) (\n kube_pod_status_phase{job=\"kube-state-metrics\", phase=~\"Pending|Unknown|Failed\"}\n ) * on(namespace, pod, cluster) group_left(owner_kind) topk by(namespace, pod, cluster) (\n 1, max by(namespace, pod, owner_kind, cluster) (kube_pod_owner{owner_kind!=\"Job\"})\n )\n) > 0\n", "for": "15m", "labels": { "severity": "warning" @@ -189,7 +189,7 @@ "annotations": { "description": "HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has not matched the desired number of replicas for longer than 15 minutes.", "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpareplicasmismatch", - "summary": "HPA has not matched descired number of replicas." + "summary": "HPA has not matched desired number of replicas." }, "expr": "(kube_horizontalpodautoscaler_status_desired_replicas{job=\"kube-state-metrics\"}\n !=\nkube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"})\n and\n(kube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"}\n >\nkube_horizontalpodautoscaler_spec_min_replicas{job=\"kube-state-metrics\"})\n and\n(kube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"}\n <\nkube_horizontalpodautoscaler_spec_max_replicas{job=\"kube-state-metrics\"})\n and\nchanges(kube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"}[15m]) == 0\n", "for": "15m", @@ -222,7 +222,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit", "summary": "Cluster has overcommitted CPU resource requests." }, - "expr": "sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource=\"cpu\"}) - max(kube_node_status_allocatable{resource=\"cpu\"})) > 0\nand\n(sum(kube_node_status_allocatable{resource=\"cpu\"}) - max(kube_node_status_allocatable{resource=\"cpu\"})) > 0\n", + "expr": "sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource=\"cpu\", job=\"kube-state-metrics\"}) - max(kube_node_status_allocatable{resource=\"cpu\", job=\"kube-state-metrics\"})) > 0\nand\n(sum(kube_node_status_allocatable{resource=\"cpu\", job=\"kube-state-metrics\"}) - max(kube_node_status_allocatable{resource=\"cpu\", job=\"kube-state-metrics\"})) > 0\n", "for": "10m", "labels": { "severity": "warning" @@ -235,7 +235,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit", "summary": "Cluster has overcommitted memory resource requests." 
}, - "expr": "sum(namespace_memory:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource=\"memory\"}) - max(kube_node_status_allocatable{resource=\"memory\"})) > 0\nand\n(sum(kube_node_status_allocatable{resource=\"memory\"}) - max(kube_node_status_allocatable{resource=\"memory\"})) > 0\n", + "expr": "sum(namespace_memory:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) - max(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"})) > 0\nand\n(sum(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) - max(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"})) > 0\n", "for": "10m", "labels": { "severity": "warning" @@ -414,7 +414,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclienterrors", "summary": "Kubernetes API server client is experiencing errors." }, - "expr": "(sum(rate(rest_client_requests_total{code=~\"5..\"}[5m])) by (cluster, instance, job, namespace)\n /\nsum(rate(rest_client_requests_total[5m])) by (cluster, instance, job, namespace))\n> 0.01\n", + "expr": "(sum(rate(rest_client_requests_total{job=\"apiserver\",code=~\"5..\"}[5m])) by (cluster, instance, job, namespace)\n /\nsum(rate(rest_client_requests_total{job=\"apiserver\"}[5m])) by (cluster, instance, job, namespace))\n> 0.01\n", "for": "15m", "labels": { "severity": "warning" @@ -498,6 +498,7 @@ "summary": "Client certificate is about to expire." }, "expr": "apiserver_client_certificate_expiration_seconds_count{job=\"apiserver\"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job=\"apiserver\"}[5m]))) < 604800\n", + "for": "5m", "labels": { "severity": "warning" } @@ -510,6 +511,7 @@ "summary": "Client certificate is about to expire." }, "expr": "apiserver_client_certificate_expiration_seconds_count{job=\"apiserver\"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job=\"apiserver\"}[5m]))) < 86400\n", + "for": "5m", "labels": { "severity": "critical" } @@ -616,7 +618,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodereadinessflapping", "summary": "Node readiness status is flapping." 
}, - "expr": "sum(changes(kube_node_status_condition{status=\"true\",condition=\"Ready\"}[15m])) by (cluster, node) > 2\n", + "expr": "sum(changes(kube_node_status_condition{job=\"kube-state-metrics\",status=\"true\",condition=\"Ready\"}[15m])) by (cluster, node) > 2\n", "for": "15m", "labels": { "severity": "warning" @@ -996,19 +998,19 @@ "record": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate" }, { - "expr": "container_memory_working_set_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", + "expr": "container_memory_working_set_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", "record": "node_namespace_pod_container:container_memory_working_set_bytes" }, { - "expr": "container_memory_rss{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", + "expr": "container_memory_rss{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", "record": "node_namespace_pod_container:container_memory_rss" }, { - "expr": "container_memory_cache{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", + "expr": "container_memory_cache{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", "record": "node_namespace_pod_container:container_memory_cache" }, { - "expr": "container_memory_swap{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", + "expr": "container_memory_swap{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n", "record": "node_namespace_pod_container:container_memory_swap" }, { @@ -1149,7 +1151,7 @@ "record": "node_namespace_pod:kube_pod_info:" }, { - "expr": "count by (cluster, node) (sum by (node, cpu) (\n node_cpu_seconds_total{job=\"node-exporter\"}\n* on (namespace, pod) group_left(node)\n topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:)\n))\n", + "expr": "count by (cluster, node) (\n node_cpu_seconds_total{mode=\"idle\",job=\"node-exporter\"}\n * on (namespace, pod) group_left(node)\n topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:)\n)\n", "record": "node:node_num_cpu:sum" }, { @@ -1157,7 +1159,11 @@ "record": ":node_memory_MemAvailable_bytes:sum" }, { - "expr": "sum(rate(node_cpu_seconds_total{job=\"node-exporter\",mode!=\"idle\",mode!=\"iowait\",mode!=\"steal\"}[5m])) /\ncount(sum(node_cpu_seconds_total{job=\"node-exporter\"}) by (cluster, instance, cpu))\n", + 
"expr": "avg by (cluster, node) (\n sum without (mode) (\n rate(node_cpu_seconds_total{mode!=\"idle\",mode!=\"iowait\",mode!=\"steal\",job=\"node-exporter\"}[5m])\n )\n)\n", + "record": "node:node_cpu_utilization:ratio_rate5m" + }, + { + "expr": "avg by (cluster) (\n node:node_cpu_utilization:ratio_rate5m\n)\n", "record": "cluster:node_cpu:ratio_rate5m" } ] @@ -1166,21 +1172,21 @@ "name": "kubelet.rules", "rules": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job=\"kubelet\", metrics_path=\"/metrics\"})\n", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job=\"kubelet\", metrics_path=\"/metrics\"}[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job=\"kubelet\", metrics_path=\"/metrics\"})\n", "labels": { "quantile": "0.99" }, "record": "node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile" }, { - "expr": "histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job=\"kubelet\", metrics_path=\"/metrics\"})\n", + "expr": "histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job=\"kubelet\", metrics_path=\"/metrics\"}[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job=\"kubelet\", metrics_path=\"/metrics\"})\n", "labels": { "quantile": "0.9" }, "record": "node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile" }, { - "expr": "histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job=\"kubelet\", metrics_path=\"/metrics\"})\n", + "expr": "histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job=\"kubelet\", metrics_path=\"/metrics\"}[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job=\"kubelet\", metrics_path=\"/metrics\"})\n", "labels": { "quantile": "0.5" }, diff --git a/charts/kubezero-metrics/jsonnet/rules/node-exporter-prometheusRule b/charts/kubezero-metrics/jsonnet/rules/node-exporter-prometheusRule index 32de17be..ed36dcd2 100644 --- a/charts/kubezero-metrics/jsonnet/rules/node-exporter-prometheusRule +++ b/charts/kubezero-metrics/jsonnet/rules/node-exporter-prometheusRule @@ -6,7 +6,7 @@ "app.kubernetes.io/component": "exporter", "app.kubernetes.io/name": "node-exporter", "app.kubernetes.io/part-of": "kube-prometheus", - "app.kubernetes.io/version": "1.3.1", + "app.kubernetes.io/version": "1.5.0", "prometheus": "k8s", "role": "alert-rules" }, @@ -25,7 +25,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup", "summary": "Filesystem is predicted to run out of space within the next 24 hours." 
}, - "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 15\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 15\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "1h", "labels": { "severity": "warning" @@ -38,7 +38,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup", "summary": "Filesystem is predicted to run out of space within the next 4 hours." }, - "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 10\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 10\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "1h", "labels": { "severity": "critical" @@ -51,7 +51,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace", "summary": "Filesystem has less than 5% space left." }, - "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "30m", "labels": { "severity": "warning" @@ -64,7 +64,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace", "summary": "Filesystem has less than 3% space left." }, - "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "30m", "labels": { "severity": "critical" @@ -77,7 +77,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup", "summary": "Filesystem is predicted to run out of inodes within the next 24 hours." 
}, - "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 40\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 40\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "1h", "labels": { "severity": "warning" @@ -90,7 +90,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup", "summary": "Filesystem is predicted to run out of inodes within the next 4 hours." }, - "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 20\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 20\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "1h", "labels": { "severity": "critical" @@ -103,7 +103,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles", "summary": "Filesystem has less than 5% inodes left." }, - "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "1h", "labels": { "severity": "warning" @@ -116,7 +116,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles", "summary": "Filesystem has less than 3% inodes left." }, - "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n", + "expr": "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n", "for": "1h", "labels": { "severity": "critical" @@ -179,7 +179,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodeclockskewdetected", "summary": "Clock skew detected." 
}, - "expr": "(\n node_timex_offset_seconds > 0.05\nand\n deriv(node_timex_offset_seconds[5m]) >= 0\n)\nor\n(\n node_timex_offset_seconds < -0.05\nand\n deriv(node_timex_offset_seconds[5m]) <= 0\n)\n", + "expr": "(\n node_timex_offset_seconds{job=\"node-exporter\"} > 0.05\nand\n deriv(node_timex_offset_seconds{job=\"node-exporter\"}[5m]) >= 0\n)\nor\n(\n node_timex_offset_seconds{job=\"node-exporter\"} < -0.05\nand\n deriv(node_timex_offset_seconds{job=\"node-exporter\"}[5m]) <= 0\n)\n", "for": "10m", "labels": { "severity": "warning" @@ -192,7 +192,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/nodeclocknotsynchronising", "summary": "Clock not synchronising." }, - "expr": "min_over_time(node_timex_sync_status[5m]) == 0\nand\nnode_timex_maxerror_seconds >= 16\n", + "expr": "min_over_time(node_timex_sync_status{job=\"node-exporter\"}[5m]) == 0\nand\nnode_timex_maxerror_seconds{job=\"node-exporter\"} >= 16\n", "for": "10m", "labels": { "severity": "warning" @@ -205,7 +205,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddegraded", "summary": "RAID Array is degraded" }, - "expr": "node_md_disks_required - ignoring (state) (node_md_disks{state=\"active\"}) > 0\n", + "expr": "node_md_disks_required{job=\"node-exporter\",device=~\"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)\"} - ignoring (state) (node_md_disks{state=\"active\",job=\"node-exporter\",device=~\"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)\"}) > 0\n", "for": "15m", "labels": { "severity": "critical" @@ -218,7 +218,7 @@ "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddiskfailure", "summary": "Failed device in RAID array" }, - "expr": "node_md_disks{state=\"failed\"} > 0\n", + "expr": "node_md_disks{state=\"failed\",job=\"node-exporter\",device=~\"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)\"} > 0\n", "labels": { "severity": "warning" } @@ -275,11 +275,11 @@ "record": "instance:node_vmstat_pgmajfault:rate5m" }, { - "expr": "rate(node_disk_io_time_seconds_total{job=\"node-exporter\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[5m])\n", + "expr": "rate(node_disk_io_time_seconds_total{job=\"node-exporter\", device=~\"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)\"}[5m])\n", "record": "instance_device:node_disk_io_time_seconds:rate5m" }, { - "expr": "rate(node_disk_io_time_weighted_seconds_total{job=\"node-exporter\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[5m])\n", + "expr": "rate(node_disk_io_time_weighted_seconds_total{job=\"node-exporter\", device=~\"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)\"}[5m])\n", "record": "instance_device:node_disk_io_time_weighted_seconds:rate5m" }, { diff --git a/charts/kubezero-metrics/jsonnet/rules/prometheus-operator-prometheusRule b/charts/kubezero-metrics/jsonnet/rules/prometheus-operator-prometheusRule index 1283aea7..8fd4cf07 100644 --- a/charts/kubezero-metrics/jsonnet/rules/prometheus-operator-prometheusRule +++ b/charts/kubezero-metrics/jsonnet/rules/prometheus-operator-prometheusRule @@ -6,7 +6,7 @@ "app.kubernetes.io/component": "controller", "app.kubernetes.io/name": "prometheus-operator", "app.kubernetes.io/part-of": "kube-prometheus", - "app.kubernetes.io/version": "0.57.0", + "app.kubernetes.io/version": "0.64.0", "prometheus": "k8s", "role": "alert-rules" }, diff --git a/charts/kubezero-metrics/jsonnet/rules/prometheus-prometheusRule 
index 13109a14..0ef1d141 100644
--- a/charts/kubezero-metrics/jsonnet/rules/prometheus-prometheusRule
+++ b/charts/kubezero-metrics/jsonnet/rules/prometheus-prometheusRule
@@ -7,7 +7,7 @@
       "app.kubernetes.io/instance": "k8s",
       "app.kubernetes.io/name": "prometheus",
       "app.kubernetes.io/part-of": "kube-prometheus",
-      "app.kubernetes.io/version": "2.36.1",
+      "app.kubernetes.io/version": "2.43.0",
       "prometheus": "k8s",
       "role": "alert-rules"
    },
@@ -266,6 +266,19 @@
             "severity": "critical"
          }
       },
+      {
+         "alert": "PrometheusHighQueryLoad",
+         "annotations": {
+            "description": "Prometheus {{$labels.namespace}}/{{$labels.pod}} query API has less than 20% available capacity in its query engine for the last 15 minutes.",
+            "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheushighqueryload",
+            "summary": "Prometheus is reaching its maximum capacity serving concurrent requests."
+         },
+         "expr": "avg_over_time(prometheus_engine_queries{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m]) > 0.8\n",
+         "for": "15m",
+         "labels": {
+            "severity": "warning"
+         }
+      },
       {
          "alert": "PrometheusErrorSendingAlertsToAnyAlertmanager",
          "annotations": {
diff --git a/charts/kubezero-metrics/templates/grafana-dashboards-k8s.yaml b/charts/kubezero-metrics/templates/grafana-dashboards-k8s.yaml
index 5a45d744..2ac1b062 100644
--- a/charts/kubezero-metrics/templates/grafana-dashboards-k8s.yaml
+++ b/charts/kubezero-metrics/templates/grafana-dashboards-k8s.yaml
@@ -22,19 +22,19 @@ binaryData:
9CvO88HY06lgYW5uAwzTImwC8AgB6bZJX6oeFixwrxgcGMd8aM70DXsXtlUONDo8Zv4MzQnBgazLirmPHZlmBGLTTcNdC4/kaGQY2fhxoHD35kCNiET9xQhJm8HAwfiy+Q7wr8Oi6YUNB6T5+ugIH3CfJ6i8UtYFxni/TX3wr99dfq7+UW62+wFfobrNXf8Rbrb7gV+huu1d+r7diG/QJato5O31ofMhuY+xvm/sYjvr/xeK+tPjebD7P52I3Nx+H2nlhrUM09nVp3LFCq6Hr629l43SG2aNaxUho+xLG1mFweU8P8u3BKDfmSl9AoP6e2RiM+t97nHmCw0mM6tXZ2/cja3HEw59VfCjIemjsO5o6DueOw84jxzc8GMH4pwPgGG7xo8KLBiwYvGrzYxIsjc8RojhjN/Ya1gDGhxMWMARQUTgKYMMIRoVeT6RXH7I4AcNU91/tFctUt1e1HcSdS6QaqPTBUk07/DXwRuuXrMF+F1nFKQz4DVAxQMUClCVTk4VWBVtwkLc+qPuchPrf4RY4BL6vAy/Hp71bKhJMb/GJ+hv0If4b9iJFL1yAXg1wMclmLXHwy8QklKRcGNEcq9/XFWKlyA0seGyz5Wr/13e51fMvfh+3lK7CZO8cR+gNTlknZHWRkfpVN5SF6nr0CA4pX5aUytdIYi3OxciyOoySE5cZ++4Uee01vUMBGDeUVT7n28Ayloe5B2ypXBxrlaz/qyaX0KVs8v986y4Cdwi9eGKDAPoVLkuI1JWpatT+lmF7lh/5teKi8R6Rbo/q4+YT+0kDK9HuNdSmaQ2FYvNKi7tGqTlWFbABmC6UpZQvyL1RUDx/pno1faTSvuupYEZgm0PQptKzpcpOK5XSTLJ2Kx93f9GD7jpVPcqA3Se8mk+QVSjUdRIDUO3tfSGXXuS3xBU3fODd4tj6FkTI8zgZS0et9e4GzygvqJW5Fqr7R/gXu+RwHuNv7DqrLVN+GX+wV1Sl7WZKELWXuLd/TQS6fdAtwVLyCIysFVbckAFhEq865SsoTOxWh2MPKaeyuo3zoqx+ULas9VP7vqh/6jsqpIJzdU/7velk+/FisQUBxxWNunEUd+Jk6sDpLb6B+qN7ZYj/3VHkLWWrq+5fE2asnxsd5uSyP80o/tE4yP8wapHLHbz/vYQc7wymeDZ3RCA9nzmA0dYYzzx30RtgbZI0vygLt7C3/B8u4cmNlawAA k8s-resources-cluster.json.gz: - H4sIAAAAAAAC/+2daW/bOBrH3+dTaLXdRbKNG9uJnQMoFjmaaYF2mmnSDhbTwqAl2tZE11CSkzSb/ezLQ5IpiXZ8xrL99IUbkzqo56B+Jv8SH7c0TUeu64UotDw30E+0R1pEC20rCOm3P37Qr0+7bDNsWiFq25iWhiTCvKzr4vCDSUvcyLZFCUF+78bz7NDyaXmVF/YsE597bkg8m52ig+xA7G9b7m3AT8O/EtwhOOjRAn2/GuiizLvjW/BmicbRUsOzbeQHWD4ar+hhq9tjLddr1ap/r6cVPnKxPTiSfDRej2wLBeee7RFuhaddubKNSJA/Fa8wUdD7iN1uyBpdqxbq8LDdQhR4ETHYBehXxHNw2MNRoGe26li2zQ6bLfSIg/gF+pju74aRa4XZ/SyzsJflhpj0kc0N42Q3t3EXu2bqetkiXfc0uMk5Xd6i31VdHvNPRAht25BaB90Pq7HcITWE+fWaBpK6KUHPu1PXhDS47SEHpRaJBi6S6rLup2HKt8ofnVfcWabwfr5Giuy0mOXJlUe98cnjV8ILNBRoPzHxsm6J3Yu6WBlCPjsKQaYVsdM0inXqyKNeMTHBhJ27Y3u5yAkwsXDwuY8JoZZWtD/wkYGHBTytZN6rZwtDZNwqmxKE2Pex+ZHaUFkfItLFYTZl82nLN8T3Pr8ew47oMcmJS23bMvzohLBerUU/ccN5jGvfftdfxX9+15/03fzBBvkVWg5uCYsUN7NcemE8wIshJ+XbJTJCj+SNEnuiw7tO/VTPVD1J37LGD3usd/Rsk9lEP6zuHlWz7mMtvqSdidQhZ+que1YnVFeGPMf186uv2tfQor0/vyHkDp9264/55EMEm0NyLPBIWDSASL1W+ODz81quafUtM6KZOjQLk20Dy+3amJo/F7z36N4Kio1rR8atCKP8ZbP+Jk5DZp2ck3UXOVi91/DOJu1Q+I1TfR0P6B6PEdWDQKSnI2ExBG3UxraygazS654hfn+sFepE96vcTfS/1UK5dMXZYN0t7UUoq+LryHf3uazbUlzeWsNCHWABYGGzYCGInG3Ww/NGclq4jdq45Xtmy6A/VRA9EWnR+y1PPPrHX9T3YXBCd1OTxI62p7Fj8qNwAmF3qChoIdv2DMR/Oj3+6bXpXmyTCqvFFZrKxDKC7/pucipaTxtDC9RnAWApAsuX2Dnauec4VuiwvgTABcAFwGXtwWUfwAXABcBlOLjYFr0jAraUEVs+ctcAtAC0ALRsFLQcALQAtGwWtNS0CscMMTfjYMcjD61P2DntI8tmUdxqP4R4DE7J7X/DIkjsG1MK26BCT0v7P7Yj8MhYPPKJWxSmfgBGAEY2C0YaACMAI5s8giJgojyzP6I9QC6TkQvMAQHBAMFsJME0gWCAYIBgyjINBPwyDb/AZBDQC9CLTC9buYSk6etjlE+muPQDTX4xbKms/uLdZR5TSht0E6dh9iGiNDnfY2SKW2K27tr6yet7TZEzsS8meTyp3liXx5OqRSQ7BCR7WSSrApIJJKupmaxgvMUgGQOnAZdlWOxkQGWGH7WigNqWsg4tNYMWDxNGZy2LPa00jNHaD9oA+2YhqWdRSaTlZXq8x8f0vE+Kh6bE5tSYt0N7f2pv7rRpyCsXB3N6zIl5oJSUxZ+hBcACwFojwJLjKZOHQFZjDodxsDoCsIKxrjKB1eSDXUH4YI9zj+B5yS7opnj/EpmEB3zyH/qv8ulT5eJCe//+xHFOAgX3+Cik+eIOP2ZyA+5ZpoldfaK7QdrcK89UnNtgHUwcHsre20h6oKxvpr1WExuWg3i/V7zj2QLUlONorO6Gw++ZjdxbdYol26W8pF8Q2mdppnfnaqGn+UobsF2+Et5b7Zl7Rw3UaNarh0eG2Tmk/44xQjVkmgf7hoGNGt67PQoqybhiUEn58999RCqDfvTtq8Hf/2RVCTsn5MwL073fvmq1DGzbrdrI+PjGuhDt76eKGBkOppkYciOnTROyUM8nMVJmmC7EfvfIre0hiLO74YbIBhs6Ouy0q+Zx7RgbVYwanWbjYL9WP8C1pnFQR41csKXHXU7YnZUz7NS/mpYddvVhYTcinmaJu5GhNo53z8vr3URFAQ6excEX5Xew9g9w8Swufrc4F8tih+kdLeYTwcmzOPmyvHkcTxdDFs/k4F/KnMW/Juy4Gj7e5B+TI2PNHe7HMvQmJY8uyZB7b/61N70Ng5BYbncyGw6fnZpmdjDVZ9G4xmSUmkqbff6PD+8uQEOVnxjUR6msxp0F3B3z/Y5e5IbbqN+VtG/JL3Vm2JOshU8I5jMwo2Z
Tk913tYFdV83OZ/O2c8mmsktg4vOFmHiubx9bLYNerH7MJg8kb7AX3y03LZ4XNa+WOS83MCnWzYe/6Csgffotor4tl/SJIDdgnlT7MaV4RRVIo0AatZ7SqFyegkBqHOn5MSikQHq+edLzAVrFL0ciQSANMdk4HDauREuTff9GK0BoPk/aSt6rxMha277b8zQDGb2cSUF3/vJwxV8ZBnC1oXAl0hLIahLpeR62gKxAew7ac9Ceg/YctOegPQft+WLDTv4hBaK3ksrPxY+qmRwMCvSSK9Dn6GOQr66xCD3zXivwc0l16HPIZpCigxQdpOggRQcpOkjRQYoOUvS5KxiXNrW9/sLzOa19ANrzhQZpQVS7kW57t/RUAL35CiYCCMxfWPKUju5Ru6p/OILYHMTmoIcqnx6qOPkCwqhxhFE1EEaBMAqEUWUWRp2LJKE9nIGtPtbOkGuKaIJZixlmLRaoUTrzgxl9fcMwj8I/OHs+zj4ro7O/UB9oXidJbFO7QgLFwdXl1Aj5M7s6yesQvF16tZA/98TWLojHUAZcXlbp0CISHLxeciHR9F4HcQmIS0BcskbiEv66IGkeyMUh0za0iLiTt7jmULxeaNyJoUEk/u+7/ub1d/3pDxqS7DytZLjtx87Gi1JGGT+M76lg/UVKVZ4LfV/QDJh/McKWZ4Mf7L9QEcy44W8KmAc3LEbUMnYagB8ml8Is67V98aD2r8KXZVy9FFQVoKoAVcXYOQvaioK2QvE6v9wMN4gr4H1+LyWuaC7xdX4rMYYCr/4brYMdJTWBd/69PI0xlQGw2BJYbHctSGQfSARIBEiklBMKgCKjUOQZJSSwCLAIjAu93LhQMQthLGgsAjsAAgMC2ygCY28YgbGglQewUxp9bAWI88SJAxLLPCZ9ksqdAdAA0GCwaHVRpQGoAqgCqAKDRWvMKtJzOoArgCswnrSk8aRxMxaGmyZjuCYwHDAcTPiV8xkWgLiR4qNxXosBnPbynOYDp8Gw0vRIcghIAkgCSFLS5zqBScZhkufe3wRYAlgCw0cvN3yU5GU+F2GUaCwkOwIkAyQDJCv3qx6AzKYZLVK+dQ/wDPAMRo1WClGOAVEAUQBRSv4aJGCUKUePAFMAU2AUqVyjSIWchNGkwVaDFwFXauNCXL0KEAcQt1EQZ2DL3qYkRyFKXgR0O8d1naBFMDKHz/5lVxXd1UzctwS3OY7Rtm/f+G9e/9ftO5j+R9om/QzYR5993PNP06mwT8TKx0DCePlSBRBqr7Vi6++INUoVn2/+1CffAf6cnj8/fL663v7C4uz179xfO4Cb8HZNGBdb4XGxOizcCUi1ceNi49LU6Ef1VoSpJruI6ckKwGr6l1P1iBd1e1dRyPFK0BXAFTxQCGN5yxrLu6ZdEXug8MNnGL6bZCX4Orys/IWBcjNWghf3sVyk0FyjhQc549IDGMVe8wlWll/UyvKDcaGdUi0xWKnBYvJzWTiSO1g15AceLtEK8jN7mKcw/SkJni75AvKzeDr+renHvzV3YDnpWZy8wHXjz6ZeTlpysWIkAXxcnoXi5+JjlsZJrw2unsnVl2V0NawOD6vDw+rw67M6/IxanjlMM73QpM+aLjD/vP9Gq5nAgUteo36NEnAKMR2E3wThd77E8Bs5cQ9enMCLF8u8C4Ab5+XGd5CMC5QCQShOEIqXesklRcnC6wodQynkROwRRuZStUPTH2uKKtByg+BorQVHWkW7sNgoRTviZ3lGf7QVt1sPjB520DdMAtE2sVacUB+wPUxEbvnBaFZ1B5HJ7wHExfTWoKfHCrHj2/Qi3W6aabpNGyXF8yAIJNVNRrgU4nseEybuoMjOST54Csm1KmU90yXgbKSmoaRfoBBp10KrJNXH+SwrmaRaz2c2zXe5+l8RJg/8+V2V4on1+6yzzkYoLe3i+9yNIu25pNNv5a5Lshyy7W+xIbKB/JxNhxhTbcUxRF2JpaWunXbk9K5t4lNbIWMaktG6Q11pKTZPvBJzwCQu4WdqiS53O/Lz5PKUcsaO2mX151wW39Vk19IM4X4JfktaoWdrC81lZeqN44AQ1yNVRAG+EQeS+ynRF7G+6UlkosUNF+dgR0CB7np3lVqiw6U387hMz+zmW/SWSAY7xyZJOUu+O+kN6a13tar0ZV/+Imn49Ib0d03+sl+Va6QlPuvS3zVTxOqP5BqYRFwKhWfPIh+4KR9YPkv9QP4iPYp9aMrtTdqSMd9Pj+u59K8353G3mcKV5/hRiLUvyTyCtqedS4GtR1wLqdOe7ajZMWtm1ajVa6jePDg4aDf3m2i/06geoyOxcT/tsatbT/8Hsm5xaI86AQA= + 
H4sIAAAAAAAC/+2daW/bOB7G3/dTaLWzi2QbN7YTJ2mAYtEkzbRAO5Np0xkspoVBS7Stja6hpByTyX72JanDlCw7jo9Ytp++SG1S5/+gfiYfkfcvNE0nruuFJLQ8N9CPtXtexAttKwj5t9+/868PO2Izaloh6diUl4YsorKs59Lwg8lL3Mi24xJG/P6l59mh5fPyuizsWyY99dyQebY4RZfYQby/bblXgTyN/Mpol9Ggzwv0vXqgx2XejdxCXlZ8cbzU8Gyb+AFVjyYr+tTq9cWV64163b/VswqfuNQeHEk9mqwntkWCU8/2mLTCw45a2SEsKJ5KVpgk6H+kbi8UF92oD9XRUbuFJPAiZogb0C+Y59CwT6NAz23VtWxbHDZf6DGHyBv0Kd/fDSPXCvP7WebQXpYbUnZNbGkYJ7+5TXvUNTPXqxbpuW+Dy4LT1S2ue2W3J/wTMcavbUStQ25H1VjuiBom/PqFB1L5pQR976a8JuTBbY84KLdINHCRUpd3Pw9TuVXx6LLixjJj7xdrlMjOikWeXHjcG588eSeyQCOB9idlXt4tiXtJj5aGkC+OwohpReI0reG68sjjXjEpo0ycu2t7hcgJKLNo8PM1ZYxbuuT6A58YdFTA80rhvWa+MCTGVemlBCH1fWp+5DYsrQ8J69Ewn7LFtJUb0ltf3o9hR/yY7Njltm0bfnTMRKvW5n9py7lPat98039IPn7TH/Sd4sEG+RVaDm3HFhnezHL5jckAHw45Jd/OiRF6rGiUxBNd2XTqb/Vc1YPyLW/8sC9aR882hU30w/rOUT3vPnHF57wxURrkXN2XvtUNyytDmeP66cVX7Wto8dZfPhAKh8+a9fti8hFGzRE5FngsHDZAnHrt8M6X57Vc07q2zIhn6sgsTLcNLLdnU27+QvDeklsrGL64TmRcxWFUvG3R3iRpKKxTcLLuEoeW7zW6sckaFPngLL+PO3JLJ4jqQSDy07FwOARt0qF26QWKSq93QuTzsTFUFze/pbvF7W99qFy543yw7lT2JkqrkvsoNveFrHtRcntrDQtNwAJgYbNgIYicLdHCy4uUtHAVdWjb98y2wX+qEH4i1ubPW5l4/MMf3PdhcMx3KyeJbW1XE8eUR5EEIp5QUdAmtu0ZRP50uv+v1+F7iU1qopbWeCozywi+6TvpqXg9vxheUH4WAMswsHxOnKOdeo5jhY5oSwAuABeAy9qDyx7ABeACcBkNLrbFn4jAlipiy0fpGkALoAXQslHQsg9oAbRsFrQ0tJrEjHhsxqGOx+7an6jz9ppYtojiducupBNwSmH/SxFB8b4JpYgNavy0vP0TO4JHJuKRT9KiGPoBjABGNgtGWoARwMgm96DEMFGd0Z/4ekAuTyMXjAGBYEAwG0kwByAYEAwIpirDQOCXafgFg0GgF9CLSi8vCgnJ09enpJhMSekHnvxxt2Vp9WfvJveaUnZBl0ka5l8iypLzPSVm/EjM132x/pT1/YM4ZxJfPOX1pGZrXV5Pqg8j2SGQ7HmRrA4ki5GsUc5kQ8ZbDJIJcBpwWY7FjgdUZvhROwq4bTnr8FIzaMswEXTWtsTbSqMYrXOnDbBvFpJ6FJXitDzPjnd/n533oeSlqXhzbsyrka0/t7d02jTkVYiDOb3mJDxQScqS79ACsABYawRYajzl8hBkNWF3mASrI4AV+rqqBFZP7+wKwjt7kmeEzEtxQ5fDz684k+iAT/7D/9U+faqdnWnv3x87znFQwj0+CXm+uKOPmT6A+5ZpUld/0tMgu9wLzyw5tyEamCQ8SltvI22B8r6Z9l5NalgOke3e8BPPjkGttB9N1F1K+D2xiXtVnmLpdhkv6WeMt1ma6d24WuhpfqkNxC5fmWytds3doxZpHTTrh0eG2T3k/15TQhrENPf3DIMaDbp7dRTU0n7FoJbx57+vCasN2tE3Pww+/1NUpeyckrMszPZ+80O7bVDbbjfGxsevognR/v62JEZGg2kuhtzI6fCEHKqXgxgZM0wXYr957Mr2COLsZrQh8sFGjg67nbr5uvGaGnVKWt2D1v5eo7lPGwfGfpO0CsGWHXc5YXdSzbAr/9W07LBrjgq7MfE0S9yNDbVJvHtaXe+mKgo4eBYHn1Xfwdo/4OJZXPxucS5WxQ7TOzoeT4STZ3HyeXXzOBkuRhbP5OAfq5zFP6XsuBo+3uQfk2NjzR3txyq0JhWPLsWQu6/+tTu9DYOQWW7vaTYcPTo1zehgps/icU3ZODWVNvv4n+zeXYCGqjgwqI9TWU06Crgz4fyOXuSGW+S6p2jf0l/qwrDHeQsfMypHYMaNpqa772gDu66anU/mbeeKDWVXwMSnCzHxXGcfWy2Dnq1+zKYvJG+wF98tNy0eFzWvljnPNzAp1s2HP+orIH36JeK+rZb0iRE3EJ4s92NG8SVVkEZBGrWe0qhCnkIgNYn0/DUUUpCeb570fIBWyeRILAiULiabhqP6lXhpuu/feAWE5vOkrXReJUHW2tbNrqcZxOgXTArd+fPDlZwyDHC1oXAVpyXI6inS8yJsgaygPYf2HNpzaM+hPYf2HNrzxYad+kMKoreKys/jH1UzORgK9Ior0OfoY8hX11iEnpvXCn6uqA59DtkMKTqk6JCiQ4oOKTqk6JCiQ4o+dwXj0oa21194Pqe1D6A9X2iQDolqN9Jt75aeCtCbr2AiQGD+zJKnrHeP27X8hyPE5hCbQw9VPT3U8OALhFGTCKMaEEZBGAVhVJWFUadxkvAWzqDWNdVOiGvG0YRRixlGLRaoUTrxgxl9fSkwj8M/nD0fZ59U0dmfuQ80r5smtqldkBjF4epqaoT8mV2d5nUIb1deLeTPPbG1M+YJlIHLqyodWkSCw+sVFxJN73WISyAugbhkjcQlcrogZRzIpaHQNrRZ/CRvS81hPL3QpANDg0j83zf91ctv+sPvPCTFedppd9v37Y0XpYwzfpg8U2H9RUpVHgt9P6YZmH8xwpZHgx/2X6gIZtLwN2OYhxsWI2qZOA3gh6dLYZY1bV/Sqf1T7Msqrl4KVQVUFVBVTJyz0FYMaStKpvMrjHBDXIH5/J5LXHGwxOn8VqIPBVP/jdfBjpOaYM6/56cxoTIAiy2BxXbWgkT2QCIgEZBIJQcUgCLjUOQRJSRYBCyCfqHn6xcazkL0BU1EYPsgMBDYRhGYmGEEfUErD2BvefSJFSBOUycOSCz3mvRxJncGoAHQ0Fm0uqjSAqoAVYAq6CxaY1ZR3tMBrgBX0J+0pP6kSTMW3U1PY7gDMBwYDgN+1XyHBRA3Vnw0ybQY4LTn5zQfnIZupemR5BBIAiQBklT0vU4wySRM8tj8TcASYAm6j56v+yjNy2IuopdoIiQ7ApIByYBk1Z7qAWQ2TW9R6ax7wDPgGXqNVgpRXgNRgChAlIpPgwRGmbL3CJgCTEEvUrV6kYZyEr1Jg60GEwHXGpNCXLMOiAPEbRTEGdSytzjJcYhSFwHdKnBdN2gzSszRo3/5VUV3NJNeWzG3be3yz69ebv/lOEbHvnrlv3r5l3vtUP4f65j8byD+XIs/t/K
v6dTEXyLKJ2DDZB3TEjLUXmrDt3HDrHHy+OJ9TH3ybYDo9CD64eeLL1ufRcC9/E36axvciWk20UG2wh1kTazgCbbauA6ySbFq/Dt7qwZXT7ub6RELhDX9dFV95kW9/kUUSs6KMQuUhVcM0bu3rN69L7wpEq8YfvgZHXpPWRu+ienLn5ksN2Nt+Pg5VogUnmu8cL9gXH4AY7jVfMBa84taa37QQbRdqUUHaw0sLz+XpSSlg8v6/uDhCq0pP7OHZQrzn5LwdMWXlJ/F08lvTT/5rbmNBaZncfICV5I/mXqBacXFJT0J8HF1lo6fi49FGqetNlw9k6vPq+hqrBeP9eKxXvz6rBc/o7pnnuNNzzT6s6Zrzz/uyPH6JnhyGk+eLMWTq5eSUwjuEJDTBOTpEgNy7OA+3DmNO8+W+aSAP+fuz3dIz+dQEiE4pwnOc73i0qR0SfcSPUQlZEni5Ujh0nKHZj/6SqogDodwaa2FS1pNO7NEb0cnkmd5RMf0IrluPTD61CG/UhbE1xavQherGMQeJmFX8mA8q3qDyJQPA+ZS/ozQs2OF1PFtfpNuL8s03eYXpcTzIAgU9U5OABXSWxkTJu2SyC5IR2QKqbVlUn2hb6D5SM1CST8jIdG+xJonpT7JZ1URpdR6vrBpscnV/4gou5NvBpcpp0S7LxrrfITy0h69LTwospZLOf2Lwn0pliO2/WtiiHwgP2bTEcYst+IE4rDU0krTzhty/tQ26Vu7RA41IqN1h7vSKtk89UrCAU9xiTxTO25ytyK/iDAPGWdsl7us+ZjLkqea6lqeIdIvwS/pVej52qHLFWXlGycBEd+PUhEF9DI+kNpOxW2RaJse4ky0pOGSHOzGUKC73k2tkep5+cM8KdNzu/kWfySywc6JSTLOUp9OekuZT69RV77sqV8ULaDeUj431C97dbVGWTy0qXxumHGsfk/vQUjNlVB49CzqgQ/UA6tnae6rX5SXvA9N9XrTa8mZ709P6sL0r5enSbOZwZXn+FFItc/peIS2q50qga1HUlOp85bt6KBrNsy60Wg2SPNgf3+/c7B3QPa6rfprchRvfJ212PUXD/8H8o5FSek6AQA= k8s-resources-namespace.json.gz: - H4sIAAAAAAAC/+1da2/bOBb9nl+h1XQXyTZubCdO0gDFok0m0wLtNJOkXSymhUFLtK2JXqWoPJrN/vblQ5Kpl9+OZfv2Q2qRelD33Esekofi45am6ch1PYqo5bmBfqI9siSWaFsBZUd/fmeHT7v8NGxaFHVszFIpCbFI67mYfjBZihvatkwhyO9fe55NLZ+l10Vi3zLxqedS4tn8EV1kB/J623JvAvEYcUhwl+CgzxL0/XqgyzTvTpwhiiULx1INz7aRH2D1biKjj61en5dcb9Tr/r2eZPjIxfbgTurdRD6yLRScerZHhBWedtXMDiJB9lEiw0RB/yN2e5QXulHP5eGyyygKvJAY/AX0C+I5mPZxGOips7qWbfPbphM94iDxgj5m17s0dC2avs4yc1dZLsXkFtnCME76dBv3sGsm0KsW6blvg+sM6OoZt72i1+P4hISwspXkOui+LMdyS3IIx/WKOVJxUYK+d1ecQ5lz2yU3ZRYJBxApeWn4mZuKs7J3Fxl3linRz+Yonp0k8zi58BganzzxJiJBQ4H2ExMvDUsEL+rhQhfy+V0IMq2QP6aVzyv2PIaKiQkm/Nld28t4ToCJhYPPt5gQZumC8gc+MnCZw7NMjt5+OpEi46awKAHFvo/Nj8yGhfkUkR6m6ZDNhq04Ed/74n2C0Nl2mV3bLnKwKGnb98y2wWoexB5CTpJfbcMP22HAbNsOMEs1g7ZwkxN2i7ZFEMWPhh2yApI33/QX0c9v+q6W3JmnJwff9KcdbU/jz78JO5mntlmVJkKd/fjBvI0Gj395HXY9P7XGzENxjYU/sYyAP2Gy5+5q8d1ZBnspXhJ9N2ugQZ1BLYe/Mkc5f5rlstKIoM2HkVKHnCODetzgzdwprAIXzYH+Vk9lPSlHaYeifV7je7bJcdaP6rvH9bRL8hKfswpSaWRSeVd9q0uLM6mot/TTiy/aF2qxFk00ctp2l91Oi8HYyTwuaboesxUMItgsqUcCj9C8QWT10qYPviiH5ZrWrWWGrDYqrWnicwPL7dmYO0e6ePfo3gryheuExo0MlawZeJ0aVTXcWhnQde5KxVeVV6hJpSnIQfF7PKB7PEbkDhyTPY7QvEvaqIPtwgLyTK/3DgkO0MjlySam8DLZxtRz6cobp513t7IvUZgVvUe2SctE4VbB6601IWoCIQJCBISoSoTIthwL6FAl6JCEAsgQkCEgQxtAhvaBDAEZ2jwyNOAgDnY88tC+88gNa10YB6DtzgPFKhmxMZ2CgSSP+BvL4WdaDgNJHlRilEi+OTCjPDP6JCwDY0VAj4AebTY9OgB6BPQI6FEV6dFix4yAHE1MjmDkCKgRUKMkNQlMFsY+RtmgilI/sEpABFFx9qV3l5JVJQW6jsIxLXpKgvQ9RqZsb9N5V9ZPkd8/lDETYTGJnKrZWhc5VT3P91rA956X79XXk+8Nr+FETPB7/QiZmbVaMq6Qr+sMHjn81F/OmwevW6f5M5JwKGQZka/na+e+ZZrYvZLcpeRiLtccSDgLT0nCoDhXQTpPb1KkdqImImdAyT2Gmu/89fF+fUPMV8oPx+l5NJqFXY9cGK/ZLHXnQdtmj5uF748k9BLu8+R+j4/siU9PesmJzLQ3payEWV9AOFHgJKY2kI2I7F/FvR4RS4+T9pk4w2aJjM2b7FDpQcV12qvZp98ntuvoenUNDCzrvGWat6zWndq44/VrM+xgTkoIXptUsu8qVlJAtxW6rWvUbVX9aVgcQq91+DyG6LQeQqcVJimWMklR0lWYfJYioA/2RD3n63wrJiMJD2jKf9i/2qdPtbMz7f37E8c5CQq4io8oixe3/J5xMyx7fPp0/dRihjHookY+UliRG3E1lAZo2hc2sWE5SFR+zaIe501xAEWZ16JT985Go85LuJN+RljNpZnenasXnvqFiLpqKDRfefRqv7wtgKecGabgc0Onw2Ihly8mfpNGe3p0L4cP4QDAYwH8rvoAa38HiGeB+HRxEKtajumB/jhsLBFAHgvks+rGsYQXong2gH+tchRfeOZqoFs26r9AcPfMvcPWcQMfHOID3DKOOuigjupHhweH+69buHvUae7v3RwHtXigMaj5nvmvW0Rqg27qmxeD3//gWfGYZjyiKRIHQ5qDAU2RwW745kW7bWDWoR3man4RjFWoRiruXIoJ9179c296GwaUWG5vMhuWD+Gu4XSQGC9ZgPArO+CuD5OGzXvmguv/pIlOEpOcCMv74Qkrs3WLT8Zat7C2Nn+3CJsv39Uj0SWAPxT808oFXKSEXVuLn0G4bSr0v+orMHX8B58Or9bUMUFuwJEsxjEheAVZMLUMU8trOrWcjlOYWh5HEH0Ec8sgiAZBNAiiQRC9HEH0MyzF1EasxQRl9IKU0XNZZAri6CHi6OVaeF300fGSXz4Oom
3f7XmagYw+3gG59JL7tKL+hz7thvZp1bCEbu0kiulj6NWCYhoU01VWTBdWbiDVqpZoWvKPmQAG3XTFddNzxBhEl2ssnY6wBvV0pdXTc4hmEFCvvYA6PeBzeXW1A2DPAvZ5pQM6gvk0P6YHQE8I9G+rAPTVHfIB55lwfl9NnGHhSzmssPAFFr6szsKX5X+BGRbA5ETZEgpYBrGwNTDV8fqh4nzwg6Ushxnf7LAiBoIPFsfMxwtIMHfcV8jG589iYyGn2mAr//YsVg7ukL/BRn6vr4bMEJbTwXI6kB6uhPQQVtRNLD18DdJDkB6C9LDSH2uVQaJdYgOzPqb2Drmm9CaYM6umDvGdH8yI9TWneI5FAezKCxKnB/uSYaB53TiwTe0CSRoOUFdUlTgz1HFcU0C78tpEf+6BrZ0Rj1MZgLyqUsVFBDigXnHN4vSob7jASaOe5ntmAEInEDqtidBJfERRmatxMeXzzm0im3I58Sw/ujjr3M3Tn8wn+ePa8bDb950N1zmVWZ9GrSqYf6F6p1HO70tCA/ZfkM5opPsDAIvVHI0bAKZk9IDDglQ/YwcCADGFNGhZ3yaOxrZ/l2BWcYtbEFaAsAKEFWPHLCgscgqLgm8WZ9NAYwEfLX4mjcXhEr8UO8lIyrPxtMp/FnZJ3Gy4tgS+pfn8vIvLCoB1LYF17a4F52gA5wDOAZxj2PwBkI4lk44RIkdgHcA6YKzn+cZ68lEI4ztjca0mcC3gWsC1hooFgGwte4RnnMUGQLmen3L5QLlgoGd68rEP5APIB5CP4Uo5YB8VYR+j1r8BAQECAmM+zzfmE8dlNhZh5Gcs8nUA5AvIF5CvsVTywMEqOgJUuD4ZiBgQMRgJWiky0gIyAmQEyMh4S8WAjVR3RAgICRASGBmq1shQLiZhhGhw1uDbKLXG2HTtEOga0LWNomsGtuxtxtkYcZL7qmxnuFs3aBOMzGTWLrs9i4lvLcbN/vdNdxyjY9+88l+9/K9762D2H+mY7G/A/9zyP/fir+nU+F/E0+fzQQDtpZYv9R2xFF15FYu9Ayx1Gpb64fPF1fYl98mX/xYY7wAphQ8SwDjZKo+THQHxAuK1aeNkozlXamncqjGvyhce6NdUKwT7xAt7/YuQChImORhQMFglCOOCyxoXvGKVEGt5tQ+fYShwko22GsdAO5+Xdm7GTluyHct4Cos1lniQMS67gZGvNZ9g565F7dw1GD3aqdQX3GsN2KxrLt/lFwAXDQwCwhXaoWtmhEUIs+4nIF3xDbpmQTrqa/pRX3MHtuuZBeSzKm64p0BcMJIAGFdnI665YMzDOK61AeqZoD6vItSw+xbsvgW7b63R7lvjCoNy+3zMYYYnO2m06CmfNd3CaxiEKZUUYFjZfcDWKwxHC/bAFSu7JdpksgUAsrJbq43RLgCSK7E52zqG5LjSInDLzdqqrkDsUAnNEexTB7JwUCUVqpK0mnZm8ZGMTiieMkKktBWVWw+MPnbQV0wCWTb5bScpUeBXmIjciJuxqOoNPFM0BsTFrI3Qk3tR7Pg2e0m3l0SabrNCKf48cAJFmpNSN1F8L3zCxF0U2hldiAghNbdIpM/FCzjtqYkr6WeIIu1KCpqU/CieVbmTkuv53KbZKlf/EWLyIIbwimRRvN7nlXXaQ1lqD99nGoqk5lIev5V5L8VyyLa/RoZIO/Iom5YYs9iKYyi/YksrVTuryFnzbeK3doHWqSSidYdBaRWcHqMSEYJJIBFPassqdzv0FQpTYy0+xTX2NsQyAkYlEu6xU4xecxR6UQOnosyCRUAU/BEXSE/n5krO04pPjnxDvpqSEQb4Wt5IrbJWwmHqC3aYhClO7TLcU9rJbdrcZ8Kg7fdZkzPEl4pp7JPCY9fDx7bipvBJVvyWMHtU5XclB9Vd767WiLXhjDtGaXrqMt9iDIwMLo5MktB6lQzpLeVLjY26crCvHii6Ur2l/G6oB/t1NUfZ+6Op/G6Y0tO/x+/AFywobjTyKeqND9Ubq09pHqgHyqcGjky1vHFZUub76QmNof7l+jRqpRMu7zl+SLF2Gc9waXva77EXatsXXqzO00Mh1NWPW6h12KwfHRtm94j9e40RaiDTPNg3DGw0ZCjptwlTqG89/R+QxAADsCIBAA== + 
H4sIAAAAAAAC/+1da2/bOBb9nl+h1XQXyTZubCdO0gCDRZs00wLtNJOkXSymhUFLtK2NXqWoPJrJ/vblQ5Kpl9+OZfv2Q2qRelD33Esekofi45am6ch1PYqo5bmBfqI9siSWaFsBZUd/fmeHT7v8NGxaFHVszFIpCbFI67mYfjBZihvatkwhyO9fe55NLZ+l10Vi3zLxqedS4tn8EV1kB/J623JvAvEYcUhwl+CgzxL0/XqgyzTvTpwhiiULx1INz7aRH2D1biKjj61en5dcb9Tr/r2eZPjIxfbgTurdRD6yLRScerZHhBWedtXMDiJB9lEiw0RB/yN2e5QXulHP5eGyyygKvJAY/AX0C+I5mPZxGOips7qWbfPbphM94iDxgj5m17s0dC2avs4yc1dZLsXkFtnCME76dBv3sGsm0KsW6blvgusM6OoZt72i1+P4hISwspXkOui+LMdyS3IIx/WKOVJxUYK+d1ecQ5lz2yU3ZRYJBxApeWn4mZuKs7J3Fxl3linRz+Yonp0k8zi58BganzzxJiJBQ4H2ExMvDUsEL+rhQhfy+V0IMq2QP6aVzyv2PIaKiQkm/Nld28t4ToCJhYPPt5gQZumC8gc+MnCZw7NMjt5+OpEi46awKAHFvo/Nj8yGhfkUkR6m6ZDNhq04Ed/74n2C0Nl2mV3bLnKwKGnb98y2wWoexB5CTpJfbcMP22HAbNsOMEs1g7ZwkxN2i7ZFEMWPhh2yApJfv+kvop/f9F0tuTNPTw6+6U872p7Gn38TdjJPbbMqTYQ6+/GDeRsNHv/rddj1/NQaMw/FNRb+xDIC/oTJnrurxXdnGeyleEn03ayBBnUGtRz+yhzl/GmWy0ojgjYfRkodco4M6nGDN3OnsApcNAf6Gz2V9aQcpR2K9nmN79kmx1k/qu8e19MuyUt8zipIpZFJ5V31rS4tzqSi3tJPL75oX6jFWjTRyGnbXXY7LQZjJ/O4pOl6zFYwiGCzpB4JPELzBpHVS5s++KIclmtat5YZstqotKaJzw0st2dj7hzp4t2jeyvIF64TGjcyVLJm4HVqVNVwa2VA17krFV9VXqEmlaYgB8Xv8YDu8RiRO3BM9jhC8y5pow62CwvIM73eWyQ4QCOXJ5uYwstkG1PPpStvnHbe3cq+RGFW9B7ZJi0ThVsFr7fWhKgJhAgIERCiKhEi23IsoEOVoEMSCiBDQIaADG0AGdoHMgRkaPPI0ICDONjxyEP7ziM3rHVhHIC2Ow8Uq2TExnQKBpI84m8sh59pOQwkeVCJUSL55sCM8szok7AMjBUBPQJ6tNn06ADoEdAjoEdVpEeLHTMCcjQxOYKRI6BGQI2S1CQwWRj7GGWDKkr9wCoBEUTF2ZfeXUpWlRToOgrHtOgpCdL3GJmyvU3nXVk/RX7/UMZMhMUkcqpma13kVPU832sB33tevldfT743vIYTMcHv9SNkZtZqybhCvq4zeOTwU385bx68bp3mz0jCoZBlRL6er537lmli90pyl5KLuVxzIOEsPCUJg+JcBek8vUmR2omaiJwBJfcYar7z18f79Q0xXyk/HKfn0WgWdj1yYbxms9SdB22bPW4Wvj+S0Eu4z5P7PT6yJz496SUnMtPelLISZn0B4USBk5jaQDYisn8V93pELD1O2mfiDJslMjZvskOlBxXXaa9mn36f2K6j69U1MLCs85Zp3rJad2rjjtevzbCDOSkheG1Syb6rWEkB3Vbotq5Rt1X1p2FxCL3W4fMYotN6CJ1WmKRYyiRFSVdh8lmKgD7YE/Wcr/OtmIwkPKAp/2H/ap8+1c7OtPfvTxznJCjgKj6iLF7c8nvGzbDs8enT9VOLGcagixr5SGFFbsTVUBqgaV/YxIblIFH5NYt6nDfFARRlXotO3VsbjTov4U76GWE1l2Z6d65eeOoXIuqqodB85dGr/fKmAJ5yZpiCzw2dDouFXL6Y+E0a7enRvRw+hAMAjwXw2+oDrP0dIJ4F4tPFQaxqOaYH+uOwsUQAeSyQz6obxxJeiOLZAH5X5Si+8MzVQLds1H+B4O6Ze4et4wY+OMQHuGUcddBBHdWPDg8O91+3cPeo09zfuzkOavFAY1DzPfNft4jUBt3UX18Mfv+DZ8VjmvGIpkgcDGkOBjRFBrvhry/abQOzDu0wV/OLYKxCNVJx51JMuPfqn3vT2zCgxHJ7k9mwfAh3DaeDxHjJAoRf2QF3fZg0bN4zF1z/J010kpjkRFjeD09Yma1bfDLWuoW1tfnbRdh8+a4eiS4B/KHgn1Yu4CIl7Npa/AzCbVOhf6evwNTxH3w6vFpTxwS5AUeyGMeE4BVkwdQyTC2v6dRyOk5hankcQfQRzC2DIBoE0SCIBkH0cgTRz7AUUxuxFhOU0QtSRs9lkSmIo4eIo5dr4XXRR8dLfvk4iLZ9t+dpBjL6eAfk0kvu04r6H/q0G9qnVcMSurWTKKaPoVcLimlQTFdZMV1YuYFUq1qiack/ZgIYdNMV103PEWMQXa6xdDrCGtTTlVZPzyGaQUC99gLq9IDP5dXVDoA9C9jnlQ7oCObT/JgeAD0h0L+tAtBXd8gHnGfC+X01cYaFL+WwwsIXWPiyOgtflv8FZlgAkxNlSyhgGcTC1sBUx+uHivPBD5ayHGZ8s8OKGAg+WBwzHy8gwdxxXyEbnz+LjYWcaoOt/NuzWDm4Q/4GG/m9vhoyQ1hOB8vpQHq4EtJDWFE3sfTwNUgPQXoI0sNKf6xVBol2iQ3M+pjaW+Sa0ptgzqyaOsS3fjAj1tec4jkWBbArL0icHuxLhoHmdePANrULJGk4QF1RVeLMUMdxTQHtymsT/bkHtnZGPE5lAPKqShUXEeCAesU1i9OjvuECJ416mu+ZAQidQOi0JkIn8RFFZa7GxZTPO7eJbMrlxLP86OKsczdPfzKf5I9rx8Nu33c2XOdUZn0atapg/oXqnUY5vy8JDdh/QTqjke4PACxWczRuAJiS0QMOC1L9jB0IAMQU0qBlfZs4Gtv+XYJZxS1uQVgBwgoQVowds6CwyCksCr5ZnE0DjQV8tPiZNBaHS/xS7CQjKc/G0yr/WdglcbPh2hL4lubz8y4uKwDWtQTWtbsWnKMBnAM4B3COYfMHQDqWTDpGiByBdQDrgLGe5xvryUchjO+MxbWawLWAawHXGioWALK17BGecRYbAOV6fsrlA+WCgZ7pycc+kA8gH0A+hivlgH1UhH2MWv8GBAQICIz5PN+YTxyX2ViEkZ+xyNcBkC8gX0C+xlLJAwer6AhQ4fpkIGJAxGAkaKXISAvICJARICPjLRUDNlLdESEgJEBIYGSoWiNDuZiEEaLBWYNvo9QaY9O1Q6BrQNc2iq4Z2LK3GWdjxEnuq7Kd4W7doE0wMpNZu+z2LCa+tRg3+983fXuP/X71cucvxzE69s0r/9XLv9xbB7P/SMdkfwP+55b/uRd/TafG/yKePp8vA2gvtXzx74ilCMwrXf4d4K3T8NYPny+uti+5l778twB7B2gqfKIARs5WeeTsCKgYULFNGzkbzcJSi+VWloutzlsAIZtqFWG
feGGvfxFSQcskKwNSBisJYexwWWOHV6wSYm2x9uEzDBdOshlX4xiI6PMS0c3YjUu2YxlPYbHGEg8yxmU3MPK15hPs7rWo3b0G40k7lfrKe60BG3rN5dv9AuCioUJAuEK7eM2MsAhh1g8FpCu+idcsSEd9TT/qa+7Alj6zgHxWxU35FIgLRhIA4+ps1jUXjHkYx7U2QD0T1OdVhBp26IIdumCHrjXaoWtc8VBuL5B5TvVkp5EWPfezpvt9DcMypaQCMKu/e9iaBuZomR84Z/W3VptM7ACIVn+vtjHaDoB0cZC+gyBdjEQJHHVDt8UrEE1UQrsEe+KB4BzUTYXqJq2mnVl8RKQTiqeMEDttReXWA6OPHfQVk0CWTX5HSkod+BUmIjfiZiyqegPPFK0CcTFrLPTkXhQ7vs1e0u0lkabbrFCKPw+cQJH4pFRSFN8LnzBxF4V2Rl8iQkjNLZL/cxEETntq4kr6GaJIu5LCKCU/imdVNqXkej63abbK1X+EmDyIocAieRWv93llnfZQltrD95mGIqm5lMdvZd5LsRyy7a+RIdKOPMqmJcYstuIYCrLY0krVzipy1nyb+I1doJkqiWjdYVBaBafHqESEYBJIxJPassrdDn2Fy9RYi09xjb0NsYyAUYmEe+wUo9cchV7UwKkos2AREAV/xAXS07m5kvO04pMj35CvpmSEAb6WN1KrrJVwmPqCHSZhilO7DPeUdnKbNveZMGj7fdbkDPGlYhr7pPDY9fCxrbgpfJIVvyXMHlX5XclBdde7qzVijTnjjlGanrrMtxgDI4OLI5MktF4lQ3pL+Spko64c7KsHij5Vbym/G+rBfl3NUfYZaSq/G6b09O/xO/CFD4objXyKeuND9cbqU5oH6oHyWYMjUy1vXJaU+X56Qquof7k+jVrphMt7jh9SrF3GM2XanvZ77IXa9oUXq/z0UAh+9eMWah0260fHhtk9Yv9eY4QayDQP9g0DGw0ZSvptwhTqW0//BxjlaIYcIwEA k8s-resources-node.json.gz: H4sIAAAAAAAC/+1d/1PbNhT/nb/C07Y72MFIAmxd7vZDC6PdXbuxQnu3W3c5xVYSDdvyJJlAOfa370lyHDmWS0hoCUH9KX5P1pf3ed8kPdPrjSBAOE2ZxJKyVKBucA0kIMZUSHj66294vNlWzUhEJe7HBKiS50TThimRv0ZASfM4NhSOs9EZY7GkGdBbmjiiETlkqeQsVkMMcCzM+zFNz4UeRj9yMuBEjICA9loCGRob6xZ6WmZyQA1ZHONMELs3zRgROhypmaPOQSu7RCUjwymJpz3ZvWk+jikWhyxmXEvhZttm9jEXs0NpRoTF6DVJh1JNut2q8UjTaxILlvNQLQCdcJYQOSK5QJVWAxrH9W6pkne7Skol4RdYNUbtpNpJTIYkjUpg7fUO0+fibAZSu8XF0DV5Jf2cc5LKBm6CL5s4NG3gcIXaKaiJeypixMZujgTVjRs6BYnkUwAsXhVcUELdarZ3zRjTSGPbmuVYeluSlRWcMEDjDdMr0YQAi+Aj4awKS0YA/FTiIXEqSKZ64TiiuRrmoM5z6xWgEhFOuBp7EDNZHVMQTon4/YJwDpKu2sKsPUxtQvUFkAYhznBI5RXanm0WKqtRzb4+7uz/dHBYb1GaQh3BqZ63agzwGxFJT/W0m15WvmXqb5xNShNwcy2UOzWukDg8d6gQKJH1VFUDAYIiTV4BmMoI2p0qtRimpoNCkiwj0WuYpBNxifmQyDmwJJeZxkjkyeZ53ie9FHS0B+PKXPQm2F6HcQ4j8p8/oG+Knx/QdqCa/vwf0NQPRQAvrb0XtAuz/AO62apjPmA8wdoTS5rASAbFWrOJ7zrGodRa1GnA77js79PaaFqDwM6tuDSDKck0MFU8t+cXoJZdihOioe5lLOqFEN8woMS75a8eyKaXC7BxWD1QI9HT7qoLXfQox5LMJ++braB/FWzCKF9UzNfXMOLNzT1KuNFi5EjFfRZHDqeqlnUMEdIxluadjuhAuplShzZ0ePIueKdgqLpDWXqN69logzmJGoKKYFzWxWdiTU9eZXpEmkb0gkY5hKbGsDNpqzOm6swu8SUV9Xn18/Dc2Hpd6CgpQo4SyQxkSGmq+63mwFoGT50CupdwhS/nCSNTFYXhuHQoFO6TuEmXYjZ8gXWm167xTKrhfM3kGvWYYq34Tub/gItwm5hZxy1xaWOWWtoW5AoZwbOGU1B/BWehdwRu9ls2rmT95YTOCpOz9elTdmh4p/Sj5o9+MNZSQPE0k/16rt/xuf6XzfXb65nr31uSWp/LbVmqkFfxnTYcZ/UoZiyJTPOTP+Hfzps3O0dHwatX3STpCkfek2EJ9pI29zkJw2avge4UE8rpujOM6eao0BGnIw8nbqgK0KILjkhIE6ydX8e11zl3G1DBPNPbiRcxvq1dmTuhIw6eK4jYOEXOpu+49lWfhOa9st7g6+cOeJozwwp8aZ70wRZq/DylVtBeHN235F/wL1J4gJcB+MXqAxx86yFeBuLDzwdxEUf14+JAv6YJ9Xa8HMhHq2vHBl5vxcsB/MsqW/EJizy6C6GbuSS3Cpa74nhaItz9/rvdxWUoJKfp8G4ybD41XeTsfzWPrvXJhOPQGrac+uzBeXOzwJl2fQxOBvosCzk2H8tfFBSS6ZYC72qBZ3kX5kwvSFdfxVRA6E0uV+CHSUnXTtQv0DrdyQS7gYe6CerDlbOqWCeIayfoI29TTwPoX9AjuG79IwcFWK3rVo5ToZB041hmaA6Wv47117Freh1btVN/HTtP7eWev4/1tZe+9tLXXj547WVCEsavfPllw7bEiKc3ZvycpkMQgez1ryQR80q67OkraOnLMJfdF7zRcJg6iWBzvMtAi8IR2fJVmQ+8DdBG4bcBT3QbYJul3wncpTBz328EfGGmL8xc5cJMp3PzNQOrVZtp8o+lAPblmStennmPGPvarjWu0Cyw9kWaK12keQ/W7Os0175Os3rg8/b0dMuDvQzYxytt0AXMh/UzPQ/0HYF++RiAPh3jzOO8FM6vVhNnX1/v6+t9ff3nra9f8m5ywavJp1Vmb2TsK+0f5/V7s4p/sjjYg/4la+7nl7Yvu39yBvbkC/A/G+RcrH+icPzwYtb1SWsv6JcPL2gxxtnay/kVehwFev7bHf/tji/aexRFe3N9vrNRTBUJiGYJfk+4MNNp7xuyqgFSb0SYn5s/ui7xcKqMSOV4PCXFgaLpS5Iki2Fd6bD+N+M3ZnG3atUq1X6SXGo1iMgA5/FMoZS2Gpu74dAv/QlCVTlL7UFHWOLg1BT4WfzChO3yP4vLsslfwrf9K4KtIr/SR3yuMkHrL9e3K9QhuZyJCqWzsobfmFmXJTkcx+8LQVR19zaZNgjTLcU5KiEnkra8OfhuCNQReR47iv8ajBglACV1NJ+gUoT+u0CiR+oZL7uZZ9f/sD7kBkprd9Q3FmQHVsNpKCBb2A6K/rfc6HVuQ6+IaTbKYCwaIvHHZEKoyq3NXNHcjQvdMEuzGLkgZ6Yj20s9CoVp3ZPCV
ANrqS8qLVxYWaYf49B0wJwZ6I1JQddDXzYmkcz8lx46wSnd98AkjyDR8U578u0DpH4FDVVeyygkUHz6ciGS3iRltnMZdDDVC9RuWQ979oNVNI0OrN9t+2GvZXOmX2igjvW7HRmt/XuyBvVJjqUYt45id/yD3bE9Smfffphe/KAfI3u+k7lUxPeR6YJc9O7ssIi45Ve5LMlySYK3xSmKCHaD30ABg80TFgmjhijX9eeo02rh8Nkg6g/6/R/3+3s/4cHg2bN2+xnZj9phJzSNL8qA39q4+R94i1XE8mUAAA== k8s-resources-pod.json.gz: - H4sIAAAAAAAC/+1dWXPbOBJ+96/gcGan7EkcS/KRxFXzENvxJFVJxWM7mdoap1QQCUlY8xoQ9BGv97cvAFIUSII6bYmUOg+KCVA4+usGvm42xIcNwzCR5/kMMeJ7oXloPPAiXuiQkPGrv7/zy8eX4jZsE4Y6DualjEZYlvU8zD7avMSLHCcuoSjoX/q+w0jAyxuysE9sfOx7jPqO6KKLnDD+vkO861B2Iy8p7lIc9nmBudsIzbjMv5V3yGHFg+Ollu84KAix2pqs6GPS64uRm639RnBnphUB8rAzbEltTdYjh6Dw2Hd8KqXw+FKt7CAa5ruSFTYK+5+w12Ni0M1GoQ6XfY2h0I+oJSZgnlHfxayPo9DM3NUljlNslgh5N7NFHsP0BombzaabbcTBPezZKbDqfHveu/AyB6l6x01PN3gh/YhS7LGSWhfdldUQr6SGCtQuuJrohxL2/Vt9DeOq65Q0yiUSDQFQ6rLgciWUd+VblxW3xJbYNvI1it6mxcIKznyOxmdfzkQWGCg0fmDqZ2EJMAffY6iHtQoSiFYoskkkutkv1un1iqNiY4qp6Lvr+CzbZ4gpweGXG0wpl3TWFvL2MLQJ0RbF/3BJsqx+DgzRl939fNrae7t/XLwj0eJGoUKsCsOVooitqrz6WgWfVqE2ZMi61oCfg3/UtB3ikjGTPn37ZrdRi0krV1mtDQNk4bJFjFcKm222sqVJNwWTCRkOAmx/4oPUKihDtIfZBKqH7wIp4TByNz1uTW0PuViOtB34dtviuwnindDD9K+2FUTtKOQW1Q4xL7XDtlwcDnkTbUIRww9pG79fmb+kF1fmS4O3Kcr4f+LKciI+ESpKkj+vzMcto3NvbKbdbWlA96mL5N7DiCtGIcyteNtgtT5FFpNq1CpRgdO0vYeHtN/HR7Pkdi70a2UrzukFDiS4UxmCCsKVJ4quo04OgDbfseVO1h4sEg//8TtcdOLWba4oDG/z3Y0SKywT7UtjUmAGffEijjYH5crbuvIWiUT5SlgNEOIla6UhKFuVZwagdGVkfUFHfcfW7PViVqecuGn6knUXfdJl+komGZd5fPbV+CrWq+wuzdLd4SFPghDFdgnXCX3KitKLKVCb3QeyR+LZ5IbYEWdMpWxocK8k8tmR3aE7EhbH1Yms63hNLwrddBMmJESSg8wUWqb/VjnfSzmd9Ez0U7hHd5Owm6GG8u4o0ygU6mCnTJccv3eEpAPSLNTFDFj7tZgCF3mBMuOpVoclTkJvYvE8xvCPjXxpalt8gQ0wyhtOUvqRrxXSUdVXn/u3GWc0HdBlYnKqPo2yw7jugvyQ9f2D2FoSKMAHTXzQVkV8UM3XYt3VVYAHWjEPtGa+CPEsivmSuZn1O6wu9zb61Gd83bDbXLTEH/gfChdzMJuGaqVd/MQLS/2Sv39pt4V30x5Y4PetvKti7IwZetUGvAq+1YSscrTqyUjDwJYtShix+GJTFm4oDRiU1fmBpHoa2iBXLrHqvGrtFyrvEyLIhcM57qTTfhLCfBkbGfF6wJqXzJqTDSTyyNPTziYQ53oQZ409AnsusOcied6FBziLpc9NoM8j6XNxLOP4c8junameIl0Wd7PYkvCQjf2b/9v+/Hn75MT48OHQdQ9DDcsLEOP24pW3OdiO+8S2sWfO9vRHH58r0DLtam4NlqEsQLNO2MYWcZFc/Fo6gnetN6Ck8lI6OkcOGndfyqHME8pXLsP2bzWRXXHrVyrXqpHQfBPWa/z8TgNPeVw1A58XuR1uC4V6STnSnXt2dM9HP9YEgCcC+Kj6ABv/Aojngfj4+SBWvYjZgf406kk9gDwRyCfVteMYXrDi+QB+X2krHgThAOOZMLbK5VcFK644qoogd179tjO7DENG87HJsTIsD5kuNVtqnjyR6bKlZAhDE8vnvmnJE77ZQv3FPijuysiXqfFS5k/XScR2mIrpUKIRRId8zOQGH06UTrVWOBw9Bw7VNAljxwAlmUlJjitnrEna3VqhcAKmCioyWkXemzXIwfwz4qpTrafJFHmhQFKPY0o2NVWQowk5miv6qDlrp/CUeZIczT14zAxZmvU4J5iagZaXrOUpwtqJpL5nDIeE3cWuT+/btz69Jl6P+xes3blnOCymxc53kKqQLEtcbuXxBRw2FEXVOWwY6wScN1zuecPlobAqRw4/SwnGWU3G5l8XF1uQRb1kv1buLeDXrqlfqxokuLbTJFDvg2cLCdSQQF3lBOoxbANye6qTSR2zkLlghmTqiidTPyHGkIm5wvnUCdaQUl3plOonsGbIql75rOosBTsHCjYn2KeVNugE5mNk9TEAPRfQf9QB6ItbFADOc+H8oZo4w2kYOA2zPFRX6DRMnZ/rr9exmBgfOPRQBsWznIxZvHmMsoaRafagH5U5FDM5FHAupraGOtU+9kSWC2dknktdaPh8CmL89LuR/N5ntujsy0kNATldCCCWCNIAJJNB8sdCIAlvUQCITIbIB7MeqY5wuA8O90ESZC2SIOF83/Tn+w4gC3KxWZDwFoYkC/JgmS9hEL/woPA3DzPhDnM/0sLcxYz94bKXGczC5/6XepelLy/g1Qs+pcV7XMYLC56aqJ3HoBlHyLNjK4MjKctlY0cBHEhZChd7uQqU5DVQEqAkQEkSSiK9fZcw4CR14iSXCWpASoCUQIBo6QGiohVCdGgSKvYGqBhQMaBiuegQH77YC4GL1SQ+xKVp+F0jiRPZxlmMHzCyZb//ERgZhIlm5iZvgZsANwFukg8TATmpJTkZBIwY8BPgJxAxWnrEaGCXeVuEuNEk3CxfBuQMyBkEjlJuZlNfdA4crd4BJOMkxhGIGhA1CCTVl6w0gawAWQGyUhZJArayKhElICxAWCCyVK3IUsEmIcI0vGv4G1PbzYnpXAvoHNC5taJzFibOJud0nDhJ3mRs5rhdN2xTjOzSp4I2viGcqnFa5rpWx7l+Fbx68V/vxsX8P9qx+WcoPm7Ex538tN1t8YlEufbHXp6eFi6UDp4LcS3+nVmTIHlLyYiTAIuCsjZI/iXFVX9e//HL2QUQd/idCYg11jnWuAvkFMjpusUax/PSkYcba8FO14CcTsZLAcp1ZKeXfepHvf5ZxICjwkFXCC4vKbh8wdcd8WaPj1+MbeOEiJ/P70Sil80z3+ZFYucwfjXiVWcLYs5PFXPeA1oPtH5tY87Kz3hPG3l+Ejo3is0ZL4xpQ6jPPqatBWc2pB
NahfwGEQfdlB7Qi/w2BowToqIQFa1hVHQf6BPQp3WMik7CnEb/8Nsy+dNyRwYk6inCdZJKxUwKiBSE7iB0V73QXfouV4jYjSKWGl4JbzdYMK9srgWvjLe4nKZwW+OFeznh8gas4oL6ODVPzWU1D4hqcW7jmGrI7p1JNqX0VcOXxQ1ztjcGKy8F1rc52PH7xLaxN+ObkYfhoWq92Hy7Wbc3m7+r5quvJcC6yB8gPC3CRxVGWJowdz4B6adA+riaSCduaJC4oVsVf619xUE+eT6QhWM6N8SaIANgPCXG7yuOsUwwSVZtgHouqE+rCHUaiwBsZ8LWKpdfFTbkiqOqCHLn1W87s8tQRNe83nQyLA/VP+PDn2rnUc/+eKYCb6B+Zz5PfvxoWEemHgGs88N6tBRY626tM6TJgbLOr6zHS1TWOhzUqTW4J8vcYADdZ0b3PZju4pOKQHHnV9xTs+LJScfxc3NjmBFRrcQk8cN6AlI9oKmPqamCDHBIXVqn1KVx6UobybjN0OpjF33DNIzHFh+oi5MVxDdsRK9lY9yqekPNlFsD9bA4Qp22xbAbOHySXi+1NNPhg1L0eagESpJOJs+J4TupEzbuosjJZYhIE1Jrdfn4Io0BZzU1VSXzBDFkXMSpTUp9Ys9q4pNS6wdCpvkl1/wnwvRe/qqkLkFKrPtisc5qKC/t4bvcRpGuXEr3G7l5KZJDjvMtEURWkcfJtESYeilOkAM2kLSytPOFnG/mNn7naLKeSizadDmURHP7AJWEHkwDieypHS+5m1GgEJptvuMzvM1nQ4kVciqRMpEtPXqtceglG5yKMjcWCVH452BAZra2MHJRpr850Y14akpFFOLLuCF1yaqFwjSeWWFS3jizyghNaafNtIXORGE76PMtZ4Qu6Unto8JqQcdWRce4HzKfdvEGuPfS9afWp3In6VF6SauhYxsDuvUYkwsixZ7Qim7s55ief7vdHBxS4P5JUmZmvhYQzvLp8MuJSFLXUSXc5r7yyqlmQ7nYVS+ULGZzX/m7qV7sNtQa5RXoLeXvph1r+vfBHMT5GUWZxvaiNnygNqz20tpTL5TfPH5tq+MdjCUjvh++zGg1v14eJ0ww9Rd9N4gYNs5xbJOhsWOcJWZhRjIT3DzYf9PEewd4D+9brztor4Earw/2Dnbf7uPu605rN775JiWgjY3H/wP8ci/i3AABAA== + H4sIAAAAAAAC/+1dWXPbOBJ+z6/gcGa37ElsS/KRxFXzENvxJFVJxWM7mdoap1QQCUlY8xoQ9BGP97cvAFIUSIK6LEuk1HlwRIDC0V838HWzIT68MAwTeZ7PECO+F5qHxgMv4oUOCRm/+us7v3x8JW7DNmGo42BeymiEZVnPw+yjzUu8yHHiEoqC/qXvO4wEvLwhC/vExse+x6jviC66yAnj7zvEuw5lN/KS4i7FYZ8XmLuN0IzL/Ft5hxxWPDheavmOg4IQq63Jij4mvb4YudnabwR3ZloRIA87w5bU1mQ9cggKj33Hp1IKj6/Uyg6iYb4rWWGjsP8Jez0mBt1sFOpw2dcYCv2IWmIC5hn1Xcz6OArNzF1d4jjFZomQdzNb5DFMb5C42Wy62UYc3MOenQKrzrfnvQsvc5Cqd9z0dIMX0o8oxR4rqXXRXVkN8UpqqEDtgquJfihh37/V1zCuuk5Jo1wi0RAApS4LLldCeVe+dVlxS2yJbSNfo+htWiys4MznaHz25UxkgYFC4wemfhaWAHPwPYZ6WKsggWiFIptEopv9Yp1erzgqNqaYir67js+yfYaYEhx+ucGUcklnbSFvD0ObEG1R/DeXJMvq58AQfdndz6etvbf7x8U7Ei1uFCrEqjBcKYrYqsqrr1XwaRVqQ4asaw34OfhHTdshLhkz6dO3b3YbtZi0cpXV2jBAFi5bxHilsNlmK1uadFMwmZDhIMD2Jz5IrYIyRHuYTaB6+C6QEg4jd8Pj1tT2kIvlSNuBb7ctvpsg3gk9TD+1rSBqRyG3qHaIeakdtuXicMibaBOKGH5I2/jtyvwlvbgyXxm8TVHG/xNXlhPxiVBRkny8Mh83jc69sZF2t6kB3acuknsPI64YhTC34m2D1foUWUyqUatEBU7T9h4e0n4fH82S27nQr5WtOKcXOJDgTmUIKghXnii6jjo5ANp8x5Y7WXuwSDz81+9w0Ylbt7iiMLzFdzdKrLBMtK+MSYEZ9MWLONoclCtv88pbJBLlK2E1QIiXrJWGoGxVnhmA0pWR9QUd9R1bs9eLWZ1y4qbpS9Zd9EmX6SuZZFzm8dlX46tYr7K7NEt3h4c8CUIU2yVcJ/QpK0ovpkBtdh/IHolnkxtiR5wxlbKhwb2SyGdHdofuSFgcVyeyruM1vSh0002YkBBJDjJTaJn+W+V8L+V00jPRT+Ee3U3CboYayrujTKNQqIOdMl1y/N4Rkg5Is1AXM2Dt12IKXOQFyoynWh2WOAm9icXzGMM/XuRLU9viC2yAUd5wktKPfK2Qjqq++ty/zTij6YAuE5NT9WmUHcZ1F+SHrO8fxNaSQAE+aOKDtirig2q+FuuurgI80Ip5oDXzRYhnUcyXzI2s32F1ubfRpz7j64bd5qIl/sD/ULiYg9k0VCvt4ideWOqX/PVLuy28m/bAAr9v5l0VY2fM0Ks24FXwrSZklaNVT0YaBrZsUcKIxRebsnBDacCgrM4PJNXT0Aa5colVZ7u1X6i8T4ggFw7nuJNOey6E+TI2MuL1gDUvmTUnG0jkkfnTziYQ53oQZ409AnsusOcied6FBziLpc9NoM8j6XNxLOP4c8junameIl0Wd7PYkvCQjf2H/9v6/Hnr5MT48OHQdQ9DDcsLEOP24pW3OdiO+8S2sWfO9vRHH58r0DLtam4NlqEsQLNO2MYWcZFc/Fo6gnetN6Ck8lI6OkcOGndfyqHME8pXLsP2bzWRXXHrVyrXqpHQfBPWa/z8TgNPeVw1A58XuR1uC4V6STnSnXt2dM9HP9YEgCcC+Kj6ABv/AoifAvHx80GsehGzA/1p1JN6AHkikE+qa8cxvGDFTwP4faWteBCEA4xnwtgql18VrLjiqCqC3Nn+dWd2GYaM5mOTY2VYHjJdarbUU/JEpsuWkiEMTSyf+6YlT/hmC/UX+6C4KyNfpsZLeXq6TiK2w1RMhxKNIDrkYyY3+HCidKq1wuHoOXCopkkYOwYoyUxKclw5Y03S7tYKhRMwVVCR0Sry3qxBDuYfEVedaj1NpsgLBZJ6HFOyqamCHE3I0VzRR81ZO4WnzJPkaO7BY2bI0qzHOcHUDLS8ZC1PEdZOJPU9Yzgk7C52fXrfvvXpNfF63L9g7c49w2ExLfZpB6kKybLE5VYeX8BhQ1FUncOGsU7AecPlnjdcHgqrcuTws5RgnNVkbPx5cbEJWdRL9mvl3gJ+7Zr6tapBgms7TQL1Pni2kEANCdRVTqAewzYgt6c6mdQxC3kSzJBMXfFk6jliDJmYK5xPnWANKdWVTqmegzVDVvXKZ1VnKdg5ULAngn1aaYNOYD5GVh8D0E8C+vc6AH1xiwLA+Uk4f6
gmznAaBk7DLA/VFToNU+fn+ut1LCbGBw49lEHxLCdjFm8eo6xhZJo96EdlDsVMDgWci6mtoU61j83JcuGMzHOpCw2fT0GMn34zkt/7zBadfTmpISCnCwHEEkEagGQySH5fCCThLQoAkckQ+WDWI9URDvfB4T5IgqxFEiSc75v+fN8BZEEuNgsS3sKQZEEeLPMlDOIXHhT+5mEm3GHuR1qYu5ixP1z2MoNZ+Nz/Uu+y9OUFvHrBp7R4j8t4YcG8idp5DJpxhDw7tjI4krJcNnYUwIGUpXCxV6tASV4DJQFKApQkoSTS23cJA05SJ05ymaAGpARICQSIlh4gKlohRIcmoWJvgIoBFQMqlosO8eGLvRC4WE3iQ1yaht81kjiRbZzF+AEjW/b7H4GRQZhoZm7yFrgJcBPgJvkwEZCTWpKTQcCIAT8BfgIRo6VHjAZ2mbdFiBtNws3yZUDOgJxB4CjlZjb1RefA0eodQDJOYhyBqAFRg0BSfclKE8gKkBUgK2WRJGArqxJRAsIChAUiS9WKLBVsEiJMw7uGvzG11ZyYzrWAzgGdWys6Z2HibHBOx4mT5E3GRo7bdcM2xcgufSpo4xvCqRqnZRs7/PP2y81/XNfqONfbwfbLf7wbF/P/aMfmf0Px50b8uZN/bXdL/EWiXPurL/PnhwvlhedCbot/edYkkN5SMuJIwMIxrQ2kf0q51Z/pf/xydgFUHn55AqKPdY4+7gJdBbq6btHH8Ux15HHHevHVNaCrkzFVwHSt+epln/pRr38WMWCtcBgWAtBLCkBf8HVHvP3j4xdjyzgh4if2O5HoZePMt3mR2EKMfxvxqrMJcel5xaX3gOgD0V/buLTyU9/TRqfnQudGsTnjpTFtdPXZx7S54OyHdEKrkAMhIqMb0hV6md/GgHFCnBTipDWMk+4DfQL6tI5x0kmY0+gfh1smf1ruyIBEzSNcJ6lUzKSASEHoDkJ31Qvdpe97hYjdKGKp4ZXwBoQF88rmWvDKeIvLaQq3NV64lxMub8AqLqiPU/PUXObzgKgW5zaOqYbs3plkU0pfR3xZ3DBne6uw8uJgfZuDHb9PbBt7M749eRgeqtbLz7eadXv7+btqvh5bAqyL/AHC0yJ8VGGEpQlz5xOQngfSx9VEOnFDg8QNrRbKrbqBfPJ8IAvH9MkQa4IMgPGUGL+vOMYywSRZtQHqJ0F9WkWo01gEYDsTtla5/KqwIVccVUWQO9u/7swuQxFd83rTybA8VP+MD39qklA9+3OaCryu+p35PBnzo/EdmYME+M4R36Ol4Lsy9jtDBh2o7xzV93iJ6lurUz21RvlkmZsQwLwomN+DMS8xJwlUeY6qfGpWPMnpOH7+bgwzK6qV4CR+xE9Aqgc09VU1VZBJDilQ65QCNS7t6UUybjO0+thF3zAN47HFB/PipAfxDRvRa9kYt6reUDPlHkE9LI5ip20x7AYOn6TXSy3NdPigFH0eKoGS7JPJl2L4TuqEjbsocnKZJtKE1FpdXr9Ih8BZTU1VyTxBDBkXcYqUUp/Ys5pApdT6gZBpfsk1/44wvZe/YKlLtBLrvlissxrKS3v4LrdRpCuX0v2L3LwUySHH+ZYIIqvI42RaIky9FCfIJRtIWlna+ULON3Mbv3M02VMlFm26HEqiuX2ASkIPpoFE9tSOl9yNKFCYzRbf8Rne4rOhxAo5lUiZyKYevdY49JINTkWZG4uEKPxjMCAzW1sYuSjT35zoRjw1pSIK8WXckLpk1UJhGs+sMClvnFllhKa002baQmeisB30+ZYzQpf0pPZRYbWgY6uiY9wPeZp28Qa499L1p9ancifpUXpJq6FjLwZ06zEmF0SKPaEV3djPMT3/dqs5OOzA/ZOkzMx8LSCc5dPhlxORpK6jSrjNfeX1Vs2GcrGrXijZ0Oa+8rmpXuw21Brldest5XPTjjX9+2AO4hyOokxje1EbPlAbVntp7akXyu8rv7bV8Q7GkhHfD19mxppfL48TJpj6i74bRAwb5zi2ydDYMc4SszAjmVFuHuy/aeK9A7yH963XHbTXQI3XB3sHu2/3cfd1p7Ub33yTEtDGi8f/AyGdDFRIAQEA k8s-resources-workload.json.gz: 
H4sIAAAAAAAC/+1d+2/bOBL+PX+FTtdbJIvkYufhNAGKQ5M02AXau2ya9nDYFAZt0bYukqglqTwaeP/25UMPSqITO2li2Zn+Uosj8zXfDD8OJ/TdiuO4KIoIR9wnEXMPnDtRJAoDn3Hx9Ps38Thel69hz+eoF2BRymmCVdkwwvxXT5RESRDoEori0TkhAfdjUd5ShSPfw0ck4pQEsokBCpj+fuBHl0w1ox4pHlDMRqLA3W4xV5eRa/WG6pbunCjtkyBAMcNmbUowwv5wJHvubu224hs3F8QowkFRk1mbkqPAR+yIBISqWRivm8IeoqzalBJ4iI0+4mjIZafbrZoMT/oaR4wktC8H4J5SEmI+wglzS28N/CCoV+vL+W6XiyKO6RWSL7vtsFxJgIc48nLFmuMdRu/ZeUWl5htXQ1vn5ewnlOKIT5CG6GaSxI8mSKjU2mcBE3tX2Ihc2yVcQDeYUKmYkaRQgCErK1eAUL1VrV0Jrn1P6bZVlRi4zYulFZwSoY1PRI1EFTiIOd8xJWW1xFgoP+JoiK0AiWUtFHl+IpvZrcvsuBJa8TDFVLY9CAgvt8kw9TH7zxWmVMy0pf8sRn08Cc5CKLXX3iqXctS/tE0e4ziOsfdRzKG1qxzRIeZlg6wapXoR38RqPCwJVy8iWRSJ2e1GKMSqv92YeN2+8C5INEUP8k/dfpx0EyZmuMuwKPVYV4HlQFTU9Sni+K4fJKKb9N2F+yb9eOGuO3nNsjx/uHDHsvWfHRKt5oXrou013akhJUncDfCAr14TehkQ5K072acuv43xWlFzNxeICg4uk54eBbmWI6A4QD0czNi7ojEpyD6b5aoTUij/V8NZc3q3zqoeg7tenfgBoSFSrpT7oZxEiZ76a5nzOUF9TqSmtmqvaB90ktd3dyfaHI/dCS8KzFwaa0rZEwhYKWyWBGPjqQxpPpIrCgk8C9zlsE6E77W0pWSfR/6A24VcOU336PSL80VCrGxoPF//7qp+DFHsTXBXjFBenz7txZTuZIt+5PlXvpcIpzfRoWXvqrW43LMbdOOzer96Sf9SG2N90t0wdWZySioqcyUA7d+a7LJzt6zIhX0It+gGT+EbCoiK5ii3AEpa0iQsBWR4iBSHaNdkehGzfk2vYq06NIsRl8G53thB2E1Mj6O6aFasbKVamtuWWIVijKqGk5b+KpyF4pp28Rm5LvHJvEPnqcmZeLrPDrXss/9dyUcdbS2pKl4njayzyC1gkS/LItvAIu9lkfW+PEQjGb8NplkplF3KAZ3XVzFtSbjgJ/8T/zY+fdo4PnZ++eUgDA+YhffEiAt7iSbXmS3DYvfr4cidaU3Iu2tnGJnzIjTFiNWR9zM3VFbQYwfs4b4fIuX8LBRPczaLAaXCc8X3DwP00Hs5d3KPqfBcjidYsWt99QtVvupe1XyV1uv8/b1FPZOZYUl9URL2hC3U5EnkG4v247V7hv8Q/oUzUPBTFHzYfAU7/wAVP0XFR8+n4nQdVY+PV/RHP/TBjp+m5OPm2rFWL1jx0xT8oclWfEq8xdBufQ/z7Mrd9DY7u2/beKeDd/Buf6+HdlqotdfZ6Wzv7+LBXm9re/PyLdsQulRbUrYRE+9fV4huFNvUd2+Kzz9JURbjzCKcqrAIcRYBTiUQFb570+32sdjQ3ge12KbGJriRhoPLmMLNf/68+fg5ZJz60XC2OZwcwoWTgsU5KVCBIMsZgdjhq1CP1XE94gih3gbFAxU6dC17vQmnBuuz4imf/wI7mbsTHzTNv/s/6Ym5k69uiFFzvBFiYQ59Jmd4Vk1ltQuBACkga77IOnw+ZIGnegY8bYLlguUqyz2az5oQqC0j4GppcXUMK8Iyrghgt0tutx/cBcgs+i0RltyszCKKIiY1addjvv+3iCDzCDKPljTzqGynkHk0TQL7NqQeQQL7a01gLxhniENCbxWB86OhoPC827vlmM1M1/Iq/yZEssAPhWr0AySuQ+J6Zdn6pHAHueuNYJDK4oFBvlIGaTVFIJFTpK/vAIeE9HVIX29y+vpEngF5c83JYNf840kKhiT2hiex/0AdQwbsEuexp7qGVPZGp7L/AGuGbHbIZodsdshmh2z2hclmh2MDyGJf9Cx2jVwA19ImsoOTWr4EdjBayGEHaEEaO6wHkL4OJgsZ7LOkGEESOySxQwrSQqQgQR77zClIu5CCBClIkILU6Bs0tZE4Z7iP/SvsHKLI02iCk89m5iMdxuyJuj6XFE/sgUDZjU9Meryyz4QOHDLIDNtzTpGm4aDqhmYnPVnVmV1z0Hbjc5TiH27YzjElksqAypuasvQcBg5a/xFaP2mi1iFBDRLUIEFt0RPUVuWhlLpqarU4i4owl+csXarXcH3Sp6+mMs6lAsxnP4wa/y5AKZvrZvG2b2sXkTyIcmonUbZTqPkcQv055SlUcQb1GpPU7sUST8kBgGkpwXT4omDKHFOsWSagacnQdDQf1wRwWk44Hc/VOXl6CwyoWjJUfZivkwJYLSesThp/u2l6Tvdvjcwm3kMFSWKQJAZJYlPbLGSL1bLFLLeediBd7GXTxeDW0zRdrDOnS08hODwfFgoXn9qZ5/1ZgHD76cuzSpkABpxyDpxyfRkY1R4wKmBUwKjgiBQo1bwo1QPJ9sCpgFNBnO7l4nR1K4TY3DRM8i0wSWCSr4tJCjhCbA6IZFOI5HthRsJ6naMMjAWjlHN7KpSb/00QUEygmBC2W1iytQ9kC8gWkC0I2wHbajLbMv4cFwgXEC6I6c0ppvewrUKwbyb+WS0DAgoEFDLx4K8hgYG+ZC7eNBd4AdV8eaoZA9WE2N7juVUbuBVwK+BWcDcAkKv5k6uHrswEfgX8CkJ5LxfKy+yyaosQt5uKW24BtwRuCdwSLgoCitnY+J31xmbgmcAzIY63UFxrG7gWcC3gWnB9HpCtRsfzgG8B34K4XrPiejWbnBDfW0l77bL+CIfoK6ZM96y9o4vlrzPKb3iIXqrKxKo2LFCpFhwqliisfoRF18VxGAdiiNEwtzLBJhg3sFxAwCBaJbLK8Y1ChIcHKAkqq7wyH1Nq477ydxtxGac5kNxjQU+dz5qfGvLUlk32akhJLPVW9a/uHwmmt8oAbSxX3osqPXMZn6J0iG8qF6nmXstofqUyLmPmUBB8TSeiDOOH5nTCZNpncQoin8204baFkxZLuYffBxbmOsGe3VCo0re8nmklJQezqES11NXudjWJ7/lR+nFOmdbs2tt6SHvp4mVqWRiLUhH7LeuQW5bWei7L7C+n2NBDMwQJw+e6ItNhLQRgWs8MmJw1PhoyirEWbFZiJmHdeCQWnHuwZGffY4PTAsaWBWNqzh4Lr+fcJ40r+yGA3LJALlNrI2E3eRdeSJYDiivZNmCsaa+vtJMS3oHebrsRud5oZztQsStOy9zS12JfsHZafDmdkjwWY24D3V3j1L7dMh62zQcjRuruGp/b5sN2y5QYV/ZsGZ/bnjaIb9kYZIzEgNuDrZgVd8yKzVa2dswHY+Oy55n9zfpSmr7vREXg3C/nR+keJb81nIRxInZGZ9nPFDq
bzn9N63ETFWx2Ubuzg/YGre3t/cH+Pn6738dot9/b2cP7Pdxp7+mXr/L9UWtl/Bebr3b2vuAAAA== k8s-resources-workloads-namespace.json.gz: H4sIAAAAAAAC/+1d62/bOBL/nr9Cp+0dkr1kYzuvJkBxaJwGu0C7103SHg6bwqAl2tZGElWKyqOB728/PiSZsqj4kcSWk+mXRhyJpDgP/mY4I9+vWZaNwpAwxDwSxvaRdc+beKPvxYxf/fmNXw43xW3Y9Rjq+pi3Mppg2dYPMfvN5S1h4vuqhaJocEGIz7yItzdk48BzcZuEjBJfDNFDfqye973wKpbDyEuKexTHA95g7zRiW7WRG3mHnJaaHG91iO+jKMZ6b5IwwF5/IGZut/Ya0a2dEyIUYn/Uk96bpCPfQ3Gb+ITKVRhu6sQuovH4UJLgonjwEYd9JibdbJRouOoxhmKSUEe8gP2ZkgCzAU5iu3BXz/P9creeWO9msSlkmF4jcbPdDIqd+LiPQzdnrP6+/fB9fDHGUv2O675p8mL1E0pxyCqoAbqtonhhBYUKrp1zMTFPJR6QGzOFcdH1KzrlK5KMGKDRiszlQijvGu9dEm48V/K2MU7R5DZvFlrwmXBufCLyTWSDhWLrB6akyJYIc+aHDPWxUUAi0QtFrpeIYfbKNLNcca64mGIqxu75hBXHjDH1cPzva0wpX+miLozrw0gnRF/fE77M1pZF8Xe+oqwop5lCEjnsT6et3cO9dvmOXB3KXBzJeqNE4LbDxeG5nHrVw8K+jGyO8ZZcDcxUjdOtEjVmyLkyiNGYIE2zgL4XeBOW7/Tw7U7jlSyfdlXUpDhCDq4yrJwo7EizVWxNhympccxwFGH3I5+kUWkYon3MplAHfBtJHsVJsH4ZWlbItbwTogDL2XYi4nYcvsshPhA9yv/qOFHSSWKu6Z0Y81Y37kijdcS76XgUMXzv+AmfJH13ab9J/7y0N628Z9GeX1zaw8vwZ4uE63nTJh95Q0yoT0kSdXzcY+s3hF75BLmbVvZXh91FeGPUaycn8MePrpKuegNyI2ZPsY+62J9xZmODCar4X055w+reWZXTugzLEt8jNEByK2deIBZPynDptmzzO0UOkzrUqpDe07y/+/ts9OGQK+XoUk5mOLQreuAidKWBnTEpx5EU1ZkMRC5SDvIRXZdc4BBIQgNpM2bmQLrwA0Rdfpn1xVsy2/0Ll0fOkY1FLvjk/eMFLLCy7ctc3qrdZe7FrbTPbCCAOvFdAwoSb3fKIa1hLEk7H3g9ZiYyiUXt9ucv1hdhMYv4heV71P04PEQUuxUoMCaUlVdRgUOp72JEL3S9a89NOJasxInZvdLFKc7sFt16cXle3cS5UjtLedHtIMWIYknGWGYLgTM/VY2Ec7QrfTbzK9yh22lw30hS+XCUGQRKbA5VsuST/jGSrlmzRFO+gfEx5RyUEYz2xjNp/hJfwqxi6j0moKC18dZct7gBjzAaV5y09TduM6QLbyafkZuCm55P6CJVOV2eHtJDRTv3fkj6YF9pS8qK1+mdl53zFjjni3XOmy/TOX8yl6g8l0k+Uczu/JkiBBflXUxpEh7BlP/yf1ufPm2dnFi//noUBEexAatEiHF9Cav7zLZh5dna8/njZ0kYemHf+kzcKm88FROjLXcyS1Tk0bzv7GLHC5C0fw2Tc31l1qGUeCH912MfTbovh0/2CeXGy3K5r2cbb/1Cpbl6kDtfhQJbP703cKgaHBY4GCZBl6tDiZ6EnrZvz8dgM4RcNndbq8bd4/py9+zhWCQweCoGt+vPYOvvwOLHsPjk+VicAiV5OT+jPz4UFAcmT8XkD/XVY8Ve0OLHMfi0zlr8nzR+vhosrjrDekYOb7vbqLm/iw56jZ2dw97hIX576GC053R3D/BhF+83D7av3sZbWTg53sqOJP51jejWKCLx7s3o738IUha+zoLXsnEUvR7FriUh6/Xdm07Hwb4vG2VEO23otB4UxptKRtfB2mRiaF2IwcDczGNuCkdh9WRzzTmrLeb2Lz9vz7+GMaNe2J9tDatPbGY95XZIErL15RwXP3xY/ODRmgyZGg7VYoZkUNRo/Oc4cyuPQXFPBtltQ0jkkYeXkHCw4ISDGkjR8XNJUb7yI4nJYEcnO6C//4t0+WqKW7f4OzO8FWBujZxYrPmsPNLOyNXhOMjTEuSpDVZpBaRoG7T0VWvpyTKsvsobAml6cdL0AWz+S7H5oKMvVEdP7RXIxvxDZJjWKxuTojAWnDTzMQ+iGEiQrQnZmi80W7Oop5CtWcrWNNRS7kC65mLTNaGWEmopoZYSail1h9KyRj5PgANC76Qb4YV97jqyTveOYd358TGbx+PJx/gbJ4kGL+BWQ10MxTQq/B8orYTSymlKK5XkQnXl81VXLneFX0qB5Se5ilBjWQuvXe5t4LW/Uq/dqIrguJcc97Lfvgt++2L9diizhDLL2Vx1KLN84WWWlUASEq7rU2mpAOajGAzFljUvtnxCHkOl1uO4XOt6y5TXUHJZ65LLJ9BmqLqEqkuouoSqS6i6XAVzA1WXUHUJVZf1rLqE1AQowlxoOc5zl3flZ+iQ778EkWqDnYKyTNBbqMxcftUXCNQyBeoDbARQqwlaC+Waz5LbCRWbULEJuZ8rkfsJRZsz537uQe4n5H5C7medcz/bSkmsM+xg7xpbxyh0lTTBEWE9E0GPo/iRvL4QEI/7PsDs2ieFzs/sM84Di/QyxXatz0jBcGB1PXNDo0ezOtNrBtyuf47okyu2dUKJgDLA8pqmiz6LggPX655AOjfXIXH0JrQYsSJjWdpqJZCaEwYheRSSRyF59LUlj66Lw0n5Xd310alkiJkQkA5VqE4d/6rv8D72EHj4J7dHYrhOFoH9tqEOJa3SqaTpTLIWR5LFA8mNFTiCfPJM1wflhqXQEARn5QXneKGCkxmcSPkTIDkrLDnt5ZgcEJ3VF52TpRodVwUxQIJWWII+LNf4gAitvgjV/0cW0hPU35UU1vHTjJC+B+l7kL43tc5CHl8pj8/w4wv7kMi32ES+BiTyqUS+/SV98R6CtFMgznf/u7R/+ecTYtBn/Mz96n8O/OEUTfgm+OKBpcjOA1i5BFi5+RJA1QGAKgBVAKrgBBNQ1RJR1YRiCIBVAKsgWre4aF1ZCyFCNw2YfAtgEsDk6wKTXBwhQgdYskZY8j3XJK7AVjuTxxGoFKucZdEf5bVbADUBakIEb2VB1yGALgBdALogggeoaxVQl1Y+DcALgBfE+JYU45tSYSECOBMYHW8DNApoFJL0oLAR4OiC0/Sm+fAaQM7FQ84IICfE+uaHV02AVwCvAF5B9T/gq1rgq0lfOwWIBRALonqLi+plejmuixC9mwpetgBeArwEeAlfCAKUWeconvF72wA1AWpCNG+l4NYOwC2AWwC34HN6gLfqHtUDyAWQC6J79YrulXSyIsq3ls7ajp0BDtBXTGM1s+auahY/rymecBG9kp3xja0/kkq551C+S2H5KzqqL4aDyOevGPZzLeOAImaaLI9EQMNaBb
zK8K2UCBf3UOKPbfRSfXSqCf6KH97ERTnNBck+4QjVOlcQVaOnuqwDWI1KIsG3cftqf08wvZMKaAK64vOpwjIX5ZO39vHt2PdWc6ulDb829l7ayiHf/5ouRFGMJ61pxWKaV3EKLJ+ttGa2uZHmu7mL3/sG8Fqhz3bAWekZbs+4kuKDWVgiR+ooc7ueRBpS2uLQjuEt/jbUc2IOF3LUtGHmXmsS99LNS+cyVxbJoviPbEJ2kVqauWgz35zKhno1jZDE+EJ1pBuslRCYxjMLTA4c5xaZHLR6YY88ID1myD3UgOwrlCqUMGLgjmjuOCSR8rbTGKeojdluNgoSM3GbiHxyF4g7qnaK/Ia5pZbvNl7opXt3UU4W6/MMx5yejSXo1tjPZM2mVjVarllU8cqLvlD//C50DAuTKmqjHoq6lmHUocJknuRaisZ6yhe0Q3Kz1czcI1sqq2izC49FHoeUdPRwumB5rED3Uew97WBZ12B7R7/QYnj2nvZ3U7/YaegU7YMzLe3vpqvU+Vv2DsKL10Rx4ih6x/t6x/oorV39QkPVB64+32wuheX7QWSEyP5y0U4BdP7laxJECYftZ9nvHlrb1u+ZHFvrWZ1NrMTVTmRk1EZvD3rdhnvYPMROA6O93v7e7k6ztYub+85uC+2pm69zJN9YG/4fNN+eM6H2AAA= kubelet.json.gz: - H4sIAAAAAAAC/+1dW2/bOBZ+z68QhHloAU8R23ESLzAPbdIWi+lMs0mnC2xbCIxE25rIokpRib1B9rfvIakLZUm51IklN8dP1iHFyzmH33d4kXS9Y1m24/hhlIjY/of15VtPSzj9nvicGjIShkwQ4bNQCq9BBMLAj4XKApc3Khv1fEHOAwrSCQliqoTTkIp/eiAKkyDQEk6i2SfGAuFHIN9Vwpnv0SMWCs6CuHS/X7o38MMLo2ERCanK/0W1SbcM5B4RJGYJd2Vb7BPO5lTMaBLbvSzHxKeBBxVO/GneJX0rnZAkEHFJWqk5F89JFPnhtCZFzECHMxZ4q0XJu5inWkbOYxYkgtq9cnosaBRnys1+N6Xik9CX6rdDFlI7T7jZWckL2va9E1Zugz2Dy4OiOPsKrveM60Vml/R6Ka8rRSvTDPLLqoZsFpWdRgldFjD+R6qBSxIkZve1d2SphFNiJv6dxMKfLPPkRDAzmXGfhtpR65I59RKXfqxpk2wVCdzCkwqzk1j8ycSf4IC2kVI2tXImebNdMqTuXO7OOzWGtAVdiFJ3KlqOgmTqh58pj9NuHRReLAifUlFudqlXdBFxeU+czF9cJOc0oMIJoTonJHN67QagT8p/+2r/kv79avesv9k5SNLcX+2bl+VOTRifE+V6wp9TJ6ag9LicxQ+hKOj8O+IKxk0n0TqlUxp67/JyyjdzOlF4Yb+uunXhWcIXCmjs0yQMYQBav+v2GkNccBLGEbhQKExE0WnLSN0dg7foalKFI4Q8IoTs3Q9ChgghWwYhXA86J2JefD8U6Vl+CIMtdOlv/4OM2YWEF+vjqdVQuOOyJBRr1/DUAHZ9ndV2c/MoYHYCekUg6w6QHd4PyPYQyLYUyFyYehA/hIKfDM7yKp4rqB3lOkZo6w609Qf3w7YRYtuWYNsl+BpAwJyEZApoI5gggaOF66Jbz5KDkUJewJAEilWXDps4V4wH3lbB0mvVA+uzUgyAE2Ay4lKHcGn/fri0j7iEuNTLYMmjsQ9q3mZcOtZdQGDqKDAN7rkwfoDAtCXAxAEryqvjrhoADuWc8XUnal9+cRxZg5MByreX2wNGGgmst1IRjwtFJPBJfCQ9XnlcYdZzwuNKeR6JZx9oOBVyhPZ3S3Jal/1unAO/haJKgveceL7uzu7DgaI0k6rbQTuoB4rDAiiUGcs1gZ6m4ev4U7qxKnhCjWLJ5XS183LIJjy1ykr2OVnUZJ/7YY2U+9OZOPO9mloBvq9qpJD3376nTJRt2GbuILm+po4cB2RRFe0AbuaJJeFVWkv/NoiVTThhMHAyFJGCwgEiCt4BmDilFe+J5F3SFZK4NPFV8qqzgao9yqmCk0nAjBHCaUTVgDTVYevx/fES0AU0ttJqGFAurfN0GEnuRaVuSYgR9T6AUqpj8IcxkMM4l0jEQEl6219Ha/VQuIqEDwFC63xpvcircSRsFEi6UZi0rq/L7fhB5DSjmy8mos7pO8ChVWeQ8rOZPxHVhBSCP2atsk6JERHBmMpOUFybIxPw2KsZm4yLFURSY8/JgNoPPf/S92BGXCXaLI+KQIoGLMjCX4HF88S90C63AgBZYCe7a0Yechu6mrseX3KwyKO/oolLsqC3eHrhOixacZmAgN9WmiAT2PQNiakJMwaGVrJrEK2Ija7UhTldbmXm28+Vvisrow38PUb+Rv7uOn+rqczT0niWp2eViRRpvEzjejKFZI5kjmT+VGQ+uOs0a3+vnsyN/iCbI5u3wOYzPxYM0HnufE8I8HhAX+y+Go971n1o3ktS6oupy0IvdjSIb5jxe1ZAXyLtl2k/M401HouZldkWQ4D1QoBtCACQ/ju3FD/oN9B/H+kf6b9Tk3l5DB7q46JK7rccJH00bt8sjUNn78HbvR9Q4RXjF5Q/Bx3qnjao8c1mw58T5lln0nlxvQPXOzDgaXHzojHiGWDEgxHPdix4tBwKtbCs8Sjx0MMU3BQobWglaeMq7ma4dJyqH0MmXB/CcGnz60OHDdHScHPRknwlztt5JJb1Sf+hnGGIhSHWgxaVIBDgoJBiy6itSKpogKSEnpU+HKTP9Le3dSQbo0Sl9nRlM+lMW8/CI6G4qoJhQidWVZrihD2MEzBO+JnihJYOkGJ0sEZ0gCdNMUbAGKHlk6bDUUOIMMIQAUOEn3C35h5rDLftJaz3ZP1DAomWj6luWzRxjAdYcYMCo4pubFDsDRqiin08zoEBQqcOsLpTzpIof+/XQzYcnigW2NBTqd18IuVImcNKzVFoxeK4ToDrBMjore0lNFL6AVI6Uvp2nNC8i+tbmPjjA6m30T/O5XEuj8xf+B3grcv9KHtTqjyQG/gT6i7dgFr0EgjBgkEvBzbjLa8AjBvCBXyfJIYLHXuEFRzR4VR+MbKt6X/3X7n7+A8TfHj73tJax7k9zu2R4duc2zeRNb48Esl6W56+NFg8I8ef/enA7jF51hlkc5yrI5Nv/DTfaL/hM+j43kgk8i0k8ufymH/3iNzDJ/yRyJHI2yLy/WEDkT/oDZCrVHgXk1dI+wepfDXt8bnc/DIbknlrS+jAQMJxAzkQgLK/g23Egx7Ca/xipgsmkLLBq1ctfaRusFg84rsen1xRw9YUNWxU1JsuKmqvNUXtNSrqqIuKGrWmqFGjoo43GxCfnhzh86m4N4WBcHuB8EHD94sH+GJQXNHq+IpWDUWvt6K1zpFTUNx5z0p40MppU1m7+gMN6Mpy16k2CX76BBe+kO83xPeHd9F9w1dMB0Nc98J1r7YpP+LMpXEMZC5tCKw+p3PGl875UtB4vU2p57fb9IfSHdLsejSrXG8LqLYj7XzmdHt4X7rdQ7pFum2bbtUkO
uNcN0ryOfPTvObx+VHw0clfVhJLV0MWXm+yO5Oa2IIJbzfa+cxZuHgL0l00PEIaRhpum4anzJkyzhIh9Y2z3IdR7Ptcc8ixyLGtcexO6uVyHEgXVwd3drVP2pxdFf5ux+6MzslnymP9lgH9sQCAkqX2aI/wC30foGNhOTX6eUjlND+vTdB5FBBw/2nubbY8XmzY+7qOmkqqFXShFOvRCUmCFdUq1zJT6yylXiO8Mogym9jHEBJYZzomqPq3GTEYqUy9g6GMEiD+nlC+TFcKVyOLkvL7JemUSnPbZtZs5BrVrw6dUiQQfE4VsTIgDZ2aCrkzDiqUZj4s6oeA+R59HdTRda7RlBfMsuZgGr/mnkzLNbfcpWJVnaOh5UUSXa9w0E3PSgt9WW+CwV0mSJHXNBV4vNJz/K+sFXY5tdJcKavPnBpY98dISGL6SRdUG/ZsyOq7TVZfgfbc6BnxPsTqdfesafZeXXxyU+yc/xzOsJNB+42GWV/pMwXYiQ5U7JBd/drPWB+CjlRml26LfPdCxabpzalK8mURk5rtkRHE9HeNi6F50Z8X/0fG/755Mdw1U4zYZGD873sa9L5lfZBhouEgd9ZiFrxvFmzWMtgzL4ovmtoHntnerC0l9f2Xqcja/uvTUcqJWeD3u/ZILU3UdM4e9oeHE9IfjbzRuD862B9Pzj3qHY4Pibs7nnhjnfkyp97dnZv/A+pcQKEItwAA + H4sIAAAAAAAC/+1dW2/bOBZ+z68QhHloAU8R23ESLzAPbdIWi+lMs0mnC2xbCIxE25rIokpRib1B9rfvIakLZUm51IklN8dP1iHFyzmH33d4kXS9Y1m24/hhlIjY/of15VtPSzj9nvicGjIShkwQ4bNQCq9BBMLAj4XKApc3Khv1fEHOAwrSCQliqoTTkIp/eiAKkyDQEk6i2SfGAuFHIN9Vwpnv0SMWCs6CuHS/X7o38MMLo2ERCanK/0W1SbcM5B4RJGYJd2Vb7BPO5lTMaBLbvSzHxKeBBxVO/GneJX0rnZAkEHFJWqk5F89JFPnhtCZFzECHMxZ4q0XJu5inWkbOYxYkgtq9cnosaBRnys1+N6Xik9CX6rdDFlI7T7jZWckL2va9E1Zugz2Dy4OiOPsKrveM60Vml/R6Ka8rRSvTDPLLqoZsFpWdRgldFjD+R6qBSxIkZve1d2SphFNiJv6dxMKfLPPkRDAzmXGfhtpR65I59RKXfqxpk2wVCdzCkwqzk1j8ycSf4IC2kVI2tXImebNdMqTuXO7OOzWGtAVdiFJ3KlqOgmTqh58pj9NuHRReLAifUlFudqlXdBFxeU+czF9cJOc0oMIJoTonJHN67QagT8p/+2r/kv79avesv9k5SNLcX+2bl+VOTRifE+V6wp9TJ6ag9LicxQ+hKOj8O+IKxk0n0TqlUxp67/JyyjdzOlF4Yb+uunXhWcIXCmjs0yQMYQBav+v2GkNccBLGEbhQKExE0WnLSN0dg7foalKFI4Q8IoTs3Q9ChgghWwYhXA86J2JefD8U6Vl+CIMtdOlv/4OM2YWEF+vjqdVQuOOyJBRr1/DUAHZ9ndV2c/MoYHYCekUg6w6QHd4PyPYQyLYUyFyYehA/hIKfDM7yKp4rqB3lOkZo6w609Qf3w7YRYtuWYNsl+BpAwJyEZApoI5gggaOF66Jbz5KDkUJewJAEilWXDps4V4wH3lbB0mvVA+uzUgyAE2Ay4lKHcGn/fri0j7iEuNTLYMmjsQ9q3mZcOtZdQGDqKDAN7rkwfoDAtCXAxAEryqvjrhoADuWc8XUnal9+cRxZg5MByreX2wNGGgmst1IRjwtFJPBJfCQ9XnlcYdZzwuNKeR6JZx9oOBVyhPZ3S3Jal/1unAO/haJKgveceL7uzu7DgaI0k6rbQTuoB4rDAiiUGcs1gZ6m4ev4U7qxKnhCjWLJ5XS183LIJjy1ykr2OVnUZJ/7YY2U+9OZOPO9mloBvq9qpJD3376nTJRt2GbuILm+po4cB2RRFe0AbuaJJeFVWkv/NoiVTThhMHAyFJGCwgEiCt4BmDilFe+J5F3SFZK4NPFV8qqzgao9yqmCk0nAjBHCaUTVgDTVYevx/fES0AU0ttJqGFAurfN0GEnuRaVuSYgR9T6AUqpj8IcxkMM4l0jEQEl6219Ha/VQuIqEDwFC63xpvcircSRsFEi6UZi0rq/L7fhB5DSjmy8mos7pO8ChVWeQ8rOZPxHVhBSCP2atsk6JERHBmMpOUFybIxPw2KsZm4yLFURSY8/JgNoPPf/S92BGXCXaLI+KQIoGLMjCX4HF88S90C63AgBZYCe7a0Yechu6mrseX3KwyKO/oolLsqC3eHrhOixacZmAgN9WmiAT2PQNiakJMwaGVrJrEK2Ija7UhTldbmXm28+Vvisrow38PUb+Rv7uOn+rqczT0niWp2eViRRpvEzjejKFZI5kjmT+VGQ+uOs0a3+vnsyN/iCbI5u3wOYzPxYM0HnufE8I8HhAX+y+Go971n1o3ktS6oupy0IvdjSIb5jxe1ZAXyLtl2k/M401HouZldkWQ4D1QoBtCACQ/ju3FD/oN9B/H+kf6b9Tk3l5DB7q46JK7rccJH00bt8sjUNn78HbvR9Q4RXjF5Q/Bx3qnjao8c1mw58T5lln0nlxvQPXOzDgaXHzojHiGWDEgxHPdix43BIKbWidY+PrGo8SED1Mw02R0s+q4m7GS8ep+jFmwgUijJc2v0B02BAuDTcXLsl34rydR2JZn/QfyhnGWBhjPWhVCQIBDgop9ozaWRExd4wkJfSs9Okgfai/vb0j2RglKrWnK7tJZ9p6Fp4JxWUVDBM6sazSFCfsYZyAccLPFCe0dIIUo4M1ogM8aooxAsYILR81HY4aQoQRhggYIvyE2zX3WGO4bS9hvUfrHxJItHxOdduiiWM8wYobFBhVdGODYm/QEFXs43kODBA6dYLVnXKWRPmLvx6y4fBEscCGHkvt5iMpR8ocVmqOQisWx3UCXCdARm9tL6GR0g+Q0pHSt+OI5l1c38LEH59IvY3+cS6Pc3lk/sLvAG9d7kfZq1LlgdzAn1B36QbUopdACBYMejmwGW95BWDcEC7gCyUxXOjYM6zgiA6n8pORbU3/u//O3cd/mODD2/eW1jrO7XFujwzf5ty+iazx7ZFI1tvy+KXB4hk5/uxPB3aPybPOIJvjXB2ZfOOn+Ub7Dd9BxxdHIpFvIZE/l8f8u0fkHj7hj0SORN4Wke8PG4j8Qa+AXKXCu5i8Qto/SOWraY/P5ean2ZDMW1tCBwYSjhvIgQCU/R1sIx70EF7jJzNdMIGUDV69aukrdYPF4hFf9vjkihq2pqhho6LedFFRe60paq9RUUddVNSoNUWNGhV1vNmA+PTkCJ9Pxb0pDITbC4QPGj5gPMA3g+KKVsdXtGooer0VrXWOnILizntWwoNWTpvK2tUfaEBXlrtOtUnw2ye48IV8vyG+P7yL7hs+YzoY4roXrnu1TfkRZy6NYyBzaUNg9TmdM750zpeCxuttSj2/3aY/lO6QZtejWeV6W0C1H
WnnM6fbw/vS7R7SLdJt23SrJtEZ57pRks+Zn+Y1j8+Pgo9O/rKSWLoasvB6k92Z1MQWTHi70c5nzsLFW5DuouER0jDScNs0PGXOlHGWCKlvnOU+jGLf55pDjkWObY1jd1Ivl+NAurg6uLOrfdLm7Krwdzt2Z3ROPlMe67cM6I8FAJQstUd7hF/o+wAdC8up0c9DKqf5eW2CzqOAgPtPc2+z5fFiw97XddRUUq2gC6VYj05IEqyoVrmWmVpnKfUa4ZVBlNnEPoaQwDrTMUHVv82IwUhl6h0MZZQA8feE8mW6UrgaWZSU3y9Jp1Sa2zazZiPXqH516JQigeBzqoiVAWno1FTInXFQoTTzYVE/BMz36Ougjq5zjaa8YJY1B9P4NfdkWq655S4Vq+ocDS0vkuh6hYNuelZa6Mt6EwzuMkGKvKapwOOVnuN/Za2wy6mV5kpZfebUwLo/RkIS00+6oNqwZ0NW322y+gq050bPiPchVq+7Z02z9+rik5ti5/zncIadDNpvNMz6Sp8pwE50oGKH7OrXfsb6EHSkMrt0W+S7Fyo2TW9OVZIvi5jUbI+MIKa/a1wMzYv+vPg/Mv73zYvhrplixCYD43/f06D3LeuDDBMNB7mzFrPgfbNgs5bBnnlRfNHUPvDM9mZtKanvv0xF1vZfn45STswCv9+1R2ppoqZz9rA/PJyQ/mjkjcb90cH+eHLuUe9wfEjc3fHEG+vMlzn17u7c/B+xNxywCbcAAA== namespace-by-pod.json.gz: H4sIAAAAAAAC/+1dWW/bOhZ+768Q1E6RYpJbS16SFAgGWacF2t5Mk3Yw0wYBLdG2bmRJl6KyNMj89uGihaRI2U7SLNfqQ1uT1CF51o+HpHT9wrLs09MgSjKc2u+s7yervATBP7MAQaEMRFGMAQ7iiBZekyJSGAYppk3YLysvZTXDLAjxh4hUOqtVqQ8wSOMMeZBU2IconkI8gVlqC21gBIYhrccog0L5JPA1pYEXR7txGCNKEI2HYKWzarmOQ/7q91ct541IOgJT1vF2NRfrtbUdQoSlIeCrhLXzQToZxgD5dl53w/49IX/fMKZAP8DKaO1xBPEHn5REWRjyEgSSyXEchzhISHmHFdLp7MYRRnFIGToCYcqfD6RnwyA6E6SQgAiy9pzlBcNtLw5DkKRQpCRV+LWaMQr8w7gSJmeyIrAL8tvtCQWXxQTy31f0d8Gdkjabg1v+rEZ9UpYhmECAhakKpR8wREw8+uov8YXEYFaVTuKL4wDXNMfGeaG9myEEI2ztgMi/CHw8seU2R8FP1m4yECpyTUDxBdeBfI4V44E3gcfBFMZZbTIe1csd4J2NUZxFvk42MfoGwgzqq1LBtEjZS3dz0+sNREVlCu9214miu5urVo/ofue3jU1J6V/6vR7ogkKFKwnMNEbbh14wBUxypcztUYymTHCEa1N4mkIUQOGZMcjGUFaqKbgsZul0ROWZBlFRIRZTUaocobKYEIc0iUP/IxhC2WjUFp8AOoOMf1QVauppVP1NRfUdd4bqOzXaExiMJ1ikxa2hW/2MiHqfg1BVF9nUc84lSRCNj7kWOrpyWUmq+Qju7pzy2MKxheElFlTDyquEeQgzMRBDIBrPIOZWxGpKR3Rhj+jdYUy4kMoKQdXhiLgKie82ZRFr/Slm/p+YRhRBDxOPJrU5psNROBoncrzi+hvA0P9dU8OsOfRkhnLJABLmhKITado+HIEsxCoxPlk+xeLPqlIfRLJOyQ7rFeV5mgBioEqTLAqYBe4kqTiuG2lcuZLIKsX5cg4R4uH0Wn6mtKA6F+S5FT6Kh0l0tjZGEEbKOJmy+/CyPklBW6jEpLqb1UX6vYJhSNyFsWPH2HG/FMsdukeCHtb6do19rxv6NioZe7D0eTUDq/xPEqd4FNDebVstPCB4o4hy/c7fhHoE68+wMuMjzA98ItDC7H9GJKTQ56iEZU+BubHan99uKxVx+UCDE0lrXoKUoDPiQKFq62Eoo8MuQYaOs0H+2tikwdLZkILliPZcDyyUskiHk3FJuHU2uxIBMXbVZMOwIqGTTSOZ1xigMcQNrISXCes7zaYrAcFGcIW4QQzIsNApwZsXMTojmN2DwTk8HV5hmJ5ignDDay/MUhJttn7Yr/L//rBXS7ey9b8fgpP5Yd98f1UEp3eviCMgI6Vu8uTNG1lKzRAgN4EUg0j1yFL8OwAeZhyVTNQO4RhG/kHZhUyXKCXDfvZ2k4JIbkzkMxnuAddKKVDQ8qNJMML1CgU8fiG8t+KRtUOZbH3hLPfrgJHDoLKYma9oSxuiLbHaZluK6crB3prTjAo3M9OSWMPPeWj3+BxboNsCXblgMaTba5Fui3RbpNsi3Rbptkj3lyBdTESSTgPcQt2Hg7rHOc/xcqBdproNIywGQxGxYTSsaibaKAgxfGi9NE2trF6U4E4zwZ2FCe42E9xdmOBeM8G9hQnuNxPcX5jgQTPBg/kJJrFvoEVrGlR39hqGRgAJOo8EAyTITLDAuZcFM7d+HMPeT1/E+jBV92RoId+CEQesWxaUQLkaSh0o0wILpNZPiGIhbhPfChFk0WQUxhWEt1MPxYxZ0qBoQHsPgc+ekGtihGVmEf+gcMOHqWcMhyzofITRmM+4o8Z3YXIpvgqbljogDECq9zwMr+U8qYWiaqGtYHri5asI9B/yZ+3Tp7W9Pev9+3fT6btUCXbCStmVYxkRXh1P5BXV5qe9h4ieWn58IUNo1uwrCuthMAEk5qBIP2UZxcszK4LTJPB9BbCXywryLMKzzbdkerl3WE/9LIcI9GFqHjFE2XRITEsrBml1t4AQdKBkqeSw85hyKABiYQrWIfDOIE6XUxS79yuK5FaiEAxiuaWx9xSkoRqGtYfiJFlWX7X/RA1kuaVy8JhSOYyfINflE4a3Yrr/dh04G53Bugc3e13Q6wDo9nv93mC97zmbo1Hfe3uWER5GEMN0LU9rBdF4jawC/xGj8Qd/y3mN4IjKYqvbSV+fA7RW5bKqRBarIA9tvTo99aCafBSkXVt43lXQKnKuJ6ae+p62NbyyVghfzBk/drTTkOuraUkt1+feLtcnlZPZJeIy22xTj543fZLs3PmV7CyUM+FxZBn4ufsg6rlEDN17SAX1OdBZBr7uP6iiLhFjD2Yyto4E7rwVdYQBFnPdBUDhDDDt4DzejQhnU58WHzyxKxH3ehWCofrdAqNXxy3sIUC1Q0zsbo0uI07Loa75nPsfrlTwTwT8ADJ76PyC4/+uYftjvdrTYFYld0UYNY620+P85pC6BQ/Ox5rSYruyXkNvEu1PE3ylvzP1X7opUqvhB3ZUUvyYjlqK6DmvI36KRq3LTwEo1FPS+N/5/o68jLOZi9QQ0h77uJl/G8mdbxtpwfNWpn2bBBJNJGFhXPcvCSVJ1S5LpY2wpDgIJjc371Lp3QE/gfB7fq5JmeesfSbxHAkmsau20UW8OYljH/nxEnmgf4H1nPkEx0LHNK6vSS83N7dewTWf4fh+H4Ezzzxq77qVmYxr0YwBYhFRNWS+8+mqhnpaR
IMg8oPzwM9AefDipn46hN5/rAZwCS4DxfkOM56qrnmLaW6aWN75K85oKq31zqj0LN9PakO8ApdNm62V7tCdEVkp6NlcXZYqjMc7IIU1LeIet9ZcczJSnMdMvPqUhlgo9rLCg9qhaRM+2GjxQYsPWnzw5DKUywMQin2wFiG0COGpIAQhZSN5HTFjI1XcX8LGNSRsNjUJm4Y9TQ28MUKcJpjTAHXmgztayNMIe4wMrZiqXt9RMZCOuyWHux3j9R3GaWXyGlw0BzYy46NmjDQDJzVjJTNeMmOmWbjJ7Kxm4KcmDNV0vUaViQ5PmTGVAVcZsdW8+KoJY83AWU1YqwFvNWMuM+6aib0a8Zcegxlx2CwsZsRj2mteT3Wrc05wNgdAmxukNQE1A1hrvszWeMpCD94aAZyS5ZnrAKYOzjVBOgOsmwva1R2JHuIZYF4j1GuCe2bIN8OT1qFffQp1CKi1o0pVk0Sjoias1YS3GjCX+VqtAXvNc+PzOUxBtLX5Tpq1qKyWnZofljktLGthWQvLniYse5QTUy0umxeXzbyN0UKzFpr95aFZLZv98OecVOO7ywtfHy9J2TXsJjpum6W8pyxlbxYc7rZwuIXDLRx+HlnKhzyW3aLi22Yr9fcvW2jcQuM2a9miNG3WciZM67UwrYVpLUx7JlnLFqc9k+xlC9VaqNZmMR8ri7mPEEWEt0livshHbucvFaG13Q6nRZsJnxhLvQmcgm8QpXz8zgYvpu8lLF4GzZ8jkbLSWrt6oYld9obhNAkJI6LxPN+MqxCV9t2Z+dvHDe/PLGp14Lr4dpyYUC1UzaYvabeOOADWfCtOgMdCbfW+ddH92n9mEF0x7dZ+2q5iviOVjuGlcuNa+ARd2b2a/JVujITfhJeMa6+FiMuIuT7ElzNNPNkdRCQi+3Bb/+porfHaUyKVQNO8YHAe4xfhLuvplHvWlSy5/iMeErRAFTCEmECCVSsn+kbPfXcW9/PgJOoLUXbG4vRfxShsubY2XFqmb5zLls9HqMhSeMwJaS+56AVu//Z3kQbI2JvFa5eGSPGpF2dMFbodtYY7SdvpSBowwyApw9fSK8LoqcEoxRZaw5xHD4llB1GQe1NZ9nc5qEi0pISRb+xmZyHpvXJ54JZqr/viwWKK/0smv4iRnAXJVxQeXUWe7uIXNyHnOZiQIroHNKG+yXL6dzCYWQp8T467WmwZVPiFCQiStW3I1+b6tELBnW5HxYgVg2idCVCbe9MAdIMoTMKYu6umiTkTY1+k6oUOjGr9AWHCan+6KpF7biZcfiSoteLHgF869i9qww1W1TNrem8RTe8tiYorH5hmWYhy6VR+Zia+WCttvviSDF/rVY8lAQEBqHo459lpMRhRvHZfWFaKui+5YNup9FzUedsRf3Q7Yo1wq9MV/u/kHzs4KeZA03B1tTP3IhIeiITFXtye+EO4tr3ui+MtxiKx72fMUqH21+PdfLVbLMM/l6/rtN5anwsUZa0cxn7KsZSdsV0Ie2O4DjaGXXfgr4PByBl1em6v1xkMuoMNb7AORrzxebnU7ry4+T/NN/Mvkn0AAA== namespace-by-workload.json.gz: diff --git a/charts/kubezero-metrics/templates/rules/etcd-mixin.yaml b/charts/kubezero-metrics/templates/rules/etcd-mixin.yaml index 4fd4bab8..fc6b717e 100644 --- a/charts/kubezero-metrics/templates/rules/etcd-mixin.yaml +++ b/charts/kubezero-metrics/templates/rules/etcd-mixin.yaml @@ -49,9 +49,9 @@ spec: severity: warning - alert: etcdGRPCRequestsSlow annotations: - description: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": gRPC requests to {{`{{`}} $labels.grpc_method {{`}}`}} are taking {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + description: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile of gRPC requests is {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}} for {{`{{`}} $labels.grpc_method {{`}}`}} method.' summary: etcd grpc requests are slow - expr: 'histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_type="unary"}[5m])) without(grpc_type)) + expr: 'histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type)) > 0.15 @@ -95,7 +95,8 @@ spec: severity: warning - alert: etcdHighFsyncDurations annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile fsync durations are {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + description: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile fsync durations are {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + summary: etcd cluster 99th percentile fsync durations are too high. 
       expr: 'histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
 
         > 1
@@ -116,10 +117,11 @@ spec:
       for: 10m
       labels:
         severity: warning
-    - alert: etcdBackendQuotaLowSpace
+    - alert: etcdDatabaseQuotaLowSpace
       annotations:
-        message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": database size exceeds the defined quota on etcd instance {{`{{`}} $labels.instance {{`}}`}}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.'
-      expr: '(etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100 > 95
+        description: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": database size exceeds the defined quota on etcd instance {{`{{`}} $labels.instance {{`}}`}}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.'
+        summary: etcd cluster database is running full.
+      expr: '(last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95
 
         '
       for: 10m
@@ -127,8 +129,20 @@ spec:
         severity: critical
     - alert: etcdExcessiveDatabaseGrowth
       annotations:
-        message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": Observed surge in etcd writes leading to 50% increase in database size over the past four hours on etcd instance {{`{{`}} $labels.instance {{`}}`}}, please check as it might be disruptive.'
-      expr: 'increase(((etcd_mvcc_db_total_size_in_bytes/etcd_server_quota_backend_bytes)*100)[240m:1m]) > 50
+        description: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance {{`{{`}} $labels.instance {{`}}`}}, please check as it might be disruptive.'
+        summary: etcd cluster database growing very fast.
+      expr: 'predict_linear(etcd_mvcc_db_total_size_in_bytes[4h], 4*60*60) > etcd_server_quota_backend_bytes
+
+        '
+      for: 10m
+      labels:
+        severity: warning
+    - alert: etcdDatabaseHighFragmentationRatio
+      annotations:
+        description: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": database size in use on instance {{`{{`}} $labels.instance {{`}}`}} is {{`{{`}} $value | humanizePercentage {{`}}`}} of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.'
+        runbook_url: https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation
+        summary: etcd database size in use is less than 50% of the actual allocated storage.
+      expr: '(last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5 and etcd_mvcc_db_total_size_in_use_in_bytes > 104857600
 
         '
       for: 10m
diff --git a/charts/kubezero-metrics/templates/rules/kubernetes.yaml b/charts/kubezero-metrics/templates/rules/kubernetes.yaml
index 7a18034a..79697385 100644
--- a/charts/kubezero-metrics/templates/rules/kubernetes.yaml
+++ b/charts/kubezero-metrics/templates/rules/kubernetes.yaml
@@ -25,7 +25,7 @@ spec:
         description: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} has been in a non-ready state for longer than 15 minutes.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepodnotready
         summary: Pod has been in a non-ready state for more than 15 minutes.
-      expr: "sum by (namespace, pod, cluster) (\n max by(namespace, pod, cluster) (\n kube_pod_status_phase{job=\"kube-state-metrics\", phase=~\"Pending|Unknown\"}\n ) * on(namespace, pod, cluster) group_left(owner_kind) topk by(namespace, pod, cluster) (\n 1, max by(namespace, pod, owner_kind, cluster) (kube_pod_owner{owner_kind!=\"Job\"})\n )\n) > 0\n"
+      expr: "sum by (namespace, pod, cluster) (\n max by(namespace, pod, cluster) (\n kube_pod_status_phase{job=\"kube-state-metrics\", phase=~\"Pending|Unknown|Failed\"}\n ) * on(namespace, pod, cluster) group_left(owner_kind) topk by(namespace, pod, cluster) (\n 1, max by(namespace, pod, owner_kind, cluster) (kube_pod_owner{owner_kind!=\"Job\"})\n )\n) > 0\n"
       for: 15m
       labels:
         severity: warning
@@ -137,7 +137,7 @@
       annotations:
         description: HPA {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.horizontalpodautoscaler {{`}}`}} has not matched the desired number of replicas for longer than 15 minutes.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubehpareplicasmismatch
-        summary: HPA has not matched descired number of replicas.
+        summary: HPA has not matched desired number of replicas.
       expr: "(kube_horizontalpodautoscaler_status_desired_replicas{job=\"kube-state-metrics\"}\n !=\nkube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"})\n and\n(kube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"}\n >\nkube_horizontalpodautoscaler_spec_min_replicas{job=\"kube-state-metrics\"})\n and\n(kube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"}\n <\nkube_horizontalpodautoscaler_spec_max_replicas{job=\"kube-state-metrics\"})\n and\nchanges(kube_horizontalpodautoscaler_status_current_replicas{job=\"kube-state-metrics\"}[15m]) == 0\n"
       for: 15m
       labels:
@@ -158,11 +158,11 @@
         description: Cluster has overcommitted CPU resource requests for Pods by {{`{{`}} $value {{`}}`}} CPU shares and cannot tolerate node failure.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit
         summary: Cluster has overcommitted CPU resource requests.
-      expr: 'sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource="cpu"}) - max(kube_node_status_allocatable{resource="cpu"})) > 0
+      expr: 'sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource="cpu", job="kube-state-metrics"}) - max(kube_node_status_allocatable{resource="cpu", job="kube-state-metrics"})) > 0
 
         and
 
-        (sum(kube_node_status_allocatable{resource="cpu"}) - max(kube_node_status_allocatable{resource="cpu"})) > 0
+        (sum(kube_node_status_allocatable{resource="cpu", job="kube-state-metrics"}) - max(kube_node_status_allocatable{resource="cpu", job="kube-state-metrics"})) > 0
 
         '
       for: 10m
@@ -173,11 +173,11 @@
         description: Cluster has overcommitted memory resource requests for Pods by {{`{{`}} $value | humanize {{`}}`}} bytes and cannot tolerate node failure.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit
         summary: Cluster has overcommitted memory resource requests.
-      expr: 'sum(namespace_memory:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource="memory"}) - max(kube_node_status_allocatable{resource="memory"})) > 0
+      expr: 'sum(namespace_memory:kube_pod_container_resource_requests:sum{}) - (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"})) > 0
 
         and
 
-        (sum(kube_node_status_allocatable{resource="memory"}) - max(kube_node_status_allocatable{resource="memory"})) > 0
+        (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"})) > 0
 
         '
       for: 10m
@@ -304,7 +304,7 @@
         description: Kubernetes API server client '{{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.instance {{`}}`}}' is experiencing {{`{{`}} $value | humanizePercentage {{`}}`}} errors.'
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclienterrors
         summary: Kubernetes API server client is experiencing errors.
-      expr: "(sum(rate(rest_client_requests_total{code=~\"5..\"}[5m])) by (cluster, instance, job, namespace)\n /\nsum(rate(rest_client_requests_total[5m])) by (cluster, instance, job, namespace))\n> 0.01\n"
+      expr: "(sum(rate(rest_client_requests_total{job=\"apiserver\",code=~\"5..\"}[5m])) by (cluster, instance, job, namespace)\n /\nsum(rate(rest_client_requests_total{job=\"apiserver\"}[5m])) by (cluster, instance, job, namespace))\n> 0.01\n"
       for: 15m
       labels:
         severity: warning
@@ -388,6 +388,7 @@
       expr: 'apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800
 
         '
+      for: 5m
       labels:
         severity: warning
     - alert: KubeClientCertificateExpiration
@@ -398,6 +399,7 @@
       expr: 'apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400
 
         '
+      for: 5m
       labels:
         severity: critical
     - alert: KubeAggregatedAPIErrors
@@ -481,7 +483,7 @@
         description: The readiness status of node {{`{{`}} $labels.node {{`}}`}} has changed {{`{{`}} $value {{`}}`}} times in the last 15 minutes.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubenodereadinessflapping
         summary: Node readiness status is flapping.
-      expr: 'sum(changes(kube_node_status_condition{status="true",condition="Ready"}[15m])) by (cluster, node) > 2
+      expr: 'sum(changes(kube_node_status_condition{job="kube-state-metrics",status="true",condition="Ready"}[15m])) by (cluster, node) > 2
 
         '
       for: 15m
@@ -769,13 +771,13 @@ spec:
     rules:
     - expr: "sum by (cluster, namespace, pod, container) (\n irate(container_cpu_usage_seconds_total{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}[5m])\n) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) (\n 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
       record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
-    - expr: "container_memory_working_set_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
+    - expr: "container_memory_working_set_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
       record: node_namespace_pod_container:container_memory_working_set_bytes
-    - expr: "container_memory_rss{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
+    - expr: "container_memory_rss{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
       record: node_namespace_pod_container:container_memory_rss
-    - expr: "container_memory_cache{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
+    - expr: "container_memory_cache{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
       record: node_namespace_pod_container:container_memory_cache
-    - expr: "container_memory_swap{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (namespace, pod) group_left(node) topk by(namespace, pod) (1,\n max by(namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
+    - expr: "container_memory_swap{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", image!=\"\"}\n* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,\n max by(cluster, namespace, pod, node) (kube_pod_info{node!=\"\"})\n)\n"
       record: node_namespace_pod_container:container_memory_swap
     - expr: "kube_pod_container_resource_requests{resource=\"memory\",job=\"kube-state-metrics\"} * on (namespace, pod, cluster)\ngroup_left() max by (namespace, pod, cluster) (\n (kube_pod_status_phase{phase=~\"Pending|Running\"} == 1)\n)\n"
       record: cluster:namespace:pod_memory:active:kube_pod_container_resource_requests
@@ -869,31 +871,29 @@ spec:
     rules:
     - expr: "topk by(cluster, namespace, pod) (1,\n max by (cluster, node, namespace, pod) (\n label_replace(kube_pod_info{job=\"kube-state-metrics\",node!=\"\"}, \"pod\", \"$1\", \"pod\", \"(.*)\")\n))\n"
       record: 'node_namespace_pod:kube_pod_info:'
-    - expr: "count by (cluster, node) (sum by (node, cpu) (\n node_cpu_seconds_total{job=\"node-exporter\"}\n* on (namespace, pod) group_left(node)\n topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:)\n))\n"
+    - expr: "count by (cluster, node) (\n node_cpu_seconds_total{mode=\"idle\",job=\"node-exporter\"}\n * on (namespace, pod) group_left(node)\n topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:)\n)\n"
       record: node:node_num_cpu:sum
     - expr: "sum(\n node_memory_MemAvailable_bytes{job=\"node-exporter\"} or\n (\n node_memory_Buffers_bytes{job=\"node-exporter\"} +\n node_memory_Cached_bytes{job=\"node-exporter\"} +\n node_memory_MemFree_bytes{job=\"node-exporter\"} +\n node_memory_Slab_bytes{job=\"node-exporter\"}\n )\n) by (cluster)\n"
       record: :node_memory_MemAvailable_bytes:sum
-    - expr: 'sum(rate(node_cpu_seconds_total{job="node-exporter",mode!="idle",mode!="iowait",mode!="steal"}[5m])) /
-
-        count(sum(node_cpu_seconds_total{job="node-exporter"}) by (cluster, instance, cpu))
-
-        '
+    - expr: "avg by (cluster, node) (\n sum without (mode) (\n rate(node_cpu_seconds_total{mode!=\"idle\",mode!=\"iowait\",mode!=\"steal\",job=\"node-exporter\"}[5m])\n )\n)\n"
+      record: node:node_cpu_utilization:ratio_rate5m
+    - expr: "avg by (cluster) (\n node:node_cpu_utilization:ratio_rate5m\n)\n"
       record: cluster:node_cpu:ratio_rate5m
   - name: kubelet.rules
     rules:
-    - expr: 'histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
+    - expr: 'histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
 
         '
       labels:
        quantile: '0.99'
      record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
-    - expr: 'histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
+    - expr: 'histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
 
         '
       labels:
         quantile: '0.9'
       record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile
-    - expr: 'histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
+    - expr: 'histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (cluster, instance, le) * on(cluster, instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"})
 
         '
       labels:
diff --git a/charts/kubezero-metrics/templates/rules/node-exporter.yaml b/charts/kubezero-metrics/templates/rules/node-exporter.yaml
index 6c466324..8df72a48 100644
--- a/charts/kubezero-metrics/templates/rules/node-exporter.yaml
+++ b/charts/kubezero-metrics/templates/rules/node-exporter.yaml
@@ -14,7 +14,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left and is filling up.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup
         summary: Filesystem is predicted to run out of space within the next 24 hours.
-      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 15\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 15\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 1h
       labels:
         severity: warning
@@ -23,7 +23,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left and is filling up fast.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemspacefillingup
         summary: Filesystem is predicted to run out of space within the next 4 hours.
-      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 10\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 10\nand\n predict_linear(node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 1h
       labels:
         severity: critical
@@ -32,7 +32,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace
         summary: Filesystem has less than 5% space left.
-      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 30m
      labels:
         severity: warning
@@ -41,7 +41,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutofspace
         summary: Filesystem has less than 3% space left.
-      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_avail_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_size_bytes{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 30m
       labels:
         severity: critical
@@ -50,7 +50,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left and is filling up.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup
         summary: Filesystem is predicted to run out of inodes within the next 24 hours.
-      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 40\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 40\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 24*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 1h
       labels:
         severity: warning
@@ -59,7 +59,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left and is filling up fast.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemfilesfillingup
         summary: Filesystem is predicted to run out of inodes within the next 4 hours.
-      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 20\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 20\nand\n predict_linear(node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"}[6h], 4*60*60) < 0\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 1h
       labels:
         severity: critical
@@ -68,7 +68,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles
         summary: Filesystem has less than 5% inodes left.
-      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 5\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 1h
       labels:
         severity: warning
@@ -77,7 +77,7 @@ spec:
         description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodefilesystemalmostoutoffiles
         summary: Filesystem has less than 3% inodes left.
-      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\"} == 0\n)\n"
+      expr: "(\n node_filesystem_files_free{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} / node_filesystem_files{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} * 100 < 3\nand\n node_filesystem_readonly{job=\"node-exporter\",fstype!=\"\",mountpoint!=\"\"} == 0\n)\n"
       for: 1h
       labels:
         severity: critical
@@ -128,7 +128,7 @@ spec:
         description: Clock on {{`{{`}} $labels.instance {{`}}`}} is out of sync by more than 300s. Ensure NTP is configured correctly on this host.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodeclockskewdetected
         summary: Clock skew detected.
-      expr: "(\n node_timex_offset_seconds > 0.05\nand\n deriv(node_timex_offset_seconds[5m]) >= 0\n)\nor\n(\n node_timex_offset_seconds < -0.05\nand\n deriv(node_timex_offset_seconds[5m]) <= 0\n)\n"
+      expr: "(\n node_timex_offset_seconds{job=\"node-exporter\"} > 0.05\nand\n deriv(node_timex_offset_seconds{job=\"node-exporter\"}[5m]) >= 0\n)\nor\n(\n node_timex_offset_seconds{job=\"node-exporter\"} < -0.05\nand\n deriv(node_timex_offset_seconds{job=\"node-exporter\"}[5m]) <= 0\n)\n"
       for: 10m
       labels:
         severity: warning
@@ -137,11 +137,11 @@ spec:
         description: Clock on {{`{{`}} $labels.instance {{`}}`}} is not synchronising. Ensure NTP is configured on this host.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodeclocknotsynchronising
         summary: Clock not synchronising.
-      expr: 'min_over_time(node_timex_sync_status[5m]) == 0
+      expr: 'min_over_time(node_timex_sync_status{job="node-exporter"}[5m]) == 0
 
         and
 
-        node_timex_maxerror_seconds >= 16
+        node_timex_maxerror_seconds{job="node-exporter"} >= 16
 
         '
       for: 10m
@@ -152,7 +152,7 @@ spec:
         description: RAID array '{{`{{`}} $labels.device {{`}}`}}' on {{`{{`}} $labels.instance {{`}}`}} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddegraded
         summary: RAID Array is degraded
-      expr: 'node_md_disks_required - ignoring (state) (node_md_disks{state="active"}) > 0
+      expr: 'node_md_disks_required{job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"} - ignoring (state) (node_md_disks{state="active",job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}) > 0
 
         '
       for: 15m
@@ -163,7 +163,7 @@ spec:
         description: At least one device in RAID array on {{`{{`}} $labels.instance {{`}}`}} failed. Array '{{`{{`}} $labels.device {{`}}`}}' needs attention and possibly a disk swap.
         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/noderaiddiskfailure
         summary: Failed device in RAID array
-      expr: 'node_md_disks{state="failed"} > 0
+      expr: 'node_md_disks{state="failed",job="node-exporter",device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"} > 0
 
         '
       labels:
@@ -200,11 +200,11 @@ spec:
 
         '
       record: instance:node_vmstat_pgmajfault:rate5m
-    - expr: 'rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[5m])
+    - expr: 'rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m])
 
         '
       record: instance_device:node_disk_io_time_seconds:rate5m
-    - expr: 'rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[5m])
+    - expr: 'rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"}[5m])
 
         '
       record: instance_device:node_disk_io_time_weighted_seconds:rate5m
diff --git a/charts/kubezero-metrics/templates/rules/prometheus.yaml b/charts/kubezero-metrics/templates/rules/prometheus.yaml
index 8672671c..c457fb79 100644
--- a/charts/kubezero-metrics/templates/rules/prometheus.yaml
+++ b/charts/kubezero-metrics/templates/rules/prometheus.yaml
@@ -214,6 +214,17 @@ spec:
       for: 5m
       labels:
         severity: critical
+    - alert: PrometheusHighQueryLoad
+      annotations:
+        description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} query API has less than 20% available capacity in its query engine for the last 15 minutes.
+        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheushighqueryload
+        summary: Prometheus is reaching its maximum capacity serving concurrent requests.
+      expr: 'avg_over_time(prometheus_engine_queries{job="prometheus-k8s",namespace="monitoring"}[5m]) / max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0.8
+
+        '
+      for: 15m
+      labels:
+        severity: warning
     - alert: PrometheusErrorSendingAlertsToAnyAlertmanager
       annotations:
         description: '{{`{{`}} printf "%.1f" $value {{`}}`}}% minimum errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} to any Alertmanager.'
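Side note on the PrometheusHighQueryLoad alert added above: it measures query-engine saturation as the 5m average of in-flight queries divided by the configured concurrency ceiling, and fires once less than 20% headroom remains for 15 minutes. A minimal PromQL sketch of the same check (the job and namespace selectors shown are this chart's defaults; other installs would substitute their own):

  avg_over_time(prometheus_engine_queries{job="prometheus-k8s",namespace="monitoring"}[5m])
    /
  max_over_time(prometheus_engine_queries_concurrent_max{job="prometheus-k8s",namespace="monitoring"}[5m])
  > 0.8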
diff --git a/charts/kubezero-metrics/zdt.patch b/charts/kubezero-metrics/zdt.patch
index 9a56660b..5a0ffac1 100644
--- a/charts/kubezero-metrics/zdt.patch
+++ b/charts/kubezero-metrics/zdt.patch
@@ -4,7 +4,7 @@ diff -tuNr charts/kube-prometheus-stack.orig/crds/crd-prometheuses.yaml charts/k
 @@ -6,6 +6,7 @@
  metadata:
    annotations:
-     controller-gen.kubebuilder.io/version: v0.9.2
+     controller-gen.kubebuilder.io/version: v0.11.1
 +    argocd.argoproj.io/sync-options: Replace=true
    creationTimestamp: null
    name: prometheuses.monitoring.coreos.com
diff --git a/charts/kubezero/README.md b/charts/kubezero/README.md
index 3ed009bb..996df34b 100644
--- a/charts/kubezero/README.md
+++ b/charts/kubezero/README.md
@@ -67,7 +67,7 @@ Kubernetes: `>= 1.25.0`
 | metrics.istio.grafana | object | `{}` |  |
 | metrics.istio.prometheus | object | `{}` |  |
 | metrics.namespace | string | `"monitoring"` |  |
-| metrics.targetRevision | string | `"0.8.9"` |  |
+| metrics.targetRevision | string | `"0.9.0"` |  |
 | network.cilium.cluster | object | `{}` |  |
 | network.enabled | bool | `true` |  |
 | network.retain | bool | `true` |  |
diff --git a/charts/kubezero/values.yaml b/charts/kubezero/values.yaml
index 2c894322..5ebf63f2 100644
--- a/charts/kubezero/values.yaml
+++ b/charts/kubezero/values.yaml
@@ -76,7 +76,7 @@ istio-private-ingress:
 metrics:
   enabled: false
   namespace: monitoring
-  targetRevision: 0.8.9
+  targetRevision: 0.9.0
   istio:
     grafana: {}
     prometheus: {}
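Side note on the reworked etcdExcessiveDatabaseGrowth alert in the etcd-mixin rules: instead of alerting on a 50% increase of the fill ratio over a 4h subquery, the rule now extrapolates. predict_linear() fits a linear regression over the last 4h of database-size samples and projects 4*60*60 seconds ahead; the alert fires when the projection crosses the backend quota. A minimal PromQL sketch of the technique, using the same metrics as the rule:

  # projected DB size four hours from now, fitted on the last four hours of samples
  predict_linear(etcd_mvcc_db_total_size_in_bytes[4h], 4*60*60)
    > etcd_server_quota_backend_bytes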