Compare commits


17 Commits

Author SHA1 Message Date
f3371d2685 feat: make operators work on GKE 2024-12-15 15:56:15 +00:00
432781511d Merge pull request 'chore(deps): update kubezero-argo-dependencies' (#28) from renovate/kubezero-argo-kubezero-argo-dependencies into main
Reviewed-on: #28
2024-12-09 15:42:07 +00:00
a5ae87038b chore(deps): update kubezero-argo-dependencies 2024-12-09 15:42:07 +00:00
9b5456bc68 docs: v1.31 draft 2024-12-09 12:50:21 +00:00
3c3fbb7fbf feat: enabled network policies for ArgoCD 2024-12-09 12:39:33 +00:00
fcb1ee5896 fix: disable OverCommit alerts globally 2024-12-04 19:49:33 +00:00
144d826aaf fix: various tweaks and fixes 2024-12-04 18:40:10 +00:00
2fa48e9fcd fix: latest data-prepper, various fixes for telemetry 2024-12-04 18:39:21 +00:00
b17f8d403a feat: add strimzi Kafka operator 2024-12-04 18:38:21 +00:00
d0431f3193 feat: latest CI, switch Jenkins to LTS image 2024-12-04 18:37:39 +00:00
17091f1dc9 Merge pull request 'chore(deps): update helm release renovate to v39' (#24) from renovate/kubezero-ci-major-kubezero-ci-dependencies into main
Reviewed-on: #24
2024-11-28 11:21:32 +00:00
7cae122591 chore(deps): update helm release renovate to v39 2024-11-28 11:21:32 +00:00
3513e15818 Merge pull request 'chore(deps): update kubezero-ci-dependencies' (#30) from renovate/kubezero-ci-kubezero-ci-dependencies into main
Reviewed-on: #30
2024-11-28 11:20:23 +00:00
385086406d chore(deps): update kubezero-ci-dependencies 2024-11-27 16:48:00 +00:00
a13b062d38 feat: fix keycloak metrics issues, bump EFS memory as workaround for OOM 2024-11-25 19:13:57 +00:00
e70822dd28 Merge pull request 'chore(deps): update keycloak docker tag to v24.2.1' (#23) from renovate/kubezero-auth-kubezero-auth-dependencies into main
Reviewed-on: #23
2024-11-21 11:52:16 +00:00
6068020397 chore(deps): update keycloak docker tag to v24.2.1 2024-11-20 03:03:31 +00:00
59 changed files with 695 additions and 262 deletions

View File

@ -227,7 +227,10 @@ cluster-autoscaler:
scan-interval: 30s scan-interval: 30s
skip-nodes-with-local-storage: false skip-nodes-with-local-storage: false
balance-similar-node-groups: true balance-similar-node-groups: true
ignore-daemonsets-utilization: true
ignore-taint: "node.cilium.io/agent-not-ready" ignore-taint: "node.cilium.io/agent-not-ready"
# Disable for non-clustered control-plane
# leader-elect: false
#securityContext: #securityContext:
# runAsNonRoot: true # runAsNonRoot: true
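The two added keys end up as plain command-line flags on the autoscaler container; a minimal sketch of the assumed result (the values-to-flags rendering follows the usual cluster-autoscaler chart convention and is not shown in this diff):

    # Assumed container args produced from the added values
    - --ignore-daemonsets-utilization=true
    - --ignore-taint=node.cilium.io/agent-not-ready
    # --leader-elect=false would only be emitted for a non-clustered control-plane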

View File

@ -1,7 +1,7 @@
apiVersion: v2 apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo name: kubezero-argo
version: 0.2.6 version: 0.2.7
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -18,11 +18,11 @@ dependencies:
version: ">= 0.1.6" version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: argo-events - name: argo-events
version: 2.4.8 version: 2.4.9
repository: https://argoproj.github.io/argo-helm repository: https://argoproj.github.io/argo-helm
condition: argo-events.enabled condition: argo-events.enabled
- name: argo-cd - name: argo-cd
version: 7.7.2 version: 7.7.7
repository: https://argoproj.github.io/argo-helm repository: https://argoproj.github.io/argo-helm
condition: argo-cd.enabled condition: argo-cd.enabled
- name: argocd-apps - name: argocd-apps

View File

@ -45,7 +45,9 @@ argo-cd:
format: json format: json
image: image:
repository: public.ecr.aws/zero-downtime/zdt-argocd repository: public.ecr.aws/zero-downtime/zdt-argocd
tag: v2.13.0 tag: v2.13.1
networkPolicy:
create: true
configs: configs:
styles: | styles: |
@ -54,8 +56,8 @@ argo-cd:
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); } .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
cm: cm:
ui.bannercontent: "KubeZero v1.30 - Release notes" ui.bannercontent: "KubeZero v1.31 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.30" ui.bannerurl: "https://kubezero.com/releases/v1.31"
ui.bannerpermanent: "true" ui.bannerpermanent: "true"
ui.bannerposition: "bottom" ui.bannerposition: "bottom"
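With the sub-chart image bumped to v2.13.1, network policies for Argo CD are switched on via a single value; an override sketch reconstructed from the hunk above:

    argo-cd:
      image:
        repository: public.ecr.aws/zero-downtime/zdt-argocd
        tag: v2.13.1
      networkPolicy:
        create: true    # new key added in this change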

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-auth name: kubezero-auth
description: KubeZero umbrella chart for all things Authentication and Identity management description: KubeZero umbrella chart for all things Authentication and Identity management
type: application type: application
version: 0.6.0 version: 0.6.1
appVersion: 26.0.5 appVersion: 26.0.5
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -18,6 +18,6 @@ dependencies:
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: keycloak - name: keycloak
repository: "oci://registry-1.docker.io/bitnamicharts" repository: "oci://registry-1.docker.io/bitnamicharts"
version: 24.0.4 version: 24.2.1
condition: keycloak.enabled condition: keycloak.enabled
kubeVersion: ">= 1.26.0" kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-auth # kubezero-auth
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 26.0.5](https://img.shields.io/badge/AppVersion-26.0.5-informational?style=flat-square) ![Version: 0.6.1](https://img.shields.io/badge/Version-0.6.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 26.0.5](https://img.shields.io/badge/AppVersion-26.0.5-informational?style=flat-square)
KubeZero umbrella chart for all things Authentication and Identity management KubeZero umbrella chart for all things Authentication and Identity management
@ -19,7 +19,7 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| oci://registry-1.docker.io/bitnamicharts | keycloak | 24.0.4 | | oci://registry-1.docker.io/bitnamicharts | keycloak | 24.2.1 |
# Keycloak # Keycloak
@ -38,9 +38,15 @@ https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keyc
| Key | Type | Default | Description | | Key | Type | Default | Description |
|-----|------|---------|-------------| |-----|------|---------|-------------|
| keycloak.auth.adminUser | string | `"admin"` | | | keycloak.auth.adminUser | string | `"admin"` | |
| keycloak.auth.existingSecret | string | `"kubezero-auth"` | | | keycloak.auth.existingSecret | string | `"keycloak-auth"` | |
| keycloak.auth.passwordSecretKey | string | `"admin-password"` | | | keycloak.auth.passwordSecretKey | string | `"admin-password"` | |
| keycloak.enabled | bool | `false` | | | keycloak.enabled | bool | `false` | |
| keycloak.externalDatabase.database | string | `"keycloak"` | |
| keycloak.externalDatabase.existingSecret | string | `"keycloak-pg"` | |
| keycloak.externalDatabase.existingSecretPasswordKey | string | `"password"` | |
| keycloak.externalDatabase.host | string | `"keycloak-pg-rw"` | |
| keycloak.externalDatabase.port | int | `5432` | |
| keycloak.externalDatabase.user | string | `"keycloak"` | |
| keycloak.hostnameStrict | bool | `false` | | | keycloak.hostnameStrict | bool | `false` | |
| keycloak.istio.admin.enabled | bool | `false` | | | keycloak.istio.admin.enabled | bool | `false` | |
| keycloak.istio.admin.gateway | string | `"istio-ingress/private-ingressgateway"` | | | keycloak.istio.admin.gateway | string | `"istio-ingress/private-ingressgateway"` | |
@ -52,15 +58,7 @@ https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keyc
| keycloak.metrics.serviceMonitor.enabled | bool | `true` | | | keycloak.metrics.serviceMonitor.enabled | bool | `true` | |
| keycloak.pdb.create | bool | `false` | | | keycloak.pdb.create | bool | `false` | |
| keycloak.pdb.minAvailable | int | `1` | | | keycloak.pdb.minAvailable | int | `1` | |
| keycloak.postgresql.auth.database | string | `"keycloak"` | |
| keycloak.postgresql.auth.existingSecret | string | `"kubezero-auth"` | |
| keycloak.postgresql.auth.username | string | `"keycloak"` | |
| keycloak.postgresql.enabled | bool | `false` | | | keycloak.postgresql.enabled | bool | `false` | |
| keycloak.postgresql.primary.persistence.size | string | `"1Gi"` | |
| keycloak.postgresql.primary.resources.limits.memory | string | `"128Mi"` | |
| keycloak.postgresql.primary.resources.requests.cpu | string | `"100m"` | |
| keycloak.postgresql.primary.resources.requests.memory | string | `"64Mi"` | |
| keycloak.postgresql.readReplicas.replicaCount | int | `0` | |
| keycloak.production | bool | `true` | | | keycloak.production | bool | `true` | |
| keycloak.proxyHeaders | string | `"xforwarded"` | | | keycloak.proxyHeaders | string | `"xforwarded"` | |
| keycloak.replicaCount | int | `1` | | | keycloak.replicaCount | int | `1` | |
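Keycloak's datastore moves from the bundled PostgreSQL sub-chart to an external database (the keycloak-pg-rw host naming suggests a CloudNativePG cluster); a values sketch reconstructed from the new externalDatabase.* defaults in the table above:

    keycloak:
      postgresql:
        enabled: false              # bundled PostgreSQL stays disabled
      externalDatabase:
        host: keycloak-pg-rw        # read-write service of the keycloak-pg cluster
        port: 5432
        database: keycloak
        user: keycloak
        existingSecret: keycloak-pg
        existingSecretPasswordKey: password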

View File

@ -1,9 +1,9 @@
configmap: grafana-dashboards configmap: grafana-dashboards
condition: '.Values.keycloak.metrics.enabled' condition: '.Values.keycloak.metrics.enabled'
gzip: true gzip: true
# folder: folder: KubeZero
dashboards: dashboards:
- name: keycloak - name: keycloak
# url: https://grafana.com/api/dashboards/10441/revisions/2/download url: https://grafana.com/api/dashboards/19659/revisions/1/download
url: https://grafana.com/api/dashboards/17878/revisions/1/download # url: https://grafana.com/api/dashboards/17878/revisions/1/download
tags: ['Keycloak', 'Auth'] tags: ['Keycloak', 'Auth']

File diff suppressed because one or more lines are too long

View File

@ -16,6 +16,8 @@ spec:
- route: - route:
- destination: - destination:
host: {{ template "kubezero-lib.fullname" $ }}-keycloak host: {{ template "kubezero-lib.fullname" $ }}-keycloak
port:
number: 80
{{- end }} {{- end }}
--- ---
@ -41,4 +43,6 @@ spec:
route: route:
- destination: - destination:
host: {{ template "kubezero-lib.fullname" $ }}-keycloak host: {{ template "kubezero-lib.fullname" $ }}-keycloak
port:
number: 80
{{- end }} {{- end }}
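Istio only infers the destination port when the backing service exposes exactly one, so pinning it keeps the route valid if more ports are added; a rendered sketch of the resulting route block (release/host name illustrative):

    # Illustrative rendering of the patched VirtualService route
    http:
      - route:
          - destination:
              host: kubezero-auth-keycloak
              port:
                number: 80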

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager description: KubeZero Umbrella Chart for cert-manager
type: application type: application
version: 0.9.11 version: 0.9.10
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -16,6 +16,6 @@ dependencies:
version: ">= 0.1.6" version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: cert-manager - name: cert-manager
version: v1.16.2 version: v1.16.1
repository: https://charts.jetstack.io repository: https://charts.jetstack.io
kubeVersion: ">= 1.26.0-0" kubeVersion: ">= 1.26.0-0"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci name: kubezero-ci
description: KubeZero umbrella chart for all things CI description: KubeZero umbrella chart for all things CI
type: application type: application
version: 0.8.18 version: 0.8.19
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -22,15 +22,15 @@ dependencies:
repository: https://dl.gitea.io/charts/ repository: https://dl.gitea.io/charts/
condition: gitea.enabled condition: gitea.enabled
- name: jenkins - name: jenkins
version: 5.7.12 version: 5.7.15
repository: https://charts.jenkins.io repository: https://charts.jenkins.io
condition: jenkins.enabled condition: jenkins.enabled
- name: trivy - name: trivy
version: 0.8.0 version: 0.9.0
repository: https://aquasecurity.github.io/helm-charts/ repository: https://aquasecurity.github.io/helm-charts/
condition: trivy.enabled condition: trivy.enabled
- name: renovate - name: renovate
version: 38.142.6 version: 39.33.1
repository: https://docs.renovatebot.com/helm-charts repository: https://docs.renovatebot.com/helm-charts
condition: renovate.enabled condition: renovate.enabled
kubeVersion: ">= 1.25.0" kubeVersion: ">= 1.25.0"

View File

@ -1,6 +1,6 @@
# kubezero-ci # kubezero-ci
![Version: 0.8.18](https://img.shields.io/badge/Version-0.8.18-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.8.19](https://img.shields.io/badge/Version-0.8.19-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI KubeZero umbrella chart for all things CI
@ -18,11 +18,11 @@ Kubernetes: `>= 1.25.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.8.0 | | https://aquasecurity.github.io/helm-charts/ | trivy | 0.9.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 5.7.12 | | https://charts.jenkins.io | jenkins | 5.7.15 |
| https://dl.gitea.io/charts/ | gitea | 10.6.0 | | https://dl.gitea.io/charts/ | gitea | 10.6.0 |
| https://docs.renovatebot.com/helm-charts | renovate | 38.142.6 | | https://docs.renovatebot.com/helm-charts | renovate | 39.33.1 |
# Jenkins # Jenkins
- default build retention 10 builds, 32 days
@ -92,7 +92,7 @@ Kubernetes: `>= 1.25.0`
| jenkins.agent.defaultsProviderTemplate | string | `"podman-aws"` | | | jenkins.agent.defaultsProviderTemplate | string | `"podman-aws"` | |
| jenkins.agent.idleMinutes | int | `30` | | | jenkins.agent.idleMinutes | int | `30` | |
| jenkins.agent.image.repository | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | | | jenkins.agent.image.repository | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.image.tag | string | `"v0.6.2"` | | | jenkins.agent.image.tag | string | `"v0.7.0"` | |
| jenkins.agent.inheritYamlMergeStrategy | bool | `true` | | | jenkins.agent.inheritYamlMergeStrategy | bool | `true` | |
| jenkins.agent.podName | string | `"podman-aws"` | | | jenkins.agent.podName | string | `"podman-aws"` | |
| jenkins.agent.podRetention | string | `"Default"` | | | jenkins.agent.podRetention | string | `"Default"` | |
@ -113,7 +113,7 @@ Kubernetes: `>= 1.25.0`
| jenkins.controller.containerEnv[1].value | string | `"none"` | | | jenkins.controller.containerEnv[1].value | string | `"none"` | |
| jenkins.controller.disableRememberMe | bool | `true` | | | jenkins.controller.disableRememberMe | bool | `true` | |
| jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | | | jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | |
| jenkins.controller.image.tag | string | `"alpine-jdk21"` | | | jenkins.controller.image.tag | string | `"lts-alpine-jdk21"` | |
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | | | jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | | | jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | | | jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
@ -162,7 +162,6 @@ Kubernetes: `>= 1.25.0`
| renovate.env.LOG_FORMAT | string | `"json"` | | | renovate.env.LOG_FORMAT | string | `"json"` | |
| renovate.securityContext.fsGroup | int | `1000` | | | renovate.securityContext.fsGroup | int | `1000` | |
| trivy.enabled | bool | `false` | | | trivy.enabled | bool | `false` | |
| trivy.image.tag | string | `"0.56.2"` | |
| trivy.persistence.enabled | bool | `true` | | | trivy.persistence.enabled | bool | `true` | |
| trivy.persistence.size | string | `"1Gi"` | | | trivy.persistence.size | string | `"1Gi"` | |
| trivy.rbac.create | bool | `false` | | | trivy.rbac.create | bool | `false` | |

View File

@ -12,6 +12,18 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
The changelog until v1.5.7 was auto-generated based on git commits. The changelog until v1.5.7 was auto-generated based on git commits.
Those entries include a reference to the git commit to be able to get more details. Those entries include a reference to the git commit to be able to get more details.
## 5.7.15
Update `jenkins/jenkins` to version `2.479.2-jdk17`
## 5.7.14
Update `kubernetes` to version `4296.v20a_7e4d77cf6`
## 5.7.13
Update `configuration-as-code` to version `1897.v79281e066ea_7`
## 5.7.12 ## 5.7.12
Update `configuration-as-code` to version `1887.v9e47623cb_043` Update `configuration-as-code` to version `1887.v9e47623cb_043`

View File

@ -1,10 +1,10 @@
annotations: annotations:
artifacthub.io/category: integration-delivery artifacthub.io/category: integration-delivery
artifacthub.io/changes: | artifacthub.io/changes: |
- Update `configuration-as-code` to version `1887.v9e47623cb_043` - Update `jenkins/jenkins` to version `2.479.2-jdk17`
artifacthub.io/images: | artifacthub.io/images: |
- name: jenkins - name: jenkins
image: docker.io/jenkins/jenkins:2.479.1-jdk17 image: docker.io/jenkins/jenkins:2.479.2-jdk17
- name: k8s-sidecar - name: k8s-sidecar
image: docker.io/kiwigrid/k8s-sidecar:1.28.0 image: docker.io/kiwigrid/k8s-sidecar:1.28.0
- name: inbound-agent - name: inbound-agent
@ -18,7 +18,7 @@ annotations:
- name: support - name: support
url: https://github.com/jenkinsci/helm-charts/issues url: https://github.com/jenkinsci/helm-charts/issues
apiVersion: v2 apiVersion: v2
appVersion: 2.479.1 appVersion: 2.479.2
description: 'Jenkins - Build great things at any scale! As the leading open source description: 'Jenkins - Build great things at any scale! As the leading open source
automation server, Jenkins provides over 1800 plugins to support building, deploying automation server, Jenkins provides over 1800 plugins to support building, deploying
and automating any project. ' and automating any project. '
@ -46,4 +46,4 @@ sources:
- https://github.com/maorfr/kube-tasks - https://github.com/maorfr/kube-tasks
- https://github.com/jenkinsci/configuration-as-code-plugin - https://github.com/jenkinsci/configuration-as-code-plugin
type: application type: application
version: 5.7.12 version: 5.7.15

View File

@ -165,7 +165,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
| [controller.initializeOnce](./values.yaml#L420) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` | | [controller.initializeOnce](./values.yaml#L420) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
| [controller.installLatestPlugins](./values.yaml#L409) | bool | Download the minimum required version or latest version of all dependencies | `true` | | [controller.installLatestPlugins](./values.yaml#L409) | bool | Download the minimum required version or latest version of all dependencies | `true` |
| [controller.installLatestSpecifiedPlugins](./values.yaml#L412) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` | | [controller.installLatestSpecifiedPlugins](./values.yaml#L412) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
| [controller.installPlugins](./values.yaml#L401) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4295.v7fa_01b_309c95","workflow-aggregator:600.vb_57cdd26fdd7","git:5.6.0","configuration-as-code:1887.v9e47623cb_043"]` | | [controller.installPlugins](./values.yaml#L401) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4296.v20a_7e4d77cf6","workflow-aggregator:600.vb_57cdd26fdd7","git:5.6.0","configuration-as-code:1897.v79281e066ea_7"]` |
| [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` | | [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` |
| [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` | | [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
| [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` | | [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |

View File

@ -399,10 +399,10 @@ controller:
# Plugins will be installed during Jenkins controller start # Plugins will be installed during Jenkins controller start
# -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` # -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
installPlugins: installPlugins:
- kubernetes:4295.v7fa_01b_309c95 - kubernetes:4296.v20a_7e4d77cf6
- workflow-aggregator:600.vb_57cdd26fdd7 - workflow-aggregator:600.vb_57cdd26fdd7
- git:5.6.0 - git:5.6.0
- configuration-as-code:1887.v9e47623cb_043 - configuration-as-code:1897.v79281e066ea_7
# If set to false, Jenkins will download the minimum required version of all dependencies. # If set to false, Jenkins will download the minimum required version of all dependencies.
# -- Download the minimum required version or latest version of all dependencies # -- Download the minimum required version or latest version of all dependencies

View File

@ -2,7 +2,7 @@ gitea:
enabled: false enabled: false
image: image:
tag: 1.22.3 tag: 1.22.6
rootless: true rootless: true
repliaCount: 1 repliaCount: 1
@ -90,7 +90,7 @@ jenkins:
controller: controller:
image: image:
tag: alpine-jdk21 tag: lts-alpine-jdk21
#tagLabel: alpine #tagLabel: alpine
disableRememberMe: true disableRememberMe: true
prometheus: prometheus:
@ -276,8 +276,8 @@ jenkins:
trivy: trivy:
enabled: false enabled: false
image: #image:
tag: 0.57.0 #tag: 0.57.0
persistence: persistence:
enabled: true enabled: true
size: 1Gi size: 1Gi

View File

@ -0,0 +1,4 @@
{"time":"2024-11-21T10:32:42.652788Z","level":"warning","scope":"envoy main","msg":"Usage of the deprecated runtime key overload.global_downstream_max_connections, consider switching to `e │
│ nvoy.resource_monitors.downstream_connections` instead.This runtime key will be removed in future.","caller":"external/envoy/source/server/server.cc:843","thread":"8"} │
│ {"time":"2024-11-21T10:32:42.653492Z","level":"warning","scope":"envoy main","msg":"There is no configured limit to the number of allowed active downstream connections. Configure a limit i │
│ n `envoy.resource_monitors.downstream_connections` resource monitor.","caller":"external/envoy/source/server/server.cc:936","thread":"8"} │

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-lib name: kubezero-lib
description: KubeZero helm library - common helm functions and blocks description: KubeZero helm library - common helm functions and blocks
type: library type: library
version: 0.2.0 version: 0.2.1
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -10,4 +10,4 @@ keywords:
maintainers: maintainers:
- name: Stefan Reimer - name: Stefan Reimer
email: stefan@zero-downtime.net email: stefan@zero-downtime.net
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.30.0"

View File

@ -2,7 +2,7 @@ kubezero-lib
============ ============
KubeZero helm library - common helm functions and blocks KubeZero helm library - common helm functions and blocks
Current chart version is `0.1.0` Current chart version is `0.2.1`
Source code can be found [here](https://kubezero.com) Source code can be found [here](https://kubezero.com)

View File

@ -0,0 +1,10 @@
{{- /*
maps pods to the kube control-plane
*/ -}}
{{- define "kubezero-lib.control-plane" -}}
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{{- end -}}
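Charts that want their pods pinned to the control-plane can splice this new helper into a pod spec; a minimal usage sketch (the surrounding Deployment skeleton is illustrative, not part of this change):

    # Hypothetical consumer template using the new helper
    spec:
      template:
        spec:
          {{- include "kubezero-lib.control-plane" . | nindent 6 }}
          containers:
            - name: operator
              image: example.org/operator:latest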

View File

@ -25,9 +25,9 @@ Common naming functions
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}} {{- end -}}
{{/* {{- /*
Selector labels Selector labels
*/}} */ -}}
{{- define "kubezero-lib.selectorLabels" -}} {{- define "kubezero-lib.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubezero-lib.name" . }} app.kubernetes.io/name: {{ include "kubezero-lib.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
@ -49,7 +49,7 @@ This takes an array of three values:
- the top context - the top context
- the template name of the overrides (destination) - the template name of the overrides (destination)
- the template name of the base (source) - the template name of the base (source)
*/}} */ -}}
{{- define "kubezero-lib.util.merge" -}} {{- define "kubezero-lib.util.merge" -}}
{{- $top := first . -}} {{- $top := first . -}}
{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}} {{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}

View File

@ -90,12 +90,11 @@ Kubernetes: `>= 1.26.0`
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].equal[0] | string | `"namespace"` | | | kube-prometheus-stack.alertmanager.config.inhibit_rules[2].equal[0] | string | `"namespace"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].source_matchers[0] | string | `"alertname = InfoInhibitor"` | | | kube-prometheus-stack.alertmanager.config.inhibit_rules[2].source_matchers[0] | string | `"alertname = InfoInhibitor"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].target_matchers[0] | string | `"severity = info"` | | | kube-prometheus-stack.alertmanager.config.inhibit_rules[2].target_matchers[0] | string | `"severity = info"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].source_matchers[0] | string | `"alertname = ClusterAutoscalerNodeGroupsEnabled"` | | | kube-prometheus-stack.alertmanager.config.route.group_by[0] | string | `"alertname"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].target_matchers[0] | string | `"alertname =~ \"KubeCPUOvercommit|KubeMemoryOvercommit\""` | | | kube-prometheus-stack.alertmanager.config.route.group_by[1] | string | `"severity"` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[0] | string | `"severity"` | | | kube-prometheus-stack.alertmanager.config.route.group_by[2] | string | `"status"` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[1] | string | `"clusterName"` | |
| kube-prometheus-stack.alertmanager.config.route.group_interval | string | `"5m"` | | | kube-prometheus-stack.alertmanager.config.route.group_interval | string | `"5m"` | |
| kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"10s"` | | | kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"30s"` | |
| kube-prometheus-stack.alertmanager.config.route.repeat_interval | string | `"4h"` | | | kube-prometheus-stack.alertmanager.config.route.repeat_interval | string | `"4h"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[0].matchers[0] | string | `"severity = none"` | | | kube-prometheus-stack.alertmanager.config.route.routes[0].matchers[0] | string | `"severity = none"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[0].receiver | string | `"null"` | | | kube-prometheus-stack.alertmanager.config.route.routes[0].receiver | string | `"null"` | |

View File

@ -18,7 +18,7 @@
"subdir": "contrib/mixin" "subdir": "contrib/mixin"
} }
}, },
"version": "19aa0dbe8fd6317a237bae9b6ea52a4f1b445b19", "version": "2b323071a8bd4f02ddaf63e0dfa1fd98c221dccb",
"sum": "IXI3LQIT9NmTPJAk8WLUJd5+qZfcGpeNCyWIK7oEpws=" "sum": "IXI3LQIT9NmTPJAk8WLUJd5+qZfcGpeNCyWIK7oEpws="
}, },
{ {
@ -88,7 +88,7 @@
"subdir": "grafana-builder" "subdir": "grafana-builder"
} }
}, },
"version": "5a6b86b475e427b2dbd9e4af0bcafbb6da0507a5", "version": "767befa8fb46a07be516dec2777d7d89909a529d",
"sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo=" "sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="
}, },
{ {
@ -118,8 +118,8 @@
"subdir": "" "subdir": ""
} }
}, },
"version": "bdbf7f45cedf37d07567be7519fa4139043f9335", "version": "a3fbf21977deb89b7d843eb8371170c011ea6835",
"sum": "j4EAKfqkbPvBFGnBjt4hex2bdNHPpuFWrCxfq5L6EkU=" "sum": "57zW2IGJ9zbYd8BI0qe6JkoWTRSMNiBUWC6+YcnEsWo="
}, },
{ {
"source": { "source": {
@ -128,7 +128,7 @@
"subdir": "jsonnet/kube-state-metrics" "subdir": "jsonnet/kube-state-metrics"
} }
}, },
"version": "17151aca659e0659259b5e1f5675acf849281ade", "version": "32e7727ff4613b0f55dfc18aff15afb8c04d03c5",
"sum": "lO7jUSzAIy8Yk9pOWJIWgPRhubkWzVh56W6wtYfbVH4=" "sum": "lO7jUSzAIy8Yk9pOWJIWgPRhubkWzVh56W6wtYfbVH4="
}, },
{ {
@ -138,7 +138,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin" "subdir": "jsonnet/kube-state-metrics-mixin"
} }
}, },
"version": "17151aca659e0659259b5e1f5675acf849281ade", "version": "32e7727ff4613b0f55dfc18aff15afb8c04d03c5",
"sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c=" "sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
}, },
{ {
@ -148,8 +148,8 @@
"subdir": "jsonnet/kube-prometheus" "subdir": "jsonnet/kube-prometheus"
} }
}, },
"version": "c503e5cc5403dd5d56b1c0c5933827baee64aeaf", "version": "7e5a571a3fb735c78e17c76a637eb7e8bb5dd086",
"sum": "fJqINQiYJPmllXFFO+Hl5HrPYANMbhHFUQ28tl0Vi00=" "sum": "uTw/Mj+X91S+oqUpAX81xcfWPDlox0tdSZY/YBw7nGE="
}, },
{ {
"source": { "source": {
@ -158,7 +158,7 @@
"subdir": "jsonnet/mixin" "subdir": "jsonnet/mixin"
} }
}, },
"version": "e951bd3037a053fea681510ccde211c28dc657e1", "version": "a366602bacb2c8d773a9cee058b6971b8d2e3732",
"sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=", "sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
"name": "prometheus-operator-mixin" "name": "prometheus-operator-mixin"
}, },
@ -169,8 +169,8 @@
"subdir": "jsonnet/prometheus-operator" "subdir": "jsonnet/prometheus-operator"
} }
}, },
"version": "e951bd3037a053fea681510ccde211c28dc657e1", "version": "a366602bacb2c8d773a9cee058b6971b8d2e3732",
"sum": "YOJjmladGD1PcgNae0h88Mm235CsZSfwf2a4DIcMJFU=" "sum": "z0/lCiMusMHTqntsosMVGYkVcSZjCpyZBmUMVUsK5nA="
}, },
{ {
"source": { "source": {
@ -179,7 +179,7 @@
"subdir": "doc/alertmanager-mixin" "subdir": "doc/alertmanager-mixin"
} }
}, },
"version": "f6b942cf9b3a503d59192eada300d2ad97cba82f", "version": "0f65e8fa5fc72d2678655105c0213b416ca6f34c",
"sum": "Mf4h1BYLle2nrgjf/HXrBbl0Zk8N+xaoEM017o0BC+k=", "sum": "Mf4h1BYLle2nrgjf/HXrBbl0Zk8N+xaoEM017o0BC+k=",
"name": "alertmanager" "name": "alertmanager"
}, },
@ -190,7 +190,7 @@
"subdir": "docs/node-mixin" "subdir": "docs/node-mixin"
} }
}, },
"version": "49d177bf95417b117ab612a376e2434d5dd61c2d", "version": "cf8c6891cc610e54f70383addd4bb6079f0add35",
"sum": "cQCW+1N0Xae5yXecCWDK2oAlN0luBS/5GrwBYSlaFms=" "sum": "cQCW+1N0Xae5yXecCWDK2oAlN0luBS/5GrwBYSlaFms="
}, },
{ {
@ -200,8 +200,8 @@
"subdir": "documentation/prometheus-mixin" "subdir": "documentation/prometheus-mixin"
} }
}, },
"version": "789c9b1a5e455850ed9b3c89cafb37df75ce1e50", "version": "b407c2930da4f50c0d17fc39404c6302a9eb740b",
"sum": "dYLcLzGH4yF3qB7OGC/7z4nqeTNjv42L7Q3BENU8XJI=", "sum": "OYT5u3S8DbamuJV/v3gbWSteOvFzMeNwMj+u4Apk7jM=",
"name": "prometheus" "name": "prometheus"
}, },
{ {
@ -222,7 +222,7 @@
"subdir": "mixin" "subdir": "mixin"
} }
}, },
"version": "f9da21ec0b28073875520159fe72ab744c255b2e", "version": "7037331e6ea7dbe85a1b7af37bf8ea277a80663d",
"sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=", "sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=",
"name": "thanos-mixin" "name": "thanos-mixin"
} }

View File

@ -29,14 +29,45 @@ local etcdMixin = addMixin({
}, },
}); });
local kp = (import 'kube-prometheus/main.libsonnet') + local kp = (import 'kube-prometheus/main.libsonnet') + {
{ values+:: {
values+:: { common+: {
common+: { namespace: 'monitoring',
namespace: 'monitoring', },
}, },
}, kubernetesControlPlane+: {
}; prometheusRule+: {
spec+: {
groups: [
(
if group.name == 'kubernetes-resources' then
group {
rules: std.filter(
function(rule)
rule.alert != 'KubeCPUOvercommit' && rule.alert != 'KubeMemoryOvercommit',
group.rules
) + [{
alert: 'ClusterAutoscalerNodeGroupsEnabled',
expr: 'cluster_autoscaler_node_groups_count{job="addons-aws-cluster-autoscaler",node_group_type="autoscaled"} > 0',
'for': '5m',
labels: {
severity: 'none',
},
annotations: {
description: 'Inhibitor rule if the Cluster Autoscaler found at least one node group',
summary: 'Cluster Autoscaler found at least one node group.',
},
}],
}
else
group
)
for group in super.groups
],
},
},
},
};
// We just want the Prometheus Rules // We just want the Prometheus Rules
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } + { 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +

View File

@ -123,7 +123,7 @@
{ {
"alert": "KubeDaemonSetRolloutStuck", "alert": "KubeDaemonSetRolloutStuck",
"annotations": { "annotations": {
"description": "DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 15 minutes.", "description": "DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 15m.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck", "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck",
"summary": "DaemonSet rollout is stuck." "summary": "DaemonSet rollout is stuck."
}, },
@ -228,32 +228,6 @@
{ {
"name": "kubernetes-resources", "name": "kubernetes-resources",
"rules": [ "rules": [
{
"alert": "KubeCPUOvercommit",
"annotations": {
"description": "Cluster {{ $labels.cluster }} has overcommitted CPU resource requests for Pods by {{ $value }} CPU shares and cannot tolerate node failure.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit",
"summary": "Cluster has overcommitted CPU resource requests."
},
"expr": "sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster) - max(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster)) > 0\nand\n(sum(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster) - max(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster)) > 0\n",
"for": "10m",
"labels": {
"severity": "warning"
}
},
{
"alert": "KubeMemoryOvercommit",
"annotations": {
"description": "Cluster {{ $labels.cluster }} has overcommitted memory resource requests for Pods by {{ $value | humanize }} bytes and cannot tolerate node failure.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit",
"summary": "Cluster has overcommitted memory resource requests."
},
"expr": "sum(namespace_memory:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster) - max(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster)) > 0\nand\n(sum(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster) - max(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster)) > 0\n",
"for": "10m",
"labels": {
"severity": "warning"
}
},
{ {
"alert": "KubeCPUQuotaOvercommit", "alert": "KubeCPUQuotaOvercommit",
"annotations": { "annotations": {
@ -331,6 +305,18 @@
"labels": { "labels": {
"severity": "info" "severity": "info"
} }
},
{
"alert": "ClusterAutoscalerNodeGroupsEnabled",
"annotations": {
"description": "Inhibitor rule if the Cluster Autoscaler found at least one node group",
"summary": "Cluster Autoscaler found at least one node group."
},
"expr": "cluster_autoscaler_node_groups_count{job=\"addons-aws-cluster-autoscaler\",node_group_type=\"autoscaled\"} > 0",
"for": "5m",
"labels": {
"severity": "none"
}
} }
] ]
}, },
@ -506,7 +492,7 @@
{ {
"alert": "KubeClientCertificateExpiration", "alert": "KubeClientCertificateExpiration",
"annotations": { "annotations": {
"description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days.", "description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days on cluster {{ $labels.cluster }}.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration", "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration",
"summary": "Client certificate is about to expire." "summary": "Client certificate is about to expire."
}, },
@ -519,7 +505,7 @@
{ {
"alert": "KubeClientCertificateExpiration", "alert": "KubeClientCertificateExpiration",
"annotations": { "annotations": {
"description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours.", "description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours on cluster {{ $labels.cluster }}.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration", "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration",
"summary": "Client certificate is about to expire." "summary": "Client certificate is about to expire."
}, },

View File

@ -6,7 +6,7 @@
"app.kubernetes.io/component": "controller", "app.kubernetes.io/component": "controller",
"app.kubernetes.io/name": "prometheus-operator", "app.kubernetes.io/name": "prometheus-operator",
"app.kubernetes.io/part-of": "kube-prometheus", "app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.78.1", "app.kubernetes.io/version": "0.78.2",
"prometheus": "k8s", "prometheus": "k8s",
"role": "alert-rules" "role": "alert-rules"
}, },

View File

@ -7,7 +7,7 @@
"app.kubernetes.io/instance": "k8s", "app.kubernetes.io/instance": "k8s",
"app.kubernetes.io/name": "prometheus", "app.kubernetes.io/name": "prometheus",
"app.kubernetes.io/part-of": "kube-prometheus", "app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "2.55.1", "app.kubernetes.io/version": "3.0.1",
"prometheus": "k8s", "prometheus": "k8s",
"role": "alert-rules" "role": "alert-rules"
}, },
@ -74,9 +74,9 @@
{ {
"alert": "PrometheusErrorSendingAlertsToSomeAlertmanagers", "alert": "PrometheusErrorSendingAlertsToSomeAlertmanagers",
"annotations": { "annotations": {
"description": "{{ printf \"%.1f\" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.", "description": "{{ printf \"%.1f\" $value }}% of alerts sent by Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}} were affected by errors.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers", "runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers",
"summary": "Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager." "summary": "More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors."
}, },
"expr": "(\n rate(prometheus_notifications_errors_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n/\n rate(prometheus_notifications_sent_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n)\n* 100\n> 1\n", "expr": "(\n rate(prometheus_notifications_errors_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n/\n rate(prometheus_notifications_sent_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n)\n* 100\n> 1\n",
"for": "15m", "for": "15m",

View File

@ -89,7 +89,7 @@ spec:
severity: warning severity: warning
- alert: KubeDaemonSetRolloutStuck - alert: KubeDaemonSetRolloutStuck
annotations: annotations:
description: DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} has not finished or progressed for at least 15 minutes. description: DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} has not finished or progressed for at least 15m.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck
summary: DaemonSet rollout is stuck. summary: DaemonSet rollout is stuck.
expr: "(\n (\n kube_daemonset_status_current_number_scheduled{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n ) or (\n kube_daemonset_status_number_misscheduled{job=\"kube-state-metrics\"}\n !=\n 0\n ) or (\n kube_daemonset_status_updated_number_scheduled{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n ) or (\n kube_daemonset_status_number_available{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n )\n) and (\n changes(kube_daemonset_status_updated_number_scheduled{job=\"kube-state-metrics\"}[5m])\n ==\n 0\n)\n" expr: "(\n (\n kube_daemonset_status_current_number_scheduled{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n ) or (\n kube_daemonset_status_number_misscheduled{job=\"kube-state-metrics\"}\n !=\n 0\n ) or (\n kube_daemonset_status_updated_number_scheduled{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n ) or (\n kube_daemonset_status_number_available{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n )\n) and (\n changes(kube_daemonset_status_updated_number_scheduled{job=\"kube-state-metrics\"}[5m])\n ==\n 0\n)\n"
@ -166,36 +166,6 @@ spec:
severity: warning severity: warning
- name: kubernetes-resources - name: kubernetes-resources
rules: rules:
- alert: KubeCPUOvercommit
annotations:
description: Cluster {{`{{`}} $labels.cluster {{`}}`}} has overcommitted CPU resource requests for Pods by {{`{{`}} $value {{`}}`}} CPU shares and cannot tolerate node failure.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit
summary: Cluster has overcommitted CPU resource requests.
expr: 'sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
and
(sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
'
for: 10m
labels:
severity: warning
- alert: KubeMemoryOvercommit
annotations:
description: Cluster {{`{{`}} $labels.cluster {{`}}`}} has overcommitted memory resource requests for Pods by {{`{{`}} $value | humanize {{`}}`}} bytes and cannot tolerate node failure.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit
summary: Cluster has overcommitted memory resource requests.
expr: 'sum(namespace_memory:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
and
(sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
'
for: 10m
labels:
severity: warning
- alert: KubeCPUQuotaOvercommit - alert: KubeCPUQuotaOvercommit
annotations: annotations:
description: Cluster {{`{{`}} $labels.cluster {{`}}`}} has overcommitted CPU resource requests for Namespaces. description: Cluster {{`{{`}} $labels.cluster {{`}}`}} has overcommitted CPU resource requests for Namespaces.
@ -250,6 +220,14 @@ spec:
for: 15m for: 15m
labels: labels:
severity: info severity: info
- alert: ClusterAutoscalerNodeGroupsEnabled
annotations:
description: Inhibitor rule if the Cluster Autoscaler found at least one node group
summary: Cluster Autoscaler found at least one node group.
expr: cluster_autoscaler_node_groups_count{job="addons-aws-cluster-autoscaler",node_group_type="autoscaled"} > 0
for: 5m
labels:
severity: none
- name: kubernetes-storage - name: kubernetes-storage
rules: rules:
- alert: KubePersistentVolumeFillingUp - alert: KubePersistentVolumeFillingUp
@ -395,7 +373,7 @@ spec:
rules: rules:
- alert: KubeClientCertificateExpiration - alert: KubeClientCertificateExpiration
annotations: annotations:
description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days. description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days on cluster {{`{{`}} $labels.cluster {{`}}`}}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
summary: Client certificate is about to expire. summary: Client certificate is about to expire.
expr: 'histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800 expr: 'histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800
@ -410,7 +388,7 @@ spec:
severity: warning severity: warning
- alert: KubeClientCertificateExpiration - alert: KubeClientCertificateExpiration
annotations: annotations:
description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours. description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours on cluster {{`{{`}} $labels.cluster {{`}}`}}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
summary: Client certificate is about to expire. summary: Client certificate is about to expire.
expr: 'histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400 expr: 'histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400

View File

@ -57,9 +57,9 @@ spec:
severity: warning severity: warning
- alert: PrometheusErrorSendingAlertsToSomeAlertmanagers - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
annotations: annotations:
description: '{{`{{`}} printf "%.1f" $value {{`}}`}}% errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.alertmanager{{`}}`}}.' description: '{{`{{`}} printf "%.1f" $value {{`}}`}}% of alerts sent by Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.alertmanager{{`}}`}} were affected by errors.'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers
summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager. summary: More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors.
expr: "(\n rate(prometheus_notifications_errors_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n/\n rate(prometheus_notifications_sent_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n)\n* 100\n> 1\n" expr: "(\n rate(prometheus_notifications_errors_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n/\n rate(prometheus_notifications_sent_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n)\n* 100\n> 1\n"
for: 15m for: 15m
labels: labels:

View File

@ -1,19 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "zdt-inhibitors" | trunc 63 | trimSuffix "-" }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
groups:
- name: zdt-inhibitors
rules:
- alert: ClusterAutoscalerNodeGroupsEnabled
annotations:
description: "This rule is meant to inhibit other rules and should not be forwarded.\nThe Cluster Autoscaler found at least one node group"
summary: Cluster Autoscaler found at least one node group.
expr: 'cluster_autoscaler_node_groups_count{job="addons-aws-cluster-autoscaler",node_group_type="autoscaled"} > 0'
for: 15m
labels:
severity: none

View File

@ -223,8 +223,8 @@ kube-prometheus-stack:
global: global:
resolve_timeout: 5m resolve_timeout: 5m
route: route:
group_by: ['severity', 'clusterName'] group_by: ['alertname', 'severity', 'status']
group_wait: 10s group_wait: 30s
group_interval: 5m group_interval: 5m
repeat_interval: 4h repeat_interval: 4h
routes: routes:
@ -252,11 +252,6 @@ kube-prometheus-stack:
- alertname = InfoInhibitor - alertname = InfoInhibitor
target_matchers: target_matchers:
- severity = info - severity = info
# Disable cluster overcommit alerts if we have cluster autoscaler available
- source_matchers:
- alertname = ClusterAutoscalerNodeGroupsEnabled
target_matchers:
- alertname =~ "KubeCPUOvercommit|KubeMemoryOvercommit"
alertmanagerSpec: alertmanagerSpec:
# externalUrl: # externalUrl:
logFormat: json logFormat: json
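After this hunk the top-level Alertmanager route also groups by alert name; a condensed sketch of the resulting route (receiver settings unchanged and elided). Note that the ClusterAutoscalerNodeGroupsEnabled rule keeps severity none, so with the inhibit rule removed it now lands in the existing null-receiver route instead of acting as an inhibitor source:

    route:
      group_by: ['alertname', 'severity', 'status']
      group_wait: 30s
      group_interval: 5m
      repeat_interval: 4h
      routes:
        - matchers:
            - severity = none
          receiver: 'null'    # severity=none alerts (e.g. the autoscaler marker rule) are dropped here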

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-operators name: kubezero-operators
description: Various operators supported by KubeZero description: Various operators supported by KubeZero
type: application type: application
version: 0.1.6 version: 0.1.8
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -10,6 +10,7 @@ keywords:
- operators - operators
- opensearch - opensearch
- postgres - postgres
- kafka
maintainers: maintainers:
- name: Stefan Reimer - name: Stefan Reimer
email: stefan@zero-downtime.net email: stefan@zero-downtime.net
@ -29,4 +30,8 @@ dependencies:
version: 0.22.1 version: 0.22.1
repository: https://cloudnative-pg.github.io/charts repository: https://cloudnative-pg.github.io/charts
condition: cloudnative-pg.enabled condition: cloudnative-pg.enabled
- name: strimzi-kafka-operator
version: 0.44.0
repository: "oci://quay.io/strimzi-helm"
condition: strimzi-kafka-operator.enabled
kubeVersion: ">= 1.26.0" kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-operators # kubezero-operators
![Version: 0.1.6](https://img.shields.io/badge/Version-0.1.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.1.7](https://img.shields.io/badge/Version-0.1.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
Various operators supported by KubeZero Various operators supported by KubeZero
@ -22,6 +22,7 @@ Kubernetes: `>= 1.26.0`
| https://cloudnative-pg.github.io/charts | cloudnative-pg | 0.22.1 | | https://cloudnative-pg.github.io/charts | cloudnative-pg | 0.22.1 |
| https://helm.elastic.co | eck-operator | 2.15.0 | | https://helm.elastic.co | eck-operator | 2.15.0 |
| https://opensearch-project.github.io/opensearch-k8s-operator/ | opensearch-operator | 2.7.0 | | https://opensearch-project.github.io/opensearch-k8s-operator/ | opensearch-operator | 2.7.0 |
| oci://quay.io/strimzi-helm | strimzi-kafka-operator | 0.44.0 |
## Values ## Values
@ -46,6 +47,17 @@ Kubernetes: `>= 1.26.0`
| opensearch-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | opensearch-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| opensearch-operator.tolerations[0].effect | string | `"NoSchedule"` | | | opensearch-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| opensearch-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | opensearch-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| strimzi-kafka-operator.enabled | bool | `false` | |
| strimzi-kafka-operator.leaderElection.enable | bool | `false` | |
| strimzi-kafka-operator.monitoring.podMonitorEnabled | bool | `false` | |
| strimzi-kafka-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| strimzi-kafka-operator.resources.limits.memory | string | `"384Mi"` | |
| strimzi-kafka-operator.resources.requests.cpu | string | `"20m"` | |
| strimzi-kafka-operator.resources.requests.memory | string | `"256Mi"` | |
| strimzi-kafka-operator.revisionHistoryLimit | int | `3` | |
| strimzi-kafka-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| strimzi-kafka-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| strimzi-kafka-operator.watchAnyNamespace | bool | `true` | |
---------------------------------------------- ----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
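Opting into the new Strimzi operator is then a one-line condition flip in the umbrella chart; an override sketch mirroring the defaults documented in the table above:

    strimzi-kafka-operator:
      enabled: true
      watchAnyNamespace: true
      monitoring:
        podMonitorEnabled: true   # also activates the new strimzi Grafana dashboard config further down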

View File

@ -1,7 +1,7 @@
configmap: grafana-dashboards configmap: grafana-dashboards-pg
gzip: true gzip: true
condition: 'index .Values "cloudnative-pg" "monitoring" "podMonitorEnabled"' condition: 'index .Values "cloudnative-pg" "monitoring" "podMonitorEnabled"'
folder: KubeZero folder: Operators
dashboards: dashboards:
- name: Cloudnative-pg - name: Cloudnative-pg
url: https://raw.githubusercontent.com/cloudnative-pg/grafana-dashboards/refs/heads/main/charts/cluster/grafana-dashboard.json url: https://raw.githubusercontent.com/cloudnative-pg/grafana-dashboards/refs/heads/main/charts/cluster/grafana-dashboard.json

View File

@ -0,0 +1,10 @@
configmap: grafana-dashboards-strimzi
gzip: true
condition: 'index .Values "strimzi-kafka-operator" "monitoring" "podMonitorEnabled"'
folder: Operators
dashboards:
- name: strimzi-kafka-operator
url: https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/refs/heads/main/helm-charts/helm3/strimzi-kafka-operator/files/grafana-dashboards/strimzi-operators.json
tags:
- Kafka
- Strimzi

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,15 @@
{{- if index .Values "strimzi-kafka-operator" "monitoring" "podMonitorEnabled" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards-strimzi" | trunc 63 | trimSuffix "-" }}
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"
{{- include "kubezero-lib.labels" . | nindent 4 }}
annotations:
k8s-sidecar-target-directory: Operators
binaryData:
strimzi-kafka-operator.json.gz:
H4sIAAAAAAAC/+1dW3PbuBV+96/g8KETzzhbUbIcb2f2wXE2u2ljx2M76bSJRwORkIQ1SXBBMLbiqr+9B+ANvElyZMdy57zYIkACONfvI0CQdzuWZY9Ggv6ZMEFj+2/WZyixrDv9F+rkPKJQak8FmZCQ2Ht5BfNai0MS6PN/q1d8pSJmPFR1r37a/2lo64rFXnt3EQmpX+8slkQ2e7pQpUsb84gkMU+ES+stRoIHVM5oEjfbPSvr7j9UyQIaU8FoS8OXZV3aMPy9UmfZJAw5SANqUpZIe7J9FsvCLmX/UDNOmC/fKZU6e2WpIW1VCuMcGpKxr+qlSKhRPmNeSylzeXjMfS5Ug2I6Ji96e1bfceDPcLhnObtm07mUR6Us1l+sI58KWRlCaZx4NuZEeHZWt9D/r3YyjdvUY7I2WnvCYpf4/6JEgPWFPOGhnEF9T1eCT0azS859yaKiUJvFGejfPguvtaunWtcGbHF9l/s+iWKqrpwQPy5UslrFMAbmnfHSiql2a5a6geP+vlFwmw83O56r41wtVQ8rLjOGf1WUgVKmVMaG25iOs56TwFmCTt5pdz6yi+JF9svojEltHfs4iSUPrHOatmyoIze24DftwbRaoxNGfe+YhxM2rWrVoxOS+LKq69R82mPNQigOuHZxW84g382478W2ccKiIn9AooiF06oe67rUZ/KoGrXVLol0le3tMPH9ioYLPccgQevFSnn0VtXZp389shv1i1rJot58ofs4oi4jfrUJ8/KriuyGfjp1SMYx9xNJazJBoqZRU2lWm25yK0HEUBq2Kecr8RPVm1LeKmGXdSAgkJc0f9jbWabZq04nSUKmzRPykBphYoQxB+yDfJDC61UjnjtzxUEtV6xKFU57qugXhwG5fQOBdsZZqOPF6RVNtHpwqryTzNpaQKNLnWe7Kv+AXMAm87yaJJKb1RzgL0zhQVXP4PgbJHFihoeyWOLSD62hZUP+d5s+ZvsklqdcnqpI6/RtnUvUxXY132lfiPN032pLCImbMwp5KpTHMxJOaR0csoDtEvwG/OA9mfNEZnjWsFnkJ1MWfqrTpYfO7fQ20mERJ8GLWAoWfGNAA7PMfXfNQu+XL/Y/yOSafLEXu9VrJ1xATst5zqhGdFLPCyUVoM+3xJU6/pxNkEWPwzo+b4GUuJP+IaasxpQeIgoiSoko++shijNESEFI2QBSPgJkbAGsqGEgqiCqIKo8Kqocrocqwx6iCqLKBqhyySPmbgGs6HHECCwILAgsGwHLYAWwOP0171ccRBZElg2QBZJuSF25BdiSjQTBBcEFweVxweVgTXA5QHBBcNkAXE6YEOAF5JqK/hYgTDocS4/H6iPQINAg0Dwq0PTXXMd3cCEfgeaBgGbbcAZRBlEGUeaHzpXtd6DMAFEGUWYDlHkNHjqlWwAw6UAQWhBaEFp+7ExZB7QM9xFaEFo2X4bhYnsWYjjeuyDAIMD84BmyLoDB55IRYDYBmHM6Jj4J3W24fSnGcl+A2Y59q686phd6W7hx9Zy6PHSZz7K919u8bzUiPpWSvnQh/GPmdiO6q3fjNhsityx+zYVHxQUEeDOmi5OOqfJn6v2bCr7sNDNxagBvOek9GVO/kYWyyjOfuDSA3lqyiD5pTMSRz6Zhdk6vVu0JcnMh56ktfRY2YHrCfP9DRFwm5xUXLLO7p3J0R4JPYwCS2VvRok+ViOmUhl67ipQTFVvR2+u/sm+NPNwC+pBeIOnodB93mEPJ/k4ZLeJ+ATiqkAi75dR/Mk/O6mlKBaYCzQv2TSljWGc8AEb0DVNpdJxkXdx1MbGs66VSaZTJUdoOKVCIBsuCTLFMbEiFLmTxadtQpoInUZoJGooPTHsvHWTJEXM/u+tqjE8m3cyzGqHV9xC0UnGkqhtRVVCWkA/LVQ9rQHi4AgcPO97fcLicjhY5pZ0JVv3CY3Hkk4KB6jeGVE6IzAw75lIBQ+UEFYXv8y4NrlbVWZnJKqPKXS8AOGO1dpX+m/cM28AEWegKSmJqUMIKCxjFiQv8MJ4k/khy4OufndnV7q41nlsvFGd8cH6Ymvxt0c6dpqaLxUbU5qIQwqqyHCuiwpqBxppsR9ZfUIOkB0kPkh4kPUh6kPS0kp7DNUnPz0h6tpz0TAjzqfeMCc9bLQCSHSQ7SHaQ7CDZQbLz0GSn8bhLB9sZOsh2tpzt+Ny9ftZsp0ZzbpgEliOt9yAXch7kPMh5kPMg50HO8+CrWs5BxwzPK+Q8W855nvFalnrXP87tIM9BnoM8B3kO8pzHX8jq5DkHyHO2nOcALWDcY6DzZ0x5zgohkPUg60HWg6wHWc//O+t54tWsFsqjEon6auGHbAyNjUXpelcfOdG2bG6rUiEvEfoX0Bqo8OJRQG4fggk1txU+BUc6ATwOksCqSm2p0d+fHK25eU4lkUfYNNfv2tj68/Jdc8WmdABbNgG6KJ/864tdTE5xopoCS8YDzmN86nOtzerPEG3uCwgQ1UAUVqPCTteY0235VLRxgPR7teP5KQlok26Uidz+9TZiYm4pHFhOPCLBI+WHdE1lpiPQ0i5RpHJSDUJH8buY29+n00dQixqSdcmtVD1PqhnF9U/zGF9LNzv10u8gFKuT2rAjqa1YK1IA/jslnjbQ4+wfL8ATMN16EbBQg6LrQ/KiYs9SiXPPyreKj9SnjYHXu3TXKqDWLTPuiCoXSIFWw4wkQTQK4t1ugNXJrgagcFkoGx9hXoq6dVj9PjgN8pvFCrip8osZm8hmhQE6ufuXtYKEcSppZtkOE6Q+rlhgAvd/9K0Cmur4u144orCExAVRkTw1wLye1/O8n75j4RwCpHZCWtGWBdIa7SLqbQtLbpLasnOLnFxMSajuUteTkN6CL3r0dZqGWgaos3EDLdvuwz16u7KZXiPRfcpyzH6jJguS5q14+pKXWsSYn6EtwbpqtsaNe0ldBkuFE1T10y1dLkQFwpYIZB+nP6325N8qnX1aHKyWsw4ana+yuVT/13G6ZjjnfFenmC1+T8Rg0PGiiMMtfFHE3z+d4MshcKIVJ1pxohUnWp/7RKtH3fFc0geeb311zyfpBh1TPv0ezqY+7WzqH1+DUUADLuajBFjSSDvLnave3QapS/zy3y82lIuX6p6eSC7+I9V3H8vD7BbxZUYsiwr1brH0PjNv6gesRRd9bTjZCgzIOtFKsT7GRoDi6jOSIiRFSIqQFD1vUhT8YDp0uC4dws2UT0yHgLtQzYmm7igiwHyK1WSofRha9HkY5I/oPVdu9NtxdXoReRHyIuRFyIuQF+FehHtSo8aTeZ3cCB+821pu5PIklMiOSnZ0rBSyBj3ayRpR3ahcpU4a9FJhADtmNCCliQf6+TiwrPEBk/SN/llP9kWqV7toVtIgUtBmJPvU2Uu3ML8OkQiRuvyd6cRUfcKy8TSbhvjaVC8L9Ur+ke+3fEXBzzmN4XlGdRoLzcvCdL3bfnMxOjv/cPLr5e+/fryofHqiyAZmkNt/JjRdho7aXNtQuFMpndLbGu+y42sWfRT+xTx02z4OkdnWkGrHdBOVKxepMZixcm9
PUnYEgX7z0pnlFpQ8K7Mrl0H0XBuPcOWjH+V+bsa5PTTotNMzDgbmgVMmNHto/HbMg0HPrJmVv/vGb8dLJb7KZVBRaVhlZS9mwwdmw2Yv/X3zwJiTfeWZ483HUlHfN5VMc6uaW611uFgfsvSTxd3XIuI0Ntk3lF7rXdm6iZ3F/wDuyXP7T6AAAA==
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if index .Values "cloudnative-pg" "monitoring" "podMonitorEnabled" }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "strimzi-kafka-operator" | trunc 63 | trimSuffix "-" }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
strimzi.io/kind: cluster-operator
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- path: /metrics
port: http
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "strimzi-kafka-entity" | trunc 63 | trimSuffix "-" }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: entity-operator
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- path: /metrics
port: healthcheck
{{- end }}

View File

@ -17,6 +17,7 @@ rm -rf charts/eck-operator/charts
yq eval -Mi 'del(.dependencies)' charts/eck-operator/Chart.yaml yq eval -Mi 'del(.dependencies)' charts/eck-operator/Chart.yaml
# Create ZDT dashboard configmap # Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/cloudnative-pg/grafana-dashboards.yaml ../kubezero-metrics/sync_grafana_dashboards.py dashboards-pg.yaml templates/cloudnative-pg/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboards-strimzi.yaml templates/strimzi/grafana-dashboards.yaml
update_docs update_docs

View File

@ -13,31 +13,34 @@ opensearch-operator:
- name: SKIP_INIT_CONTAINER - name: SKIP_INIT_CONTAINER
value: "true" value: "true"
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
eck-operator: eck-operator:
enabled: false enabled: false
installCRDs: false installCRDs: false
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
cloudnative-pg: cloudnative-pg:
enabled: false enabled: false
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
monitoring: monitoring:
podMonitorEnabled: false podMonitorEnabled: false
# We pull in the dashboard via our pipeline ! # We pull in the dashboard via our pipeline !
grafanaDashboard: grafanaDashboard:
create: false create: false
strimzi-kafka-operator:
enabled: false
revisionHistoryLimit: 3
watchAnyNamespace: true
leaderElection:
enable: false
resources:
requests:
cpu: 20m
memory: 256Mi
limits:
memory: 384Mi
monitoring:
podMonitorEnabled: false
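Assuming the root-chart wiring shown later in `operators.yaml`, a cluster could opt into the operator from the KubeZero root values roughly like this (a sketch, not a tested configuration):

```yaml
operators:
  enabled: true
  targetRevision: 0.1.8        # matches the version bump in the root values further below
  strimzi-kafka-operator:
    enabled: true
```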

View File

@ -245,9 +245,9 @@ aws-efs-csi-driver:
resources: resources:
requests: requests:
cpu: 20m cpu: 20m
memory: 96Mi memory: 128Mi
limits: limits:
memory: 256Mi memory: 512Mi
affinity: affinity:
nodeAffinity: nodeAffinity:

View File

@ -1,7 +1,7 @@
configmap: grafana-dashboards configmap: grafana-dashboards
gzip: true gzip: true
folder: Telemetry folder: Telemetry
#condition: '.Values.telemetry.metrics.enabled' condition: '.Values.metrics.enabled'
dashboards: dashboards:
#- name: jaeger #- name: jaeger
# url: https://grafana.com/api/dashboards/10001/revisions/2/download # url: https://grafana.com/api/dashboards/10001/revisions/2/download
@ -9,14 +9,19 @@ dashboards:
# - Jaeger # - Jaeger
# - Telemetry # - Telemetry
- name: opensearch - name: opensearch
url: https://grafana.com/api/dashboards/15178/revisions/2/download url: "https://grafana.com/api/dashboards/15178/revisions/2/download"
tags: tags:
- OpenSearch - OpenSearch
- Telemetry - Telemetry
- name: fluent-logging - name: fluent-logging
url: https://grafana.com/api/dashboards/7752/revisions/6/download url: "https://grafana.com/api/dashboards/7752/revisions/6/download"
#url: https://grafana.com/api/dashboards/13042/revisions/2/download #url: https://grafana.com/api/dashboards/13042/revisions/2/download
tags: tags:
- fluentd - fluentd
- fluent-bit - fluent-bit
- Telemetry - Telemetry
- name: kafka
url: "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/refs/heads/main/examples/metrics/grafana-dashboards/strimzi-kraft.json"
tags:
- Telemetry
- Kafka

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,236 @@
{{- if index .Values "data-prepper" "enabled" }}
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
name: kraft
labels:
strimzi.io/cluster: telemetry
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
replicas: 1
roles:
- controller
- broker
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: 4Gi
deleteClaim: true
kraftMetadata: shared
---
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: telemetry
annotations:
strimzi.io/node-pools: enabled
strimzi.io/kraft: enabled
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
kafka:
version: 3.8.0
metadataVersion: 3.8-IV0
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
config:
offsets.topic.replication.factor: 1
transaction.state.log.replication.factor: 1
transaction.state.log.min.isr: 1
default.replication.factor: 1
min.insync.replicas: 1
{{- if .Values.metrics.enabled }}
metricsConfig:
type: jmxPrometheusExporter
valueFrom:
configMapKeyRef:
name: kafka-metrics
key: kafka-metrics-config.yml
{{- end }}
# entityOperator:
# topicOperator: {}
# userOperator: {}
{{- if .Values.metrics.enabled }}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kafka-metrics
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
data:
kafka-metrics-config.yml: |
# See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
lowercaseOutputName: true
rules:
# Special cases and very specific rules
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
topic: "$4"
partition: "$5"
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
broker: "$4:$5"
- pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_tls_info
type: GAUGE
labels:
cipher: "$2"
protocol: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_software
type: GAUGE
labels:
clientSoftwareName: "$2"
clientSoftwareVersion: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
name: kafka_server_$1_$4
type: COUNTER
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
name: kafka_server_$1_$4
type: COUNTER
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
# Some percent metrics use MeanRate attribute
# Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
name: kafka_$1_$2_$3_percent
type: GAUGE
# Generic gauges for percents
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
labels:
"$4": "$5"
# Generic per-second counters with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
# Generic gauges with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
# Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
# Note that these are missing the '_sum' metric!
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
quantile: "0.$8"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
quantile: "0.$6"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
quantile: "0.$4"
# KRaft overall related metrics
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
- pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
name: kafka_server_raftmetrics_$1
type: COUNTER
- pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
name: kafka_server_raftmetrics_$1
value: 1
type: UNTYPED
labels:
$1: "$2"
- pattern: "kafka.server<type=raft-metrics><>(.+):"
name: kafka_server_raftmetrics_$1
type: GAUGE
# KRaft "low level" channels related metrics
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
- pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
name: kafka_server_raftchannelmetrics_$1
type: COUNTER
- pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
name: kafka_server_raftchannelmetrics_$1
type: GAUGE
# Broker metrics related to fetching metadata topic records in KRaft mode
- pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
name: kafka_server_brokermetadatametrics_$1
type: GAUGE
{{- end }}
{{- end }}
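As a worked example of the generic per-second rule above (not part of the chart itself): the broker MBean `kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec` is matched by `kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count` and exported as a counter.

```yaml
# MBean attribute:  kafka.server<type=BrokerTopicMetrics, name=MessagesInPerSec><>Count
# exported metric name (lowercased because of lowercaseOutputName: true):
kafka_server_brokertopicmetrics_messagesin_total
```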

View File

@ -0,0 +1,49 @@
{{- if and (index .Values "data-prepper" "enabled" ) .Values.metrics.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "kafka" | trunc 63 | trimSuffix "-" }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
selector:
matchExpressions:
- key: "strimzi.io/kind"
operator: In
values: ["Kafka", "KafkaConnect", "KafkaMirrorMaker", "KafkaMirrorMaker2"]
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- path: /metrics
port: tcp-prometheus
relabelings:
- separator: ;
regex: __meta_kubernetes_pod_label_(strimzi_io_.+)
replacement: $1
action: labelmap
- sourceLabels: [__meta_kubernetes_namespace]
separator: ;
regex: (.*)
targetLabel: namespace
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_name]
separator: ;
regex: (.*)
targetLabel: kubernetes_pod_name
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_node_name]
separator: ;
regex: (.*)
targetLabel: node_name
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_host_ip]
separator: ;
regex: (.*)
targetLabel: node_ip
replacement: $1
action: replace
{{- end }}
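To illustrate the relabelings above: the `labelmap` rule copies every `strimzi.io/*` pod label onto the scraped series, so a broker pod of the `telemetry` cluster would carry labels roughly like the following (pod and node names are hypothetical):

```yaml
strimzi_io_cluster: telemetry            # from pod label strimzi.io/cluster via labelmap
namespace: telemetry                     # from __meta_kubernetes_namespace
kubernetes_pod_name: telemetry-kraft-0   # hypothetical pod name
node_name: worker-1                      # hypothetical node
```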

View File

@ -1,8 +1,11 @@
metrics:
enabled: false
data-prepper: data-prepper:
enabled: false enabled: false
#image: image:
# tag: 2.10.1 tag: 2.10.1
securityContext: securityContext:
capabilities: capabilities:
@ -82,6 +85,11 @@ data-prepper:
bulk_size: 4 bulk_size: 4
config: config:
data-prepper-config.yaml: |
ssl: false
peer_forwarder:
ssl: false
log4j2-rolling.properties: | log4j2-rolling.properties: |
status = error status = error
dest = err dest = err

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero name: kubezero
description: KubeZero - Root App of Apps chart description: KubeZero - Root App of Apps chart
type: application type: application
version: 1.30.6 version: 1.31.4-alpha
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -13,6 +13,6 @@ maintainers:
email: stefan@zero-downtime.net email: stefan@zero-downtime.net
dependencies: dependencies:
- name: kubezero-lib - name: kubezero-lib
version: ">= 0.1.6" version: ">= 0.2.1"
repository: https://cdn.zero-downtime.net/charts repository: https://cdn.zero-downtime.net/charts
kubeVersion: ">= 1.26.0-0" kubeVersion: ">= 1.29.0-0"

View File

@ -1,5 +0,0 @@
.PHONY: sync
sync:
rm -rf scripts templates
cp -r ../../kubezero/charts/kubezero/* .

View File

@ -1,6 +1,6 @@
# kubezero # kubezero
![Version: 1.30.5](https://img.shields.io/badge/Version-1.30.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 1.31.3](https://img.shields.io/badge/Version-1.31.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart KubeZero - Root App of Apps chart
@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0-0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.2.1 |
## Values ## Values
@ -32,16 +32,16 @@ Kubernetes: `>= 1.26.0-0`
| addons.external-dns.enabled | bool | `false` | | | addons.external-dns.enabled | bool | `false` | |
| addons.forseti.enabled | bool | `false` | | | addons.forseti.enabled | bool | `false` | |
| addons.sealed-secrets.enabled | bool | `false` | | | addons.sealed-secrets.enabled | bool | `false` | |
| addons.targetRevision | string | `"0.8.9"` | | | addons.targetRevision | string | `"0.8.11"` | |
| argo.argo-cd.enabled | bool | `false` | | | argo.argo-cd.enabled | bool | `false` | |
| argo.argo-cd.istio.enabled | bool | `false` | | | argo.argo-cd.istio.enabled | bool | `false` | |
| argo.argocd-image-updater.enabled | bool | `false` | | | argo.argocd-image-updater.enabled | bool | `false` | |
| argo.enabled | bool | `false` | | | argo.enabled | bool | `false` | |
| argo.namespace | string | `"argocd"` | | | argo.namespace | string | `"argocd"` | |
| argo.targetRevision | string | `"0.2.4"` | | | argo.targetRevision | string | `"0.2.6"` | |
| cert-manager.enabled | bool | `false` | | | cert-manager.enabled | bool | `false` | |
| cert-manager.namespace | string | `"cert-manager"` | | | cert-manager.namespace | string | `"cert-manager"` | |
| cert-manager.targetRevision | string | `"0.9.9"` | | | cert-manager.targetRevision | string | `"0.9.10"` | |
| falco.enabled | bool | `false` | | | falco.enabled | bool | `false` | |
| falco.k8saudit.enabled | bool | `false` | | | falco.k8saudit.enabled | bool | `false` | |
| falco.targetRevision | string | `"0.1.2"` | | | falco.targetRevision | string | `"0.1.2"` | |
@ -54,35 +54,35 @@ Kubernetes: `>= 1.26.0-0`
| istio-ingress.enabled | bool | `false` | | | istio-ingress.enabled | bool | `false` | |
| istio-ingress.gateway.service | object | `{}` | | | istio-ingress.gateway.service | object | `{}` | |
| istio-ingress.namespace | string | `"istio-ingress"` | | | istio-ingress.namespace | string | `"istio-ingress"` | |
| istio-ingress.targetRevision | string | `"0.22.3-1"` | | | istio-ingress.targetRevision | string | `"0.23.2"` | |
| istio-private-ingress.chart | string | `"kubezero-istio-gateway"` | | | istio-private-ingress.chart | string | `"kubezero-istio-gateway"` | |
| istio-private-ingress.enabled | bool | `false` | | | istio-private-ingress.enabled | bool | `false` | |
| istio-private-ingress.gateway.service | object | `{}` | | | istio-private-ingress.gateway.service | object | `{}` | |
| istio-private-ingress.namespace | string | `"istio-ingress"` | | | istio-private-ingress.namespace | string | `"istio-ingress"` | |
| istio-private-ingress.targetRevision | string | `"0.22.3-1"` | | | istio-private-ingress.targetRevision | string | `"0.23.2"` | |
| istio.enabled | bool | `false` | | | istio.enabled | bool | `false` | |
| istio.namespace | string | `"istio-system"` | | | istio.namespace | string | `"istio-system"` | |
| istio.targetRevision | string | `"0.22.3-1"` | | | istio.targetRevision | string | `"0.23.2"` | |
| kubezero.defaultTargetRevision | string | `"*"` | | | kubezero.defaultTargetRevision | string | `"*"` | |
| kubezero.gitSync | object | `{}` | | | kubezero.gitSync | object | `{}` | |
| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | | | kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
| kubezero.server | string | `"https://kubernetes.default.svc"` | | | kubezero.server | string | `"https://kubernetes.default.svc"` | |
| logging.enabled | bool | `false` | | | logging.enabled | bool | `false` | |
| logging.namespace | string | `"logging"` | | | logging.namespace | string | `"logging"` | |
| logging.targetRevision | string | `"0.8.12"` | | | logging.targetRevision | string | `"0.8.13"` | |
| metrics.enabled | bool | `false` | | | metrics.enabled | bool | `false` | |
| metrics.istio.grafana | object | `{}` | | | metrics.istio.grafana | object | `{}` | |
| metrics.istio.prometheus | object | `{}` | | | metrics.istio.prometheus | object | `{}` | |
| metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | | | metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | |
| metrics.namespace | string | `"monitoring"` | | | metrics.namespace | string | `"monitoring"` | |
| metrics.targetRevision | string | `"0.10.0"` | | | metrics.targetRevision | string | `"0.10.2"` | |
| network.cilium.cluster | object | `{}` | | | network.cilium.cluster | object | `{}` | |
| network.enabled | bool | `true` | | | network.enabled | bool | `true` | |
| network.retain | bool | `true` | | | network.retain | bool | `true` | |
| network.targetRevision | string | `"0.5.5"` | | | network.targetRevision | string | `"0.5.5"` | |
| operators.enabled | bool | `false` | | | operators.enabled | bool | `false` | |
| operators.namespace | string | `"operators"` | | | operators.namespace | string | `"operators"` | |
| operators.targetRevision | string | `"0.1.4"` | | | operators.targetRevision | string | `"0.1.6"` | |
| storage.aws-ebs-csi-driver.enabled | bool | `false` | | | storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | | | storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | | | storage.enabled | bool | `false` | |
@ -90,7 +90,10 @@ Kubernetes: `>= 1.26.0-0`
| storage.k8up.enabled | bool | `false` | | | storage.k8up.enabled | bool | `false` | |
| storage.lvm-localpv.enabled | bool | `false` | | | storage.lvm-localpv.enabled | bool | `false` | |
| storage.snapshotController.enabled | bool | `false` | | | storage.snapshotController.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.8.8"` | | | storage.targetRevision | string | `"0.8.9"` | |
| telemetry.enabled | bool | `false` | | | telemetry.enabled | bool | `false` | |
| telemetry.namespace | string | `"telemetry"` | | | telemetry.namespace | string | `"telemetry"` | |
| telemetry.targetRevision | string | `"0.4.0"` | | | telemetry.targetRevision | string | `"0.4.1"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

View File

@ -6,7 +6,7 @@ clusterBackup:
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
{{- if .Values.global.aws.region }} {{- if eq .Values.global.platform "aws" }}
# AWS # AWS
extraEnv: extraEnv:
- name: AWS_DEFAULT_REGION - name: AWS_DEFAULT_REGION
@ -20,7 +20,7 @@ forseti:
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
{{- if .Values.global.aws.region }} {{- if eq .Values.global.platform "aws" }}
# AWS # AWS
aws: aws:
region: {{ $.Values.global.aws.region }} region: {{ $.Values.global.aws.region }}
@ -34,7 +34,7 @@ external-dns:
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
{{- if .Values.global.aws.region }} {{- if eq .Values.global.platform "aws" }}
# AWS # AWS
txtOwnerId: {{ .Values.global.clusterName }} txtOwnerId: {{ .Values.global.clusterName }}
provider: aws provider: aws
@ -67,13 +67,18 @@ external-dns:
cluster-autoscaler: cluster-autoscaler:
enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") (index .Values "addons" "cluster-autoscaler" "enabled")) }} enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") (index .Values "addons" "cluster-autoscaler" "enabled")) }}
autoDiscovery:
clusterName: {{ .Values.global.clusterName }}
{{- if not .Values.global.highAvailable }}
extraArgs:
leader-elect: false
{{- end }}
{{- with omit (index .Values "addons" "cluster-autoscaler") "enabled" }} {{- with omit (index .Values "addons" "cluster-autoscaler") "enabled" }}
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
autoDiscovery:
clusterName: {{ .Values.global.clusterName }}
{{- with .Values.metrics }} {{- with .Values.metrics }}
serviceMonitor: serviceMonitor:
enabled: {{ .enabled }} enabled: {{ .enabled }}
@ -82,7 +87,7 @@ cluster-autoscaler:
# enabled: {{ .enabled }} # enabled: {{ .enabled }}
{{- end }} {{- end }}
{{- if .Values.global.aws.region }} {{- if eq .Values.global.platform "aws" }}
# AWS # AWS
awsRegion: {{ .Values.global.aws.region }} awsRegion: {{ .Values.global.aws.region }}
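With the reordered template, a non-HA cluster would render cluster-autoscaler values along these lines (cluster name is a placeholder):

```yaml
cluster-autoscaler:
  enabled: true
  autoDiscovery:
    clusterName: example-cluster   # placeholder, taken from .Values.global.clusterName
  extraArgs:
    leader-elect: false            # only rendered when global.highAvailable is false
```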

View File

@ -10,26 +10,13 @@ cert-manager:
{{- end }} {{- end }}
{{- if eq .Values.global.platform "aws" }} {{- if eq .Values.global.platform "aws" }}
# map everything to the control-plane {{- include "kubezero-lib.control-plane" . | nindent 2 }}
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
webhook: webhook:
tolerations: {{- include "kubezero-lib.control-plane" . | nindent 4 }}
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
cainjector: cainjector:
tolerations: {{- include "kubezero-lib.control-plane" . | nindent 4 }}
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
extraEnv: extraEnv:
- name: AWS_REGION - name: AWS_REGION
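Judging by the explicit blocks removed above, the `kubezero-lib.control-plane` helper presumably renders something like the following (a sketch inferred from the old values, not the actual library template):

```yaml
nodeSelector:
  node-role.kubernetes.io/control-plane: ""
tolerations:
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule
```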

View File

@ -13,19 +13,15 @@ istiod:
telemetry: telemetry:
enabled: {{ $.Values.metrics.enabled }} enabled: {{ $.Values.metrics.enabled }}
pilot: pilot:
{{- if eq .Values.global.platform "aws" }} {{- if eq .Values.global.platform "aws" }}
nodeSelector: {{- include "kubezero-lib.control-plane" . | nindent 4 }}
node-role.kubernetes.io/control-plane: "" {{- end }}
tolerations: {{- if .Values.global.highAvailable }}
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{{- end }}
{{- if .Values.global.highAvailable }}
replicaCount: 2 replicaCount: 2
{{- else }} {{- else }}
extraContainerArgs: extraContainerArgs:
- --leader-elect=false - --leader-elect=false
{{- end }} {{- end }}
{{- with index .Values "istio" "kiali-server" }} {{- with index .Values "istio" "kiali-server" }}
kiali-server: kiali-server:

View File

@ -2,16 +2,42 @@
{{- with index .Values "operators" "opensearch-operator" }} {{- with index .Values "operators" "opensearch-operator" }}
opensearch-operator: opensearch-operator:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
{{- with index .Values "operators" "eck-operator" }} {{- with index .Values "operators" "eck-operator" }}
eck-operator: eck-operator:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
{{- with index .Values "operators" "cloudnative-pg" }} {{- with index .Values "operators" "cloudnative-pg" }}
cloudnative-pg: cloudnative-pg:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }}
{{- with $.Values.metrics }}
monitoring:
podMonitorEnabled: {{ .enabled }}
{{- end }}
{{- end }}
{{- with index .Values "operators" "strimzi-kafka-operator" }}
strimzi-kafka-operator:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- with $.Values.metrics }} {{- with $.Values.metrics }}

View File

@ -57,11 +57,13 @@ aws-efs-csi-driver:
enabled: {{ default false (index .Values "storage" "aws-efs-csi-driver" "enabled")}} enabled: {{ default false (index .Values "storage" "aws-efs-csi-driver" "enabled")}}
replicaCount: {{ ternary 2 1 .Values.global.highAvailable }} replicaCount: {{ ternary 2 1 .Values.global.highAvailable }}
controller: controller:
{{- with index .Values "storage" "aws-efs-csi-driver" "IamArn" }}
extraEnv: extraEnv:
- name: AWS_ROLE_ARN - name: AWS_ROLE_ARN
value: {{ index .Values "storage" "aws-efs-csi-driver" "IamArn" | quote }} value: {{ . | quote }}
- name: AWS_WEB_IDENTITY_TOKEN_FILE - name: AWS_WEB_IDENTITY_TOKEN_FILE
value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token" value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
{{- end }}
# The EFS controller still needs hostNetwork and cannot update on single-node control planes otherwise # The EFS controller still needs hostNetwork and cannot update on single-node control planes otherwise
{{- if not .Values.global.highAvailable }} {{- if not .Values.global.highAvailable }}
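With the new guard, the AWS role env vars are only rendered when an IAM role is actually configured, e.g. via a values snippet like this (the ARN is a placeholder):

```yaml
storage:
  aws-efs-csi-driver:
    enabled: true
    IamArn: "arn:aws:iam::123456789012:role/example-efs-csi"   # placeholder ARN
```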

View File

@ -1,5 +1,8 @@
{{- define "telemetry-values" }} {{- define "telemetry-values" }}
metrics:
enabled: {{ .Values.metrics.enabled }}
{{- if index .Values "telemetry" "fluent-bit" }} {{- if index .Values "telemetry" "fluent-bit" }}
fluent-bit: fluent-bit:
{{- with index .Values.telemetry "fluent-bit" }} {{- with index .Values.telemetry "fluent-bit" }}
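Since the root chart now forwards its global metrics flag into the telemetry chart, enabling the Kafka dashboards and PodMonitors defined earlier should only need the usual top-level switches, roughly:

```yaml
metrics:
  enabled: true
telemetry:
  enabled: true
  data-prepper:
    enabled: true   # the Kafka resources above are gated on data-prepper.enabled
```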

9 charts/kubezero/update.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash
set -ex
. ../../scripts/lib-update.sh
login_ecr_public
update_helm
update_docs

View File

@ -96,7 +96,7 @@ telemetry:
operators: operators:
enabled: false enabled: false
namespace: operators namespace: operators
targetRevision: 0.1.6 targetRevision: 0.1.8
metrics: metrics:
enabled: false enabled: false

BIN docs/images/k8s-1.31.png (Stored with Git LFS) Normal file

Binary file not shown.

17 docs/v1.31.md Normal file
View File

@ -0,0 +1,17 @@
# ![k8s-v1.31](images/k8s-1.31.png) KubeZero 1.31 - Elli
## What's new - Major themes
- all KubeZero and support AMIs are now based on Alpine 3.21
## Features and fixes
## Version upgrades
<WIP>
- cilium 1.16.3
- istio 1.22.3
- ArgoCD 2.13.0 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- Prometheus 2.55.1 / Grafana 11.3.0
- Nvidia container toolkit 1.17, drivers 565.57.01, CUDA 12.7
## Resources
- [Kubernetes v1.31 upstream release blog](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/)