Compare commits

1 Commit

Author SHA1 Message Date
394ddb1e6f chore(deps): update helm release fluent-bit to v0.48.3 2024-11-30 03:02:06 +00:00
51 changed files with 239 additions and 664 deletions

@@ -227,10 +227,7 @@ cluster-autoscaler:
scan-interval: 30s
skip-nodes-with-local-storage: false
balance-similar-node-groups: true
ignore-daemonsets-utilization: true
ignore-taint: "node.cilium.io/agent-not-ready"
# Disable for non-clustered control-plane
# leader-elect: false
#securityContext:
# runAsNonRoot: true
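
The keys above live under the chart's extraArgs map, which the upstream cluster-autoscaler chart renders as command-line flags on the container. A minimal sketch of that mapping (the flag rendering is assumed from the upstream chart's convention, not shown in this diff):

extraArgs:
  scan-interval: 30s                        # rendered as --scan-interval=30s
  balance-similar-node-groups: true         # rendered as --balance-similar-node-groups=true
  ignore-daemonsets-utilization: true       # rendered as --ignore-daemonsets-utilization=true
  ignore-taint: "node.cilium.io/agent-not-ready"   # nodes carrying this taint still count as ready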

@@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.2.7
version: 0.2.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -18,11 +18,11 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: argo-events
version: 2.4.9
version: 2.4.8
repository: https://argoproj.github.io/argo-helm
condition: argo-events.enabled
- name: argo-cd
version: 7.7.7
version: 7.7.2
repository: https://argoproj.github.io/argo-helm
condition: argo-cd.enabled
- name: argocd-apps
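
Each dependency's condition field names a values key that gates the subchart, so whether the pinned argo-events or argo-cd versions are pulled in at all is controlled from the umbrella chart's values. A minimal sketch:

argo-events:
  enabled: true     # read via `condition: argo-events.enabled`; set false to skip the subchart
argo-cd:
  enabled: false    # subchart and its templates are omitted entirely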

@@ -45,9 +45,7 @@ argo-cd:
format: json
image:
repository: public.ecr.aws/zero-downtime/zdt-argocd
tag: v2.13.1
networkPolicy:
create: true
tag: v2.13.0
configs:
styles: |
@@ -56,8 +54,8 @@ argo-cd:
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
cm:
ui.bannercontent: "KubeZero v1.31 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.31"
ui.bannercontent: "KubeZero v1.30 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.30"
ui.bannerpermanent: "true"
ui.bannerposition: "bottom"

@@ -1,6 +1,6 @@
# kubezero-ci
![Version: 0.8.19](https://img.shields.io/badge/Version-0.8.19-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.18](https://img.shields.io/badge/Version-0.8.18-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI
@@ -18,11 +18,11 @@ Kubernetes: `>= 1.25.0`
| Repository | Name | Version |
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.9.0 |
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.8.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 5.7.15 |
| https://charts.jenkins.io | jenkins | 5.7.12 |
| https://dl.gitea.io/charts/ | gitea | 10.6.0 |
| https://docs.renovatebot.com/helm-charts | renovate | 39.33.1 |
| https://docs.renovatebot.com/helm-charts | renovate | 38.142.6 |
# Jenkins
- default build retention 10 builds, 32days
@@ -92,7 +92,7 @@ Kubernetes: `>= 1.25.0`
| jenkins.agent.defaultsProviderTemplate | string | `"podman-aws"` | |
| jenkins.agent.idleMinutes | int | `30` | |
| jenkins.agent.image.repository | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.image.tag | string | `"v0.7.0"` | |
| jenkins.agent.image.tag | string | `"v0.6.2"` | |
| jenkins.agent.inheritYamlMergeStrategy | bool | `true` | |
| jenkins.agent.podName | string | `"podman-aws"` | |
| jenkins.agent.podRetention | string | `"Default"` | |
@@ -113,7 +113,7 @@ Kubernetes: `>= 1.25.0`
| jenkins.controller.containerEnv[1].value | string | `"none"` | |
| jenkins.controller.disableRememberMe | bool | `true` | |
| jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | |
| jenkins.controller.image.tag | string | `"lts-alpine-jdk21"` | |
| jenkins.controller.image.tag | string | `"alpine-jdk21"` | |
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
@@ -162,6 +162,7 @@ Kubernetes: `>= 1.25.0`
| renovate.env.LOG_FORMAT | string | `"json"` | |
| renovate.securityContext.fsGroup | int | `1000` | |
| trivy.enabled | bool | `false` | |
| trivy.image.tag | string | `"0.56.2"` | |
| trivy.persistence.enabled | bool | `true` | |
| trivy.persistence.size | string | `"1Gi"` | |
| trivy.rbac.create | bool | `false` | |

@@ -12,18 +12,6 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
The changelog until v1.5.7 was auto-generated based on git commits.
Those entries include a reference to the git commit to be able to get more details.
## 5.7.15
Update `jenkins/jenkins` to version `2.479.2-jdk17`
## 5.7.14
Update `kubernetes` to version `4296.v20a_7e4d77cf6`
## 5.7.13
Update `configuration-as-code` to version `1897.v79281e066ea_7`
## 5.7.12
Update `configuration-as-code` to version `1887.v9e47623cb_043`

@@ -1,10 +1,10 @@
annotations:
artifacthub.io/category: integration-delivery
artifacthub.io/changes: |
- Update `jenkins/jenkins` to version `2.479.2-jdk17`
- Update `configuration-as-code` to version `1887.v9e47623cb_043`
artifacthub.io/images: |
- name: jenkins
image: docker.io/jenkins/jenkins:2.479.2-jdk17
image: docker.io/jenkins/jenkins:2.479.1-jdk17
- name: k8s-sidecar
image: docker.io/kiwigrid/k8s-sidecar:1.28.0
- name: inbound-agent
@@ -18,7 +18,7 @@ annotations:
- name: support
url: https://github.com/jenkinsci/helm-charts/issues
apiVersion: v2
appVersion: 2.479.2
appVersion: 2.479.1
description: 'Jenkins - Build great things at any scale! As the leading open source
automation server, Jenkins provides over 1800 plugins to support building, deploying
and automating any project. '
@@ -46,4 +46,4 @@ sources:
- https://github.com/maorfr/kube-tasks
- https://github.com/jenkinsci/configuration-as-code-plugin
type: application
version: 5.7.15
version: 5.7.12

@@ -165,7 +165,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
| [controller.initializeOnce](./values.yaml#L420) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
| [controller.installLatestPlugins](./values.yaml#L409) | bool | Download the minimum required version or latest version of all dependencies | `true` |
| [controller.installLatestSpecifiedPlugins](./values.yaml#L412) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
| [controller.installPlugins](./values.yaml#L401) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4296.v20a_7e4d77cf6","workflow-aggregator:600.vb_57cdd26fdd7","git:5.6.0","configuration-as-code:1897.v79281e066ea_7"]` |
| [controller.installPlugins](./values.yaml#L401) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4295.v7fa_01b_309c95","workflow-aggregator:600.vb_57cdd26fdd7","git:5.6.0","configuration-as-code:1887.v9e47623cb_043"]` |
| [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` |
| [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
| [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |

@@ -399,10 +399,10 @@ controller:
# Plugins will be installed during Jenkins controller start
# -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
installPlugins:
- kubernetes:4296.v20a_7e4d77cf6
- kubernetes:4295.v7fa_01b_309c95
- workflow-aggregator:600.vb_57cdd26fdd7
- git:5.6.0
- configuration-as-code:1897.v79281e066ea_7
- configuration-as-code:1887.v9e47623cb_043
# If set to false, Jenkins will download the minimum required version of all dependencies.
# -- Download the minimum required version or latest version of all dependencies

@@ -2,7 +2,7 @@ gitea:
enabled: false
image:
tag: 1.22.6
tag: 1.22.3
rootless: true
replicaCount: 1
@@ -90,7 +90,7 @@ jenkins:
controller:
image:
tag: lts-alpine-jdk21
tag: alpine-jdk21
#tagLabel: alpine
disableRememberMe: true
prometheus:
@@ -276,8 +276,8 @@ jenkins:
trivy:
enabled: false
#image:
#tag: 0.57.0
image:
tag: 0.57.0
persistence:
enabled: true
size: 1Gi

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-lib
description: KubeZero helm library - common helm functions and blocks
type: library
version: 0.2.1
version: 0.2.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -10,4 +10,4 @@ keywords:
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
kubeVersion: ">= 1.30.0"
kubeVersion: ">= 1.20.0"

@@ -2,7 +2,7 @@ kubezero-lib
============
KubeZero helm library - common helm functions and blocks
Current chart version is `0.2.1`
Current chart version is `0.1.0`
Source code can be found [here](https://kubezero.com)

@@ -1,10 +0,0 @@
{{- /*
maps pods to the kube control-plane
*/ -}}
{{- define "kubezero-lib.control-plane" -}}
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{{- end -}}
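
The helper bundles the usual control-plane pinning so a chart can replace repeated nodeSelector/tolerations stanzas with a single include. A minimal sketch of input and rendered output, using the nindent 2 style seen in the templates later in this compare:

# template input
cert-manager:
  {{- include "kubezero-lib.control-plane" . | nindent 2 }}

# rendered result
cert-manager:
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule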

@@ -25,9 +25,9 @@ Common naming functions
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- /*
{{/*
Selector labels
*/ -}}
*/}}
{{- define "kubezero-lib.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubezero-lib.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
@@ -49,7 +49,7 @@ This takes an array of three values:
- the top context
- the template name of the overrides (destination)
- the template name of the base (source)
*/ -}}
*/}}
{{- define "kubezero-lib.util.merge" -}}
{{- $top := first . -}}
{{- $overrides := fromYaml (include (index . 1) $top) | default (dict ) -}}
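
The hunk cuts off before the merge itself, so the exact semantics are not shown here; assuming the remainder of the helper deep-merges the overrides over the base (as in the widely copied Helm merge util), usage looks roughly like this, with both template names hypothetical:

{{- define "myapp.base" -}}
metadata:
  labels:
    app: myapp
    tier: backend
{{- end -}}

{{- define "myapp.overrides" -}}
metadata:
  labels:
    tier: frontend
{{- end -}}

# argument order matches the comment above: top context, overrides, base
{{ include "kubezero-lib.util.merge" (list . "myapp.overrides" "myapp.base") }}
# expected result: labels app=myapp, tier=frontend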

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.8.13
version: 0.8.14
appVersion: 1.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -24,7 +24,7 @@ dependencies:
repository: https://fluent.github.io/helm-charts
condition: fluentd.enabled
- name: fluent-bit
version: 0.47.10
version: 0.48.3
repository: https://fluent.github.io/helm-charts
condition: fluent-bit.enabled
kubeVersion: ">= 1.26.0"

@@ -90,11 +90,12 @@ Kubernetes: `>= 1.26.0`
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].equal[0] | string | `"namespace"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].source_matchers[0] | string | `"alertname = InfoInhibitor"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].target_matchers[0] | string | `"severity = info"` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[0] | string | `"alertname"` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[1] | string | `"severity"` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[2] | string | `"status"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].source_matchers[0] | string | `"alertname = ClusterAutoscalerNodeGroupsEnabled"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].target_matchers[0] | string | `"alertname =~ \"KubeCPUOvercommit|KubeMemoryOvercommit\""` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[0] | string | `"severity"` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[1] | string | `"clusterName"` | |
| kube-prometheus-stack.alertmanager.config.route.group_interval | string | `"5m"` | |
| kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"30s"` | |
| kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"10s"` | |
| kube-prometheus-stack.alertmanager.config.route.repeat_interval | string | `"4h"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[0].matchers[0] | string | `"severity = none"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[0].receiver | string | `"null"` | |

@@ -18,7 +18,7 @@
"subdir": "contrib/mixin"
}
},
"version": "2b323071a8bd4f02ddaf63e0dfa1fd98c221dccb",
"version": "19aa0dbe8fd6317a237bae9b6ea52a4f1b445b19",
"sum": "IXI3LQIT9NmTPJAk8WLUJd5+qZfcGpeNCyWIK7oEpws="
},
{
@@ -88,7 +88,7 @@
"subdir": "grafana-builder"
}
},
"version": "767befa8fb46a07be516dec2777d7d89909a529d",
"version": "5a6b86b475e427b2dbd9e4af0bcafbb6da0507a5",
"sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="
},
{
@@ -118,8 +118,8 @@
"subdir": ""
}
},
"version": "a3fbf21977deb89b7d843eb8371170c011ea6835",
"sum": "57zW2IGJ9zbYd8BI0qe6JkoWTRSMNiBUWC6+YcnEsWo="
"version": "bdbf7f45cedf37d07567be7519fa4139043f9335",
"sum": "j4EAKfqkbPvBFGnBjt4hex2bdNHPpuFWrCxfq5L6EkU="
},
{
"source": {
@@ -128,7 +128,7 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "32e7727ff4613b0f55dfc18aff15afb8c04d03c5",
"version": "17151aca659e0659259b5e1f5675acf849281ade",
"sum": "lO7jUSzAIy8Yk9pOWJIWgPRhubkWzVh56W6wtYfbVH4="
},
{
@@ -138,7 +138,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "32e7727ff4613b0f55dfc18aff15afb8c04d03c5",
"version": "17151aca659e0659259b5e1f5675acf849281ade",
"sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
},
{
@@ -148,8 +148,8 @@
"subdir": "jsonnet/kube-prometheus"
}
},
"version": "7e5a571a3fb735c78e17c76a637eb7e8bb5dd086",
"sum": "uTw/Mj+X91S+oqUpAX81xcfWPDlox0tdSZY/YBw7nGE="
"version": "c503e5cc5403dd5d56b1c0c5933827baee64aeaf",
"sum": "fJqINQiYJPmllXFFO+Hl5HrPYANMbhHFUQ28tl0Vi00="
},
{
"source": {
@@ -158,7 +158,7 @@
"subdir": "jsonnet/mixin"
}
},
"version": "a366602bacb2c8d773a9cee058b6971b8d2e3732",
"version": "e951bd3037a053fea681510ccde211c28dc657e1",
"sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
"name": "prometheus-operator-mixin"
},
@@ -169,8 +169,8 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "a366602bacb2c8d773a9cee058b6971b8d2e3732",
"sum": "z0/lCiMusMHTqntsosMVGYkVcSZjCpyZBmUMVUsK5nA="
"version": "e951bd3037a053fea681510ccde211c28dc657e1",
"sum": "YOJjmladGD1PcgNae0h88Mm235CsZSfwf2a4DIcMJFU="
},
{
"source": {
@@ -179,7 +179,7 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "0f65e8fa5fc72d2678655105c0213b416ca6f34c",
"version": "f6b942cf9b3a503d59192eada300d2ad97cba82f",
"sum": "Mf4h1BYLle2nrgjf/HXrBbl0Zk8N+xaoEM017o0BC+k=",
"name": "alertmanager"
},
@@ -190,7 +190,7 @@
"subdir": "docs/node-mixin"
}
},
"version": "cf8c6891cc610e54f70383addd4bb6079f0add35",
"version": "49d177bf95417b117ab612a376e2434d5dd61c2d",
"sum": "cQCW+1N0Xae5yXecCWDK2oAlN0luBS/5GrwBYSlaFms="
},
{
@@ -200,8 +200,8 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "b407c2930da4f50c0d17fc39404c6302a9eb740b",
"sum": "OYT5u3S8DbamuJV/v3gbWSteOvFzMeNwMj+u4Apk7jM=",
"version": "789c9b1a5e455850ed9b3c89cafb37df75ce1e50",
"sum": "dYLcLzGH4yF3qB7OGC/7z4nqeTNjv42L7Q3BENU8XJI=",
"name": "prometheus"
},
{
@@ -222,7 +222,7 @@
"subdir": "mixin"
}
},
"version": "7037331e6ea7dbe85a1b7af37bf8ea277a80663d",
"version": "f9da21ec0b28073875520159fe72ab744c255b2e",
"sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=",
"name": "thanos-mixin"
}

@@ -29,45 +29,14 @@ local etcdMixin = addMixin({
},
});
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
},
kubernetesControlPlane+: {
prometheusRule+: {
spec+: {
groups: [
(
if group.name == 'kubernetes-resources' then
group {
rules: std.filter(
function(rule)
rule.alert != 'KubeCPUOvercommit' && rule.alert != 'KubeMemoryOvercommit',
group.rules
) + [{
alert: 'ClusterAutoscalerNodeGroupsEnabled',
expr: 'cluster_autoscaler_node_groups_count{job="addons-aws-cluster-autoscaler",node_group_type="autoscaled"} > 0',
'for': '5m',
labels: {
severity: 'none',
},
annotations: {
description: 'Inhibitor rule if the Cluster Autoscaler found at least one node group',
summary: 'Cluster Autoscaler found at least one node group.',
},
}],
}
else
group
)
for group in super.groups
],
},
},
},
};
local kp = (import 'kube-prometheus/main.libsonnet') +
{
values+:: {
common+: {
namespace: 'monitoring',
},
},
};
// We just want the Prometheus Rules
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +

@@ -123,7 +123,7 @@
{
"alert": "KubeDaemonSetRolloutStuck",
"annotations": {
"description": "DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 15m.",
"description": "DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 15 minutes.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck",
"summary": "DaemonSet rollout is stuck."
},
@@ -228,6 +228,32 @@
{
"name": "kubernetes-resources",
"rules": [
{
"alert": "KubeCPUOvercommit",
"annotations": {
"description": "Cluster {{ $labels.cluster }} has overcommitted CPU resource requests for Pods by {{ $value }} CPU shares and cannot tolerate node failure.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit",
"summary": "Cluster has overcommitted CPU resource requests."
},
"expr": "sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster) - max(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster)) > 0\nand\n(sum(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster) - max(kube_node_status_allocatable{job=\"kube-state-metrics\",resource=\"cpu\"}) by (cluster)) > 0\n",
"for": "10m",
"labels": {
"severity": "warning"
}
},
{
"alert": "KubeMemoryOvercommit",
"annotations": {
"description": "Cluster {{ $labels.cluster }} has overcommitted memory resource requests for Pods by {{ $value | humanize }} bytes and cannot tolerate node failure.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit",
"summary": "Cluster has overcommitted memory resource requests."
},
"expr": "sum(namespace_memory:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster) - max(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster)) > 0\nand\n(sum(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster) - max(kube_node_status_allocatable{resource=\"memory\", job=\"kube-state-metrics\"}) by (cluster)) > 0\n",
"for": "10m",
"labels": {
"severity": "warning"
}
},
{
"alert": "KubeCPUQuotaOvercommit",
"annotations": {
@@ -305,18 +331,6 @@
"labels": {
"severity": "info"
}
},
{
"alert": "ClusterAutoscalerNodeGroupsEnabled",
"annotations": {
"description": "Inhibitor rule if the Cluster Autoscaler found at least one node group",
"summary": "Cluster Autoscaler found at least one node group."
},
"expr": "cluster_autoscaler_node_groups_count{job=\"addons-aws-cluster-autoscaler\",node_group_type=\"autoscaled\"} > 0",
"for": "5m",
"labels": {
"severity": "none"
}
}
]
},
@@ -492,7 +506,7 @@
{
"alert": "KubeClientCertificateExpiration",
"annotations": {
"description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days on cluster {{ $labels.cluster }}.",
"description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration",
"summary": "Client certificate is about to expire."
},
@@ -505,7 +519,7 @@
{
"alert": "KubeClientCertificateExpiration",
"annotations": {
"description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours on cluster {{ $labels.cluster }}.",
"description": "A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration",
"summary": "Client certificate is about to expire."
},

@@ -6,7 +6,7 @@
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/name": "prometheus-operator",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "0.78.2",
"app.kubernetes.io/version": "0.78.1",
"prometheus": "k8s",
"role": "alert-rules"
},

@@ -7,7 +7,7 @@
"app.kubernetes.io/instance": "k8s",
"app.kubernetes.io/name": "prometheus",
"app.kubernetes.io/part-of": "kube-prometheus",
"app.kubernetes.io/version": "3.0.1",
"app.kubernetes.io/version": "2.55.1",
"prometheus": "k8s",
"role": "alert-rules"
},
@@ -74,9 +74,9 @@
{
"alert": "PrometheusErrorSendingAlertsToSomeAlertmanagers",
"annotations": {
"description": "{{ printf \"%.1f\" $value }}% of alerts sent by Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}} were affected by errors.",
"description": "{{ printf \"%.1f\" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.",
"runbook_url": "https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers",
"summary": "More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors."
"summary": "Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager."
},
"expr": "(\n rate(prometheus_notifications_errors_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n/\n rate(prometheus_notifications_sent_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n)\n* 100\n> 1\n",
"for": "15m",

@@ -89,7 +89,7 @@ spec:
severity: warning
- alert: KubeDaemonSetRolloutStuck
annotations:
description: DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} has not finished or progressed for at least 15m.
description: DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} has not finished or progressed for at least 15 minutes.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubedaemonsetrolloutstuck
summary: DaemonSet rollout is stuck.
expr: "(\n (\n kube_daemonset_status_current_number_scheduled{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n ) or (\n kube_daemonset_status_number_misscheduled{job=\"kube-state-metrics\"}\n !=\n 0\n ) or (\n kube_daemonset_status_updated_number_scheduled{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n ) or (\n kube_daemonset_status_number_available{job=\"kube-state-metrics\"}\n !=\n kube_daemonset_status_desired_number_scheduled{job=\"kube-state-metrics\"}\n )\n) and (\n changes(kube_daemonset_status_updated_number_scheduled{job=\"kube-state-metrics\"}[5m])\n ==\n 0\n)\n"
@@ -166,6 +166,36 @@ spec:
severity: warning
- name: kubernetes-resources
rules:
- alert: KubeCPUOvercommit
annotations:
description: Cluster {{`{{`}} $labels.cluster {{`}}`}} has overcommitted CPU resource requests for Pods by {{`{{`}} $value {{`}}`}} CPU shares and cannot tolerate node failure.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit
summary: Cluster has overcommitted CPU resource requests.
expr: 'sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
and
(sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
'
for: 10m
labels:
severity: warning
- alert: KubeMemoryOvercommit
annotations:
description: Cluster {{`{{`}} $labels.cluster {{`}}`}} has overcommitted memory resource requests for Pods by {{`{{`}} $value | humanize {{`}}`}} bytes and cannot tolerate node failure.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubememoryovercommit
summary: Cluster has overcommitted memory resource requests.
expr: 'sum(namespace_memory:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
and
(sum(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster) - max(kube_node_status_allocatable{resource="memory", job="kube-state-metrics"}) by (cluster)) > 0
'
for: 10m
labels:
severity: warning
- alert: KubeCPUQuotaOvercommit
annotations:
description: Cluster {{`{{`}} $labels.cluster {{`}}`}} has overcommitted CPU resource requests for Namespaces.
@@ -220,14 +250,6 @@ spec:
for: 15m
labels:
severity: info
- alert: ClusterAutoscalerNodeGroupsEnabled
annotations:
description: Inhibitor rule if the Cluster Autoscaler found at least one node group
summary: Cluster Autoscaler found at least one node group.
expr: cluster_autoscaler_node_groups_count{job="addons-aws-cluster-autoscaler",node_group_type="autoscaled"} > 0
for: 5m
labels:
severity: none
- name: kubernetes-storage
rules:
- alert: KubePersistentVolumeFillingUp
@@ -373,7 +395,7 @@ spec:
rules:
- alert: KubeClientCertificateExpiration
annotations:
description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days on cluster {{`{{`}} $labels.cluster {{`}}`}}.
description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 7.0 days.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
summary: Client certificate is about to expire.
expr: 'histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800
@@ -388,7 +410,7 @@ spec:
severity: warning
- alert: KubeClientCertificateExpiration
annotations:
description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours on cluster {{`{{`}} $labels.cluster {{`}}`}}.
description: A client certificate used to authenticate to kubernetes apiserver is expiring in less than 24.0 hours.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeclientcertificateexpiration
summary: Client certificate is about to expire.
expr: 'histogram_quantile(0.01, sum without (namespace, service, endpoint) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400
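
The two KubeClientCertificateExpiration rules share the same expression and differ only in the constant, which is the alert window converted to seconds:

#  7 days:  7 * 24 * 3600 = 604800 s   (first rule, severity: warning)
# 24 hours:      24 * 3600 =  86400 s   (second rule; upstream pairs this with a higher severity)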

@@ -57,9 +57,9 @@ spec:
severity: warning
- alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
annotations:
description: '{{`{{`}} printf "%.1f" $value {{`}}`}}% of alerts sent by Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.alertmanager{{`}}`}} were affected by errors.'
description: '{{`{{`}} printf "%.1f" $value {{`}}`}}% errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.alertmanager{{`}}`}}.'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuserrorsendingalertstosomealertmanagers
summary: More than 1% of alerts sent by Prometheus to a specific Alertmanager were affected by errors.
summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager.
expr: "(\n rate(prometheus_notifications_errors_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n/\n rate(prometheus_notifications_sent_total{job=\"prometheus-k8s\",namespace=\"monitoring\"}[5m])\n)\n* 100\n> 1\n"
for: 15m
labels:

@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "zdt-inhibitors" | trunc 63 | trimSuffix "-" }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
groups:
- name: zdt-inhibitors
rules:
- alert: ClusterAutoscalerNodeGroupsEnabled
annotations:
description: "This rule is meant to inhibit other rules and should not be forwarded.\nThe Cluster Autoscaler found at least one node group"
summary: Cluster Autoscaler found at least one node group.
expr: 'cluster_autoscaler_node_groups_count{job="addons-aws-cluster-autoscaler",node_group_type="autoscaled"} > 0'
for: 15m
labels:
severity: none
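
The alert is built to fire continuously while the Cluster Autoscaler reports an autoscaled node group: severity none keeps it out of real notification channels (the route for severity = none targets the null receiver elsewhere in this compare), so it serves purely as an inhibition source. Both halves of the pairing appear in this compare:

inhibit_rules:
  - source_matchers:
      - alertname = ClusterAutoscalerNodeGroupsEnabled         # fires whenever node groups exist
    target_matchers:
      - alertname =~ "KubeCPUOvercommit|KubeMemoryOvercommit"  # muted; the autoscaler adds capacity instead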

@@ -223,8 +223,8 @@ kube-prometheus-stack:
global:
resolve_timeout: 5m
route:
group_by: ['alertname', 'severity', 'status']
group_wait: 30s
group_by: ['severity', 'clusterName']
group_wait: 10s
group_interval: 5m
repeat_interval: 4h
routes:
@@ -252,6 +252,11 @@ kube-prometheus-stack:
- alertname = InfoInhibitor
target_matchers:
- severity = info
# Disable cluster overcommit alerts if the cluster autoscaler is available
- source_matchers:
- alertname = ClusterAutoscalerNodeGroupsEnabled
target_matchers:
- alertname =~ "KubeCPUOvercommit|KubeMemoryOvercommit"
alertmanagerSpec:
# externalUrl:
logFormat: json

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-operators
description: Various operators supported by KubeZero
type: application
version: 0.1.8
version: 0.1.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -10,7 +10,6 @@ keywords:
- operators
- opensearch
- postgres
- kafka
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@@ -30,8 +29,4 @@ dependencies:
version: 0.22.1
repository: https://cloudnative-pg.github.io/charts
condition: cloudnative-pg.enabled
- name: strimzi-kafka-operator
version: 0.44.0
repository: "oci://quay.io/strimzi-helm"
condition: strimzi-kafka-operator.enabled
kubeVersion: ">= 1.26.0"

@@ -1,6 +1,6 @@
# kubezero-operators
![Version: 0.1.7](https://img.shields.io/badge/Version-0.1.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.1.6](https://img.shields.io/badge/Version-0.1.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
Various operators supported by KubeZero
@@ -22,7 +22,6 @@ Kubernetes: `>= 1.26.0`
| https://cloudnative-pg.github.io/charts | cloudnative-pg | 0.22.1 |
| https://helm.elastic.co | eck-operator | 2.15.0 |
| https://opensearch-project.github.io/opensearch-k8s-operator/ | opensearch-operator | 2.7.0 |
| oci://quay.io/strimzi-helm | strimzi-kafka-operator | 0.44.0 |
## Values
@@ -47,17 +46,6 @@ Kubernetes: `>= 1.26.0`
| opensearch-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| opensearch-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| opensearch-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| strimzi-kafka-operator.enabled | bool | `false` | |
| strimzi-kafka-operator.leaderElection.enable | bool | `false` | |
| strimzi-kafka-operator.monitoring.podMonitorEnabled | bool | `false` | |
| strimzi-kafka-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| strimzi-kafka-operator.resources.limits.memory | string | `"384Mi"` | |
| strimzi-kafka-operator.resources.requests.cpu | string | `"20m"` | |
| strimzi-kafka-operator.resources.requests.memory | string | `"256Mi"` | |
| strimzi-kafka-operator.revisionHistoryLimit | int | `3` | |
| strimzi-kafka-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| strimzi-kafka-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| strimzi-kafka-operator.watchAnyNamespace | bool | `true` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

@@ -1,10 +0,0 @@
configmap: grafana-dashboards-strimzi
gzip: true
condition: 'index .Values "strimzi-kafka-operator" "monitoring" "podMonitorEnabled"'
folder: Operators
dashboards:
- name: strimzi-kafka-operator
url: https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/refs/heads/main/helm-charts/helm3/strimzi-kafka-operator/files/grafana-dashboards/strimzi-operators.json
tags:
- Kafka
- Strimzi
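
The condition string above is evaluated with Helm's index function because the values key contains hyphens, which dotted access cannot parse. A minimal sketch:

{{- if index .Values "strimzi-kafka-operator" "monitoring" "podMonitorEnabled" }}
# equivalent in intent to .Values.strimzi-kafka-operator.monitoring.podMonitorEnabled,
# which the template parser would reject because of the hyphens
{{- end }}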

@@ -1,7 +1,7 @@
configmap: grafana-dashboards-pg
configmap: grafana-dashboards
gzip: true
condition: 'index .Values "cloudnative-pg" "monitoring" "podMonitorEnabled"'
folder: Operators
folder: KubeZero
dashboards:
- name: Cloudnative-pg
url: https://raw.githubusercontent.com/cloudnative-pg/grafana-dashboards/refs/heads/main/charts/cluster/grafana-dashboard.json

File diff suppressed because one or more lines are too long

@@ -1,15 +0,0 @@
{{- if index .Values "strimzi-kafka-operator" "monitoring" "podMonitorEnabled" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards-strimzi" | trunc 63 | trimSuffix "-" }}
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"
{{- include "kubezero-lib.labels" . | nindent 4 }}
annotations:
k8s-sidecar-target-directory: Operators
binaryData:
strimzi-kafka-operator.json.gz:
H4sIAAAAAAAC/+1dW3PbuBV+96/g8KETzzhbUbIcb2f2wXE2u2ljx2M76bSJRwORkIQ1SXBBMLbiqr+9B+ANvElyZMdy57zYIkACONfvI0CQdzuWZY9Ggv6ZMEFj+2/WZyixrDv9F+rkPKJQak8FmZCQ2Ht5BfNai0MS6PN/q1d8pSJmPFR1r37a/2lo64rFXnt3EQmpX+8slkQ2e7pQpUsb84gkMU+ES+stRoIHVM5oEjfbPSvr7j9UyQIaU8FoS8OXZV3aMPy9UmfZJAw5SANqUpZIe7J9FsvCLmX/UDNOmC/fKZU6e2WpIW1VCuMcGpKxr+qlSKhRPmNeSylzeXjMfS5Ug2I6Ji96e1bfceDPcLhnObtm07mUR6Us1l+sI58KWRlCaZx4NuZEeHZWt9D/r3YyjdvUY7I2WnvCYpf4/6JEgPWFPOGhnEF9T1eCT0azS859yaKiUJvFGejfPguvtaunWtcGbHF9l/s+iWKqrpwQPy5UslrFMAbmnfHSiql2a5a6geP+vlFwmw83O56r41wtVQ8rLjOGf1WUgVKmVMaG25iOs56TwFmCTt5pdz6yi+JF9svojEltHfs4iSUPrHOatmyoIze24DftwbRaoxNGfe+YhxM2rWrVoxOS+LKq69R82mPNQigOuHZxW84g382478W2ccKiIn9AooiF06oe67rUZ/KoGrXVLol0le3tMPH9ioYLPccgQevFSnn0VtXZp389shv1i1rJot58ofs4oi4jfrUJ8/KriuyGfjp1SMYx9xNJazJBoqZRU2lWm25yK0HEUBq2Kecr8RPVm1LeKmGXdSAgkJc0f9jbWabZq04nSUKmzRPykBphYoQxB+yDfJDC61UjnjtzxUEtV6xKFU57qugXhwG5fQOBdsZZqOPF6RVNtHpwqryTzNpaQKNLnWe7Kv+AXMAm87yaJJKb1RzgL0zhQVXP4PgbJHFihoeyWOLSD62hZUP+d5s+ZvsklqdcnqpI6/RtnUvUxXY132lfiPN032pLCImbMwp5KpTHMxJOaR0csoDtEvwG/OA9mfNEZnjWsFnkJ1MWfqrTpYfO7fQ20mERJ8GLWAoWfGNAA7PMfXfNQu+XL/Y/yOSafLEXu9VrJ1xATst5zqhGdFLPCyUVoM+3xJU6/pxNkEWPwzo+b4GUuJP+IaasxpQeIgoiSoko++shijNESEFI2QBSPgJkbAGsqGEgqiCqIKo8Kqocrocqwx6iCqLKBqhyySPmbgGs6HHECCwILAgsGwHLYAWwOP0171ccRBZElg2QBZJuSF25BdiSjQTBBcEFweVxweVgTXA5QHBBcNkAXE6YEOAF5JqK/hYgTDocS4/H6iPQINAg0Dwq0PTXXMd3cCEfgeaBgGbbcAZRBlEGUeaHzpXtd6DMAFEGUWYDlHkNHjqlWwAw6UAQWhBaEFp+7ExZB7QM9xFaEFo2X4bhYnsWYjjeuyDAIMD84BmyLoDB55IRYDYBmHM6Jj4J3W24fSnGcl+A2Y59q686phd6W7hx9Zy6PHSZz7K919u8bzUiPpWSvnQh/GPmdiO6q3fjNhsityx+zYVHxQUEeDOmi5OOqfJn6v2bCr7sNDNxagBvOek9GVO/kYWyyjOfuDSA3lqyiD5pTMSRz6Zhdk6vVu0JcnMh56ktfRY2YHrCfP9DRFwm5xUXLLO7p3J0R4JPYwCS2VvRok+ViOmUhl67ipQTFVvR2+u/sm+NPNwC+pBeIOnodB93mEPJ/k4ZLeJ+ATiqkAi75dR/Mk/O6mlKBaYCzQv2TSljWGc8AEb0DVNpdJxkXdx1MbGs66VSaZTJUdoOKVCIBsuCTLFMbEiFLmTxadtQpoInUZoJGooPTHsvHWTJEXM/u+tqjE8m3cyzGqHV9xC0UnGkqhtRVVCWkA/LVQ9rQHi4AgcPO97fcLicjhY5pZ0JVv3CY3Hkk4KB6jeGVE6IzAw75lIBQ+UEFYXv8y4NrlbVWZnJKqPKXS8AOGO1dpX+m/cM28AEWegKSmJqUMIKCxjFiQv8MJ4k/khy4OufndnV7q41nlsvFGd8cH6Ymvxt0c6dpqaLxUbU5qIQwqqyHCuiwpqBxppsR9ZfUIOkB0kPkh4kPUh6kPS0kp7DNUnPz0h6tpz0TAjzqfeMCc9bLQCSHSQ7SHaQ7CDZQbLz0GSn8bhLB9sZOsh2tpzt+Ny9ftZsp0ZzbpgEliOt9yAXch7kPMh5kPMg50HO8+CrWs5BxwzPK+Q8W855nvFalnrXP87tIM9BnoM8B3kO8pzHX8jq5DkHyHO2nOcALWDcY6DzZ0x5zgohkPUg60HWg6wHWc//O+t54tWsFsqjEon6auGHbAyNjUXpelcfOdG2bG6rUiEvEfoX0Bqo8OJRQG4fggk1txU+BUc6ATwOksCqSm2p0d+fHK25eU4lkUfYNNfv2tj68/Jdc8WmdABbNgG6KJ/864tdTE5xopoCS8YDzmN86nOtzerPEG3uCwgQ1UAUVqPCTteY0235VLRxgPR7teP5KQlok26Uidz+9TZiYm4pHFhOPCLBI+WHdE1lpiPQ0i5RpHJSDUJH8buY29+n00dQixqSdcmtVD1PqhnF9U/zGF9LNzv10u8gFKuT2rAjqa1YK1IA/jslnjbQ4+wfL8ATMN16EbBQg6LrQ/KiYs9SiXPPyreKj9SnjYHXu3TXKqDWLTPuiCoXSIFWw4wkQTQK4t1ugNXJrgagcFkoGx9hXoq6dVj9PjgN8pvFCrip8osZm8hmhQE6ufuXtYKEcSppZtkOE6Q+rlhgAvd/9K0Cmur4u144orCExAVRkTw1wLye1/O8n75j4RwCpHZCWtGWBdIa7SLqbQtLbpLasnOLnFxMSajuUteTkN6CL3r0dZqGWgaos3EDLdvuwz16u7KZXiPRfcpyzH6jJguS5q14+pKXWsSYn6EtwbpqtsaNe0ldBkuFE1T10y1dLkQFwpYIZB+nP6325N8qnX1aHKyWsw4ana+yuVT/13G6ZjjnfFenmC1+T8Rg0PGiiMMtfFHE3z+d4MshcKIVJ1pxohUnWp/7RKtH3fFc0geeb311zyfpBh1TPv0ezqY+7WzqH1+DUUADLuajBFjSSDvLnave3QapS/zy3y82lIuX6p6eSC7+I9V3H8vD7BbxZUYsiwr1brH0PjNv6gesRRd9bTjZCgzIOtFKsT7GRoDi6jOSIiRFSIqQFD1vUhT8YDp0uC4dws2UT0yHgLtQzYmm7igiwHyK1WSofRha9HkY5I/oPVdu9NtxdXoReRHyIuRFyIuQF+FehHtSo8aTeZ3cCB+821pu5PIklMiOSnZ0rBSyBj3ayRpR3ahcpU4a9FJhADtmNCCliQf6+TiwrPEBk/SN/llP9kWqV7toVtIgUtBmJPvU2Uu3ML8OkQiRuvyd6cRUfcKy8TSbhvjaVC8L9Ur+ke+3fEXBzzmN4XlGdRoLzcvCdL3bfnMxOjv/cPLr5e+/fryofHqiyAZmkNt/JjRdho7aXNtQuFMpndLbGu+y42sWfRT+xTx02z4OkdnWkGrHdBOVKxepMZixcm9
PUnYEgX7z0pnlFpQ8K7Mrl0H0XBuPcOWjH+V+bsa5PTTotNMzDgbmgVMmNHto/HbMg0HPrJmVv/vGb8dLJb7KZVBRaVhlZS9mwwdmw2Yv/X3zwJiTfeWZ483HUlHfN5VMc6uaW611uFgfsvSTxd3XIuI0Ntk3lF7rXdm6iZ3F/wDuyXP7T6AAAA==
{{- end }}

@@ -1,35 +0,0 @@
{{- if index .Values "cloudnative-pg" "monitoring" "podMonitorEnabled" }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "strimzi-kafka-operator" | trunc 63 | trimSuffix "-" }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
strimzi.io/kind: cluster-operator
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- path: /metrics
port: http
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "strimzi-kafka-entity" | trunc 63 | trimSuffix "-" }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: entity-operator
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- path: /metrics
port: healthcheck
{{- end }}
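
A PodMonitor selects pods by label within the listed namespaces, so the first monitor above scrapes whichever pod the Strimzi cluster operator labels accordingly. A hypothetical matching pod:

apiVersion: v1
kind: Pod
metadata:
  name: strimzi-cluster-operator-abc123    # hypothetical name
  labels:
    strimzi.io/kind: cluster-operator      # satisfies the PodMonitor's matchLabels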

@@ -17,7 +17,6 @@ rm -rf charts/eck-operator/charts
yq eval -Mi 'del(.dependencies)' charts/eck-operator/Chart.yaml
# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards-pg.yaml templates/cloudnative-pg/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboards-strimzi.yaml templates/strimzi/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/cloudnative-pg/grafana-dashboards.yaml
update_docs

@@ -13,34 +13,31 @@ opensearch-operator:
- name: SKIP_INIT_CONTAINER
value: "true"
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
eck-operator:
enabled: false
installCRDs: false
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
cloudnative-pg:
enabled: false
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
monitoring:
podMonitorEnabled: false
# We pull in the dashboard via our pipeline !
grafanaDashboard:
create: false
strimzi-kafka-operator:
enabled: false
revisionHistoryLimit: 3
watchAnyNamespace: true
leaderElection:
enable: false
resources:
requests:
cpu: 20m
memory: 256Mi
limits:
memory: 384Mi
monitoring:
podMonitorEnabled: false

@@ -1,7 +1,7 @@
configmap: grafana-dashboards
gzip: true
folder: Telemetry
condition: '.Values.metrics.enabled'
#condition: '.Values.telemetry.metrics.enabled'
dashboards:
#- name: jaeger
# url: https://grafana.com/api/dashboards/10001/revisions/2/download
@@ -9,19 +9,14 @@ dashboards:
# - Jaeger
# - Telemetry
- name: opensearch
url: "https://grafana.com/api/dashboards/15178/revisions/2/download"
url: https://grafana.com/api/dashboards/15178/revisions/2/download
tags:
- OpenSearch
- Telemetry
- name: fluent-logging
url: "https://grafana.com/api/dashboards/7752/revisions/6/download"
url: https://grafana.com/api/dashboards/7752/revisions/6/download
#url: https://grafana.com/api/dashboards/13042/revisions/2/download
tags:
- fluentd
- fluent-bit
- Telemetry
- name: kafka
url: "https://raw.githubusercontent.com/strimzi/strimzi-kafka-operator/refs/heads/main/examples/metrics/grafana-dashboards/strimzi-kraft.json"
tags:
- Telemetry
- Kafka

File diff suppressed because one or more lines are too long

@@ -1,236 +0,0 @@
{{- if index .Values "data-prepper" "enabled" }}
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
name: kraft
labels:
strimzi.io/cluster: telemetry
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
replicas: 1
roles:
- controller
- broker
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: 4Gi
deleteClaim: true
kraftMetadata: shared
---
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: telemetry
annotations:
strimzi.io/node-pools: enabled
strimzi.io/kraft: enabled
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
kafka:
version: 3.8.0
metadataVersion: 3.8-IV0
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
config:
offsets.topic.replication.factor: 1
transaction.state.log.replication.factor: 1
transaction.state.log.min.isr: 1
default.replication.factor: 1
min.insync.replicas: 1
{{- if .Values.metrics.enabled }}
metricsConfig:
type: jmxPrometheusExporter
valueFrom:
configMapKeyRef:
name: kafka-metrics
key: kafka-metrics-config.yml
{{- end }}
# entityOperator:
# topicOperator: {}
# userOperator: {}
{{- if .Values.metrics.enabled }}
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kafka-metrics
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
data:
kafka-metrics-config.yml: |
# See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
lowercaseOutputName: true
rules:
# Special cases and very specific rules
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
topic: "$4"
partition: "$5"
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
broker: "$4:$5"
- pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_tls_info
type: GAUGE
labels:
cipher: "$2"
protocol: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_software
type: GAUGE
labels:
clientSoftwareName: "$2"
clientSoftwareVersion: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
name: kafka_server_$1_$4
type: COUNTER
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
name: kafka_server_$1_$4
type: COUNTER
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
# Some percent metrics use MeanRate attribute
# Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
name: kafka_$1_$2_$3_percent
type: GAUGE
# Generic gauges for percents
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
labels:
"$4": "$5"
# Generic per-second counters with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
# Generic gauges with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
# Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
# Note that these are missing the '_sum' metric!
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
quantile: "0.$8"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
quantile: "0.$6"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
quantile: "0.$4"
# KRaft overall related metrics
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
- pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
name: kafka_server_raftmetrics_$1
type: COUNTER
- pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
name: kafka_server_raftmetrics_$1
value: 1
type: UNTYPED
labels:
$1: "$2"
- pattern: "kafka.server<type=raft-metrics><>(.+):"
name: kafka_server_raftmetrics_$1
type: GAUGE
# KRaft "low level" channels related metrics
# distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
- pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
name: kafka_server_raftchannelmetrics_$1
type: COUNTER
- pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
name: kafka_server_raftchannelmetrics_$1
type: GAUGE
# Broker metrics related to fetching metadata topic records in KRaft mode
- pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
name: kafka_server_brokermetadatametrics_$1
type: GAUGE
{{- end }}
{{- end }}
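
To see how these rules translate, take a hypothetical broker MBean against the generic per-second counter pattern above; jmx_exporter substitutes the capture groups into the metric name and labels, and lowercaseOutputName lowercases the result:

# hypothetical input sample:
#   kafka.server<type=BrokerTopicMetrics, name=MessagesInPerSec, topic=orders><>Count
# matched by:
#   kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
# exported as:
#   kafka_server_brokertopicmetrics_messagesin_total{topic="orders"}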

@@ -1,49 +0,0 @@
{{- if and (index .Values "data-prepper" "enabled" ) .Values.metrics.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "kafka" | trunc 63 | trimSuffix "-" }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
selector:
matchExpressions:
- key: "strimzi.io/kind"
operator: In
values: ["Kafka", "KafkaConnect", "KafkaMirrorMaker", "KafkaMirrorMaker2"]
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
podMetricsEndpoints:
- path: /metrics
port: tcp-prometheus
relabelings:
- separator: ;
regex: __meta_kubernetes_pod_label_(strimzi_io_.+)
replacement: $1
action: labelmap
- sourceLabels: [__meta_kubernetes_namespace]
separator: ;
regex: (.*)
targetLabel: namespace
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_name]
separator: ;
regex: (.*)
targetLabel: kubernetes_pod_name
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_node_name]
separator: ;
regex: (.*)
targetLabel: node_name
replacement: $1
action: replace
- sourceLabels: [__meta_kubernetes_pod_host_ip]
separator: ;
regex: (.*)
targetLabel: node_ip
replacement: $1
action: replace
{{- end }}

@@ -1,11 +1,8 @@
metrics:
enabled: false
data-prepper:
enabled: false
image:
tag: 2.10.1
#image:
# tag: 2.10.1
securityContext:
capabilities:
@@ -85,11 +82,6 @@ data-prepper:
bulk_size: 4
config:
data-prepper-config.yaml: |
ssl: false
peer_forwarder:
ssl: false
log4j2-rolling.properties: |
status = error
dest = err

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
version: 1.31.4-alpha
version: 1.30.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -13,6 +13,6 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.2.1"
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts
kubeVersion: ">= 1.29.0-0"
kubeVersion: ">= 1.26.0-0"

charts/kubezero/Makefile (new file)
@@ -0,0 +1,5 @@
.PHONY: sync
sync:
rm -rf scripts templates
cp -r ../../kubezero/charts/kubezero/* .

@@ -1,6 +1,6 @@
# kubezero
![Version: 1.31.3](https://img.shields.io/badge/Version-1.31.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 1.30.5](https://img.shields.io/badge/Version-1.30.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart
@@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.2.1 |
| https://cdn.zero-downtime.net/charts | kubezero-lib | >= 0.1.6 |
## Values
@@ -32,16 +32,16 @@ Kubernetes: `>= 1.26.0-0`
| addons.external-dns.enabled | bool | `false` | |
| addons.forseti.enabled | bool | `false` | |
| addons.sealed-secrets.enabled | bool | `false` | |
| addons.targetRevision | string | `"0.8.11"` | |
| addons.targetRevision | string | `"0.8.9"` | |
| argo.argo-cd.enabled | bool | `false` | |
| argo.argo-cd.istio.enabled | bool | `false` | |
| argo.argocd-image-updater.enabled | bool | `false` | |
| argo.enabled | bool | `false` | |
| argo.namespace | string | `"argocd"` | |
| argo.targetRevision | string | `"0.2.6"` | |
| argo.targetRevision | string | `"0.2.4"` | |
| cert-manager.enabled | bool | `false` | |
| cert-manager.namespace | string | `"cert-manager"` | |
| cert-manager.targetRevision | string | `"0.9.10"` | |
| cert-manager.targetRevision | string | `"0.9.9"` | |
| falco.enabled | bool | `false` | |
| falco.k8saudit.enabled | bool | `false` | |
| falco.targetRevision | string | `"0.1.2"` | |
@@ -54,35 +54,35 @@ Kubernetes: `>= 1.26.0-0`
| istio-ingress.enabled | bool | `false` | |
| istio-ingress.gateway.service | object | `{}` | |
| istio-ingress.namespace | string | `"istio-ingress"` | |
| istio-ingress.targetRevision | string | `"0.23.2"` | |
| istio-ingress.targetRevision | string | `"0.22.3-1"` | |
| istio-private-ingress.chart | string | `"kubezero-istio-gateway"` | |
| istio-private-ingress.enabled | bool | `false` | |
| istio-private-ingress.gateway.service | object | `{}` | |
| istio-private-ingress.namespace | string | `"istio-ingress"` | |
| istio-private-ingress.targetRevision | string | `"0.23.2"` | |
| istio-private-ingress.targetRevision | string | `"0.22.3-1"` | |
| istio.enabled | bool | `false` | |
| istio.namespace | string | `"istio-system"` | |
| istio.targetRevision | string | `"0.23.2"` | |
| istio.targetRevision | string | `"0.22.3-1"` | |
| kubezero.defaultTargetRevision | string | `"*"` | |
| kubezero.gitSync | object | `{}` | |
| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
| kubezero.server | string | `"https://kubernetes.default.svc"` | |
| logging.enabled | bool | `false` | |
| logging.namespace | string | `"logging"` | |
| logging.targetRevision | string | `"0.8.13"` | |
| logging.targetRevision | string | `"0.8.12"` | |
| metrics.enabled | bool | `false` | |
| metrics.istio.grafana | object | `{}` | |
| metrics.istio.prometheus | object | `{}` | |
| metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | |
| metrics.namespace | string | `"monitoring"` | |
| metrics.targetRevision | string | `"0.10.2"` | |
| metrics.targetRevision | string | `"0.10.0"` | |
| network.cilium.cluster | object | `{}` | |
| network.enabled | bool | `true` | |
| network.retain | bool | `true` | |
| network.targetRevision | string | `"0.5.5"` | |
| operators.enabled | bool | `false` | |
| operators.namespace | string | `"operators"` | |
| operators.targetRevision | string | `"0.1.6"` | |
| operators.targetRevision | string | `"0.1.4"` | |
| storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | |
@@ -90,10 +90,7 @@ Kubernetes: `>= 1.26.0-0`
| storage.k8up.enabled | bool | `false` | |
| storage.lvm-localpv.enabled | bool | `false` | |
| storage.snapshotController.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.8.9"` | |
| storage.targetRevision | string | `"0.8.8"` | |
| telemetry.enabled | bool | `false` | |
| telemetry.namespace | string | `"telemetry"` | |
| telemetry.targetRevision | string | `"0.4.1"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
| telemetry.targetRevision | string | `"0.4.0"` | |

@@ -6,7 +6,7 @@ clusterBackup:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if eq .Values.global.platform "aws" }}
{{- if .Values.global.aws.region }}
# AWS
extraEnv:
- name: AWS_DEFAULT_REGION
@@ -20,7 +20,7 @@ forseti:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if eq .Values.global.platform "aws" }}
{{- if .Values.global.aws.region }}
# AWS
aws:
region: {{ $.Values.global.aws.region }}
@@ -34,7 +34,7 @@ external-dns:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if eq .Values.global.platform "aws" }}
{{- if .Values.global.aws.region }}
# AWS
txtOwnerId: {{ .Values.global.clusterName }}
provider: aws
@@ -67,18 +67,13 @@ external-dns:
cluster-autoscaler:
enabled: {{ ternary "true" "false" (or (hasKey .Values.global.aws "region") (index .Values "addons" "cluster-autoscaler" "enabled")) }}
autoDiscovery:
clusterName: {{ .Values.global.clusterName }}
{{- if not .Values.global.highAvailable }}
extraArgs:
leader-elect: false
{{- end }}
{{- with omit (index .Values "addons" "cluster-autoscaler") "enabled" }}
{{- toYaml . | nindent 2 }}
{{- end }}
autoDiscovery:
clusterName: {{ .Values.global.clusterName }}
{{- with .Values.metrics }}
serviceMonitor:
enabled: {{ .enabled }}
@@ -87,7 +82,7 @@ cluster-autoscaler:
# enabled: {{ .enabled }}
{{- end }}
{{- if eq .Values.global.platform "aws" }}
{{- if .Values.global.aws.region }}
# AWS
awsRegion: {{ .Values.global.aws.region }}
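
The enabled line in the cluster-autoscaler block above auto-activates the addon whenever an AWS region is configured, while still honouring an explicit opt-in. The ternary resolves as sketched:

# ternary "true" "false" COND  ->  "true" exactly when COND holds
#   global.aws.region present            -> enabled
#   addons.cluster-autoscaler.enabled    -> enabled
#   neither                              -> disabled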

@@ -10,13 +10,26 @@ cert-manager:
{{- end }}
{{- if eq .Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
# map everything to the control-plane
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
webhook:
{{- include "kubezero-lib.control-plane" . | nindent 4 }}
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
cainjector:
{{- include "kubezero-lib.control-plane" . | nindent 4 }}
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
extraEnv:
- name: AWS_REGION

@@ -13,15 +13,19 @@ istiod:
telemetry:
enabled: {{ $.Values.metrics.enabled }}
pilot:
{{- if eq .Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 4 }}
{{- end }}
{{- if .Values.global.highAvailable }}
{{- if eq .Values.global.platform "aws" }}
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
{{- end }}
{{- if .Values.global.highAvailable }}
replicaCount: 2
{{- else }}
{{- else }}
extraContainerArgs:
- --leader-elect=false
{{- end }}
{{- end }}
{{- with index .Values "istio" "kiali-server" }}
kiali-server:

@@ -2,42 +2,16 @@
{{- with index .Values "operators" "opensearch-operator" }}
opensearch-operator:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with index .Values "operators" "eck-operator" }}
eck-operator:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with index .Values "operators" "cloudnative-pg" }}
cloudnative-pg:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }}
{{- with $.Values.metrics }}
monitoring:
podMonitorEnabled: {{ .enabled }}
{{- end }}
{{- end }}
{{- with index .Values "operators" "strimzi-kafka-operator" }}
strimzi-kafka-operator:
{{- if eq $.Values.global.platform "aws" }}
{{- include "kubezero-lib.control-plane" . | nindent 2 }}
{{- end }}
{{- toYaml . | nindent 2 }}
{{- with $.Values.metrics }}

@@ -57,13 +57,11 @@ aws-efs-csi-driver:
enabled: {{ default false (index .Values "storage" "aws-efs-csi-driver" "enabled")}}
replicaCount: {{ ternary 2 1 .Values.global.highAvailable }}
controller:
{{- with index .Values "storage" "aws-efs-csi-driver" "IamArn" }}
extraEnv:
- name: AWS_ROLE_ARN
value: {{ . | quote }}
value: {{ index .Values "storage" "aws-efs-csi-driver" "IamArn" | quote }}
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
{{- end }}
# The EFS controller still needs hostNetwork and cannot update on single node control planes otherwise
{{- if not .Values.global.highAvailable }}
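
Wrapping the extraEnv block in {{- with ... "IamArn" }} means the two AWS variables are emitted only when an IAM role ARN is actually configured. A minimal values sketch, with a hypothetical ARN:

storage:
  aws-efs-csi-driver:
    enabled: true
    IamArn: "arn:aws:iam::123456789012:role/efs-csi-controller"   # hypothetical; omit to drop the env block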

@@ -1,8 +1,5 @@
{{- define "telemetry-values" }}
metrics:
enabled: {{ .Values.metrics.enabled }}
{{- if index .Values "telemetry" "fluent-bit" }}
fluent-bit:
{{- with index .Values.telemetry "fluent-bit" }}

@@ -1,9 +0,0 @@
#!/bin/bash
set -ex
. ../../scripts/lib-update.sh
login_ecr_public
update_helm
update_docs

@@ -96,7 +96,7 @@ telemetry:
operators:
enabled: false
namespace: operators
targetRevision: 0.1.8
targetRevision: 0.1.6
metrics:
enabled: false

docs/images/k8s-1.31.png (Stored with Git LFS)

Binary file not shown.

@@ -1,17 +0,0 @@
# ![k8s-v1.31](images/k8s-1.31.png) KubeZero 1.31 - Elli
## What's new - Major themes
- all KubeZero and support AMIs based on Alpine 3.21
## Features and fixes
## Version upgrades
<WIP>
- cilium 1.16.3
- istio 1.22.3
- ArgoCD 2.13.0 [custom ZDT image](https://git.zero-downtime.net/ZeroDownTime/zdt-argocd)
- Prometheus 2.55.1 / Grafana 11.3.0
- Nvidia container toolkit 1.17, drivers 565.57.01, Cuda 12.7
## Resources
- [Kubernetes v1.31 upstream release blog](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/)