feat: upgrade all operators

This commit is contained in:
Stefan Reimer 2025-02-13 17:34:14 +00:00
parent c53b56a14a
commit 190e21ea6f
20 changed files with 229 additions and 183 deletions

View File

@ -86,7 +86,7 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $
# Root KubeZero apply directly and exit
if [ ${ARTIFACTS[0]} == "kubezero" ]; then
kubectl apply -f $WORKDIR/kubezero/templates
kubectl apply --server-side --force-conflicts -f $WORKDIR/kubezero/templates
exit $?
# "catch all" apply all enabled modules

View File

@ -121,18 +121,16 @@ control_plane_upgrade() {
get_kubezero_values $ARGOCD
# tumble new config through migrate_argo_values.py
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml \
&& mv "$WORKDIR"/new-kubezero-values.yaml "$WORKDIR"/kubezero-values.yaml
# Update kubezero-values CM
kubectl get cm -n kubezero kubezero-values -o=yaml | \
yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
kubectl apply --server-side --force-conflicts -f -
update_kubezero_cm
if [ "$ARGOCD" == "True" ]; then
# update argo app
export kubezero_chart_version=$(yq .version $CHARTS/kubezero/Chart.yaml)
kubectl get application kubezero -n argocd -o yaml | \
yq '.spec.source.helm.valuesObject |= load("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' \
yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
> $WORKDIR/new-argocd-app.yaml
kubectl apply --server-side --force-conflicts -f $WORKDIR/new-argocd-app.yaml

View File

@ -46,15 +46,19 @@ function get_kubezero_values() {
}
# Update kubezero-values CM
# Overwrite kubezero-values CM with file
function update_kubezero_cm() {
kubectl get application kubezero -n argocd -o yaml | yq .spec.source.helm.valuesObject > ${WORKDIR}/kubezero-values.yaml
kubectl get cm -n kubezero kubezero-values -o=yaml | \
yq e '.data."values.yaml" |= load_str("/tmp/kubezero/kubezero-values.yaml")' | \
yq e ".data.\"values.yaml\" |= load_str(\"$WORKDIR/kubezero-values.yaml\")" | \
kubectl apply --server-side --force-conflicts -f -
}
# Sync the kubezero-values ConfigMap from the ArgoCD "kubezero" Application.
# Reads the live values via get_kubezero_values (the True argument presumably
# selects the ArgoCD app as the source — confirm against get_kubezero_values),
# then overwrites the kubezero-values CM from the fetched file via
# update_kubezero_cm. Both helpers are defined elsewhere in this file.
function sync_kubezero_cm_from_argo() {
get_kubezero_values True
update_kubezero_cm
}
function disable_argo() {
cat > _argoapp_patch.yaml <<EOF

View File

@ -22,25 +22,6 @@ nats:
# url: "mq.example.com"
gateway: istio-ingress/private-ingressgateway
# rabbitmq cluster operator
rabbitmq-cluster-operator:
enabled: false
rabbitmqImage:
tag: 3.11.4-debian-11-r0
useCertManager: true
clusterOperator:
metrics:
enabled: false
serviceMonitor:
enabled: true
msgTopologyOperator:
metrics:
enabled: false
serviceMonitor:
enabled: true
# rabbitmq
rabbitmq:

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-operators
description: Various operators supported by KubeZero
type: application
version: 0.1.9
version: 0.2.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -11,6 +11,7 @@ keywords:
- opensearch
- postgres
- kafka
- rabbitmq
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@ -34,4 +35,8 @@ dependencies:
version: 0.45.0
repository: "oci://quay.io/strimzi-helm"
condition: strimzi-kafka-operator.enabled
kubeVersion: ">= 1.26.0"
- name: rabbitmq-cluster-operator
version: 4.4.3
repository: https://charts.bitnami.com/bitnami
condition: rabbitmq-cluster-operator.enabled
kubeVersion: ">= 1.30.0-0"

View File

@ -1,6 +1,6 @@
# kubezero-operators
![Version: 0.1.7](https://img.shields.io/badge/Version-0.1.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
Various operators supported by KubeZero
@ -14,15 +14,16 @@ Various operators supported by KubeZero
## Requirements
Kubernetes: `>= 1.26.0`
Kubernetes: `>= 1.30.0-0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://cloudnative-pg.github.io/charts | cloudnative-pg | 0.22.1 |
| https://helm.elastic.co | eck-operator | 2.15.0 |
| https://charts.bitnami.com/bitnami | rabbitmq-cluster-operator | 4.4.3 |
| https://cloudnative-pg.github.io/charts | cloudnative-pg | 0.23.0 |
| https://helm.elastic.co | eck-operator | 2.16.1 |
| https://opensearch-project.github.io/opensearch-k8s-operator/ | opensearch-operator | 2.7.0 |
| oci://quay.io/strimzi-helm | strimzi-kafka-operator | 0.44.0 |
| oci://quay.io/strimzi-helm | strimzi-kafka-operator | 0.45.0 |
## Values
@ -31,32 +32,23 @@ Kubernetes: `>= 1.26.0`
| cloudnative-pg.enabled | bool | `false` | |
| cloudnative-pg.monitoring.grafanaDashboard.create | bool | `false` | |
| cloudnative-pg.monitoring.podMonitorEnabled | bool | `false` | |
| cloudnative-pg.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cloudnative-pg.tolerations[0].effect | string | `"NoSchedule"` | |
| cloudnative-pg.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| eck-operator.enabled | bool | `false` | |
| eck-operator.installCRDs | bool | `false` | |
| eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| eck-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| opensearch-operator.enabled | bool | `false` | |
| opensearch-operator.fullnameOverride | string | `"opensearch-operator"` | |
| opensearch-operator.kubeRbacProxy.enable | bool | `false` | |
| opensearch-operator.manager.extraEnv[0].name | string | `"SKIP_INIT_CONTAINER"` | |
| opensearch-operator.manager.extraEnv[0].value | string | `"true"` | |
| opensearch-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| opensearch-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| opensearch-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| rabbitmq-cluster-operator.clusterOperator.metrics.enabled | bool | `false` | |
| rabbitmq-cluster-operator.clusterOperator.metrics.serviceMonitor.enabled | bool | `true` | |
| rabbitmq-cluster-operator.enabled | bool | `false` | |
| rabbitmq-cluster-operator.msgTopologyOperator.metrics.enabled | bool | `false` | |
| rabbitmq-cluster-operator.msgTopologyOperator.metrics.serviceMonitor.enabled | bool | `true` | |
| rabbitmq-cluster-operator.useCertManager | bool | `true` | |
| strimzi-kafka-operator.enabled | bool | `false` | |
| strimzi-kafka-operator.leaderElection.enable | bool | `false` | |
| strimzi-kafka-operator.monitoring.podMonitorEnabled | bool | `false` | |
| strimzi-kafka-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| strimzi-kafka-operator.resources.limits.memory | string | `"384Mi"` | |
| strimzi-kafka-operator.resources.requests.cpu | string | `"20m"` | |
| strimzi-kafka-operator.resources.requests.memory | string | `"256Mi"` | |
| strimzi-kafka-operator.revisionHistoryLimit | int | `3` | |
| strimzi-kafka-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| strimzi-kafka-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| strimzi-kafka-operator.revisionHistoryLimit | int | `2` | |
| strimzi-kafka-operator.watchAnyNamespace | bool | `true` | |
----------------------------------------------

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 2.15.0
appVersion: 2.16.1
description: Elastic Cloud on Kubernetes (ECK) operator
home: https://github.com/elastic/cloud-on-k8s
icon: https://helm.elastic.co/icons/eck.png
@ -18,4 +18,4 @@ maintainers:
name: Elastic
name: eck-operator
type: application
version: 2.15.0
version: 2.16.1

View File

@ -4,14 +4,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: agents.agent.k8s.elastic.co
spec:
group: agent.k8s.elastic.co
@ -1137,14 +1137,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: apmservers.apm.k8s.elastic.co
spec:
group: apm.k8s.elastic.co
@ -2372,14 +2372,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: beats.beat.k8s.elastic.co
spec:
group: beat.k8s.elastic.co
@ -2854,14 +2854,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: elasticmapsservers.maps.k8s.elastic.co
spec:
group: maps.k8s.elastic.co
@ -3459,14 +3459,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: elasticsearchautoscalers.autoscaling.k8s.elastic.co
spec:
group: autoscaling.k8s.elastic.co
@ -3818,14 +3818,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: elasticsearches.elasticsearch.k8s.elastic.co
spec:
group: elasticsearch.k8s.elastic.co
@ -4843,6 +4843,14 @@ spec:
type: string
type: object
type: object
remoteClusterServer:
description: |-
RemoteClusterServer specifies if the remote cluster server should be enabled.
This must be enabled if this cluster is a remote cluster which is expected to be accessed using API key authentication.
properties:
enabled:
type: boolean
type: object
remoteClusters:
description: RemoteClusters enables you to establish uni-directional
connections to a remote Elasticsearch cluster.
@ -4850,6 +4858,55 @@ spec:
description: RemoteCluster declares a remote Elasticsearch cluster
connection.
properties:
apiKey:
description: 'APIKey can be used to enable remote cluster access
using Cross-Cluster API keys: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-cross-cluster-api-key.html'
properties:
access:
description: Access is the name of the API Key. It is automatically
generated if not set or empty.
properties:
replication:
properties:
names:
items:
type: string
type: array
required:
- names
type: object
search:
properties:
allow_restricted_indices:
type: boolean
field_security:
properties:
except:
items:
type: string
type: array
grant:
items:
type: string
type: array
required:
- except
- grant
type: object
names:
items:
type: string
type: array
query:
type: object
x-kubernetes-preserve-unknown-fields: true
required:
- names
type: object
type: object
required:
- access
type: object
elasticsearchRef:
description: ElasticsearchRef is a reference to an Elasticsearch
cluster running within the same k8s cluster.
@ -6562,14 +6619,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: enterprisesearches.enterprisesearch.k8s.elastic.co
spec:
group: enterprisesearch.k8s.elastic.co
@ -7731,14 +7788,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: kibanas.kibana.k8s.elastic.co
spec:
group: kibana.k8s.elastic.co
@ -9046,14 +9103,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: logstashes.logstash.k8s.elastic.co
spec:
group: logstash.k8s.elastic.co
@ -10293,14 +10350,14 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.4
controller-gen.kubebuilder.io/version: v0.16.5
helm.sh/resource-policy: keep
labels:
app.kubernetes.io/instance: 'logging'
app.kubernetes.io/managed-by: 'Helm'
app.kubernetes.io/name: 'eck-operator-crds'
app.kubernetes.io/version: '2.15.0'
helm.sh/chart: 'eck-operator-crds-2.15.0'
app.kubernetes.io/version: '2.16.1'
helm.sh/chart: 'eck-operator-crds-2.16.1'
name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co
spec:
group: stackconfigpolicy.k8s.elastic.co

View File

@ -103,7 +103,7 @@ kind: ClusterRole
metadata:
labels:
{{- include "eck-operator.labels" . | nindent 4 }}
name: "{{ include "eck-operator.fullname" . }}-proxy-role"
name: "{{ include "eck-operator.fullname" . }}-metrics-auth-role"
rules:
- apiGroups:
- authentication.k8s.io

View File

@ -13,11 +13,8 @@ data:
{{- if and .Values.config.metrics.secureMode.enabled (eq $metricsPort 0) }}
{{- fail "config.metrics.port must be greater than 0 when config.metrics.secureMode.enabled is true" }}
{{- end }}
{{- if .Values.config.metrics.secureMode.enabled }}
metrics-port: {{ add $metricsPort 1 }}
{{- else }}
metrics-port: {{ $metricsPort }}
{{- end }}
metrics-secure: {{ .Values.config.metrics.secureMode.enabled }}
container-registry: {{ .Values.config.containerRegistry }}
{{- with .Values.config.containerSuffix }}
container-suffix: {{ . }}

View File

@ -85,11 +85,11 @@ kind: ClusterRoleBinding
metadata:
labels:
{{- include "eck-operator.labels" $ | nindent 4 }}
name: "{{ include "eck-operator.fullname" . }}-proxy-rolebinding"
name: "{{ include "eck-operator.fullname" . }}-metrics-auth-rolebinding"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "{{ include "eck-operator.fullname" . }}-proxy-role"
name: "{{ include "eck-operator.fullname" . }}-metrics-auth-role"
subjects:
- kind: ServiceAccount
name: {{ $svcAccount }}

View File

@ -1,4 +1,4 @@
{{- if .Values.config.metrics.secureMode.enabled }}
{{- if and .Values.config.metrics.secureMode.enabled .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@ -19,10 +19,13 @@ spec:
scheme: https
interval: 30s
tlsConfig:
insecureSkipVerify: {{ .Values.config.metrics.secureMode.tls.insecureSkipVerify | default false }}
{{- if (not .Values.config.metrics.secureMode.tls.insecureSkipVerify) }}
{{- $leading_path := trimSuffix "/" .Values.config.metrics.secureMode.tls.caMountDirectory }}
{{- with .Values.config.metrics.secureMode.tls.caSecret }}
{{- $insecureSkipVerify := (ternary .Values.config.metrics.secureMode.tls.insecureSkipVerify .Values.serviceMonitor.insecureSkipVerify (hasKey .Values.config.metrics.secureMode.tls "insecureSkipVerify")) }}
insecureSkipVerify: {{ $insecureSkipVerify }}
{{- if (not $insecureSkipVerify) }}
{{- $caMountDirectory := or (.Values.config.metrics.secureMode.tls.caMountDirectory) (.Values.serviceMonitor.caMountDirectory) -}}
{{- $leading_path := trimSuffix "/" $caMountDirectory }}
{{- $caSecret := or (.Values.config.metrics.secureMode.tls.caSecret) (.Values.serviceMonitor.caSecret) -}}
{{- with $caSecret }}
caFile: "{{ $leading_path }}/{{ . }}/ca.crt"
{{- end }}
serverName: "{{ include "eck-operator.fullname" . }}-metrics.{{ .Release.Namespace }}.svc"

View File

@ -51,7 +51,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- image: "{{ .Values.image.repository }}{{- if .Values.config.ubiOnly -}}-ubi{{- end -}}:{{ default .Chart.AppVersion .Values.image.tag }}"
- image: "{{ .Values.image.repository }}{{- if .Values.config.ubiOnly -}}-ubi{{- end -}}{{- if .Values.image.fips -}}-fips{{- end -}}:{{ default .Chart.AppVersion .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
name: manager
args:
@ -89,7 +89,7 @@ spec:
{{- end }}
{{- if or .Values.webhook.enabled (gt $metricsPort 0) }}
ports:
{{- if and (gt $metricsPort 0) (not .Values.config.metrics.secureMode.enabled) }}
{{- if (gt $metricsPort 0) }}
- containerPort: {{ $metricsPort }}
name: metrics
protocol: TCP
@ -109,49 +109,14 @@ spec:
name: cert
readOnly: true
{{- end }}
{{- if .Values.config.metrics.secureMode.tls.certificateSecret }}
- mountPath: "/tmp/k8s-metrics-server/serving-certs"
name: tls-certificate
readOnly: true
{{- end }}
{{- with .Values.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.config.metrics.secureMode.enabled }}
- name: kube-rbac-proxy
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0
args:
- "--secure-listen-address=0.0.0.0:{{ $metricsPort }}"
- "--upstream=http://127.0.0.1:{{ add $metricsPort 1 }}/"
- "--logtostderr=true"
- "--v=0"
{{- if .Values.config.metrics.secureMode.tls.certificateSecret }}
- "--tls-cert-file=/tls/tls.crt"
- "--tls-private-key-file=/tls/tls.key"
{{- end }}
{{- if or .Values.config.metrics.secureMode.tls.certificateSecret .Values.config.metrics.secureMode.volumeMounts }}
volumeMounts:
{{- with .Values.config.metrics.secureMode.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.config.metrics.secureMode.tls.certificateSecret }}
- mountPath: "/tls"
name: tls-certificate
readOnly: true
{{- end }}
{{- end }}
ports:
- containerPort: {{ $metricsPort }}
protocol: TCP
name: metrics
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
{{- end }}
volumes:
- name: conf
configMap:

View File

@ -24,6 +24,10 @@ image:
pullPolicy: IfNotPresent
# tag is the container image tag. If not defined, defaults to chart appVersion.
tag: null
# fips specifies whether the operator will use a FIPS compliant container image for its own StatefulSet image.
# This setting does not apply to Elastic Stack applications images.
# Can be combined with config.ubiOnly.
fips: false
# priorityClassName defines the PriorityClass to be used by the operator pods.
priorityClassName: ""
@ -178,29 +182,12 @@ config:
port: "0"
# secureMode contains the options for enabling and configuring RBAC and TLS/HTTPs for the metrics endpoint.
secureMode:
# secureMode.enabled specifies whether to enable RBAC and TLS/HTTPs for the metrics endpoint. (Will be enabled by default in v2.14.0)
# * This option requires using a ServiceMonitor to scrape the metrics and as such is mutually exclusive with the podMonitor.enabled option.
# secureMode.enabled specifies whether to enable RBAC and TLS/HTTPs for the metrics endpoint.
# * This option makes most sense when using a ServiceMonitor to scrape the metrics and is therefore mutually exclusive with the podMonitor.enabled option.
# * This option also requires using cluster scoped resources (ClusterRole, ClusterRoleBinding) to
# grant access to the /metrics endpoint. (createClusterScopedResources: true is required)
#
# This option requires the following settings within Prometheus to function:
# 1. RBAC settings for the Prometheus instance to access the metrics endpoint.
#
# - nonResourceURLs:
# - /metrics
# verbs:
# - get
#
# 2. If using the Prometheus Operator and your Prometheus instance is not in the same namespace as the operator you will need
# the Prometheus Operator configured with the following Helm values:
#
# prometheus:
# prometheusSpec:
# serviceMonitorNamespaceSelector: {}
# serviceMonitorSelectorNilUsesHelmValues: false
enabled: false
# additional volume mounts for the kube-rbac-proxy container.
volumeMounts: []
tls:
# certificateSecret is the name of the tls secret containing the custom TLS certificate and key for the secure metrics endpoint.
#
@ -212,27 +199,6 @@ config:
# example: kubectl create secret tls eck-metrics-tls-certificate -n elastic-system \
# --cert=/path/to/tls.crt --key=/path/to/tls.key
certificateSecret: ""
# caSecret is the name of the secret containing the custom CA certificate used to generate the custom TLS certificate for the secure metrics endpoint.
#
# * This *must* be the name of the secret containing the CA certificate used to sign the custom TLS certificate.
# * This secret *must* be in the same namespace as the Prometheus instance that will scrape the metrics.
# * If using the Prometheus operator this secret must be within the `spec.secrets` field of the `Prometheus` custom resource such that it is mounted into the Prometheus pod at `caMountDirectory`, which defaults to /etc/prometheus/secrets/{secret-name}.
# * This is an optional setting and is only required if you are using a custom TLS certificate.
# * Key must be named ca.crt.
#
# example: kubectl create secret generic eck-metrics-tls-ca -n monitoring \
# --from-file=ca.crt=/path/to/ca.pem
caSecret: ""
# caMountDirectory is the directory at which the CA certificate is mounted within the Prometheus pod.
#
# * You should only need to adjust this if you are *not* using the Prometheus operator.
caMountDirectory: "/etc/prometheus/secrets/"
# insecureSkipVerify specifies whether to skip verification of the TLS certificate for the secure metrics endpoint.
#
# * If this setting is set to false, then the following settings are required:
# - certificateSecret
# - caSecret
insecureSkipVerify: true
# containerRegistry to use for pulling Elasticsearch and other application container images.
containerRegistry: docker.elastic.co
@ -333,11 +299,49 @@ podMonitor:
# Prometheus ServiceMonitor configuration
# Only used when config.enableSecureMetrics is true
# Reference: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#servicemonitor
serviceMonitor: {}
serviceMonitor:
# This option requires the following settings within Prometheus to function:
# 1. RBAC settings for the Prometheus instance to access the metrics endpoint.
#
# - nonResourceURLs:
# - /metrics
# verbs:
# - get
#
# 2. If using the Prometheus Operator and your Prometheus instance is not in the same namespace as the operator you will need
# the Prometheus Operator configured with the following Helm values:
#
# prometheus:
# prometheusSpec:
# serviceMonitorNamespaceSelector: {}
# serviceMonitorSelectorNilUsesHelmValues: false
#
# allows to disable the serviceMonitor, enabled by default for backwards compatibility
enabled: true
# namespace determines in which namespace the serviceMonitor will be deployed.
# If not set the serviceMonitor will be created in the namespace where the Helm release is installed into
# namespace: monitoring
# caSecret is the name of the secret containing the custom CA certificate used to generate the custom TLS certificate for the secure metrics endpoint.
#
# * This *must* be the name of the secret containing the CA certificate used to sign the custom TLS certificate for the metrics endpoint.
# * This secret *must* be in the same namespace as the Prometheus instance that will scrape the metrics.
# * If using the Prometheus operator this secret must be within the `spec.secrets` field of the `Prometheus` custom resource such that it is mounted into the Prometheus pod at `caMountDirectory`, which defaults to /etc/prometheus/secrets/{secret-name}.
# * This is an optional setting and is only required if you are using a custom TLS certificate.
# * Key must be named ca.crt.
#
# example: kubectl create secret generic eck-metrics-tls-ca -n monitoring \
# --from-file=ca.crt=/path/to/ca.pem
caSecret: ""
# caMountDirectory is the directory at which the CA certificate is mounted within the Prometheus pod.
#
# * You should only need to adjust this if you are *not* using the Prometheus operator.
caMountDirectory: "/etc/prometheus/secrets/"
# insecureSkipVerify specifies whether to skip verification of the TLS certificate for the secure metrics endpoint.
#
# * If this setting is set to false, then the following settings are required:
# - certificateSecret
# - caSecret
insecureSkipVerify: true
# Globals meant for internal use only
global:

View File

@ -0,0 +1,16 @@
apiVersion: postgresql.cnpg.io/v1
kind: ClusterImageCatalog
metadata:
name: postgresql
spec:
images:
- major: 13
image: ghcr.io/cloudnative-pg/postgresql:13.18-34-bookworm@sha256:79ffc8faf88cbaf768791a23f15031cc400778321378237ead6cb77e8cfcf192
- major: 14
image: ghcr.io/cloudnative-pg/postgresql:14.15-34-bookworm@sha256:95b3f1a94c3d6755128a57e335d35ff196597078e09b93438009b8a9dcb2d409
- major: 15
image: ghcr.io/cloudnative-pg/postgresql:15.10-34-bookworm@sha256:4e8945ec4d6b744aa612f80c6b80cc525eafb411e44527c81f249fda35363765
- major: 16
image: ghcr.io/cloudnative-pg/postgresql:16.6-33-bookworm@sha256:7dfda49485274b61ada9bb347caffac01dee442ffd119eb19317a2692347657b
- major: 17
image: ghcr.io/cloudnative-pg/postgresql:17.2-33-bookworm@sha256:52b78e8e4a297e268be168c7e107a2117072dc38f4a11d9d056ff0cc13d4007f

File diff suppressed because one or more lines are too long

View File

@ -16,6 +16,9 @@ helm template charts/eck-operator/charts/eck-operator-crds --name-template loggi
rm -rf charts/eck-operator/charts
yq eval -Mi 'del(.dependencies)' charts/eck-operator/Chart.yaml
# get latest cloudnative-pg clusterimagecatalog
wget -qO templates/cloudnative-pg/ClusterImageCatalog-bookworm.yaml https://raw.githubusercontent.com/cloudnative-pg/postgres-containers/refs/heads/main/Debian/ClusterImageCatalog-bookworm.yaml
# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards-pg.yaml templates/cloudnative-pg/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboards-strimzi.yaml templates/strimzi/grafana-dashboards.yaml

View File

@ -48,3 +48,24 @@ strimzi-kafka-operator:
monitoring:
podMonitorEnabled: false
# rabbitmq cluster operator
rabbitmq-cluster-operator:
enabled: false
#rabbitmqImage:
# tag: 3.11.4-debian-11-r0
useCertManager: true
clusterOperator:
metrics:
enabled: false
serviceMonitor:
enabled: true
msgTopologyOperator:
enabled: false
metrics:
enabled: false
serviceMonitor:
enabled: true

View File

@ -96,7 +96,7 @@ telemetry:
operators:
enabled: false
namespace: operators
targetRevision: 0.1.8
targetRevision: 0.2.0
metrics:
enabled: false