Feat: introduce Percona XtraDB operator to SQL module

Stefan Reimer 2022-11-03 13:19:57 +01:00
parent 50764341e8
commit 9ece3ca388
7 changed files with 636 additions and 20 deletions

charts/kubezero-sql/Chart.yaml

@@ -1,14 +1,15 @@
apiVersion: v2
name: kubezero-sql
description: KubeZero umbrella chart for SQL databases like MariaDB, PostgreSQL
description: KubeZero umbrella chart for SQL databases, Percona XtraDB Cluster
type: application
version: 0.2.1
version: 0.3.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- mariadb
- postgresql
- mysql
- percona
# - postgresql
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@@ -16,8 +17,12 @@ dependencies:
- name: kubezero-lib
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: pxc-operator
version: 1.11.1
repository: https://percona.github.io/percona-helm-charts/
condition: pxc-operator.enabled
- name: mariadb-galera
version: 7.4.3
version: 7.4.7
repository: https://charts.bitnami.com/bitnami
condition: mariadb-galera.enabled
kubeVersion: ">= 1.20.0"
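Because the new dependency declares `condition: pxc-operator.enabled`, the operator subchart stays inert until a values override switches it on. A minimal sketch of such an override (the file name pxc.yaml is hypothetical), passed to helm via `-f pxc.yaml`:

pxc-operator:
  enabled: true        # pulls in the Percona operator subchart
mariadb-galera:
  enabled: false       # leave the Galera subchart off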

charts/kubezero-sql/README.md

@@ -1,8 +1,8 @@
# kubezero-sql
![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for SQL databases like MariaDB, PostgreSQL
KubeZero umbrella chart for SQL databases, Percona XtraDB Cluster
**Homepage:** <https://kubezero.com>
@@ -19,25 +19,32 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.3 |
| https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.7 |
| https://percona.github.io/percona-helm-charts/ | pxc-operator | 1.11.1 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| mariadb-galera.configurationConfigMap | string | `"{{ .Release.Name }}-mariadb-galera-configuration"` | |
| mariadb-galera.db.password | string | `"12345qwert"` | |
| mariadb-galera.db.user | string | `"mariadb"` | |
| mariadb-galera.enabled | bool | `false` | |
| mariadb-galera.galera.mariabackup.password | string | `"12345qwert"` | |
| mariadb-galera.galera | string | `nil` | |
| mariadb-galera.istio.enabled | bool | `false` | |
| mariadb-galera.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| mariadb-galera.istio.url | string | `"mariadb.example.com"` | |
| mariadb-galera.metrics.enabled | bool | `false` | |
| mariadb-galera.metrics.installDashboard | bool | `true` | |
| mariadb-galera.metrics.prometheusRules.enabled | bool | `false` | |
| mariadb-galera.metrics.serviceMonitor.enabled | bool | `false` | |
| mariadb-galera.replicaCount | int | `2` | |
| mariadb-galera.rootUser.password | string | `"12345qwert"` | |
| pxc-operator.enabled | bool | `false` | |
| pxc-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| pxc-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| pxc-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| pxc-operator.tolerations[1].effect | string | `"NoSchedule"` | |
| pxc-operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| pxc-operator.watchAllNamespaces | bool | `true` | |
# Changes

charts/kubezero-sql/cr.yaml (new file, 591 lines)

@@ -0,0 +1,591 @@
apiVersion: pxc.percona.com/v1-11-0
kind: PerconaXtraDBCluster
metadata:
name: best-db
finalizers:
- delete-pxc-pods-in-order
# - delete-ssl
# - delete-proxysql-pvc
# - delete-pxc-pvc
# annotations:
# percona.com/issue-vault-token: "true"
spec:
crVersion: 1.11.0
# secretsName: cluster1-secrets
# vaultSecretName: keyring-secret-vault
# sslSecretName: cluster1-ssl
# sslInternalSecretName: cluster1-ssl-internal
# logCollectorSecretName: cluster1-log-collector-secrets
# initImage: percona/percona-xtradb-cluster-operator:1.11.0
# enableCRValidationWebhook: true
# tls:
# SANs:
# - pxc-1.example.com
# - pxc-2.example.com
# - pxc-3.example.com
# issuerConf:
# name: special-selfsigned-issuer
# kind: ClusterIssuer
# group: cert-manager.io
allowUnsafeConfigurations: true
# pause: false
updateStrategy: SmartUpdate
upgradeOptions:
versionServiceEndpoint: https://check.percona.com
apply: disabled
schedule: "0 4 * * *"
pxc:
size: 1
image: percona/percona-xtradb-cluster:8.0.27-18.1
autoRecovery: true
# expose:
# enabled: true
# type: LoadBalancer
# trafficPolicy: Local
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# annotations:
# networking.gke.io/load-balancer-type: "Internal"
# replicationChannels:
# - name: pxc1_to_pxc2
# isSource: true
# - name: pxc2_to_pxc1
# isSource: false
# configuration:
# sourceRetryCount: 3
# sourceConnectRetry: 60
# sourcesList:
# - host: 10.95.251.101
# port: 3306
# weight: 100
# schedulerName: mycustom-scheduler
# readinessDelaySec: 15
# livenessDelaySec: 600
# configuration: |
# [mysqld]
# wsrep_debug=CLIENT
# wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
# [sst]
# xbstream-opts=--decompress
# [xtrabackup]
# compress=lz4
# for PXC 5.7
# [xtrabackup]
# compress
# imagePullSecrets:
# - name: private-registry-credentials
# priorityClassName: high-priority
# annotations:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# readinessProbes:
# initialDelaySeconds: 15
# timeoutSeconds: 15
# periodSeconds: 30
# successThreshold: 1
# failureThreshold: 5
# livenessProbes:
# initialDelaySeconds: 300
# timeoutSeconds: 5
# periodSeconds: 10
# successThreshold: 1
# failureThreshold: 3
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
# imagePullPolicy: Always
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 512M
cpu: 200m
# ephemeral-storage: 1G
# limits:
# memory: 1G
# cpu: "1"
# ephemeral-storage: 1G
# nodeSelector:
# disktype: ssd
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# tolerations:
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
# podDisruptionBudget:
# maxUnavailable: 1
# minAvailable: 0
volumeSpec:
# emptyDir: {}
# hostPath:
# path: /data
# type: Directory
persistentVolumeClaim:
# storageClassName: standard
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2G
gracePeriod: 600
haproxy:
enabled: true
size: 1
image: perconalab/percona-xtradb-cluster-operator:main-haproxy
# imagePullPolicy: Always
# schedulerName: mycustom-scheduler
# readinessDelaySec: 15
# livenessDelaySec: 600
# configuration: |
#
# the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
#
# global
# maxconn 2048
# external-check
# insecure-fork-wanted
# stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
#
# defaults
# default-server init-addr last,libc,none
# log global
# mode tcp
# retries 10
# timeout client 28800s
# timeout connect 100500
# timeout server 28800s
#
# frontend galera-in
# bind *:3309 accept-proxy
# bind *:3306
# mode tcp
# option clitcpka
# default_backend galera-nodes
#
# frontend galera-admin-in
# bind *:33062
# mode tcp
# option clitcpka
# default_backend galera-admin-nodes
#
# frontend galera-replica-in
# bind *:3307
# mode tcp
# option clitcpka
# default_backend galera-replica-nodes
#
# frontend galera-mysqlx-in
# bind *:33060
# mode tcp
# option clitcpka
# default_backend galera-mysqlx-nodes
#
# frontend stats
# bind *:8404
# mode http
# option http-use-htx
# http-request use-service prometheus-exporter if { path /metrics }
# imagePullSecrets:
# - name: private-registry-credentials
# annotations:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# readinessProbes:
# initialDelaySeconds: 15
# timeoutSeconds: 1
# periodSeconds: 5
# successThreshold: 1
# failureThreshold: 3
# livenessProbes:
# initialDelaySeconds: 60
# timeoutSeconds: 5
# periodSeconds: 30
# successThreshold: 1
# failureThreshold: 4
# serviceType: ClusterIP
# externalTrafficPolicy: Cluster
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
# replicasServiceEnabled: false
# replicasLoadBalancerSourceRanges:
# - 10.0.0.0/8
# replicasLoadBalancerIP: 127.0.0.1
# replicasServiceType: ClusterIP
# replicasExternalTrafficPolicy: Cluster
# replicasServiceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# replicasServiceLabels:
# rack: rack-23
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 100M
cpu: 100m
# limits:
# memory: 1G
# cpu: 700m
# priorityClassName: high-priority
# nodeSelector:
# disktype: ssd
# sidecarResources:
# requests:
# memory: 1G
# cpu: 500m
# limits:
# memory: 2G
# cpu: 600m
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# tolerations:
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
# podDisruptionBudget:
# maxUnavailable: 1
# minAvailable: 0
gracePeriod: 30
proxysql:
enabled: false
size: 3
image: perconalab/percona-xtradb-cluster-operator:main-proxysql
# imagePullPolicy: Always
# configuration: |
# datadir="/var/lib/proxysql"
#
# admin_variables =
# {
# admin_credentials="proxyadmin:admin_password"
# mysql_ifaces="0.0.0.0:6032"
# refresh_interval=2000
#
# cluster_username="proxyadmin"
# cluster_password="admin_password"
# checksum_admin_variables=false
# checksum_ldap_variables=false
# checksum_mysql_variables=false
# cluster_check_interval_ms=200
# cluster_check_status_frequency=100
# cluster_mysql_query_rules_save_to_disk=true
# cluster_mysql_servers_save_to_disk=true
# cluster_mysql_users_save_to_disk=true
# cluster_proxysql_servers_save_to_disk=true
# cluster_mysql_query_rules_diffs_before_sync=1
# cluster_mysql_servers_diffs_before_sync=1
# cluster_mysql_users_diffs_before_sync=1
# cluster_proxysql_servers_diffs_before_sync=1
# }
#
# mysql_variables=
# {
# monitor_password="monitor"
# monitor_galera_healthcheck_interval=1000
# threads=2
# max_connections=2048
# default_query_delay=0
# default_query_timeout=10000
# poll_timeout=2000
# interfaces="0.0.0.0:3306"
# default_schema="information_schema"
# stacksize=1048576
# connect_timeout_server=10000
# monitor_history=60000
# monitor_connect_interval=20000
# monitor_ping_interval=10000
# ping_timeout_server=200
# commands_stats=true
# sessions_sort=true
# have_ssl=true
# ssl_p2s_ca="/etc/proxysql/ssl-internal/ca.crt"
# ssl_p2s_cert="/etc/proxysql/ssl-internal/tls.crt"
# ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
# ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
# }
# readinessDelaySec: 15
# livenessDelaySec: 600
# schedulerName: mycustom-scheduler
# imagePullSecrets:
# - name: private-registry-credentials
# annotations:
# iam.amazonaws.com/role: role-arn
# labels:
# rack: rack-22
# serviceType: ClusterIP
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# loadBalancerIP: 127.0.0.1
# externalTrafficPolicy: Cluster
# runtimeClassName: image-rc
# sidecars:
# - image: busybox
# command: ["/bin/sh"]
# args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
# name: my-sidecar-1
# resources:
# requests:
# memory: 100M
# cpu: 100m
# limits:
# memory: 200M
# cpu: 200m
# envVarsSecret: my-env-var-secrets
resources:
requests:
memory: 1G
cpu: 600m
# limits:
# memory: 1G
# cpu: 700m
# priorityClassName: high-priority
# nodeSelector:
# disktype: ssd
# sidecarResources:
# requests:
# memory: 1G
# cpu: 500m
# limits:
# memory: 2G
# cpu: 600m
# containerSecurityContext:
# privileged: false
# podSecurityContext:
# runAsUser: 1001
# runAsGroup: 1001
# supplementalGroups: [1001]
# serviceAccountName: percona-xtradb-cluster-operator-workload
affinity:
antiAffinityTopologyKey: "kubernetes.io/hostname"
# advanced:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# tolerations:
# - key: "node.alpha.kubernetes.io/unreachable"
# operator: "Exists"
# effect: "NoExecute"
# tolerationSeconds: 6000
volumeSpec:
# emptyDir: {}
# hostPath:
# path: /data
# type: Directory
persistentVolumeClaim:
# storageClassName: standard
# accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 2G
podDisruptionBudget:
maxUnavailable: 1
# minAvailable: 0
gracePeriod: 30
# loadBalancerSourceRanges:
# - 10.0.0.0/8
# serviceAnnotations:
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
# serviceLabels:
# rack: rack-23
logcollector:
enabled: false
image: perconalab/percona-xtradb-cluster-operator:main-logcollector
# configuration: |
# [OUTPUT]
# Name es
# Match *
# Host 192.168.2.3
# Port 9200
# Index my_index
# Type my_type
resources:
requests:
memory: 100M
cpu: 200m
pmm:
enabled: false
image: percona/pmm-client:2.28.0
serverHost: monitoring-service
# serverUser: admin
# pxcParams: "--disable-tablestats-limit=2000"
# proxysqlParams: "--custom-labels=CUSTOM-LABELS"
resources:
requests:
memory: 150M
cpu: 300m
backup:
enabled: false
image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup
# backoffLimit: 6
# serviceAccountName: percona-xtradb-cluster-operator
# imagePullSecrets:
# - name: private-registry-credentials
pitr:
enabled: false
storageName: STORAGE-NAME-HERE
timeBetweenUploads: 60
# resources:
# requests:
# memory: 0.1G
# cpu: 100m
# limits:
# memory: 1G
# cpu: 700m
storages:
s3-us-west:
type: s3
verifyTLS: true
# nodeSelector:
# storage: tape
# backupWorker: 'True'
# resources:
# requests:
# memory: 1G
# cpu: 600m
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: backupWorker
# operator: In
# values:
# - 'True'
# tolerations:
# - key: "backupWorker"
# operator: "Equal"
# value: "True"
# effect: "NoSchedule"
# annotations:
# testName: scheduled-backup
# labels:
# backupWorker: 'True'
# schedulerName: 'default-scheduler'
# priorityClassName: 'high-priority'
# containerSecurityContext:
# privileged: true
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups: [1001, 1002, 1003]
s3:
bucket: S3-BACKUP-BUCKET-NAME-HERE
credentialsSecret: my-cluster-name-backup-s3
region: us-west-2
fs-pvc:
type: filesystem
# nodeSelector:
# storage: tape
# backupWorker: 'True'
# resources:
# requests:
# memory: 1G
# cpu: 600m
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: backupWorker
# operator: In
# values:
# - 'True'
# tolerations:
# - key: "backupWorker"
# operator: "Equal"
# value: "True"
# effect: "NoSchedule"
# annotations:
# testName: scheduled-backup
# labels:
# backupWorker: 'True'
# schedulerName: 'default-scheduler'
# priorityClassName: 'high-priority'
# containerSecurityContext:
# privileged: true
# podSecurityContext:
# fsGroup: 1001
# supplementalGroups: [1001, 1002, 1003]
volume:
persistentVolumeClaim:
# storageClassName: standard
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 6G
schedule:
- name: "sat-night-backup"
schedule: "0 0 * * 6"
keep: 3
storageName: s3-us-west
- name: "daily-backup"
schedule: "0 0 * * *"
keep: 5
storageName: fs-pvc
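The `s3-us-west` storage above references `credentialsSecret: my-cluster-name-backup-s3`, which must exist before a backup job can run. Assuming the key names the Percona operator documents for S3 storages (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY), a sketch of that secret with placeholder values:

apiVersion: v1
kind: Secret
metadata:
  name: my-cluster-name-backup-s3
type: Opaque
stringData:
  AWS_ACCESS_KEY_ID: REPLACE-WITH-ACCESS-KEY       # placeholder, not a real credential
  AWS_SECRET_ACCESS_KEY: REPLACE-WITH-SECRET-KEY   # placeholder, not a real credential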

MariaDB configuration file (my.cnf)

@@ -78,7 +78,7 @@ innodb_file_format=Barracuda
##
log_error=/opt/bitnami/mariadb/logs/mysqld.log
slow_query_log_file=/opt/bitnami/mariadb/logs/mysqld.log
log_queries_not_using_indexes=0
log_queries_not_using_indexes=1
slow_query_log=1
## SSL

charts/kubezero-sql/templates/mariadb/grafana-dashboards.yaml

@@ -6,7 +6,7 @@ metadata:
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"
{{ include "kubezero-lib.labels" . | indent 4 }}
{{- include "kubezero-lib.labels" . | nindent 4 }}
binaryData:
mariadb-galera.json.gz:
H4sIAAAAAAAC/+2dWW/buBaA3/MrBGFw0QJuKrtZOn1r0kmnmGSSTtKZe1EEBi3RMicyqZJUErfIf79ctJNeszkpX1qYlEmehed8XGL92PA8v99HOM048995X8Vnz/uh/hU1GIyhKPU/nPZP/jo++u3s99++nPqdojoBA5jI+pSSMeQjmLGqMoIspCjliGD5SFXBJ6lqNAIcMJLREFZ1aZLFCH+KpjWq6//Mh3VSPaEeuBH/nne0SBR+yxCFFqGK/lOAxejLppHqNAZZXBtPoYCPzeJLSFkhl+65Y+8jpmAIMDB6aReX/bQraj3tbvY2g9nd2UWiIB1Zu6oXLyySzWxoqsGwaSpbl93NYCXZGAfc7Oy0UbqwZNYeOBgkFoc4axabfZSuCDAmYjyiVvqi7tRPEOOlZ1ZDETWDDCX8k2yp26lKa0q3K1M8A7Ea0zuP0wzWykcospSikOB9khAqG6TxALwIOl6v2xX/bG93vO7LetOF1O8rWbz/eO8TSHljCJWPsNGAABr5ed2N+v98I1d+OziMAUUgGngxEE0CL0wyxiH1iFDrJYJXm54Ql6KQeUMhuTeesG9J5MHrlFDx2KYagQ8jxFsK8GMMuQom3TfdYEcXSbc/IyThKBUVgSpUpsZZkuhPolGQD627E3R/3Q223m4HO1uqNkH4QkUVbV7lNJYoM9di/hDBJNoneIji0jHyyDkEWcJZo1SUh0IrZNwqlR6WoBjXBKhVDYUzCWG0WoYgYbBWf1N/WNggTRGOWc0t285ZNErVKHy/065RegyMYg6vpbf7X07Mr3Bibyp3pa5RcQmSTOm06zeqbjq3GnV36qg/HP/z512OO2iNu/bpvGERPhL5a0SSiJk2HxM1qX0wYCTJOGwNRARGmJqmNNUi3aqIAjGFEBsS1YYuHaxVe9NZogMKo1nNB+22G5/PN2w1tf59GS6oCHZtuduqAzwcQWrotAz5g8mZNKJhQZIWUdzH2XggmpjlgDIdpiJAIrigHXTnepJvRoilCZgcSTPPUJnW7asBCC9iSjIc+SvYJ090lbfN6ND8+nxnnOOS08a1mPfMd9Epili022mzotVx196rpfR8Y95Tixst95YrFPHRLLP1glUmV1FajkcoA0UnpBmP/JH4uF2bh1dSHb1awXUzK/iT2mS/afJW+bXabKt1JRz06ncIIjV9W0TDBA3sTVRmNtrW64a/2zBdrUkAjSFvemcj9wrYUN6g2KOfpQ1l+0NCRVQxcVHLhQWlYm4MWFWJ7CxsZKSSsuYAhFz5YSOh+AmMIY4Oym6bX6ZwqJdQ76twUFj1vJIZjeGBTop1clDlpyM05GYFTzRfSywLoSc5u040nALMtC5y003RpnZeDScHEoPY3kQt6hpi2DwgV44gxMgWjhSp2oNMbofQGk//VvNkY9ZEvbHnH1OtOQNrT7AuOJ40G/7cjPLWQYpoOcOIz8QS5j9GSus4yHrekGXMvltR1gMTUKNgBgJtLYlA9w07cUIGIOkzlewF+kg+uG/8WQ+++dKU1fHNOvDNomQyBnLSvWmWIdzeoHpwgpkDGLfllwlMEnI1q4fuZhAE3dv0MRfCetYubkNK97kU3bbH4Z3ZcVjYMQvhsXUO+yFIQtMx/AQw7k/d71OTgpnhTmmVGZBe15VMCmeFIx/KY7ny+WlPHQF6IVLEoySRK0Zh2s+3+fsMfZ+eUEQA7jNIJaBOSSstKdc4r+zn5xqndYGXzSrq0JWdEZ1XFswoyoUOi+PaMlMsGfL14ahb0j6phDA3WK9tnDaAeUqgfjs7UCtNHOWG00LX2lXngUUtoBDUK/8VLoiGk7I646ReTURYwrw4KjSqV8kQYwjwvWQIeZrVkONRI35z4/DpB/TbbYQ+RkiXI0yFvxsrszLaq7sV1mAvIjBg6vIAaxC5PwDU4A51GeAQ4pjLSd8NGuXQ9vgaLB7ctubdJgGRmJP6IYYq+EhBhLQDBvPTxe6yWN8191dGKIogPtU42XY8fQ2gon4dWpoDAZexhavDjOYTqV2jF6FGqVqGtkspikf81HpZR4K7pVQ8+4/aJlW7ofVgT7gKlu0urHmiUk+CMGTtcCALr/JeKvtJfzwhIi4XOUUW+LMTsbzcw8v1x9SFBxRTX2TVGBoWmpehUjkg6VQZaxwlqnLT4MJmEaRqH80fJqR2ZUwvOI4bPl1VpiCEtoAmAmZ4YfQiw0EKo0OhRqNu4ZRKAYcvbHn1W6aG+rU7Pn/pr5gzf/zwivTh3dyslEHr4fLrYpn1LxgXufF8mZSr55r3+aR2EZWX97kaG6VARj3rOXErXKh50S8SH8IRukRRJnRneGftYmf99uQ1uEYtbx9k4YW2bXOhUuaP1j5qccmu9bR96pcT2RJrJ+AaznCpam0tmqa85Ro5T7TXVn5C4j3AjJtNeYAzHtcRziiuCWPLIus9TsPrJ6bRi7VpO/Cq8kN4WQ66cS/S8ZXjq/XnK2M5vhpgBbMBS06U9+zMfpXZ4VdVetFKnU+AyLafEZHZYEy4Wz9jMOqHBGMYaoUvv7F9R3fCClJ65S0NeJ0lZL+Ud/fFbNXir4XkR+B6Uan31gRrj6RKvX2L9hzcOrh1cLsWcPtkdr52VwGzntv5cui1GHrZg5qa2fL7VxS1lm3ViYhSD4wBR5fw1f9mZd/HoDo0daMNYUyiQV8GmD6FIGJfu4Gx5/aQkKMGcQ9wt5gKlIkfXQdqFE8N9ZRqPS9C7MLTRrxkntangz4HfQ76HPStth23EvVtOepz1Hcn1PeaiTG/fmbYN5iItCyAL4Ri4NHX7UdGPj2MB6U+rQFp20eWni2xmblerIchvyJU0l5uPwF8UhqHew73HO453KvfYc5xr7c1Z4+vt7UK7b19OrTn7rA9sztsnKQXL7Y73lTSCMl4DHDE+sr0ijZWvtO2T8Z9QQp5k+t/re2MpN5+Pth9kkk53W6QwwOHB/eABxEM0Rgkzfj6DJhh7rngm94KzNC7/YWtVuE8oGg9rn34bpjBmq0dMjy5a+8khRhGfTFnfqa770eT08+H3oEQ2jsWClBXax0jOEZwjOAYYVVGMI6RVoOEnoMEBwnrcxNb8oGmg1XBQCZYlWrZw12grkbdT9C49RN7q439ULazyqlJZ3ml5/eDcDa+A/1/Eo192POWN8P+WsFaNX5Hao7UHKndK6l1f6rdnK1V/vyut3V/oNZGsumcNutwyPajZQ7injvETb9/whJydSc/c3AqGvI+64Y8gr0nuPVTF8HxxCo88csvI5Fo/oAygPpk8C8M+btft6Zdalpv5rDKsr3msrhLKM9ps2g1BtlZi80iBxoONOqgAQbydXLlH+/fDjbe68aKv+hm3gvAORynnL2U7PGQF3hLuRIZCe5ILN2W90KmcZLxZYTaWyukahnK/en9XZJVL9h5PmjVC3Z/XrYqX2cqprGcoVLqN4GeKj4LR3AMqmyiE7yI15Mkfx0ovdBPimRUuZB/JHe+P+zlitMT0i97kuEyARzheJH3pi7yhlT989Dtn6MqXpBan7jlm41tb0E9yJupz6YLlH6
hyekEhxbdFpMbRCMSTnslqpq2paDF2yoxuXrVLQJC/tJJUeY3vpYiERJoCbmq7DtRWdLP9V4EvY/qDauvc82LkH2cv2NVP5flb/L9b/w9+HbwMbda9Xbb3sbN/wH5nSdeM3oAAA==
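The switch from `indent` to `nindent` above is the usual Helm idiom: with plain `indent`, the first rendered line also inherits whatever whitespace precedes the `{{` in the template, so alignment depends on where the call sits; `{{-` plus `nindent` trims that whitespace and emits its own newline, giving every included line a uniform four-space indent. A sketch of the corrected pattern (illustrative, reusing the chart's own helper):

labels:
  grafana_dashboard: "1"
  # {{- chomps the whitespace before the action; nindent 4 starts on a
  # fresh line and indents every line of the included block by 4 spaces
  {{- include "kubezero-lib.labels" . | nindent 4 }}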

Chart update script

@@ -1,8 +1,9 @@
#!/bin/bash
set -ex
### MariaDB
helm dep update
### MariaDB
# Fetch dashboards
../kubezero-metrics/sync_grafana_dashboards.py dashboards-mariadb.yaml templates/mariadb/grafana-dashboards.yaml

charts/kubezero-sql/values.yaml

@@ -1,17 +1,27 @@
pxc-operator:
enabled: false
# we want a cluster-wide operator
watchAllNamespaces: true
# running on the control-plane
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
mariadb-galera:
enabled: false
replicaCount: 2
# Passwords must be set explicitly, otherwise helm creates new random ones on each template|apply
rootUser:
password: 12345qwert
db:
user: mariadb
password: 12345qwert
galera:
mariabackup:
password: 12345qwert
# For a single node "cluster" force bootstrap
#bootstrap:
# bootstrapFromNode: 0
@@ -19,6 +29,8 @@ mariadb-galera:
metrics:
enabled: false
# set to false for any subsequent installation of the chart in the same cluster, so the dashboards do not overwrite each other
installDashboard: true
serviceMonitor:
enabled: false
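As the comment above notes, the fixed passwords only exist to keep helm deterministic; real deployments should override them. A sketch of an override file using the chart's own value keys (all values shown are placeholders):

mariadb-galera:
  rootUser:
    password: CHANGE-ME-root         # placeholder
  db:
    user: mariadb
    password: CHANGE-ME-db           # placeholder
  galera:
    mariabackup:
      password: CHANGE-ME-backup     # placeholder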