feat: cleanup SQL module, remove Percona operator
parent 40506f4589
commit 097551f7f3

Chart.yaml
@@ -1,15 +1,14 @@
 apiVersion: v2
 name: kubezero-sql
-description: KubeZero umbrella chart for SQL databases, Percona XtraDB Cluster
+description: KubeZero umbrella chart for SQL databases, mariadb-galera
 type: application
-version: 0.3.3
+version: 0.4.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
   - kubezero
-  - mysql
-  - percona
-# - postgresql
+  - mariadb
+  - galera
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
@@ -17,12 +16,8 @@ dependencies:
   - name: kubezero-lib
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
-  - name: pxc-operator
-    version: 1.12.1
-    repository: https://percona.github.io/percona-helm-charts/
-    condition: pxc-operator.enabled
   - name: mariadb-galera
     version: 14.0.10
     repository: https://charts.bitnami.com/bitnami
     condition: mariadb-galera.enabled
-kubeVersion: ">= 1.24.0"
+kubeVersion: ">= 1.26.0"
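
With the pxc-operator entry dropped, mariadb-galera is the only optional dependency left, still gated by the `mariadb-galera.enabled` condition. Assembled from the hunk above (a sketch, not copied from the repo), the resulting dependencies block reads roughly:

```yaml
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: mariadb-galera
    version: 14.0.10
    repository: https://charts.bitnami.com/bitnami
    condition: mariadb-galera.enabled
```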

README.md
@@ -1,8 +1,8 @@
 # kubezero-sql
 
-![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
-KubeZero umbrella chart for SQL databases, Percona XtraDB Cluster
+KubeZero umbrella chart for SQL databases, mariadb-galera
 
 **Homepage:** <https://kubezero.com>
 
@@ -14,13 +14,12 @@ KubeZero umbrella chart for SQL databases, Percona XtraDB Cluster
 
 ## Requirements
 
-Kubernetes: `>= 1.24.0`
+Kubernetes: `>= 1.26.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.7 |
-| https://percona.github.io/percona-helm-charts/ | pxc-operator | 1.12.1 |
+| https://charts.bitnami.com/bitnami | mariadb-galera | 14.0.10 |
 
 ## Values
 
@@ -38,14 +37,6 @@ Kubernetes: `>= 1.24.0`
 | mariadb-galera.metrics.prometheusRules.enabled | bool | `false` | |
 | mariadb-galera.metrics.serviceMonitor.enabled | bool | `false` | |
 | mariadb-galera.replicaCount | int | `2` | |
-| pxc-operator.enabled | bool | `false` | |
-| pxc-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
-| pxc-operator.resources.limits.memory | string | `"512Mi"` | |
-| pxc-operator.resources.requests.cpu | string | `"50m"` | |
-| pxc-operator.resources.requests.memory | string | `"32Mi"` | |
-| pxc-operator.tolerations[0].effect | string | `"NoSchedule"` | |
-| pxc-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
-| pxc-operator.watchAllNamespaces | bool | `true` | |
 
 # Changes
 
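
The remaining Values rows are all mariadb-galera keys. A minimal per-cluster override that switches the metrics integration on, using only the keys listed in the table above (the `true` values are illustrative, not chart defaults):

```yaml
mariadb-galera:
  enabled: true
  replicaCount: 2
  metrics:
    serviceMonitor:
      enabled: true
    prometheusRules:
      enabled: true
```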

Removed file (Percona XtraDBCluster example CR):
@@ -1,591 +0,0 @@
-apiVersion: pxc.percona.com/v1-11-0
-kind: PerconaXtraDBCluster
-metadata:
-  name: best-db
-  finalizers:
-    - delete-pxc-pods-in-order
-#    - delete-ssl
-#    - delete-proxysql-pvc
-#    - delete-pxc-pvc
-#  annotations:
-#    percona.com/issue-vault-token: "true"
-spec:
-  crVersion: 1.11.0
-#  secretsName: cluster1-secrets
-#  vaultSecretName: keyring-secret-vault
-#  sslSecretName: cluster1-ssl
-#  sslInternalSecretName: cluster1-ssl-internal
-#  logCollectorSecretName: cluster1-log-collector-secrets
-#  initImage: percona/percona-xtradb-cluster-operator:1.12.0
-#  enableCRValidationWebhook: true
-#  tls:
-#    SANs:
-#      - pxc-1.example.com
-#      - pxc-2.example.com
-#      - pxc-3.example.com
-#    issuerConf:
-#      name: special-selfsigned-issuer
-#      kind: ClusterIssuer
-#      group: cert-manager.io
-  allowUnsafeConfigurations: true
-#  pause: false
-  updateStrategy: SmartUpdate
-  upgradeOptions:
-    versionServiceEndpoint: https://check.percona.com
-    apply: disabled
-    schedule: "0 4 * * *"
-  pxc:
-    size: 1
-    image: percona/percona-xtradb-cluster:8.0.27-18.1
-    autoRecovery: true
-#    expose:
-#      enabled: true
-#      type: LoadBalancer
-#      trafficPolicy: Local
-#      loadBalancerSourceRanges:
-#        - 10.0.0.0/8
-#      annotations:
-#        networking.gke.io/load-balancer-type: "Internal"
-#    replicationChannels:
-#    - name: pxc1_to_pxc2
-#      isSource: true
-#    - name: pxc2_to_pxc1
-#      isSource: false
-#      configuration:
-#        sourceRetryCount: 3
-#        sourceConnectRetry: 60
-#        sourcesList:
-#        - host: 10.95.251.101
-#          port: 3306
-#          weight: 100
-#    schedulerName: mycustom-scheduler
-#    readinessDelaySec: 15
-#    livenessDelaySec: 600
-#    configuration: |
-#      [mysqld]
-#      wsrep_debug=CLIENT
-#      wsrep_provider_options="gcache.size=1G; gcache.recover=yes"
-#      [sst]
-#      xbstream-opts=--decompress
-#      [xtrabackup]
-#      compress=lz4
-#      for PXC 5.7
-#      [xtrabackup]
-#      compress
-#    imagePullSecrets:
-#      - name: private-registry-credentials
-#    priorityClassName: high-priority
-#    annotations:
-#      iam.amazonaws.com/role: role-arn
-#    labels:
-#      rack: rack-22
-#    readinessProbes:
-#      initialDelaySeconds: 15
-#      timeoutSeconds: 15
-#      periodSeconds: 30
-#      successThreshold: 1
-#      failureThreshold: 5
-#    livenessProbes:
-#      initialDelaySeconds: 300
-#      timeoutSeconds: 5
-#      periodSeconds: 10
-#      successThreshold: 1
-#      failureThreshold: 3
-#    containerSecurityContext:
-#      privileged: false
-#    podSecurityContext:
-#      runAsUser: 1001
-#      runAsGroup: 1001
-#      supplementalGroups: [1001]
-#    serviceAccountName: percona-xtradb-cluster-operator-workload
-#    imagePullPolicy: Always
-#    runtimeClassName: image-rc
-#    sidecars:
-#    - image: busybox
-#      command: ["/bin/sh"]
-#      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
-#      name: my-sidecar-1
-#      resources:
-#        requests:
-#          memory: 100M
-#          cpu: 100m
-#        limits:
-#          memory: 200M
-#          cpu: 200m
-#    envVarsSecret: my-env-var-secrets
-    resources:
-      requests:
-        memory: 512M
-        cpu: 200m
-#        ephemeral-storage: 1G
-#      limits:
-#        memory: 1G
-#        cpu: "1"
-#        ephemeral-storage: 1G
-#    nodeSelector:
-#      disktype: ssd
-    affinity:
-      antiAffinityTopologyKey: "kubernetes.io/hostname"
-#      advanced:
-#        nodeAffinity:
-#          requiredDuringSchedulingIgnoredDuringExecution:
-#            nodeSelectorTerms:
-#            - matchExpressions:
-#              - key: kubernetes.io/e2e-az-name
-#                operator: In
-#                values:
-#                - e2e-az1
-#                - e2e-az2
-#    tolerations:
-#    - key: "node.alpha.kubernetes.io/unreachable"
-#      operator: "Exists"
-#      effect: "NoExecute"
-#      tolerationSeconds: 6000
-#    podDisruptionBudget:
-#      maxUnavailable: 1
-#      minAvailable: 0
-    volumeSpec:
-#      emptyDir: {}
-#      hostPath:
-#        path: /data
-#        type: Directory
-      persistentVolumeClaim:
-#        storageClassName: standard
-#        accessModes: [ "ReadWriteOnce" ]
-        resources:
-          requests:
-            storage: 2G
-    gracePeriod: 600
-  haproxy:
-    enabled: true
-    size: 1
-    image: perconalab/percona-xtradb-cluster-operator:main-haproxy
-#    imagePullPolicy: Always
-#    schedulerName: mycustom-scheduler
-#    readinessDelaySec: 15
-#    livenessDelaySec: 600
-#    configuration: |
-#
-#    the actual default configuration file can be found here https://github.com/percona/percona-docker/blob/main/haproxy/dockerdir/etc/haproxy/haproxy-global.cfg
-#
-#      global
-#        maxconn 2048
-#        external-check
-#        insecure-fork-wanted
-#        stats socket /etc/haproxy/pxc/haproxy.sock mode 600 expose-fd listeners level admin
-#
-#      defaults
-#        default-server init-addr last,libc,none
-#        log global
-#        mode tcp
-#        retries 10
-#        timeout client 28800s
-#        timeout connect 100500
-#        timeout server 28800s
-#
-#      frontend galera-in
-#        bind *:3309 accept-proxy
-#        bind *:3306
-#        mode tcp
-#        option clitcpka
-#        default_backend galera-nodes
-#
-#      frontend galera-admin-in
-#        bind *:33062
-#        mode tcp
-#        option clitcpka
-#        default_backend galera-admin-nodes
-#
-#      frontend galera-replica-in
-#        bind *:3307
-#        mode tcp
-#        option clitcpka
-#        default_backend galera-replica-nodes
-#
-#      frontend galera-mysqlx-in
-#        bind *:33060
-#        mode tcp
-#        option clitcpka
-#        default_backend galera-mysqlx-nodes
-#
-#      frontend stats
-#        bind *:8404
-#        mode http
-#        option http-use-htx
-#        http-request use-service prometheus-exporter if { path /metrics }
-#    imagePullSecrets:
-#      - name: private-registry-credentials
-#    annotations:
-#      iam.amazonaws.com/role: role-arn
-#    labels:
-#      rack: rack-22
-#    readinessProbes:
-#      initialDelaySeconds: 15
-#      timeoutSeconds: 1
-#      periodSeconds: 5
-#      successThreshold: 1
-#      failureThreshold: 3
-#    livenessProbes:
-#      initialDelaySeconds: 60
-#      timeoutSeconds: 5
-#      periodSeconds: 30
-#      successThreshold: 1
-#      failureThreshold: 4
-#    serviceType: ClusterIP
-#    externalTrafficPolicy: Cluster
-#    loadbalancersourceranges:
-#      - 10.0.0.0/8
-#    loadBalancerIP: 127.0.0.1
-#    serviceAnnotations:
-#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
-#    serviceLabels:
-#      rack: rack-23
-#    replicasServiceEnabled: false
-#    replicasLoadBalancerSourceRanges:
-#      - 10.0.0.0/8
-#    replicasLoadBalancerIP: 127.0.0.1
-#    replicasServiceType: ClusterIP
-#    replicasExternalTrafficPolicy: Cluster
-#    replicasServiceAnnotations:
-#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
-#    replicasServiceLabels:
-#      rack: rack-23
-#    runtimeClassName: image-rc
-#    sidecars:
-#    - image: busybox
-#      command: ["/bin/sh"]
-#      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
-#      name: my-sidecar-1
-#      resources:
-#        requests:
-#          memory: 100M
-#          cpu: 100m
-#        limits:
-#          memory: 200M
-#          cpu: 200m
-#    envVarsSecret: my-env-var-secrets
-    resources:
-      requests:
-        memory: 100M
-        cpu: 100m
-#      limits:
-#        memory: 1G
-#        cpu: 700m
-#    priorityClassName: high-priority
-#    nodeSelector:
-#      disktype: ssd
-#    sidecarResources:
-#      requests:
-#        memory: 1G
-#        cpu: 500m
-#      limits:
-#        memory: 2G
-#        cpu: 600m
-#    containerSecurityContext:
-#      privileged: false
-#    podSecurityContext:
-#      runAsUser: 1001
-#      runAsGroup: 1001
-#      supplementalGroups: [1001]
-#    serviceAccountName: percona-xtradb-cluster-operator-workload
-    affinity:
-      antiAffinityTopologyKey: "kubernetes.io/hostname"
-#      advanced:
-#        nodeAffinity:
-#          requiredDuringSchedulingIgnoredDuringExecution:
-#            nodeSelectorTerms:
-#            - matchExpressions:
-#              - key: kubernetes.io/e2e-az-name
-#                operator: In
-#                values:
-#                - e2e-az1
-#                - e2e-az2
-#    tolerations:
-#    - key: "node.alpha.kubernetes.io/unreachable"
-#      operator: "Exists"
-#      effect: "NoExecute"
-#      tolerationSeconds: 6000
-#    podDisruptionBudget:
-#      maxUnavailable: 1
-#      minAvailable: 0
-    gracePeriod: 30
-  proxysql:
-    enabled: false
-    size: 3
-    image: perconalab/percona-xtradb-cluster-operator:main-proxysql
-#    imagePullPolicy: Always
-#    configuration: |
-#      datadir="/var/lib/proxysql"
-#
-#      admin_variables =
-#      {
-#        admin_credentials="proxyadmin:admin_password"
-#        mysql_ifaces="0.0.0.0:6032"
-#        refresh_interval=2000
-#
-#        cluster_username="proxyadmin"
-#        cluster_password="admin_password"
-#        checksum_admin_variables=false
-#        checksum_ldap_variables=false
-#        checksum_mysql_variables=false
-#        cluster_check_interval_ms=200
-#        cluster_check_status_frequency=100
-#        cluster_mysql_query_rules_save_to_disk=true
-#        cluster_mysql_servers_save_to_disk=true
-#        cluster_mysql_users_save_to_disk=true
-#        cluster_proxysql_servers_save_to_disk=true
-#        cluster_mysql_query_rules_diffs_before_sync=1
-#        cluster_mysql_servers_diffs_before_sync=1
-#        cluster_mysql_users_diffs_before_sync=1
-#        cluster_proxysql_servers_diffs_before_sync=1
-#      }
-#
-#      mysql_variables=
-#      {
-#        monitor_password="monitor"
-#        monitor_galera_healthcheck_interval=1000
-#        threads=2
-#        max_connections=2048
-#        default_query_delay=0
-#        default_query_timeout=10000
-#        poll_timeout=2000
-#        interfaces="0.0.0.0:3306"
-#        default_schema="information_schema"
-#        stacksize=1048576
-#        connect_timeout_server=10000
-#        monitor_history=60000
-#        monitor_connect_interval=20000
-#        monitor_ping_interval=10000
-#        ping_timeout_server=200
-#        commands_stats=true
-#        sessions_sort=true
-#        have_ssl=true
-#        ssl_p2s_ca="/etc/proxysql/ssl-internal/ca.crt"
-#        ssl_p2s_cert="/etc/proxysql/ssl-internal/tls.crt"
-#        ssl_p2s_key="/etc/proxysql/ssl-internal/tls.key"
-#        ssl_p2s_cipher="ECDHE-RSA-AES128-GCM-SHA256"
-#      }
-#    readinessDelaySec: 15
-#    livenessDelaySec: 600
-#    schedulerName: mycustom-scheduler
-#    imagePullSecrets:
-#      - name: private-registry-credentials
-#    annotations:
-#      iam.amazonaws.com/role: role-arn
-#    labels:
-#      rack: rack-22
-#    serviceType: ClusterIP
-#    loadbalancersourceranges:
-#      - 10.0.0.0/8
-#    loadBalancerIP: 127.0.0.1
-#    externalTrafficPolicy: Cluster
-#    runtimeClassName: image-rc
-#    sidecars:
-#    - image: busybox
-#      command: ["/bin/sh"]
-#      args: ["-c", "while true; do trap 'exit 0' SIGINT SIGTERM SIGQUIT SIGKILL; done;"]
-#      name: my-sidecar-1
-#      resources:
-#        requests:
-#          memory: 100M
-#          cpu: 100m
-#        limits:
-#          memory: 200M
-#          cpu: 200m
-#    envVarsSecret: my-env-var-secrets
-    resources:
-      requests:
-        memory: 1G
-        cpu: 600m
-#      limits:
-#        memory: 1G
-#        cpu: 700m
-#    priorityClassName: high-priority
-#    nodeSelector:
-#      disktype: ssd
-#    sidecarResources:
-#      requests:
-#        memory: 1G
-#        cpu: 500m
-#      limits:
-#        memory: 2G
-#        cpu: 600m
-#    containerSecurityContext:
-#      privileged: false
-#    podSecurityContext:
-#      runAsUser: 1001
-#      runAsGroup: 1001
-#      supplementalGroups: [1001]
-#    serviceAccountName: percona-xtradb-cluster-operator-workload
-    affinity:
-      antiAffinityTopologyKey: "kubernetes.io/hostname"
-#      advanced:
-#        nodeAffinity:
-#          requiredDuringSchedulingIgnoredDuringExecution:
-#            nodeSelectorTerms:
-#            - matchExpressions:
-#              - key: kubernetes.io/e2e-az-name
-#                operator: In
-#                values:
-#                - e2e-az1
-#                - e2e-az2
-#    tolerations:
-#    - key: "node.alpha.kubernetes.io/unreachable"
-#      operator: "Exists"
-#      effect: "NoExecute"
-#      tolerationSeconds: 6000
-    volumeSpec:
-#      emptyDir: {}
-#      hostPath:
-#        path: /data
-#        type: Directory
-      persistentVolumeClaim:
-#        storageClassName: standard
-#        accessModes: [ "ReadWriteOnce" ]
-        resources:
-          requests:
-            storage: 2G
-    podDisruptionBudget:
-      maxUnavailable: 1
-#      minAvailable: 0
-    gracePeriod: 30
-#    loadBalancerSourceRanges:
-#      - 10.0.0.0/8
-#    serviceAnnotations:
-#      service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
-#    serviceLabels:
-#      rack: rack-23
-  logcollector:
-    enabled: false
-    image: perconalab/percona-xtradb-cluster-operator:main-logcollector
-#    configuration: |
-#      [OUTPUT]
-#        Name es
-#        Match *
-#        Host 192.168.2.3
-#        Port 9200
-#        Index my_index
-#        Type my_type
-    resources:
-      requests:
-        memory: 100M
-        cpu: 200m
-  pmm:
-    enabled: false
-    image: percona/pmm-client:2.28.0
-    serverHost: monitoring-service
-#    serverUser: admin
-#    pxcParams: "--disable-tablestats-limit=2000"
-#    proxysqlParams: "--custom-labels=CUSTOM-LABELS"
-    resources:
-      requests:
-        memory: 150M
-        cpu: 300m
-  backup:
-    enabled: false
-    image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup
-#    backoffLimit: 6
-#    serviceAccountName: percona-xtradb-cluster-operator
-#    imagePullSecrets:
-#      - name: private-registry-credentials
-    pitr:
-      enabled: false
-      storageName: STORAGE-NAME-HERE
-      timeBetweenUploads: 60
-#      resources:
-#        requests:
-#          memory: 0.1G
-#          cpu: 100m
-#        limits:
-#          memory: 1G
-#          cpu: 700m
-    storages:
-      s3-us-west:
-        type: s3
-        verifyTLS: true
-#        nodeSelector:
-#          storage: tape
-#          backupWorker: 'True'
-#        resources:
-#          requests:
-#            memory: 1G
-#            cpu: 600m
-#        affinity:
-#          nodeAffinity:
-#            requiredDuringSchedulingIgnoredDuringExecution:
-#              nodeSelectorTerms:
-#              - matchExpressions:
-#                - key: backupWorker
-#                  operator: In
-#                  values:
-#                  - 'True'
-#        tolerations:
-#          - key: "backupWorker"
-#            operator: "Equal"
-#            value: "True"
-#            effect: "NoSchedule"
-#        annotations:
-#          testName: scheduled-backup
-#        labels:
-#          backupWorker: 'True'
-#        schedulerName: 'default-scheduler'
-#        priorityClassName: 'high-priority'
-#        containerSecurityContext:
-#          privileged: true
-#        podSecurityContext:
-#          fsGroup: 1001
-#          supplementalGroups: [1001, 1002, 1003]
-        s3:
-          bucket: S3-BACKUP-BUCKET-NAME-HERE
-          credentialsSecret: my-cluster-name-backup-s3
-          region: us-west-2
-      fs-pvc:
-        type: filesystem
-#        nodeSelector:
-#          storage: tape
-#          backupWorker: 'True'
-#        resources:
-#          requests:
-#            memory: 1G
-#            cpu: 600m
-#        affinity:
-#          nodeAffinity:
-#            requiredDuringSchedulingIgnoredDuringExecution:
-#              nodeSelectorTerms:
-#              - matchExpressions:
-#                - key: backupWorker
-#                  operator: In
-#                  values:
-#                  - 'True'
-#        tolerations:
-#          - key: "backupWorker"
-#            operator: "Equal"
-#            value: "True"
-#            effect: "NoSchedule"
-#        annotations:
-#          testName: scheduled-backup
-#        labels:
-#          backupWorker: 'True'
-#        schedulerName: 'default-scheduler'
-#        priorityClassName: 'high-priority'
-#        containerSecurityContext:
-#          privileged: true
-#        podSecurityContext:
-#          fsGroup: 1001
-#          supplementalGroups: [1001, 1002, 1003]
-        volume:
-          persistentVolumeClaim:
-#            storageClassName: standard
-            accessModes: [ "ReadWriteOnce" ]
-            resources:
-              requests:
-                storage: 6G
-    schedule:
-      - name: "sat-night-backup"
-        schedule: "0 0 * * 6"
-        keep: 3
-        storageName: s3-us-west
-      - name: "daily-backup"
-        schedule: "0 0 * * *"
-        keep: 5
-        storageName: fs-pvc

update.sh
@@ -1,9 +1,15 @@
 #!/bin/bash
 set -ex
 
-helm dep update
+. ../../scripts/lib-update.sh
 
+#login_ecr_public
+
+update_helm
+
 ### MariaDB
 
 # Fetch dashboards
 ../kubezero-metrics/sync_grafana_dashboards.py dashboards-mariadb.yaml templates/mariadb/grafana-dashboards.yaml
+
+update_docs

values.yaml
@@ -1,24 +1,3 @@
-pxc-operator:
-  enabled: false
-
-  # we want a clusterwide operator
-  watchAllNamespaces: true
-
-  # running on the control-plane
-  tolerations:
-    - key: node-role.kubernetes.io/control-plane
-      effect: NoSchedule
-  nodeSelector:
-    node-role.kubernetes.io/control-plane: ""
-
-  resources:
-    limits:
-      #cpu: 200m
-      memory: 512Mi
-    requests:
-      cpu: 50m
-      memory: 32Mi
-
 mariadb-galera:
   enabled: false
 