feat: v1.24 alpha
This commit is contained in:
parent
f9a904b619
commit
fa12bc5d2b
@ -3,7 +3,7 @@ ARG ALPINE_VERSION=3.16
|
||||
FROM alpine:${ALPINE_VERSION}
|
||||
|
||||
ARG ALPINE_VERSION
|
||||
ARG KUBE_VERSION=1.23
|
||||
ARG KUBE_VERSION=1.24
|
||||
|
||||
RUN cd /etc/apk/keys && \
|
||||
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubeadm
|
||||
description: KubeZero Kubeadm cluster config
|
||||
type: application
|
||||
version: 1.23.11
|
||||
version: 1.24.7
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -11,4 +11,4 @@ keywords:
|
||||
maintainers:
|
||||
- name: Stefan Reimer
|
||||
email: stefan@zero-downtime.net
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubeadm
|
||||
|
||||
![Version: 1.23.10](https://img.shields.io/badge/Version-1.23.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 1.24.7](https://img.shields.io/badge/Version-1.24.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero Kubeadm cluster config
|
||||
|
||||
@ -14,7 +14,7 @@ KubeZero Kubeadm cluster config
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
## Values
|
||||
|
||||
@ -44,7 +44,7 @@ Kubernetes: `>= 1.20.0`
|
||||
## Resources
|
||||
|
||||
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
|
||||
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
|
||||
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
|
||||
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
|
||||
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
|
||||
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
|
||||
|
@ -18,7 +18,7 @@
|
||||
## Resources
|
||||
|
||||
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
|
||||
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
|
||||
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
|
||||
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
|
||||
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
|
||||
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
|
||||
|
@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta3
|
||||
kind: ClusterConfiguration
|
||||
kubernetesVersion: {{ .Chart.Version }}
|
||||
clusterName: {{ .Values.global.clusterName }}
|
||||
featureGates:
|
||||
UnversionedKubeletConfigMap: true
|
||||
#featureGates:
|
||||
# NonGracefulFailover: true
|
||||
controlPlaneEndpoint: {{ .Values.api.endpoint }}
|
||||
networking:
|
||||
podSubnet: 10.244.0.0/16
|
||||
|
@ -1,6 +1,6 @@
|
||||
{{- /* Feature gates for all control plane components */ -}}
|
||||
{{- define "kubeadm.featuregates" }}
|
||||
{{- $gates := list "CustomCPUCFSQuotaPeriod" "KubeletCredentialProviders"}}
|
||||
{{- $gates := list "CustomCPUCFSQuotaPeriod" "CronJobTimeZone" "NodeOutOfServiceVolumeDetach" }}
|
||||
{{- if eq .return "csv" }}
|
||||
{{- range $key := $gates }}
|
||||
{{- $key }}=true,
|
||||
|
@ -1,4 +1,4 @@
|
||||
apiVersion: kubelet.config.k8s.io/v1alpha1
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
kind: CredentialProviderConfig
|
||||
providers:
|
||||
- name: amazon-ecr-credential-helper
|
||||
|
@ -112,6 +112,8 @@ spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
|
||||
containers:
|
||||
- name: aws-iam-authenticator
|
||||
|
@ -2,8 +2,8 @@ apiVersion: v2
|
||||
name: kubezero-addons
|
||||
description: KubeZero umbrella chart for various optional cluster addons
|
||||
type: application
|
||||
version: 0.6.3
|
||||
appVersion: v1.23.11
|
||||
version: 0.7.0
|
||||
appVersion: v1.24
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -34,4 +34,4 @@ dependencies:
|
||||
# https://github.com/NVIDIA/k8s-device-plugin
|
||||
repository: https://nvidia.github.io/k8s-device-plugin
|
||||
condition: nvidia-device-plugin.enabled
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-addons
|
||||
|
||||
![Version: 0.6.3](https://img.shields.io/badge/Version-0.6.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.11](https://img.shields.io/badge/AppVersion-v1.23.11-informational?style=flat-square)
|
||||
![Version: 0.7.0](https://img.shields.io/badge/Version-0.7.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
|
||||
|
||||
KubeZero umbrella chart for various optional cluster addons
|
||||
|
||||
@ -14,7 +14,7 @@ KubeZero umbrella chart for various optional cluster addons
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
@ -63,6 +63,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
||||
| aws-node-termination-handler.taintNode | bool | `true` | |
|
||||
| aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| aws-node-termination-handler.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| aws-node-termination-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| aws-node-termination-handler.useProviderId | bool | `true` | |
|
||||
| awsNeuron.enabled | bool | `false` | |
|
||||
| awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
|
||||
@ -80,6 +82,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
||||
| cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
|
||||
| cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| cluster-autoscaler.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| cluster-autoscaler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| clusterBackup.enabled | bool | `false` | |
|
||||
| clusterBackup.extraEnv | list | `[]` | |
|
||||
| clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
|
||||
@ -104,6 +108,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
||||
| external-dns.sources[0] | string | `"service"` | |
|
||||
| external-dns.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| external-dns.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| external-dns.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| external-dns.triggerLoopOnEvent | bool | `true` | |
|
||||
| forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" |
|
||||
| forseti.aws.region | string | `""` | |
|
||||
|
@ -56,5 +56,7 @@ spec:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
restartPolicy: Never
|
||||
{{- end }}
|
||||
|
@ -71,6 +71,8 @@ spec:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: aws-token
|
||||
projected:
|
||||
|
@ -67,6 +67,8 @@ aws-node-termination-handler:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -140,6 +142,8 @@ cluster-autoscaler:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
|
||||
# On AWS enable Projected Service Accounts to assume IAM role
|
||||
#extraEnv:
|
||||
@ -170,6 +174,8 @@ external-dns:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-cert-manager
|
||||
description: KubeZero Umbrella Chart for cert-manager
|
||||
type: application
|
||||
version: 0.9.2
|
||||
version: 0.9.3
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -18,4 +18,4 @@ dependencies:
|
||||
- name: cert-manager
|
||||
version: 1.9.1
|
||||
repository: https://charts.jetstack.io
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-cert-manager
|
||||
|
||||
![Version: 0.9.2](https://img.shields.io/badge/Version-0.9.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.9.3](https://img.shields.io/badge/Version-0.9.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero Umbrella Chart for cert-manager
|
||||
|
||||
@ -14,7 +14,7 @@ KubeZero Umbrella Chart for cert-manager
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
@ -35,6 +35,8 @@ If your resolvers need additional sercrets like CloudFlare API tokens etc. make
|
||||
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| cert-manager.cainjector.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| cert-manager.cainjector.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| cert-manager.enabled | bool | `true` | |
|
||||
| cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
|
||||
| cert-manager.global.leaderElection.namespace | string | `"cert-manager"` | |
|
||||
@ -45,9 +47,13 @@ If your resolvers need additional sercrets like CloudFlare API tokens etc. make
|
||||
| cert-manager.startupapicheck.enabled | bool | `false` | |
|
||||
| cert-manager.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| cert-manager.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| cert-manager.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| cert-manager.webhook.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| cert-manager.webhook.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| cert-manager.webhook.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| clusterIssuer | object | `{}` | |
|
||||
| localCA.enabled | bool | `false` | |
|
||||
| localCA.selfsigning | bool | `true` | |
|
||||
|
@ -49,6 +49,8 @@ cert-manager:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -60,6 +62,8 @@ cert-manager:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -67,6 +71,8 @@ cert-manager:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-ci
|
||||
description: KubeZero umbrella chart for all things CI
|
||||
type: application
|
||||
version: 0.5.14
|
||||
version: 0.5.15
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -26,7 +26,7 @@ dependencies:
|
||||
repository: https://dl.gitea.io/charts/
|
||||
condition: gitea.enabled
|
||||
- name: jenkins
|
||||
version: 4.2.6
|
||||
version: 4.2.8
|
||||
repository: https://charts.jenkins.io
|
||||
condition: jenkins.enabled
|
||||
- name: trivy
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-ci
|
||||
|
||||
![Version: 0.5.14](https://img.shields.io/badge/Version-0.5.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.5.15](https://img.shields.io/badge/Version-0.5.15-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero umbrella chart for all things CI
|
||||
|
||||
@ -20,7 +20,7 @@ Kubernetes: `>= 1.20.0`
|
||||
|------------|------|---------|
|
||||
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 |
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
|
||||
| https://charts.jenkins.io | jenkins | 4.2.6 |
|
||||
| https://charts.jenkins.io | jenkins | 4.2.8 |
|
||||
| https://dl.gitea.io/charts/ | gitea | 5.0.9 |
|
||||
| https://gocd.github.io/helm-chart | gocd | 1.40.8 |
|
||||
|
||||
@ -85,7 +85,7 @@ Kubernetes: `>= 1.20.0`
|
||||
| jenkins.agent.resources.requests.cpu | string | `"512m"` | |
|
||||
| jenkins.agent.resources.requests.memory | string | `"1024Mi"` | |
|
||||
| jenkins.agent.showRawYaml | bool | `false` | |
|
||||
| jenkins.agent.tag | string | `"v0.3.2"` | |
|
||||
| jenkins.agent.tag | string | `"v0.4.0"` | |
|
||||
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
|
||||
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n limits:\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
|
||||
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
|
||||
@ -95,7 +95,7 @@ Kubernetes: `>= 1.20.0`
|
||||
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
|
||||
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
|
||||
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
|
||||
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3706.vdfb_d599579f3"` | |
|
||||
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3724.v0920c1e0ec69"` | |
|
||||
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
|
||||
| jenkins.controller.installPlugins[2] | string | `"git:4.12.1"` | |
|
||||
| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1512.vb_79d418d5fc8"` | |
|
||||
@ -104,7 +104,7 @@ Kubernetes: `>= 1.20.0`
|
||||
| jenkins.controller.installPlugins[6] | string | `"htmlpublisher:1.31"` | |
|
||||
| jenkins.controller.installPlugins[7] | string | `"build-discarder:139.v05696a_7fe240"` | |
|
||||
| jenkins.controller.installPlugins[8] | string | `"dark-theme:245.vb_a_2b_b_010ea_96"` | |
|
||||
| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.196.va_55f5e31e3c2"` | |
|
||||
| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.199.v4a_1d1f5d074f"` | |
|
||||
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
|
||||
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
|
||||
| jenkins.controller.prometheus.enabled | bool | `false` | |
|
||||
|
@ -114,7 +114,7 @@ jenkins:
|
||||
numToKeepStr: "10"
|
||||
|
||||
installPlugins:
|
||||
- kubernetes:3706.vdfb_d599579f3
|
||||
- kubernetes:3724.v0920c1e0ec69
|
||||
- workflow-aggregator:581.v0c46fa_697ffd
|
||||
- git:4.12.1
|
||||
- configuration-as-code:1512.vb_79d418d5fc8
|
||||
@ -123,7 +123,7 @@ jenkins:
|
||||
- htmlpublisher:1.31
|
||||
- build-discarder:139.v05696a_7fe240
|
||||
- dark-theme:245.vb_a_2b_b_010ea_96
|
||||
- kubernetes-credentials-provider:1.196.va_55f5e31e3c2
|
||||
- kubernetes-credentials-provider:1.199.v4a_1d1f5d074f
|
||||
|
||||
serviceAccountAgent:
|
||||
create: true
|
||||
@ -132,7 +132,7 @@ jenkins:
|
||||
# Preconfigure agents to use zdt podman requires fuse/overlayfs
|
||||
agent:
|
||||
image: public.ecr.aws/zero-downtime/jenkins-podman
|
||||
tag: v0.3.2
|
||||
tag: v0.4.0
|
||||
resources:
|
||||
requests:
|
||||
cpu: "512m"
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-istio
|
||||
description: KubeZero Umbrella Chart for Istio
|
||||
type: application
|
||||
version: 0.8.4
|
||||
version: 0.8.5
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -22,7 +22,7 @@ dependencies:
|
||||
version: 1.14.3
|
||||
repository: https://istio-release.storage.googleapis.com/charts
|
||||
- name: kiali-server
|
||||
version: 1.54
|
||||
version: "1.54"
|
||||
repository: https://kiali.org/helm-charts
|
||||
condition: kiali-server.enabled
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-istio
|
||||
|
||||
![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.8.5](https://img.shields.io/badge/Version-0.8.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero Umbrella Chart for Istio
|
||||
|
||||
@ -16,7 +16,7 @@ Installs the Istio control plane
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
@ -44,6 +44,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| istiod.pilot.resources.requests.memory | string | `"128Mi"` | |
|
||||
| istiod.pilot.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| istiod.pilot.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| istiod.pilot.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| istiod.pilot.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| istiod.telemetry.enabled | bool | `false` | |
|
||||
| kiali-server.auth.strategy | string | `"anonymous"` | |
|
||||
| kiali-server.deployment.ingress_enabled | bool | `false` | |
|
||||
|
@ -19,6 +19,8 @@ istiod:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
|
||||
resources:
|
||||
requests:
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-logging
|
||||
description: KubeZero Umbrella Chart for complete EFK stack
|
||||
type: application
|
||||
version: 0.8.3
|
||||
version: 0.8.4
|
||||
appVersion: 1.6.0
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
@ -29,4 +29,4 @@ dependencies:
|
||||
- name: fluent-bit
|
||||
version: 0.20.6
|
||||
condition: fluent-bit.enabled
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-logging
|
||||
|
||||
![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
|
||||
![Version: 0.8.4](https://img.shields.io/badge/Version-0.8.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
|
||||
|
||||
KubeZero Umbrella Chart for complete EFK stack
|
||||
|
||||
@ -14,7 +14,7 @@ KubeZero Umbrella Chart for complete EFK stack
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
@ -62,6 +62,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| eck-operator.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| eck-operator.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| eck-operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| elastic_password | string | `""` | |
|
||||
| es.nodeSets | list | `[]` | |
|
||||
| es.prometheus | bool | `false` | |
|
||||
|
@ -7,6 +7,8 @@ eck-operator:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-metrics
|
||||
description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
|
||||
type: application
|
||||
version: 0.8.5
|
||||
version: 0.8.7
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -18,7 +18,7 @@ dependencies:
|
||||
version: ">= 0.1.5"
|
||||
repository: https://cdn.zero-downtime.net/charts/
|
||||
- name: kube-prometheus-stack
|
||||
version: 40.0.0
|
||||
version: 41.4.1
|
||||
# Switch back to upstream once all alerts are fixed eg. etcd gpcr
|
||||
# repository: https://prometheus-community.github.io/helm-charts
|
||||
- name: prometheus-adapter
|
||||
@ -26,8 +26,8 @@ dependencies:
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
condition: prometheus-adapter.enabled
|
||||
- name: prometheus-pushgateway
|
||||
version: 1.18.2
|
||||
version: 1.20.0
|
||||
# Switch back to upstream once namespaces are supported
|
||||
# repository: https://prometheus-community.github.io/helm-charts
|
||||
condition: prometheus-pushgateway.enabled
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-metrics
|
||||
|
||||
![Version: 0.8.5](https://img.shields.io/badge/Version-0.8.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.8.7](https://img.shields.io/badge/Version-0.8.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
|
||||
|
||||
@ -14,12 +14,12 @@ KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| | kube-prometheus-stack | 40.0.0 |
|
||||
| | prometheus-pushgateway | 1.18.2 |
|
||||
| | kube-prometheus-stack | 41.4.1 |
|
||||
| | prometheus-pushgateway | 1.20.0 |
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
|
||||
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.4.0 |
|
||||
|
||||
@ -127,6 +127,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| kube-prometheus-stack.kube-state-metrics.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| kube-prometheus-stack.kube-state-metrics.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
|
||||
| kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | |
|
||||
| kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | |
|
||||
@ -168,10 +170,11 @@ Kubernetes: `>= 1.20.0`
|
||||
| kube-prometheus-stack.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `false` | |
|
||||
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
|
||||
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
|
||||
| kube-prometheus-stack.prometheus.prometheusSpec.walCompression | bool | `true` | |
|
||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
|
||||
| kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
@ -180,6 +183,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"32Mi"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| kube-prometheus-stack.prometheusOperator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| prometheus-adapter.enabled | bool | `true` | |
|
||||
| prometheus-adapter.logLevel | int | `1` | |
|
||||
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
@ -200,6 +205,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| prometheus-adapter.rules.resource.window | string | `"5m"` | |
|
||||
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| prometheus-adapter.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| prometheus-adapter.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| prometheus-pushgateway.enabled | bool | `false` | |
|
||||
| prometheus-pushgateway.serviceMonitor.enabled | bool | `true` | |
|
||||
|
||||
|
@ -6,20 +6,20 @@ annotations:
|
||||
url: https://github.com/prometheus-operator/kube-prometheus
|
||||
artifacthub.io/operator: "true"
|
||||
apiVersion: v2
|
||||
appVersion: 0.59.1
|
||||
appVersion: 0.60.1
|
||||
dependencies:
|
||||
- condition: kubeStateMetrics.enabled
|
||||
name: kube-state-metrics
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
version: 4.18.*
|
||||
version: 4.20.*
|
||||
- condition: nodeExporter.enabled
|
||||
name: prometheus-node-exporter
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
version: 4.2.*
|
||||
version: 4.3.*
|
||||
- condition: grafana.enabled
|
||||
name: grafana
|
||||
repository: https://grafana.github.io/helm-charts
|
||||
version: 6.38.*
|
||||
version: 6.40.*
|
||||
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
|
||||
and Prometheus rules combined with documentation and scripts to provide easy to
|
||||
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
|
||||
@ -51,4 +51,4 @@ sources:
|
||||
- https://github.com/prometheus-community/helm-charts
|
||||
- https://github.com/prometheus-operator/kube-prometheus
|
||||
type: application
|
||||
version: 40.0.0
|
||||
version: 41.4.1
|
||||
|
@ -80,6 +80,27 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
|
||||
|
||||
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
|
||||
|
||||
### From 40.x to 41.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.60.1, Prometheus to v2.39.1 and Thanos to v0.28.1.
|
||||
This version also upgrades the Helm charts of kube-state-metrics to 4.20.2, prometheus-node-exporter to 4.3.0 and Grafana to 6.40.4.
|
||||
|
||||
Run these commands to update the CRDs before applying the upgrade.
|
||||
|
||||
```console
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
```
|
||||
|
||||
This version splits kubeScheduler recording and altering rules in separate config values.
|
||||
Instead of `defaultRules.rules.kubeScheduler` the 2 new variables `defaultRules.rules.kubeSchedulerAlerting` and `defaultRules.rules.kubeSchedulerRecording` are used.
|
||||
|
||||
### From 39.x to 40.x
|
||||
|
||||
This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0.
|
||||
|
@ -1,5 +1,5 @@
|
||||
apiVersion: v2
|
||||
appVersion: 9.1.4
|
||||
appVersion: 9.1.7
|
||||
description: The leading tool for querying and visualizing time series and metrics.
|
||||
home: https://grafana.net
|
||||
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
|
||||
@ -19,4 +19,4 @@ name: grafana
|
||||
sources:
|
||||
- https://github.com/grafana/grafana
|
||||
type: application
|
||||
version: 6.38.0
|
||||
version: 6.40.4
|
||||
|
@ -150,6 +150,15 @@ This version requires Helm >= 3.1.0.
|
||||
| `sidecar.resources` | Sidecar resources | `{}` |
|
||||
| `sidecar.securityContext` | Sidecar securityContext | `{}` |
|
||||
| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` |
|
||||
| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` |
|
||||
| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` |
|
||||
| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` |
|
||||
| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
|
||||
| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` |
|
||||
| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
|
||||
| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
|
||||
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
|
||||
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
|
||||
| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
|
||||
@ -166,21 +175,28 @@ This version requires Helm >= 3.1.0.
|
||||
| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
|
||||
| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
|
||||
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
|
||||
| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` |
|
||||
| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` |
|
||||
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
|
||||
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
|
||||
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` |
|
||||
| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
|
||||
| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` |
|
||||
| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
|
||||
| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` |
|
||||
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
|
||||
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
|
||||
| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` |
|
||||
| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
|
||||
| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` |
|
||||
| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
|
||||
| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` |
|
||||
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
|
||||
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
|
||||
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
|
||||
@ -319,6 +335,14 @@ dashboards:
|
||||
gnetId: 2
|
||||
revision: 2
|
||||
datasource: Prometheus
|
||||
loki-dashboard-quick-search:
|
||||
gnetId: 12019
|
||||
revision: 2
|
||||
datasource:
|
||||
- name: DS_PROMETHEUS
|
||||
value: Prometheus
|
||||
- name: DS_LOKI
|
||||
value: Loki
|
||||
local-dashboard:
|
||||
url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
|
||||
```
|
||||
|
@ -15,7 +15,7 @@ hostAliases:
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.notifiers.enabled .Values.extraInitContainers (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources)) }}
|
||||
{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }}
|
||||
initContainers:
|
||||
{{- end }}
|
||||
{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }}
|
||||
@ -123,7 +123,7 @@ initContainers:
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.datasources.searchNamespace }}
|
||||
- name: NAMESPACE
|
||||
value: "{{ .Values.sidecar.datasources.searchNamespace | join "," }}"
|
||||
value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.skipTlsVerify }}
|
||||
- name: SKIP_TLS_VERIFY
|
||||
@ -141,8 +141,8 @@ initContainers:
|
||||
- name: sc-datasources-volume
|
||||
mountPath: "/etc/grafana/provisioning/datasources"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.notifiers.enabled }}
|
||||
- name: {{ template "grafana.name" . }}-sc-notifiers
|
||||
{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }}
|
||||
- name: {{ template "grafana.name" . }}-init-sc-notifiers
|
||||
{{- if .Values.sidecar.image.sha }}
|
||||
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
|
||||
{{- else }}
|
||||
@ -180,7 +180,7 @@ initContainers:
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.notifiers.searchNamespace }}
|
||||
- name: NAMESPACE
|
||||
value: "{{ .Values.sidecar.notifiers.searchNamespace | join "," }}"
|
||||
value: "{{ tpl (.Values.sidecar.notifiers.searchNamespace | join ",") . }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.skipTlsVerify }}
|
||||
- name: SKIP_TLS_VERIFY
|
||||
@ -220,6 +220,109 @@ imagePullSecrets:
|
||||
enableServiceLinks: {{ .Values.enableServiceLinks }}
|
||||
{{- end }}
|
||||
containers:
|
||||
{{- if .Values.sidecar.alerts.enabled }}
|
||||
- name: {{ template "grafana.name" . }}-sc-alerts
|
||||
{{- if .Values.sidecar.image.sha }}
|
||||
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
|
||||
{{- else }}
|
||||
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
|
||||
env:
|
||||
{{- range $key, $value := .Values.sidecar.alerts.env }}
|
||||
- name: "{{ $key }}"
|
||||
value: "{{ $value }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }}
|
||||
- name: IGNORE_ALREADY_PROCESSED
|
||||
value: "true"
|
||||
{{- end }}
|
||||
- name: METHOD
|
||||
value: {{ .Values.sidecar.alerts.watchMethod }}
|
||||
- name: LABEL
|
||||
value: "{{ .Values.sidecar.alerts.label }}"
|
||||
{{- with .Values.sidecar.alerts.labelValue }}
|
||||
- name: LABEL_VALUE
|
||||
value: {{ quote . }}
|
||||
{{- end }}
|
||||
{{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }}
|
||||
- name: LOG_LEVEL
|
||||
value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }}
|
||||
{{- end }}
|
||||
- name: FOLDER
|
||||
value: "/etc/grafana/provisioning/alerting"
|
||||
- name: RESOURCE
|
||||
value: {{ quote .Values.sidecar.alerts.resource }}
|
||||
{{- if .Values.sidecar.enableUniqueFilenames }}
|
||||
- name: UNIQUE_FILENAMES
|
||||
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.alerts.searchNamespace }}
|
||||
- name: NAMESPACE
|
||||
value: {{ . | join "," | quote }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.alerts.skipTlsVerify }}
|
||||
- name: SKIP_TLS_VERIFY
|
||||
value: {{ quote . }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.alerts.script }}
|
||||
- name: SCRIPT
|
||||
value: {{ quote . }}
|
||||
{{- end }}
|
||||
{{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
|
||||
- name: REQ_USERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
|
||||
key: {{ .Values.admin.userKey | default "admin-user" }}
|
||||
{{- end }}
|
||||
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
|
||||
- name: REQ_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
|
||||
key: {{ .Values.admin.passwordKey | default "admin-password" }}
|
||||
{{- end }}
|
||||
{{- if not .Values.sidecar.alerts.skipReload }}
|
||||
- name: REQ_URL
|
||||
value: {{ .Values.sidecar.alerts.reloadURL }}
|
||||
- name: REQ_METHOD
|
||||
value: POST
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.alerts.watchServerTimeout }}
|
||||
{{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }}
|
||||
{{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }}
|
||||
{{- end }}
|
||||
- name: WATCH_SERVER_TIMEOUT
|
||||
value: "{{ .Values.sidecar.alerts.watchServerTimeout }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.alerts.watchClientTimeout }}
|
||||
{{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }}
|
||||
{{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }}
|
||||
{{- end }}
|
||||
- name: WATCH_CLIENT_TIMEOUT
|
||||
value: "{{ .Values.sidecar.alerts.watchClientTimeout }}"
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.livenessProbe }}
|
||||
livenessProbe:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.readinessProbe }}
|
||||
readinessProbe:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.resources }}
|
||||
resources:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.securityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: sc-alerts-volume
|
||||
mountPath: "/etc/grafana/provisioning/alerting"
|
||||
{{- end}}
|
||||
{{- if .Values.sidecar.dashboards.enabled }}
|
||||
- name: {{ template "grafana.name" . }}-sc-dashboard
|
||||
{{- if .Values.sidecar.image.sha }}
|
||||
@ -259,7 +362,7 @@ containers:
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.dashboards.searchNamespace }}
|
||||
- name: NAMESPACE
|
||||
value: "{{ .Values.sidecar.dashboards.searchNamespace | join "," }}"
|
||||
value: "{{ tpl (.Values.sidecar.dashboards.searchNamespace | join ",") . }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.skipTlsVerify }}
|
||||
- name: SKIP_TLS_VERIFY
|
||||
@ -349,7 +452,7 @@ containers:
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.datasources.searchNamespace }}
|
||||
- name: NAMESPACE
|
||||
value: "{{ .Values.sidecar.datasources.searchNamespace | join "," }}"
|
||||
value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.skipTlsVerify }}
|
||||
- name: SKIP_TLS_VERIFY
|
||||
@ -413,6 +516,109 @@ containers:
|
||||
- name: sc-datasources-volume
|
||||
mountPath: "/etc/grafana/provisioning/datasources"
|
||||
{{- end}}
|
||||
{{- if .Values.sidecar.notifiers.enabled }}
|
||||
- name: {{ template "grafana.name" . }}-sc-notifiers
|
||||
{{- if .Values.sidecar.image.sha }}
|
||||
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
|
||||
{{- else }}
|
||||
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
|
||||
env:
|
||||
{{- range $key, $value := .Values.sidecar.notifiers.env }}
|
||||
- name: "{{ $key }}"
|
||||
value: "{{ $value }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }}
|
||||
- name: IGNORE_ALREADY_PROCESSED
|
||||
value: "true"
|
||||
{{- end }}
|
||||
- name: METHOD
|
||||
value: {{ .Values.sidecar.notifiers.watchMethod }}
|
||||
- name: LABEL
|
||||
value: "{{ .Values.sidecar.notifiers.label }}"
|
||||
{{- if .Values.sidecar.notifiers.labelValue }}
|
||||
- name: LABEL_VALUE
|
||||
value: {{ quote .Values.sidecar.notifiers.labelValue }}
|
||||
{{- end }}
|
||||
{{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }}
|
||||
- name: LOG_LEVEL
|
||||
value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }}
|
||||
{{- end }}
|
||||
- name: FOLDER
|
||||
value: "/etc/grafana/provisioning/notifiers"
|
||||
- name: RESOURCE
|
||||
value: {{ quote .Values.sidecar.notifiers.resource }}
|
||||
{{- if .Values.sidecar.enableUniqueFilenames }}
|
||||
- name: UNIQUE_FILENAMES
|
||||
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.notifiers.searchNamespace }}
|
||||
- name: NAMESPACE
|
||||
value: "{{ tpl (.Values.sidecar.notifiers.searchNamespace | join ",") . }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.skipTlsVerify }}
|
||||
- name: SKIP_TLS_VERIFY
|
||||
value: "{{ .Values.sidecar.skipTlsVerify }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.notifiers.script }}
|
||||
- name: SCRIPT
|
||||
value: "{{ .Values.sidecar.notifiers.script }}"
|
||||
{{- end }}
|
||||
{{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
|
||||
- name: REQ_USERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
|
||||
key: {{ .Values.admin.userKey | default "admin-user" }}
|
||||
{{- end }}
|
||||
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
|
||||
- name: REQ_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
|
||||
key: {{ .Values.admin.passwordKey | default "admin-password" }}
|
||||
{{- end }}
|
||||
{{- if not .Values.sidecar.notifiers.skipReload }}
|
||||
- name: REQ_URL
|
||||
value: {{ .Values.sidecar.notifiers.reloadURL }}
|
||||
- name: REQ_METHOD
|
||||
value: POST
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.notifiers.watchServerTimeout }}
|
||||
{{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }}
|
||||
{{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }}
|
||||
{{- end }}
|
||||
- name: WATCH_SERVER_TIMEOUT
|
||||
value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.notifiers.watchClientTimeout }}
|
||||
{{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }}
|
||||
{{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }}
|
||||
{{- end }}
|
||||
- name: WATCH_CLIENT_TIMEOUT
|
||||
value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}"
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.livenessProbe }}
|
||||
livenessProbe:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.readinessProbe }}
|
||||
readinessProbe:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.resources }}
|
||||
resources:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.securityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- name: sc-notifiers-volume
|
||||
mountPath: "/etc/grafana/provisioning/notifiers"
|
||||
{{- end}}
|
||||
{{- if .Values.sidecar.plugins.enabled }}
|
||||
- name: {{ template "grafana.name" . }}-sc-plugins
|
||||
{{- if .Values.sidecar.image.sha }}
|
||||
@ -452,7 +658,7 @@ containers:
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.plugins.searchNamespace }}
|
||||
- name: NAMESPACE
|
||||
value: "{{ .Values.sidecar.plugins.searchNamespace | join "," }}"
|
||||
value: "{{ tpl (.Values.sidecar.plugins.searchNamespace | join ",") . }}"
|
||||
{{- end }}
|
||||
{{- if .Values.sidecar.plugins.script }}
|
||||
- name: SCRIPT
|
||||
@ -526,7 +732,7 @@ containers:
|
||||
{{- if .Values.command }}
|
||||
command:
|
||||
{{- range .Values.command }}
|
||||
- {{ . }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end}}
|
||||
{{- with .Values.containerSecurityContext }}
|
||||
@ -599,6 +805,10 @@ containers:
|
||||
subPath: {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.sidecar.alerts.enabled }}
|
||||
- name: sc-alerts-volume
|
||||
mountPath: "/etc/grafana/provisioning/alerting"
|
||||
{{- end}}
|
||||
{{- if .Values.sidecar.dashboards.enabled }}
|
||||
- name: sc-dashboard-volume
|
||||
mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
|
||||
@ -811,6 +1021,15 @@ volumes:
|
||||
emptyDir: {}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.sidecar.alerts.enabled }}
|
||||
- name: sc-alerts-volume
|
||||
{{- if .Values.sidecar.alerts.sizeLimit }}
|
||||
emptyDir:
|
||||
sizeLimit: {{ .Values.sidecar.alerts.sizeLimit }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- if .Values.sidecar.dashboards.enabled }}
|
||||
- name: sc-dashboard-volume
|
||||
{{- if .Values.sidecar.dashboards.sizeLimit }}
|
||||
|
@ -1,4 +1,4 @@
|
||||
{{- if .Values.sidecar.dashboards.enabled }}
|
||||
{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
|
@ -60,7 +60,7 @@ data:
|
||||
{{ $root := . }}
|
||||
{{- range $key, $value := .Values.alerting }}
|
||||
{{ $key }}: |
|
||||
{{ tpl $value $root | indent 4 }}
|
||||
{{ tpl (toYaml $value | indent 4) $root }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
@ -101,16 +101,33 @@ data:
|
||||
-H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \
|
||||
{{- end }}
|
||||
-H "Content-Type: application/json;charset=UTF-8" \
|
||||
{{ end }}
|
||||
{{- end -}}
|
||||
{{- $dpPath := "" -}}
|
||||
{{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers -}}
|
||||
{{- if eq $kd.name $provider -}}
|
||||
{{- $dpPath = $kd.options.path -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \
|
||||
{{- if $value.url }}
|
||||
"{{ $value.url }}" \
|
||||
{{- else }}
|
||||
"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \
|
||||
{{- end -}}
|
||||
{{- if $value.datasource }}
|
||||
{{- if kindIs "string" $value.datasource }}
|
||||
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \
|
||||
{{- end -}}
|
||||
{{- if kindIs "slice" $value.datasource -}}
|
||||
{{- range $value.datasource }}
|
||||
| sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- if $value.b64content }}
|
||||
| base64 -d \
|
||||
{{- end }}
|
||||
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json"
|
||||
{{- end }}
|
||||
{{ end }}
|
||||
{{- end -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -3,6 +3,7 @@ apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ template "grafana.fullname" . }}
|
||||
namespace: {{ template "grafana.namespace" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ template "grafana.name" . }}
|
||||
helm.sh/chart: {{ template "grafana.chart" . }}
|
||||
|
@ -128,7 +128,7 @@ extraLabels: {}
|
||||
|
||||
downloadDashboardsImage:
|
||||
repository: curlimages/curl
|
||||
tag: 7.73.0
|
||||
tag: 7.85.0
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
@ -522,66 +522,66 @@ datasources: {}
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#alerting
|
||||
##
|
||||
alerting: {}
|
||||
# rules.yaml: |
|
||||
# apiVersion: 1
|
||||
# groups:
|
||||
# - orgId: 1
|
||||
# name: {{ .Chart.Name }}_my_rule_group
|
||||
# folder: my_first_folder
|
||||
# interval: 60s
|
||||
# rules:
|
||||
# - uid: my_id_1
|
||||
# title: my_first_rule
|
||||
# condition: A
|
||||
# data:
|
||||
# - refId: A
|
||||
# datasourceUid: '-100'
|
||||
# model:
|
||||
# conditions:
|
||||
# - evaluator:
|
||||
# params:
|
||||
# - 3
|
||||
# type: gt
|
||||
# operator:
|
||||
# type: and
|
||||
# query:
|
||||
# params:
|
||||
# - A
|
||||
# reducer:
|
||||
# type: last
|
||||
# type: query
|
||||
# datasource:
|
||||
# type: __expr__
|
||||
# uid: '-100'
|
||||
# expression: 1==0
|
||||
# intervalMs: 1000
|
||||
# maxDataPoints: 43200
|
||||
# refId: A
|
||||
# type: math
|
||||
# dashboardUid: my_dashboard
|
||||
# panelId: 123
|
||||
# noDataState: Alerting
|
||||
# for: 60s
|
||||
# annotations:
|
||||
# some_key: some_value
|
||||
# labels:
|
||||
# team: sre_team_1
|
||||
# contactpoints.yaml: |
|
||||
# apiVersion: 1
|
||||
# contactPoints:
|
||||
# - orgId: 1
|
||||
# name: cp_1
|
||||
# receivers:
|
||||
# - uid: first_uid
|
||||
# type: pagerduty
|
||||
# settings:
|
||||
# integrationKey: XXX
|
||||
# severity: critical
|
||||
# class: ping failure
|
||||
# component: Grafana
|
||||
# group: app-stack
|
||||
# summary: |
|
||||
# {{ `{{ template "default.message" . }}` }}
|
||||
# rules.yaml:
|
||||
# apiVersion: 1
|
||||
# groups:
|
||||
# - orgId: 1
|
||||
# name: '{{ .Chart.Name }}_my_rule_group'
|
||||
# folder: my_first_folder
|
||||
# interval: 60s
|
||||
# rules:
|
||||
# - uid: my_id_1
|
||||
# title: my_first_rule
|
||||
# condition: A
|
||||
# data:
|
||||
# - refId: A
|
||||
# datasourceUid: '-100'
|
||||
# model:
|
||||
# conditions:
|
||||
# - evaluator:
|
||||
# params:
|
||||
# - 3
|
||||
# type: gt
|
||||
# operator:
|
||||
# type: and
|
||||
# query:
|
||||
# params:
|
||||
# - A
|
||||
# reducer:
|
||||
# type: last
|
||||
# type: query
|
||||
# datasource:
|
||||
# type: __expr__
|
||||
# uid: '-100'
|
||||
# expression: 1==0
|
||||
# intervalMs: 1000
|
||||
# maxDataPoints: 43200
|
||||
# refId: A
|
||||
# type: math
|
||||
# dashboardUid: my_dashboard
|
||||
# panelId: 123
|
||||
# noDataState: Alerting
|
||||
# for: 60s
|
||||
# annotations:
|
||||
# some_key: some_value
|
||||
# labels:
|
||||
# team: sre_team_1
|
||||
# contactpoints.yaml:
|
||||
# apiVersion: 1
|
||||
# contactPoints:
|
||||
# - orgId: 1
|
||||
# name: cp_1
|
||||
# receivers:
|
||||
# - uid: first_uid
|
||||
# type: pagerduty
|
||||
# settings:
|
||||
# integrationKey: XXX
|
||||
# severity: critical
|
||||
# class: ping failure
|
||||
# component: Grafana
|
||||
# group: app-stack
|
||||
# summary: |
|
||||
# {{ `{{ template "default.message" . }}` }}
|
||||
|
||||
## Configure notifiers
|
||||
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
|
||||
@ -678,7 +678,7 @@ grafana.ini:
|
||||
grafana_net:
|
||||
url: https://grafana.net
|
||||
server:
|
||||
domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ end }}"
|
||||
domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}"
|
||||
## grafana Authentication can be enabled with the following values on grafana.ini
|
||||
# server:
|
||||
# The full public facing url you use in browser, used for redirects and emails
|
||||
@ -758,6 +758,44 @@ sidecar:
|
||||
livenessProbe: {}
|
||||
# Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO
|
||||
# logLevel: INFO
|
||||
alerts:
|
||||
enabled: false
|
||||
# Additional environment variables for the alerts sidecar
|
||||
env: {}
|
||||
# Do not reprocess already processed unchanged resources on k8s API reconnect.
|
||||
# ignoreAlreadyProcessed: true
|
||||
# label that the configmaps with alert are marked with
|
||||
label: grafana_alert
|
||||
# value of label that the configmaps with alert are set to
|
||||
labelValue: ""
|
||||
# Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
|
||||
# logLevel: INFO
|
||||
# If specified, the sidecar will search for alert config-maps inside this namespace.
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
||||
watchMethod: WATCH
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
|
||||
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
||||
# watchServerTimeout: 3600
|
||||
#
|
||||
# watchClientTimeout: is a client-side timeout, configuring your local socket.
|
||||
# If you have a network outage dropping all packets with no RST/FIN,
|
||||
# this is how long your client waits before realizing & dropping the connection.
|
||||
# defaults to 66sec (sic!)
|
||||
# watchClientTimeout: 60
|
||||
#
|
||||
# Endpoint to send request to reload alerts
|
||||
reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload"
|
||||
# Absolute path to shell script to execute after a alert got reloaded
|
||||
script: null
|
||||
skipReload: false
|
||||
# Deploy the alert sidecar as an initContainer in addition to a container.
|
||||
# Sets the size limit of the alert sidecar emptyDir volume
|
||||
sizeLimit: {}
|
||||
dashboards:
|
||||
enabled: false
|
||||
# Additional environment variables for the dashboards sidecar
|
||||
@ -914,8 +952,28 @@ sidecar:
|
||||
# Otherwise the namespace in which the sidecar is running will be used.
|
||||
# It's also possible to specify ALL to search in all namespaces
|
||||
searchNamespace: null
|
||||
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
|
||||
watchMethod: WATCH
|
||||
# search in configmap, secret or both
|
||||
resource: both
|
||||
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
|
||||
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
|
||||
# watchServerTimeout: 3600
|
||||
#
|
||||
# watchClientTimeout: is a client-side timeout, configuring your local socket.
|
||||
# If you have a network outage dropping all packets with no RST/FIN,
|
||||
# this is how long your client waits before realizing & dropping the connection.
|
||||
# defaults to 66sec (sic!)
|
||||
# watchClientTimeout: 60
|
||||
#
|
||||
# Endpoint to send request to reload notifiers
|
||||
reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload"
|
||||
# Absolute path to shell script to execute after a notifier got reloaded
|
||||
script: null
|
||||
skipReload: false
|
||||
# Deploy the notifier sidecar as an initContainer in addition to a container.
|
||||
# This is needed if skipReload is true, to load any notifiers defined at startup time.
|
||||
initNotifiers: false
|
||||
# Sets the size limit of the notifier sidecar emptyDir volume
|
||||
sizeLimit: {}
|
||||
|
||||
|
@ -18,4 +18,4 @@ name: kube-state-metrics
|
||||
sources:
|
||||
- https://github.com/kubernetes/kube-state-metrics/
|
||||
type: application
|
||||
version: 4.18.0
|
||||
version: 4.20.2
|
||||
|
@ -74,10 +74,17 @@ spec:
|
||||
{{- if .Values.metricDenylist }}
|
||||
- --metric-denylist={{ .Values.metricDenylist | join "," }}
|
||||
{{- end }}
|
||||
{{- $namespaces := list }}
|
||||
{{- if .Values.namespaces }}
|
||||
{{- range $ns := join "," .Values.namespaces | split "," }}
|
||||
{{- $namespaces = append $namespaces (tpl $ns $) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.releaseNamespace }}
|
||||
- --namespaces={{ template "kube-state-metrics.namespace" . }}
|
||||
{{- else if .Values.namespaces }}
|
||||
- --namespaces={{ tpl (.Values.namespaces | join ",") $ }}
|
||||
{{- $namespaces = append $namespaces ( include "kube-state-metrics.namespace" . ) }}
|
||||
{{- end }}
|
||||
{{- if $namespaces }}
|
||||
- --namespaces={{ $namespaces | mustUniq | join "," }}
|
||||
{{- end }}
|
||||
{{- if .Values.namespacesDenylist }}
|
||||
- --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }}
|
||||
|
@ -1,5 +1,5 @@
|
||||
{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}}
|
||||
{{- range (ternary (split "," .Values.namespaces) (list "") (eq $.Values.rbac.useClusterRole false)) }}
|
||||
{{- range (ternary (join "," .Values.namespaces | split "," ) (list "") (eq $.Values.rbac.useClusterRole false)) }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{- if eq $.Values.rbac.useClusterRole false }}
|
||||
|
@ -1,5 +1,5 @@
|
||||
{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}}
|
||||
{{- range (split "," $.Values.namespaces) }}
|
||||
{{- range (join "," $.Values.namespaces) | split "," }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
|
@ -0,0 +1,34 @@
|
||||
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }}
|
||||
apiVersion: autoscaling.k8s.io/v1
|
||||
kind: VerticalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ template "kube-state-metrics.fullname" . }}
|
||||
namespace: {{ template "kube-state-metrics.namespace" . }}
|
||||
labels:
|
||||
{{- include "kube-state-metrics.labels" . | indent 4 }}
|
||||
spec:
|
||||
resourcePolicy:
|
||||
containerPolicies:
|
||||
- containerName: {{ template "kube-state-metrics.name" . }}
|
||||
{{- if .Values.verticalPodAutoscaler.controlledResources }}
|
||||
controlledResources: {{ .Values.verticalPodAutoscaler.controlledResources }}
|
||||
{{- end }}
|
||||
{{- if .Values.verticalPodAutoscaler.maxAllowed }}
|
||||
maxAllowed:
|
||||
{{ toYaml .Values.verticalPodAutoscaler.maxAllowed | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.verticalPodAutoscaler.minAllowed }}
|
||||
minAllowed:
|
||||
{{ toYaml .Values.verticalPodAutoscaler.minAllowed | nindent 8 }}
|
||||
{{- end }}
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ template "kube-state-metrics.fullname" . }}
|
||||
{{- if .Values.verticalPodAutoscaler.updatePolicy }}
|
||||
updatePolicy:
|
||||
{{- if .Values.verticalPodAutoscaler.updatePolicy.updateMode }}
|
||||
updateMode: {{ .Values.verticalPodAutoscaler.updatePolicy.updateMode }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -211,10 +211,10 @@ kubeconfig:
|
||||
secret:
|
||||
|
||||
# Enable only the release namespace for collecting resources. By default all namespaces are collected.
|
||||
# If releaseNamespace and namespaces are both set only releaseNamespace will be used.
|
||||
# If releaseNamespace and namespaces are both set a merged list will be collected.
|
||||
releaseNamespace: false
|
||||
|
||||
# Comma-separated list of namespaces to be enabled for collecting resources. By default all namespaces are collected.
|
||||
# Comma-separated list(string) or yaml list of namespaces to be enabled for collecting resources. By default all namespaces are collected.
|
||||
namespaces: ""
|
||||
|
||||
# Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set,
|
||||
@ -251,6 +251,26 @@ selfMonitor:
|
||||
# telemetryPort: 8081
|
||||
# telemetryNodePort: 0
|
||||
|
||||
# Enable vertical pod autoscaler support for kube-state-metrics
|
||||
verticalPodAutoscaler:
|
||||
enabled: false
|
||||
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
|
||||
controlledResources: []
|
||||
|
||||
# Define the max allowed resources for the pod
|
||||
maxAllowed: {}
|
||||
# cpu: 200m
|
||||
# memory: 100Mi
|
||||
# Define the min allowed resources for the pod
|
||||
minAllowed: {}
|
||||
# cpu: 200m
|
||||
# memory: 100Mi
|
||||
|
||||
# updatePolicy:
|
||||
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
|
||||
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
|
||||
# updateMode: Auto
|
||||
|
||||
# volumeMounts are used to add custom volume mounts to deployment.
|
||||
# See example below
|
||||
volumeMounts: []
|
||||
|
@ -15,4 +15,4 @@ name: prometheus-node-exporter
|
||||
sources:
|
||||
- https://github.com/prometheus/node_exporter/
|
||||
type: application
|
||||
version: 4.2.0
|
||||
version: 4.3.0
|
||||
|
@ -1,5 +1,5 @@
|
||||
{{- if .Values.prometheus.monitor.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
apiVersion: {{ .Values.prometheus.monitor.apiVersion | default "monitoring.coreos.com/v1" }}
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "prometheus-node-exporter.fullname" . }}
|
||||
|
@ -51,6 +51,8 @@ prometheus:
|
||||
metricRelabelings: []
|
||||
interval: ""
|
||||
scrapeTimeout: 10s
|
||||
## prometheus.monitor.apiVersion ApiVersion for the serviceMonitor Resource(defaults to "monitoring.coreos.com/v1")
|
||||
apiVersion: ""
|
||||
|
||||
## Customize the updateStrategy if set
|
||||
updateStrategy:
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
@ -46,7 +46,7 @@ spec:
|
||||
properties:
|
||||
inhibitRules:
|
||||
description: List of inhibition rules. The rules will only apply to
|
||||
alerts matching the resource’s namespace.
|
||||
alerts matching the resource's namespace.
|
||||
items:
|
||||
description: InhibitRule defines an inhibition rule that allows
|
||||
to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule
|
||||
@ -60,7 +60,7 @@ spec:
|
||||
sourceMatch:
|
||||
description: Matchers for which one or more alerts have to exist
|
||||
for the inhibition to take effect. The operator enforces that
|
||||
the alert matches the resource’s namespace.
|
||||
the alert matches the resource's namespace.
|
||||
items:
|
||||
description: Matcher defines how to match on alert's labels.
|
||||
properties:
|
||||
@ -93,7 +93,7 @@ spec:
|
||||
targetMatch:
|
||||
description: Matchers that have to be fulfilled in the alerts
|
||||
to be muted. The operator enforces that the alert matches
|
||||
the resource’s namespace.
|
||||
the resource's namespace.
|
||||
items:
|
||||
description: Matcher defines how to match on alert's labels.
|
||||
properties:
|
||||
@ -1779,7 +1779,7 @@ spec:
|
||||
type: string
|
||||
token:
|
||||
description: The secret's key that contains the registered
|
||||
application’s API token, see https://pushover.net/apps.
|
||||
application's API token, see https://pushover.net/apps.
|
||||
The secret needs to be in the same namespace as the
|
||||
AlertmanagerConfig object and accessible by the Prometheus
|
||||
Operator.
|
||||
@ -1809,7 +1809,7 @@ spec:
|
||||
type: string
|
||||
userKey:
|
||||
description: The secret's key that contains the recipient
|
||||
user’s user key. The secret needs to be in the same
|
||||
user's user key. The secret needs to be in the same
|
||||
namespace as the AlertmanagerConfig object and accessible
|
||||
by the Prometheus Operator.
|
||||
properties:
|
||||
@ -4380,7 +4380,7 @@ spec:
|
||||
type: array
|
||||
route:
|
||||
description: The Alertmanager route definition for alerts matching
|
||||
the resource’s namespace. If present, it will be added to the generated
|
||||
the resource's namespace. If present, it will be added to the generated
|
||||
Alertmanager configuration as a first-level route.
|
||||
properties:
|
||||
continue:
|
||||
@ -4406,7 +4406,7 @@ spec:
|
||||
Example: "30s"'
|
||||
type: string
|
||||
matchers:
|
||||
description: 'List of matchers that the alert’s labels should
|
||||
description: 'List of matchers that the alert''s labels should
|
||||
match. For the first level route, the operator removes any existing
|
||||
equality and regexp matcher on the `namespace` label and adds
|
||||
a `namespace: <object namespace>` matcher.'
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
@ -25,13 +25,18 @@ spec:
|
||||
jsonPath: .spec.version
|
||||
name: Version
|
||||
type: string
|
||||
- description: The desired replicas number of Alertmanagers
|
||||
- description: The number of desired replicas
|
||||
jsonPath: .spec.replicas
|
||||
name: Replicas
|
||||
type: integer
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
- description: Whether the resource reconciliation is paused or not
|
||||
jsonPath: .status.paused
|
||||
name: Paused
|
||||
priority: 1
|
||||
type: boolean
|
||||
name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
@ -1342,6 +1347,51 @@ spec:
|
||||
and inhibition rules.
|
||||
minLength: 1
|
||||
type: string
|
||||
templates:
|
||||
description: Custom notification templates.
|
||||
items:
|
||||
description: SecretOrConfigMap allows to specify data as a Secret
|
||||
or ConfigMap. Fields are mutually exclusive.
|
||||
properties:
|
||||
configMap:
|
||||
description: ConfigMap containing data to use for the targets.
|
||||
properties:
|
||||
key:
|
||||
description: The key to select.
|
||||
type: string
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the ConfigMap or its key
|
||||
must be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
secret:
|
||||
description: Secret containing data to use for the targets.
|
||||
properties:
|
||||
key:
|
||||
description: The key of the secret to select from. Must
|
||||
be a valid secret key.
|
||||
type: string
|
||||
name:
|
||||
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?'
|
||||
type: string
|
||||
optional:
|
||||
description: Specify whether the Secret or its key must
|
||||
be defined
|
||||
type: boolean
|
||||
required:
|
||||
- key
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
baseImage:
|
||||
description: 'Base image that is used to deploy pods, without tag.
|
||||
@ -1367,14 +1417,17 @@ spec:
|
||||
configMaps:
|
||||
description: ConfigMaps is a list of ConfigMaps in the same namespace
|
||||
as the Alertmanager object, which shall be mounted into the Alertmanager
|
||||
Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>.
|
||||
Pods. Each ConfigMap is added to the StatefulSet definition as a
|
||||
volume named `configmap-<configmap-name>`. The ConfigMaps are mounted
|
||||
into `/etc/alertmanager/configmaps/<configmap-name>` in the 'alertmanager'
|
||||
container.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
configSecret:
|
||||
description: "ConfigSecret is the name of a Kubernetes Secret in the
|
||||
same namespace as the Alertmanager object, which contains the configuration
|
||||
for this Alertmanager instance. If empty, it defaults to 'alertmanager-<alertmanager-name>'.
|
||||
for this Alertmanager instance. If empty, it defaults to `alertmanager-<alertmanager-name>`.
|
||||
\n The Alertmanager configuration should be available under the
|
||||
`alertmanager.yaml` key. Additional keys from the original secret
|
||||
are copied to the generated secret. \n If either the secret or the
|
||||
@ -4019,7 +4072,9 @@ spec:
|
||||
secrets:
|
||||
description: Secrets is a list of Secrets in the same namespace as
|
||||
the Alertmanager object, which shall be mounted into the Alertmanager
|
||||
Pods. The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>.
|
||||
Pods. Each Secret is added to the StatefulSet definition as a volume
|
||||
named `secret-<secret-name>`. The Secrets are mounted into `/etc/alertmanager/secrets/<secret-name>`
|
||||
in the 'alertmanager' container.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
@ -187,6 +187,10 @@ spec:
|
||||
enableHttp2:
|
||||
description: Whether to enable HTTP2.
|
||||
type: boolean
|
||||
filterRunning:
|
||||
description: 'Drop pods that are not running. (Failed, Succeeded).
|
||||
Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase'
|
||||
type: boolean
|
||||
followRedirects:
|
||||
description: FollowRedirects configures whether scrape requests
|
||||
follow HTTP 3xx redirects.
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
@ -26,13 +26,28 @@ spec:
|
||||
jsonPath: .spec.version
|
||||
name: Version
|
||||
type: string
|
||||
- description: The desired replicas number of Prometheuses
|
||||
- description: The number of desired replicas
|
||||
jsonPath: .spec.replicas
|
||||
name: Replicas
|
||||
name: Desired
|
||||
type: integer
|
||||
- description: The number of ready replicas
|
||||
jsonPath: .status.availableReplicas
|
||||
name: Ready
|
||||
type: integer
|
||||
- jsonPath: .status.conditions[?(@.type == 'Reconciled')].status
|
||||
name: Reconciled
|
||||
type: string
|
||||
- jsonPath: .status.conditions[?(@.type == 'Available')].status
|
||||
name: Available
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
- description: Whether the resource reconciliation is paused or not
|
||||
jsonPath: .status.paused
|
||||
name: Paused
|
||||
priority: 1
|
||||
type: boolean
|
||||
name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
@ -115,8 +130,8 @@ spec:
|
||||
the Prometheus container. It is intended for e.g. activating hidden
|
||||
flags which are not supported by the dedicated configuration options
|
||||
yet. The arguments are passed as-is to the Prometheus container
|
||||
which may cause issues if they are invalid or not supporeted by
|
||||
the given Prometheus version. In case of an argument conflict (e.g.
|
||||
which may cause issues if they are invalid or not supported by the
|
||||
given Prometheus version. In case of an argument conflict (e.g.
|
||||
an argument which is already set by the operator itself) or when
|
||||
providing an invalid argument the reconciliation will fail and an
|
||||
error will be logged.
|
||||
@ -1460,7 +1475,10 @@ spec:
|
||||
configMaps:
|
||||
description: ConfigMaps is a list of ConfigMaps in the same namespace
|
||||
as the Prometheus object, which shall be mounted into the Prometheus
|
||||
Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>.
|
||||
Pods. Each ConfigMap is added to the StatefulSet definition as a
|
||||
volume named `configmap-<configmap-name>`. The ConfigMaps are mounted
|
||||
into /etc/prometheus/configmaps/<configmap-name> in the 'prometheus'
|
||||
container.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@ -2872,6 +2890,12 @@ spec:
|
||||
x-kubernetes-list-map-keys:
|
||||
- ip
|
||||
x-kubernetes-list-type: map
|
||||
hostNetwork:
|
||||
description: Use the host's network namespace if true. Make sure to
|
||||
understand the security implications if you want to enable it. When
|
||||
hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet
|
||||
automatically.
|
||||
type: boolean
|
||||
ignoreNamespaceSelectors:
|
||||
description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector
|
||||
settings from all PodMonitor, ServiceMonitor and Probe objects.
|
||||
@ -5515,7 +5539,9 @@ spec:
|
||||
secrets:
|
||||
description: Secrets is a list of Secrets in the same namespace as
|
||||
the Prometheus object, which shall be mounted into the Prometheus
|
||||
Pods. The Secrets are mounted into /etc/prometheus/secrets/<secret-name>.
|
||||
Pods. Each Secret is added to the StatefulSet definition as a volume
|
||||
named `secret-<secret-name>`. The Secrets are mounted into /etc/prometheus/secrets/<secret-name>
|
||||
in the 'prometheus' container.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
@ -6376,7 +6402,7 @@ spec:
|
||||
description: AdditionalArgs allows setting additional arguments
|
||||
for the Thanos container. The arguments are passed as-is to
|
||||
the Thanos container which may cause issues if they are invalid
|
||||
or not supporeted the given Thanos version. In case of an argument
|
||||
or not supported the given Thanos version. In case of an argument
|
||||
conflict (e.g. an argument which is already set by the operator
|
||||
itself) or when providing an invalid argument the reconciliation
|
||||
will fail and an error will be logged.
|
||||
@ -6399,9 +6425,14 @@ spec:
|
||||
description: 'Thanos base image if other than default. Deprecated:
|
||||
use ''image'' instead'
|
||||
type: string
|
||||
grpcListenLocal:
|
||||
description: If true, the Thanos sidecar listens on the loopback
|
||||
interface for the gRPC endpoints. It has no effect if `listenLocal`
|
||||
is true.
|
||||
type: boolean
|
||||
grpcServerTlsConfig:
|
||||
description: 'GRPCServerTLSConfig configures the gRPC server from
|
||||
which Thanos Querier reads recorded rule data. Note: Currently
|
||||
description: 'GRPCServerTLSConfig configures the TLS parameters
|
||||
for the gRPC server providing the StoreAPI. Note: Currently
|
||||
only the CAFile, CertFile, and KeyFile fields are supported.
|
||||
Maps to the ''--grpc-server-tls-*'' CLI args.'
|
||||
properties:
|
||||
@ -6534,6 +6565,11 @@ spec:
|
||||
description: Used to verify the hostname for the targets.
|
||||
type: string
|
||||
type: object
|
||||
httpListenLocal:
|
||||
description: If true, the Thanos sidecar listens on the loopback
|
||||
interface for the HTTP endpoints. It has no effect if `listenLocal`
|
||||
is true.
|
||||
type: boolean
|
||||
image:
|
||||
description: Image if specified has precedence over baseImage,
|
||||
tag and sha combinations. Specifying the version is still necessary
|
||||
@ -6541,8 +6577,10 @@ spec:
|
||||
is being configured.
|
||||
type: string
|
||||
listenLocal:
|
||||
description: ListenLocal makes the Thanos sidecar listen on loopback,
|
||||
so that it does not bind against the Pod IP.
|
||||
description: 'If true, the Thanos sidecar listens on the loopback
|
||||
interface for the HTTP and gRPC endpoints. It takes precedence
|
||||
over `grpcListenLocal` and `httpListenLocal`. Deprecated: use
|
||||
`grpcListenLocal` and `httpListenLocal` instead.'
|
||||
type: boolean
|
||||
logFormat:
|
||||
description: LogFormat for Thanos sidecar to be configured with.
|
||||
@ -6919,6 +6957,20 @@ spec:
|
||||
- whenUnsatisfiable
|
||||
type: object
|
||||
type: array
|
||||
tsdb:
|
||||
description: Defines the runtime reloadable configuration of the timeseries
|
||||
database (TSDB).
|
||||
properties:
|
||||
outOfOrderTimeWindow:
|
||||
description: Configures how old an out-of-order/out-of-bounds
|
||||
sample can be w.r.t. the TSDB max time. An out-of-order/out-of-bounds
|
||||
sample is ingested into the TSDB as long as the timestamp of
|
||||
the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). Out
|
||||
of order ingestion is an experimental feature and requires Prometheus
|
||||
>= v2.39.0.
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
type: object
|
||||
version:
|
||||
description: Version of Prometheus to be deployed.
|
||||
type: string
|
||||
@ -8711,6 +8763,14 @@ spec:
|
||||
description: Human-readable message indicating details for the
|
||||
condition's last transition.
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: ObservedGeneration represents the .metadata.generation
|
||||
that the condition was set based upon. For instance, if .metadata.generation
|
||||
is currently 12, but the .status.conditions[x].observedGeneration
|
||||
is 9, the condition is out of date with respect to the current
|
||||
state of the instance.
|
||||
format: int64
|
||||
type: integer
|
||||
reason:
|
||||
description: Reason for the condition's last transition.
|
||||
type: string
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
|
@ -1,4 +1,4 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
@ -21,13 +21,18 @@ spec:
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- description: The desired replicas number of Thanos Rulers
|
||||
- description: The number of desired replicas
|
||||
jsonPath: .spec.replicas
|
||||
name: Replicas
|
||||
type: integer
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
- description: Whether the resource reconciliation is paused or not
|
||||
jsonPath: .status.paused
|
||||
name: Paused
|
||||
priority: 1
|
||||
type: boolean
|
||||
name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
|
@ -89,7 +89,7 @@ spec:
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.alertmanagerSpec.storage }}
|
||||
storage:
|
||||
{{ toYaml .Values.alertmanager.alertmanagerSpec.storage | indent 4 }}
|
||||
{{ tpl (toYaml .Values.alertmanager.alertmanagerSpec.storage | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.alertmanagerSpec.podMetadata }}
|
||||
podMetadata:
|
||||
@ -162,4 +162,7 @@ spec:
|
||||
{{- if .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }}
|
||||
forceEnableClusterMode: {{ .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.alertmanagerSpec.minReadySeconds }}
|
||||
minReadySeconds: {{ .Values.alertmanager.alertmanagerSpec.minReadySeconds }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
@ -19,3 +20,4 @@ rules:
|
||||
resourceNames:
|
||||
- {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
@ -16,3 +17,4 @@ subjects:
|
||||
name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
@ -43,4 +44,4 @@ spec:
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: false
|
||||
{{- end }}
|
||||
|
||||
{{- end }}
|
||||
|
@ -27,6 +27,9 @@ spec:
|
||||
{{- if .Values.alertmanager.serviceMonitor.scheme }}
|
||||
scheme: {{ .Values.alertmanager.serviceMonitor.scheme }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.serviceMonitor.enableHttp2 }}
|
||||
enableHttp2: {{ .Values.alertmanager.serviceMonitor.enableHttp2 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alertmanager.serviceMonitor.bearerTokenFile }}
|
||||
bearerTokenFile: {{ .Values.alertmanager.serviceMonitor.bearerTokenFile }}
|
||||
{{- end }}
|
||||
|
@ -7,6 +7,9 @@ metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,pre-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
{{- with .Values.prometheusOperator.admissionWebhooks.annotations }}
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" $ }}-admission-create
|
||||
{{- include "kube-prometheus-stack.labels" $ | indent 4 }}
|
||||
|
@ -7,6 +7,9 @@ metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": post-install,post-upgrade
|
||||
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
|
||||
{{- with .Values.prometheusOperator.admissionWebhooks.patch.annotations }}
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch
|
||||
{{- include "kube-prometheus-stack.labels" $ | indent 4 }}
|
||||
@ -20,7 +23,7 @@ spec:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-admission-patch
|
||||
{{- with .Values.prometheusOperator.admissionWebhooks.patch.podAnnotations }}
|
||||
annotations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch
|
||||
|
@ -9,6 +9,9 @@ metadata:
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-operator
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
{{- if .Values.prometheusOperator.labels }}
|
||||
{{ toYaml .Values.prometheusOperator.labels | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusOperator.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.prometheusOperator.annotations | indent 4 }}
|
||||
@ -54,14 +57,19 @@ spec:
|
||||
- --log-level={{ .Values.prometheusOperator.logLevel }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusOperator.denyNamespaces }}
|
||||
- --deny-namespaces={{ .Values.prometheusOperator.denyNamespaces | join "," }}
|
||||
- --deny-namespaces={{ tpl (.Values.prometheusOperator.denyNamespaces | join ",") $ }}
|
||||
{{- end }}
|
||||
{{- with $.Values.prometheusOperator.namespaces }}
|
||||
{{ $ns := default (list nil) .additional }}
|
||||
{{- $namespaces := list }}
|
||||
{{- if .releaseNamespace }}
|
||||
{{- $ns = append $ns $namespace }}
|
||||
{{- $namespaces = append $namespaces $namespace }}
|
||||
{{- end }}
|
||||
- --namespaces={{ $ns | join "," }}
|
||||
{{- if .additional }}
|
||||
{{- range $ns := .additional }}
|
||||
{{- $namespaces = append $namespaces (tpl $ns $) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- --namespaces={{ $namespaces | mustUniq | join "," }}
|
||||
{{- end }}
|
||||
- --localhost=127.0.0.1
|
||||
{{- if .Values.prometheusOperator.prometheusDefaultBaseImage }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
@ -18,3 +19,4 @@ rules:
|
||||
resourceNames:
|
||||
- {{ template "kube-prometheus-stack.fullname" . }}-operator
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
@ -15,3 +16,4 @@ subjects:
|
||||
name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
@ -43,3 +44,4 @@ spec:
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: false
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -0,0 +1,35 @@
|
||||
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.prometheusOperator.verticalPodAutoscaler.enabled) }}
|
||||
apiVersion: autoscaling.k8s.io/v1
|
||||
kind: VerticalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}-operator
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
labels:
|
||||
app: {{ template "kube-prometheus-stack.name" . }}-operator
|
||||
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
|
||||
spec:
|
||||
resourcePolicy:
|
||||
containerPolicies:
|
||||
- containerName: {{ template "kube-prometheus-stack.name" . }}
|
||||
{{- if .Values.prometheusOperator.verticalPodAutoscaler.controlledResources }}
|
||||
controlledResources: {{ .Values.prometheusOperator.verticalPodAutoscaler.controlledResources }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusOperator.verticalPodAutoscaler.maxAllowed }}
|
||||
maxAllowed:
|
||||
{{ toYaml .Values.prometheusOperator.verticalPodAutoscaler.maxAllowed | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheusOperator.verticalPodAutoscaler.minAllowed }}
|
||||
minAllowed:
|
||||
{{ toYaml .Values.prometheusOperator.verticalPodAutoscaler.minAllowed | nindent 8 }}
|
||||
{{- end }}
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ template "kube-prometheus-stack.fullname" . }}
|
||||
{{- if .Values.prometheusOperator.verticalPodAutoscaler.updatePolicy }}
|
||||
updatePolicy:
|
||||
{{- if .Values.prometheusOperator.verticalPodAutoscaler.updatePolicy.updateMode }}
|
||||
updateMode: {{ .Values.prometheusOperator.verticalPodAutoscaler.updatePolicy.updateMode }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -46,6 +46,10 @@ spec:
|
||||
sha: {{ .Values.prometheus.prometheusSpec.image.sha }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.prometheusSpec.additionalArgs }}
|
||||
additionalArgs:
|
||||
{{ toYaml .Values.prometheus.prometheusSpec.additionalArgs | indent 4}}
|
||||
{{- end -}}
|
||||
{{- if .Values.prometheus.prometheusSpec.externalLabels }}
|
||||
externalLabels:
|
||||
{{ tpl (toYaml .Values.prometheus.prometheusSpec.externalLabels | indent 4) . }}
|
||||
@ -217,7 +221,7 @@ spec:
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.prometheusSpec.storageSpec }}
|
||||
storage:
|
||||
{{ toYaml .Values.prometheus.prometheusSpec.storageSpec | indent 4 }}
|
||||
{{ tpl (toYaml .Values.prometheus.prometheusSpec.storageSpec | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.prometheusSpec.podMetadata }}
|
||||
podMetadata:
|
||||
@ -239,7 +243,7 @@ spec:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- {key: app.kubernetes.io/name, operator: In, values: [prometheus]}
|
||||
- {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-prometheus]}
|
||||
- {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . }}]}
|
||||
{{- else if eq .Values.prometheus.prometheusSpec.podAntiAffinity "soft" }}
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
@ -249,7 +253,7 @@ spec:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- {key: app.kubernetes.io/name, operator: In, values: [prometheus]}
|
||||
- {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-prometheus]}
|
||||
- {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . }}]}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.prometheusSpec.tolerations }}
|
||||
@ -380,4 +384,7 @@ spec:
|
||||
{{- if .Values.prometheus.prometheusSpec.allowOverlappingBlocks }}
|
||||
allowOverlappingBlocks: {{ .Values.prometheus.prometheusSpec.allowOverlappingBlocks }}
|
||||
{{- end }}
|
||||
{{- if .Values.prometheus.prometheusSpec.minReadySeconds }}
|
||||
minReadySeconds: {{ .Values.prometheus.prometheusSpec.minReadySeconds }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
@ -18,3 +19,4 @@ rules:
|
||||
resourceNames:
|
||||
- {{ template "kube-prometheus-stack.fullname" . }}-prometheus
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
@ -15,4 +16,4 @@ subjects:
|
||||
name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }}
|
||||
namespace: {{ template "kube-prometheus-stack.namespace" . }}
|
||||
{{- end }}
|
||||
|
||||
{{- end }}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
|
||||
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
@ -54,3 +55,4 @@ spec:
|
||||
{{ toYaml .Values.prometheus.podSecurityPolicy.allowedHostPaths | indent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
@ -78,6 +78,10 @@ spec:
|
||||
queryEndpoints:
|
||||
{{ toYaml .Values.thanosRuler.thanosRulerSpec.queryEndpoints | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.thanosRuler.thanosRulerSpec.queryConfig }}
|
||||
queryConfig:
|
||||
{{ toYaml .Values.thanosRuler.thanosRulerSpec.queryConfig | indent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.thanosRuler.thanosRulerSpec.resources }}
|
||||
resources:
|
||||
{{ toYaml .Values.thanosRuler.thanosRulerSpec.resources | indent 4 }}
|
||||
|
@ -51,7 +51,8 @@ defaultRules:
|
||||
kubernetesResources: true
|
||||
kubernetesStorage: true
|
||||
kubernetesSystem: true
|
||||
kubeScheduler: true
|
||||
kubeSchedulerAlerting: true
|
||||
kubeSchedulerRecording: true
|
||||
kubeStateMetrics: true
|
||||
network: true
|
||||
node: true
|
||||
@ -415,6 +416,10 @@ alertmanager:
|
||||
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
|
||||
scheme: ""
|
||||
|
||||
## enableHttp2: Whether to enable HTTP2.
|
||||
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
|
||||
enableHttp2: false
|
||||
|
||||
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
|
||||
## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
|
||||
tlsConfig: {}
|
||||
@ -686,6 +691,10 @@ alertmanager:
|
||||
## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
|
||||
forceEnableClusterMode: false
|
||||
|
||||
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
|
||||
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
|
||||
minReadySeconds: 0
|
||||
|
||||
## ExtraSecret can be used to store various data in an extra secret
|
||||
## (use it for example to store hashed basic auth credentials)
|
||||
extraSecret:
|
||||
@ -1285,8 +1294,8 @@ kubeEtcd:
|
||||
##
|
||||
service:
|
||||
enabled: true
|
||||
port: 2379
|
||||
targetPort: 2379
|
||||
port: 2381
|
||||
targetPort: 2381
|
||||
# selector:
|
||||
# component: etcd
|
||||
|
||||
@ -1607,6 +1616,9 @@ prometheusOperator:
|
||||
## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
|
||||
## certs ahead of time if you wish.
|
||||
##
|
||||
annotations: {}
|
||||
# argocd.argoproj.io/hook: PreSync
|
||||
# argocd.argoproj.io/hook-delete-policy: HookSucceeded
|
||||
patch:
|
||||
enabled: true
|
||||
image:
|
||||
@ -1618,6 +1630,9 @@ prometheusOperator:
|
||||
## Provide a priority class name to the webhook patching job
|
||||
##
|
||||
priorityClassName: ""
|
||||
annotations: {}
|
||||
# argocd.argoproj.io/hook: PreSync
|
||||
# argocd.argoproj.io/hook-delete-policy: HookSucceeded
|
||||
podAnnotations: {}
|
||||
nodeSelector: {}
|
||||
affinity: {}
|
||||
@ -1723,6 +1738,10 @@ prometheusOperator:
|
||||
##
|
||||
externalIPs: []
|
||||
|
||||
# ## Labels to add to the operator deployment
|
||||
# ##
|
||||
labels: {}
|
||||
|
||||
## Annotations to add to the operator deployment
|
||||
##
|
||||
annotations: {}
|
||||
@ -1846,11 +1865,31 @@ prometheusOperator:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
|
||||
# Enable vertical pod autoscaler support for prometheus-operator
|
||||
verticalPodAutoscaler:
|
||||
enabled: false
|
||||
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
|
||||
controlledResources: []
|
||||
|
||||
# Define the max allowed resources for the pod
|
||||
maxAllowed: {}
|
||||
# cpu: 200m
|
||||
# memory: 100Mi
|
||||
# Define the min allowed resources for the pod
|
||||
minAllowed: {}
|
||||
# cpu: 200m
|
||||
# memory: 100Mi
|
||||
|
||||
updatePolicy:
|
||||
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
|
||||
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
|
||||
updateMode: Auto
|
||||
|
||||
## Prometheus-operator image
|
||||
##
|
||||
image:
|
||||
repository: quay.io/prometheus-operator/prometheus-operator
|
||||
tag: v0.59.1
|
||||
tag: v0.60.1
|
||||
sha: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
@ -1867,7 +1906,7 @@ prometheusOperator:
|
||||
prometheusConfigReloader:
|
||||
image:
|
||||
repository: quay.io/prometheus-operator/prometheus-config-reloader
|
||||
tag: v0.59.1
|
||||
tag: v0.60.1
|
||||
sha: ""
|
||||
|
||||
# resource config for prometheusConfigReloader
|
||||
@ -1883,7 +1922,7 @@ prometheusOperator:
|
||||
##
|
||||
thanosImage:
|
||||
repository: quay.io/thanos/thanos
|
||||
tag: v0.28.0
|
||||
tag: v0.28.1
|
||||
sha: ""
|
||||
|
||||
## Set a Field Selector to filter watched secrets
|
||||
@ -2271,6 +2310,10 @@ prometheus:
|
||||
##
|
||||
apiserverConfig: {}
|
||||
|
||||
## Allows setting additional arguments for the Prometheus container
|
||||
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
|
||||
additionalArgs: []
|
||||
|
||||
## Interval between consecutive scrapes.
|
||||
## Defaults to 30s.
|
||||
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
|
||||
@ -2316,7 +2359,7 @@ prometheus:
|
||||
##
|
||||
image:
|
||||
repository: quay.io/prometheus/prometheus
|
||||
tag: v2.38.0
|
||||
tag: v2.39.1
|
||||
sha: ""
|
||||
|
||||
## Tolerations for use with node taints
|
||||
@ -2856,6 +2899,10 @@ prometheus:
|
||||
## in Prometheus so it may change in any upcoming release.
|
||||
allowOverlappingBlocks: false
|
||||
|
||||
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
|
||||
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
|
||||
minReadySeconds: 0
|
||||
|
||||
additionalRulesForClusterRole: []
|
||||
# - apiGroups: [ "" ]
|
||||
# resources:
|
||||
@ -3147,7 +3194,7 @@ thanosRuler:
|
||||
##
|
||||
image:
|
||||
repository: quay.io/thanos/thanos
|
||||
tag: v0.28.0
|
||||
tag: v0.28.1
|
||||
sha: ""
|
||||
|
||||
## Namespaces to be selected for PrometheusRules discovery.
|
||||
@ -3249,6 +3296,14 @@ thanosRuler:
|
||||
## When used alongside with ObjectStorageConfig, ObjectStorageConfigFile takes precedence.
|
||||
objectStorageConfigFile: ""
|
||||
|
||||
## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
|
||||
## Maps to the --query flag of thanos ruler.
|
||||
queryEndpoints: []
|
||||
|
||||
## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
|
||||
## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
|
||||
queryConfig: {}
|
||||
|
||||
## Labels configure the external label pairs to ThanosRuler. A default replica
|
||||
## label `thanos_ruler_replica` will be always added as a label with the value
|
||||
## of the pod's name and it will be dropped in the alerts.
|
||||
|
@ -13,4 +13,4 @@ maintainers:
|
||||
name: prometheus-pushgateway
|
||||
sources:
|
||||
- https://github.com/prometheus/pushgateway
|
||||
version: 1.18.2
|
||||
version: 1.20.0
|
||||
|
@ -14,6 +14,6 @@
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus-pushgateway.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
{{- end }}
|
||||
echo "Visit http://127.0.0.1:9091 to use your application"
|
||||
kubectl port-forward $POD_NAME 9091
|
||||
{{- end }}
|
||||
|
@ -77,6 +77,10 @@ Returns pod spec
|
||||
imagePullSecrets:
|
||||
{{ toYaml .Values.imagePullSecrets | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.extraInitContainers }}
|
||||
initContainers:
|
||||
{{ toYaml .Values.extraInitContainers | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
{{- if .Values.extraContainers }}
|
||||
{{ toYaml .Values.extraContainers | indent 8 }}
|
||||
@ -128,6 +132,10 @@ Returns pod spec
|
||||
{{- if .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.topologySpreadConstraints }}
|
||||
topologySpreadConstraints:
|
||||
{{ toYaml .Values.topologySpreadConstraints | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext:
|
||||
|
@ -56,6 +56,10 @@ extraVars: []
|
||||
## - --persistence.interval=5m
|
||||
extraArgs: []
|
||||
|
||||
## Additional InitContainers to initialize the pod
|
||||
##
|
||||
extraInitContainers: []
|
||||
|
||||
# Optional additional containers (sidecar)
|
||||
extraContainers: []
|
||||
# - name: oAuth2-proxy
|
||||
@ -184,6 +188,10 @@ containerSecurityContext: {}
|
||||
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
affinity: {}
|
||||
|
||||
## Topology spread constraints for pods
|
||||
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
|
||||
topologySpreadConstraints: []
|
||||
|
||||
# Enable this if you're using https://github.com/coreos/prometheus-operator
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
@ -282,10 +290,10 @@ persistentVolume:
|
||||
##
|
||||
subPath: ""
|
||||
|
||||
extraVolumes: {}
|
||||
extraVolumes: []
|
||||
# - name: extra
|
||||
# emptyDir: {}
|
||||
extraVolumeMounts: {}
|
||||
extraVolumeMounts: []
|
||||
# - name: extra
|
||||
# mountPath: /usr/share/extras
|
||||
# readOnly: true
|
||||
|
@ -18,7 +18,7 @@
|
||||
"subdir": "contrib/mixin"
|
||||
}
|
||||
},
|
||||
"version": "19002cfc689fba2b8f56605e5797bf79f8b61fdd",
|
||||
"version": "62169d12ebf61cc58e8d781eb201d9416487e4a0",
|
||||
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
|
||||
},
|
||||
{
|
||||
@ -48,7 +48,7 @@
|
||||
"subdir": "grafana-builder"
|
||||
}
|
||||
},
|
||||
"version": "dbf6fc14105c28b6fd0253005f7ca2da37d3d4e1",
|
||||
"version": "d73aff453c9784cd6922119f3ce33d8d355a79e1",
|
||||
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
|
||||
},
|
||||
{
|
||||
@ -68,7 +68,7 @@
|
||||
"subdir": "lib/promgrafonnet"
|
||||
}
|
||||
},
|
||||
"version": "5e44626d70c2bf2d35c37f3fee5a6261a5335cc6",
|
||||
"version": "05a58f765eda05902d4f7dd22098a2b870f7ca1e",
|
||||
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
|
||||
},
|
||||
{
|
||||
|
@ -1,9 +1,11 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
VERSION=$(yq eval '.dependencies[] | select(.name=="kube-prometheus-stack") | .version' Chart.yaml)
|
||||
PG_VER=$(yq eval '.dependencies[] | select(.name=="prometheus-pushgateway") | .version' Chart.yaml)
|
||||
|
||||
helm repo update
|
||||
|
||||
VERSION=$(yq eval '.dependencies[] | select(.name=="kube-prometheus-stack") | .version' Chart.yaml)
|
||||
rm -rf charts/kube-prometheus-stack
|
||||
helm pull prometheus-community/kube-prometheus-stack --untar --untardir charts --version $VERSION
|
||||
|
||||
|
@ -52,6 +52,8 @@ kube-prometheus-stack:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
resources:
|
||||
@ -66,6 +68,8 @@ kube-prometheus-stack:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -111,7 +115,6 @@ kube-prometheus-stack:
|
||||
memory: 4Gi
|
||||
# cpu: "1000m"
|
||||
|
||||
walCompression: true
|
||||
storageSpec:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
@ -120,6 +123,27 @@ kube-prometheus-stack:
|
||||
resources:
|
||||
requests:
|
||||
storage: 16Gi
|
||||
#volumes:
|
||||
# - name: aws-token
|
||||
# projected:
|
||||
# sources:
|
||||
# - serviceAccountToken:
|
||||
# path: token
|
||||
# expirationSeconds: 86400
|
||||
# audience: "sts.amazonaws.com"
|
||||
#volumeMounts:
|
||||
# - name: aws-token
|
||||
# mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
|
||||
# readOnly: true
|
||||
#containers:
|
||||
# - name: prometheus
|
||||
# env:
|
||||
# - name: AWS_ROLE_ARN
|
||||
# value: "<prometheus IAM ROLE ARN>"
|
||||
# - name: AWS_WEB_IDENTITY_TOKEN_FILE
|
||||
# value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
|
||||
# - name: AWS_STS_REGIONAL_ENDPOINTS
|
||||
# value: regional
|
||||
|
||||
# Custom Grafana tweaks
|
||||
grafana:
|
||||
@ -188,6 +212,8 @@ kube-prometheus-stack:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -301,6 +327,8 @@ prometheus-adapter:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
# Basic rules for HPA to work replacing heaptster, taken from kube-prometheus project
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-network
|
||||
description: KubeZero umbrella chart for all things network
|
||||
type: application
|
||||
version: 0.3.4
|
||||
version: 0.4.0
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -29,4 +29,4 @@ dependencies:
|
||||
- name: calico
|
||||
version: 0.2.2
|
||||
condition: calico.enabled
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-network
|
||||
|
||||
![Version: 0.3.4](https://img.shields.io/badge/Version-0.3.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero umbrella chart for all things network
|
||||
|
||||
@ -14,7 +14,7 @@ KubeZero umbrella chart for all things network
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
@ -47,6 +47,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| cilium.operator.replicas | int | `1` | |
|
||||
| cilium.operator.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| cilium.operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| cilium.policyEnforcementMode | string | `"never"` | |
|
||||
| cilium.prometheus.enabled | bool | `false` | |
|
||||
| cilium.prometheus.port | int | `9091` | |
|
||||
@ -55,6 +57,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| metallb.controller.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| metallb.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| metallb.enabled | bool | `false` | |
|
||||
| metallb.ipAddressPools | list | `[]` | |
|
||||
| multus.clusterNetwork | string | `"calico"` | |
|
||||
|
@ -605,6 +605,8 @@ spec:
|
||||
operator: Exists
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
serviceAccountName: calico-kube-controllers
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
|
@ -24,6 +24,7 @@ spec:
|
||||
- operator: Exists
|
||||
effect: NoExecute
|
||||
serviceAccountName: multus
|
||||
priorityClassName: system-node-critical
|
||||
containers:
|
||||
- name: kube-multus
|
||||
image: ghcr.io/k8snetworkplumbingwg/multus-cni:{{ .Values.multus.tag }}
|
||||
|
@ -5,6 +5,8 @@ metallb:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -84,6 +86,8 @@ cilium:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-sql
|
||||
|
||||
![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero umbrella chart for SQL databases like MariaDB, PostgreSQL
|
||||
|
||||
@ -19,7 +19,7 @@ Kubernetes: `>= 1.20.0`
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
|
||||
| https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.2 |
|
||||
| https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.3 |
|
||||
|
||||
## Values
|
||||
|
||||
@ -28,7 +28,7 @@ Kubernetes: `>= 1.20.0`
|
||||
| mariadb-galera.configurationConfigMap | string | `"{{ .Release.Name }}-mariadb-galera-configuration"` | |
|
||||
| mariadb-galera.db.password | string | `"12345qwert"` | |
|
||||
| mariadb-galera.db.user | string | `"mariadb"` | |
|
||||
| mariadb-galera.enabled | bool | `true` | |
|
||||
| mariadb-galera.enabled | bool | `false` | |
|
||||
| mariadb-galera.galera.mariabackup.password | string | `"12345qwert"` | |
|
||||
| mariadb-galera.istio.enabled | bool | `false` | |
|
||||
| mariadb-galera.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-storage
|
||||
description: KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
|
||||
type: application
|
||||
version: 0.7.2
|
||||
version: 0.7.3
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -35,4 +35,4 @@ dependencies:
|
||||
version: 2.2.9
|
||||
condition: aws-efs-csi-driver.enabled
|
||||
# repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-storage
|
||||
|
||||
![Version: 0.7.1](https://img.shields.io/badge/Version-0.7.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
|
||||
|
||||
@ -14,12 +14,12 @@ KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, g
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| | aws-ebs-csi-driver | 2.11.0 |
|
||||
| | aws-efs-csi-driver | 2.2.7 |
|
||||
| | aws-ebs-csi-driver | 2.12.0 |
|
||||
| | aws-efs-csi-driver | 2.2.9 |
|
||||
| | gemini | 1.0.0 |
|
||||
| | lvm-localpv | 1.0.0 |
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
|
||||
@ -37,13 +37,22 @@ Kubernetes: `>= 1.20.0`
|
||||
| aws-ebs-csi-driver.controller.resources.requests.memory | string | `"24Mi"` | |
|
||||
| aws-ebs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| aws-ebs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| aws-ebs-csi-driver.controller.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| aws-ebs-csi-driver.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| aws-ebs-csi-driver.enabled | bool | `false` | |
|
||||
| aws-ebs-csi-driver.node.resources.limits.memory | string | `"32Mi"` | |
|
||||
| aws-ebs-csi-driver.node.resources.requests.cpu | string | `"10m"` | |
|
||||
| aws-ebs-csi-driver.node.resources.requests.memory | string | `"16Mi"` | |
|
||||
| aws-ebs-csi-driver.node.tolerateAllTaints | bool | `false` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[1].key | string | `"nvidia.com/gpu"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[1].operator | string | `"Exists"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[2].effect | string | `"NoSchedule"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[2].key | string | `"aws.amazon.com/neuron"` | |
|
||||
| aws-ebs-csi-driver.node.tolerations[2].operator | string | `"Exists"` | |
|
||||
| aws-ebs-csi-driver.storageClasses[0].allowVolumeExpansion | bool | `true` | |
|
||||
| aws-ebs-csi-driver.storageClasses[0].name | string | `"ebs-sc-gp2-xfs"` | |
|
||||
| aws-ebs-csi-driver.storageClasses[0].parameters."csi.storage.k8s.io/fstype" | string | `"xfs"` | |
|
||||
@ -62,6 +71,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| aws-efs-csi-driver.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| aws-efs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| aws-efs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| aws-efs-csi-driver.controller.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| aws-efs-csi-driver.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| aws-efs-csi-driver.enabled | bool | `false` | |
|
||||
| aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/csi.efs.fs"` | |
|
||||
| aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"Exists"` | |
|
||||
@ -72,6 +83,12 @@ Kubernetes: `>= 1.20.0`
|
||||
| aws-efs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[1].key | string | `"nvidia.com/gpu"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[1].operator | string | `"Exists"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[2].effect | string | `"NoSchedule"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[2].key | string | `"aws.amazon.com/neuron"` | |
|
||||
| aws-efs-csi-driver.node.tolerations[2].operator | string | `"Exists"` | |
|
||||
| aws-efs-csi-driver.replicaCount | int | `1` | |
|
||||
| gemini.enabled | bool | `false` | |
|
||||
| gemini.resources.limits.cpu | string | `"400m"` | |
|
||||
@ -84,6 +101,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| lvm-localpv.lvmController.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| lvm-localpv.lvmController.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| lvm-localpv.lvmController.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| lvm-localpv.lvmController.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| lvm-localpv.lvmController.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| lvm-localpv.lvmNode.logLevel | int | `2` | |
|
||||
| lvm-localpv.lvmNode.nodeSelector."node.kubernetes.io/lvm" | string | `"openebs"` | |
|
||||
| lvm-localpv.lvmNode.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
@ -102,6 +121,8 @@ Kubernetes: `>= 1.20.0`
|
||||
| snapshotController.resources.requests.memory | string | `"16Mi"` | |
|
||||
| snapshotController.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| snapshotController.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| snapshotController.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| snapshotController.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
|
||||
# Snapshotter
|
||||
- https://kubernetes-csi.github.io/docs/snapshot-controller.html#deployment
|
||||
|
@ -24,6 +24,8 @@ spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/control-plane
|
||||
containers:
|
||||
- command:
|
||||
- gemini
|
||||
|
@ -1,7 +1,7 @@
|
||||
diff -rtubN charts/gemini/templates/deployment.yaml charts/gemini.zdt/templates/deployment.yaml
|
||||
--- charts/gemini/templates/deployment.yaml 2021-04-19 12:00:43.605005861 +0200
|
||||
+++ charts/gemini.zdt/templates/deployment.yaml 2021-04-19 12:00:08.365005781 +0200
|
||||
@@ -19,6 +19,11 @@
|
||||
@@ -19,6 +19,13 @@
|
||||
{{- else }}
|
||||
serviceAccountName: {{ .Values.rbac.serviceAccountName }}
|
||||
{{- end }}
|
||||
@ -10,6 +10,8 @@ diff -rtubN charts/gemini/templates/deployment.yaml charts/gemini.zdt/templates/
|
||||
+ tolerations:
|
||||
+ - effect: NoSchedule
|
||||
+ key: node-role.kubernetes.io/master
|
||||
+ - effect: NoSchedule
|
||||
+ key: node-role.kubernetes.io/control-plane
|
||||
containers:
|
||||
- command:
|
||||
- gemini
|
||||
|
@ -18,7 +18,7 @@
|
||||
"subdir": "contrib/mixin"
|
||||
}
|
||||
},
|
||||
"version": "acc7463fb2dc7037f571a02e65afd6e573a6344c",
|
||||
"version": "7cd9e5a3383d688b072808cea5dedeb209cc6d47",
|
||||
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
|
||||
},
|
||||
{
|
||||
@ -38,7 +38,7 @@
|
||||
"subdir": "grafana-builder"
|
||||
}
|
||||
},
|
||||
"version": "d73aff453c9784cd6922119f3ce33d8d355a79e1",
|
||||
"version": "187833fc2d104a75dadf28ea5d628818e21619cb",
|
||||
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
|
||||
},
|
||||
{
|
||||
@ -58,7 +58,7 @@
|
||||
"subdir": "lib/promgrafonnet"
|
||||
}
|
||||
},
|
||||
"version": "7b559e800a32a2a80caf4c968f37c4999ec44689",
|
||||
"version": "05a58f765eda05902d4f7dd22098a2b870f7ca1e",
|
||||
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
|
||||
},
|
||||
{
|
||||
|
@ -6,6 +6,8 @@ snapshotController:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -34,6 +36,8 @@ lvm-localpv:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
@ -74,6 +78,8 @@ aws-ebs-csi-driver:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
|
||||
# k8sTagClusterId: <CLUSTER_NAME>
|
||||
# region: <AWS_DEFAULT_REGION>
|
||||
@ -146,6 +152,8 @@ aws-efs-csi-driver:
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero
|
||||
description: KubeZero - Root App of Apps chart
|
||||
type: application
|
||||
version: 1.23.11
|
||||
version: 1.24.7
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -15,4 +15,4 @@ dependencies:
|
||||
- name: kubezero-lib
|
||||
version: ">= 0.1.5"
|
||||
repository: https://cdn.zero-downtime.net/charts
|
||||
kubeVersion: ">= 1.20.0"
|
||||
kubeVersion: ">= 1.24.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero
|
||||
|
||||
![Version: 1.23.11](https://img.shields.io/badge/Version-1.23.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 1.24.7](https://img.shields.io/badge/Version-1.24.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero - Root App of Apps chart
|
||||
|
||||
@ -14,7 +14,7 @@ KubeZero - Root App of Apps chart
|
||||
|
||||
## Requirements
|
||||
|
||||
Kubernetes: `>= 1.20.0`
|
||||
Kubernetes: `>= 1.24.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
@ -30,14 +30,14 @@ Kubernetes: `>= 1.20.0`
|
||||
| addons.enabled | bool | `true` | |
|
||||
| addons.external-dns.enabled | bool | `false` | |
|
||||
| addons.forseti.enabled | bool | `false` | |
|
||||
| addons.targetRevision | string | `"0.6.3"` | |
|
||||
| addons.targetRevision | string | `"0.7.0"` | |
|
||||
| argocd.enabled | bool | `false` | |
|
||||
| argocd.istio.enabled | bool | `false` | |
|
||||
| argocd.namespace | string | `"argocd"` | |
|
||||
| argocd.targetRevision | string | `"0.10.2"` | |
|
||||
| cert-manager.enabled | bool | `false` | |
|
||||
| cert-manager.namespace | string | `"cert-manager"` | |
|
||||
| cert-manager.targetRevision | string | `"0.9.2"` | |
|
||||
| cert-manager.targetRevision | string | `"0.9.3"` | |
|
||||
| global.clusterName | string | `"zdt-trial-cluster"` | |
|
||||
| global.highAvailable | bool | `false` | |
|
||||
| istio-ingress.chart | string | `"kubezero-istio-gateway"` | |
|
||||
@ -52,27 +52,27 @@ Kubernetes: `>= 1.20.0`
|
||||
| istio-private-ingress.targetRevision | string | `"0.8.2"` | |
|
||||
| istio.enabled | bool | `false` | |
|
||||
| istio.namespace | string | `"istio-system"` | |
|
||||
| istio.targetRevision | string | `"0.8.3"` | |
|
||||
| istio.targetRevision | string | `"0.8.5"` | |
|
||||
| kubezero.defaultTargetRevision | string | `"*"` | |
|
||||
| kubezero.gitSync | object | `{}` | |
|
||||
| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
|
||||
| kubezero.server | string | `"https://kubernetes.default.svc"` | |
|
||||
| logging.enabled | bool | `false` | |
|
||||
| logging.namespace | string | `"logging"` | |
|
||||
| logging.targetRevision | string | `"0.8.3"` | |
|
||||
| logging.targetRevision | string | `"0.8.4"` | |
|
||||
| metrics.enabled | bool | `false` | |
|
||||
| metrics.istio.grafana | object | `{}` | |
|
||||
| metrics.istio.prometheus | object | `{}` | |
|
||||
| metrics.namespace | string | `"monitoring"` | |
|
||||
| metrics.targetRevision | string | `"0.8.5"` | |
|
||||
| metrics.targetRevision | string | `"0.8.7"` | |
|
||||
| network.cilium.cluster | object | `{}` | |
|
||||
| network.enabled | bool | `true` | |
|
||||
| network.retain | bool | `true` | |
|
||||
| network.targetRevision | string | `"0.3.4"` | |
|
||||
| network.targetRevision | string | `"0.4.0"` | |
|
||||
| storage.aws-ebs-csi-driver.enabled | bool | `false` | |
|
||||
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
|
||||
| storage.enabled | bool | `false` | |
|
||||
| storage.targetRevision | string | `"0.7.1"` | |
|
||||
| storage.targetRevision | string | `"0.7.3"` | |
|
||||
|
||||
----------------------------------------------
|
||||
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
|
||||
|
@ -10,7 +10,7 @@ global:
|
||||
|
||||
addons:
|
||||
enabled: true
|
||||
targetRevision: 0.6.3
|
||||
targetRevision: 0.7.0
|
||||
external-dns:
|
||||
enabled: false
|
||||
forseti:
|
||||
@ -25,18 +25,18 @@ addons:
|
||||
network:
|
||||
enabled: true
|
||||
retain: true
|
||||
targetRevision: 0.3.4
|
||||
targetRevision: 0.4.0
|
||||
cilium:
|
||||
cluster: {}
|
||||
|
||||
cert-manager:
|
||||
enabled: false
|
||||
namespace: cert-manager
|
||||
targetRevision: 0.9.2
|
||||
targetRevision: 0.9.3
|
||||
|
||||
storage:
|
||||
enabled: false
|
||||
targetRevision: 0.7.2
|
||||
targetRevision: 0.7.3
|
||||
aws-ebs-csi-driver:
|
||||
enabled: false
|
||||
aws-efs-csi-driver:
|
||||
@ -45,7 +45,7 @@ storage:
|
||||
istio:
|
||||
enabled: false
|
||||
namespace: istio-system
|
||||
targetRevision: 0.8.3
|
||||
targetRevision: 0.8.5
|
||||
|
||||
istio-ingress:
|
||||
enabled: false
|
||||
@ -66,7 +66,7 @@ istio-private-ingress:
|
||||
metrics:
|
||||
enabled: false
|
||||
namespace: monitoring
|
||||
targetRevision: 0.8.5
|
||||
targetRevision: 0.8.7
|
||||
istio:
|
||||
grafana: {}
|
||||
prometheus: {}
|
||||
@ -74,7 +74,7 @@ metrics:
|
||||
logging:
|
||||
enabled: false
|
||||
namespace: logging
|
||||
targetRevision: 0.8.3
|
||||
targetRevision: 0.8.4
|
||||
|
||||
argocd:
|
||||
enabled: false
|
||||
|
@ -1,27 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
app: neuron-top
|
||||
name: neuron-top
|
||||
namespace: default
|
||||
spec:
|
||||
containers:
|
||||
- image: public.ecr.aws/zero-downtime/dumpster:latest
|
||||
command:
|
||||
- "bash"
|
||||
- "-c"
|
||||
- "sleep 3600"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: neuron-top
|
||||
resources:
|
||||
limits:
|
||||
#hugepages-2Mi: 256Mi
|
||||
aws.amazon.com/neuron: 1
|
||||
requests:
|
||||
memory: 1024Mi
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: kubezero-workergroup
|
||||
operator: Equal
|
||||
value: public
|
19
docs/nvidia-test.yaml
Normal file
19
docs/nvidia-test.yaml
Normal file
@ -0,0 +1,19 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nvidia-test
|
||||
namespace: default
|
||||
spec:
|
||||
containers:
|
||||
- image: nvidia/cuda:10.1-runtime-ubuntu16.04
|
||||
command:
|
||||
- "bash"
|
||||
- "-c"
|
||||
- "sleep 3600"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: nvidia-test
|
||||
resources:
|
||||
limits:
|
||||
nvidia.com/gpu: 1
|
||||
requests:
|
||||
memory: 1024Mi
|
48
docs/v1.24.md
Normal file
48
docs/v1.24.md
Normal file
@ -0,0 +1,48 @@
|
||||
# KubeZero 1.24
|
||||
|
||||
## TODO
|
||||
|
||||
### FeatureGates
|
||||
- PodAndContainerStatsFromCRI
|
||||
- DelegateFSGroupToCSIDriver
|
||||
|
||||
## What's new - Major themes
|
||||
|
||||
- Cilium added as second CNI to prepare full migration to Cilium with 1.24 upgrade
|
||||
- support for Nvidia g5 instances incl. pre-installed kernel drivers, CUDA toolchain and CRI integration
|
||||
- updated inf1 neuron drivers
|
||||
- ExtendedResourceToleration AdmissionController and auto-taints allowing Neuron and Nvidia pods ONLY to be scheduled on dedicated workers
|
||||
- full Cluster-Autoscaler integration
|
||||
|
||||
## Version upgrades
|
||||
- Istio to 1.14.4
|
||||
- Logging: ECK operator to 2.4, fluent-bit 1.9.8
|
||||
- Metrics: Prometheus and all Grafana charts to latest to match V1.23
|
||||
- ArgoCD to V2.4 ( access to pod via shell disabled by default )
|
||||
- AWS EBS/EFS CSI drivers to latest versions
|
||||
- cert-manager to V1.9.1
|
||||
|
||||
# Upgrade
|
||||
`(No, really, you MUST read this before you upgrade)`
|
||||
|
||||
- Ensure your Kube context points to the correct cluster !
|
||||
|
||||
1. Enable `containerProxy` for NAT instances and upgrade NAT instance using the new V2 Pulumi stacks
|
||||
|
||||
2. Review CFN config for controller and workers ( enable containerProxy, remove legacy version settings etc )
|
||||
|
||||
3. Upgrade CFN stacks for the control plane and all worker groups
|
||||
|
||||
4. Trigger fully-automated cluster upgrade:
|
||||
`./admin/upgrade_cluster.sh <path to the argocd app kubezero yaml for THIS cluster>`
|
||||
|
||||
5. Reboot controller(s) one by one
|
||||
Wait each time for controller to join and all pods running.
|
||||
Might take a while ...
|
||||
|
||||
6. Launch new set of workers eg. by doubling `desired` for each worker ASG
|
||||
once new workers are ready, cordon and drain all old workers
|
||||
The cluster-autoscaler will remove the old workers automatically after about 10min !
|
||||
|
||||
7. If all looks good, commit the ArgoApp resource for Kubezero, before re-enabling ArgoCD itself.
|
||||
git add / commit / push `<cluster/env/kubezero/application.yaml>`
|
@ -18,6 +18,8 @@ function reset_index() {
|
||||
aws s3 sync $REPO_URL_S3/ $TMPDIR/
|
||||
helm repo index $TMPDIR --url $REPO_URL
|
||||
aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1
|
||||
|
||||
exit 0
|
||||
}
|
||||
|
||||
|
||||
@ -49,9 +51,10 @@ function publish_chart() {
|
||||
}
|
||||
|
||||
|
||||
#reset_index
|
||||
|
||||
publish_chart
|
||||
|
||||
CF_DIST=E1YFUJXMCXT2RN
|
||||
aws cloudfront create-invalidation --distribution $CF_DIST --paths "/charts/*"
|
||||
|
||||
#reset_index
|
||||
|
Loading…
Reference in New Issue
Block a user