feat: v1.24 alpha

This commit is contained in:
Stefan Reimer 2022-10-27 14:27:42 +02:00
parent f9a904b619
commit fa12bc5d2b
97 changed files with 1110 additions and 268 deletions

View File

@ -3,7 +3,7 @@ ARG ALPINE_VERSION=3.16
FROM alpine:${ALPINE_VERSION} FROM alpine:${ALPINE_VERSION}
ARG ALPINE_VERSION ARG ALPINE_VERSION
ARG KUBE_VERSION=1.23 ARG KUBE_VERSION=1.24
RUN cd /etc/apk/keys && \ RUN cd /etc/apk/keys && \
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \ wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubeadm name: kubeadm
description: KubeZero Kubeadm cluster config description: KubeZero Kubeadm cluster config
type: application type: application
version: 1.23.11 version: 1.24.7
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -11,4 +11,4 @@ keywords:
maintainers: maintainers:
- name: Stefan Reimer - name: Stefan Reimer
email: stefan@zero-downtime.net email: stefan@zero-downtime.net
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubeadm # kubeadm
![Version: 1.23.10](https://img.shields.io/badge/Version-1.23.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 1.24.7](https://img.shields.io/badge/Version-1.24.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Kubeadm cluster config KubeZero Kubeadm cluster config
@ -14,7 +14,7 @@ KubeZero Kubeadm cluster config
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
## Values ## Values
@ -44,7 +44,7 @@ Kubernetes: `>= 1.20.0`
## Resources ## Resources
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/ - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 - https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/ - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/

View File

@ -18,7 +18,7 @@
## Resources ## Resources
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/ - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 - https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/ - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/

View File

@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration kind: ClusterConfiguration
kubernetesVersion: {{ .Chart.Version }} kubernetesVersion: {{ .Chart.Version }}
clusterName: {{ .Values.global.clusterName }} clusterName: {{ .Values.global.clusterName }}
featureGates: #featureGates:
UnversionedKubeletConfigMap: true # NonGracefulFailover: true
controlPlaneEndpoint: {{ .Values.api.endpoint }} controlPlaneEndpoint: {{ .Values.api.endpoint }}
networking: networking:
podSubnet: 10.244.0.0/16 podSubnet: 10.244.0.0/16

View File

@ -1,6 +1,6 @@
{{- /* Feature gates for all control plane components */ -}} {{- /* Feature gates for all control plane components */ -}}
{{- define "kubeadm.featuregates" }} {{- define "kubeadm.featuregates" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "KubeletCredentialProviders"}} {{- $gates := list "CustomCPUCFSQuotaPeriod" "CronJobTimeZone" "NodeOutOfServiceVolumeDetach" }}
{{- if eq .return "csv" }} {{- if eq .return "csv" }}
{{- range $key := $gates }} {{- range $key := $gates }}
{{- $key }}=true, {{- $key }}=true,

View File

@ -1,4 +1,4 @@
apiVersion: kubelet.config.k8s.io/v1alpha1 apiVersion: kubelet.config.k8s.io/v1beta1
kind: CredentialProviderConfig kind: CredentialProviderConfig
providers: providers:
- name: amazon-ecr-credential-helper - name: amazon-ecr-credential-helper

View File

@ -112,6 +112,8 @@ spec:
tolerations: tolerations:
- effect: NoSchedule - effect: NoSchedule
key: node-role.kubernetes.io/master key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
containers: containers:
- name: aws-iam-authenticator - name: aws-iam-authenticator

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-addons name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons description: KubeZero umbrella chart for various optional cluster addons
type: application type: application
version: 0.6.3 version: 0.7.0
appVersion: v1.23.11 appVersion: v1.24
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -34,4 +34,4 @@ dependencies:
# https://github.com/NVIDIA/k8s-device-plugin # https://github.com/NVIDIA/k8s-device-plugin
repository: https://nvidia.github.io/k8s-device-plugin repository: https://nvidia.github.io/k8s-device-plugin
condition: nvidia-device-plugin.enabled condition: nvidia-device-plugin.enabled
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-addons # kubezero-addons
![Version: 0.6.3](https://img.shields.io/badge/Version-0.6.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.11](https://img.shields.io/badge/AppVersion-v1.23.11-informational?style=flat-square) ![Version: 0.7.0](https://img.shields.io/badge/Version-0.7.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
KubeZero umbrella chart for various optional cluster addons KubeZero umbrella chart for various optional cluster addons
@ -14,7 +14,7 @@ KubeZero umbrella chart for various optional cluster addons
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
@ -63,6 +63,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| aws-node-termination-handler.taintNode | bool | `true` | | | aws-node-termination-handler.taintNode | bool | `true` | |
| aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | | | aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-node-termination-handler.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-node-termination-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-node-termination-handler.useProviderId | bool | `true` | | | aws-node-termination-handler.useProviderId | bool | `true` | |
| awsNeuron.enabled | bool | `false` | | | awsNeuron.enabled | bool | `false` | |
| awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | | | awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
@ -80,6 +82,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | | | cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
| cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | | | cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cluster-autoscaler.tolerations[1].effect | string | `"NoSchedule"` | |
| cluster-autoscaler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| clusterBackup.enabled | bool | `false` | | | clusterBackup.enabled | bool | `false` | |
| clusterBackup.extraEnv | list | `[]` | | | clusterBackup.extraEnv | list | `[]` | |
| clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | | | clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
@ -104,6 +108,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| external-dns.sources[0] | string | `"service"` | | | external-dns.sources[0] | string | `"service"` | |
| external-dns.tolerations[0].effect | string | `"NoSchedule"` | | | external-dns.tolerations[0].effect | string | `"NoSchedule"` | |
| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| external-dns.tolerations[1].effect | string | `"NoSchedule"` | |
| external-dns.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| external-dns.triggerLoopOnEvent | bool | `true` | | | external-dns.triggerLoopOnEvent | bool | `true` | |
| forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" | | forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" |
| forseti.aws.region | string | `""` | | | forseti.aws.region | string | `""` | |

View File

@ -56,5 +56,7 @@ spec:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
restartPolicy: Never restartPolicy: Never
{{- end }} {{- end }}

View File

@ -71,6 +71,8 @@ spec:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
volumes: volumes:
- name: aws-token - name: aws-token
projected: projected:

View File

@ -67,6 +67,8 @@ aws-node-termination-handler:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -140,6 +142,8 @@ cluster-autoscaler:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
# On AWS enable Projected Service Accounts to assume IAM role # On AWS enable Projected Service Accounts to assume IAM role
#extraEnv: #extraEnv:
@ -170,6 +174,8 @@ external-dns:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager description: KubeZero Umbrella Chart for cert-manager
type: application type: application
version: 0.9.2 version: 0.9.3
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -18,4 +18,4 @@ dependencies:
- name: cert-manager - name: cert-manager
version: 1.9.1 version: 1.9.1
repository: https://charts.jetstack.io repository: https://charts.jetstack.io
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-cert-manager # kubezero-cert-manager
![Version: 0.9.2](https://img.shields.io/badge/Version-0.9.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.3](https://img.shields.io/badge/Version-0.9.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for cert-manager KubeZero Umbrella Chart for cert-manager
@ -14,7 +14,7 @@ KubeZero Umbrella Chart for cert-manager
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
@ -35,6 +35,8 @@ If your resolvers need additional sercrets like CloudFlare API tokens etc. make
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | | | cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.cainjector.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.enabled | bool | `true` | | | cert-manager.enabled | bool | `true` | |
| cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | | | cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.global.leaderElection.namespace | string | `"cert-manager"` | | | cert-manager.global.leaderElection.namespace | string | `"cert-manager"` | |
@ -45,9 +47,13 @@ If your resolvers need additional sercrets like CloudFlare API tokens etc. make
| cert-manager.startupapicheck.enabled | bool | `false` | | | cert-manager.startupapicheck.enabled | bool | `false` | |
| cert-manager.tolerations[0].effect | string | `"NoSchedule"` | | | cert-manager.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.webhook.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | cert-manager.webhook.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | | | cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.webhook.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| clusterIssuer | object | `{}` | | | clusterIssuer | object | `{}` | |
| localCA.enabled | bool | `false` | | | localCA.enabled | bool | `false` | |
| localCA.selfsigning | bool | `true` | | | localCA.selfsigning | bool | `true` | |

View File

@ -49,6 +49,8 @@ cert-manager:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -60,6 +62,8 @@ cert-manager:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -67,6 +71,8 @@ cert-manager:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci name: kubezero-ci
description: KubeZero umbrella chart for all things CI description: KubeZero umbrella chart for all things CI
type: application type: application
version: 0.5.14 version: 0.5.15
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -26,7 +26,7 @@ dependencies:
repository: https://dl.gitea.io/charts/ repository: https://dl.gitea.io/charts/
condition: gitea.enabled condition: gitea.enabled
- name: jenkins - name: jenkins
version: 4.2.6 version: 4.2.8
repository: https://charts.jenkins.io repository: https://charts.jenkins.io
condition: jenkins.enabled condition: jenkins.enabled
- name: trivy - name: trivy

View File

@ -1,6 +1,6 @@
# kubezero-ci # kubezero-ci
![Version: 0.5.14](https://img.shields.io/badge/Version-0.5.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.5.15](https://img.shields.io/badge/Version-0.5.15-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI KubeZero umbrella chart for all things CI
@ -20,7 +20,7 @@ Kubernetes: `>= 1.20.0`
|------------|------|---------| |------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 | | https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.jenkins.io | jenkins | 4.2.6 | | https://charts.jenkins.io | jenkins | 4.2.8 |
| https://dl.gitea.io/charts/ | gitea | 5.0.9 | | https://dl.gitea.io/charts/ | gitea | 5.0.9 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 | | https://gocd.github.io/helm-chart | gocd | 1.40.8 |
@ -85,7 +85,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.agent.resources.requests.cpu | string | `"512m"` | | | jenkins.agent.resources.requests.cpu | string | `"512m"` | |
| jenkins.agent.resources.requests.memory | string | `"1024Mi"` | | | jenkins.agent.resources.requests.memory | string | `"1024Mi"` | |
| jenkins.agent.showRawYaml | bool | `false` | | | jenkins.agent.showRawYaml | bool | `false` | |
| jenkins.agent.tag | string | `"v0.3.2"` | | | jenkins.agent.tag | string | `"v0.4.0"` | |
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | | | jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n limits:\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | | | jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n limits:\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | | | jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
@ -95,7 +95,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | | | jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | | | jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | | | jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3706.vdfb_d599579f3"` | | | jenkins.controller.installPlugins[0] | string | `"kubernetes:3724.v0920c1e0ec69"` | |
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | | | jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
| jenkins.controller.installPlugins[2] | string | `"git:4.12.1"` | | | jenkins.controller.installPlugins[2] | string | `"git:4.12.1"` | |
| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1512.vb_79d418d5fc8"` | | | jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1512.vb_79d418d5fc8"` | |
@ -104,7 +104,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.controller.installPlugins[6] | string | `"htmlpublisher:1.31"` | | | jenkins.controller.installPlugins[6] | string | `"htmlpublisher:1.31"` | |
| jenkins.controller.installPlugins[7] | string | `"build-discarder:139.v05696a_7fe240"` | | | jenkins.controller.installPlugins[7] | string | `"build-discarder:139.v05696a_7fe240"` | |
| jenkins.controller.installPlugins[8] | string | `"dark-theme:245.vb_a_2b_b_010ea_96"` | | | jenkins.controller.installPlugins[8] | string | `"dark-theme:245.vb_a_2b_b_010ea_96"` | |
| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.196.va_55f5e31e3c2"` | | | jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.199.v4a_1d1f5d074f"` | |
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | | | jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | | | jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
| jenkins.controller.prometheus.enabled | bool | `false` | | | jenkins.controller.prometheus.enabled | bool | `false` | |

View File

@ -114,7 +114,7 @@ jenkins:
numToKeepStr: "10" numToKeepStr: "10"
installPlugins: installPlugins:
- kubernetes:3706.vdfb_d599579f3 - kubernetes:3724.v0920c1e0ec69
- workflow-aggregator:581.v0c46fa_697ffd - workflow-aggregator:581.v0c46fa_697ffd
- git:4.12.1 - git:4.12.1
- configuration-as-code:1512.vb_79d418d5fc8 - configuration-as-code:1512.vb_79d418d5fc8
@ -123,7 +123,7 @@ jenkins:
- htmlpublisher:1.31 - htmlpublisher:1.31
- build-discarder:139.v05696a_7fe240 - build-discarder:139.v05696a_7fe240
- dark-theme:245.vb_a_2b_b_010ea_96 - dark-theme:245.vb_a_2b_b_010ea_96
- kubernetes-credentials-provider:1.196.va_55f5e31e3c2 - kubernetes-credentials-provider:1.199.v4a_1d1f5d074f
serviceAccountAgent: serviceAccountAgent:
create: true create: true
@ -132,7 +132,7 @@ jenkins:
# Preconfigure agents to use zdt podman requires fuse/overlayfs # Preconfigure agents to use zdt podman requires fuse/overlayfs
agent: agent:
image: public.ecr.aws/zero-downtime/jenkins-podman image: public.ecr.aws/zero-downtime/jenkins-podman
tag: v0.3.2 tag: v0.4.0
resources: resources:
requests: requests:
cpu: "512m" cpu: "512m"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio name: kubezero-istio
description: KubeZero Umbrella Chart for Istio description: KubeZero Umbrella Chart for Istio
type: application type: application
version: 0.8.4 version: 0.8.5
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -22,7 +22,7 @@ dependencies:
version: 1.14.3 version: 1.14.3
repository: https://istio-release.storage.googleapis.com/charts repository: https://istio-release.storage.googleapis.com/charts
- name: kiali-server - name: kiali-server
version: 1.54 version: "1.54"
repository: https://kiali.org/helm-charts repository: https://kiali.org/helm-charts
condition: kiali-server.enabled condition: kiali-server.enabled
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-istio # kubezero-istio
![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.8.5](https://img.shields.io/badge/Version-0.8.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio KubeZero Umbrella Chart for Istio
@ -16,7 +16,7 @@ Installs the Istio control plane
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
@ -44,6 +44,8 @@ Kubernetes: `>= 1.20.0`
| istiod.pilot.resources.requests.memory | string | `"128Mi"` | | | istiod.pilot.resources.requests.memory | string | `"128Mi"` | |
| istiod.pilot.tolerations[0].effect | string | `"NoSchedule"` | | | istiod.pilot.tolerations[0].effect | string | `"NoSchedule"` | |
| istiod.pilot.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | istiod.pilot.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| istiod.pilot.tolerations[1].effect | string | `"NoSchedule"` | |
| istiod.pilot.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| istiod.telemetry.enabled | bool | `false` | | | istiod.telemetry.enabled | bool | `false` | |
| kiali-server.auth.strategy | string | `"anonymous"` | | | kiali-server.auth.strategy | string | `"anonymous"` | |
| kiali-server.deployment.ingress_enabled | bool | `false` | | | kiali-server.deployment.ingress_enabled | bool | `false` | |

View File

@ -19,6 +19,8 @@ istiod:
tolerations: tolerations:
- effect: NoSchedule - effect: NoSchedule
key: node-role.kubernetes.io/master key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
resources: resources:
requests: requests:

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack description: KubeZero Umbrella Chart for complete EFK stack
type: application type: application
version: 0.8.3 version: 0.8.4
appVersion: 1.6.0 appVersion: 1.6.0
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -29,4 +29,4 @@ dependencies:
- name: fluent-bit - name: fluent-bit
version: 0.20.6 version: 0.20.6
condition: fluent-bit.enabled condition: fluent-bit.enabled
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-logging # kubezero-logging
![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square) ![Version: 0.8.4](https://img.shields.io/badge/Version-0.8.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack KubeZero Umbrella Chart for complete EFK stack
@ -14,7 +14,7 @@ KubeZero Umbrella Chart for complete EFK stack
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
@ -62,6 +62,8 @@ Kubernetes: `>= 1.20.0`
| eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| eck-operator.tolerations[0].effect | string | `"NoSchedule"` | | | eck-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| eck-operator.tolerations[1].effect | string | `"NoSchedule"` | |
| eck-operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| elastic_password | string | `""` | | | elastic_password | string | `""` | |
| es.nodeSets | list | `[]` | | | es.nodeSets | list | `[]` | |
| es.prometheus | bool | `false` | | | es.prometheus | bool | `false` | |

View File

@ -7,6 +7,8 @@ eck-operator:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics name: kubezero-metrics
description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations. description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
type: application type: application
version: 0.8.5 version: 0.8.7
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -18,7 +18,7 @@ dependencies:
version: ">= 0.1.5" version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: kube-prometheus-stack - name: kube-prometheus-stack
version: 40.0.0 version: 41.4.1
# Switch back to upstream once all alerts are fixed eg. etcd gpcr # Switch back to upstream once all alerts are fixed eg. etcd gpcr
# repository: https://prometheus-community.github.io/helm-charts # repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter - name: prometheus-adapter
@ -26,8 +26,8 @@ dependencies:
repository: https://prometheus-community.github.io/helm-charts repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-adapter.enabled condition: prometheus-adapter.enabled
- name: prometheus-pushgateway - name: prometheus-pushgateway
version: 1.18.2 version: 1.20.0
# Switch back to upstream once namespaces are supported # Switch back to upstream once namespaces are supported
# repository: https://prometheus-community.github.io/helm-charts # repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-pushgateway.enabled condition: prometheus-pushgateway.enabled
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-metrics # kubezero-metrics
![Version: 0.8.5](https://img.shields.io/badge/Version-0.8.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.8.7](https://img.shields.io/badge/Version-0.8.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations. KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
@ -14,12 +14,12 @@ KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| | kube-prometheus-stack | 40.0.0 | | | kube-prometheus-stack | 41.4.1 |
| | prometheus-pushgateway | 1.18.2 | | | prometheus-pushgateway | 1.20.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.4.0 | | https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.4.0 |
@ -127,6 +127,8 @@ Kubernetes: `>= 1.20.0`
| kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | | | kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[1].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | | | kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | | | kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | | | kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | |
@ -168,10 +170,11 @@ Kubernetes: `>= 1.20.0`
| kube-prometheus-stack.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `false` | | | kube-prometheus-stack.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `false` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | | | kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | | | kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.walCompression | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | | | kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | | | kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | | | kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | |
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
@ -180,6 +183,8 @@ Kubernetes: `>= 1.20.0`
| kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"32Mi"` | | | kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"32Mi"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | | | kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[1].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| prometheus-adapter.enabled | bool | `true` | | | prometheus-adapter.enabled | bool | `true` | |
| prometheus-adapter.logLevel | int | `1` | | | prometheus-adapter.logLevel | int | `1` | |
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | prometheus-adapter.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
@ -200,6 +205,8 @@ Kubernetes: `>= 1.20.0`
| prometheus-adapter.rules.resource.window | string | `"5m"` | | | prometheus-adapter.rules.resource.window | string | `"5m"` | |
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | | | prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| prometheus-adapter.tolerations[1].effect | string | `"NoSchedule"` | |
| prometheus-adapter.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| prometheus-pushgateway.enabled | bool | `false` | | | prometheus-pushgateway.enabled | bool | `false` | |
| prometheus-pushgateway.serviceMonitor.enabled | bool | `true` | | | prometheus-pushgateway.serviceMonitor.enabled | bool | `true` | |

View File

@ -6,20 +6,20 @@ annotations:
url: https://github.com/prometheus-operator/kube-prometheus url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true" artifacthub.io/operator: "true"
apiVersion: v2 apiVersion: v2
appVersion: 0.59.1 appVersion: 0.60.1
dependencies: dependencies:
- condition: kubeStateMetrics.enabled - condition: kubeStateMetrics.enabled
name: kube-state-metrics name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts repository: https://prometheus-community.github.io/helm-charts
version: 4.18.* version: 4.20.*
- condition: nodeExporter.enabled - condition: nodeExporter.enabled
name: prometheus-node-exporter name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts repository: https://prometheus-community.github.io/helm-charts
version: 4.2.* version: 4.3.*
- condition: grafana.enabled - condition: grafana.enabled
name: grafana name: grafana
repository: https://grafana.github.io/helm-charts repository: https://grafana.github.io/helm-charts
version: 6.38.* version: 6.40.*
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards, description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
and Prometheus rules combined with documentation and scripts to provide easy to and Prometheus rules combined with documentation and scripts to provide easy to
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
@ -51,4 +51,4 @@ sources:
- https://github.com/prometheus-community/helm-charts - https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus - https://github.com/prometheus-operator/kube-prometheus
type: application type: application
version: 40.0.0 version: 41.4.1

View File

@ -80,6 +80,27 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 40.x to 41.x
This version upgrades Prometheus-Operator to v0.60.1, Prometheus to v2.39.1 and Thanos to v0.28.1.
This version also upgrades the Helm charts of kube-state-metrics to 4.20.2, prometheus-node-exporter to 4.3.0 and Grafana to 6.40.4.
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
This version splits kubeScheduler recording and alerting rules into separate config values.
Instead of `defaultRules.rules.kubeScheduler` the 2 new variables `defaultRules.rules.kubeSchedulerAlerting` and `defaultRules.rules.kubeSchedulerRecording` are used.
### From 39.x to 40.x ### From 39.x to 40.x
This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0. This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0.

View File

@ -1,5 +1,5 @@
apiVersion: v2 apiVersion: v2
appVersion: 9.1.4 appVersion: 9.1.7
description: The leading tool for querying and visualizing time series and metrics. description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
@ -19,4 +19,4 @@ name: grafana
sources: sources:
- https://github.com/grafana/grafana - https://github.com/grafana/grafana
type: application type: application
version: 6.38.0 version: 6.40.4

View File

@ -150,6 +150,15 @@ This version requires Helm >= 3.1.0.
| `sidecar.resources` | Sidecar resources | `{}` | | `sidecar.resources` | Sidecar resources | `{}` |
| `sidecar.securityContext` | Sidecar securityContext | `{}` | | `sidecar.securityContext` | Sidecar securityContext | `{}` |
| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | | `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` |
| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` |
| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` |
| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` |
| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.alerts.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` |
| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | | `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | | `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | | `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
@ -174,13 +183,20 @@ This version requires Helm >= 3.1.0.
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | | `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` | | `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` |
| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | | `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.datasources.resource` | Should the sidecar look into secrets, configmaps or both. | `both` | | `sidecar.datasources.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | | `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` |
| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | | `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` |
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | | `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | | `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` |
| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | | `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.notifiers.resource` | Should the sidecar look into secrets, configmaps or both. | `both` | | `sidecar.notifiers.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` |
| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` |
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | | `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | | `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | | `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
@ -319,6 +335,14 @@ dashboards:
gnetId: 2 gnetId: 2
revision: 2 revision: 2
datasource: Prometheus datasource: Prometheus
loki-dashboard-quick-search:
gnetId: 12019
revision: 2
datasource:
- name: DS_PROMETHEUS
value: Prometheus
- name: DS_LOKI
value: Loki
local-dashboard: local-dashboard:
url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
``` ```

View File

@ -15,7 +15,7 @@ hostAliases:
{{- if .Values.priorityClassName }} {{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }}
{{- end }} {{- end }}
{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.notifiers.enabled .Values.extraInitContainers (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources)) }} {{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }}
initContainers: initContainers:
{{- end }} {{- end }}
{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} {{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }}
@ -123,7 +123,7 @@ initContainers:
{{- end }} {{- end }}
{{- if .Values.sidecar.datasources.searchNamespace }} {{- if .Values.sidecar.datasources.searchNamespace }}
- name: NAMESPACE - name: NAMESPACE
value: "{{ .Values.sidecar.datasources.searchNamespace | join "," }}" value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}"
{{- end }} {{- end }}
{{- if .Values.sidecar.skipTlsVerify }} {{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY - name: SKIP_TLS_VERIFY
@ -141,8 +141,8 @@ initContainers:
- name: sc-datasources-volume - name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources" mountPath: "/etc/grafana/provisioning/datasources"
{{- end }} {{- end }}
{{- if .Values.sidecar.notifiers.enabled }} {{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }}
- name: {{ template "grafana.name" . }}-sc-notifiers - name: {{ template "grafana.name" . }}-init-sc-notifiers
{{- if .Values.sidecar.image.sha }} {{- if .Values.sidecar.image.sha }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }} {{- else }}
@ -180,7 +180,7 @@ initContainers:
{{- end }} {{- end }}
{{- if .Values.sidecar.notifiers.searchNamespace }} {{- if .Values.sidecar.notifiers.searchNamespace }}
- name: NAMESPACE - name: NAMESPACE
value: "{{ .Values.sidecar.notifiers.searchNamespace | join "," }}" value: "{{ tpl (.Values.sidecar.notifiers.searchNamespace | join ",") . }}"
{{- end }} {{- end }}
{{- if .Values.sidecar.skipTlsVerify }} {{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY - name: SKIP_TLS_VERIFY
@ -220,6 +220,109 @@ imagePullSecrets:
enableServiceLinks: {{ .Values.enableServiceLinks }} enableServiceLinks: {{ .Values.enableServiceLinks }}
{{- end }} {{- end }}
containers: containers:
{{- if .Values.sidecar.alerts.enabled }}
- name: {{ template "grafana.name" . }}-sc-alerts
{{- if .Values.sidecar.image.sha }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
{{- range $key, $value := .Values.sidecar.alerts.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }}
- name: IGNORE_ALREADY_PROCESSED
value: "true"
{{- end }}
- name: METHOD
value: {{ .Values.sidecar.alerts.watchMethod }}
- name: LABEL
value: "{{ .Values.sidecar.alerts.label }}"
{{- with .Values.sidecar.alerts.labelValue }}
- name: LABEL_VALUE
value: {{ quote . }}
{{- end }}
{{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }}
- name: LOG_LEVEL
value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }}
{{- end }}
- name: FOLDER
value: "/etc/grafana/provisioning/alerting"
- name: RESOURCE
value: {{ quote .Values.sidecar.alerts.resource }}
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- with .Values.sidecar.alerts.searchNamespace }}
- name: NAMESPACE
value: {{ . | join "," | quote }}
{{- end }}
{{- with .Values.sidecar.alerts.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: {{ quote . }}
{{- end }}
{{- with .Values.sidecar.alerts.script }}
- name: SCRIPT
value: {{ quote . }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if not .Values.sidecar.alerts.skipReload }}
- name: REQ_URL
value: {{ .Values.sidecar.alerts.reloadURL }}
- name: REQ_METHOD
value: POST
{{- end }}
{{- if .Values.sidecar.alerts.watchServerTimeout }}
{{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }}
{{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }}
{{- end }}
- name: WATCH_SERVER_TIMEOUT
value: "{{ .Values.sidecar.alerts.watchServerTimeout }}"
{{- end }}
{{- if .Values.sidecar.alerts.watchClientTimeout }}
{{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }}
{{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }}
{{- end }}
- name: WATCH_CLIENT_TIMEOUT
value: "{{ .Values.sidecar.alerts.watchClientTimeout }}"
{{- end }}
{{- with .Values.sidecar.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.sidecar.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.sidecar.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.sidecar.securityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
{{- end }}
volumeMounts:
- name: sc-alerts-volume
mountPath: "/etc/grafana/provisioning/alerting"
{{- end}}
{{- if .Values.sidecar.dashboards.enabled }} {{- if .Values.sidecar.dashboards.enabled }}
- name: {{ template "grafana.name" . }}-sc-dashboard - name: {{ template "grafana.name" . }}-sc-dashboard
{{- if .Values.sidecar.image.sha }} {{- if .Values.sidecar.image.sha }}
@ -259,7 +362,7 @@ containers:
{{- end }} {{- end }}
{{- if .Values.sidecar.dashboards.searchNamespace }} {{- if .Values.sidecar.dashboards.searchNamespace }}
- name: NAMESPACE - name: NAMESPACE
value: "{{ .Values.sidecar.dashboards.searchNamespace | join "," }}" value: "{{ tpl (.Values.sidecar.dashboards.searchNamespace | join ",") . }}"
{{- end }} {{- end }}
{{- if .Values.sidecar.skipTlsVerify }} {{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY - name: SKIP_TLS_VERIFY
@ -349,7 +452,7 @@ containers:
{{- end }} {{- end }}
{{- if .Values.sidecar.datasources.searchNamespace }} {{- if .Values.sidecar.datasources.searchNamespace }}
- name: NAMESPACE - name: NAMESPACE
value: "{{ .Values.sidecar.datasources.searchNamespace | join "," }}" value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}"
{{- end }} {{- end }}
{{- if .Values.sidecar.skipTlsVerify }} {{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY - name: SKIP_TLS_VERIFY
@ -413,6 +516,109 @@ containers:
- name: sc-datasources-volume - name: sc-datasources-volume
mountPath: "/etc/grafana/provisioning/datasources" mountPath: "/etc/grafana/provisioning/datasources"
{{- end}} {{- end}}
{{- /*
Optional sidecar container that watches ConfigMaps/Secrets labelled with
`.Values.sidecar.notifiers.label`, copies their contents into
/etc/grafana/provisioning/notifiers, and (unless skipReload is set) POSTs to
Grafana's notification-provisioning reload endpoint.
*/}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: {{ template "grafana.name" . }}-sc-notifiers
{{- if .Values.sidecar.image.sha }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}"
{{- else }}
image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}"
{{- end }}
imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}
env:
{{- /* Arbitrary extra env vars supplied per-sidecar via values. */}}
{{- range $key, $value := .Values.sidecar.notifiers.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }}
- name: IGNORE_ALREADY_PROCESSED
value: "true"
{{- end }}
- name: METHOD
value: {{ .Values.sidecar.notifiers.watchMethod }}
- name: LABEL
value: "{{ .Values.sidecar.notifiers.label }}"
{{- if .Values.sidecar.notifiers.labelValue }}
- name: LABEL_VALUE
value: {{ quote .Values.sidecar.notifiers.labelValue }}
{{- end }}
{{- /* Per-sidecar logLevel takes precedence over the shared sidecar default. */}}
{{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }}
- name: LOG_LEVEL
value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }}
{{- end }}
- name: FOLDER
value: "/etc/grafana/provisioning/notifiers"
- name: RESOURCE
value: {{ quote .Values.sidecar.notifiers.resource }}
{{- if .Values.sidecar.enableUniqueFilenames }}
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- /* searchNamespace may be a string or a list; templated, then joined. */}}
{{- if .Values.sidecar.notifiers.searchNamespace }}
- name: NAMESPACE
value: "{{ tpl (.Values.sidecar.notifiers.searchNamespace | join ",") . }}"
{{- end }}
{{- if .Values.sidecar.skipTlsVerify }}
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
{{- if .Values.sidecar.notifiers.script }}
- name: SCRIPT
value: "{{ .Values.sidecar.notifiers.script }}"
{{- end }}
{{- /*
Admin credentials for the reload request, skipped when admin creation is
disabled or the credential is injected through the container environment.
*/}}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if not .Values.sidecar.notifiers.skipReload }}
- name: REQ_URL
value: {{ .Values.sidecar.notifiers.reloadURL }}
- name: REQ_METHOD
value: POST
{{- end }}
{{- /* The watch timeouts only make sense with METHOD=WATCH; fail the render otherwise. */}}
{{- if .Values.sidecar.notifiers.watchServerTimeout }}
{{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }}
{{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }}
{{- end }}
- name: WATCH_SERVER_TIMEOUT
value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}"
{{- end }}
{{- if .Values.sidecar.notifiers.watchClientTimeout }}
{{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }}
{{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }}
{{- end }}
- name: WATCH_CLIENT_TIMEOUT
value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}"
{{- end }}
{{- /* Probes/resources/securityContext are shared across all sidecars. */}}
{{- with .Values.sidecar.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.sidecar.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.sidecar.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.sidecar.securityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
{{- end }}
volumeMounts:
- name: sc-notifiers-volume
mountPath: "/etc/grafana/provisioning/notifiers"
{{- end}}
{{- if .Values.sidecar.plugins.enabled }} {{- if .Values.sidecar.plugins.enabled }}
- name: {{ template "grafana.name" . }}-sc-plugins - name: {{ template "grafana.name" . }}-sc-plugins
{{- if .Values.sidecar.image.sha }} {{- if .Values.sidecar.image.sha }}
@ -452,7 +658,7 @@ containers:
{{- end }} {{- end }}
{{- if .Values.sidecar.plugins.searchNamespace }} {{- if .Values.sidecar.plugins.searchNamespace }}
- name: NAMESPACE - name: NAMESPACE
value: "{{ .Values.sidecar.plugins.searchNamespace | join "," }}" value: "{{ tpl (.Values.sidecar.plugins.searchNamespace | join ",") . }}"
{{- end }} {{- end }}
{{- if .Values.sidecar.plugins.script }} {{- if .Values.sidecar.plugins.script }}
- name: SCRIPT - name: SCRIPT
@ -526,7 +732,7 @@ containers:
{{- if .Values.command }} {{- if .Values.command }}
command: command:
{{- range .Values.command }} {{- range .Values.command }}
- {{ . }} - {{ . | quote }}
{{- end }} {{- end }}
{{- end}} {{- end}}
{{- with .Values.containerSecurityContext }} {{- with .Values.containerSecurityContext }}
@ -599,6 +805,10 @@ containers:
subPath: {{ . | quote }} subPath: {{ . | quote }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- /*
Mount the alerts sidecar's shared emptyDir into the grafana container.
Use `if` rather than `with` on the boolean flag: `with` would rebind `.`
inside the block (breaking any future `.Values` reference) and only works
here because `false` happens to be falsy. This also matches the `if` used
for the matching volume definition below.
*/}}
{{- if .Values.sidecar.alerts.enabled }}
- name: sc-alerts-volume
mountPath: "/etc/grafana/provisioning/alerting"
{{- end}}
{{- if .Values.sidecar.dashboards.enabled }} {{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume - name: sc-dashboard-volume
mountPath: {{ .Values.sidecar.dashboards.folder | quote }} mountPath: {{ .Values.sidecar.dashboards.folder | quote }}
@ -811,6 +1021,15 @@ volumes:
emptyDir: {} emptyDir: {}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{- /*
Shared emptyDir backing /etc/grafana/provisioning/alerting, written by the
alerts sidecar and read by the grafana container. An optional sizeLimit is
applied when set (the default `{}` in values is falsy, so the plain
emptyDir branch is taken).
*/}}
{{- if .Values.sidecar.alerts.enabled }}
- name: sc-alerts-volume
{{- if .Values.sidecar.alerts.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.sidecar.alerts.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- end -}}
{{- if .Values.sidecar.dashboards.enabled }} {{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume - name: sc-dashboard-volume
{{- if .Values.sidecar.dashboards.sizeLimit }} {{- if .Values.sidecar.dashboards.sizeLimit }}

View File

@ -1,4 +1,4 @@
{{- if .Values.sidecar.dashboards.enabled }} {{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }}
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:

View File

@ -60,7 +60,7 @@ data:
{{ $root := . }} {{ $root := . }}
{{- range $key, $value := .Values.alerting }} {{- range $key, $value := .Values.alerting }}
{{ $key }}: | {{ $key }}: |
{{ tpl $value $root | indent 4 }} {{ tpl (toYaml $value | indent 4) $root }}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
@ -101,16 +101,33 @@ data:
-H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \
{{- end }} {{- end }}
-H "Content-Type: application/json;charset=UTF-8" \ -H "Content-Type: application/json;charset=UTF-8" \
{{ end }} {{- end -}}
{{- $dpPath := "" -}} {{- $dpPath := "" -}}
{{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers -}} {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers -}}
{{- if eq $kd.name $provider -}} {{- if eq $kd.name $provider -}}
{{- $dpPath = $kd.options.path -}} {{- $dpPath = $kd.options.path -}}
{{- end -}} {{- end -}}
{{- end -}} {{- end -}}
{{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ {{- if $value.url }}
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" "{{ $value.url }}" \
{{- else }}
"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \
{{- end -}}
{{- if $value.datasource }}
{{- if kindIs "string" $value.datasource }}
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \
{{- end -}}
{{- if kindIs "slice" $value.datasource -}}
{{- range $value.datasource }}
| sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \
{{- end -}}
{{- end -}}
{{- end -}}
{{- if $value.b64content }}
| base64 -d \
{{- end }} {{- end }}
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json"
{{ end }}
{{- end -}} {{- end -}}
{{- end }} {{- end }}
{{- end }} {{- end }}

View File

@ -3,6 +3,7 @@ apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler kind: HorizontalPodAutoscaler
metadata: metadata:
name: {{ template "grafana.fullname" . }} name: {{ template "grafana.fullname" . }}
namespace: {{ template "grafana.namespace" . }}
labels: labels:
app.kubernetes.io/name: {{ template "grafana.name" . }} app.kubernetes.io/name: {{ template "grafana.name" . }}
helm.sh/chart: {{ template "grafana.chart" . }} helm.sh/chart: {{ template "grafana.chart" . }}

View File

@ -128,7 +128,7 @@ extraLabels: {}
downloadDashboardsImage: downloadDashboardsImage:
repository: curlimages/curl repository: curlimages/curl
tag: 7.73.0 tag: 7.85.0
sha: "" sha: ""
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
@ -522,11 +522,11 @@ datasources: {}
## ref: http://docs.grafana.org/administration/provisioning/#alerting ## ref: http://docs.grafana.org/administration/provisioning/#alerting
## ##
alerting: {} alerting: {}
# rules.yaml: | # rules.yaml:
# apiVersion: 1 # apiVersion: 1
# groups: # groups:
# - orgId: 1 # - orgId: 1
# name: {{ .Chart.Name }}_my_rule_group # name: '{{ .Chart.Name }}_my_rule_group'
# folder: my_first_folder # folder: my_first_folder
# interval: 60s # interval: 60s
# rules: # rules:
@ -566,7 +566,7 @@ alerting: {}
# some_key: some_value # some_key: some_value
# labels: # labels:
# team: sre_team_1 # team: sre_team_1
# contactpoints.yaml: | # contactpoints.yaml:
# apiVersion: 1 # apiVersion: 1
# contactPoints: # contactPoints:
# - orgId: 1 # - orgId: 1
@ -678,7 +678,7 @@ grafana.ini:
grafana_net: grafana_net:
url: https://grafana.net url: https://grafana.net
server: server:
domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ end }}" domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}"
## grafana Authentication can be enabled with the following values on grafana.ini ## grafana Authentication can be enabled with the following values on grafana.ini
# server: # server:
# The full public facing url you use in browser, used for redirects and emails # The full public facing url you use in browser, used for redirects and emails
@ -758,6 +758,44 @@ sidecar:
livenessProbe: {} livenessProbe: {}
# Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO
# logLevel: INFO # logLevel: INFO
alerts:
enabled: false
# Additional environment variables for the alerts sidecar
env: {}
# Do not reprocess already processed unchanged resources on k8s API reconnect.
# ignoreAlreadyProcessed: true
# Label that ConfigMaps containing alert definitions must carry
label: grafana_alert
# Value the label must have; empty string matches any value
labelValue: ""
# Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
# logLevel: INFO
# If specified, the sidecar will search for alert config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# Method used to detect ConfigMap changes. With WATCH the sidecar performs WATCH requests; with SLEEP it lists all ConfigMaps, then sleeps for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both
resource: both
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
# watchServerTimeout: 3600
#
# watchClientTimeout: is a client-side timeout, configuring your local socket.
# If you have a network outage dropping all packets with no RST/FIN,
# this is how long your client waits before realizing & dropping the connection.
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
# Endpoint to send request to reload alerts
reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload"
# Absolute path to shell script to execute after an alert got reloaded
script: null
skipReload: false
# NOTE(review): unlike the notifiers section there is no initAlerts key here,
# so the alerts sidecar cannot be deployed as an initContainer — verify intent.
# Sets the size limit of the alert sidecar emptyDir volume
sizeLimit: {}
dashboards: dashboards:
enabled: false enabled: false
# Additional environment variables for the dashboards sidecar # Additional environment variables for the dashboards sidecar
@ -914,8 +952,28 @@ sidecar:
# Otherwise the namespace in which the sidecar is running will be used. # Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces # It's also possible to specify ALL to search in all namespaces
searchNamespace: null searchNamespace: null
# Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
watchMethod: WATCH
# search in configmap, secret or both # search in configmap, secret or both
resource: both resource: both
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
# watchServerTimeout: 3600
#
# watchClientTimeout: is a client-side timeout, configuring your local socket.
# If you have a network outage dropping all packets with no RST/FIN,
# this is how long your client waits before realizing & dropping the connection.
# defaults to 66sec (sic!)
# watchClientTimeout: 60
#
# Endpoint to send request to reload notifiers
reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload"
# Absolute path to shell script to execute after a notifier got reloaded
script: null
skipReload: false
# Deploy the notifier sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any notifiers defined at startup time.
initNotifiers: false
# Sets the size limit of the notifier sidecar emptyDir volume # Sets the size limit of the notifier sidecar emptyDir volume
sizeLimit: {} sizeLimit: {}

View File

@ -18,4 +18,4 @@ name: kube-state-metrics
sources: sources:
- https://github.com/kubernetes/kube-state-metrics/ - https://github.com/kubernetes/kube-state-metrics/
type: application type: application
version: 4.18.0 version: 4.20.2

View File

@ -74,10 +74,17 @@ spec:
{{- if .Values.metricDenylist }} {{- if .Values.metricDenylist }}
- --metric-denylist={{ .Values.metricDenylist | join "," }} - --metric-denylist={{ .Values.metricDenylist | join "," }}
{{- end }} {{- end }}
{{- /*
Build the --namespaces list: accept either a comma-separated string or a
YAML list in .Values.namespaces (join then re-split normalizes both forms),
templating each entry with tpl.
*/}}
{{- $namespaces := list }}
{{- if .Values.namespaces }}
{{- range $ns := join "," .Values.namespaces | split "," }}
{{- $namespaces = append $namespaces (tpl $ns $) }}
{{- end }}
{{- end }}
{{- if .Values.releaseNamespace }} {{- if .Values.releaseNamespace }}
- --namespaces={{ template "kube-state-metrics.namespace" . }} {{- $namespaces = append $namespaces ( include "kube-state-metrics.namespace" . ) }}
{{- else if .Values.namespaces }} {{- end }}
- --namespaces={{ tpl (.Values.namespaces | join ",") $ }} {{- if $namespaces }}
- --namespaces={{ $namespaces | mustUniq | join "," }}
{{- end }} {{- end }}
{{- if .Values.namespacesDenylist }} {{- if .Values.namespacesDenylist }}
- --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }} - --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }}

View File

@ -1,5 +1,5 @@
{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} {{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}}
{{- range (ternary (split "," .Values.namespaces) (list "") (eq $.Values.rbac.useClusterRole false)) }} {{- range (ternary (join "," .Values.namespaces | split "," ) (list "") (eq $.Values.rbac.useClusterRole false)) }}
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
{{- if eq $.Values.rbac.useClusterRole false }} {{- if eq $.Values.rbac.useClusterRole false }}

View File

@ -1,5 +1,5 @@
{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} {{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}}
{{- range (split "," $.Values.namespaces) }} {{- range (join "," $.Values.namespaces) | split "," }}
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding kind: RoleBinding

View File

@ -0,0 +1,34 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ template "kube-state-metrics.fullname" . }}
namespace: {{ template "kube-state-metrics.namespace" . }}
labels:
{{- include "kube-state-metrics.labels" . | indent 4 }}
spec:
resourcePolicy:
containerPolicies:
- containerName: {{ template "kube-state-metrics.name" . }}
{{- if .Values.verticalPodAutoscaler.controlledResources }}
controlledResources: {{ .Values.verticalPodAutoscaler.controlledResources }}
{{- end }}
{{- if .Values.verticalPodAutoscaler.maxAllowed }}
maxAllowed:
{{ toYaml .Values.verticalPodAutoscaler.maxAllowed | nindent 8 }}
{{- end }}
{{- if .Values.verticalPodAutoscaler.minAllowed }}
minAllowed:
{{ toYaml .Values.verticalPodAutoscaler.minAllowed | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "kube-state-metrics.fullname" . }}
{{- if .Values.verticalPodAutoscaler.updatePolicy }}
updatePolicy:
{{- if .Values.verticalPodAutoscaler.updatePolicy.updateMode }}
updateMode: {{ .Values.verticalPodAutoscaler.updatePolicy.updateMode }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -211,10 +211,10 @@ kubeconfig:
secret: secret:
# Enable only the release namespace for collecting resources. By default all namespaces are collected. # Enable only the release namespace for collecting resources. By default all namespaces are collected.
# If releaseNamespace and namespaces are both set only releaseNamespace will be used. # If releaseNamespace and namespaces are both set a merged list will be collected.
releaseNamespace: false releaseNamespace: false
# Comma-separated list of namespaces to be enabled for collecting resources. By default all namespaces are collected. # Comma-separated list(string) or yaml list of namespaces to be enabled for collecting resources. By default all namespaces are collected.
namespaces: "" namespaces: ""
# Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set, # Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set,
@ -251,6 +251,26 @@ selfMonitor:
# telemetryPort: 8081 # telemetryPort: 8081
# telemetryNodePort: 0 # telemetryNodePort: 0
# Enable vertical pod autoscaler (VPA) support for kube-state-metrics.
# Only takes effect when the autoscaling.k8s.io/v1 API (VPA CRDs) is available in the cluster.
verticalPodAutoscaler:
enabled: false
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
# updatePolicy:
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
# updateMode: Auto
# volumeMounts are used to add custom volume mounts to deployment. # volumeMounts are used to add custom volume mounts to deployment.
# See example below # See example below
volumeMounts: [] volumeMounts: []

View File

@ -15,4 +15,4 @@ name: prometheus-node-exporter
sources: sources:
- https://github.com/prometheus/node_exporter/ - https://github.com/prometheus/node_exporter/
type: application type: application
version: 4.2.0 version: 4.3.0

View File

@ -1,5 +1,5 @@
{{- if .Values.prometheus.monitor.enabled }} {{- if .Values.prometheus.monitor.enabled }}
apiVersion: monitoring.coreos.com/v1 apiVersion: {{ .Values.prometheus.monitor.apiVersion | default "monitoring.coreos.com/v1" }}
kind: ServiceMonitor kind: ServiceMonitor
metadata: metadata:
name: {{ template "prometheus-node-exporter.fullname" . }} name: {{ template "prometheus-node-exporter.fullname" . }}

View File

@ -51,6 +51,8 @@ prometheus:
metricRelabelings: [] metricRelabelings: []
interval: "" interval: ""
scrapeTimeout: 10s scrapeTimeout: 10s
## prometheus.monitor.apiVersion ApiVersion for the serviceMonitor Resource(defaults to "monitoring.coreos.com/v1")
apiVersion: ""
## Customize the updateStrategy if set ## Customize the updateStrategy if set
updateStrategy: updateStrategy:

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
@ -46,7 +46,7 @@ spec:
properties: properties:
inhibitRules: inhibitRules:
description: List of inhibition rules. The rules will only apply to description: List of inhibition rules. The rules will only apply to
alerts matching the resources namespace. alerts matching the resource's namespace.
items: items:
description: InhibitRule defines an inhibition rule that allows description: InhibitRule defines an inhibition rule that allows
to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule
@ -60,7 +60,7 @@ spec:
sourceMatch: sourceMatch:
description: Matchers for which one or more alerts have to exist description: Matchers for which one or more alerts have to exist
for the inhibition to take effect. The operator enforces that for the inhibition to take effect. The operator enforces that
the alert matches the resources namespace. the alert matches the resource's namespace.
items: items:
description: Matcher defines how to match on alert's labels. description: Matcher defines how to match on alert's labels.
properties: properties:
@ -93,7 +93,7 @@ spec:
targetMatch: targetMatch:
description: Matchers that have to be fulfilled in the alerts description: Matchers that have to be fulfilled in the alerts
to be muted. The operator enforces that the alert matches to be muted. The operator enforces that the alert matches
the resources namespace. the resource's namespace.
items: items:
description: Matcher defines how to match on alert's labels. description: Matcher defines how to match on alert's labels.
properties: properties:
@ -1779,7 +1779,7 @@ spec:
type: string type: string
token: token:
description: The secret's key that contains the registered description: The secret's key that contains the registered
applications API token, see https://pushover.net/apps. application's API token, see https://pushover.net/apps.
The secret needs to be in the same namespace as the The secret needs to be in the same namespace as the
AlertmanagerConfig object and accessible by the Prometheus AlertmanagerConfig object and accessible by the Prometheus
Operator. Operator.
@ -1809,7 +1809,7 @@ spec:
type: string type: string
userKey: userKey:
description: The secret's key that contains the recipient description: The secret's key that contains the recipient
users user key. The secret needs to be in the same user's user key. The secret needs to be in the same
namespace as the AlertmanagerConfig object and accessible namespace as the AlertmanagerConfig object and accessible
by the Prometheus Operator. by the Prometheus Operator.
properties: properties:
@ -4380,7 +4380,7 @@ spec:
type: array type: array
route: route:
description: The Alertmanager route definition for alerts matching description: The Alertmanager route definition for alerts matching
the resources namespace. If present, it will be added to the generated the resource's namespace. If present, it will be added to the generated
Alertmanager configuration as a first-level route. Alertmanager configuration as a first-level route.
properties: properties:
continue: continue:
@ -4406,7 +4406,7 @@ spec:
Example: "30s"' Example: "30s"'
type: string type: string
matchers: matchers:
description: 'List of matchers that the alerts labels should description: 'List of matchers that the alert''s labels should
match. For the first level route, the operator removes any existing match. For the first level route, the operator removes any existing
equality and regexp matcher on the `namespace` label and adds equality and regexp matcher on the `namespace` label and adds
a `namespace: <object namespace>` matcher.' a `namespace: <object namespace>` matcher.'

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
@ -25,13 +25,18 @@ spec:
jsonPath: .spec.version jsonPath: .spec.version
name: Version name: Version
type: string type: string
- description: The desired replicas number of Alertmanagers - description: The number of desired replicas
jsonPath: .spec.replicas jsonPath: .spec.replicas
name: Replicas name: Replicas
type: integer type: integer
- jsonPath: .metadata.creationTimestamp - jsonPath: .metadata.creationTimestamp
name: Age name: Age
type: date type: date
- description: Whether the resource reconciliation is paused or not
jsonPath: .status.paused
name: Paused
priority: 1
type: boolean
name: v1 name: v1
schema: schema:
openAPIV3Schema: openAPIV3Schema:
@ -1342,6 +1347,51 @@ spec:
and inhibition rules. and inhibition rules.
minLength: 1 minLength: 1
type: string type: string
templates:
description: Custom notification templates.
items:
description: SecretOrConfigMap allows to specify data as a Secret
or ConfigMap. Fields are mutually exclusive.
properties:
configMap:
description: ConfigMap containing data to use for the targets.
properties:
key:
description: The key to select.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the ConfigMap or its key
must be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
secret:
description: Secret containing data to use for the targets.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
x-kubernetes-map-type: atomic
type: object
type: array
type: object type: object
baseImage: baseImage:
description: 'Base image that is used to deploy pods, without tag. description: 'Base image that is used to deploy pods, without tag.
@ -1367,14 +1417,17 @@ spec:
configMaps: configMaps:
description: ConfigMaps is a list of ConfigMaps in the same namespace description: ConfigMaps is a list of ConfigMaps in the same namespace
as the Alertmanager object, which shall be mounted into the Alertmanager as the Alertmanager object, which shall be mounted into the Alertmanager
Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>. Pods. Each ConfigMap is added to the StatefulSet definition as a
volume named `configmap-<configmap-name>`. The ConfigMaps are mounted
into `/etc/alertmanager/configmaps/<configmap-name>` in the 'alertmanager'
container.
items: items:
type: string type: string
type: array type: array
configSecret: configSecret:
description: "ConfigSecret is the name of a Kubernetes Secret in the description: "ConfigSecret is the name of a Kubernetes Secret in the
same namespace as the Alertmanager object, which contains the configuration same namespace as the Alertmanager object, which contains the configuration
for this Alertmanager instance. If empty, it defaults to 'alertmanager-<alertmanager-name>'. for this Alertmanager instance. If empty, it defaults to `alertmanager-<alertmanager-name>`.
\n The Alertmanager configuration should be available under the \n The Alertmanager configuration should be available under the
`alertmanager.yaml` key. Additional keys from the original secret `alertmanager.yaml` key. Additional keys from the original secret
are copied to the generated secret. \n If either the secret or the are copied to the generated secret. \n If either the secret or the
@ -4019,7 +4072,9 @@ spec:
secrets: secrets:
description: Secrets is a list of Secrets in the same namespace as description: Secrets is a list of Secrets in the same namespace as
the Alertmanager object, which shall be mounted into the Alertmanager the Alertmanager object, which shall be mounted into the Alertmanager
Pods. The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>. Pods. Each Secret is added to the StatefulSet definition as a volume
named `secret-<secret-name>`. The Secrets are mounted into `/etc/alertmanager/secrets/<secret-name>`
in the 'alertmanager' container.
items: items:
type: string type: string
type: array type: array

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
@ -187,6 +187,10 @@ spec:
enableHttp2: enableHttp2:
description: Whether to enable HTTP2. description: Whether to enable HTTP2.
type: boolean type: boolean
filterRunning:
description: 'Drop pods that are not running. (Failed, Succeeded).
Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase'
type: boolean
followRedirects: followRedirects:
description: FollowRedirects configures whether scrape requests description: FollowRedirects configures whether scrape requests
follow HTTP 3xx redirects. follow HTTP 3xx redirects.

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
@ -26,13 +26,28 @@ spec:
jsonPath: .spec.version jsonPath: .spec.version
name: Version name: Version
type: string type: string
- description: The desired replicas number of Prometheuses - description: The number of desired replicas
jsonPath: .spec.replicas jsonPath: .spec.replicas
name: Replicas name: Desired
type: integer type: integer
- description: The number of ready replicas
jsonPath: .status.availableReplicas
name: Ready
type: integer
- jsonPath: .status.conditions[?(@.type == 'Reconciled')].status
name: Reconciled
type: string
- jsonPath: .status.conditions[?(@.type == 'Available')].status
name: Available
type: string
- jsonPath: .metadata.creationTimestamp - jsonPath: .metadata.creationTimestamp
name: Age name: Age
type: date type: date
- description: Whether the resource reconciliation is paused or not
jsonPath: .status.paused
name: Paused
priority: 1
type: boolean
name: v1 name: v1
schema: schema:
openAPIV3Schema: openAPIV3Schema:
@ -115,8 +130,8 @@ spec:
the Prometheus container. It is intended for e.g. activating hidden the Prometheus container. It is intended for e.g. activating hidden
flags which are not supported by the dedicated configuration options flags which are not supported by the dedicated configuration options
yet. The arguments are passed as-is to the Prometheus container yet. The arguments are passed as-is to the Prometheus container
which may cause issues if they are invalid or not supporeted by which may cause issues if they are invalid or not supported by the
the given Prometheus version. In case of an argument conflict (e.g. given Prometheus version. In case of an argument conflict (e.g.
an argument which is already set by the operator itself) or when an argument which is already set by the operator itself) or when
providing an invalid argument the reconciliation will fail and an providing an invalid argument the reconciliation will fail and an
error will be logged. error will be logged.
@ -1460,7 +1475,10 @@ spec:
configMaps: configMaps:
description: ConfigMaps is a list of ConfigMaps in the same namespace description: ConfigMaps is a list of ConfigMaps in the same namespace
as the Prometheus object, which shall be mounted into the Prometheus as the Prometheus object, which shall be mounted into the Prometheus
Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>. Pods. Each ConfigMap is added to the StatefulSet definition as a
volume named `configmap-<configmap-name>`. The ConfigMaps are mounted
into /etc/prometheus/configmaps/<configmap-name> in the 'prometheus'
container.
items: items:
type: string type: string
type: array type: array
@ -2872,6 +2890,12 @@ spec:
x-kubernetes-list-map-keys: x-kubernetes-list-map-keys:
- ip - ip
x-kubernetes-list-type: map x-kubernetes-list-type: map
hostNetwork:
description: Use the host's network namespace if true. Make sure to
understand the security implications if you want to enable it. When
hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet
automatically.
type: boolean
ignoreNamespaceSelectors: ignoreNamespaceSelectors:
description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector
settings from all PodMonitor, ServiceMonitor and Probe objects. settings from all PodMonitor, ServiceMonitor and Probe objects.
@ -5515,7 +5539,9 @@ spec:
secrets: secrets:
description: Secrets is a list of Secrets in the same namespace as description: Secrets is a list of Secrets in the same namespace as
the Prometheus object, which shall be mounted into the Prometheus the Prometheus object, which shall be mounted into the Prometheus
Pods. The Secrets are mounted into /etc/prometheus/secrets/<secret-name>. Pods. Each Secret is added to the StatefulSet definition as a volume
named `secret-<secret-name>`. The Secrets are mounted into /etc/prometheus/secrets/<secret-name>
in the 'prometheus' container.
items: items:
type: string type: string
type: array type: array
@ -6376,7 +6402,7 @@ spec:
description: AdditionalArgs allows setting additional arguments description: AdditionalArgs allows setting additional arguments
for the Thanos container. The arguments are passed as-is to for the Thanos container. The arguments are passed as-is to
the Thanos container which may cause issues if they are invalid the Thanos container which may cause issues if they are invalid
or not supporeted the given Thanos version. In case of an argument or not supported the given Thanos version. In case of an argument
conflict (e.g. an argument which is already set by the operator conflict (e.g. an argument which is already set by the operator
itself) or when providing an invalid argument the reconciliation itself) or when providing an invalid argument the reconciliation
will fail and an error will be logged. will fail and an error will be logged.
@ -6399,9 +6425,14 @@ spec:
description: 'Thanos base image if other than default. Deprecated: description: 'Thanos base image if other than default. Deprecated:
use ''image'' instead' use ''image'' instead'
type: string type: string
grpcListenLocal:
description: If true, the Thanos sidecar listens on the loopback
interface for the gRPC endpoints. It has no effect if `listenLocal`
is true.
type: boolean
grpcServerTlsConfig: grpcServerTlsConfig:
description: 'GRPCServerTLSConfig configures the gRPC server from description: 'GRPCServerTLSConfig configures the TLS parameters
which Thanos Querier reads recorded rule data. Note: Currently for the gRPC server providing the StoreAPI. Note: Currently
only the CAFile, CertFile, and KeyFile fields are supported. only the CAFile, CertFile, and KeyFile fields are supported.
Maps to the ''--grpc-server-tls-*'' CLI args.' Maps to the ''--grpc-server-tls-*'' CLI args.'
properties: properties:
@ -6534,6 +6565,11 @@ spec:
description: Used to verify the hostname for the targets. description: Used to verify the hostname for the targets.
type: string type: string
type: object type: object
httpListenLocal:
description: If true, the Thanos sidecar listens on the loopback
interface for the HTTP endpoints. It has no effect if `listenLocal`
is true.
type: boolean
image: image:
description: Image if specified has precedence over baseImage, description: Image if specified has precedence over baseImage,
tag and sha combinations. Specifying the version is still necessary tag and sha combinations. Specifying the version is still necessary
@ -6541,8 +6577,10 @@ spec:
is being configured. is being configured.
type: string type: string
listenLocal: listenLocal:
description: ListenLocal makes the Thanos sidecar listen on loopback, description: 'If true, the Thanos sidecar listens on the loopback
so that it does not bind against the Pod IP. interface for the HTTP and gRPC endpoints. It takes precedence
over `grpcListenLocal` and `httpListenLocal`. Deprecated: use
`grpcListenLocal` and `httpListenLocal` instead.'
type: boolean type: boolean
logFormat: logFormat:
description: LogFormat for Thanos sidecar to be configured with. description: LogFormat for Thanos sidecar to be configured with.
@ -6919,6 +6957,20 @@ spec:
- whenUnsatisfiable - whenUnsatisfiable
type: object type: object
type: array type: array
tsdb:
description: Defines the runtime reloadable configuration of the timeseries
database (TSDB).
properties:
outOfOrderTimeWindow:
description: Configures how old an out-of-order/out-of-bounds
sample can be w.r.t. the TSDB max time. An out-of-order/out-of-bounds
sample is ingested into the TSDB as long as the timestamp of
the sample is >= (TSDB.MaxTime - outOfOrderTimeWindow). Out
of order ingestion is an experimental feature and requires Prometheus
>= v2.39.0.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
type: object
version: version:
description: Version of Prometheus to be deployed. description: Version of Prometheus to be deployed.
type: string type: string
@ -8711,6 +8763,14 @@ spec:
description: Human-readable message indicating details for the description: Human-readable message indicating details for the
condition's last transition. condition's last transition.
type: string type: string
observedGeneration:
description: ObservedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
type: integer
reason: reason:
description: Reason for the condition's last transition. description: Reason for the condition's last transition.
type: string type: string

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition

View File

@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml # https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
--- ---
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
@ -21,13 +21,18 @@ spec:
scope: Namespaced scope: Namespaced
versions: versions:
- additionalPrinterColumns: - additionalPrinterColumns:
- description: The desired replicas number of Thanos Rulers - description: The number of desired replicas
jsonPath: .spec.replicas jsonPath: .spec.replicas
name: Replicas name: Replicas
type: integer type: integer
- jsonPath: .metadata.creationTimestamp - jsonPath: .metadata.creationTimestamp
name: Age name: Age
type: date type: date
- description: Whether the resource reconciliation is paused or not
jsonPath: .status.paused
name: Paused
priority: 1
type: boolean
name: v1 name: v1
schema: schema:
openAPIV3Schema: openAPIV3Schema:

View File

@ -89,7 +89,7 @@ spec:
{{- end }} {{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.storage }} {{- if .Values.alertmanager.alertmanagerSpec.storage }}
storage: storage:
{{ toYaml .Values.alertmanager.alertmanagerSpec.storage | indent 4 }} {{ tpl (toYaml .Values.alertmanager.alertmanagerSpec.storage | indent 4) . }}
{{- end }} {{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.podMetadata }} {{- if .Values.alertmanager.alertmanagerSpec.podMetadata }}
podMetadata: podMetadata:
@ -162,4 +162,7 @@ spec:
{{- if .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }} {{- if .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }}
forceEnableClusterMode: {{ .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }} forceEnableClusterMode: {{ .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }}
{{- end }} {{- end }}
{{- if .Values.alertmanager.alertmanagerSpec.minReadySeconds }}
minReadySeconds: {{ .Values.alertmanager.alertmanagerSpec.minReadySeconds }}
{{- end }}
{{- end }} {{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
kind: Role kind: Role
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
@ -19,3 +20,4 @@ rules:
resourceNames: resourceNames:
- {{ template "kube-prometheus-stack.fullname" . }}-alertmanager - {{ template "kube-prometheus-stack.fullname" . }}-alertmanager
{{- end }} {{- end }}
{{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding kind: RoleBinding
metadata: metadata:
@ -16,3 +17,4 @@ subjects:
name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }} name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }}
namespace: {{ template "kube-prometheus-stack.namespace" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }} {{- end }}
{{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
apiVersion: policy/v1beta1 apiVersion: policy/v1beta1
kind: PodSecurityPolicy kind: PodSecurityPolicy
metadata: metadata:
@ -43,4 +44,4 @@ spec:
max: 65535 max: 65535
readOnlyRootFilesystem: false readOnlyRootFilesystem: false
{{- end }} {{- end }}
{{- end }}

View File

@ -27,6 +27,9 @@ spec:
{{- if .Values.alertmanager.serviceMonitor.scheme }} {{- if .Values.alertmanager.serviceMonitor.scheme }}
scheme: {{ .Values.alertmanager.serviceMonitor.scheme }} scheme: {{ .Values.alertmanager.serviceMonitor.scheme }}
{{- end }} {{- end }}
{{- if .Values.alertmanager.serviceMonitor.enableHttp2 }}
enableHttp2: {{ .Values.alertmanager.serviceMonitor.enableHttp2 }}
{{- end }}
{{- if .Values.alertmanager.serviceMonitor.bearerTokenFile }} {{- if .Values.alertmanager.serviceMonitor.bearerTokenFile }}
bearerTokenFile: {{ .Values.alertmanager.serviceMonitor.bearerTokenFile }} bearerTokenFile: {{ .Values.alertmanager.serviceMonitor.bearerTokenFile }}
{{- end }} {{- end }}

View File

@ -7,6 +7,9 @@ metadata:
annotations: annotations:
"helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook": pre-install,pre-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
{{- with .Values.prometheusOperator.admissionWebhooks.annotations }}
{{ toYaml . | indent 4 }}
{{- end }}
labels: labels:
app: {{ template "kube-prometheus-stack.name" $ }}-admission-create app: {{ template "kube-prometheus-stack.name" $ }}-admission-create
{{- include "kube-prometheus-stack.labels" $ | indent 4 }} {{- include "kube-prometheus-stack.labels" $ | indent 4 }}

View File

@ -7,6 +7,9 @@ metadata:
annotations: annotations:
"helm.sh/hook": post-install,post-upgrade "helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
{{- with .Values.prometheusOperator.admissionWebhooks.patch.annotations }}
{{ toYaml . | indent 4 }}
{{- end }}
labels: labels:
app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch
{{- include "kube-prometheus-stack.labels" $ | indent 4 }} {{- include "kube-prometheus-stack.labels" $ | indent 4 }}

View File

@ -9,6 +9,9 @@ metadata:
labels: labels:
app: {{ template "kube-prometheus-stack.name" . }}-operator app: {{ template "kube-prometheus-stack.name" . }}-operator
{{ include "kube-prometheus-stack.labels" . | indent 4 }} {{ include "kube-prometheus-stack.labels" . | indent 4 }}
{{- if .Values.prometheusOperator.labels }}
{{ toYaml .Values.prometheusOperator.labels | indent 4 }}
{{- end }}
{{- if .Values.prometheusOperator.annotations }} {{- if .Values.prometheusOperator.annotations }}
annotations: annotations:
{{ toYaml .Values.prometheusOperator.annotations | indent 4 }} {{ toYaml .Values.prometheusOperator.annotations | indent 4 }}
@ -54,14 +57,19 @@ spec:
- --log-level={{ .Values.prometheusOperator.logLevel }} - --log-level={{ .Values.prometheusOperator.logLevel }}
{{- end }} {{- end }}
{{- if .Values.prometheusOperator.denyNamespaces }} {{- if .Values.prometheusOperator.denyNamespaces }}
- --deny-namespaces={{ .Values.prometheusOperator.denyNamespaces | join "," }} - --deny-namespaces={{ tpl (.Values.prometheusOperator.denyNamespaces | join ",") $ }}
{{- end }} {{- end }}
{{- with $.Values.prometheusOperator.namespaces }} {{- with $.Values.prometheusOperator.namespaces }}
{{ $ns := default (list nil) .additional }} {{- $namespaces := list }}
{{- if .releaseNamespace }} {{- if .releaseNamespace }}
{{- $ns = append $ns $namespace }} {{- $namespaces = append $namespaces $namespace }}
{{- end }} {{- end }}
- --namespaces={{ $ns | join "," }} {{- if .additional }}
{{- range $ns := .additional }}
{{- $namespaces = append $namespaces (tpl $ns $) }}
{{- end }}
{{- end }}
- --namespaces={{ $namespaces | mustUniq | join "," }}
{{- end }} {{- end }}
- --localhost=127.0.0.1 - --localhost=127.0.0.1
{{- if .Values.prometheusOperator.prometheusDefaultBaseImage }} {{- if .Values.prometheusOperator.prometheusDefaultBaseImage }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
kind: ClusterRole kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
@ -18,3 +19,4 @@ rules:
resourceNames: resourceNames:
- {{ template "kube-prometheus-stack.fullname" . }}-operator - {{ template "kube-prometheus-stack.fullname" . }}-operator
{{- end }} {{- end }}
{{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
kind: ClusterRoleBinding kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
@ -15,3 +16,4 @@ subjects:
name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }}
namespace: {{ template "kube-prometheus-stack.namespace" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }} {{- end }}
{{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
apiVersion: policy/v1beta1 apiVersion: policy/v1beta1
kind: PodSecurityPolicy kind: PodSecurityPolicy
metadata: metadata:
@ -43,3 +44,4 @@ spec:
max: 65535 max: 65535
readOnlyRootFilesystem: false readOnlyRootFilesystem: false
{{- end }} {{- end }}
{{- end }}

View File

@ -0,0 +1,35 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.prometheusOperator.verticalPodAutoscaler.enabled) }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-operator
namespace: {{ template "kube-prometheus-stack.namespace" . }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-operator
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
resourcePolicy:
containerPolicies:
- containerName: {{ template "kube-prometheus-stack.name" . }}
{{- if .Values.prometheusOperator.verticalPodAutoscaler.controlledResources }}
controlledResources: {{ .Values.prometheusOperator.verticalPodAutoscaler.controlledResources }}
{{- end }}
{{- if .Values.prometheusOperator.verticalPodAutoscaler.maxAllowed }}
maxAllowed:
{{ toYaml .Values.prometheusOperator.verticalPodAutoscaler.maxAllowed | nindent 8 }}
{{- end }}
{{- if .Values.prometheusOperator.verticalPodAutoscaler.minAllowed }}
minAllowed:
{{ toYaml .Values.prometheusOperator.verticalPodAutoscaler.minAllowed | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "kube-prometheus-stack.fullname" . }}
{{- if .Values.prometheusOperator.verticalPodAutoscaler.updatePolicy }}
updatePolicy:
{{- if .Values.prometheusOperator.verticalPodAutoscaler.updatePolicy.updateMode }}
updateMode: {{ .Values.prometheusOperator.verticalPodAutoscaler.updatePolicy.updateMode }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -46,6 +46,10 @@ spec:
sha: {{ .Values.prometheus.prometheusSpec.image.sha }} sha: {{ .Values.prometheus.prometheusSpec.image.sha }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if .Values.prometheus.prometheusSpec.additionalArgs }}
additionalArgs:
{{ toYaml .Values.prometheus.prometheusSpec.additionalArgs | indent 4}}
{{- end -}}
{{- if .Values.prometheus.prometheusSpec.externalLabels }} {{- if .Values.prometheus.prometheusSpec.externalLabels }}
externalLabels: externalLabels:
{{ tpl (toYaml .Values.prometheus.prometheusSpec.externalLabels | indent 4) . }} {{ tpl (toYaml .Values.prometheus.prometheusSpec.externalLabels | indent 4) . }}
@ -217,7 +221,7 @@ spec:
{{- end }} {{- end }}
{{- if .Values.prometheus.prometheusSpec.storageSpec }} {{- if .Values.prometheus.prometheusSpec.storageSpec }}
storage: storage:
{{ toYaml .Values.prometheus.prometheusSpec.storageSpec | indent 4 }} {{ tpl (toYaml .Values.prometheus.prometheusSpec.storageSpec | indent 4) . }}
{{- end }} {{- end }}
{{- if .Values.prometheus.prometheusSpec.podMetadata }} {{- if .Values.prometheus.prometheusSpec.podMetadata }}
podMetadata: podMetadata:
@ -239,7 +243,7 @@ spec:
labelSelector: labelSelector:
matchExpressions: matchExpressions:
- {key: app.kubernetes.io/name, operator: In, values: [prometheus]} - {key: app.kubernetes.io/name, operator: In, values: [prometheus]}
- {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-prometheus]} - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . }}]}
{{- else if eq .Values.prometheus.prometheusSpec.podAntiAffinity "soft" }} {{- else if eq .Values.prometheus.prometheusSpec.podAntiAffinity "soft" }}
podAntiAffinity: podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution: preferredDuringSchedulingIgnoredDuringExecution:
@ -249,7 +253,7 @@ spec:
labelSelector: labelSelector:
matchExpressions: matchExpressions:
- {key: app.kubernetes.io/name, operator: In, values: [prometheus]} - {key: app.kubernetes.io/name, operator: In, values: [prometheus]}
- {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-prometheus]} - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . }}]}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if .Values.prometheus.prometheusSpec.tolerations }} {{- if .Values.prometheus.prometheusSpec.tolerations }}
@ -380,4 +384,7 @@ spec:
{{- if .Values.prometheus.prometheusSpec.allowOverlappingBlocks }} {{- if .Values.prometheus.prometheusSpec.allowOverlappingBlocks }}
allowOverlappingBlocks: {{ .Values.prometheus.prometheusSpec.allowOverlappingBlocks }} allowOverlappingBlocks: {{ .Values.prometheus.prometheusSpec.allowOverlappingBlocks }}
{{- end }} {{- end }}
{{- if .Values.prometheus.prometheusSpec.minReadySeconds }}
minReadySeconds: {{ .Values.prometheus.prometheusSpec.minReadySeconds }}
{{- end }}
{{- end }} {{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
kind: ClusterRole kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
@ -18,3 +19,4 @@ rules:
resourceNames: resourceNames:
- {{ template "kube-prometheus-stack.fullname" . }}-prometheus - {{ template "kube-prometheus-stack.fullname" . }}-prometheus
{{- end }} {{- end }}
{{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
metadata: metadata:
@ -15,4 +16,4 @@ subjects:
name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }}
namespace: {{ template "kube-prometheus-stack.namespace" . }} namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }} {{- end }}
{{- end }}

View File

@ -1,4 +1,5 @@
{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }}
{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }}
apiVersion: policy/v1beta1 apiVersion: policy/v1beta1
kind: PodSecurityPolicy kind: PodSecurityPolicy
metadata: metadata:
@ -54,3 +55,4 @@ spec:
{{ toYaml .Values.prometheus.podSecurityPolicy.allowedHostPaths | indent 4 }} {{ toYaml .Values.prometheus.podSecurityPolicy.allowedHostPaths | indent 4 }}
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- end }}

View File

@ -78,6 +78,10 @@ spec:
queryEndpoints: queryEndpoints:
{{ toYaml .Values.thanosRuler.thanosRulerSpec.queryEndpoints | indent 4 }} {{ toYaml .Values.thanosRuler.thanosRulerSpec.queryEndpoints | indent 4 }}
{{- end }} {{- end }}
{{- if .Values.thanosRuler.thanosRulerSpec.queryConfig }}
queryConfig:
{{ toYaml .Values.thanosRuler.thanosRulerSpec.queryConfig | indent 4 }}
{{- end }}
{{- if .Values.thanosRuler.thanosRulerSpec.resources }} {{- if .Values.thanosRuler.thanosRulerSpec.resources }}
resources: resources:
{{ toYaml .Values.thanosRuler.thanosRulerSpec.resources | indent 4 }} {{ toYaml .Values.thanosRuler.thanosRulerSpec.resources | indent 4 }}

View File

@ -51,7 +51,8 @@ defaultRules:
kubernetesResources: true kubernetesResources: true
kubernetesStorage: true kubernetesStorage: true
kubernetesSystem: true kubernetesSystem: true
kubeScheduler: true kubeSchedulerAlerting: true
kubeSchedulerRecording: true
kubeStateMetrics: true kubeStateMetrics: true
network: true network: true
node: true node: true
@ -415,6 +416,10 @@ alertmanager:
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
scheme: "" scheme: ""
## enableHttp2: Whether to enable HTTP2.
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
enableHttp2: false
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
tlsConfig: {} tlsConfig: {}
@ -686,6 +691,10 @@ alertmanager:
## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
forceEnableClusterMode: false forceEnableClusterMode: false
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
minReadySeconds: 0
## ExtraSecret can be used to store various data in an extra secret ## ExtraSecret can be used to store various data in an extra secret
## (use it for example to store hashed basic auth credentials) ## (use it for example to store hashed basic auth credentials)
extraSecret: extraSecret:
@ -1285,8 +1294,8 @@ kubeEtcd:
## ##
service: service:
enabled: true enabled: true
port: 2379 port: 2381
targetPort: 2379 targetPort: 2381
# selector: # selector:
# component: etcd # component: etcd
@ -1607,6 +1616,9 @@ prometheusOperator:
## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
## certs ahead of time if you wish. ## certs ahead of time if you wish.
## ##
annotations: {}
# argocd.argoproj.io/hook: PreSync
# argocd.argoproj.io/hook-delete-policy: HookSucceeded
patch: patch:
enabled: true enabled: true
image: image:
@ -1618,6 +1630,9 @@ prometheusOperator:
## Provide a priority class name to the webhook patching job ## Provide a priority class name to the webhook patching job
## ##
priorityClassName: "" priorityClassName: ""
annotations: {}
# argocd.argoproj.io/hook: PreSync
# argocd.argoproj.io/hook-delete-policy: HookSucceeded
podAnnotations: {} podAnnotations: {}
nodeSelector: {} nodeSelector: {}
affinity: {} affinity: {}
@ -1723,6 +1738,10 @@ prometheusOperator:
## ##
externalIPs: [] externalIPs: []
# ## Labels to add to the operator deployment
# ##
labels: {}
## Annotations to add to the operator deployment ## Annotations to add to the operator deployment
## ##
annotations: {} annotations: {}
@ -1846,11 +1865,31 @@ prometheusOperator:
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
readOnlyRootFilesystem: true readOnlyRootFilesystem: true
# Enable vertical pod autoscaler support for prometheus-operator
verticalPodAutoscaler:
enabled: false
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
updatePolicy:
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
## Prometheus-operator image ## Prometheus-operator image
## ##
image: image:
repository: quay.io/prometheus-operator/prometheus-operator repository: quay.io/prometheus-operator/prometheus-operator
tag: v0.59.1 tag: v0.60.1
sha: "" sha: ""
pullPolicy: IfNotPresent pullPolicy: IfNotPresent
@ -1867,7 +1906,7 @@ prometheusOperator:
prometheusConfigReloader: prometheusConfigReloader:
image: image:
repository: quay.io/prometheus-operator/prometheus-config-reloader repository: quay.io/prometheus-operator/prometheus-config-reloader
tag: v0.59.1 tag: v0.60.1
sha: "" sha: ""
# resource config for prometheusConfigReloader # resource config for prometheusConfigReloader
@ -1883,7 +1922,7 @@ prometheusOperator:
## ##
thanosImage: thanosImage:
repository: quay.io/thanos/thanos repository: quay.io/thanos/thanos
tag: v0.28.0 tag: v0.28.1
sha: "" sha: ""
## Set a Field Selector to filter watched secrets ## Set a Field Selector to filter watched secrets
@ -2271,6 +2310,10 @@ prometheus:
## ##
apiserverConfig: {} apiserverConfig: {}
## Allows setting additional arguments for the Prometheus container
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
additionalArgs: []
## Interval between consecutive scrapes. ## Interval between consecutive scrapes.
## Defaults to 30s. ## Defaults to 30s.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
@ -2316,7 +2359,7 @@ prometheus:
## ##
image: image:
repository: quay.io/prometheus/prometheus repository: quay.io/prometheus/prometheus
tag: v2.38.0 tag: v2.39.1
sha: "" sha: ""
## Tolerations for use with node taints ## Tolerations for use with node taints
@ -2856,6 +2899,10 @@ prometheus:
## in Prometheus so it may change in any upcoming release. ## in Prometheus so it may change in any upcoming release.
allowOverlappingBlocks: false allowOverlappingBlocks: false
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
minReadySeconds: 0
additionalRulesForClusterRole: [] additionalRulesForClusterRole: []
# - apiGroups: [ "" ] # - apiGroups: [ "" ]
# resources: # resources:
@ -3147,7 +3194,7 @@ thanosRuler:
## ##
image: image:
repository: quay.io/thanos/thanos repository: quay.io/thanos/thanos
tag: v0.28.0 tag: v0.28.1
sha: "" sha: ""
## Namespaces to be selected for PrometheusRules discovery. ## Namespaces to be selected for PrometheusRules discovery.
@ -3249,6 +3296,14 @@ thanosRuler:
## When used alongside with ObjectStorageConfig, ObjectStorageConfigFile takes precedence. ## When used alongside with ObjectStorageConfig, ObjectStorageConfigFile takes precedence.
objectStorageConfigFile: "" objectStorageConfigFile: ""
## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
## Maps to the --query flag of thanos ruler.
queryEndpoints: []
## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
queryConfig: {}
## Labels configure the external label pairs to ThanosRuler. A default replica ## Labels configure the external label pairs to ThanosRuler. A default replica
## label `thanos_ruler_replica` will be always added as a label with the value ## label `thanos_ruler_replica` will be always added as a label with the value
## of the pod's name and it will be dropped in the alerts. ## of the pod's name and it will be dropped in the alerts.

View File

@ -13,4 +13,4 @@ maintainers:
name: prometheus-pushgateway name: prometheus-pushgateway
sources: sources:
- https://github.com/prometheus/pushgateway - https://github.com/prometheus/pushgateway
version: 1.18.2 version: 1.20.0

View File

@ -14,6 +14,6 @@
echo http://$SERVICE_IP:{{ .Values.service.port }} echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }} {{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus-pushgateway.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus-pushgateway.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application" echo "Visit http://127.0.0.1:9091 to use your application"
kubectl port-forward $POD_NAME 8080:80 kubectl port-forward $POD_NAME 9091
{{- end }} {{- end }}

View File

@ -76,6 +76,10 @@ Returns pod spec
{{- if .Values.imagePullSecrets }} {{- if .Values.imagePullSecrets }}
imagePullSecrets: imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }} {{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.extraInitContainers }}
initContainers:
{{ toYaml .Values.extraInitContainers | nindent 8 }}
{{- end }} {{- end }}
containers: containers:
{{- if .Values.extraContainers }} {{- if .Values.extraContainers }}
@ -128,6 +132,10 @@ Returns pod spec
{{- if .Values.affinity }} {{- if .Values.affinity }}
affinity: affinity:
{{ toYaml .Values.affinity | indent 8 }} {{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{ toYaml .Values.topologySpreadConstraints | indent 8 }}
{{- end }} {{- end }}
{{- if .Values.securityContext }} {{- if .Values.securityContext }}
securityContext: securityContext:

View File

@ -56,6 +56,10 @@ extraVars: []
## - --persistence.interval=5m ## - --persistence.interval=5m
extraArgs: [] extraArgs: []
## Additional InitContainers to initialize the pod
##
extraInitContainers: []
# Optional additional containers (sidecar) # Optional additional containers (sidecar)
extraContainers: [] extraContainers: []
# - name: oAuth2-proxy # - name: oAuth2-proxy
@ -184,6 +188,10 @@ containerSecurityContext: {}
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {} affinity: {}
## Topology spread constraints for pods
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# Enable this if you're using https://github.com/coreos/prometheus-operator # Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor: serviceMonitor:
enabled: false enabled: false
@ -282,10 +290,10 @@ persistentVolume:
## ##
subPath: "" subPath: ""
extraVolumes: {} extraVolumes: []
# - name: extra # - name: extra
# emptyDir: {} # emptyDir: {}
extraVolumeMounts: {} extraVolumeMounts: []
# - name: extra # - name: extra
# mountPath: /usr/share/extras # mountPath: /usr/share/extras
# readOnly: true # readOnly: true

View File

@ -18,7 +18,7 @@
"subdir": "contrib/mixin" "subdir": "contrib/mixin"
} }
}, },
"version": "19002cfc689fba2b8f56605e5797bf79f8b61fdd", "version": "62169d12ebf61cc58e8d781eb201d9416487e4a0",
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc=" "sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
}, },
{ {
@ -48,7 +48,7 @@
"subdir": "grafana-builder" "subdir": "grafana-builder"
} }
}, },
"version": "dbf6fc14105c28b6fd0253005f7ca2da37d3d4e1", "version": "d73aff453c9784cd6922119f3ce33d8d355a79e1",
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0=" "sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
}, },
{ {
@ -68,7 +68,7 @@
"subdir": "lib/promgrafonnet" "subdir": "lib/promgrafonnet"
} }
}, },
"version": "5e44626d70c2bf2d35c37f3fee5a6261a5335cc6", "version": "05a58f765eda05902d4f7dd22098a2b870f7ca1e",
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps=" "sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
}, },
{ {

View File

@ -1,9 +1,11 @@
#!/bin/bash #!/bin/bash
set -ex set -ex
VERSION=$(yq eval '.dependencies[] | select(.name=="kube-prometheus-stack") | .version' Chart.yaml)
PG_VER=$(yq eval '.dependencies[] | select(.name=="prometheus-pushgateway") | .version' Chart.yaml) PG_VER=$(yq eval '.dependencies[] | select(.name=="prometheus-pushgateway") | .version' Chart.yaml)
helm repo update
VERSION=$(yq eval '.dependencies[] | select(.name=="kube-prometheus-stack") | .version' Chart.yaml)
rm -rf charts/kube-prometheus-stack rm -rf charts/kube-prometheus-stack
helm pull prometheus-community/kube-prometheus-stack --untar --untardir charts --version $VERSION helm pull prometheus-community/kube-prometheus-stack --untar --untardir charts --version $VERSION

View File

@ -52,6 +52,8 @@ kube-prometheus-stack:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
resources: resources:
@ -66,6 +68,8 @@ kube-prometheus-stack:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -111,7 +115,6 @@ kube-prometheus-stack:
memory: 4Gi memory: 4Gi
# cpu: "1000m" # cpu: "1000m"
walCompression: true
storageSpec: storageSpec:
volumeClaimTemplate: volumeClaimTemplate:
spec: spec:
@ -120,6 +123,27 @@ kube-prometheus-stack:
resources: resources:
requests: requests:
storage: 16Gi storage: 16Gi
#volumes:
# - name: aws-token
# projected:
# sources:
# - serviceAccountToken:
# path: token
# expirationSeconds: 86400
# audience: "sts.amazonaws.com"
#volumeMounts:
# - name: aws-token
# mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
# readOnly: true
#containers:
# - name: prometheus
# env:
# - name: AWS_ROLE_ARN
# value: "<prometheus IAM ROLE ARN>"
# - name: AWS_WEB_IDENTITY_TOKEN_FILE
# value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
# - name: AWS_STS_REGIONAL_ENDPOINTS
# value: regional
# Custom Grafana tweaks # Custom Grafana tweaks
grafana: grafana:
@ -188,6 +212,8 @@ kube-prometheus-stack:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -301,6 +327,8 @@ prometheus-adapter:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
# Basic rules for HPA to work replacing heaptster, taken from kube-prometheus project # Basic rules for HPA to work replacing heaptster, taken from kube-prometheus project

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network name: kubezero-network
description: KubeZero umbrella chart for all things network description: KubeZero umbrella chart for all things network
type: application type: application
version: 0.3.4 version: 0.4.0
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -29,4 +29,4 @@ dependencies:
- name: calico - name: calico
version: 0.2.2 version: 0.2.2
condition: calico.enabled condition: calico.enabled
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-network # kubezero-network
![Version: 0.3.4](https://img.shields.io/badge/Version-0.3.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things network KubeZero umbrella chart for all things network
@ -14,7 +14,7 @@ KubeZero umbrella chart for all things network
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
@ -47,6 +47,8 @@ Kubernetes: `>= 1.20.0`
| cilium.operator.replicas | int | `1` | | | cilium.operator.replicas | int | `1` | |
| cilium.operator.tolerations[0].effect | string | `"NoSchedule"` | | | cilium.operator.tolerations[0].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.policyEnforcementMode | string | `"never"` | | | cilium.policyEnforcementMode | string | `"never"` | |
| cilium.prometheus.enabled | bool | `false` | | | cilium.prometheus.enabled | bool | `false` | |
| cilium.prometheus.port | int | `9091` | | | cilium.prometheus.port | int | `9091` | |
@ -55,6 +57,8 @@ Kubernetes: `>= 1.20.0`
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | | | metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| metallb.controller.tolerations[1].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| metallb.enabled | bool | `false` | | | metallb.enabled | bool | `false` | |
| metallb.ipAddressPools | list | `[]` | | | metallb.ipAddressPools | list | `[]` | |
| multus.clusterNetwork | string | `"calico"` | | | multus.clusterNetwork | string | `"calico"` | |

View File

@ -605,6 +605,8 @@ spec:
operator: Exists operator: Exists
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
serviceAccountName: calico-kube-controllers serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
containers: containers:

View File

@ -24,6 +24,7 @@ spec:
- operator: Exists - operator: Exists
effect: NoExecute effect: NoExecute
serviceAccountName: multus serviceAccountName: multus
priorityClassName: system-node-critical
containers: containers:
- name: kube-multus - name: kube-multus
image: ghcr.io/k8snetworkplumbingwg/multus-cni:{{ .Values.multus.tag }} image: ghcr.io/k8snetworkplumbingwg/multus-cni:{{ .Values.multus.tag }}

View File

@ -5,6 +5,8 @@ metallb:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -84,6 +86,8 @@ cilium:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""

View File

@ -1,6 +1,6 @@
# kubezero-sql # kubezero-sql
![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for SQL databases like MariaDB, PostgreSQL KubeZero umbrella chart for SQL databases like MariaDB, PostgreSQL
@ -19,7 +19,7 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.2 | | https://charts.bitnami.com/bitnami | mariadb-galera | 7.4.3 |
## Values ## Values
@ -28,7 +28,7 @@ Kubernetes: `>= 1.20.0`
| mariadb-galera.configurationConfigMap | string | `"{{ .Release.Name }}-mariadb-galera-configuration"` | | | mariadb-galera.configurationConfigMap | string | `"{{ .Release.Name }}-mariadb-galera-configuration"` | |
| mariadb-galera.db.password | string | `"12345qwert"` | | | mariadb-galera.db.password | string | `"12345qwert"` | |
| mariadb-galera.db.user | string | `"mariadb"` | | | mariadb-galera.db.user | string | `"mariadb"` | |
| mariadb-galera.enabled | bool | `true` | | | mariadb-galera.enabled | bool | `false` | |
| mariadb-galera.galera.mariabackup.password | string | `"12345qwert"` | | | mariadb-galera.galera.mariabackup.password | string | `"12345qwert"` | |
| mariadb-galera.istio.enabled | bool | `false` | | | mariadb-galera.istio.enabled | bool | `false` | |
| mariadb-galera.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | | | mariadb-galera.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-storage name: kubezero-storage
description: KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini description: KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
type: application type: application
version: 0.7.2 version: 0.7.3
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -35,4 +35,4 @@ dependencies:
version: 2.2.9 version: 2.2.9
condition: aws-efs-csi-driver.enabled condition: aws-efs-csi-driver.enabled
# repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver # repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-storage # kubezero-storage
![Version: 0.7.1](https://img.shields.io/badge/Version-0.7.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
@ -14,12 +14,12 @@ KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, g
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| | aws-ebs-csi-driver | 2.11.0 | | | aws-ebs-csi-driver | 2.12.0 |
| | aws-efs-csi-driver | 2.2.7 | | | aws-efs-csi-driver | 2.2.9 |
| | gemini | 1.0.0 | | | gemini | 1.0.0 |
| | lvm-localpv | 1.0.0 | | | lvm-localpv | 1.0.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
@ -37,13 +37,22 @@ Kubernetes: `>= 1.20.0`
| aws-ebs-csi-driver.controller.resources.requests.memory | string | `"24Mi"` | | | aws-ebs-csi-driver.controller.resources.requests.memory | string | `"24Mi"` | |
| aws-ebs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | | | aws-ebs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | aws-ebs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-ebs-csi-driver.controller.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-ebs-csi-driver.enabled | bool | `false` | | | aws-ebs-csi-driver.enabled | bool | `false` | |
| aws-ebs-csi-driver.node.resources.limits.memory | string | `"32Mi"` | | | aws-ebs-csi-driver.node.resources.limits.memory | string | `"32Mi"` | |
| aws-ebs-csi-driver.node.resources.requests.cpu | string | `"10m"` | | | aws-ebs-csi-driver.node.resources.requests.cpu | string | `"10m"` | |
| aws-ebs-csi-driver.node.resources.requests.memory | string | `"16Mi"` | | | aws-ebs-csi-driver.node.resources.requests.memory | string | `"16Mi"` | |
| aws-ebs-csi-driver.node.tolerateAllTaints | bool | `false` | |
| aws-ebs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | | | aws-ebs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | | | aws-ebs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | |
| aws-ebs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | | | aws-ebs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | |
| aws-ebs-csi-driver.node.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.node.tolerations[1].key | string | `"nvidia.com/gpu"` | |
| aws-ebs-csi-driver.node.tolerations[1].operator | string | `"Exists"` | |
| aws-ebs-csi-driver.node.tolerations[2].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.node.tolerations[2].key | string | `"aws.amazon.com/neuron"` | |
| aws-ebs-csi-driver.node.tolerations[2].operator | string | `"Exists"` | |
| aws-ebs-csi-driver.storageClasses[0].allowVolumeExpansion | bool | `true` | | | aws-ebs-csi-driver.storageClasses[0].allowVolumeExpansion | bool | `true` | |
| aws-ebs-csi-driver.storageClasses[0].name | string | `"ebs-sc-gp2-xfs"` | | | aws-ebs-csi-driver.storageClasses[0].name | string | `"ebs-sc-gp2-xfs"` | |
| aws-ebs-csi-driver.storageClasses[0].parameters."csi.storage.k8s.io/fstype" | string | `"xfs"` | | | aws-ebs-csi-driver.storageClasses[0].parameters."csi.storage.k8s.io/fstype" | string | `"xfs"` | |
@ -62,6 +71,8 @@ Kubernetes: `>= 1.20.0`
| aws-efs-csi-driver.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | aws-efs-csi-driver.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| aws-efs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | | | aws-efs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | aws-efs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-efs-csi-driver.controller.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-efs-csi-driver.enabled | bool | `false` | | | aws-efs-csi-driver.enabled | bool | `false` | |
| aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/csi.efs.fs"` | | | aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/csi.efs.fs"` | |
| aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"Exists"` | | | aws-efs-csi-driver.node.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"Exists"` | |
@ -72,6 +83,12 @@ Kubernetes: `>= 1.20.0`
| aws-efs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | | | aws-efs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | | | aws-efs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | |
| aws-efs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | | | aws-efs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | |
| aws-efs-csi-driver.node.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.node.tolerations[1].key | string | `"nvidia.com/gpu"` | |
| aws-efs-csi-driver.node.tolerations[1].operator | string | `"Exists"` | |
| aws-efs-csi-driver.node.tolerations[2].effect | string | `"NoSchedule"` | |
| aws-efs-csi-driver.node.tolerations[2].key | string | `"aws.amazon.com/neuron"` | |
| aws-efs-csi-driver.node.tolerations[2].operator | string | `"Exists"` | |
| aws-efs-csi-driver.replicaCount | int | `1` | | | aws-efs-csi-driver.replicaCount | int | `1` | |
| gemini.enabled | bool | `false` | | | gemini.enabled | bool | `false` | |
| gemini.resources.limits.cpu | string | `"400m"` | | | gemini.resources.limits.cpu | string | `"400m"` | |
@ -84,6 +101,8 @@ Kubernetes: `>= 1.20.0`
| lvm-localpv.lvmController.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | lvm-localpv.lvmController.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| lvm-localpv.lvmController.tolerations[0].effect | string | `"NoSchedule"` | | | lvm-localpv.lvmController.tolerations[0].effect | string | `"NoSchedule"` | |
| lvm-localpv.lvmController.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | lvm-localpv.lvmController.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| lvm-localpv.lvmController.tolerations[1].effect | string | `"NoSchedule"` | |
| lvm-localpv.lvmController.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| lvm-localpv.lvmNode.logLevel | int | `2` | | | lvm-localpv.lvmNode.logLevel | int | `2` | |
| lvm-localpv.lvmNode.nodeSelector."node.kubernetes.io/lvm" | string | `"openebs"` | | | lvm-localpv.lvmNode.nodeSelector."node.kubernetes.io/lvm" | string | `"openebs"` | |
| lvm-localpv.lvmNode.tolerations[0].effect | string | `"NoSchedule"` | | | lvm-localpv.lvmNode.tolerations[0].effect | string | `"NoSchedule"` | |
@ -102,6 +121,8 @@ Kubernetes: `>= 1.20.0`
| snapshotController.resources.requests.memory | string | `"16Mi"` | | | snapshotController.resources.requests.memory | string | `"16Mi"` | |
| snapshotController.tolerations[0].effect | string | `"NoSchedule"` | | | snapshotController.tolerations[0].effect | string | `"NoSchedule"` | |
| snapshotController.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | | | snapshotController.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| snapshotController.tolerations[1].effect | string | `"NoSchedule"` | |
| snapshotController.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
# Snapshotter # Snapshotter
- https://kubernetes-csi.github.io/docs/snapshot-controller.html#deployment - https://kubernetes-csi.github.io/docs/snapshot-controller.html#deployment

View File

@ -24,6 +24,8 @@ spec:
tolerations: tolerations:
- effect: NoSchedule - effect: NoSchedule
key: node-role.kubernetes.io/master key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
containers: containers:
- command: - command:
- gemini - gemini

View File

@ -1,7 +1,7 @@
diff -rtubN charts/gemini/templates/deployment.yaml charts/gemini.zdt/templates/deployment.yaml diff -rtubN charts/gemini/templates/deployment.yaml charts/gemini.zdt/templates/deployment.yaml
--- charts/gemini/templates/deployment.yaml 2021-04-19 12:00:43.605005861 +0200 --- charts/gemini/templates/deployment.yaml 2021-04-19 12:00:43.605005861 +0200
+++ charts/gemini.zdt/templates/deployment.yaml 2021-04-19 12:00:08.365005781 +0200 +++ charts/gemini.zdt/templates/deployment.yaml 2021-04-19 12:00:08.365005781 +0200
@@ -19,6 +19,11 @@ @@ -19,6 +19,13 @@
{{- else }} {{- else }}
serviceAccountName: {{ .Values.rbac.serviceAccountName }} serviceAccountName: {{ .Values.rbac.serviceAccountName }}
{{- end }} {{- end }}
@ -10,6 +10,8 @@ diff -rtubN charts/gemini/templates/deployment.yaml charts/gemini.zdt/templates/
+ tolerations: + tolerations:
+ - effect: NoSchedule + - effect: NoSchedule
+ key: node-role.kubernetes.io/master + key: node-role.kubernetes.io/master
+ - effect: NoSchedule
+ key: node-role.kubernetes.io/control-plane
containers: containers:
- command: - command:
- gemini - gemini

View File

@ -18,7 +18,7 @@
"subdir": "contrib/mixin" "subdir": "contrib/mixin"
} }
}, },
"version": "acc7463fb2dc7037f571a02e65afd6e573a6344c", "version": "7cd9e5a3383d688b072808cea5dedeb209cc6d47",
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc=" "sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
}, },
{ {
@ -38,7 +38,7 @@
"subdir": "grafana-builder" "subdir": "grafana-builder"
} }
}, },
"version": "d73aff453c9784cd6922119f3ce33d8d355a79e1", "version": "187833fc2d104a75dadf28ea5d628818e21619cb",
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0=" "sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
}, },
{ {
@ -58,7 +58,7 @@
"subdir": "lib/promgrafonnet" "subdir": "lib/promgrafonnet"
} }
}, },
"version": "7b559e800a32a2a80caf4c968f37c4999ec44689", "version": "05a58f765eda05902d4f7dd22098a2b870f7ca1e",
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps=" "sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
}, },
{ {

View File

@ -6,6 +6,8 @@ snapshotController:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -34,6 +36,8 @@ lvm-localpv:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""
@ -74,6 +78,8 @@ aws-ebs-csi-driver:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
# k8sTagClusterId: <CLUSTER_NAME> # k8sTagClusterId: <CLUSTER_NAME>
# region: <AWS_DEFAULT_REGION> # region: <AWS_DEFAULT_REGION>
@ -146,6 +152,8 @@ aws-efs-csi-driver:
tolerations: tolerations:
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector: nodeSelector:
node-role.kubernetes.io/control-plane: "" node-role.kubernetes.io/control-plane: ""

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero name: kubezero
description: KubeZero - Root App of Apps chart description: KubeZero - Root App of Apps chart
type: application type: application
version: 1.23.11 version: 1.24.7
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -15,4 +15,4 @@ dependencies:
- name: kubezero-lib - name: kubezero-lib
version: ">= 0.1.5" version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts repository: https://cdn.zero-downtime.net/charts
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero # kubezero
![Version: 1.23.11](https://img.shields.io/badge/Version-1.23.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 1.24.7](https://img.shields.io/badge/Version-1.24.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart KubeZero - Root App of Apps chart
@ -14,7 +14,7 @@ KubeZero - Root App of Apps chart
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
@ -30,14 +30,14 @@ Kubernetes: `>= 1.20.0`
| addons.enabled | bool | `true` | | | addons.enabled | bool | `true` | |
| addons.external-dns.enabled | bool | `false` | | | addons.external-dns.enabled | bool | `false` | |
| addons.forseti.enabled | bool | `false` | | | addons.forseti.enabled | bool | `false` | |
| addons.targetRevision | string | `"0.6.3"` | | | addons.targetRevision | string | `"0.7.0"` | |
| argocd.enabled | bool | `false` | | | argocd.enabled | bool | `false` | |
| argocd.istio.enabled | bool | `false` | | | argocd.istio.enabled | bool | `false` | |
| argocd.namespace | string | `"argocd"` | | | argocd.namespace | string | `"argocd"` | |
| argocd.targetRevision | string | `"0.10.2"` | | | argocd.targetRevision | string | `"0.10.2"` | |
| cert-manager.enabled | bool | `false` | | | cert-manager.enabled | bool | `false` | |
| cert-manager.namespace | string | `"cert-manager"` | | | cert-manager.namespace | string | `"cert-manager"` | |
| cert-manager.targetRevision | string | `"0.9.2"` | | | cert-manager.targetRevision | string | `"0.9.3"` | |
| global.clusterName | string | `"zdt-trial-cluster"` | | | global.clusterName | string | `"zdt-trial-cluster"` | |
| global.highAvailable | bool | `false` | | | global.highAvailable | bool | `false` | |
| istio-ingress.chart | string | `"kubezero-istio-gateway"` | | | istio-ingress.chart | string | `"kubezero-istio-gateway"` | |
@ -52,27 +52,27 @@ Kubernetes: `>= 1.20.0`
| istio-private-ingress.targetRevision | string | `"0.8.2"` | | | istio-private-ingress.targetRevision | string | `"0.8.2"` | |
| istio.enabled | bool | `false` | | | istio.enabled | bool | `false` | |
| istio.namespace | string | `"istio-system"` | | | istio.namespace | string | `"istio-system"` | |
| istio.targetRevision | string | `"0.8.3"` | | | istio.targetRevision | string | `"0.8.5"` | |
| kubezero.defaultTargetRevision | string | `"*"` | | | kubezero.defaultTargetRevision | string | `"*"` | |
| kubezero.gitSync | object | `{}` | | | kubezero.gitSync | object | `{}` | |
| kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | | | kubezero.repoURL | string | `"https://cdn.zero-downtime.net/charts"` | |
| kubezero.server | string | `"https://kubernetes.default.svc"` | | | kubezero.server | string | `"https://kubernetes.default.svc"` | |
| logging.enabled | bool | `false` | | | logging.enabled | bool | `false` | |
| logging.namespace | string | `"logging"` | | | logging.namespace | string | `"logging"` | |
| logging.targetRevision | string | `"0.8.3"` | | | logging.targetRevision | string | `"0.8.4"` | |
| metrics.enabled | bool | `false` | | | metrics.enabled | bool | `false` | |
| metrics.istio.grafana | object | `{}` | | | metrics.istio.grafana | object | `{}` | |
| metrics.istio.prometheus | object | `{}` | | | metrics.istio.prometheus | object | `{}` | |
| metrics.namespace | string | `"monitoring"` | | | metrics.namespace | string | `"monitoring"` | |
| metrics.targetRevision | string | `"0.8.5"` | | | metrics.targetRevision | string | `"0.8.7"` | |
| network.cilium.cluster | object | `{}` | | | network.cilium.cluster | object | `{}` | |
| network.enabled | bool | `true` | | | network.enabled | bool | `true` | |
| network.retain | bool | `true` | | | network.retain | bool | `true` | |
| network.targetRevision | string | `"0.3.4"` | | | network.targetRevision | string | `"0.4.0"` | |
| storage.aws-ebs-csi-driver.enabled | bool | `false` | | | storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | | | storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | | | storage.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.7.1"` | | | storage.targetRevision | string | `"0.7.3"` | |
---------------------------------------------- ----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)

View File

@ -10,7 +10,7 @@ global:
addons: addons:
enabled: true enabled: true
targetRevision: 0.6.3 targetRevision: 0.7.0
external-dns: external-dns:
enabled: false enabled: false
forseti: forseti:
@ -25,18 +25,18 @@ addons:
network: network:
enabled: true enabled: true
retain: true retain: true
targetRevision: 0.3.4 targetRevision: 0.4.0
cilium: cilium:
cluster: {} cluster: {}
cert-manager: cert-manager:
enabled: false enabled: false
namespace: cert-manager namespace: cert-manager
targetRevision: 0.9.2 targetRevision: 0.9.3
storage: storage:
enabled: false enabled: false
targetRevision: 0.7.2 targetRevision: 0.7.3
aws-ebs-csi-driver: aws-ebs-csi-driver:
enabled: false enabled: false
aws-efs-csi-driver: aws-efs-csi-driver:
@ -45,7 +45,7 @@ storage:
istio: istio:
enabled: false enabled: false
namespace: istio-system namespace: istio-system
targetRevision: 0.8.3 targetRevision: 0.8.5
istio-ingress: istio-ingress:
enabled: false enabled: false
@ -66,7 +66,7 @@ istio-private-ingress:
metrics: metrics:
enabled: false enabled: false
namespace: monitoring namespace: monitoring
targetRevision: 0.8.5 targetRevision: 0.8.7
istio: istio:
grafana: {} grafana: {}
prometheus: {} prometheus: {}
@ -74,7 +74,7 @@ metrics:
logging: logging:
enabled: false enabled: false
namespace: logging namespace: logging
targetRevision: 0.8.3 targetRevision: 0.8.4
argocd: argocd:
enabled: false enabled: false

View File

@ -1,27 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: neuron-top
name: neuron-top
namespace: default
spec:
containers:
- image: public.ecr.aws/zero-downtime/dumpster:latest
command:
- "bash"
- "-c"
- "sleep 3600"
imagePullPolicy: IfNotPresent
name: neuron-top
resources:
limits:
#hugepages-2Mi: 256Mi
aws.amazon.com/neuron: 1
requests:
memory: 1024Mi
tolerations:
- effect: NoSchedule
key: kubezero-workergroup
operator: Equal
value: public

19
docs/nvidia-test.yaml Normal file
View File

@ -0,0 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
name: nvidia-test
namespace: default
spec:
containers:
- image: nvidia/cuda:10.1-runtime-ubuntu16.04
command:
- "bash"
- "-c"
- "sleep 3600"
imagePullPolicy: IfNotPresent
name: nvidia-test
resources:
limits:
nvidia.com/gpu: 1
requests:
memory: 1024Mi

48
docs/v1.24.md Normal file
View File

@ -0,0 +1,48 @@
# KubeZero 1.24
## TODO
### FeatureGates
- PodAndContainerStatsFromCRI
- DelegateFSGroupToCSIDriver
## What's new - Major themes
- Cilium added as second CNI to prepare full migration to Cilium with 1.24 upgrade
- support for Nvidia g5 instances incl. pre-installed kernel drivers, cuda toolchain and CRI integration
- updated inf1 neuron drivers
- ExtendedResourceToleration AdmissionController and auto-taints allowing Neuron and Nvidia pods ONLY to be scheduled on dedicated workers
- full Cluster-Autoscaler integration
## Version upgrades
- Istio to 1.14.4
- Logging: ECK operator to 2.4, fluent-bit 1.9.8
- Metrics: Prometheus and all Grafana charts to latest to match V1.23
- ArgoCD to V2.4 ( access to pod via shell disabled by default )
- AWS EBS/EFS CSI drivers to latest versions
- cert-manager to V1.9.1
# Upgrade
`(No, really, you MUST read this before you upgrade)`
- Ensure your Kube context points to the correct cluster !
1. Enable `containerProxy` for NAT instances and upgrade NAT instance using the new V2 Pulumi stacks
2. Review CFN config for controller and workers ( enable containerProxy, remove legacy version settings etc )
3. Upgrade CFN stacks for the control plane and all worker groups
4. Trigger fully-automated cluster upgrade:
`./admin/upgrade_cluster.sh <path to the argocd app kubezero yaml for THIS cluster>`
5. Reboot controller(s) one by one
Wait each time for controller to join and all pods running.
Might take a while ...
6. Launch new set of workers eg. by doubling `desired` for each worker ASG
once new workers are ready, cordon and drain all old workers
The cluster-autoscaler will remove the old workers automatically after about 10min !
7. If all looks good, commit the ArgoApp resource for Kubezero, before re-enabling ArgoCD itself.
git add / commit / push `<cluster/env/kubezero/application.yaml>`

View File

@ -18,6 +18,8 @@ function reset_index() {
aws s3 sync $REPO_URL_S3/ $TMPDIR/ aws s3 sync $REPO_URL_S3/ $TMPDIR/
helm repo index $TMPDIR --url $REPO_URL helm repo index $TMPDIR --url $REPO_URL
aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1 aws s3 cp $TMPDIR/index.yaml $REPO_URL_S3/ --cache-control max-age=1
exit 0
} }
@ -49,9 +51,10 @@ function publish_chart() {
} }
#reset_index
publish_chart publish_chart
CF_DIST=E1YFUJXMCXT2RN CF_DIST=E1YFUJXMCXT2RN
aws cloudfront create-invalidation --distribution $CF_DIST --paths "/charts/*" aws cloudfront create-invalidation --distribution $CF_DIST --paths "/charts/*"
#reset_index