V1.24 beta, metrics updates, minor tweaks and fixes

pull/51/head
Stefan Reimer 2 weeks ago
parent 46bb9382fd
commit 74a1515d3c

@@ -146,7 +146,7 @@ waitSystemPodsRunning
argo_used && disable_argo
# all_nodes_upgrade ""
#all_nodes_upgrade ""
control_plane_upgrade kubeadm_upgrade
@@ -154,10 +154,18 @@ echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
# Remove calico
#kubectl delete deployment calico-kube-controllers -n kube-system || true
#kubectl delete daemonset calico-node -n kube-system || true
#kubectl delete network-attachment-definitions calico -n kube-system || true
# Remove previous cilium config as the helm options are additive only -> upgrade would fail otherwise
kubectl delete configmap cilium-config -n kube-system || true
control_plane_upgrade "apply_network, apply_addons, apply_storage"
kubectl rollout restart daemonset/cilium -n kube-system
kubectl rollout restart daemonset/kube-multus-ds -n kube-system
kubectl rollout restart daemonset/cilium -n kube-system
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning

@@ -17,8 +17,8 @@ protectKernelDefaults: {{ .Values.protectKernelDefaults }}
tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256]
featureGates:
{{- include "kubeadm.featuregates" ( dict "return" "map" ) | nindent 2 }}
# Minimal unit is 50m per pod
podsPerCore: 20
# Minimal unit is 40m per pod
podsPerCore: 25
# cpuCFSQuotaPeriod: 10ms
# Basic OS incl. crio
systemReserved:
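(The "Minimal unit" comments appear to follow from kubelet CPU accounting: at 1000m per core, 1000m / 20 pods = 50m per pod, while 1000m / 25 pods = 40m per pod, so raising podsPerCore to 25 lowers the per-pod CPU floor accordingly.)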

@@ -133,7 +133,7 @@ spec:
resources:
requests:
memory: 20Mi
memory: 32Mi
cpu: 10m
limits:
memory: 64Mi

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.7.2
version: 0.7.3
appVersion: v1.24
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -11,18 +11,14 @@ keywords:
- fuse-device-plugin
- neuron-device-plugin
- nvidia-device-plugin
- aws-node-termination-handler
- cluster-autoscaler
- sealed-secrets
- external-dns
- aws-node-termination-handler
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
dependencies:
- name: aws-node-termination-handler
version: 0.20.1
# repository: https://aws.github.io/eks-charts
condition: aws-node-termination-handler.enabled
- name: external-dns
version: 1.11.0
repository: https://kubernetes-sigs.github.io/external-dns/
@@ -32,7 +28,7 @@ dependencies:
repository: https://kubernetes.github.io/autoscaler
condition: cluster-autoscaler.enabled
- name: nvidia-device-plugin
version: 0.12.3
version: 0.13.0
# https://github.com/NVIDIA/k8s-device-plugin
repository: https://nvidia.github.io/k8s-device-plugin
condition: nvidia-device-plugin.enabled
@@ -40,4 +36,12 @@ dependencies:
version: 2.7.1
repository: https://bitnami-labs.github.io/sealed-secrets
condition: sealed-secrets.enabled
- name: aws-node-termination-handler
version: 0.20.1
# repository: https://aws.github.io/eks-charts
condition: aws-node-termination-handler.enabled
- name: aws-eks-asg-rolling-update-handler
version: 1.2.7
# repository: https://twin.github.io/helm-charts
condition: aws-eks-asg-rolling-update-handler.enabled
kubeVersion: ">= 1.24.0"

@@ -1,6 +1,6 @@
# kubezero-addons
![Version: 0.7.2](https://img.shields.io/badge/Version-0.7.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
KubeZero umbrella chart for various optional cluster addons
@@ -18,11 +18,12 @@ Kubernetes: `>= 1.24.0`
| Repository | Name | Version |
|------------|------|---------|
| | aws-eks-asg-rolling-update-handler | 1.2.7 |
| | aws-node-termination-handler | 0.20.1 |
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.7.1 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.12.3 |
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.13.0 |
# MetalLB
@@ -40,6 +41,34 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| aws-eks-asg-rolling-update-handler.enabled | bool | `false` | |
| aws-eks-asg-rolling-update-handler.environmentVars[0].name | string | `"CLUSTER_NAME"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[0].value | string | `""` | |
| aws-eks-asg-rolling-update-handler.environmentVars[1].name | string | `"AWS_REGION"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[1].value | string | `"us-west-2"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[2].name | string | `"EXECUTION_INTERVAL"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[2].value | string | `"60"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[3].name | string | `"METRICS"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[3].value | string | `"true"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[4].name | string | `"EAGER_CORDONING"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[4].value | string | `"true"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[5].name | string | `"SLOW_MODE"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[5].value | string | `"true"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[6].name | string | `"AWS_ROLE_ARN"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[6].value | string | `""` | |
| aws-eks-asg-rolling-update-handler.environmentVars[7].name | string | `"AWS_WEB_IDENTITY_TOKEN_FILE"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[7].value | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/token"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[8].name | string | `"AWS_STS_REGIONAL_ENDPOINTS"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[8].value | string | `"regional"` | |
| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.7.0"` | |
| aws-eks-asg-rolling-update-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| aws-eks-asg-rolling-update-handler.resources.limits.memory | string | `"128Mi"` | |
| aws-eks-asg-rolling-update-handler.resources.requests.cpu | string | `"10m"` | |
| aws-eks-asg-rolling-update-handler.resources.requests.memory | string | `"32Mi"` | |
| aws-eks-asg-rolling-update-handler.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-eks-asg-rolling-update-handler.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-eks-asg-rolling-update-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-node-termination-handler.deleteLocalData | bool | `true` | |
| aws-node-termination-handler.emitKubernetesEvents | bool | `true` | |
| aws-node-termination-handler.enableProbesServer | bool | `true` | |
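The handler ships disabled (`enabled: false`); a minimal sketch of switching it on via Helm, where the release name, namespace, cluster name, and running from the chart directory are all assumptions — per the defaults above, `environmentVars[0]` is `CLUSTER_NAME`:

```console
# Sketch only: release name, namespace and cluster name are placeholders
helm upgrade kubezero-addons . -n kube-system --reuse-values \
  --set aws-eks-asg-rolling-update-handler.enabled=true \
  --set 'aws-eks-asg-rolling-update-handler.environmentVars[0].value=my-cluster'
```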

@@ -0,0 +1,8 @@
apiVersion: v2
description: Handles rolling upgrades for AWS ASGs for EKS by replacing outdated nodes
  with new nodes.
home: https://github.com/TwiN/aws-eks-asg-rolling-update-handler
maintainers:
- name: TwiN
name: aws-eks-asg-rolling-update-handler
version: 1.2.7

@@ -0,0 +1,31 @@
{{/*
Create a default app name.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.name" -}}
{{- .Chart.Name -}}
{{- end -}}
{{/*
Create a default namespace.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.namespace" -}}
{{- .Release.Namespace -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-eks-asg-rolling-update-handler.labels" -}}
app.kubernetes.io/name: {{ include "aws-eks-asg-rolling-update-handler.name" . }}
{{- end -}}
{{/*
Create the name of the service account to use.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aws-eks-asg-rolling-update-handler.name" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

@@ -0,0 +1,15 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
roleRef:
kind: ClusterRole
name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}

@@ -0,0 +1,41 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
rules:
- apiGroups:
- "*"
resources:
- "*"
verbs:
- get
- list
- watch
- apiGroups:
- "*"
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- "*"
resources:
- pods/eviction
verbs:
- get
- list
- create
- apiGroups:
- "*"
resources:
- pods
verbs:
- get
- list
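Note that the rules pair wildcard apiGroups with narrow verb lists: cluster-wide read access, plus update/patch on nodes and create on pods/eviction for cordoning and draining. A quick sketch to verify the effective permissions, where the service account name and `kube-system` namespace are assumptions based on the chart defaults:

```console
# Both names below are assumptions based on the chart defaults
kubectl auth can-i update nodes \
  --as=system:serviceaccount:kube-system:aws-eks-asg-rolling-update-handler
kubectl auth can-i create pods/eviction \
  --as=system:serviceaccount:kube-system:aws-eks-asg-rolling-update-handler
```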

@@ -0,0 +1,56 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 6 }}
template:
metadata:
labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 8 }}
spec:
automountServiceAccountToken: true
serviceAccountName: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
restartPolicy: Always
dnsPolicy: Default
containers:
- name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
{{- toYaml .Values.environmentVars | nindent 12 }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
volumeMounts:
- name: aws-token
mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
readOnly: true
volumes:
- name: aws-token
projected:
sources:
- serviceAccountToken:
path: token
expirationSeconds: 86400
audience: "sts.amazonaws.com"
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
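The projected `aws-token` volume is what backs the `AWS_WEB_IDENTITY_TOKEN_FILE` path used elsewhere in this commit: the kubelet issues a service-account token with the `sts.amazonaws.com` audience and rotates it daily (`expirationSeconds: 86400`). A sketch to confirm the mount, where the deployment name and namespace are assumptions:

```console
# Names are assumptions based on the chart defaults
kubectl exec -n kube-system deploy/aws-eks-asg-rolling-update-handler -- \
  ls /var/run/secrets/sts.amazonaws.com/serviceaccount/
```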

@@ -0,0 +1,13 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{ end }}

@@ -0,0 +1,28 @@
replicaCount: 1
image:
repository: twinproduction/aws-eks-asg-rolling-update-handler
tag: v1.4.3
pullPolicy: IfNotPresent
#imagePullSecrets:
#- imagePullSecret
environmentVars:
- name: CLUSTER_NAME
value: "cluster-name" # REPLACE THIS WITH THE NAME OF YOUR EKS CLUSTER
#- name: AUTO_SCALING_GROUP_NAMES
# value: "asg-1,asg-2,asg-3" # REPLACE THESE VALUES FOR THE NAMES OF THE ASGs, if CLUSTER_NAME is provided, this is ignored
#- name: IGNORE_DAEMON_SETS
# value: "true"
#- name: DELETE_LOCAL_DATA
# value: "true"
#- name: AWS_REGION
# value: us-west-2
#- name: ENVIRONMENT
# value: ""
serviceAccount:
create: true
#name: aws-eks-asg-rolling-update-handler
annotations: {}

@@ -0,0 +1,36 @@
diff -tuNr charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml
--- charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml 2022-12-16 13:10:26.049272371 +0000
+++ charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml 2022-12-16 15:56:00.880666339 +0000
@@ -25,7 +25,31 @@
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
-{{- toYaml .Values.environmentVars | nindent 12 }}
+ {{- toYaml .Values.environmentVars | nindent 12 }}
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: aws-token
+ mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+ readOnly: true
+ volumes:
+ - name: aws-token
+ projected:
+ sources:
+ - serviceAccountToken:
+ path: token
+ expirationSeconds: 86400
+ audience: "sts.amazonaws.com"
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}

@@ -4,6 +4,7 @@ set -ex
helm repo update
NTH_VERSION=$(yq eval '.dependencies[] | select(.name=="aws-node-termination-handler") | .version' Chart.yaml)
RUH_VERSION=$(yq eval '.dependencies[] | select(.name=="aws-eks-asg-rolling-update-handler") | .version' Chart.yaml)
rm -rf charts/aws-node-termination-handler
helm pull eks/aws-node-termination-handler --untar --untardir charts --version $NTH_VERSION
@@ -11,4 +12,8 @@ helm pull eks/aws-node-termination-handler --untar --untardir charts --version $NTH_VERSION
# diff -tuNr charts/aws-node-termination-handler.orig charts/aws-node-termination-handler > nth.patch
patch -p0 -i nth.patch --no-backup-if-mismatch
rm -rf charts/aws-eks-asg-rolling-update-handler
helm pull twin/aws-eks-asg-rolling-update-handler --untar --untardir charts --version $RUH_VERSION
patch -p0 -i ruh.patch --no-backup-if-mismatch
helm dep update

@@ -52,6 +52,47 @@ sealed-secrets:
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
aws-eks-asg-rolling-update-handler:
enabled: false
image:
tag: v1.7.0
environmentVars:
- name: CLUSTER_NAME
value: ""
- name: AWS_REGION
value: us-west-2
- name: EXECUTION_INTERVAL
value: "60"
- name: METRICS
value: "true"
- name: EAGER_CORDONING
value: "true"
# Only disable if all services have PDBs across AZs
- name: SLOW_MODE
value: "true"
- name: AWS_ROLE_ARN
value: ""
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
- name: AWS_STS_REGIONAL_ENDPOINTS
value: "regional"
resources:
requests:
cpu: 10m
memory: 32Mi
limits:
memory: 128Mi
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
aws-node-termination-handler:
enabled: false

@@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero ArgoCD - config, branding, image-updater (optional)
name: kubezero-argocd
version: 0.11.1
version: 0.11.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -17,10 +17,10 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: argo-cd
version: 5.16.1
version: 5.16.10
repository: https://argoproj.github.io/argo-helm
- name: argocd-apps
version: 0.0.4
version: 0.0.6
repository: https://argoproj.github.io/argo-helm
- name: argocd-image-updater
version: 0.8.1

@@ -1,6 +1,6 @@
# kubezero-argocd
![Version: 0.11.1](https://img.shields.io/badge/Version-0.11.1-informational?style=flat-square)
![Version: 0.11.2](https://img.shields.io/badge/Version-0.11.2-informational?style=flat-square)
KubeZero ArgoCD - config, branding, image-updater (optional)
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.24.0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 5.16.1 |
| https://argoproj.github.io/argo-helm | argocd-apps | 0.0.4 |
| https://argoproj.github.io/argo-helm | argo-cd | 5.16.10 |
| https://argoproj.github.io/argo-helm | argocd-apps | 0.0.6 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.8.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
@@ -36,16 +36,16 @@ Kubernetes: `>= 1.24.0`
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.24"` | |
| argo-cd.configs.cm.url | string | `"argocd.example.com"` | |
| argo-cd.configs.knownHosts.data.ssh_known_hosts | string | `"bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==\ngithub.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf\ngitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngit.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==\n"` | |
| argo-cd.configs.metrics.enabled | bool | `false` | |
| argo-cd.configs.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.resources.requests.cpu | string | `"100m"` | |
| argo-cd.configs.resources.requests.memory | string | `"256Mi"` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.resources.requests.cpu | string | `"100m"` | |
| argo-cd.controller.resources.requests.memory | string | `"256Mi"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.notifications.enabled | bool | `false` | |

@@ -86,6 +86,7 @@ argo-cd:
server.insecure: true
server.enable.gzip: true
controller:
metrics:
enabled: false
serviceMonitor:

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.5.23
version: 0.5.24
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -22,7 +22,7 @@ dependencies:
repository: https://gocd.github.io/helm-chart
condition: gocd.enabled
- name: gitea
version: 6.0.3
version: 6.0.5
repository: https://dl.gitea.io/charts/
condition: gitea.enabled
- name: jenkins

@@ -1,6 +1,6 @@
# kubezero-ci
![Version: 0.5.20](https://img.shields.io/badge/Version-0.5.20-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.5.24](https://img.shields.io/badge/Version-0.5.24-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI
@@ -20,8 +20,8 @@ Kubernetes: `>= 1.20.0`
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 4.2.13 |
| https://dl.gitea.io/charts/ | gitea | 6.0.3 |
| https://charts.jenkins.io | jenkins | 4.2.17 |
| https://dl.gitea.io/charts/ | gitea | 6.0.5 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 |
# Jenkins
@@ -52,9 +52,8 @@ Kubernetes: `>= 1.20.0`
| gitea.gitea.config.database.DB_TYPE | string | `"sqlite3"` | |
| gitea.gitea.demo | bool | `false` | |
| gitea.gitea.metrics.enabled | bool | `false` | |
| gitea.gitea.metrics.serviceMonitor.enabled | bool | `false` | |
| gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
| gitea.image.rootless | bool | `true` | |
| gitea.image.tag | string | `"1.17.3"` | |
| gitea.istio.enabled | bool | `false` | |
| gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| gitea.istio.url | string | `"git.example.com"` | |
@@ -64,6 +63,9 @@ Kubernetes: `>= 1.20.0`
| gitea.persistence.enabled | bool | `true` | |
| gitea.persistence.size | string | `"4Gi"` | |
| gitea.postgresql.enabled | bool | `false` | |
| gitea.resources.limits.memory | string | `"2048Mi"` | |
| gitea.resources.requests.cpu | string | `"150m"` | |
| gitea.resources.requests.memory | string | `"320Mi"` | |
| gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
| gitea.securityContext.capabilities.add[0] | string | `"SYS_CHROOT"` | |
| gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
@@ -76,39 +78,36 @@ Kubernetes: `>= 1.20.0`
| jenkins.agent.annotations."container.apparmor.security.beta.kubernetes.io/jnlp" | string | `"unconfined"` | |
| jenkins.agent.containerCap | int | `2` | |
| jenkins.agent.customJenkinsLabels[0] | string | `"podman-aws-trivy"` | |
| jenkins.agent.idleMinutes | int | `10` | |
| jenkins.agent.idleMinutes | int | `15` | |
| jenkins.agent.image | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.podName | string | `"podman-aws"` | |
| jenkins.agent.podRetention | string | `"Default"` | |
| jenkins.agent.resources.limits.cpu | string | `"4"` | |
| jenkins.agent.resources.limits.memory | string | `"6144Mi"` | |
| jenkins.agent.resources.requests.cpu | string | `"512m"` | |
| jenkins.agent.resources.requests.memory | string | `"1024Mi"` | |
| jenkins.agent.showRawYaml | bool | `false` | |
| jenkins.agent.tag | string | `"v0.4.1"` | |
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n limits:\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n requests:\n cpu: \"512m\"\n memory: \"1024Mi\"\n limits:\n cpu: \"4\"\n memory: \"6144Mi\"\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
| jenkins.controller.disableRememberMe | bool | `true` | |
| jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | |
| jenkins.controller.initContainerResources.limits.cpu | string | `"1000m"` | |
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3734.v562b_b_a_627ea_c"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3743.v1fa_4c724c3b_7"` | |
| jenkins.controller.installPlugins[10] | string | `"build-discarder:139.v05696a_7fe240"` | |
| jenkins.controller.installPlugins[11] | string | `"dark-theme:262.v0202a_4c8fb_6a"` | |
| jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.206.v7ce2cf7b_0c8b"` | |
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
| jenkins.controller.installPlugins[2] | string | `"git:4.14.2"` | |
| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1569.vb_72405b_80249"` | |
| jenkins.controller.installPlugins[4] | string | `"antisamy-markup-formatter:155.v795fb_8702324"` | |
| jenkins.controller.installPlugins[5] | string | `"prometheus:2.0.11"` | |
| jenkins.controller.installPlugins[6] | string | `"htmlpublisher:1.31"` | |
| jenkins.controller.installPlugins[7] | string | `"build-discarder:139.v05696a_7fe240"` | |
| jenkins.controller.installPlugins[8] | string | `"dark-theme:262.v0202a_4c8fb_6a"` | |
| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.206.v7ce2cf7b_0c8b"` | |
| jenkins.controller.installPlugins[2] | string | `"git:4.14.3"` | |
| jenkins.controller.installPlugins[3] | string | `"basic-branch-build-strategies:71.vc1421f89888e"` | |
| jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:144.vf3924feb_7e35"` | |
| jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.28"` | |
| jenkins.controller.installPlugins[6] | string | `"configuration-as-code:1569.vb_72405b_80249"` | |
| jenkins.controller.installPlugins[7] | string | `"antisamy-markup-formatter:155.v795fb_8702324"` | |
| jenkins.controller.installPlugins[8] | string | `"prometheus:2.0.11"` | |
| jenkins.controller.installPlugins[9] | string | `"htmlpublisher:1.31"` | |
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
| jenkins.controller.prometheus.enabled | bool | `false` | |
| jenkins.controller.resources.limits.cpu | string | `"2000m"` | |
| jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
| jenkins.controller.resources.requests.cpu | string | `"250m"` | |
| jenkins.controller.resources.requests.memory | string | `"1280Mi"` | |
@@ -129,7 +128,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.serviceAccountAgent.create | bool | `true` | |
| jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
| trivy.enabled | bool | `false` | |
| trivy.image.tag | string | `"0.34.0"` | |
| trivy.image.tag | string | `"0.35.0"` | |
| trivy.persistence.enabled | bool | `true` | |
| trivy.persistence.size | string | `"1Gi"` | |
| trivy.rbac.create | bool | `false` | |

@@ -0,0 +1,9 @@
configmap: grafana-dashboards
gzip: true
condition: '.Values.gitea.gitea.metrics.enabled'
folder: KubeZero
dashboards:
- name: Gitea
url: https://grafana.com/api/dashboards/13192/revisions/1/download
tags:
- CI

@@ -0,0 +1,15 @@
{{- if .Values.gitea.gitea.metrics.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards" | trunc 63 | trimSuffix "-" }}
namespace: {{ .Release.Namespace }}
labels:
grafana_dashboard: "1"
{{- include "kubezero-lib.labels" . | nindent 4 }}
annotations:
k8s-sidecar-target-directory: KubeZero
binaryData:
Gitea.json.gz:
H4sIAAAAAAAC/+1cW2/bNhR+z68QhGLYgDSwnMRJC+whS5cuaG5I2vVhDQxKomU2FKmSVBw38H8fSd2oixNjSBqrY4G61jmSeK7fR4ku7zccxx2PEUlSwd23zj/y2HHu9afUEBBDKXXfXY0vLs9P//z415+frtzNQo2BD7HSXzAaQzGFKa+UIeQBQ4lAlKhTKoWYJ/qmIRCA05QFsNIlOI0QOQ6VPum4aaY/y80yhtUnLOTn9WbmEoPfUsRgh1PF+BEDE0BAdXMUdoqLILxvKm4h47l3o629re3ciM3u4RJAZLDagyXTzqFMsTHQw2N0hRQtDSZph7FrSG9rsDX4D75xAUR7sKuatO1ZmUJACJXnSq3KYTakixEXZUYrQ6Tm1asp4NMPcK7uRf2vMBBvh4O93XIseY6fIiyO1WieITWC1h0MeQ4kwMdKL1gKDfkUhR1SFFBySDFl6oYs8sGvg01n6HnyY3d30/F+M29dBOag8tf5xTnAkImaCVWO+dSngIVurlvof6838vQ0G++UEiQoc94jAYHDIZMx1/d1YYhEwy03IlDo9vO2vTfDTKSK8SOlWKBEKgZaqFNMUoz1EUbkRndaljpdDh2dBzACXAdGp3RRloEPtGQCMIcVgEhHTyCJxFSZM6jJYdfpj6TRnSCMzdxrgWy1EEGiqmq3VEQMhRe0qrss1/JwZCRkJo/3jeO7Ijj5sarEQZGj8tayYkJIriBDHT7oqG5XBmIYQRLWzQC3UfM6KQ1SxjIvmpoY3HVJEemQ8imddYiFLEzcIb8FOK28aLkqq0Jrze7QwhkKdVKrnKpKuqCIiFOqG0oLqszRpI4DZcJPysJrjZ5AWQpEgAi2wpyxyN9N+K70yhJVF6m697Aub2dNBj6EDOpun2BqwBvXaT6XHScLClYdkikTEMCuApfAGdy0RuECJgkMpcdthwRgETQIvI6NqtXvEpbTQAA5l+zIpUFEjGMYUzYf+3MB+f1X6v/+xY0UUHxxFwb4qNIkQkKHLgO3rsmK9IiyGIi2lsFJRucHbile5N+qWIiptGhKcdiIkUAxPJKtbIBNKb+EUV4UjQuupmgi2lcIjXTuZe66k7luzEtKiLs3OwIwGLYhnlMmGv2uu2FcwDQiIbpFYSoj1ipNWVtEZj9v2Fp7GJMTcwZwB+5Qo/z9NLjJcm76qVo7byEVig6iaZydt3zDvbKzOxprDu7gA6U2KQtB3pqJRq3kM8a6EUpBoz8Ah3VqLtGrdXoGXy2x4UxVbJs9sbPVFPN20iWDRl3AreUn8LY0ujY3aE7bfn4e3n8KHh5aHrY8/Lw8HCTpmEP5uBDysc7uIxwsTeoqrp+Jnw8vPjk16rLEbInZEvNPQsze6CmYedsys2Xm52VmmkAynoQPPxRXUKUwfZw58xScXWiOQCD0a0Svz5R+LmPpHL27spRuKf3/RumPs+5SLt1pcOnokZfNo1b5aLYcPkwSgZpuFKSia9Es5AmCODzvuE5dCXBQL848n1ycUXGm+MnQXNeyEsIJSLFo3lNnN0kQieog04VZ9euMZgQ+pzgV0N1snqEYom1xs1zrocnWXNjNa78eGuO8LGq1Yqj+LBqS641l2kUtQrSTE5fPKmrXZ5hWThUoqSX1a8oFmswLtZoE5GtCxdhMzfhAsYQEUkHbyPrYHGFlytXcOgaB4t02fT7rY+0DXPcwpZ2lsQ+ZQyfOQdPuklf0AuiPBoXRaqDg7VtUsKiw/qjAYEK5Wr9G/UOGyy7bXxYdvOGKc4YdCw8WHtYfHlL5yN07XPhUM/qFAWF/NUDYtXhg8WD98QBxnvZvonBct/rHIsLOI28VvMGSJwjPQsKTQMIcYkxnFhSe8c1CVma9e7FQN/tlYWFnVVgYWFiwsNAHWNDrDr1DhZO61S8LCvsrgsLQzhUsKPQCFCiLAEHfQS9nDOedxr8sRLTeOC6dOIwsRliM6ANGyL9x77DhY83oF8aE0YqYYBcpLST0AhImVAW4d6Bw1DD7ZWFhuOqrx6F9x2BxoRe4kKQ+RsENnPcOGi605R9My9dsXWJnyXPEngWHJwGHJGUJtouVzwcOso369+OFq5rRa7YmsQwS7K+ZLCT0AhJm0J9SetM7VPjctHvN1iWWAcMbCwwWGPrxO2gMAe/jb6Abdq/basQSZLAPERYYegEMAY1jSETvgOGwafe6LUksAYZtCwwWGPoADDHCkAtK+jdnOG1bvm4LE0vAYWj/m4RFh368aAAimPYPGj43zO7ChXIHaWmWKmGl3h5k17hcXh6DKozDbI9jLuYYFtWXnSlAVEXTPTx2y/sKGCdYpotE7Y2pq22Y9ZYgpX6SOS6LY/baKzYbcWWiM5lbuyxRS0asujh3ZFzkx8xy6Zo+8OLq+67x3TMPtgemxtj5ZGh89/L9pesOfad6bx3XZ3TGi32kyzzpLaYzWZrtAU7OvnnHs9333zPpLWBI7TjNHwpctSf43sbiX1mpsRuhXgAA
{{- end }}
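The dashboard JSON is stored gzipped under `binaryData` so it stays within ConfigMap size limits; Grafana's dashboard sidecar unpacks it. To inspect the embedded dashboard by hand, a sketch where the rendered ConfigMap name and namespace are assumptions:

```console
# ConfigMap name and namespace are assumptions
kubectl get cm kubezero-ci-grafana-dashboards -n kubezero-ci \
  -o jsonpath='{.binaryData.Gitea\.json\.gz}' | base64 -d | gunzip | head
```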

@@ -3,4 +3,5 @@
helm dep update
# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboard-jenkins.yaml templates/jenkins/grafana-dashboard.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboard-gitea.yaml templates/gitea/grafana-dashboard.yaml

@@ -17,7 +17,7 @@ gitea:
enabled: false
image:
tag: 1.17.3
#tag: 1.17.4
rootless: true
securityContext:
@@ -49,7 +49,7 @@ gitea:
metrics:
enabled: false
serviceMonitor:
enabled: false
enabled: true
config:
database:

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
type: application
version: 0.8.8
version: 0.8.9
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -19,11 +19,11 @@ dependencies:
repository: https://cdn.zero-downtime.net/charts/
# https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
- name: kube-prometheus-stack
version: 42.2.1
version: 43.2.0
# Switch back to upstream once all alerts are fixed, e.g. etcd gRPC
# repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter
version: 3.4.2
version: 3.5.0
repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-adapter.enabled
- name: prometheus-pushgateway

@@ -1,6 +1,6 @@
# kubezero-metrics
![Version: 0.8.8](https://img.shields.io/badge/Version-0.8.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
@@ -18,9 +18,9 @@ Kubernetes: `>= 1.24.0`
| Repository | Name | Version |
|------------|------|---------|
| | kube-prometheus-stack | 42.2.1 |
| | kube-prometheus-stack | 43.2.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.4.2 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.5.0 |
| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.0.2 |
## Values
@@ -87,15 +87,15 @@ Kubernetes: `>= 1.24.0`
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].equal[0] | string | `"namespace"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].source_matchers[0] | string | `"alertname = InfoInhibitor"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].target_matchers[0] | string | `"severity = info"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].source_matchers[0] | string | `"alertname = ClusterAutoscalerNodeGroupsEnabled"` | |
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].target_matchers[0] | string | `"alertname =~ \"KubeCPUOvercommit|KubeMemoryOvercommit\""` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[0] | string | `"severity"` | |
| kube-prometheus-stack.alertmanager.config.route.group_by[1] | string | `"clusterName"` | |
| kube-prometheus-stack.alertmanager.config.route.group_interval | string | `"5m"` | |
| kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"30s"` | |
| kube-prometheus-stack.alertmanager.config.route.repeat_interval | string | `"6h"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[0].matchers[0] | string | `"alertname = Watchdog"` | |
| kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"10s"` | |
| kube-prometheus-stack.alertmanager.config.route.repeat_interval | string | `"4h"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[0].matchers[0] | string | `"severity = none"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[0].receiver | string | `"null"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[1].matchers[0] | string | `"alertname = InfoInhibitor"` | |
| kube-prometheus-stack.alertmanager.config.route.routes[1].receiver | string | `"null"` | |
| kube-prometheus-stack.alertmanager.enabled | bool | `false` | |
| kube-prometheus-stack.coreDns.enabled | bool | `true` | |
| kube-prometheus-stack.defaultRules.create | bool | `false` | |

@@ -1,4 +1,5 @@
annotations:
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
@@ -6,7 +7,7 @@ annotations:
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
apiVersion: v2
appVersion: 0.60.1
appVersion: 0.61.1
dependencies:
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
@@ -19,7 +20,7 @@ dependencies:
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
version: 6.45.*
version: 6.48.*
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
and Prometheus rules combined with documentation and scripts to provide easy to
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
@@ -51,4 +52,4 @@ sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 42.2.1
version: 43.2.0

@@ -80,6 +80,23 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 42.x to 43.x
This version upgrades Prometheus-Operator to v0.61.1, Prometheus to v2.40.5 and Thanos to v0.29.0.
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 41.x to 42.x
This includes the overridability of container registry for all containers at the global level using `global.imageRegistry` or per container image. The defaults have not changed but if you were using a custom image, you will have to override the registry of said custom container image before you upgrade.

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 9.3.0
appVersion: 9.3.1
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
@@ -19,4 +19,4 @@ name: grafana
sources:
- https://github.com/grafana/grafana
type: application
version: 6.45.0
version: 6.48.0

@@ -179,6 +179,8 @@ This version requires Helm >= 3.1.0.
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` |
| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` |
| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` |
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
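The two new settings work together: unless `skipReload` is set, the sidecar gets `REQ_URL`/`REQ_METHOD` pointed at `reloadURL` and POSTs to it after each ConfigMap change (see the pod template change further down). A sketch of opting out, assuming a release named `grafana` installed from an added `grafana` repo:

```console
# Release name and chart reference are assumptions
helm upgrade grafana grafana/grafana --reuse-values \
  --set sidecar.dashboards.skipReload=true
```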

@@ -141,6 +141,17 @@ Return the appropriate apiVersion for ingress.
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}}
{{- define "grafana.hpa.apiVersion" -}}
{{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }}
{{- print "autoscaling/v2beta1" }}
{{- else }}
{{- print "autoscaling/v2" }}
{{- end }}
{{- end }}
{{/*
Return the appropriate apiVersion for podDisruptionBudget.
*/}}

@@ -1,4 +1,5 @@
{{- define "grafana.pod" -}}
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
{{- $root := . -}}
{{- with .Values.schedulerName }}
schedulerName: "{{ . }}"
@@ -384,6 +385,26 @@ containers:
- name: SCRIPT
value: "{{ . }}"
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if not .Values.sidecar.dashboards.skipReload }}
- name: REQ_URL
value: {{ .Values.sidecar.dashboards.reloadURL }}
- name: REQ_METHOD
value: POST
{{- end }}
{{- if .Values.sidecar.dashboards.watchServerTimeout }}
{{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }}
{{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }}
@@ -561,7 +582,7 @@ containers:
- name: UNIQUE_FILENAMES
value: "{{ .Values.sidecar.enableUniqueFilenames }}"
{{- end }}
{{- if .Values.sidecar.notifiers.searchNamespace }}
{{- with .Values.sidecar.notifiers.searchNamespace }}
- name: NAMESPACE
value: "{{ tpl (. | join ",") $root }}"
{{- end }}
@@ -1013,8 +1034,8 @@ volumes:
- name: storage
persistentVolumeClaim:
claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }}
{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }}
# nothing
{{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }}
{{/* nothing */}}
{{- else }}
- name: storage
{{- if .Values.persistence.inMemory.enabled }}

@@ -1,4 +1,4 @@
{{- $sts := list "sts" "StatefulSet" -}}
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }}
apiVersion: v1
kind: Service

@@ -1,6 +1,6 @@
{{- $sts := list "sts" "StatefulSet" -}}
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
apiVersion: {{ include "grafana.hpa.apiVersion" . }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "grafana.fullname" . }}
@@ -22,5 +22,28 @@ spec:
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- toYaml .Values.autoscaling.metrics | nindent 4 }}
{{- if .Values.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }}
targetAverageUtilization: {{ .Values.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- if .Values.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }}
targetAverageUtilization: {{ .Values.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- end }}
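Since `grafana.hpa.apiVersion` keys off `.Capabilities.KubeVersion`, the same values render against either HPA API; a sketch of checking both renderings locally, where the chart path is an assumption:

```console
# Chart path is an assumption; --kube-version drives .Capabilities.KubeVersion
helm template grafana ./charts/grafana --set autoscaling.enabled=true \
  --kube-version 1.22.0 | grep -B1 'kind: HorizontalPodAutoscaler'
helm template grafana ./charts/grafana --set autoscaling.enabled=true \
  --kube-version 1.25.0 | grep -B1 'kind: HorizontalPodAutoscaler'
```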

@@ -1,4 +1,4 @@
{{- $sts := list "sts" "StatefulSet" -}}
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}}
apiVersion: apps/v1
kind: StatefulSet

@@ -1,4 +1,4 @@
{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:

@@ -1,4 +1,4 @@
{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }}
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:

@@ -1,4 +1,4 @@
{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }}
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:

@@ -48,17 +48,10 @@ headlessService: false
#
autoscaling:
enabled: false
# minReplicas: 1
# maxReplicas: 10
# metrics:
# - type: Resource
# resource:
# name: cpu
# targetAverageUtilization: 60
# - type: Resource
# resource:
# name: memory
# targetAverageUtilization: 60
minReplicas: 1
maxReplicas: 5
targetCPU: "60"
targetMemory: ""
## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
@@ -101,7 +94,7 @@ image:
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Can be templated.
##
# pullSecrets:
pullSecrets: []
# - myRegistrKeySecretName
testFramework:
@@ -761,7 +754,7 @@ smtp:
sidecar:
image:
repository: quay.io/kiwigrid/k8s-sidecar
tag: 1.19.2
tag: 1.21.0
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
@@ -845,8 +838,11 @@ sidecar:
# If specified, the sidecar will look for annotation with this name to create folder and put graph here.
# You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
folderAnnotation: null
# Endpoint to send request to reload dashboards
reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload"
# Absolute path to shell script to execute after a configmap got reloaded
script: null
skipReload: false
# watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
# defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
# watchServerTimeout: 3600

@@ -1,4 +1,4 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
@@ -313,8 +313,8 @@ spec:
description: TLS configuration
properties:
ca:
description: Struct containing the CA cert to use
for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to use
@@ -361,8 +361,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert file
for the targets.
description: Client certificate to present when doing
client-authentication.
properties:
configMap:
description: ConfigMap containing data to use
@@ -724,8 +724,8 @@ spec:
description: TLS configuration for the client.
properties:
ca:
description: Struct containing the CA cert to
use for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
@@ -773,8 +773,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert
file for the targets.
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
@@ -1170,8 +1170,8 @@ spec:
description: TLS configuration for the client.
properties:
ca:
description: Struct containing the CA cert to
use for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
@@ -1219,8 +1219,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert
file for the targets.
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
@@ -1626,8 +1626,8 @@ spec:
description: TLS configuration for the client.
properties:
ca:
description: Struct containing the CA cert to
use for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
@@ -1675,8 +1675,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert
file for the targets.
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
@@ -2160,8 +2160,8 @@ spec:
description: TLS configuration for the client.
properties:
ca:
description: Struct containing the CA cert to
use for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
@@ -2209,8 +2209,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert
file for the targets.
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
@@ -2552,8 +2552,8 @@ spec:
description: TLS configuration for the client.
properties:
ca:
description: Struct containing the CA cert to
use for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
@@ -2601,8 +2601,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert
file for the targets.
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
@@ -3026,8 +3026,8 @@ spec:
description: TLS configuration for the client.
properties:
ca:
description: Struct containing the CA cert to
use for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
@@ -3075,8 +3075,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert
file for the targets.
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
@@ -3437,8 +3437,8 @@ spec:
description: TLS configuration for the client.
properties:
ca:
description: Struct containing the CA cert to
use for the targets.
description: Certificate authority used when verifying
server certificates.
properties:
configMap:
description: ConfigMap containing data to
@@ -3486,8 +3486,8 @@ spec:
x-kubernetes-map-type: atomic
type: object
cert:
description: Struct containing the client cert
file for the targets.
description: Client certificate to present when
doing client-authentication.
properties:
configMap:
description: ConfigMap containing data to
@@ -3808,8 +3808,8 @@ spec:
description: TLS configuration for the client.
properties:
ca: