Compare commits
26 Commits
renovate/k
...
main
Author | SHA1 | Date | |
---|---|---|---|
e70822dd28 | |||
6068020397 | |||
89e4277814 | |||
305b8e55c4 | |||
fd52b38a21 | |||
82aad4dabd | |||
b6a54468f4 | |||
3bc23504d2 | |||
dbf32f870b | |||
573dd3ec3e | |||
aeb509cb08 | |||
72733bd95f | |||
dd352f7186 | |||
7b369c5d6d | |||
a731162fcd | |||
5f6179150a | |||
59ac0427bd | |||
b1e5bdfd8d | |||
6910259502 | |||
f82fa4bf40 | |||
4f9c1a41ca | |||
7b9ab50973 | |||
2e45dc4346 | |||
9504f874a0 | |||
d64978e5ea | |||
fe4df83a25 |
10
README.md
10
README.md
@ -28,15 +28,15 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custo
|
||||
gantt
|
||||
title KubeZero Support Timeline
|
||||
dateFormat YYYY-MM-DD
|
||||
section 1.28
|
||||
beta :128b, 2024-03-01, 2024-04-30
|
||||
release :after 128b, 2024-08-31
|
||||
section 1.29
|
||||
beta :129b, 2024-07-01, 2024-07-31
|
||||
release :after 129b, 2024-11-30
|
||||
section 1.30
|
||||
beta :130b, 2024-09-01, 2024-10-31
|
||||
release :after 130b, 2025-02-28
|
||||
section 1.31
|
||||
beta :131b, 2024-12-01, 2025-01-30
|
||||
release :after 131b, 2025-04-30
|
||||
```
|
||||
|
||||
[Upstream release policy](https://kubernetes.io/releases/)
|
||||
@ -47,9 +47,7 @@ gantt
|
||||
- all compute nodes are running on Alpine V3.20
|
||||
- 1 or 2 GB encrypted root file system
|
||||
- no external dependencies at boot time, apart from container registries
|
||||
- minimal attack surface
|
||||
- extremely small memory footprint / overhead
|
||||
- cri-o container runtime incl. AppArmor support
|
||||
- focused on security and minimal footprint
|
||||
|
||||
## GitOps
|
||||
- cli / cmd line install
|
||||
|
@ -1,4 +1,8 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
set -eu -o pipefail
|
||||
|
||||
DEBUG=${DEBUG:-""}
|
||||
LOG=""
|
||||
|
||||
if [ -n "$DEBUG" ]; then
|
||||
set -x
|
||||
@ -60,7 +64,7 @@ render_kubeadm() {
|
||||
cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
|
||||
done
|
||||
|
||||
if [[ "$phase" =~ ^(bootstrap|restore)$ ]]; then
|
||||
if [[ "$phase" =~ ^(bootstrap|join|restore)$ ]]; then
|
||||
cat ${WORKDIR}/kubeadm/templates/InitConfiguration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
|
||||
fi
|
||||
|
||||
@ -80,9 +84,6 @@ parse_kubezero() {
|
||||
export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
|
||||
export PROVIDER_ID=$(yq eval '.providerID // ""' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
|
||||
export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
|
||||
|
||||
# From here on bail out, allows debug_shell even in error cases
|
||||
set -e
|
||||
}
|
||||
|
||||
|
||||
@ -188,8 +189,15 @@ kubeadm_upgrade() {
|
||||
# install re-certed kubectl config for root
|
||||
cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
|
||||
|
||||
# post upgrade hook
|
||||
[ -f /var/lib/kubezero/post-upgrade.sh ] && . /var/lib/kubezero/post-upgrade.sh
|
||||
# post upgrade
|
||||
|
||||
# Update kubezero-values CM
|
||||
kubectl get cm -n kube-system kubelet-config -o=yaml | \
|
||||
yq e '.data.kubelet' | yq e '.containerRuntimeEndpoint = "unix:///run/containerd/containerd.sock"' > $WORKDIR/new-kubelet.cm
|
||||
|
||||
kubectl get cm -n kube-system kubelet-config -o=yaml | \
|
||||
yq e '.data.kubelet |= load_str("/tmp/kubezero/new-kubelet.cm")' | \
|
||||
kubectl apply --server-side --force-conflicts -f -
|
||||
|
||||
# Cleanup after kubeadm on the host
|
||||
rm -rf ${HOSTFS}/etc/kubernetes/tmp
|
||||
@ -427,7 +435,7 @@ debug_shell() {
|
||||
|
||||
printf "For manual etcdctl commands use:\n # export ETCDCTL_ENDPOINTS=$ETCD_NODENAME:2379\n"
|
||||
|
||||
/bin/bash
|
||||
bash
|
||||
}
|
||||
|
||||
# First parse kubeadm-values.yaml
|
||||
|
@ -5,6 +5,8 @@ API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a polic
|
||||
|
||||
export HELM_SECRETS_BACKEND="vals"
|
||||
|
||||
LOCAL_DEV=${LOCAL_DEV:-""}
|
||||
|
||||
# Waits for max 300s and retries
|
||||
function wait_for() {
|
||||
local TRIES=0
|
||||
|
@ -19,6 +19,11 @@ echo "Checking that all pods in kube-system are running ..."
|
||||
|
||||
[ "$ARGOCD" == "True" ] && disable_argo
|
||||
|
||||
# 1.30 fix for the missing kubeadm socket annotations
|
||||
for c in $(kubectl get nodes -l "node-role.kubernetes.io/control-plane=" | grep v1.29 | awk {'print $1}'); do
|
||||
kubectl annotate node $c 'kubeadm.alpha.kubernetes.io/cri-socket=unix:///var/run/crio/crio.sock'
|
||||
done
|
||||
|
||||
control_plane_upgrade kubeadm_upgrade
|
||||
|
||||
echo "Control plane upgraded, <Return> to continue"
|
||||
@ -29,17 +34,18 @@ read -r
|
||||
#[ "$ARGOCD" == "True" ] && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kubezero
|
||||
|
||||
### v1.30
|
||||
#
|
||||
kubectl delete runtimeclass crio || true
|
||||
|
||||
# upgrade modules
|
||||
#
|
||||
# Preload cilium images to running nodes
|
||||
all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3"
|
||||
# Preload cilium images to running nodes, disabled till 1.31
|
||||
# all_nodes_upgrade "chroot /host crictl pull quay.io/cilium/cilium:v1.16.3; chroot /host crictl pull ghcr.io/k8snetworkplumbingwg/multus-cni:v3.9.3"
|
||||
|
||||
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
|
||||
|
||||
echo "Checking that all pods in kube-system are running ..."
|
||||
waitSystemPodsRunning
|
||||
# Disabled during 1.30 due to nvidia runtime deadlock
|
||||
#echo "Checking that all pods in kube-system are running ..."
|
||||
#waitSystemPodsRunning
|
||||
|
||||
echo "Applying remaining KubeZero modules..."
|
||||
|
||||
|
@ -8,7 +8,7 @@ patches:
|
||||
directory: {{ . }}
|
||||
{{- end }}
|
||||
nodeRegistration:
|
||||
criSocket: "unix:///var/run/crio/crio.sock"
|
||||
criSocket: "unix:///run/containerd/containerd.sock"
|
||||
ignorePreflightErrors:
|
||||
- DirAvailable--var-lib-etcd
|
||||
- DirAvailable--etc-kubernetes-manifests
|
||||
|
@ -1,6 +0,0 @@
|
||||
apiVersion: kubeadm.k8s.io/v1beta3
|
||||
kind: JoinConfiguration
|
||||
nodeRegistration:
|
||||
criSocket: "unix:///var/run/crio/crio.sock"
|
||||
patches:
|
||||
directory: /tmp/patches
|
@ -6,7 +6,7 @@ cgroupDriver: cgroupfs
|
||||
logging:
|
||||
format: json
|
||||
hairpinMode: hairpin-veth
|
||||
containerRuntimeEndpoint: "unix:///var/run/crio/crio.sock"
|
||||
containerRuntimeEndpoint: "unix:///run/containerd/containerd.sock"
|
||||
{{- if .Values.systemd }}
|
||||
resolvConf: /run/systemd/resolve/resolv.conf
|
||||
{{- end }}
|
||||
@ -21,7 +21,7 @@ featureGates:
|
||||
# Minimal unit is 40m per pod
|
||||
podsPerCore: 25
|
||||
# cpuCFSQuotaPeriod: 10ms
|
||||
# Basic OS incl. crio
|
||||
# Basic OS incl. cri
|
||||
systemReserved:
|
||||
memory: 96Mi
|
||||
#ephemeral-storage: "1Gi"
|
||||
|
@ -1,8 +1,8 @@
|
||||
apiVersion: node.k8s.io/v1
|
||||
kind: RuntimeClass
|
||||
metadata:
|
||||
name: crio
|
||||
name: crun
|
||||
handler: crun
|
||||
overhead:
|
||||
podFixed:
|
||||
memory: 4Mi
|
||||
memory: 8Mi
|
||||
|
@ -2,8 +2,8 @@ apiVersion: v2
|
||||
name: kubezero-addons
|
||||
description: KubeZero umbrella chart for various optional cluster addons
|
||||
type: application
|
||||
version: 0.8.9
|
||||
appVersion: v1.29
|
||||
version: 0.8.11
|
||||
appVersion: v1.30
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -15,6 +15,7 @@ keywords:
|
||||
- sealed-secrets
|
||||
- external-dns
|
||||
- aws-node-termination-handler
|
||||
- py-kube-downscaler
|
||||
maintainers:
|
||||
- name: Stefan Reimer
|
||||
email: stefan@zero-downtime.net
|
||||
@ -24,16 +25,16 @@ dependencies:
|
||||
repository: https://kubernetes-sigs.github.io/external-dns/
|
||||
condition: external-dns.enabled
|
||||
- name: cluster-autoscaler
|
||||
version: 9.43.0
|
||||
version: 9.43.2
|
||||
repository: https://kubernetes.github.io/autoscaler
|
||||
condition: cluster-autoscaler.enabled
|
||||
- name: nvidia-device-plugin
|
||||
version: 0.16.2
|
||||
version: 0.17.0
|
||||
# https://github.com/NVIDIA/k8s-device-plugin
|
||||
repository: https://nvidia.github.io/k8s-device-plugin
|
||||
condition: nvidia-device-plugin.enabled
|
||||
- name: sealed-secrets
|
||||
version: 2.16.1
|
||||
version: 2.16.2
|
||||
repository: https://bitnami-labs.github.io/sealed-secrets
|
||||
condition: sealed-secrets.enabled
|
||||
- name: aws-node-termination-handler
|
||||
@ -44,4 +45,8 @@ dependencies:
|
||||
version: 1.5.0
|
||||
repository: https://twin.github.io/helm-charts
|
||||
condition: aws-eks-asg-rolling-update-handler.enabled
|
||||
- name: py-kube-downscaler
|
||||
version: 0.2.11
|
||||
repository: https://caas-team.github.io/helm-charts/
|
||||
condition: py-kube-downscaler.enabled
|
||||
kubeVersion: ">= 1.26.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-addons
|
||||
|
||||
![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29](https://img.shields.io/badge/AppVersion-v1.29-informational?style=flat-square)
|
||||
![Version: 0.8.11](https://img.shields.io/badge/Version-0.8.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.30](https://img.shields.io/badge/AppVersion-v1.30-informational?style=flat-square)
|
||||
|
||||
KubeZero umbrella chart for various optional cluster addons
|
||||
|
||||
@ -18,10 +18,11 @@ Kubernetes: `>= 1.26.0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.1 |
|
||||
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.16.2 |
|
||||
| https://caas-team.github.io/helm-charts/ | py-kube-downscaler | 0.2.11 |
|
||||
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.15.0 |
|
||||
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.43.0 |
|
||||
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.16.2 |
|
||||
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.43.2 |
|
||||
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.17.0 |
|
||||
| https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
|
||||
| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.24.1 |
|
||||
|
||||
@ -156,7 +157,10 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
||||
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[7] | string | `"g5.48xlarge"` | |
|
||||
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[8] | string | `"g4dn.xlarge"` | |
|
||||
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[9] | string | `"g4dn.2xlarge"` | |
|
||||
| nvidia-device-plugin.cdi.nvidiaHookPath | string | `"/usr/bin"` | |
|
||||
| nvidia-device-plugin.deviceDiscoveryStrategy | string | `"nvml"` | |
|
||||
| nvidia-device-plugin.enabled | bool | `false` | |
|
||||
| nvidia-device-plugin.runtimeClassName | string | `"nvidia"` | |
|
||||
| nvidia-device-plugin.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| nvidia-device-plugin.tolerations[0].key | string | `"nvidia.com/gpu"` | |
|
||||
| nvidia-device-plugin.tolerations[0].operator | string | `"Exists"` | |
|
||||
|
@ -0,0 +1,10 @@
|
||||
{{- if index .Values "nvidia-device-plugin" "enabled" }}
|
||||
apiVersion: node.k8s.io/v1
|
||||
kind: RuntimeClass
|
||||
metadata:
|
||||
name: nvidia
|
||||
handler: nvidia
|
||||
overhead:
|
||||
podFixed:
|
||||
memory: 16Mi
|
||||
{{- end }}
|
@ -164,6 +164,12 @@ awsNeuron:
|
||||
|
||||
nvidia-device-plugin:
|
||||
enabled: false
|
||||
|
||||
cdi:
|
||||
nvidiaHookPath: /usr/bin
|
||||
deviceDiscoveryStrategy: nvml
|
||||
runtimeClassName: nvidia
|
||||
|
||||
tolerations:
|
||||
- key: nvidia.com/gpu
|
||||
operator: Exists
|
||||
@ -270,3 +276,33 @@ external-dns:
|
||||
#- istio-gateway
|
||||
|
||||
provider: inmemory
|
||||
|
||||
|
||||
py-kube-downscaler:
|
||||
enabled: false
|
||||
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
effect: NoSchedule
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/control-plane: ""
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: null
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 20m
|
||||
memory: 48Mi
|
||||
|
||||
# By default no NOT scale down KubeZero modules
|
||||
excludedNamespaces:
|
||||
- kube-system
|
||||
- operators
|
||||
- monitoring
|
||||
- logging
|
||||
- telemetry
|
||||
- istio-system
|
||||
- istio-ingress
|
||||
- cert-manager
|
||||
- argocd
|
||||
|
@ -1,7 +1,7 @@
|
||||
apiVersion: v2
|
||||
description: KubeZero Argo - Events, Workflow, CD
|
||||
name: kubezero-argo
|
||||
version: 0.2.5
|
||||
version: 0.2.6
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -22,7 +22,7 @@ dependencies:
|
||||
repository: https://argoproj.github.io/argo-helm
|
||||
condition: argo-events.enabled
|
||||
- name: argo-cd
|
||||
version: 7.6.10
|
||||
version: 7.7.2
|
||||
repository: https://argoproj.github.io/argo-helm
|
||||
condition: argo-cd.enabled
|
||||
- name: argocd-apps
|
||||
@ -30,7 +30,7 @@ dependencies:
|
||||
repository: https://argoproj.github.io/argo-helm
|
||||
condition: argo-cd.enabled
|
||||
- name: argocd-image-updater
|
||||
version: 0.11.0
|
||||
version: 0.11.2
|
||||
repository: https://argoproj.github.io/argo-helm
|
||||
condition: argocd-image-updater.enabled
|
||||
kubeVersion: ">= 1.26.0-0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-argo
|
||||
|
||||
![Version: 0.2.5](https://img.shields.io/badge/Version-0.2.5-informational?style=flat-square)
|
||||
![Version: 0.2.6](https://img.shields.io/badge/Version-0.2.6-informational?style=flat-square)
|
||||
|
||||
KubeZero Argo - Events, Workflow, CD
|
||||
|
||||
@ -18,10 +18,10 @@ Kubernetes: `>= 1.26.0-0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://argoproj.github.io/argo-helm | argo-cd | 7.6.10 |
|
||||
| https://argoproj.github.io/argo-helm | argo-cd | 7.7.2 |
|
||||
| https://argoproj.github.io/argo-helm | argo-events | 2.4.8 |
|
||||
| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.2 |
|
||||
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.11.0 |
|
||||
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.11.2 |
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
|
||||
|
||||
## Values
|
||||
@ -30,17 +30,18 @@ Kubernetes: `>= 1.26.0-0`
|
||||
|-----|------|---------|-------------|
|
||||
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
|
||||
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
|
||||
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.29 - Release notes"` | |
|
||||
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.30 - Release notes"` | |
|
||||
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
|
||||
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
|
||||
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.29"` | |
|
||||
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.30"` | |
|
||||
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
|
||||
| argo-cd.configs.params."controller.diff.server.side" | string | `"true"` | |
|
||||
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
|
||||
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
|
||||
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
|
||||
| argo-cd.configs.params."server.insecure" | bool | `true` | |
|
||||
| argo-cd.configs.secret.createSecret | bool | `false` | |
|
||||
| argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="` | |
|
||||
| argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="` | |
|
||||
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
|
||||
| argo-cd.controller.metrics.enabled | bool | `false` | |
|
||||
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
|
||||
@ -50,7 +51,7 @@ Kubernetes: `>= 1.26.0-0`
|
||||
| argo-cd.dex.enabled | bool | `false` | |
|
||||
| argo-cd.enabled | bool | `false` | |
|
||||
| argo-cd.global.image.repository | string | `"public.ecr.aws/zero-downtime/zdt-argocd"` | |
|
||||
| argo-cd.global.image.tag | string | `"v2.11.5"` | |
|
||||
| argo-cd.global.image.tag | string | `"v2.12.4"` | |
|
||||
| argo-cd.global.logging.format | string | `"json"` | |
|
||||
| argo-cd.istio.enabled | bool | `false` | |
|
||||
| argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
|
||||
|
@ -45,7 +45,7 @@ argo-cd:
|
||||
format: json
|
||||
image:
|
||||
repository: public.ecr.aws/zero-downtime/zdt-argocd
|
||||
tag: v2.12.4
|
||||
tag: v2.13.0
|
||||
|
||||
configs:
|
||||
styles: |
|
||||
@ -102,6 +102,7 @@ argo-cd:
|
||||
params:
|
||||
controller.status.processors: "10"
|
||||
controller.operation.processors: "5"
|
||||
controller.diff.server.side: "true"
|
||||
|
||||
server.insecure: true
|
||||
server.enable.gzip: true
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-auth
|
||||
description: KubeZero umbrella chart for all things Authentication and Identity management
|
||||
type: application
|
||||
version: 0.6.0
|
||||
version: 0.6.1
|
||||
appVersion: 26.0.5
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
@ -18,6 +18,6 @@ dependencies:
|
||||
repository: https://cdn.zero-downtime.net/charts/
|
||||
- name: keycloak
|
||||
repository: "oci://registry-1.docker.io/bitnamicharts"
|
||||
version: 24.0.4
|
||||
version: 24.2.1
|
||||
condition: keycloak.enabled
|
||||
kubeVersion: ">= 1.26.0"
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-ci
|
||||
description: KubeZero umbrella chart for all things CI
|
||||
type: application
|
||||
version: 0.8.17
|
||||
version: 0.8.18
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -18,11 +18,11 @@ dependencies:
|
||||
version: ">= 0.1.6"
|
||||
repository: https://cdn.zero-downtime.net/charts/
|
||||
- name: gitea
|
||||
version: 10.4.1
|
||||
version: 10.6.0
|
||||
repository: https://dl.gitea.io/charts/
|
||||
condition: gitea.enabled
|
||||
- name: jenkins
|
||||
version: 5.7.6
|
||||
version: 5.7.12
|
||||
repository: https://charts.jenkins.io
|
||||
condition: jenkins.enabled
|
||||
- name: trivy
|
||||
@ -30,7 +30,7 @@ dependencies:
|
||||
repository: https://aquasecurity.github.io/helm-charts/
|
||||
condition: trivy.enabled
|
||||
- name: renovate
|
||||
version: 38.124.1
|
||||
version: 38.142.6
|
||||
repository: https://docs.renovatebot.com/helm-charts
|
||||
condition: renovate.enabled
|
||||
kubeVersion: ">= 1.25.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-ci
|
||||
|
||||
![Version: 0.8.17](https://img.shields.io/badge/Version-0.8.17-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.8.18](https://img.shields.io/badge/Version-0.8.18-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero umbrella chart for all things CI
|
||||
|
||||
@ -20,9 +20,9 @@ Kubernetes: `>= 1.25.0`
|
||||
|------------|------|---------|
|
||||
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.8.0 |
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
|
||||
| https://charts.jenkins.io | jenkins | 5.7.6 |
|
||||
| https://dl.gitea.io/charts/ | gitea | 10.4.1 |
|
||||
| https://docs.renovatebot.com/helm-charts | renovate | 38.124.1 |
|
||||
| https://charts.jenkins.io | jenkins | 5.7.12 |
|
||||
| https://dl.gitea.io/charts/ | gitea | 10.6.0 |
|
||||
| https://docs.renovatebot.com/helm-charts | renovate | 38.142.6 |
|
||||
|
||||
# Jenkins
|
||||
- default build retention 10 builds, 32days
|
||||
@ -56,6 +56,7 @@ Kubernetes: `>= 1.25.0`
|
||||
| gitea.extraVolumes[0].configMap.name | string | `"gitea-kubezero-ci-themes"` | |
|
||||
| gitea.extraVolumes[0].name | string | `"gitea-themes"` | |
|
||||
| gitea.gitea.admin.existingSecret | string | `"gitea-admin-secret"` | |
|
||||
| gitea.gitea.config."ssh.minimum_key_sizes".RSA | int | `2047` | |
|
||||
| gitea.gitea.config.cache.ADAPTER | string | `"memory"` | |
|
||||
| gitea.gitea.config.database.DB_TYPE | string | `"sqlite3"` | |
|
||||
| gitea.gitea.config.log.LEVEL | string | `"warn"` | |
|
||||
@ -81,7 +82,6 @@ Kubernetes: `>= 1.25.0`
|
||||
| gitea.resources.requests.cpu | string | `"150m"` | |
|
||||
| gitea.resources.requests.memory | string | `"320Mi"` | |
|
||||
| gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
|
||||
| gitea.securityContext.capabilities.add[0] | string | `"SYS_CHROOT"` | |
|
||||
| gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
|
||||
| gitea.strategy.type | string | `"Recreate"` | |
|
||||
| gitea.test.enabled | bool | `false` | |
|
||||
|
@ -12,6 +12,30 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
|
||||
The changelog until v1.5.7 was auto-generated based on git commits.
|
||||
Those entries include a reference to the git commit to be able to get more details.
|
||||
|
||||
## 5.7.12
|
||||
|
||||
Update `configuration-as-code` to version `1887.v9e47623cb_043`
|
||||
|
||||
## 5.7.11
|
||||
|
||||
Update `git` to version `5.6.0`
|
||||
|
||||
## 5.7.10
|
||||
|
||||
Update `jenkins/jenkins` to version `2.479.1-jdk17`
|
||||
|
||||
## 5.7.9
|
||||
|
||||
Update `configuration-as-code` to version `1873.vea_5814ca_9c93`
|
||||
|
||||
## 5.7.8
|
||||
|
||||
Update `jenkins/inbound-agent` to version `3273.v4cfe589b_fd83-1`
|
||||
|
||||
## 5.7.7
|
||||
|
||||
Update `kubernetes` to version `4295.v7fa_01b_309c95`
|
||||
|
||||
## 5.7.5
|
||||
|
||||
Fix helm release deployment with flux revision reconciliation
|
||||
|
@ -1,12 +1,14 @@
|
||||
annotations:
|
||||
artifacthub.io/category: integration-delivery
|
||||
artifacthub.io/changes: |
|
||||
- Update `configuration-as-code` to version `1887.v9e47623cb_043`
|
||||
artifacthub.io/images: |
|
||||
- name: jenkins
|
||||
image: docker.io/jenkins/jenkins:2.462.3-jdk17
|
||||
image: docker.io/jenkins/jenkins:2.479.1-jdk17
|
||||
- name: k8s-sidecar
|
||||
image: docker.io/kiwigrid/k8s-sidecar:1.28.0
|
||||
- name: inbound-agent
|
||||
image: jenkins/inbound-agent:3261.v9c670a_4748a_9-1
|
||||
image: jenkins/inbound-agent:3273.v4cfe589b_fd83-1
|
||||
artifacthub.io/license: Apache-2.0
|
||||
artifacthub.io/links: |
|
||||
- name: Chart Source
|
||||
@ -16,7 +18,7 @@ annotations:
|
||||
- name: support
|
||||
url: https://github.com/jenkinsci/helm-charts/issues
|
||||
apiVersion: v2
|
||||
appVersion: 2.462.3
|
||||
appVersion: 2.479.1
|
||||
description: 'Jenkins - Build great things at any scale! As the leading open source
|
||||
automation server, Jenkins provides over 1800 plugins to support building, deploying
|
||||
and automating any project. '
|
||||
@ -44,4 +46,4 @@ sources:
|
||||
- https://github.com/maorfr/kube-tasks
|
||||
- https://github.com/jenkinsci/configuration-as-code-plugin
|
||||
type: application
|
||||
version: 5.7.6
|
||||
version: 5.7.12
|
||||
|
@ -31,7 +31,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
|
||||
| [agent.hostNetworking](./values.yaml#L973) | bool | Enables the agent to use the host network | `false` |
|
||||
| [agent.idleMinutes](./values.yaml#L1120) | int | Allows the Pod to remain active for reuse until the configured number of minutes has passed since the last step was executed on it | `0` |
|
||||
| [agent.image.repository](./values.yaml#L952) | string | Repository to pull the agent jnlp image from | `"jenkins/inbound-agent"` |
|
||||
| [agent.image.tag](./values.yaml#L954) | string | Tag of the image to pull | `"3261.v9c670a_4748a_9-1"` |
|
||||
| [agent.image.tag](./values.yaml#L954) | string | Tag of the image to pull | `"3273.v4cfe589b_fd83-1"` |
|
||||
| [agent.imagePullSecretName](./values.yaml#L961) | string | Name of the secret to be used to pull the image | `nil` |
|
||||
| [agent.inheritYamlMergeStrategy](./values.yaml#L1140) | bool | Controls whether the defined yaml merge strategy will be inherited if another defined pod template is configured to inherit from the current one | `false` |
|
||||
| [agent.jenkinsTunnel](./values.yaml#L929) | string | Overrides the Kubernetes Jenkins tunnel | `nil` |
|
||||
@ -165,7 +165,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
|
||||
| [controller.initializeOnce](./values.yaml#L420) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
|
||||
| [controller.installLatestPlugins](./values.yaml#L409) | bool | Download the minimum required version or latest version of all dependencies | `true` |
|
||||
| [controller.installLatestSpecifiedPlugins](./values.yaml#L412) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
|
||||
| [controller.installPlugins](./values.yaml#L401) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4292.v11898cf8fa_66","workflow-aggregator:600.vb_57cdd26fdd7","git:5.5.2","configuration-as-code:1850.va_a_8c31d3158b_"]` |
|
||||
| [controller.installPlugins](./values.yaml#L401) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4295.v7fa_01b_309c95","workflow-aggregator:600.vb_57cdd26fdd7","git:5.6.0","configuration-as-code:1887.v9e47623cb_043"]` |
|
||||
| [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` |
|
||||
| [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
|
||||
| [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |
|
||||
|
@ -399,10 +399,10 @@ controller:
|
||||
# Plugins will be installed during Jenkins controller start
|
||||
# -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
|
||||
installPlugins:
|
||||
- kubernetes:4292.v11898cf8fa_66
|
||||
- kubernetes:4295.v7fa_01b_309c95
|
||||
- workflow-aggregator:600.vb_57cdd26fdd7
|
||||
- git:5.5.2
|
||||
- configuration-as-code:1850.va_a_8c31d3158b_
|
||||
- git:5.6.0
|
||||
- configuration-as-code:1887.v9e47623cb_043
|
||||
|
||||
# If set to false, Jenkins will download the minimum required version of all dependencies.
|
||||
# -- Download the minimum required version or latest version of all dependencies
|
||||
@ -951,7 +951,7 @@ agent:
|
||||
# -- Repository to pull the agent jnlp image from
|
||||
repository: "jenkins/inbound-agent"
|
||||
# -- Tag of the image to pull
|
||||
tag: "3261.v9c670a_4748a_9-1"
|
||||
tag: "3273.v4cfe589b_fd83-1"
|
||||
# -- Configure working directory for default agent
|
||||
workingDir: "/home/jenkins/agent"
|
||||
nodeUsageMode: "NORMAL"
|
||||
|
@ -21,8 +21,6 @@ gitea:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
# add:
|
||||
# - SYS_CHROOT
|
||||
|
||||
resources:
|
||||
requests:
|
||||
@ -185,7 +183,7 @@ jenkins:
|
||||
agent:
|
||||
image:
|
||||
repository: public.ecr.aws/zero-downtime/jenkins-podman
|
||||
tag: v0.6.2
|
||||
tag: v0.7.0
|
||||
#alwaysPullImage: true
|
||||
podRetention: "Default"
|
||||
showRawYaml: false
|
||||
@ -279,7 +277,7 @@ jenkins:
|
||||
trivy:
|
||||
enabled: false
|
||||
image:
|
||||
tag: 0.56.2
|
||||
tag: 0.57.0
|
||||
persistence:
|
||||
enabled: true
|
||||
size: 1Gi
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-istio
|
||||
description: KubeZero Umbrella Chart for Istio
|
||||
type: application
|
||||
version: 0.23.3
|
||||
version: 0.23.2
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -16,10 +16,10 @@ dependencies:
|
||||
version: ">= 0.1.6"
|
||||
repository: https://cdn.zero-downtime.net/charts/
|
||||
- name: base
|
||||
version: 1.24.0
|
||||
version: 1.23.2
|
||||
repository: https://istio-release.storage.googleapis.com/charts
|
||||
- name: istiod
|
||||
version: 1.24.0
|
||||
version: 1.23.2
|
||||
repository: https://istio-release.storage.googleapis.com/charts
|
||||
- name: kiali-server
|
||||
version: "1.89.7"
|
||||
|
@ -2,7 +2,7 @@ apiVersion: v2
|
||||
name: kubezero-metrics
|
||||
description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
|
||||
type: application
|
||||
version: 0.10.1
|
||||
version: 0.10.2
|
||||
home: https://kubezero.com
|
||||
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
|
||||
keywords:
|
||||
@ -19,7 +19,7 @@ dependencies:
|
||||
version: ">= 0.1.6"
|
||||
repository: https://cdn.zero-downtime.net/charts/
|
||||
- name: kube-prometheus-stack
|
||||
version: 65.3.1
|
||||
version: 66.1.1
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
- name: prometheus-adapter
|
||||
version: 4.11.0
|
||||
|
@ -1,6 +1,6 @@
|
||||
# kubezero-metrics
|
||||
|
||||
![Version: 0.10.1](https://img.shields.io/badge/Version-0.10.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
![Version: 0.10.2](https://img.shields.io/badge/Version-0.10.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
|
||||
|
||||
KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
|
||||
|
||||
@ -19,7 +19,7 @@ Kubernetes: `>= 1.26.0`
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
|
||||
| https://prometheus-community.github.io/helm-charts | kube-prometheus-stack | 65.3.1 |
|
||||
| https://prometheus-community.github.io/helm-charts | kube-prometheus-stack | 66.1.1 |
|
||||
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 4.11.0 |
|
||||
| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.15.0 |
|
||||
|
||||
|
@ -1,5 +0,0 @@
|
||||
root = true
|
||||
|
||||
[files/dashboards/*.json]
|
||||
indent_size = 2
|
||||
indent_style = space
|
@ -1,29 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
# helm/charts
|
||||
OWNERS
|
||||
hack/
|
||||
ci/
|
||||
kube-prometheus-*.tgz
|
||||
|
||||
unittests/
|
||||
files/dashboards/
|
@ -1,12 +0,0 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
## How to contribute to this chart
|
||||
|
||||
1. Fork this repository, develop and test your Chart.
|
||||
1. Bump the chart version for every change.
|
||||
1. Ensure PR title has the prefix `[kube-prometheus-stack]`
|
||||
1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories
|
||||
1. Check the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes.
|
||||
1. Check for changes of RBAC rules.
|
||||
1. Check for changes in CRD specs.
|
||||
1. PR must pass the linter (`helm lint`)
|
@ -1,65 +0,0 @@
|
||||
annotations:
|
||||
artifacthub.io/license: Apache-2.0
|
||||
artifacthub.io/links: |
|
||||
- name: Chart Source
|
||||
url: https://github.com/prometheus-community/helm-charts
|
||||
- name: Upstream Project
|
||||
url: https://github.com/prometheus-operator/kube-prometheus
|
||||
artifacthub.io/operator: "true"
|
||||
apiVersion: v2
|
||||
appVersion: v0.77.1
|
||||
dependencies:
|
||||
- condition: crds.enabled
|
||||
name: crds
|
||||
repository: ""
|
||||
version: 0.0.0
|
||||
- condition: kubeStateMetrics.enabled
|
||||
name: kube-state-metrics
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
version: 5.26.*
|
||||
- condition: nodeExporter.enabled
|
||||
name: prometheus-node-exporter
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
version: 4.39.*
|
||||
- condition: grafana.enabled
|
||||
name: grafana
|
||||
repository: https://grafana.github.io/helm-charts
|
||||
version: 8.5.*
|
||||
- condition: windowsMonitoring.enabled
|
||||
name: prometheus-windows-exporter
|
||||
repository: https://prometheus-community.github.io/helm-charts
|
||||
version: 0.7.*
|
||||
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
|
||||
and Prometheus rules combined with documentation and scripts to provide easy to
|
||||
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
|
||||
Operator.
|
||||
home: https://github.com/prometheus-operator/kube-prometheus
|
||||
icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png
|
||||
keywords:
|
||||
- operator
|
||||
- prometheus
|
||||
- kube-prometheus
|
||||
kubeVersion: '>=1.19.0-0'
|
||||
maintainers:
|
||||
- email: andrew@quadcorps.co.uk
|
||||
name: andrewgkew
|
||||
- email: gianrubio@gmail.com
|
||||
name: gianrubio
|
||||
- email: github.gkarthiks@gmail.com
|
||||
name: gkarthiks
|
||||
- email: kube-prometheus-stack@sisti.pt
|
||||
name: GMartinez-Sisti
|
||||
- email: github@jkroepke.de
|
||||
name: jkroepke
|
||||
- email: scott@r6by.com
|
||||
name: scottrigby
|
||||
- email: miroslav.hadzhiev@gmail.com
|
||||
name: Xtigyro
|
||||
- email: quentin.bisson@gmail.com
|
||||
name: QuentinBisson
|
||||
name: kube-prometheus-stack
|
||||
sources:
|
||||
- https://github.com/prometheus-community/helm-charts
|
||||
- https://github.com/prometheus-operator/kube-prometheus
|
||||
type: application
|
||||
version: 65.3.1
|
File diff suppressed because it is too large
Load Diff
@ -1,3 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: crds
|
||||
version: 0.0.0
|
@ -1,3 +0,0 @@
|
||||
# crds subchart
|
||||
|
||||
See: [https://github.com/prometheus-community/helm-charts/issues/3548](https://github.com/prometheus-community/helm-charts/issues/3548)
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,138 +0,0 @@
|
||||
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.77.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.1
|
||||
operator.prometheus.io/version: 0.77.1
|
||||
argocd.argoproj.io/sync-options: ServerSideApply=true
|
||||
name: prometheusrules.monitoring.coreos.com
|
||||
spec:
|
||||
group: monitoring.coreos.com
|
||||
names:
|
||||
categories:
|
||||
- prometheus-operator
|
||||
kind: PrometheusRule
|
||||
listKind: PrometheusRuleList
|
||||
plural: prometheusrules
|
||||
shortNames:
|
||||
- promrule
|
||||
singular: prometheusrule
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- name: v1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
The `PrometheusRule` custom resource definition (CRD) defines [alerting](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) and [recording](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) rules to be evaluated by `Prometheus` or `ThanosRuler` objects.
|
||||
|
||||
`Prometheus` and `ThanosRuler` objects select `PrometheusRule` objects using label and namespace selectors.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: Specification of desired alerting rule definitions for Prometheus.
|
||||
properties:
|
||||
groups:
|
||||
description: Content of Prometheus rule file
|
||||
items:
|
||||
description: RuleGroup is a list of sequentially evaluated recording and alerting rules.
|
||||
properties:
|
||||
interval:
|
||||
description: Interval determines how often rules in the group are evaluated.
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
limit:
|
||||
description: |-
|
||||
Limit the number of alerts an alerting rule and series a recording
|
||||
rule can produce.
|
||||
Limit is supported starting with Prometheus >= 2.31 and Thanos Ruler >= 0.24.
|
||||
type: integer
|
||||
name:
|
||||
description: Name of the rule group.
|
||||
minLength: 1
|
||||
type: string
|
||||
partial_response_strategy:
|
||||
description: |-
|
||||
PartialResponseStrategy is only used by ThanosRuler and will
|
||||
be ignored by Prometheus instances.
|
||||
More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response
|
||||
pattern: ^(?i)(abort|warn)?$
|
||||
type: string
|
||||
rules:
|
||||
description: List of alerting and recording rules.
|
||||
items:
|
||||
description: |-
|
||||
Rule describes an alerting or recording rule
|
||||
See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) rule
|
||||
properties:
|
||||
alert:
|
||||
description: |-
|
||||
Name of the alert. Must be a valid label value.
|
||||
Only one of `record` and `alert` must be set.
|
||||
type: string
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Annotations to add to each alert.
|
||||
Only valid for alerting rules.
|
||||
type: object
|
||||
expr:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: PromQL expression to evaluate.
|
||||
x-kubernetes-int-or-string: true
|
||||
for:
|
||||
description: Alerts are considered firing once they have been returned for this long.
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
keep_firing_for:
|
||||
description: KeepFiringFor defines how long an alert will continue firing after the condition that triggered it has cleared.
|
||||
minLength: 1
|
||||
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
|
||||
type: string
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Labels to add or overwrite.
|
||||
type: object
|
||||
record:
|
||||
description: |-
|
||||
Name of the time series to output to. Must be a valid metric name.
|
||||
Only one of `record` and `alert` must be set.
|
||||
type: string
|
||||
required:
|
||||
- expr
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-map-keys:
|
||||
- name
|
||||
x-kubernetes-list-type: map
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,23 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.vscode
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
OWNERS
|
@ -1,35 +0,0 @@
|
||||
annotations:
|
||||
artifacthub.io/license: Apache-2.0
|
||||
artifacthub.io/links: |
|
||||
- name: Chart Source
|
||||
url: https://github.com/grafana/helm-charts
|
||||
- name: Upstream Project
|
||||
url: https://github.com/grafana/grafana
|
||||
apiVersion: v2
|
||||
appVersion: 11.2.2
|
||||
description: The leading tool for querying and visualizing time series and metrics.
|
||||
home: https://grafana.com
|
||||
icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116
|
||||
keywords:
|
||||
- monitoring
|
||||
- metric
|
||||
kubeVersion: ^1.8.0-0
|
||||
maintainers:
|
||||
- email: zanhsieh@gmail.com
|
||||
name: zanhsieh
|
||||
- email: rluckie@cisco.com
|
||||
name: rtluckie
|
||||
- email: maor.friedman@redhat.com
|
||||
name: maorfr
|
||||
- email: miroslav.hadzhiev@gmail.com
|
||||
name: Xtigyro
|
||||
- email: mail@torstenwalter.de
|
||||
name: torstenwalter
|
||||
- email: github@jkroepke.de
|
||||
name: jkroepke
|
||||
name: grafana
|
||||
sources:
|
||||
- https://github.com/grafana/grafana
|
||||
- https://github.com/grafana/helm-charts
|
||||
type: application
|
||||
version: 8.5.5
|
@ -1,783 +0,0 @@
|
||||
# Grafana Helm Chart
|
||||
|
||||
* Installs the web dashboarding system [Grafana](http://grafana.org/)
|
||||
|
||||
## Get Repo Info
|
||||
|
||||
```console
|
||||
helm repo add grafana https://grafana.github.io/helm-charts
|
||||
helm repo update
|
||||
```
|
||||
|
||||
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```console
|
||||
helm install my-release grafana/grafana
|
||||
```
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the my-release deployment:
|
||||
|
||||
```console
|
||||
helm delete my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Upgrading an existing Release to a new major version
|
||||
|
||||
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
|
||||
incompatible breaking change needing manual actions.
|
||||
|
||||
### To 4.0.0 (And 3.12.1)
|
||||
|
||||
This version requires Helm >= 2.12.0.
|
||||
|
||||
### To 5.0.0
|
||||
|
||||
You have to add --force to your helm upgrade command as the labels of the chart have changed.
|
||||
|
||||
### To 6.0.0
|
||||
|
||||
This version requires Helm >= 3.1.0.
|
||||
|
||||
### To 7.0.0
|
||||
|
||||
For consistency with other Helm charts, the `global.image.registry` parameter was renamed
|
||||
to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action
|
||||
is required on upgrade. If you were previously setting `global.image.registry`, you will
|
||||
need to instead set `global.imageRegistry`.
|
||||
|
||||
## Configuration
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
|
||||
| `replicas` | Number of nodes | `1` |
|
||||
| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` |
|
||||
| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` |
|
||||
| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` |
|
||||
| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` |
|
||||
| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` |
|
||||
| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`|
|
||||
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
|
||||
| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
|
||||
| `image.registry` | Image registry | `docker.io` |
|
||||
| `image.repository` | Image repository | `grafana/grafana` |
|
||||
| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` |
|
||||
| `image.sha` | Image sha (optional) | `` |
|
||||
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
|
||||
| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` |
|
||||
| `service.enabled` | Enable grafana service | `true` |
|
||||
| `service.ipFamilies` | Kubernetes service IP families | `[]` |
|
||||
| `service.ipFamilyPolicy` | Kubernetes service IP family policy | `""` |
|
||||
| `service.type` | Kubernetes service type | `ClusterIP` |
|
||||
| `service.port` | Kubernetes port where service is exposed | `80` |
|
||||
| `service.portName` | Name of the port on the service | `service` |
|
||||
| `service.appProtocol` | Adds the appProtocol field to the service | `` |
|
||||
| `service.targetPort` | Internal service is port | `3000` |
|
||||
| `service.nodePort` | Kubernetes service nodePort | `nil` |
|
||||
| `service.annotations` | Service annotations (can be templated) | `{}` |
|
||||
| `service.labels` | Custom labels | `{}` |
|
||||
| `service.clusterIP` | internal cluster service IP | `nil` |
|
||||
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
|
||||
| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` |
|
||||
| `service.externalIPs` | service external IP addresses | `[]` |
|
||||
| `service.externalTrafficPolicy` | change the default externalTrafficPolicy | `nil` |
|
||||
| `headlessService` | Create a headless service | `false` |
|
||||
| `extraExposePorts` | Additional service ports for sidecar containers| `[]` |
|
||||
| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` |
|
||||
| `ingress.enabled` | Enables Ingress | `false` |
|
||||
| `ingress.annotations` | Ingress annotations (values are templated) | `{}` |
|
||||
| `ingress.labels` | Custom labels | `{}` |
|
||||
| `ingress.path` | Ingress accepted path | `/` |
|
||||
| `ingress.pathType` | Ingress type of path | `Prefix` |
|
||||
| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
|
||||
| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` |
|
||||
| `ingress.tls` | Ingress TLS configuration | `[]` |
|
||||
| `ingress.ingressClassName` | Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 | `""` |
|
||||
| `resources` | CPU/Memory resource requests/limits | `{}` |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment | `[]` |
|
||||
| `affinity` | Affinity settings for pod assignment | `{}` |
|
||||
| `extraInitContainers` | Init containers to add to the grafana pod | `{}` |
|
||||
| `extraContainers` | Sidecar containers to add to the grafana pod | `""` |
|
||||
| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` |
|
||||
| `extraLabels` | Custom labels for all manifests | `{}` |
|
||||
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
|
||||
| `persistence.enabled` | Use persistent volume to store data | `false` |
|
||||
| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
|
||||
| `persistence.size` | Size of persistent volume claim | `10Gi` |
|
||||
| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` |
|
||||
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
|
||||
| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
|
||||
| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
|
||||
| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
|
||||
| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` |
|
||||
| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` |
|
||||
| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` |
|
||||
| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` |
|
||||
| `persistence.disableWarning` | Hide NOTES warning, useful when persisting to a database | `false` |
|
||||
| `initChownData.enabled` | If false, don't reset data ownership at startup | true |
|
||||
| `initChownData.image.registry` | init-chown-data container image registry | `docker.io` |
|
||||
| `initChownData.image.repository` | init-chown-data container image repository | `busybox` |
|
||||
| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` |
|
||||
| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` |
|
||||
| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` |
|
||||
| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` |
|
||||
| `schedulerName` | Alternate scheduler name | `nil` |
|
||||
| `env` | Extra environment variables passed to pods | `{}` |
|
||||
| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` |
|
||||
| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
|
||||
| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` |
|
||||
| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` |
|
||||
| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` |
|
||||
| `enableServiceLinks` | Inject Kubernetes services as environment variables. | `true` |
|
||||
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
|
||||
| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
|
||||
| `extraVolumes` | Additional Grafana server volumes | `[]` |
|
||||
| `automountServiceAccountToken` | Mounted the service account token on the grafana pod. Mandatory, if sidecars are enabled | `true` |
|
||||
| `createConfigmap` | Enable creating the grafana configmap | `true` |
|
||||
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` |
|
||||
| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
|
||||
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
|
||||
| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
|
||||
| `alerting` | Configure grafana alerting (passed through tpl) | `{}` |
|
||||
| `notifiers` | Configure grafana notifiers | `{}` |
|
||||
| `dashboardProviders` | Configure grafana dashboard providers | `{}` |
|
||||
| `dashboards` | Dashboards to import | `{}` |
|
||||
| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
|
||||
| `grafana.ini` | Grafana's primary configuration | `{}` |
|
||||
| `global.imageRegistry` | Global image pull registry for all images. | `null` |
|
||||
| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` |
|
||||
| `ldap.enabled` | Enable LDAP authentication | `false` |
|
||||
| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
|
||||
| `ldap.config` | Grafana's LDAP configuration | `""` |
|
||||
| `annotations` | Deployment annotations | `{}` |
|
||||
| `labels` | Deployment labels | `{}` |
|
||||
| `podAnnotations` | Pod annotations | `{}` |
|
||||
| `podLabels` | Pod labels | `{}` |
|
||||
| `podPortName` | Name of the grafana port on the pod | `grafana` |
|
||||
| `lifecycleHooks` | Lifecycle hooks for postStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` |
|
||||
| `sidecar.image.registry` | Sidecar image registry | `quay.io` |
|
||||
| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` |
|
||||
| `sidecar.image.tag` | Sidecar image tag | `1.28.0` |
|
||||
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
|
||||
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
|
||||
| `sidecar.resources` | Sidecar resources | `{}` |
|
||||
| `sidecar.securityContext` | Sidecar securityContext | `{}` |
|
||||
| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` |
|
||||
| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` |
|
||||
| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` |
|
||||
| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` |
|
||||
| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
|
||||
| `sidecar.alerts.resource` | Whether the sidecar should look into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` |
|
||||
| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
|
||||
| `sidecar.alerts.initAlerts` | Set to true to deploy the alerts sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
|
||||
| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` |
|
||||
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
|
||||
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
|
||||
| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
|
||||
| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` |
|
||||
| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
|
||||
| `sidecar.dashboards.provider.folderUid` | Allows you to specify the static UID for the logical folder above | `""` |
|
||||
| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
|
||||
| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
|
||||
| `sidecar.dashboards.provider.type` | Provider type | `file` |
|
||||
| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` |
|
||||
| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
|
||||
| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
|
||||
| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
|
||||
| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` |
|
||||
| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
|
||||
| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
|
||||
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
|
||||
| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` |
|
||||
| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` |
|
||||
| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` |
|
||||
| `sidecar.dashboards.resource` | Whether the sidecar should look into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` |
|
||||
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
|
||||
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
|
||||
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` |
|
||||
| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
|
||||
| `sidecar.datasources.resource` | Whether the sidecar should look into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` |
|
||||
| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
|
||||
| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` |
|
||||
| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
|
||||
| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
|
||||
| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` |
|
||||
| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
|
||||
| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
|
||||
| `sidecar.notifiers.resource` | Whether the sidecar should look into secrets, configmaps or both. | `both` |
|
||||
| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` |
|
||||
| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
|
||||
| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` |
|
||||
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
|
||||
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
|
||||
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
|
||||
| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` |
|
||||
| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
|
||||
| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
|
||||
| `serviceAccount.automountServiceAccountToken` | Automount the service account token on all pods where this service account is used | `false` |
|
||||
| `serviceAccount.annotations` | ServiceAccount annotations | |
|
||||
| `serviceAccount.create` | Create service account | `true` |
|
||||
| `serviceAccount.labels` | ServiceAccount labels | `{}` |
|
||||
| `serviceAccount.name` | Service account name to use; when empty it will be set to the created account if `serviceAccount.create` is set, else to `default` | `` |
|
||||
| `serviceAccount.nameTest` | Service account name to use for the test pod; when empty it will be set to the created account if `serviceAccount.create` is set, else to `default` | `nil` |
|
||||
| `rbac.create` | Create and use RBAC resources | `true` |
|
||||
| `rbac.namespaced` | Creates a Role and RoleBinding instead of the default ClusterRole and ClusterRoleBinding for the grafana instance | `false` |
|
||||
| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` |
|
||||
| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` |
|
||||
| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` |
|
||||
| `rbac.extraRoleRules` | Additional rules to add to the Role | [] |
|
||||
| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] |
|
||||
| `command` | Define command to be executed by grafana container at startup | `nil` |
|
||||
| `args` | Define additional args if command is used | `nil` |
|
||||
| `testFramework.enabled` | Whether to create test-related resources | `true` |
|
||||
| `testFramework.image.registry` | `test-framework` image registry. | `docker.io` |
|
||||
| `testFramework.image.repository` | `test-framework` image repository. | `bats/bats` |
|
||||
| `testFramework.image.tag` | `test-framework` image tag. | `v1.4.1` |
|
||||
| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
|
||||
| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
|
||||
| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` |
|
||||
| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` |
|
||||
| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` |
|
||||
| `downloadDashboardsImage.registry` | Curl docker image registry | `docker.io` |
|
||||
| `downloadDashboardsImage.repository` | Curl docker image repository | `curlimages/curl` |
|
||||
| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` |
|
||||
| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` |
|
||||
| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` |
|
||||
| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) |
|
||||
| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` |
|
||||
| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | |
|
||||
| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` |
|
||||
| `serviceMonitor.path` | Path to scrape | `/metrics` |
|
||||
| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` |
|
||||
| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` |
|
||||
| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` |
|
||||
| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` |
|
||||
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` |
|
||||
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` |
|
||||
| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` |
|
||||
| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` |
|
||||
| `imageRenderer.image.registry` | image-renderer Image registry | `docker.io` |
|
||||
| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` |
|
||||
| `imageRenderer.image.tag` | image-renderer Image tag | `latest` |
|
||||
| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` |
|
||||
| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` |
|
||||
| `imageRenderer.env` | extra env-vars for image-renderer | `{}` |
|
||||
| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` |
|
||||
| `imageRenderer.extraConfigmapMounts` | Additional image-renderer configMap volume mounts (values are templated) | `[]` |
|
||||
| `imageRenderer.extraSecretMounts` | Additional image-renderer secret volume mounts | `[]` |
|
||||
| `imageRenderer.extraVolumeMounts` | Additional image-renderer volume mounts | `[]` |
|
||||
| `imageRenderer.extraVolumes` | Additional image-renderer volumes | `[]` |
|
||||
| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` |
|
||||
| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
|
||||
| `imageRenderer.podAnnotations` | image-renderer pod annotations | `{}` |
|
||||
| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
|
||||
| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` |
|
||||
| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` |
|
||||
| `imageRenderer.service.portName` | image-renderer service port name | `http` |
|
||||
| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` |
|
||||
| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` |
|
||||
| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` |
|
||||
| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` |
|
||||
| `imageRenderer.serverURL` | Remote image renderer url | `''` |
|
||||
| `imageRenderer.renderingCallbackURL` | Callback url for the Grafana image renderer | `''` |
|
||||
| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` |
|
||||
| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` |
|
||||
| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
|
||||
| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
|
||||
| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` |
|
||||
| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` |
|
||||
| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` |
|
||||
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` |
|
||||
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
|
||||
| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` |
|
||||
| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` |
|
||||
| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` |
|
||||
| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` |
|
||||
| `enableKubeBackwardCompatibility` | Enable backward compatibility for Kubernetes versions below 1.13 whose pod definition doesn't have the enableServiceLinks option | `false` |
|
||||
|
||||
### Example ingress with path
|
||||
|
||||
With grafana 6.3 and above
|
||||
|
||||
```yaml
|
||||
grafana.ini:
|
||||
server:
|
||||
domain: monitoring.example.com
|
||||
root_url: "%(protocol)s://%(domain)s/grafana"
|
||||
serve_from_sub_path: true
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- "monitoring.example.com"
|
||||
path: "/grafana"
|
||||
```
|
||||
|
||||
### Example of extraVolumeMounts and extraVolumes
|
||||
|
||||
Configure additional volumes with `extraVolumes` and volume mounts with `extraVolumeMounts`.
|
||||
|
||||
Example for `extraVolumeMounts` and corresponding `extraVolumes`:
|
||||
|
||||
```yaml
|
||||
extraVolumeMounts:
|
||||
- name: plugins
|
||||
mountPath: /var/lib/grafana/plugins
|
||||
subPath: configs/grafana/plugins
|
||||
readOnly: false
|
||||
- name: dashboards
|
||||
mountPath: /var/lib/grafana/dashboards
|
||||
hostPath: /usr/shared/grafana/dashboards
|
||||
readOnly: false
|
||||
|
||||
extraVolumes:
|
||||
- name: plugins
|
||||
existingClaim: existing-grafana-claim
|
||||
- name: dashboards
|
||||
hostPath: /usr/shared/grafana/dashboards
|
||||
```
|
||||
|
||||
Volumes default to `emptyDir`. Set to `persistentVolumeClaim`,
|
||||
`hostPath`, `csi`, or `configMap` for other types. For a
|
||||
`persistentVolumeClaim`, specify an existing claim name with
|
||||
`existingClaim`.
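
For other volume types, a minimal sketch (assuming the chart maps a `configMap` key on an `extraVolumes` entry to a ConfigMap volume source; `extra-config` and `my-extra-configmap` are placeholder names):

```yaml
extraVolumeMounts:
  - name: extra-config
    mountPath: /etc/grafana/extra
    readOnly: true

extraVolumes:
  - name: extra-config
    # Name of an existing ConfigMap to mount (placeholder name)
    configMap: my-extra-configmap
```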
|
||||
|
||||
## Import dashboards
|
||||
|
||||
There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method:
|
||||
|
||||
```yaml
|
||||
dashboards:
|
||||
default:
|
||||
some-dashboard:
|
||||
json: |
|
||||
{
|
||||
"annotations":
|
||||
|
||||
...
|
||||
# Complete json file here
|
||||
...
|
||||
|
||||
"title": "Some Dashboard",
|
||||
"uid": "abcd1234",
|
||||
"version": 1
|
||||
}
|
||||
custom-dashboard:
|
||||
# This is a path to a file inside the dashboards directory inside the chart directory
|
||||
file: dashboards/custom-dashboard.json
|
||||
prometheus-stats:
|
||||
# Ref: https://grafana.com/dashboards/2
|
||||
gnetId: 2
|
||||
revision: 2
|
||||
datasource: Prometheus
|
||||
loki-dashboard-quick-search:
|
||||
gnetId: 12019
|
||||
revision: 2
|
||||
datasource:
|
||||
- name: DS_PROMETHEUS
|
||||
value: Prometheus
|
||||
- name: DS_LOKI
|
||||
value: Loki
|
||||
local-dashboard:
|
||||
url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
|
||||
```
|
||||
|
||||
## BASE64 dashboards
|
||||
|
||||
Dashboards can be stored on a server that does not return JSON directly but instead returns a Base64 encoded file (e.g. Gerrit).
A new parameter has been added for the url use case: if you specify `b64content: true` next to the url entry, Base64 decoding is applied before the file is saved to disk.
If this entry is not set or is set to false, no decoding is applied to the file before saving it to disk.
|
||||
|
||||
### Gerrit use case
|
||||
|
||||
The Gerrit API for downloading files has the following schema: <https://yourgerritserver/a/{project-name}/branches/{branch-id}/files/{file-id}/content>, where {project-name} and
{file-id} usually contain '/' in their values, so those characters MUST be replaced by %2F. For example, if project-name is user/repo, branch-id is master and file-id is dir1/dir2/dashboard,
the url value is <https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content>.
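
A dashboard served by Gerrit can then be declared as follows (a minimal sketch; the url is the placeholder from above, and `b64content: true` triggers the Base64 decoding described earlier):

```yaml
dashboards:
  default:
    gerrit-dashboard:
      url: https://yourgerritserver/a/user%2Frepo/branches/master/files/dir1%2Fdir2%2Fdashboard/content
      # Decode the Base64 payload before writing the dashboard json to disk
      b64content: true
```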
|
||||
|
||||
## Sidecar for dashboards
|
||||
|
||||
If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
|
||||
pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
|
||||
a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
|
||||
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
|
||||
dashboards are deleted/updated.
|
||||
|
||||
We recommend using one configmap per dashboard: removing some of the dashboards from a configmap that holds several of them is currently not properly mirrored in grafana.
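
A minimal values sketch for enabling the dashboards sidecar (all keys are from the parameter table above; searching all namespaces is just an example):

```yaml
sidecar:
  dashboards:
    enabled: true
    # Label that dashboard ConfigMaps must carry (chart default)
    label: grafana_dashboard
    # Search all namespaces; defaults to the sidecar's own namespace when unset
    searchNamespace: ALL
```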
|
||||
|
||||
Example dashboard config:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: sample-grafana-dashboard
|
||||
labels:
|
||||
grafana_dashboard: "1"
|
||||
data:
|
||||
k8s-dashboard.json: |-
|
||||
[...]
|
||||
```
|
||||
|
||||
## Sidecar for datasources
|
||||
|
||||
If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
|
||||
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
|
||||
filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
|
||||
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
|
||||
the data sources in grafana can be imported.
|
||||
|
||||
Should you aim for reloading datasources in Grafana each time the config is changed, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://<svc-name>.<namespace>.svc.cluster.local/api/admin/provisioning/datasources/reload`.
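
For instance (a minimal sketch; `grafana` and `monitoring` stand in for your actual service name and namespace):

```yaml
sidecar:
  datasources:
    enabled: true
    # Do not skip the reload call after a config-map change
    skipReload: false
    reloadURL: http://grafana.monitoring.svc.cluster.local/api/admin/provisioning/datasources/reload
```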
|
||||
|
||||
Secrets are recommended over configmaps for this use case because datasources usually contain private
|
||||
data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
|
||||
|
||||
Example values to add a postgres datasource as a kubernetes secret:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: grafana-datasources
|
||||
labels:
|
||||
grafana_datasource: 'true' # default value for: sidecar.datasources.label
|
||||
stringData:
|
||||
pg-db.yaml: |-
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: My pg db datasource
|
||||
type: postgres
|
||||
url: my-postgresql-db:5432
|
||||
user: db-readonly-user
|
||||
secureJsonData:
|
||||
password: 'SUperSEcretPa$$word'
|
||||
jsonData:
|
||||
database: my_database
|
||||
sslmode: 'disable' # disable/require/verify-ca/verify-full
|
||||
maxOpenConns: 0 # Grafana v5.4+
|
||||
maxIdleConns: 2 # Grafana v5.4+
|
||||
connMaxLifetime: 14400 # Grafana v5.4+
|
||||
postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10
|
||||
timescaledb: false
|
||||
# <bool> allow users to edit datasources from the UI.
|
||||
editable: false
|
||||
```
|
||||
|
||||
Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
|
||||
|
||||
```yaml
|
||||
datasources:
|
||||
datasources.yaml:
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
# <string, required> name of the datasource. Required
|
||||
- name: Graphite
|
||||
# <string, required> datasource type. Required
|
||||
type: graphite
|
||||
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
|
||||
access: proxy
|
||||
# <int> org id. will default to orgId 1 if not specified
|
||||
orgId: 1
|
||||
# <string> url
|
||||
url: http://localhost:8080
|
||||
# <string> database password, if used
|
||||
password:
|
||||
# <string> database user, if used
|
||||
user:
|
||||
# <string> database name, if used
|
||||
database:
|
||||
# <bool> enable/disable basic auth
|
||||
basicAuth:
|
||||
# <string> basic auth username
|
||||
basicAuthUser:
|
||||
# <string> basic auth password
|
||||
basicAuthPassword:
|
||||
# <bool> enable/disable with credentials headers
|
||||
withCredentials:
|
||||
# <bool> mark as default datasource. Max one per org
|
||||
isDefault:
|
||||
# <map> fields that will be converted to json and stored in json_data
|
||||
jsonData:
|
||||
graphiteVersion: "1.1"
|
||||
tlsAuth: true
|
||||
tlsAuthWithCACert: true
|
||||
# <string> json object of data that will be encrypted.
|
||||
secureJsonData:
|
||||
tlsCACert: "..."
|
||||
tlsClientCert: "..."
|
||||
tlsClientKey: "..."
|
||||
version: 1
|
||||
# <bool> allow users to edit datasources from the UI.
|
||||
editable: false
|
||||
```
|
||||
|
||||
## Sidecar for notifiers
|
||||
|
||||
If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana
|
||||
pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
|
||||
filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in
|
||||
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
|
||||
the notification channels in grafana can be imported. The secrets must be created before
|
||||
`helm install` so that the notifiers init container can list the secrets.
|
||||
|
||||
Secrets are recommended over configmaps for this use case because alert notification channels usually contain
|
||||
private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
|
||||
|
||||
Example notifiers config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels):
|
||||
|
||||
```yaml
|
||||
notifiers:
|
||||
- name: notification-channel-1
|
||||
type: slack
|
||||
uid: notifier1
|
||||
# either
|
||||
org_id: 2
|
||||
# or
|
||||
org_name: Main Org.
|
||||
is_default: true
|
||||
send_reminder: true
|
||||
frequency: 1h
|
||||
disable_resolve_message: false
|
||||
# See `Supported Settings` section for settings supporter for each
|
||||
# alert notification type.
|
||||
settings:
|
||||
recipient: 'XXX'
|
||||
token: 'xoxb'
|
||||
uploadImage: true
|
||||
url: https://slack.com
|
||||
|
||||
delete_notifiers:
|
||||
- name: notification-channel-1
|
||||
uid: notifier1
|
||||
org_id: 2
|
||||
- name: notification-channel-2
|
||||
# default org_id: 1
|
||||
```
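
When the notifiers sidecar is used instead, the same provisioning config can be wrapped in a Secret that carries the sidecar label (a minimal sketch; the Secret name is a placeholder and `grafana_notifier` is the chart default for `sidecar.notifiers.label`):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: grafana-notifier-channels
  labels:
    grafana_notifier: "true"
stringData:
  notification-channels.yaml: |-
    notifiers:
      - name: notification-channel-1
        type: slack
        uid: notifier1
        org_id: 2
        is_default: true
        settings:
          recipient: 'XXX'
          token: 'xoxb'
          url: https://slack.com
```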
|
||||
|
||||
## Sidecar for alerting resources
|
||||
|
||||
If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana
|
||||
pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with
|
||||
a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written
|
||||
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated; deletions, however, are a little more complicated (see below).
|
||||
|
||||
This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/).
|
||||
|
||||
To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)).
|
||||
You can use either JSON or YAML format.
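
A minimal values sketch for enabling the alerts sidecar (keys from the parameter table above; the `monitoring` namespace is only an example):

```yaml
sidecar:
  alerts:
    enabled: true
    # Label that alerting ConfigMaps must carry (chart default)
    label: grafana_alert
    # Restrict the search to a single namespace (example value)
    searchNamespace: monitoring
```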
|
||||
|
||||
Example config for an alert rule:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: sample-grafana-alert
|
||||
labels:
|
||||
grafana_alert: "1"
|
||||
data:
|
||||
k8s-alert.yml: |-
|
||||
apiVersion: 1
|
||||
groups:
|
||||
- orgId: 1
|
||||
name: k8s-alert
|
||||
[...]
|
||||
```
|
||||
|
||||
Deleting provisioned alert rules is a two-step process: first delete the configmap which defined the alert rule,
then create a configuration which deletes the alert rule.
|
||||
|
||||
Example deletion configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: delete-sample-grafana-alert
|
||||
namespace: monitoring
|
||||
labels:
|
||||
grafana_alert: "1"
|
||||
data:
|
||||
delete-k8s-alert.yml: |-
|
||||
apiVersion: 1
|
||||
deleteRules:
|
||||
- orgId: 1
|
||||
uid: 16624780-6564-45dc-825c-8bded4ad92d3
|
||||
```
|
||||
|
||||
## Statically provision alerting resources
|
||||
|
||||
If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly, you can use the `alerting` config option instead of the sidecar option above.
This grabs the alerting config and applies it statically when the helm chart is rendered.
|
||||
|
||||
There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method:
|
||||
|
||||
```yaml
|
||||
alerting:
|
||||
team1-alert-rules.yaml:
|
||||
file: alerting/team1/rules.yaml
|
||||
team2-alert-rules.yaml:
|
||||
file: alerting/team2/rules.yaml
|
||||
team3-alert-rules.yaml:
|
||||
file: alerting/team3/rules.yaml
|
||||
notification-policies.yaml:
|
||||
file: alerting/shared/notification-policies.yaml
|
||||
notification-templates.yaml:
|
||||
file: alerting/shared/notification-templates.yaml
|
||||
contactpoints.yaml:
|
||||
apiVersion: 1
|
||||
contactPoints:
|
||||
- orgId: 1
|
||||
name: Slack channel
|
||||
receivers:
|
||||
- uid: default-receiver
|
||||
type: slack
|
||||
settings:
|
||||
# Webhook URL to be filled in
|
||||
url: ""
|
||||
# We need to escape double curly braces for the tpl function.
|
||||
text: '{{ `{{ template "default.message" . }}` }}'
|
||||
title: '{{ `{{ template "default.title" . }}` }}'
|
||||
```
|
||||
|
||||
The two possibilities for static alerting resource provisioning are:
|
||||
|
||||
* Inlining the file contents as shown for contact points in the above example.
|
||||
* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example.
|
||||
|
||||
### Important notes on file provisioning
|
||||
|
||||
* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning.
|
||||
* The chart supports importing YAML and JSON files.
|
||||
* The filename must be unique, otherwise one volume mount will overwrite the other.
|
||||
* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped.
|
||||
* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance.
|
||||
* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases.
|
||||
|
||||
## How to serve Grafana with a path prefix (/grafana)
|
||||
|
||||
In order to serve Grafana with a prefix (e.g., <http://example.com/grafana>), add the following to your values.yaml.
|
||||
|
||||
```yaml
|
||||
ingress:
|
||||
enabled: true
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: "nginx"
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$1
|
||||
nginx.ingress.kubernetes.io/use-regex: "true"
|
||||
|
||||
path: /grafana/?(.*)
|
||||
hosts:
|
||||
- k8s.example.dev
|
||||
|
||||
grafana.ini:
|
||||
server:
|
||||
root_url: http://localhost:3000/grafana # this host can be localhost
|
||||
```
|
||||
|
||||
## How to securely reference secrets in grafana.ini
|
||||
|
||||
This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets.
|
||||
|
||||
In grafana.ini:
|
||||
|
||||
```yaml
|
||||
grafana.ini:
|
||||
[auth.generic_oauth]
|
||||
enabled = true
|
||||
client_id = $__file{/etc/secrets/auth_generic_oauth/client_id}
|
||||
client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret}
|
||||
```
|
||||
|
||||
Existing secret, or created along with helm:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: auth-generic-oauth-secret
|
||||
type: Opaque
|
||||
stringData:
|
||||
client_id: <value>
|
||||
client_secret: <value>
|
||||
```
|
||||
|
||||
Include in the `extraSecretMounts` configuration flag:
|
||||
|
||||
```yaml
|
||||
extraSecretMounts:
|
||||
- name: auth-generic-oauth-secret-mount
|
||||
secretName: auth-generic-oauth-secret
|
||||
defaultMode: 0440
|
||||
mountPath: /etc/secrets/auth_generic_oauth
|
||||
readOnly: true
|
||||
```
|
||||
|
||||
### extraSecretMounts using a Container Storage Interface (CSI) provider
|
||||
|
||||
This example uses a CSI driver, e.g. retrieving secrets using the [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure):
|
||||
|
||||
```yaml
|
||||
extraSecretMounts:
|
||||
- name: secrets-store-inline
|
||||
mountPath: /run/secrets
|
||||
readOnly: true
|
||||
csi:
|
||||
driver: secrets-store.csi.k8s.io
|
||||
readOnly: true
|
||||
volumeAttributes:
|
||||
secretProviderClass: "my-provider"
|
||||
nodePublishSecretRef:
|
||||
name: akv-creds
|
||||
```
|
||||
|
||||
## Image Renderer Plug-In
|
||||
|
||||
This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker)
|
||||
|
||||
```yaml
|
||||
imageRenderer:
|
||||
enabled: true
|
||||
```
|
||||
|
||||
### Image Renderer NetworkPolicy
|
||||
|
||||
By default, the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance.
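
This behaviour is controlled by the `imageRenderer.networkPolicy` values from the parameter table above, for example:

```yaml
imageRenderer:
  enabled: true
  networkPolicy:
    # Only accept ingress from the grafana pods created by this release
    limitIngress: true
    # Additionally restrict egress to the grafana pods
    limitEgress: true
```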
|
||||
|
||||
### High Availability for unified alerting
|
||||
|
||||
If you want to run Grafana in a high availability cluster you need to enable
|
||||
the headless service by setting `headlessService: true` in your `values.yaml`
|
||||
file.
|
||||
|
||||
As a next step, you have to set up `grafana.ini` in your `values.yaml` so that
it makes use of the headless service to obtain all the IPs of the
cluster. Replace ``{{ Name }}`` with the name of your helm deployment.
|
||||
|
||||
```yaml
|
||||
grafana.ini:
|
||||
...
|
||||
unified_alerting:
|
||||
enabled: true
|
||||
ha_peers: {{ Name }}-headless:9094
|
||||
ha_listen_address: ${POD_IP}:9094
|
||||
ha_advertise_address: ${POD_IP}:9094
|
||||
|
||||
alerting:
|
||||
enabled: false
|
||||
```
|
@ -1 +0,0 @@
|
||||
# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.
|
@ -1,16 +0,0 @@
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- podAffinityTerm:
|
||||
labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: grafana-test
|
||||
app.kubernetes.io/name: grafana
|
||||
topologyKey: failure-domain.beta.kubernetes.io/zone
|
||||
weight: 100
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: grafana-test
|
||||
app.kubernetes.io/name: grafana
|
||||
topologyKey: kubernetes.io/hostname
|
@ -1,53 +0,0 @@
|
||||
dashboards:
|
||||
my-provider:
|
||||
my-awesome-dashboard:
|
||||
# An empty but valid dashboard
|
||||
json: |
|
||||
{
|
||||
"__inputs": [],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "6.3.5"
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"panels": [],
|
||||
"schemaVersion": 19,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": ["5s"]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "Dummy Dashboard",
|
||||
"uid": "IdcYQooWk",
|
||||
"version": 1
|
||||
}
|
||||
datasource: Prometheus
|
@ -1,19 +0,0 @@
|
||||
dashboards:
|
||||
my-provider:
|
||||
my-awesome-dashboard:
|
||||
gnetId: 10000
|
||||
revision: 1
|
||||
datasource: Prometheus
|
||||
dashboardProviders:
|
||||
dashboardproviders.yaml:
|
||||
apiVersion: 1
|
||||
providers:
|
||||
- name: 'my-provider'
|
||||
orgId: 1
|
||||
folder: ''
|
||||
type: file
|
||||
updateIntervalSeconds: 10
|
||||
disableDeletion: true
|
||||
editable: true
|
||||
options:
|
||||
path: /var/lib/grafana/dashboards/my-provider
|
@ -1,7 +0,0 @@
|
||||
extraConfigmapMounts:
|
||||
- name: '{{ include "grafana.fullname" . }}'
|
||||
configMap: '{{ include "grafana.fullname" . }}'
|
||||
mountPath: /var/lib/grafana/dashboards/test-dashboard.json
|
||||
# This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap
|
||||
subPath: grafana.ini
|
||||
readOnly: true
|
@ -1,107 +0,0 @@
|
||||
podLabels:
|
||||
customLableA: Aaaaa
|
||||
imageRenderer:
|
||||
enabled: true
|
||||
env:
|
||||
RENDERING_ARGS: --disable-gpu,--window-size=1280x758
|
||||
RENDERING_MODE: clustered
|
||||
podLabels:
|
||||
customLableB: Bbbbb
|
||||
networkPolicy:
|
||||
limitIngress: true
|
||||
limitEgress: true
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1000m
|
||||
memory: 1000Mi
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 50Mi
|
||||
extraVolumes:
|
||||
- name: empty-renderer-volume
|
||||
emptyDir: {}
|
||||
extraVolumeMounts:
|
||||
- mountPath: /tmp/renderer
|
||||
name: empty-renderer-volume
|
||||
extraConfigmapMounts:
|
||||
- name: renderer-config
|
||||
mountPath: /usr/src/app/config.json
|
||||
subPath: renderer-config.json
|
||||
configMap: image-renderer-config
|
||||
extraSecretMounts:
|
||||
- name: renderer-certificate
|
||||
mountPath: /usr/src/app/certs/
|
||||
secretName: image-renderer-certificate
|
||||
readOnly: true
|
||||
|
||||
extraObjects:
|
||||
- apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: image-renderer-config
|
||||
data:
|
||||
renderer-config.json: |
|
||||
{
|
||||
"service": {
|
||||
"host": null,
|
||||
"port": 8081,
|
||||
"protocol": "http",
|
||||
"certFile": "",
|
||||
"certKey": "",
|
||||
|
||||
"metrics": {
|
||||
"enabled": true,
|
||||
"collectDefaultMetrics": true,
|
||||
"requestDurationBuckets": [1, 5, 7, 9, 11, 13, 15, 20, 30]
|
||||
},
|
||||
|
||||
"logging": {
|
||||
"level": "info",
|
||||
"console": {
|
||||
"json": true,
|
||||
"colorize": false
|
||||
}
|
||||
},
|
||||
|
||||
"security": {
|
||||
"authToken": "-"
|
||||
}
|
||||
},
|
||||
"rendering": {
|
||||
"chromeBin": null,
|
||||
"args": ["--no-sandbox", "--disable-gpu"],
|
||||
"ignoresHttpsErrors": false,
|
||||
|
||||
"timezone": null,
|
||||
"acceptLanguage": null,
|
||||
"width": 1000,
|
||||
"height": 500,
|
||||
"deviceScaleFactor": 1,
|
||||
"maxWidth": 3080,
|
||||
"maxHeight": 3000,
|
||||
"maxDeviceScaleFactor": 4,
|
||||
"pageZoomLevel": 1,
|
||||
"headed": false,
|
||||
|
||||
"mode": "default",
|
||||
"emulateNetworkConditions": false,
|
||||
"clustering": {
|
||||
"monitor": false,
|
||||
"mode": "browser",
|
||||
"maxConcurrency": 5,
|
||||
"timeout": 30
|
||||
},
|
||||
|
||||
"verboseLogging": false,
|
||||
"dumpio": false,
|
||||
"timingMetrics": false
|
||||
}
|
||||
}
|
||||
- apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: image-renderer-certificate
|
||||
type: Opaque
|
||||
data:
|
||||
# Decodes to 'PLACEHOLDER CERTIFICATE'
|
||||
not-a-real-certificate: UExBQ0VIT0xERVIgQ0VSVElGSUNBVEU=
|
@ -1,6 +0,0 @@
|
||||
global:
|
||||
environment: prod
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- monitoring-{{ .Values.global.environment }}.example.com
|
@ -1,3 +0,0 @@
|
||||
persistence:
|
||||
type: pvc
|
||||
enabled: true
|
@ -1,38 +0,0 @@
|
||||
extraObjects:
|
||||
- apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: '{{ include "grafana.fullname" . }}-test'
|
||||
data:
|
||||
var1: "value1"
|
||||
- apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: '{{ include "grafana.fullname" . }}-test'
|
||||
type: Opaque
|
||||
data:
|
||||
var2: "dmFsdWUy"
|
||||
|
||||
sidecar:
|
||||
dashboards:
|
||||
enabled: true
|
||||
envValueFrom:
|
||||
VAR1:
|
||||
configMapKeyRef:
|
||||
name: '{{ include "grafana.fullname" . }}-test'
|
||||
key: var1
|
||||
VAR2:
|
||||
secretKeyRef:
|
||||
name: '{{ include "grafana.fullname" . }}-test'
|
||||
key: var2
|
||||
datasources:
|
||||
enabled: true
|
||||
envValueFrom:
|
||||
VAR1:
|
||||
configMapKeyRef:
|
||||
name: '{{ include "grafana.fullname" . }}-test'
|
||||
key: var1
|
||||
VAR2:
|
||||
secretKeyRef:
|
||||
name: '{{ include "grafana.fullname" . }}-test'
|
||||
key: var2
|
@ -1 +0,0 @@
|
||||
{}
|
@ -1,55 +0,0 @@
|
||||
1. Get your '{{ .Values.adminUser }}' user password by running:
|
||||
|
||||
kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo
|
||||
|
||||
|
||||
2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
|
||||
|
||||
{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local
|
||||
{{ if .Values.ingress.enabled }}
|
||||
If you bind grafana to 80, please update values in values.yaml and reinstall:
|
||||
```
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
runAsGroup: 0
|
||||
fsGroup: 0
|
||||
|
||||
command:
|
||||
- "setcap"
|
||||
- "'cap_net_bind_service=+ep'"
|
||||
- "/usr/sbin/grafana-server &&"
|
||||
- "sh"
|
||||
- "/run.sh"
|
||||
```
|
||||
For details, refer to https://grafana.com/docs/installation/configuration/#http-port;
otherwise grafana will always crash.
|
||||
|
||||
From outside the cluster, the server URL(s) are:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
http://{{ . }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
Get the Grafana URL to visit by running these commands in the same shell:
|
||||
{{- if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch its status by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||
http://$SERVICE_IP:{{ .Values.service.port -}}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
3. Login with the password from step 1 and the username: {{ .Values.adminUser }}
|
||||
|
||||
{{- if and (not .Values.persistence.enabled) (not .Values.persistence.disableWarning) }}
|
||||
#################################################################################
|
||||
###### WARNING: Persistence is disabled!!! You will lose your data when #####
|
||||
###### the Grafana pod is terminated. #####
|
||||
#################################################################################
|
||||
{{- end }}
|
@ -1,172 +0,0 @@
|
||||
{{/*
|
||||
Generate config map data
|
||||
*/}}
|
||||
{{- define "grafana.configData" -}}
|
||||
{{ include "grafana.assertNoLeakedSecrets" . }}
|
||||
{{- $files := .Files }}
|
||||
{{- $root := . -}}
|
||||
{{- with .Values.plugins }}
|
||||
plugins: {{ join "," . }}
|
||||
{{- end }}
|
||||
grafana.ini: |
|
||||
{{- range $elem, $elemVal := index .Values "grafana.ini" }}
|
||||
{{- if not (kindIs "map" $elemVal) }}
|
||||
{{- if kindIs "invalid" $elemVal }}
|
||||
{{ $elem }} =
|
||||
{{- else if kindIs "string" $elemVal }}
|
||||
{{ $elem }} = {{ tpl $elemVal $ }}
|
||||
{{- else }}
|
||||
{{ $elem }} = {{ $elemVal }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := index .Values "grafana.ini" }}
|
||||
{{- if kindIs "map" $value }}
|
||||
[{{ $key }}]
|
||||
{{- range $elem, $elemVal := $value }}
|
||||
{{- if kindIs "invalid" $elemVal }}
|
||||
{{ $elem }} =
|
||||
{{- else if kindIs "string" $elemVal }}
|
||||
{{ $elem }} = {{ tpl $elemVal $ }}
|
||||
{{- else }}
|
||||
{{ $elem }} = {{ $elemVal }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- range $key, $value := .Values.datasources }}
|
||||
{{- if not (hasKey $value "secret") }}
|
||||
{{ $key }}: |
|
||||
{{- tpl (toYaml $value | nindent 2) $root }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- range $key, $value := .Values.notifiers }}
|
||||
{{- if not (hasKey $value "secret") }}
|
||||
{{ $key }}: |
|
||||
{{- toYaml $value | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- range $key, $value := .Values.alerting }}
|
||||
{{- if (hasKey $value "file") }}
|
||||
{{ $key }}:
|
||||
{{- toYaml ( $files.Get $value.file ) | nindent 2 }}
|
||||
{{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}}
|
||||
{{/* will be stored inside secret generated by "configSecret.yaml"*/}}
|
||||
{{- else }}
|
||||
{{ $key }}: |
|
||||
{{- tpl (toYaml $value | nindent 2) $root }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- range $key, $value := .Values.dashboardProviders }}
|
||||
{{ $key }}: |
|
||||
{{- toYaml $value | nindent 2 }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.dashboards }}
|
||||
download_dashboards.sh: |
|
||||
#!/usr/bin/env sh
|
||||
set -euf
|
||||
{{- if .Values.dashboardProviders }}
|
||||
{{- range $key, $value := .Values.dashboardProviders }}
|
||||
{{- range $value.providers }}
|
||||
mkdir -p {{ .options.path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{ $dashboardProviders := .Values.dashboardProviders }}
|
||||
{{- range $provider, $dashboards := .Values.dashboards }}
|
||||
{{- range $key, $value := $dashboards }}
|
||||
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
|
||||
curl -skf \
|
||||
--connect-timeout 60 \
|
||||
--max-time 60 \
|
||||
{{- if not $value.b64content }}
|
||||
{{- if not $value.acceptHeader }}
|
||||
-H "Accept: application/json" \
|
||||
{{- else }}
|
||||
-H "Accept: {{ $value.acceptHeader }}" \
|
||||
{{- end }}
|
||||
{{- if $value.token }}
|
||||
-H "Authorization: token {{ $value.token }}" \
|
||||
{{- end }}
|
||||
{{- if $value.bearerToken }}
|
||||
-H "Authorization: Bearer {{ $value.bearerToken }}" \
|
||||
{{- end }}
|
||||
{{- if $value.basic }}
|
||||
-H "Authorization: Basic {{ $value.basic }}" \
|
||||
{{- end }}
|
||||
{{- if $value.gitlabToken }}
|
||||
-H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \
|
||||
{{- end }}
|
||||
-H "Content-Type: application/json;charset=UTF-8" \
|
||||
{{- end }}
|
||||
{{- $dpPath := "" -}}
|
||||
{{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }}
|
||||
{{- if eq $kd.name $provider }}
|
||||
{{- $dpPath = $kd.options.path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $value.url }}
|
||||
"{{ $value.url }}" \
|
||||
{{- else }}
|
||||
"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \
|
||||
{{- end }}
|
||||
{{- if $value.datasource }}
|
||||
{{- if kindIs "string" $value.datasource }}
|
||||
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \
|
||||
{{- end }}
|
||||
{{- if kindIs "slice" $value.datasource }}
|
||||
{{- range $value.datasource }}
|
||||
| sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $value.b64content }}
|
||||
| base64 -d \
|
||||
{{- end }}
|
||||
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json"
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Generate dashboard json config map data
|
||||
*/}}
|
||||
{{- define "grafana.configDashboardProviderData" -}}
|
||||
provider.yaml: |-
|
||||
apiVersion: 1
|
||||
providers:
|
||||
- name: '{{ .Values.sidecar.dashboards.provider.name }}'
|
||||
orgId: {{ .Values.sidecar.dashboards.provider.orgid }}
|
||||
{{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
|
||||
folder: '{{ .Values.sidecar.dashboards.provider.folder }}'
|
||||
folderUid: '{{ .Values.sidecar.dashboards.provider.folderUid }}'
|
||||
{{- end }}
|
||||
type: {{ .Values.sidecar.dashboards.provider.type }}
|
||||
disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}
|
||||
allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}
|
||||
updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }}
|
||||
options:
|
||||
foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
|
||||
path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "grafana.secretsData" -}}
|
||||
{{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}
|
||||
admin-user: {{ .Values.adminUser | b64enc | quote }}
|
||||
{{- if .Values.adminPassword }}
|
||||
admin-password: {{ .Values.adminPassword | b64enc | quote }}
|
||||
{{- else }}
|
||||
admin-password: {{ include "grafana.password" . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if not .Values.ldap.existingSecret }}
|
||||
ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
@ -1,276 +0,0 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "grafana.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "grafana.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "grafana.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account
|
||||
*/}}
|
||||
{{- define "grafana.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "grafana.serviceAccountNameTest" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.nameTest }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
|
||||
*/}}
|
||||
{{- define "grafana.namespace" -}}
|
||||
{{- if .Values.namespaceOverride }}
|
||||
{{- .Values.namespaceOverride }}
|
||||
{{- else }}
|
||||
{{- .Release.Namespace }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "grafana.labels" -}}
|
||||
helm.sh/chart: {{ include "grafana.chart" . }}
|
||||
{{ include "grafana.selectorLabels" . }}
|
||||
{{- if or .Chart.AppVersion .Values.image.tag }}
|
||||
app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | trunc 63 | trimSuffix "-" | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- with .Values.extraLabels }}
|
||||
{{ toYaml . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "grafana.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "grafana.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "grafana.imageRenderer.labels" -}}
|
||||
helm.sh/chart: {{ include "grafana.chart" . }}
|
||||
{{ include "grafana.imageRenderer.selectorLabels" . }}
|
||||
{{- if or .Chart.AppVersion .Values.image.tag }}
|
||||
app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | trunc 63 | trimSuffix "-" | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels ImageRenderer
|
||||
*/}}
|
||||
{{- define "grafana.imageRenderer.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Looks up an existing secret and reuses its password; if none exists, it generates
a new password and uses that.
|
||||
*/}}
|
||||
{{- define "grafana.password" -}}
|
||||
{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }}
|
||||
{{- if $secret }}
|
||||
{{- index $secret "data" "admin-password" }}
|
||||
{{- else }}
|
||||
{{- (randAlphaNum 40) | b64enc | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
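Note that `lookup` only returns data against a live cluster, so a plain `helm template` run always falls through to the random-password branch. If that churn is unwanted, the chart can be pointed at a pre-created Secret instead; a sketch under the assumption that such a Secret exists (its name here is hypothetical):

```yaml
admin:
  existingSecret: grafana-admin-credentials   # hypothetical, created out of band
  userKey: admin-user                          # keys inside that Secret
  passwordKey: admin-password
```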
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for rbac.
|
||||
*/}}
|
||||
{{- define "grafana.rbac.apiVersion" -}}
|
||||
{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }}
|
||||
{{- print "rbac.authorization.k8s.io/v1" }}
|
||||
{{- else }}
|
||||
{{- print "rbac.authorization.k8s.io/v1beta1" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for ingress.
|
||||
*/}}
|
||||
{{- define "grafana.ingress.apiVersion" -}}
|
||||
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }}
|
||||
{{- print "networking.k8s.io/v1" }}
|
||||
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
|
||||
{{- print "networking.k8s.io/v1beta1" }}
|
||||
{{- else }}
|
||||
{{- print "extensions/v1beta1" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
|
||||
*/}}
|
||||
{{- define "grafana.hpa.apiVersion" -}}
|
||||
{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }}
|
||||
{{- print "autoscaling/v2" }}
|
||||
{{- else }}
|
||||
{{- print "autoscaling/v2beta2" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for podDisruptionBudget.
|
||||
*/}}
|
||||
{{- define "grafana.podDisruptionBudget.apiVersion" -}}
|
||||
{{- if $.Values.podDisruptionBudget.apiVersion }}
|
||||
{{- print $.Values.podDisruptionBudget.apiVersion }}
|
||||
{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
|
||||
{{- print "policy/v1" }}
|
||||
{{- else }}
|
||||
{{- print "policy/v1beta1" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Return if ingress is stable.
|
||||
*/}}
|
||||
{{- define "grafana.ingress.isStable" -}}
|
||||
{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Return if ingress supports ingressClassName.
|
||||
*/}}
|
||||
{{- define "grafana.ingress.supportsIngressClassName" -}}
|
||||
{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Return if ingress supports pathType.
|
||||
*/}}
|
||||
{{- define "grafana.ingress.supportsPathType" -}}
|
||||
{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets})
|
||||
*/}}
|
||||
{{- define "grafana.imagePullSecrets" -}}
|
||||
{{- $root := .root }}
|
||||
{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }}
|
||||
{{- if eq (typeOf .) "map[string]interface {}" }}
|
||||
- {{ toYaml (dict "name" (tpl .name $root)) | trim }}
|
||||
{{- else }}
|
||||
- name: {{ tpl . $root }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
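Because of the type check above, both plain strings and `name:` maps are accepted, and the chart-level list is concatenated with `global.imagePullSecrets`; a hedged values sketch (the secret names are illustrative):

```yaml
global:
  imagePullSecrets:
    - name: org-registry-creds    # map form
image:
  pullSecrets:
    - grafana-registry-creds      # plain string form, templated through tpl
```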
|
||||
|
||||
|
||||
{{/*
|
||||
Checks whether or not the configSecret secret has to be created
|
||||
*/}}
|
||||
{{- define "grafana.shouldCreateConfigSecret" -}}
|
||||
{{- $secretFound := false -}}
|
||||
{{- range $key, $value := .Values.datasources }}
|
||||
{{- if hasKey $value "secret" }}
|
||||
{{- $secretFound = true}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.notifiers }}
|
||||
{{- if hasKey $value "secret" }}
|
||||
{{- $secretFound = true}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.alerting }}
|
||||
{{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }}
|
||||
{{- $secretFound = true}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $secretFound}}
|
||||
{{- end -}}
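So the extra config Secret is created as soon as any `datasources`, `notifiers`, or `alerting` entry carries a `secret` key (or `secretFile` for alerting). A hedged values sketch of an entry that would flip the helper to true (the datasource itself is illustrative):

```yaml
datasources:
  secure-datasources.yaml:
    secret:                       # presence of this key triggers the config Secret
      apiVersion: 1
      datasources:
        - name: Loki
          type: loki
          url: http://loki-gateway.logging.svc:3100
```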
|
||||
|
||||
{{/*
|
||||
Checks whether the user is attempting to store secrets in plaintext
|
||||
in the grafana.ini configmap
|
||||
*/}}
|
||||
{{/* grafana.assertNoLeakedSecrets checks for sensitive keys in values */}}
|
||||
{{- define "grafana.assertNoLeakedSecrets" -}}
|
||||
{{- $sensitiveKeysYaml := `
|
||||
sensitiveKeys:
|
||||
- path: ["database", "password"]
|
||||
- path: ["smtp", "password"]
|
||||
- path: ["security", "secret_key"]
|
||||
- path: ["security", "admin_password"]
|
||||
- path: ["auth.basic", "password"]
|
||||
- path: ["auth.ldap", "bind_password"]
|
||||
- path: ["auth.google", "client_secret"]
|
||||
- path: ["auth.github", "client_secret"]
|
||||
- path: ["auth.gitlab", "client_secret"]
|
||||
- path: ["auth.generic_oauth", "client_secret"]
|
||||
- path: ["auth.okta", "client_secret"]
|
||||
- path: ["auth.azuread", "client_secret"]
|
||||
- path: ["auth.grafana_com", "client_secret"]
|
||||
- path: ["auth.grafananet", "client_secret"]
|
||||
- path: ["azure", "user_identity_client_secret"]
|
||||
- path: ["unified_alerting", "ha_redis_password"]
|
||||
- path: ["metrics", "basic_auth_password"]
|
||||
- path: ["external_image_storage.s3", "secret_key"]
|
||||
- path: ["external_image_storage.webdav", "password"]
|
||||
- path: ["external_image_storage.azure_blob", "account_key"]
|
||||
` | fromYaml -}}
|
||||
{{- if $.Values.assertNoLeakedSecrets -}}
|
||||
{{- $grafanaIni := index .Values "grafana.ini" -}}
|
||||
{{- range $_, $secret := $sensitiveKeysYaml.sensitiveKeys -}}
|
||||
{{- $currentMap := $grafanaIni -}}
|
||||
{{- $shouldContinue := true -}}
|
||||
{{- range $index, $elem := $secret.path -}}
|
||||
{{- if and $shouldContinue (hasKey $currentMap $elem) -}}
|
||||
{{- if eq (len $secret.path) (add1 $index) -}}
|
||||
{{- if not (regexMatch "\\$(?:__(?:env|file|vault))?{[^}]+}" (index $currentMap $elem)) -}}
|
||||
{{- fail (printf "Sensitive key '%s' should not be defined explicitly in values. Use variable expansion instead. You can disable this client-side validation by changing the value of assertNoLeakedSecrets." (join "." $secret.path)) -}}
|
||||
{{- end -}}
|
||||
{{- else -}}
|
||||
{{- $currentMap = index $currentMap $elem -}}
|
||||
{{- end -}}
|
||||
{{- else -}}
|
||||
{{- $shouldContinue = false -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
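The regex above accepts Grafana's expansion syntaxes (`${...}`, `$__env{...}`, `$__file{...}`, `$__vault{...}`), so flagged keys must reference their values rather than inline them. A hedged `grafana.ini` sketch (paths and variable names are illustrative):

```yaml
grafana.ini:
  database:
    password: $__file{/etc/secrets/db/password}   # accepted: expanded at runtime
  smtp:
    password: $__env{GF_SMTP_PASSWORD}            # accepted
    # password: hunter2                           # a literal here would fail the assertion
```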
|
File diff suppressed because it is too large
@ -1,25 +0,0 @@
|
||||
{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }}
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "grafana.fullname" . }}-clusterrole
|
||||
{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }}
|
||||
rules:
|
||||
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }}
|
||||
- apiGroups: [""] # "" indicates the core API group
|
||||
resources: ["configmaps", "secrets"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- end}}
|
||||
{{- with .Values.rbac.extraClusterRoleRules }}
|
||||
{{- toYaml . | nindent 2 }}
|
||||
{{- end}}
|
||||
{{- else }}
|
||||
rules: []
|
||||
{{- end}}
|
||||
{{- end}}
|
@ -1,24 +0,0 @@
|
||||
{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }}
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-clusterrolebinding
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "grafana.serviceAccountName" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
{{- if .Values.rbac.useExistingClusterRole }}
|
||||
name: {{ .Values.rbac.useExistingClusterRole }}
|
||||
{{- else }}
|
||||
name: {{ include "grafana.fullname" . }}-clusterrole
|
||||
{{- end }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
@ -1,43 +0,0 @@
|
||||
{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}}
|
||||
{{- if and .Values.createConfigmap $createConfigSecret }}
|
||||
{{- $files := .Files }}
|
||||
{{- $root := . -}}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: "{{ include "grafana.fullname" . }}-config-secret"
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
data:
|
||||
{{- range $key, $value := .Values.alerting }}
|
||||
{{- if (hasKey $value "secretFile") }}
|
||||
{{- $key | nindent 2 }}:
|
||||
{{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}}
|
||||
{{/* per https://helm.sh/docs/chart_template_guide/accessing_files/ this only works if you fork this chart and add the files to it */}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
stringData:
|
||||
{{- range $key, $value := .Values.datasources }}
|
||||
{{- if (hasKey $value "secret") }}
|
||||
{{- $key | nindent 2 }}: |
|
||||
{{- tpl (toYaml $value.secret | nindent 4) $root }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.notifiers }}
|
||||
{{- if (hasKey $value "secret") }}
|
||||
{{- $key | nindent 2 }}: |
|
||||
{{- tpl (toYaml $value.secret | nindent 4) $root }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.alerting }}
|
||||
{{ if (hasKey $value "secret") }}
|
||||
{{- $key | nindent 2 }}: |
|
||||
{{- tpl (toYaml $value.secret | nindent 4) $root }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,15 +0,0 @@
|
||||
{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "grafana.fullname" . }}-config-dashboards
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
data:
|
||||
{{- include "grafana.configDashboardProviderData" . | nindent 2 }}
|
||||
{{- end }}
|
@ -1,20 +0,0 @@
|
||||
{{- if .Values.createConfigmap }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- if or .Values.configMapAnnotations .Values.annotations }}
|
||||
annotations:
|
||||
{{- with .Values.annotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.configMapAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
data:
|
||||
{{- include "grafana.configData" . | nindent 2 }}
|
||||
{{- end }}
|
@ -1,35 +0,0 @@
|
||||
{{- if .Values.dashboards }}
|
||||
{{ $files := .Files }}
|
||||
{{- range $provider, $dashboards := .Values.dashboards }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }}
|
||||
namespace: {{ include "grafana.namespace" $ }}
|
||||
labels:
|
||||
{{- include "grafana.labels" $ | nindent 4 }}
|
||||
dashboard-provider: {{ $provider }}
|
||||
{{- if $dashboards }}
|
||||
data:
|
||||
{{- $dashboardFound := false }}
|
||||
{{- range $key, $value := $dashboards }}
|
||||
{{- if (or (hasKey $value "json") (hasKey $value "file")) }}
|
||||
{{- $dashboardFound = true }}
|
||||
{{- print $key | nindent 2 }}.json:
|
||||
{{- if hasKey $value "json" }}
|
||||
|-
|
||||
{{- $value.json | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- if hasKey $value "file" }}
|
||||
{{- toYaml ( $files.Get $value.file ) | nindent 4}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if not $dashboardFound }}
|
||||
{}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
---
|
||||
{{- end }}
|
||||
|
||||
{{- end }}
|
@ -1,53 +0,0 @@
|
||||
{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if (not .Values.autoscaling.enabled) }}
|
||||
replicas: {{ .Values.replicas }}
|
||||
{{- end }}
|
||||
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 6 }}
|
||||
{{- with .Values.deploymentStrategy }}
|
||||
strategy:
|
||||
{{- toYaml . | trim | nindent 4 }}
|
||||
{{- end }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include "grafana.configData" . | sha256sum }}
|
||||
{{- if .Values.dashboards }}
|
||||
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
|
||||
{{- end }}
|
||||
checksum/sc-dashboard-provider-config: {{ include "grafana.configDashboardProviderData" . | sha256sum }}
|
||||
{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
|
||||
checksum/secret: {{ include "grafana.secretsData" . | sha256sum }}
|
||||
{{- end }}
|
||||
{{- if .Values.envRenderSecret }}
|
||||
checksum/secret-env: {{ tpl (toYaml .Values.envRenderSecret) . | sha256sum }}
|
||||
{{- end }}
|
||||
kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- include "grafana.pod" . | nindent 6 }}
|
||||
{{- end }}
|
@ -1,4 +0,0 @@
|
||||
{{ range .Values.extraObjects }}
|
||||
---
|
||||
{{ tpl (toYaml .) $ }}
|
||||
{{ end }}
|
@ -1,22 +0,0 @@
|
||||
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
|
||||
{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-headless
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
clusterIP: None
|
||||
selector:
|
||||
{{- include "grafana.selectorLabels" . | nindent 4 }}
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: {{ .Values.gossipPortName }}-tcp
|
||||
port: 9094
|
||||
{{- end }}
|
@ -1,52 +0,0 @@
|
||||
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
|
||||
{{- if .Values.autoscaling.enabled }}
|
||||
apiVersion: {{ include "grafana.hpa.apiVersion" . }}
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "grafana.name" . }}
|
||||
helm.sh/chart: {{ include "grafana.chart" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
{{- if has .Values.persistence.type $sts }}
|
||||
kind: StatefulSet
|
||||
{{- else }}
|
||||
kind: Deployment
|
||||
{{- end }}
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
minReplicas: {{ .Values.autoscaling.minReplicas }}
|
||||
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.autoscaling.targetMemory }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
|
||||
targetAverageUtilization: {{ .Values.autoscaling.targetMemory }}
|
||||
{{- else }}
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.autoscaling.targetMemory }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.autoscaling.targetCPU }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
|
||||
targetAverageUtilization: {{ .Values.autoscaling.targetCPU }}
|
||||
{{- else }}
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.autoscaling.targetCPU }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.autoscaling.behavior }}
|
||||
behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,199 +0,0 @@
|
||||
{{ if .Values.imageRenderer.enabled }}
|
||||
{{- $root := . -}}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-image-renderer
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
|
||||
{{- with .Values.imageRenderer.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }}
|
||||
replicas: {{ .Values.imageRenderer.replicas }}
|
||||
{{- end }}
|
||||
revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
|
||||
|
||||
{{- with .Values.imageRenderer.deploymentStrategy }}
|
||||
strategy:
|
||||
{{- toYaml . | trim | nindent 4 }}
|
||||
{{- end }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }}
|
||||
{{- with .Values.imageRenderer.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
{{- with .Values.imageRenderer.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.imageRenderer.schedulerName }}
|
||||
schedulerName: "{{ . }}"
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.serviceAccountName }}
|
||||
serviceAccountName: "{{ . }}"
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.securityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.hostAliases }}
|
||||
hostAliases:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.priorityClassName }}
|
||||
priorityClassName: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.image.pullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range . }}
|
||||
- name: {{ tpl . $root }}
|
||||
{{- end}}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}-image-renderer
|
||||
{{- $registry := .Values.global.imageRegistry | default .Values.imageRenderer.image.registry -}}
|
||||
{{- if .Values.imageRenderer.image.sha }}
|
||||
image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}"
|
||||
{{- else }}
|
||||
image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }}
|
||||
{{- if .Values.imageRenderer.command }}
|
||||
command:
|
||||
{{- range .Values.imageRenderer.command }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
{{- end}}
|
||||
ports:
|
||||
- name: {{ .Values.imageRenderer.service.portName }}
|
||||
containerPort: {{ .Values.imageRenderer.service.targetPort }}
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /
|
||||
port: {{ .Values.imageRenderer.service.portName }}
|
||||
env:
|
||||
- name: HTTP_PORT
|
||||
value: {{ .Values.imageRenderer.service.targetPort | quote }}
|
||||
{{- if .Values.imageRenderer.serviceMonitor.enabled }}
|
||||
- name: ENABLE_METRICS
|
||||
value: "true"
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.imageRenderer.envValueFrom }}
|
||||
- name: {{ $key | quote }}
|
||||
valueFrom:
|
||||
{{- tpl (toYaml $value) $ | nindent 16 }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.imageRenderer.env }}
|
||||
- name: {{ $key | quote }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.containerSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: image-renderer-tmpfs
|
||||
{{- range .Values.imageRenderer.extraConfigmapMounts }}
|
||||
- name: {{ tpl .name $root }}
|
||||
mountPath: {{ tpl .mountPath $root }}
|
||||
subPath: {{ tpl (.subPath | default "") $root }}
|
||||
readOnly: {{ .readOnly }}
|
||||
{{- end }}
|
||||
{{- range .Values.imageRenderer.extraSecretMounts }}
|
||||
- name: {{ .name }}
|
||||
mountPath: {{ .mountPath }}
|
||||
readOnly: {{ .readOnly }}
|
||||
subPath: {{ .subPath | default "" }}
|
||||
{{- end }}
|
||||
{{- range .Values.imageRenderer.extraVolumeMounts }}
|
||||
- name: {{ .name }}
|
||||
mountPath: {{ .mountPath }}
|
||||
subPath: {{ .subPath | default "" }}
|
||||
readOnly: {{ .readOnly }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.resources }}
|
||||
resources:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.affinity }}
|
||||
affinity:
|
||||
{{- tpl (toYaml .) $root | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: image-renderer-tmpfs
|
||||
emptyDir: {}
|
||||
{{- range .Values.imageRenderer.extraConfigmapMounts }}
|
||||
- name: {{ tpl .name $root }}
|
||||
configMap:
|
||||
name: {{ tpl .configMap $root }}
|
||||
{{- with .items }}
|
||||
items:
|
||||
{{- toYaml . | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range .Values.imageRenderer.extraSecretMounts }}
|
||||
{{- if .secretName }}
|
||||
- name: {{ .name }}
|
||||
secret:
|
||||
secretName: {{ .secretName }}
|
||||
defaultMode: {{ .defaultMode }}
|
||||
{{- with .items }}
|
||||
items:
|
||||
{{- toYaml . | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- else if .projected }}
|
||||
- name: {{ .name }}
|
||||
projected:
|
||||
{{- toYaml .projected | nindent 12 }}
|
||||
{{- else if .csi }}
|
||||
- name: {{ .name }}
|
||||
csi:
|
||||
{{- toYaml .csi | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range .Values.imageRenderer.extraVolumes }}
|
||||
- name: {{ .name }}
|
||||
{{- if .existingClaim }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ .existingClaim }}
|
||||
{{- else if .hostPath }}
|
||||
hostPath:
|
||||
{{ toYaml .hostPath | nindent 12 }}
|
||||
{{- else if .csi }}
|
||||
csi:
|
||||
{{- toYaml .csi | nindent 12 }}
|
||||
{{- else if .configMap }}
|
||||
configMap:
|
||||
{{- toYaml .configMap | nindent 12 }}
|
||||
{{- else if .emptyDir }}
|
||||
emptyDir:
|
||||
{{- toYaml .emptyDir | nindent 12 }}
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,47 +0,0 @@
|
||||
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }}
|
||||
apiVersion: {{ include "grafana.hpa.apiVersion" . }}
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-image-renderer
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer
|
||||
helm.sh/chart: {{ include "grafana.chart" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ include "grafana.fullname" . }}-image-renderer
|
||||
minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }}
|
||||
maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.imageRenderer.autoscaling.targetMemory }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
|
||||
targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }}
|
||||
{{- else }}
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.imageRenderer.autoscaling.targetCPU }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
|
||||
targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }}
|
||||
{{- else }}
|
||||
target:
|
||||
type: Utilization
|
||||
averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.imageRenderer.autoscaling.behavior }}
|
||||
behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,79 +0,0 @@
|
||||
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }}
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-image-renderer-ingress
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
annotations:
|
||||
comment: Limit image-renderer ingress traffic from grafana
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
|
||||
{{- with .Values.imageRenderer.podLabels }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
|
||||
policyTypes:
|
||||
- Ingress
|
||||
ingress:
|
||||
- ports:
|
||||
- port: {{ .Values.imageRenderer.service.targetPort }}
|
||||
protocol: TCP
|
||||
from:
|
||||
- namespaceSelector:
|
||||
matchLabels:
|
||||
kubernetes.io/metadata.name: {{ include "grafana.namespace" . }}
|
||||
podSelector:
|
||||
matchLabels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 14 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}}
|
||||
{{ toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }}
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-image-renderer-egress
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
annotations:
|
||||
comment: Limit image-renderer egress traffic to grafana
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
|
||||
{{- with .Values.imageRenderer.podLabels }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
|
||||
policyTypes:
|
||||
- Egress
|
||||
egress:
|
||||
# allow dns resolution
|
||||
- ports:
|
||||
- port: 53
|
||||
protocol: UDP
|
||||
- port: 53
|
||||
protocol: TCP
|
||||
# talk only to grafana
|
||||
- ports:
|
||||
- port: {{ .Values.service.targetPort }}
|
||||
protocol: TCP
|
||||
to:
|
||||
- namespaceSelector:
|
||||
matchLabels:
|
||||
kubernetes.io/metadata.name: {{ include "grafana.namespace" . }}
|
||||
podSelector:
|
||||
matchLabels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 14 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 14 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,31 +0,0 @@
|
||||
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-image-renderer
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
|
||||
{{- with .Values.imageRenderer.service.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.service.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: ClusterIP
|
||||
{{- with .Values.imageRenderer.service.clusterIP }}
|
||||
clusterIP: {{ . }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.imageRenderer.service.portName }}
|
||||
port: {{ .Values.imageRenderer.service.port }}
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.imageRenderer.service.targetPort }}
|
||||
{{- with .Values.imageRenderer.appProtocol }}
|
||||
appProtocol: {{ . }}
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }}
|
||||
{{- end }}
|
@ -1,48 +0,0 @@
|
||||
{{- if .Values.imageRenderer.serviceMonitor.enabled }}
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-image-renderer
|
||||
{{- if .Values.imageRenderer.serviceMonitor.namespace }}
|
||||
namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }}
|
||||
{{- else }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
|
||||
{{- with .Values.imageRenderer.serviceMonitor.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: {{ .Values.imageRenderer.service.portName }}
|
||||
{{- with .Values.imageRenderer.serviceMonitor.interval }}
|
||||
interval: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ . }}
|
||||
{{- end }}
|
||||
honorLabels: true
|
||||
path: {{ .Values.imageRenderer.serviceMonitor.path }}
|
||||
scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }}
|
||||
{{- with .Values.imageRenderer.serviceMonitor.tlsConfig }}
|
||||
tlsConfig:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.imageRenderer.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
jobLabel: "{{ .Release.Name }}-image-renderer"
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ include "grafana.namespace" . }}
|
||||
{{- with .Values.imageRenderer.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,78 +0,0 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}}
|
||||
{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}}
|
||||
{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}}
|
||||
{{- $fullName := include "grafana.fullname" . -}}
|
||||
{{- $servicePort := .Values.service.port -}}
|
||||
{{- $ingressPath := .Values.ingress.path -}}
|
||||
{{- $ingressPathType := .Values.ingress.pathType -}}
|
||||
{{- $extraPaths := .Values.ingress.extraPaths -}}
|
||||
apiVersion: {{ include "grafana.ingress.apiVersion" . }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- range $key, $value := . }}
|
||||
{{ $key }}: {{ tpl $value $ | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
|
||||
ingressClassName: {{ .Values.ingress.ingressClassName }}
|
||||
{{- end -}}
|
||||
{{- with .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- tpl (toYaml .) $ | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- if .Values.ingress.hosts }}
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ tpl . $ | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- with $extraPaths }}
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
- path: {{ $ingressPath }}
|
||||
{{- if $ingressSupportsPathType }}
|
||||
pathType: {{ $ingressPathType }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if $ingressApiIsStable }}
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $servicePort }}
|
||||
{{- else }}
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $servicePort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
- http:
|
||||
paths:
|
||||
- backend:
|
||||
{{- if $ingressApiIsStable }}
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $servicePort }}
|
||||
{{- else }}
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $servicePort }}
|
||||
{{- end }}
|
||||
{{- with $ingressPath }}
|
||||
path: {{ . }}
|
||||
{{- end }}
|
||||
{{- if $ingressSupportsPathType }}
|
||||
pathType: {{ $ingressPathType }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
{{- end }}
|
@ -1,61 +0,0 @@
|
||||
{{- if .Values.networkPolicy.enabled }}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
policyTypes:
|
||||
{{- if .Values.networkPolicy.ingress }}
|
||||
- Ingress
|
||||
{{- end }}
|
||||
{{- if .Values.networkPolicy.egress.enabled }}
|
||||
- Egress
|
||||
{{- end }}
|
||||
podSelector:
|
||||
matchLabels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 6 }}
|
||||
|
||||
{{- if .Values.networkPolicy.egress.enabled }}
|
||||
egress:
|
||||
{{- if not .Values.networkPolicy.egress.blockDNSResolution }}
|
||||
- ports:
|
||||
- port: 53
|
||||
protocol: UDP
|
||||
{{- end }}
|
||||
- ports:
|
||||
{{ .Values.networkPolicy.egress.ports | toJson }}
|
||||
{{- with .Values.networkPolicy.egress.to }}
|
||||
to:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.networkPolicy.ingress }}
|
||||
ingress:
|
||||
- ports:
|
||||
- port: {{ .Values.service.targetPort }}
|
||||
{{- if not .Values.networkPolicy.allowExternal }}
|
||||
from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
{{ include "grafana.fullname" . }}-client: "true"
|
||||
{{- with .Values.networkPolicy.explicitNamespacesSelector }}
|
||||
- namespaceSelector:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
{{- include "grafana.labels" . | nindent 14 }}
|
||||
role: read
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,22 +0,0 @@
|
||||
{{- if .Values.podDisruptionBudget }}
|
||||
apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }}
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.podDisruptionBudget.minAvailable }}
|
||||
minAvailable: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.podDisruptionBudget.maxUnavailable }}
|
||||
maxUnavailable: {{ . }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 6 }}
|
||||
{{- end }}
|
@ -1,49 +0,0 @@
|
||||
{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
|
||||
{{- if .Values.rbac.pspUseAppArmor }}
|
||||
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
|
||||
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
|
||||
{{- end }}
|
||||
spec:
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
requiredDropCapabilities:
|
||||
# Default set from Docker, with DAC_OVERRIDE and CHOWN
|
||||
- ALL
|
||||
volumes:
|
||||
- 'configMap'
|
||||
- 'emptyDir'
|
||||
- 'projected'
|
||||
- 'csi'
|
||||
- 'secret'
|
||||
- 'downwardAPI'
|
||||
- 'persistentVolumeClaim'
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: 'RunAsAny'
|
||||
seLinux:
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
fsGroup:
|
||||
rule: 'MustRunAs'
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
readOnlyRootFilesystem: false
|
||||
{{- end }}
|
@ -1,39 +0,0 @@
|
||||
{{- if and (not .Values.useStatefulSet) .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.persistence.extraPvcLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.persistence.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.persistence.finalizers }}
|
||||
finalizers:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.persistence.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.size | quote }}
|
||||
{{- if and (.Values.persistence.lookupVolumeName) (lookup "v1" "PersistentVolumeClaim" (include "grafana.namespace" .) (include "grafana.fullname" .)) }}
|
||||
volumeName: {{ (lookup "v1" "PersistentVolumeClaim" (include "grafana.namespace" .) (include "grafana.fullname" .)).spec.volumeName }}
|
||||
{{- end }}
|
||||
{{- with .Values.persistence.storageClassName }}
|
||||
storageClassName: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.persistence.selectorLabels }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,32 +0,0 @@
|
||||
{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }}
|
||||
rules:
|
||||
{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
|
||||
- apiGroups: ['extensions']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames: [{{ include "grafana.fullname" . }}]
|
||||
{{- end }}
|
||||
{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }}
|
||||
- apiGroups: [""] # "" indicates the core API group
|
||||
resources: ["configmaps", "secrets"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- end }}
|
||||
{{- with .Values.rbac.extraRoleRules }}
|
||||
{{- toYaml . | nindent 2 }}
|
||||
{{- end}}
|
||||
{{- else }}
|
||||
rules: []
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,25 +0,0 @@
|
||||
{{- if .Values.rbac.create }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
{{- if .Values.rbac.useExistingRole }}
|
||||
name: {{ .Values.rbac.useExistingRole }}
|
||||
{{- else }}
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
{{- end }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "grafana.serviceAccountName" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
{{- end }}
|
@ -1,14 +0,0 @@
|
||||
{{- if .Values.envRenderSecret }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-env
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- range $key, $val := .Values.envRenderSecret }}
|
||||
{{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,16 +0,0 @@
|
||||
{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
type: Opaque
|
||||
data:
|
||||
{{- include "grafana.secretsData" . | nindent 2 }}
|
||||
{{- end }}
|
@ -1,67 +0,0 @@
|
||||
{{- if .Values.service.enabled }}
|
||||
{{- $root := . }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.service.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- tpl (toYaml . | nindent 4) $root }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
|
||||
type: ClusterIP
|
||||
{{- with .Values.service.clusterIP }}
|
||||
clusterIP: {{ . }}
|
||||
{{- end }}
|
||||
{{- else if eq .Values.service.type "LoadBalancer" }}
|
||||
type: LoadBalancer
|
||||
{{- with .Values.service.loadBalancerIP }}
|
||||
loadBalancerIP: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.service.loadBalancerClass }}
|
||||
loadBalancerClass: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.service.loadBalancerSourceRanges }}
|
||||
loadBalancerSourceRanges:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
type: {{ .Values.service.type }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.ipFamilyPolicy }}
|
||||
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.ipFamilies }}
|
||||
ipFamilies: {{ .Values.service.ipFamilies | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- with .Values.service.externalIPs }}
|
||||
externalIPs:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.service.externalTrafficPolicy }}
|
||||
externalTrafficPolicy: {{ . }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: {{ .Values.service.portName }}
|
||||
port: {{ .Values.service.port }}
|
||||
protocol: TCP
|
||||
targetPort: {{ .Values.service.targetPort }}
|
||||
{{- with .Values.service.appProtocol }}
|
||||
appProtocol: {{ . }}
|
||||
{{- end }}
|
||||
{{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
|
||||
nodePort: {{ .Values.service.nodePort }}
|
||||
{{- end }}
|
||||
{{- with .Values.extraExposePorts }}
|
||||
{{- tpl (toYaml . | nindent 4) $root }}
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "grafana.selectorLabels" . | nindent 4 }}
|
||||
{{- end }}
|
@ -1,17 +0,0 @@
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.autoMount | default .Values.serviceAccount.automountServiceAccountToken }}
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- tpl (toYaml . | nindent 4) $ }}
|
||||
{{- end }}
|
||||
name: {{ include "grafana.serviceAccountName" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
{{- end }}
|
@ -1,52 +0,0 @@
|
||||
{{- if .Values.serviceMonitor.enabled }}
|
||||
---
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
{{- if .Values.serviceMonitor.namespace }}
|
||||
namespace: {{ tpl .Values.serviceMonitor.namespace . }}
|
||||
{{- else }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceMonitor.labels }}
|
||||
{{- tpl (toYaml . | nindent 4) $ }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- port: {{ .Values.service.portName }}
|
||||
{{- with .Values.serviceMonitor.interval }}
|
||||
interval: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.scrapeTimeout }}
|
||||
scrapeTimeout: {{ . }}
|
||||
{{- end }}
|
||||
honorLabels: true
|
||||
path: {{ .Values.serviceMonitor.path }}
|
||||
scheme: {{ .Values.serviceMonitor.scheme }}
|
||||
{{- with .Values.serviceMonitor.tlsConfig }}
|
||||
tlsConfig:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
jobLabel: "{{ .Release.Name }}"
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 6 }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ include "grafana.namespace" . }}
|
||||
{{- with .Values.serviceMonitor.targetLabels }}
|
||||
targetLabels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,58 +0,0 @@
|
||||
{{- $sts := list "sts" "StatefulSet" "statefulset" -}}
|
||||
{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}}
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
{{- with .Values.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 6 }}
|
||||
serviceName: {{ include "grafana.fullname" . }}-headless
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "grafana.selectorLabels" . | nindent 8 }}
|
||||
{{- with .Values.podLabels }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
|
||||
checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }}
|
||||
{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
|
||||
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
|
||||
{{- end }}
|
||||
kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- include "grafana.pod" . | nindent 6 }}
|
||||
{{- if .Values.persistence.enabled}}
|
||||
volumeClaimTemplates:
|
||||
- apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: storage
|
||||
spec:
|
||||
accessModes: {{ .Values.persistence.accessModes }}
|
||||
storageClassName: {{ .Values.persistence.storageClassName }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.size }}
|
||||
{{- with .Values.persistence.selectorLabels }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -1,20 +0,0 @@
|
||||
{{- if .Values.testFramework.enabled }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-test
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
data:
|
||||
run.sh: |-
|
||||
@test "Test Health" {
|
||||
url="http://{{ include "grafana.fullname" . }}/api/health"
|
||||
|
||||
code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}')
|
||||
[ "$code" == "200" ]
|
||||
}
|
||||
{{- end }}
|
@ -1,32 +0,0 @@
|
||||
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }}
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-test
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
spec:
|
||||
allowPrivilegeEscalation: true
|
||||
privileged: false
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- configMap
|
||||
- downwardAPI
|
||||
- emptyDir
|
||||
- projected
|
||||
- csi
|
||||
- secret
|
||||
{{- end }}
|
@ -1,17 +0,0 @@
|
||||
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-test
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames: [{{ include "grafana.fullname" . }}-test]
|
||||
{{- end }}
|
@ -1,20 +0,0 @@
|
||||
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ include "grafana.fullname" . }}-test
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "grafana.fullname" . }}-test
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "grafana.serviceAccountNameTest" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
{{- end }}
|
@ -1,12 +0,0 @@
|
||||
{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "grafana.labels" . | nindent 4 }}
|
||||
name: {{ include "grafana.serviceAccountNameTest" . }}
|
||||
namespace: {{ include "grafana.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
|
||||
{{- end }}
|
@ -1,53 +0,0 @@
{{- if .Values.testFramework.enabled }}
{{- $root := . }}
apiVersion: v1
kind: Pod
metadata:
  name: {{ include "grafana.fullname" . }}-test
  labels:
    {{- include "grafana.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
    "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
  namespace: {{ include "grafana.namespace" . }}
spec:
  serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }}
  {{- with .Values.testFramework.securityContext }}
  securityContext:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }}
  imagePullSecrets:
    {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }}
  {{- end }}
  {{- with .Values.nodeSelector }}
  nodeSelector:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- with .Values.affinity }}
  affinity:
    {{- tpl (toYaml .) $root | nindent 4 }}
  {{- end }}
  {{- with .Values.tolerations }}
  tolerations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  containers:
    - name: {{ .Release.Name }}-test
      image: "{{ .Values.global.imageRegistry | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}"
      imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy }}"
      command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"]
      volumeMounts:
        - mountPath: /tests
          name: tests
          readOnly: true
      {{- with .Values.testFramework.resources }}
      resources:
        {{- toYaml . | nindent 8 }}
      {{- end }}
  volumes:
    - name: tests
      configMap:
        name: {{ include "grafana.fullname" . }}-test
  restartPolicy: Never
{{- end }}
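The templates above are wired to Helm's test hook (`"helm.sh/hook": test`), so the bats suite mounted from the ConfigMap can be run against a deployed release. A minimal invocation sketch, assuming a hypothetical release name `grafana` in namespace `monitoring`:

```console
# Run the chart's test hook and stream the test pod logs
helm test grafana -n monitoring --logs
```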
@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@ -1,26 +0,0 @@
annotations:
  artifacthub.io/license: Apache-2.0
  artifacthub.io/links: |
    - name: Chart Source
      url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
appVersion: 2.13.0
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
  - metric
  - monitoring
  - prometheus
  - kubernetes
maintainers:
  - email: tariq.ibrahim@mulesoft.com
    name: tariq1890
  - email: manuel@rueg.eu
    name: mrueg
  - email: david@0xdc.me
    name: dotdc
name: kube-state-metrics
sources:
  - https://github.com/kubernetes/kube-state-metrics/
type: application
version: 5.26.0
@ -1,85 +0,0 @@
# kube-state-metrics Helm Chart

Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics).

## Get Repository Info
<!-- textlint-disable -->
```console
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
```

_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
<!-- textlint-enable -->

## Install Chart

```console
helm install [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
```

_See [configuration](#configuration) below._

_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._

## Uninstall Chart

```console
helm uninstall [RELEASE_NAME]
```

This removes all the Kubernetes components associated with the chart and deletes the release.

_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._

## Upgrading Chart

```console
helm upgrade [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
```

_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._

### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics

You can upgrade in-place (see the sketch below):

1. [get repository info](#get-repository-info)
1. [upgrade](#upgrading-chart) your existing release using the new chart repository

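For example, a migration sketch built from the commands documented above; the release name `kube-state-metrics` and namespace `kube-system` are placeholders for your existing release:

```console
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm upgrade kube-state-metrics prometheus-community/kube-state-metrics -n kube-system
```
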
## Upgrading to v3.0.0

v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application-side.

The upgraded chart now has the following changes (see the values sketch below):

* Dropped support for helm v2 (helm v3 or later is required)
* collectors key was renamed to resources
* namespace key was renamed to namespaces

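A hypothetical values fragment illustrating the renames; only the key names come from the list above, the values shown are placeholders:

```yaml
# chart v3.0.0 and later (example values only)
resources:                # formerly `collectors`
  - pods
  - deployments
namespaces: kube-system   # formerly `namespace`
```
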
## Configuration

See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:

```console
helm show values prometheus-community/kube-state-metrics
```

### kube-rbac-proxy

You can enable `kube-state-metrics` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy one RBAC proxy container per endpoint (metrics & telemetry).
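For example, a minimal values sketch; only `kubeRBACProxy.enabled` is named in this README, everything else stays at the chart defaults:

```yaml
kubeRBACProxy:
  enabled: true
```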
To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kube-state-metrics-read
rules:
  - apiGroups: [ "" ]
    resources: ["services/kube-state-metrics"]
    verbs:
      - get
```

See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details.
@ -1,23 +0,0 @@
kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
The exposed metrics can be found here:
https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics

The metrics are exported on the HTTP endpoint /metrics on the listening port.
In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics

They are served either as plaintext or protobuf depending on the Accept header.
They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.

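A quick manual check of that endpoint, as a sketch; the service name `kube-state-metrics`, namespace `kube-system`, and port `8080` are placeholders for the rendered values above:

```console
kubectl -n kube-system port-forward svc/kube-state-metrics 8080:8080 &
curl -s http://localhost:8080/metrics | head
```
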
{{- if .Values.kubeRBACProxy.enabled }}

kube-rbac-proxy endpoint protection is enabled:
- Metrics endpoints are now HTTPS
- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions:
```
rules:
  - apiGroups: [ "" ]
    resources: ["services/{{ template "kube-state-metrics.fullname" . }}"]
    verbs:
      - get
```
{{- end }}
@ -1,156 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kube-state-metrics.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kube-state-metrics.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
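As an illustration of the fullname logic above, with hypothetical release names that are not taken from this repo:

```yaml
# Hypothetical resolutions of "kube-state-metrics.fullname":
#   fullnameOverride: ""        release "ksm"                -> "ksm-kube-state-metrics"
#   fullnameOverride: ""        release "kube-state-metrics" -> "kube-state-metrics"   (release already contains the chart name)
#   fullnameOverride: "my-ksm"  any release                  -> "my-ksm"
```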

{{/*
Create the name of the service account to use
*/}}
{{- define "kube-state-metrics.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts
*/}}
{{- define "kube-state-metrics.namespace" -}}
{{- if .Values.namespaceOverride -}}
{{- .Values.namespaceOverride -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kube-state-metrics.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Generate basic labels
*/}}
{{- define "kube-state-metrics.labels" }}
helm.sh/chart: {{ template "kube-state-metrics.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/component: metrics
app.kubernetes.io/part-of: {{ template "kube-state-metrics.name" . }}
{{- include "kube-state-metrics.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- if .Values.customLabels }}
{{ tpl (toYaml .Values.customLabels) . }}
{{- end }}
{{- if .Values.releaseLabel }}
release: {{ .Release.Name }}
{{- end }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "kube-state-metrics.selectorLabels" }}
{{- if .Values.selectorOverride }}
{{ toYaml .Values.selectorOverride }}
{{- else }}
app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- end }}

{{/* Sets default scrape limits for servicemonitor */}}
{{- define "servicemonitor.scrapeLimits" -}}
{{- with .sampleLimit }}
sampleLimit: {{ . }}
{{- end }}
{{- with .targetLimit }}
targetLimit: {{ . }}
{{- end }}
{{- with .labelLimit }}
labelLimit: {{ . }}
{{- end }}
{{- with .labelNameLengthLimit }}
labelNameLengthLimit: {{ . }}
{{- end }}
{{- with .labelValueLengthLimit }}
labelValueLengthLimit: {{ . }}
{{- end }}
{{- end -}}
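A usage sketch for the scrape-limits helper above: it expects to be passed a context that carries the limit fields directly. The `.Values.prometheus.monitor` path below is an assumption for illustration, not taken from this diff:

```yaml
{{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | nindent 2 }}
```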

{{/*
Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets})
*/}}
{{- define "kube-state-metrics.imagePullSecrets" -}}
{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }}
{{- if eq (typeOf .) "map[string]interface {}" }}
- {{ toYaml . | trim }}
{{- else }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
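A caller sketch that follows the input contract stated in the helper's comment; the `.Values.imagePullSecrets` path and the indent width are assumptions for illustration:

```yaml
imagePullSecrets:
  {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 2 }}
```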

{{/*
The image to use for kube-state-metrics
*/}}
{{- define "kube-state-metrics.image" -}}
{{- if .Values.image.sha }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }}
{{- else }}
{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }}
{{- end }}
{{- else }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- else }}
{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
{{- end }}
{{- end }}
{{- end }}
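A worked example of the image helper above, using hypothetical values; the registry and repository shown are placeholders, while the `v2.13.0` default tag follows from the chart's `appVersion: 2.13.0` and the `printf "v%s"` fallback:

```yaml
# values sketch
image:
  registry: registry.k8s.io
  repository: kube-state-metrics/kube-state-metrics
  tag: ""   # empty -> falls back to v<appVersion>
  sha: ""
# rendered by "kube-state-metrics.image":
#   registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0
```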

{{/*
The image to use for kubeRBACProxy
*/}}
{{- define "kubeRBACProxy.image" -}}
{{- if .Values.kubeRBACProxy.image.sha }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }}
{{- else }}
{{- printf "%s/%s:%s@%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }}
{{- end }}
{{- else }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }}
{{- else }}
{{- printf "%s/%s:%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }}
{{- end }}
{{- end }}
{{- end }}