Compare commits


20 Commits

Author SHA1 Message Date
Renovate Bot 54878bae60 chore(deps): update kubezero-redis-dependencies 2024-04-09 03:09:47 +00:00
Stefan Reimer 7a80650d9c fix: disable feature flag for now 2024-04-08 18:09:22 +00:00
Stefan Reimer 75fc295066 fix: upgrade flow tweaks 2024-04-08 19:08:45 +01:00
Stefan Reimer 705f36f9aa feat: logging module version bumps 2024-04-08 12:30:01 +00:00
Stefan Reimer 8b7b1ec8fa Merge pull request 'chore(deps): update kubezero-logging-dependencies' (#160) from renovate/kubezero-logging-kubezero-logging-dependencies into master
Reviewed-on: #160
2024-04-04 13:41:31 +00:00
Stefan Reimer e2770079eb feat: version upgrades for kubezero-metrics 2024-04-04 13:39:36 +00:00
Renovate Bot b2d8a11854 chore(deps): update kubezero-logging-dependencies 2024-04-04 03:10:31 +00:00
Stefan Reimer 1bdbb7c538 feat: version upgrades for opensearch and operators 2024-04-03 14:36:59 +00:00
Stefan Reimer 1350500f7f Merge pull request 'chore(deps): update kubezero-metrics-dependencies' (#158) from renovate/kubezero-metrics-kubezero-metrics-dependencies into master
Reviewed-on: #158
2024-04-03 14:35:48 +00:00
Stefan Reimer 1cb0ff2c0d Merge pull request 'chore(deps): update helm release kube-prometheus-stack to v57' (#149) from renovate/kubezero-metrics-major-kubezero-metrics-dependencies into master
Reviewed-on: #149
2024-04-03 14:35:31 +00:00
Stefan Reimer 734f19010f Merge pull request 'chore(deps): update helm release eck-operator to v2.12.1' (#180) from renovate/kubezero-operators-kubezero-operators-dependencies into master
Reviewed-on: #180
2024-04-03 13:18:24 +00:00
Stefan Reimer 3013c39061 Merge pull request 'chore(deps): update helm release jaeger to v2' (#173) from renovate/kubezero-telemetry-major-kubezero-telemetry-dependencies into master
Reviewed-on: #173
2024-04-03 13:11:11 +00:00
Stefan Reimer ca14178e94 feat: Falco version upgrade 2024-04-03 13:11:07 +00:00
Stefan Reimer 4b4431919a Merge pull request 'chore(deps): update helm release falco to v4' (#163) from renovate/kubezero-falco-major-kubezero-falco-dependencies into master
Reviewed-on: #163
2024-04-03 11:49:53 +00:00
Stefan Reimer 32e71b4129 feat: Istio upgrade to 1.21 2024-04-03 11:49:07 +00:00
Renovate Bot e8204779a5 chore(deps): update helm release kube-prometheus-stack to v57 2024-03-28 03:07:08 +00:00
Renovate Bot 9a56c99ee5 chore(deps): update helm release eck-operator to v2.12.1 2024-03-28 03:06:41 +00:00
Renovate Bot d9146abf72 chore(deps): update kubezero-metrics-dependencies 2024-03-20 19:56:58 +00:00
Renovate Bot 7d354402d6 chore(deps): update helm release jaeger to v2 2024-03-15 03:23:54 +00:00
Renovate Bot 91a0034b26 chore(deps): update helm release falco to v4 2024-03-15 03:23:44 +00:00
206 changed files with 37207 additions and 48918 deletions

View File

@ -13,6 +13,13 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
### v1.28
# - remove old argocd app, all resources will be taken over by argo.argo-cd
argo_used && kubectl patch app argocd -n argocd \
--type json \
--patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]' && \
kubectl delete app argocd -n argocd || true
argo_used && disable_argo
#all_nodes_upgrade ""
@ -24,20 +31,13 @@ control_plane_upgrade kubeadm_upgrade
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
# upgrade modules
control_plane_upgrade "apply_network apply_addons, apply_storage, apply_operators"
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
echo "Applying remaining KubeZero modules..."
### v1.28
# - remove old argocd app, all resources will be taken over by argo.argo-cd
kubectl patch app argocd -n argocd \
--type json \
--patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]' && \
kubectl delete app argocd -n argocd || true
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
# Trigger backup of upgraded cluster state

View File

@ -2,8 +2,9 @@
{{- /* Issues: "MemoryQoS" */ -}}
{{- /* v1.30?: "NodeSwap" */ -}}
{{- /* v1.29: remove/beta now "SidecarContainers" */ -}}
{{- /* v1.28: "PodAndContainerStatsFromCRI" still not working */ -}}
{{- define "kubeadm.featuregates" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "SidecarContainers" "PodAndContainerStatsFromCRI" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "SidecarContainers" }}
{{- if eq .return "csv" }}
{{- range $key := $gates }}
{{- $key }}=true,

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-falco
description: Falco Container Security and Audit components
type: application
version: 0.1.1
version: 0.1.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,7 +16,7 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: falco
version: 3.8.7
version: 4.2.5
repository: https://falcosecurity.github.io/charts
condition: k8saudit.enabled
alias: k8saudit

View File

@ -0,0 +1,64 @@
# kubezero-falco
![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
Falco Container Security and Audit components
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |
## Requirements
Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://falcosecurity.github.io/charts | k8saudit(falco) | 4.2.5 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| k8saudit.collectors | object | `{"enabled":false}` | Disable the collectors, no syscall events to enrich with metadata. |
| k8saudit.controller | object | `{"deployment":{"replicas":1},"kind":"deployment"}` | Deploy Falco as a deployment. One instance of Falco is enough; the number of replicas is nevertheless configurable. |
| k8saudit.controller.deployment.replicas | int | `1` | Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing. For more info check the section on Plugins in the README.md file. |
| k8saudit.driver | object | `{"enabled":false}` | Disable the drivers since we want to deploy only the k8saudit plugin. |
| k8saudit.enabled | bool | `false` | |
| k8saudit.falco.buffered_outputs | bool | `true` | |
| k8saudit.falco.json_output | bool | `true` | |
| k8saudit.falco.load_plugins[0] | string | `"k8saudit"` | |
| k8saudit.falco.load_plugins[1] | string | `"json"` | |
| k8saudit.falco.log_syslog | bool | `false` | |
| k8saudit.falco.plugins[0].init_config.maxEventSize | int | `1048576` | |
| k8saudit.falco.plugins[0].library_path | string | `"libk8saudit.so"` | |
| k8saudit.falco.plugins[0].name | string | `"k8saudit"` | |
| k8saudit.falco.plugins[0].open_params | string | `"http://:9765/k8s-audit"` | |
| k8saudit.falco.plugins[1].init_config | string | `""` | |
| k8saudit.falco.plugins[1].library_path | string | `"libjson.so"` | |
| k8saudit.falco.plugins[1].name | string | `"json"` | |
| k8saudit.falco.rules_file[0] | string | `"/etc/falco/rules.d"` | |
| k8saudit.falco.syslog_output.enabled | bool | `false` | |
| k8saudit.falcoctl.artifact.follow.enabled | bool | `false` | |
| k8saudit.falcoctl.artifact.install.enabled | bool | `false` | |
| k8saudit.fullnameOverride | string | `"falco-k8saudit"` | |
| k8saudit.mounts.volumeMounts[0].mountPath | string | `"/etc/falco/rules.d"` | |
| k8saudit.mounts.volumeMounts[0].name | string | `"rules-volume"` | |
| k8saudit.mounts.volumes[0].configMap.name | string | `"falco-k8saudit-rules"` | |
| k8saudit.mounts.volumes[0].name | string | `"rules-volume"` | |
| k8saudit.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| k8saudit.resources.limits.cpu | string | `"1000m"` | |
| k8saudit.resources.limits.memory | string | `"512Mi"` | |
| k8saudit.resources.requests.cpu | string | `"100m"` | |
| k8saudit.resources.requests.memory | string | `"256Mi"` | |
| k8saudit.services[0].name | string | `"webhook"` | |
| k8saudit.services[0].ports[0].port | int | `9765` | |
| k8saudit.services[0].ports[0].protocol | string | `"TCP"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
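Given the defaults above, a minimal illustrative override to switch the audit collector on (all other keys keep their documented defaults):

```yaml
# Illustrative kubezero-falco values override: enable the k8saudit deployment.
k8saudit:
  enabled: true
```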

View File

@ -20,10 +20,12 @@
- required_plugin_versions:
- name: k8saudit
version: 0.6.0
version: 0.7.0
alternatives:
- name: k8saudit-eks
version: 0.2.0
version: 0.4.0
- name: k8saudit-gke
version: 0.1.0
- name: json
version: 0.7.0
@ -79,7 +81,45 @@
"eks:vpc-resource-controller",
"eks:addon-manager",
]
-
- list: k8s_audit_sensitive_mount_images
items: [
falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
docker.io/sysdig/sysdig, sysdig/sysdig,
gcr.io/google_containers/hyperkube,
gcr.io/google_containers/kube-proxy, docker.io/calico/node,
docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/consul,
docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout,
docker.io/netdata/netdata, docker.io/google/cadvisor, docker.io/prom/node-exporter,
amazon/amazon-ecs-agent, prom/node-exporter, amazon/cloudwatch-agent
]
- list: k8s_audit_privileged_images
items: [
falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
docker.io/calico/node, calico/node,
docker.io/cloudnativelabs/kube-router,
docker.io/docker/ucp-agent,
docker.io/mesosphere/mesos-slave,
docker.io/rook/toolbox,
docker.io/sysdig/sysdig,
gcr.io/google_containers/kube-proxy,
gcr.io/google-containers/startup-script,
gcr.io/projectcalico-org/node,
gke.gcr.io/kube-proxy,
gke.gcr.io/gke-metadata-server,
gke.gcr.io/netd-amd64,
gke.gcr.io/watcher-daemonset,
gcr.io/google-containers/prometheus-to-sd,
registry.k8s.io/ip-masq-agent-amd64,
registry.k8s.io/kube-proxy,
registry.k8s.io/prometheus-to-sd,
quay.io/calico/node,
sysdig/sysdig,
registry.k8s.io/dns/k8s-dns-node-cache,
mcr.microsoft.com/oss/kubernetes/kube-proxy
]
- rule: Disallowed K8s User
desc: Detect any k8s operation by users outside of an allowed set of users.
condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) and not ka.user.name in (eks_allowed_k8s_users)
@ -166,7 +206,7 @@
- rule: Create Privileged Pod
desc: >
Detect an attempt to start a pod with a privileged container
condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (falco_privileged_images)
condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_privileged_images)
output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
@ -180,7 +220,7 @@
desc: >
Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc).
Exceptions are made for known trusted images.
condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images)
condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (k8s_audit_sensitive_mount_images)
output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace resource=%ka.target.resource images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])
priority: WARNING
source: k8s_audit
@ -188,7 +228,7 @@
# These container images are allowed to run with hostnetwork=true
# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
- list: falco_hostnetwork_images
- list: k8s_audit_hostnetwork_images
items: [
gcr.io/google-containers/prometheus-to-sd,
gcr.io/projectcalico-org/typha,
@ -196,8 +236,6 @@
gke.gcr.io/gke-metadata-server,
gke.gcr.io/kube-proxy,
gke.gcr.io/netd-amd64,
k8s.gcr.io/ip-masq-agent-amd64,
k8s.gcr.io/prometheus-to-sd,
registry.k8s.io/ip-masq-agent-amd64,
registry.k8s.io/prometheus-to-sd
]
@ -205,29 +243,29 @@
# Corresponds to K8s CIS Benchmark 1.7.4
- rule: Create HostNetwork Pod
desc: Detect an attempt to start a pod using the host network.
condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images)
condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostnetwork_images)
output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- list: falco_hostpid_images
- list: k8s_audit_hostpid_images
items: []
- rule: Create HostPid Pod
desc: Detect an attempt to start a pod using the host pid namespace.
condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostpid_images)
condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostpid_images)
output: Pod started using host pid namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- list: falco_hostipc_images
- list: k8s_audit_hostipc_images
items: []
- rule: Create HostIPC Pod
desc: Detect an attempt to start a pod using the host ipc namespace.
condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostipc_images)
condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostipc_images)
output: Pod started using host ipc namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
@ -298,6 +336,18 @@
source: k8s_audit
tags: [k8s]
- macro: user_known_portforward_activities
condition: (k8s_audit_never_true)
- rule: port-forward
desc: >
Detect any attempt to portforward
condition: ka.target.subresource in (portforward) and not user_known_portforward_activities
output: Portforward to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource )
priority: NOTICE
source: k8s_audit
tags: [k8s]
- macro: user_known_pod_debug_activities
condition: (k8s_audit_never_true)
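Both `user_known_portforward_activities` and `user_known_pod_debug_activities` default to a never-true condition, so the rules above always fire; a later-loaded rules file can override a macro to whitelist expected activity. A sketch, with a made-up service account name:

```yaml
# Hypothetical override in a custom rules file loaded after this one:
# treat port-forwards by a known automation account as expected activity.
- macro: user_known_portforward_activities
  condition: (ka.user.name = "system:serviceaccount:ci:deploy-bot")
```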
@ -344,19 +394,11 @@
gke.gcr.io/addon-resizer,
gke.gcr.io/heapster,
gke.gcr.io/gke-metadata-server,
k8s.gcr.io/ip-masq-agent-amd64,
k8s.gcr.io/kube-apiserver,
registry.k8s.io/ip-masq-agent-amd64,
registry.k8s.io/kube-apiserver,
gke.gcr.io/kube-proxy,
gke.gcr.io/netd-amd64,
gke.gcr.io/watcher-daemonset,
k8s.gcr.io/addon-resizer,
k8s.gcr.io/prometheus-to-sd,
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64,
k8s.gcr.io/k8s-dns-kube-dns-amd64,
k8s.gcr.io/k8s-dns-sidecar-amd64,
k8s.gcr.io/metrics-server-amd64,
registry.k8s.io/addon-resizer,
registry.k8s.io/prometheus-to-sd,
registry.k8s.io/k8s-dns-dnsmasq-nanny-amd64,

View File

@ -15,9 +15,9 @@ k8saudit:
resources:
requests:
cpu: 100m
memory: 256Mi
memory: 64Mi
limits:
cpu: 1000m
cpu: 1
memory: 512Mi
nodeSelector:
@ -43,10 +43,16 @@ k8saudit:
falcoctl:
artifact:
install:
enabled: false
follow:
enabled: false
# Since 0.37 the plugins are not part of the image anymore
# but we provide our rules statically via our CM
config:
artifact:
allowedTypes:
- plugin
install:
refs: [k8saudit:0.7.0,json:0.7.2]
services:
- name: webhook

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio-gateway
description: KubeZero Umbrella Chart for Istio gateways
type: application
version: 0.19.5
version: 0.21.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,6 +1,6 @@
# kubezero-istio-gateway
![Version: 0.19.4](https://img.shields.io/badge/Version-0.19.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.21.0](https://img.shields.io/badge/Version-0.21.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio gateways
@ -21,7 +21,7 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://istio-release.storage.googleapis.com/charts | gateway | 1.19.4 |
| https://istio-release.storage.googleapis.com/charts | gateway | 1.21.0 |
## Values
@ -41,6 +41,8 @@ Kubernetes: `>= 1.26.0`
| gateway.service.externalTrafficPolicy | string | `"Local"` | |
| gateway.service.type | string | `"NodePort"` | |
| gateway.terminationGracePeriodSeconds | int | `120` | |
| hardening.rejectUnderscoresHeaders | bool | `true` | |
| hardening.unescapeSlashes | bool | `true` | |
| proxyProtocol | bool | `true` | |
| telemetry.enabled | bool | `false` | |

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.19.4
appVersion: 1.21.0
description: Helm chart for deploying Istio gateways
icon: https://istio.io/latest/favicons/android-192x192.png
keywords:
@ -9,4 +9,4 @@ name: gateway
sources:
- https://github.com/istio/istio
type: application
version: 1.19.4
version: 1.21.0

View File

@ -35,6 +35,28 @@ To view support configuration options and documentation, run:
helm show values istio/gateway
```
### Profiles
Istio Helm charts have a concept of a `profile`, which is a bundled collection of value presets.
These can be set with `--set profile=<profile>`.
For example, the `demo` profile offers a preset configuration to try out Istio in a test environment, with additional features enabled and lowered resource requirements.
For consistency, the same profiles are used across each chart, even if they do not impact a given chart.
Explicitly set values have highest priority, then profile settings, then chart defaults.
As an implementation detail of profiles, the default values for the chart are all nested under `defaults`.
When configuring the chart, you should not include this.
That is, `--set some.field=true` should be passed, not `--set defaults.some.field=true`.
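For example, a values file can select a profile and still override one of its presets; the explicit key wins (a minimal sketch):

```yaml
# Illustrative: the explicit setting below takes priority over the demo preset.
profile: demo
autoscaling:
  enabled: false
```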
### OpenShift
When deploying the gateway in an OpenShift cluster, use the `openshift` profile to override the default values, for example:
```console
helm install istio-ingressgateway istio/gateway --set profile=openshift
```
### `image: auto` Information
The image used by the chart, `auto`, may be unintuitive.

View File

@ -0,0 +1,25 @@
# The ambient profile enables ambient mode. The Istiod, CNI, and ztunnel charts must be deployed
meshConfig:
defaultConfig:
proxyMetadata:
ISTIO_META_ENABLE_HBONE: "true"
variant: distroless
pilot:
variant: distroless
env:
# Set up more secure defaults that are off in 'default' only for backwards compatibility
VERIFY_CERTIFICATE_AT_CLIENT: "true"
ENABLE_AUTO_SNI: "true"
PILOT_ENABLE_HBONE: "true"
CA_TRUSTED_NODE_ACCOUNTS: "istio-system/ztunnel,kube-system/ztunnel"
PILOT_ENABLE_AMBIENT_CONTROLLERS: "true"
cni:
logLevel: info
privileged: true
ambient:
enabled: true
# Default excludes istio-system; it's actually fine to redirect there since we opt out istiod, ztunnel, and istio-cni
excludeNamespaces:
- kube-system

View File

@ -0,0 +1,6 @@
pilot:
env:
ENABLE_EXTERNAL_NAME_ALIAS: "false"
PERSIST_OLDEST_FIRST_HEURISTIC_FOR_VIRTUAL_SERVICE_HOST_MATCHING: "true"
VERIFY_CERTIFICATE_AT_CLIENT: "false"
ENABLE_AUTO_SNI: "false"

View File

@ -0,0 +1,69 @@
# The demo profile enables a variety of things to try out Istio in non-production environments.
# * Lower resource utilization.
# * Some additional features are enabled by default; especially ones used in some tasks in istio.io.
# * More ports enabled on the ingress, which is used in some tasks.
meshConfig:
accessLogFile: /dev/stdout
extensionProviders:
- name: otel
envoyOtelAls:
service: opentelemetry-collector.istio-system.svc.cluster.local
port: 4317
- name: skywalking
skywalking:
service: tracing.istio-system.svc.cluster.local
port: 11800
- name: otel-tracing
opentelemetry:
port: 4317
service: opentelemetry-collector.otel-collector.svc.cluster.local
global:
proxy:
resources:
requests:
cpu: 10m
memory: 40Mi
pilot:
autoscaleEnabled: false
traceSampling: 100
resources:
requests:
cpu: 10m
memory: 100Mi
gateways:
istio-egressgateway:
autoscaleEnabled: false
resources:
requests:
cpu: 10m
memory: 40Mi
istio-ingressgateway:
autoscaleEnabled: false
ports:
## You can add custom gateway ports in user values overrides, but the override must include these default ports as well, since helm replaces the whole list.
# Note that AWS ELB will by default perform health checks on the first port
# on this list. Setting this to the health check port will ensure that health
# checks always work. https://github.com/istio/istio/issues/12503
- port: 15021
targetPort: 15021
name: status-port
- port: 80
targetPort: 8080
name: http2
- port: 443
targetPort: 8443
name: https
- port: 31400
targetPort: 31400
name: tcp
# This is the port where sni routing happens
- port: 15443
targetPort: 15443
name: tls
resources:
requests:
cpu: 10m
memory: 40Mi
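As the comment above the ports list notes, Helm replaces the array wholesale; an override that adds a port must restate the defaults it still wants. A sketch, with a made-up extra port:

```yaml
# Illustrative override: restate the demo defaults, then append one custom port.
service:
  ports:
    - port: 15021
      targetPort: 15021
      name: status-port
    - port: 80
      targetPort: 8080
      name: http2
    - port: 443
      targetPort: 8443
      name: https
    - port: 31400
      targetPort: 31400
      name: tcp
    - port: 15443
      targetPort: 15443
      name: tls
    - port: 9443
      targetPort: 9443
      name: custom-tls   # hypothetical extra port
```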

View File

@ -0,0 +1,18 @@
# The OpenShift profile provides a basic set of settings to run Istio on OpenShift
# CNI must be installed.
cni:
cniBinDir: /var/lib/cni/bin
cniConfDir: /etc/cni/multus/net.d
chained: false
cniConfFileName: "istio-cni.conf"
excludeNamespaces:
- istio-system
- kube-system
logLevel: info
privileged: true
provider: "multus"
global:
platform: openshift
istio_cni:
enabled: true
chained: false

View File

@ -0,0 +1,9 @@
# The preview profile contains features that are experimental.
# This is intended to explore new features coming to Istio.
# Stability, security, and performance are not guaranteed - use at your own risk.
meshConfig:
defaultConfig:
proxyMetadata:
# Enable Istio agent to handle DNS requests for known hosts
# Unknown hosts will automatically be resolved using upstream dns servers in resolv.conf
ISTIO_META_DNS_CAPTURE: "true"

View File

@ -46,6 +46,10 @@ spec:
- name: net.ipv4.ip_unprivileged_port_start
value: "0"
{{- end }}
{{- with .Values.volumes }}
volumes:
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: istio-proxy
# "auto" will be populated at runtime by the mutating webhook. See https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#customizing-injection
@ -94,9 +98,9 @@ spec:
name: http-envoy-prom
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.volumeMounts }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml .Values.volumeMounts | nindent 12 }}
{{ toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
@ -118,7 +122,3 @@ spec:
{{- with .Values.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
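Since the template consumes pod-level `volumes` and container-level `volumeMounts` directly from values, a pair like the following (secret name made up) mounts an extra file into the proxy container:

```yaml
# Illustrative gateway values consumed by the template above.
volumes:
  - name: extra-certs
    secret:
      secretName: my-gateway-certs   # hypothetical secret
volumeMounts:
  - name: extra-certs
    mountPath: /etc/istio/extra-certs
    readOnly: true
```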

View File

@ -28,4 +28,15 @@ spec:
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.autoscaleBehavior }}
behavior: {{ toYaml .Values.autoscaling.autoscaleBehavior | nindent 4 }}
{{- end }}
{{- end }}
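The new branches are driven by `autoscaling.targetMemoryUtilizationPercentage` and `autoscaling.autoscaleBehavior`; an illustrative combination that also slows down scale-down:

```yaml
# Illustrative gateway values: scale on memory as well as CPU,
# and wait five minutes of stable load before scaling down.
autoscaling:
  enabled: true
  minReplicas: 1
  maxReplicas: 5
  targetCPUUtilizationPercentage: 80
  targetMemoryUtilizationPercentage: 80
  autoscaleBehavior:
    scaleDown:
      stabilizationWindowSeconds: 300
```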

View File

@ -15,12 +15,19 @@ spec:
{{- with .Values.service.loadBalancerIP }}
loadBalancerIP: "{{ . }}"
{{- end }}
{{- with .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: "{{ . }}"
{{- if eq .Values.service.type "LoadBalancer" }}
{{- if hasKey .Values.service "allocateLoadBalancerNodePorts" }}
allocateLoadBalancerNodePorts: {{ .Values.service.allocateLoadBalancerNodePorts }}
{{- end }}
{{- end }}
{{- with .Values.service.ipFamilies }}
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies:
{{ toYaml . | indent 4 }}
{{- range .Values.service.ipFamilies }}
- {{ . }}
{{- end }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
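Taken together, these branches allow for example a dual-stack LoadBalancer without allocated node ports; a sketch of matching values:

```yaml
# Illustrative service values exercising the branches above.
service:
  type: LoadBalancer
  allocateLoadBalancerNodePorts: false
  ipFamilyPolicy: PreferDualStack
  ipFamilies:
    - IPv4
    - IPv6
```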

View File

@ -0,0 +1,34 @@
{{/*
Complex logic ahead...
We have three sets of values, in order of precedence (last wins):
1. The builtin values.yaml defaults
2. The profile the user selects
3. User input (-f or --set)
Unfortunately, Helm provides us (1) and (3) together (as .Values), making it hard to insert (2).
However, we can work around this by placing all of (1) under a specific key (.Values.defaults).
We can then merge the profile onto the defaults, then the user settings onto that.
Finally, we can set all of that under .Values so the chart behaves without awareness.
*/}}
{{- $defaults := $.Values.defaults }}
{{- $_ := unset $.Values "defaults" }}
{{- $profile := dict }}
{{- with .Values.profile }}
{{- with $.Files.Get (printf "files/profile-%s.yaml" .)}}
{{- $profile = (. | fromYaml) }}
{{- else }}
{{ fail (cat "unknown profile" $.Values.profile) }}
{{- end }}
{{- end }}
{{- with .Values.compatibilityVersion }}
{{- with $.Files.Get (printf "files/profile-compatibility-version-%s.yaml" .) }}
{{- $ignore := mustMergeOverwrite $profile (. | fromYaml) }}
{{- else }}
{{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }}
{{- end }}
{{- end }}
{{- if $profile }}
{{- $a := mustMergeOverwrite $defaults $profile }}
{{- end }}
{{- $b := set $ "Values" (mustMergeOverwrite $defaults $.Values) }}
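In values terms, the merge order means a file such as the following is layered on top of the selected profile, which itself is layered on the chart defaults (illustrative; `compatibilityVersion` assumes the chart bundles a matching `files/profile-compatibility-version-<v>.yaml`):

```yaml
# Illustrative user values: profile presets apply first, explicit keys win.
profile: ambient
compatibilityVersion: "1.20"   # hypothetical; requires a bundled compatibility file
```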

View File

@ -2,240 +2,300 @@
"$schema": "http://json-schema.org/schema#",
"type": "object",
"additionalProperties": false,
"properties": {
"global": {
"type": "object"
},
"affinity": {
"type": "object"
},
"securityContext": {
"type": ["object", "null"]
},
"containerSecurityContext": {
"type": ["object", "null"]
},
"kind":{
"type": "string",
"enum": ["Deployment", "DaemonSet"]
},
"annotations": {
"additionalProperties": {
"type": [
"string",
"integer"
]
},
"type": "object"
},
"autoscaling": {
"$defs": {
"values": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"maxReplicas": {
"type": "integer"
},
"minReplicas": {
"type": "integer"
},
"targetCPUUtilizationPercentage": {
"type": "integer"
}
}
},
"env": {
"type": "object"
},
"labels": {
"type": "object"
},
"volumes": {
"type": "array"
},
"volumeMounts": {
"type": "array"
},
"name": {
"type": "string"
},
"nodeSelector": {
"type": "object"
},
"podAnnotations": {
"type": "object",
"properties": {
"inject.istio.io/templates": {
"type": "string"
},
"prometheus.io/path": {
"type": "string"
},
"prometheus.io/port": {
"type": "string"
},
"prometheus.io/scrape": {
"type": "string"
}
}
},
"replicaCount": {
"type": [ "integer", "null" ]
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
},
"requests": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
}
}
},
"revision": {
"type": "string"
},
"runAsRoot": {
"type": "boolean"
},
"unprivilegedPort": {
"type": ["string", "boolean"],
"enum": [true, false, "auto"]
},
"service": {
"type": "object",
"properties": {
"annotations": {
"global": {
"type": "object"
},
"externalTrafficPolicy": {
"affinity": {
"type": "object"
},
"securityContext": {
"type": [
"object",
"null"
]
},
"containerSecurityContext": {
"type": [
"object",
"null"
]
},
"kind": {
"type": "string",
"enum": [
"Deployment",
"DaemonSet"
]
},
"annotations": {
"additionalProperties": {
"type": [
"string",
"integer"
]
},
"type": "object"
},
"autoscaling": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"maxReplicas": {
"type": "integer"
},
"minReplicas": {
"type": "integer"
},
"targetCPUUtilizationPercentage": {
"type": "integer"
}
}
},
"env": {
"type": "object"
},
"labels": {
"type": "object"
},
"name": {
"type": "string"
},
"loadBalancerIP": {
"nodeSelector": {
"type": "object"
},
"podAnnotations": {
"type": "object",
"properties": {
"inject.istio.io/templates": {
"type": "string"
},
"prometheus.io/path": {
"type": "string"
},
"prometheus.io/port": {
"type": "string"
},
"prometheus.io/scrape": {
"type": "string"
}
}
},
"replicaCount": {
"type": [
"integer",
"null"
]
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
},
"requests": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
}
}
},
"revision": {
"type": "string"
},
"loadBalancerSourceRanges": {
"compatibilityVersion": {
"type": "string"
},
"runAsRoot": {
"type": "boolean"
},
"unprivilegedPort": {
"type": [
"string",
"boolean"
],
"enum": [
true,
false,
"auto"
]
},
"service": {
"type": "object",
"properties": {
"annotations": {
"type": "object"
},
"externalTrafficPolicy": {
"type": "string"
},
"loadBalancerIP": {
"type": "string"
},
"loadBalancerSourceRanges": {
"type": "array"
},
"ipFamilies": {
"items": {
"type": "string",
"enum": [
"IPv4",
"IPv6"
]
}
},
"ipFamilyPolicy": {
"type": "string",
"enum": [
"",
"SingleStack",
"PreferDualStack",
"RequireDualStack"
]
},
"ports": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"port": {
"type": "integer"
},
"protocol": {
"type": "string"
},
"targetPort": {
"type": "integer"
}
}
}
},
"type": {
"type": "string"
}
}
},
"serviceAccount": {
"type": "object",
"properties": {
"annotations": {
"type": "object"
},
"name": {
"type": "string"
},
"create": {
"type": "boolean"
}
}
},
"rbac": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
}
}
},
"tolerations": {
"type": "array"
},
"ipFamilies" : {
"items": {
"type": "string",
"enum": ["IPv4", "IPv6"]
}
"topologySpreadConstraints": {
"type": "array"
},
"ipFamilyPolicy" : {
"networkGateway": {
"type": "string"
},
"imagePullPolicy": {
"type": "string",
"enum": ["", "SingleStack", "PreferDualStack", "RequireDualStack"]
"enum": [
"",
"Always",
"IfNotPresent",
"Never"
]
},
"ports": {
"imagePullSecrets": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"port": {
"type": "integer"
},
"protocol": {
"type": "string"
},
"targetPort": {
"type": "integer"
}
}
}
},
"type": {
"type": "string"
}
}
},
"serviceAccount": {
"type": "object",
"properties": {
"annotations": {
"type": "object"
},
"name": {
"type": "string"
},
"create": {
"type": "boolean"
}
}
},
"rbac": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
}
}
},
"tolerations": {
"type": "array"
},
"topologySpreadConstraints": {
"type": "array"
},
"networkGateway": {
"type": "string"
},
"imagePullPolicy": {
"type": "string",
"enum": ["", "Always", "IfNotPresent", "Never"]
},
"imagePullSecrets": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
"podDisruptionBudget": {
"type": "object",
"properties": {
"minAvailable": {
"type": [
"integer",
"string"
]
},
"maxUnavailable": {
"type": [
"integer",
"string"
]
},
"unhealthyPodEvictionPolicy": {
"type": "string",
"enum": [
"",
"IfHealthyBudget",
"AlwaysAllow"
]
}
}
},
"terminationGracePeriodSeconds": {
"type": "number"
},
"volumes": {
"type": "array",
"items": {
"type": "object"
}
},
"volumeMounts": {
"type": "array",
"items": {
"type": "object"
}
},
"priorityClassName": {
"type": "string"
}
}
},
"podDisruptionBudget": {
"type": "object",
"properties": {
"minAvailable": {
"type": ["integer", "string"]
},
"maxUnavailable": {
"type": ["integer", "string"]
},
"unhealthyPodEvictionPolicy": {
"type": "string",
"enum": ["", "IfHealthyBudget", "AlwaysAllow"]
}
}
},
"terminationGracePeriodSeconds": {
"type": "number"
},
"priorityClassName": {
"type": "string"
}
}
},
"defaults": {
"$ref": "#/$defs/values"
},
"$ref": "#/$defs/values"
}

View File

@ -1,139 +1,152 @@
# Name allows overriding the release name. Generally this should not be set
name: ""
# revision declares which revision this gateway is a part of
revision: ""
# Controls the spec.replicas setting for the Gateway deployment if set.
# Otherwise defaults to Kubernetes Deployment default (1).
replicaCount:
kind: Deployment
rbac:
# If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
# when using http://gateway-api.org/.
enabled: true
serviceAccount:
# If set, a service account will be created. Otherwise, the default is used
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set, the release name is used
defaults:
# Name allows overriding the release name. Generally this should not be set
name: ""
# revision declares which revision this gateway is a part of
revision: ""
podAnnotations:
prometheus.io/port: "15020"
prometheus.io/scrape: "true"
prometheus.io/path: "/stats/prometheus"
inject.istio.io/templates: "gateway"
sidecar.istio.io/inject: "true"
# Controls the spec.replicas setting for the Gateway deployment if set.
# Otherwise defaults to Kubernetes Deployment default (1).
replicaCount:
# Define the security context for the pod.
# If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
# On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: ~
containerSecurityContext: ~
kind: Deployment
service:
# Type of service. Set to "None" to disable the service entirely
type: LoadBalancer
ports:
- name: status-port
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
rbac:
# If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
# when using http://gateway-api.org/.
enabled: true
serviceAccount:
# If set, a service account will be created. Otherwise, the default is used
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set, the release name is used
name: ""
podAnnotations:
prometheus.io/port: "15020"
prometheus.io/scrape: "true"
prometheus.io/path: "/stats/prometheus"
inject.istio.io/templates: "gateway"
sidecar.istio.io/inject: "true"
# Define the security context for the pod.
# If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
# On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: ~
containerSecurityContext: ~
service:
# Type of service. Set to "None" to disable the service entirely
type: LoadBalancer
ports:
- name: status-port
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
annotations: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
externalIPs: []
ipFamilyPolicy: ""
ipFamilies: []
## Whether to automatically allocate NodePorts (only for LoadBalancers).
# allocateLoadBalancerNodePorts: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: {}
autoscaleBehavior: {}
# Pod environment variables
env: {}
# Labels to apply to all resources
labels: {}
# Annotations to apply to all resources
annotations: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
externalIPs: []
ipFamilyPolicy: ""
ipFamilies: []
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
nodeSelector: {}
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
tolerations: []
# Pod environment variables
env: {}
topologySpreadConstraints: []
# Labels to apply to all resources
labels: {}
affinity: {}
# Annotations to apply to all resources
annotations: {}
# If specified, the gateway will act as a network gateway for the given network.
networkGateway: ""
nodeSelector: {}
# Specify image pull policy if default behavior isn't desired.
# Default behavior: latest images will be Always else IfNotPresent
imagePullPolicy: ""
tolerations: []
imagePullSecrets: []
topologySpreadConstraints: []
# This value is used to configure a Kubernetes PodDisruptionBudget for the gateway.
#
# By default, the `podDisruptionBudget` is disabled (set to `{}`),
# which means that no PodDisruptionBudget resource will be created.
#
# To enable the PodDisruptionBudget, configure it by specifying the
# `minAvailable` or `maxUnavailable`. For example, to set the
# minimum number of available replicas to 1, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
#
# Or, to allow a maximum of 1 unavailable replica, you can set:
#
# podDisruptionBudget:
# maxUnavailable: 1
#
# You can also specify the `unhealthyPodEvictionPolicy` field, and the valid values are `IfHealthyBudget` and `AlwaysAllow`.
# For example, to set the `unhealthyPodEvictionPolicy` to `AlwaysAllow`, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
# unhealthyPodEvictionPolicy: AlwaysAllow
#
# To disable the PodDisruptionBudget, you can leave it as an empty object `{}`:
#
# podDisruptionBudget: {}
#
podDisruptionBudget: {}
affinity: {}
terminationGracePeriodSeconds: 30
# If specified, the gateway will act as a network gateway for the given network.
networkGateway: ""
# A list of `Volumes` added into the Gateway Pods. See
# https://kubernetes.io/docs/concepts/storage/volumes/.
volumes: []
# Specify image pull policy if default behavior isn't desired.
# Default behavior: latest images will be Always else IfNotPresent
imagePullPolicy: ""
# A list of `VolumeMounts` added into the Gateway Pods. See
# https://kubernetes.io/docs/concepts/storage/volumes/.
volumeMounts: []
imagePullSecrets: []
# This value is used to configure a Kubernetes PodDisruptionBudget for the gateway.
#
# By default, the `podDisruptionBudget` is disabled (set to `{}`),
# which means that no PodDisruptionBudget resource will be created.
#
# To enable the PodDisruptionBudget, configure it by specifying the
# `minAvailable` or `maxUnavailable`. For example, to set the
# minimum number of available replicas to 1, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
#
# Or, to allow a maximum of 1 unavailable replica, you can set:
#
# podDisruptionBudget:
# maxUnavailable: 1
#
# You can also specify the `unhealthyPodEvictionPolicy` field, and the valid values are `IfHealthyBudget` and `AlwaysAllow`.
# For example, to set the `unhealthyPodEvictionPolicy` to `AlwaysAllow`, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
# unhealthyPodEvictionPolicy: AlwaysAllow
#
# To disable the PodDisruptionBudget, you can leave it as an empty object `{}`:
#
# podDisruptionBudget: {}
#
podDisruptionBudget: {}
terminationGracePeriodSeconds: 30
# Configure this to a higher priority class in order to make sure your Istio gateway pods
# will not be killed because of low priority class.
# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# for more detail.
priorityClassName: ""
# Configure this to a higher priority class in order to make sure your Istio gateway pods
# will not be killed because of low priority class.
# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# for more detail.
priorityClassName: ""

View File

@ -11,25 +11,6 @@ diff -tubr charts/gateway.orig/templates/deployment.yaml charts/gateway/template
selector:
matchLabels:
{{- include "gateway.selectorLabels" . | nindent 6 }}
@@ -86,6 +90,10 @@
name: http-envoy-prom
resources:
{{- toYaml .Values.resources | nindent 12 }}
+ {{- if .Values.volumeMounts }}
+ volumeMounts:
+ {{- toYaml .Values.volumeMounts | nindent 12 }}
+ {{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -102,3 +110,7 @@
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
+ {{- with .Values.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff -tubr charts/gateway.orig/templates/service.yaml charts/gateway/templates/service.yaml
--- charts/gateway.orig/templates/service.yaml 2022-12-09 14:58:33.000000000 +0000
+++ charts/gateway/templates/service.yaml 2022-12-12 22:52:27.629670669 +0000
@ -49,19 +30,3 @@ diff -tubr charts/gateway.orig/templates/service.yaml charts/gateway/templates/s
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs: {{- range .Values.service.externalIPs }}
diff -tubr charts/gateway.orig/values.schema.json charts/gateway/values.schema.json
--- charts/gateway.orig/values.schema.json 2022-12-09 14:58:33.000000000 +0000
+++ charts/gateway/values.schema.json 2022-12-12 22:52:27.629670669 +0000
@@ -51,6 +51,12 @@
"labels": {
"type": "object"
},
+ "volumes": {
+ "type": "array"
+ },
+ "volumeMounts": {
+ "type": "array"
+ },
"name": {
"type": "string"
},

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio
description: KubeZero Umbrella Chart for Istio
type: application
version: 0.19.5
version: 0.21.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,6 +1,6 @@
# kubezero-istio
![Version: 0.19.4](https://img.shields.io/badge/Version-0.19.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.21.0](https://img.shields.io/badge/Version-0.21.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio
@ -21,9 +21,9 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://istio-release.storage.googleapis.com/charts | base | 1.19.4 |
| https://istio-release.storage.googleapis.com/charts | istiod | 1.19.4 |
| https://kiali.org/helm-charts | kiali-server | 1.76.0 |
| https://istio-release.storage.googleapis.com/charts | base | 1.21.0 |
| https://istio-release.storage.googleapis.com/charts | istiod | 1.21.0 |
| https://kiali.org/helm-charts | kiali-server | 1.82.0 |
## Values

View File

@ -5,18 +5,18 @@ folder: Istio
condition: '.Values.istiod.telemetry.enabled'
dashboards:
- name: istio-control-plane
url: https://grafana.com/api/dashboards/7645/revisions/187/download
url: https://grafana.com/api/dashboards/7645/revisions/201/download
tags:
- Istio
- name: istio-mesh
url: https://grafana.com/api/dashboards/7639/revisions/187/download
url: https://grafana.com/api/dashboards/7639/revisions/201/download
tags:
- Istio
- name: istio-service
url: https://grafana.com/api/dashboards/7636/revisions/187/download
url: https://grafana.com/api/dashboards/7636/revisions/201/download
tags:
- Istio
- name: istio-workload
url: https://grafana.com/api/dashboards/7630/revisions/187/download
url: https://grafana.com/api/dashboards/7630/revisions/201/download
tags:
- Istio

File diff suppressed because one or more lines are too long

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.8.10
version: 0.8.11
appVersion: 1.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -20,11 +20,11 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: fluentd
version: 0.5.0
version: 0.5.2
repository: https://fluent.github.io/helm-charts
condition: fluentd.enabled
- name: fluent-bit
version: 0.40.0
version: 0.46.0
repository: https://fluent.github.io/helm-charts
condition: fluent-bit.enabled
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-logging
![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
![Version: 0.8.11](https://img.shields.io/badge/Version-0.8.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@ -19,8 +19,8 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://fluent.github.io/helm-charts | fluent-bit | 0.40.0 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.0 |
| https://fluent.github.io/helm-charts | fluent-bit | 0.46.0 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
## Changes from upstream
### ECK
@ -56,11 +56,6 @@ Kubernetes: `>= 1.26.0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| eck-operator.enabled | bool | `false` | |
| eck-operator.installCRDs | bool | `false` | |
| eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| eck-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| elastic_password | string | `""` | |
| es.nodeSets | list | `[]` | |
| es.prometheus | bool | `false` | |
@ -87,11 +82,10 @@ Kubernetes: `>= 1.26.0`
| fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/var/lib/containers/logs"` | |
| fluent-bit.daemonSetVolumes[1].name | string | `"newlog"` | |
| fluent-bit.enabled | bool | `false` | |
| fluent-bit.image | string | `nil` | |
| fluent-bit.luaScripts."kubezero.lua" | string | `"function nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` | |
| fluent-bit.resources.limits.memory | string | `"128Mi"` | |
| fluent-bit.resources.requests.cpu | string | `"20m"` | |
| fluent-bit.resources.requests.memory | string | `"32Mi"` | |
| fluent-bit.resources.requests.memory | string | `"48Mi"` | |
| fluent-bit.serviceMonitor.enabled | bool | `false` | |
| fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | |
| fluent-bit.testFramework.enabled | bool | `false` | |
@ -100,17 +94,15 @@ Kubernetes: `>= 1.26.0`
| fluentd.configMapConfigs[0] | string | `"fluentd-prometheus-conf"` | |
| fluentd.dashboards.enabled | bool | `false` | |
| fluentd.enabled | bool | `false` | |
| fluentd.env[0].name | string | `"FLUENTD_CONF"` | |
| fluentd.env[0].value | string | `"../../etc/fluent/fluent.conf"` | |
| fluentd.env[1].name | string | `"OUTPUT_PASSWORD"` | |
| fluentd.env[1].valueFrom.secretKeyRef.key | string | `"elastic"` | |
| fluentd.env[1].valueFrom.secretKeyRef.name | string | `"logging-es-elastic-user"` | |
| fluentd.env[0].name | string | `"OUTPUT_PASSWORD"` | |
| fluentd.env[0].valueFrom.secretKeyRef.key | string | `"elastic"` | |
| fluentd.env[0].valueFrom.secretKeyRef.name | string | `"logging-es-elastic-user"` | |
| fluentd.fileConfigs."00_system.conf" | string | `"<system>\n root_dir /fluentd/log\n log_level info\n ignore_repeated_log_interval 60s\n ignore_same_log_interval 60s\n workers 1\n</system>"` | |
| fluentd.fileConfigs."01_sources.conf" | string | `"<source>\n @type http\n @label @KUBERNETES\n port 9880\n bind 0.0.0.0\n keepalive_timeout 30\n</source>\n\n<source>\n @type forward\n @label @KUBERNETES\n port 24224\n bind 0.0.0.0\n # skip_invalid_event true\n send_keepalive_packet true\n <security>\n self_hostname \"#{ENV['HOSTNAME']}\"\n shared_key {{ .Values.shared_key }}\n </security>\n</source>"` | |
| fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops eg. ES has issues etc.\n # discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n # Exclude current fluent-bit multiline noise\n <filter kube.logging.fluent-bit>\n @type grep\n <exclude>\n key log\n pattern /could not append content to multiline context/\n </exclude>\n </filter>\n\n # Generate Hash ID to break endless loop for already ingested events during retries\n <filter **>\n @type elasticsearch_genid\n use_entire_record true\n </filter>\n\n # Route through DISPATCH for Prometheus metrics\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | |
| fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id out_es\n @type elasticsearch\n # @log_level debug\n include_tag_key true\n\n id_key _hash\n remove_keys _hash\n write_operation create\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n hosts \"{{ .Values.output.host }}\"\n port 9200\n scheme http\n user elastic\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 300s\n slow_flush_log_threshold 55.0\n\n #with_transporter_log true\n\n verify_es_version_at_startup false\n default_elasticsearch_version 7\n suppress_type_name true\n\n # Retry failed bulk requests\n # https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types\n unrecoverable_error_types [\"out_of_memory_error\"]\n bulk_message_request_threshold 1048576\n\n <buffer>\n @type file\n\n flush_mode interval\n flush_thread_count 2\n flush_interval 10s\n\n chunk_limit_size 2MB\n total_limit_size 1GB\n\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 6h\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | |
| fluentd.image.repository | string | `"public.ecr.aws/zero-downtime/fluentd-concenter"` | |
| fluentd.image.tag | string | `"v1.16.0"` | |
| fluentd.image.tag | string | `"v1.16.3"` | |
| fluentd.istio.enabled | bool | `false` | |
| fluentd.kind | string | `"Deployment"` | |
| fluentd.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |

View File

@ -1,9 +1,9 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Updated Fluent Bit OCI image to v2.2.0."
description: "Updated _Fluent Bit_ OCI image to [v3.0.0](https://github.com/fluent/fluent-bit/releases/tag/v3.0.0)."
apiVersion: v1
appVersion: 2.2.0
appVersion: 3.0.1
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
@ -24,4 +24,4 @@ maintainers:
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.40.0
version: 0.46.0

View File

@ -1,3 +1,6 @@
testFramework:
enabled: true
logLevel: debug
dashboards:

View File

@ -14,7 +14,9 @@ metadata:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- range $key, $value := . }}
{{ printf "%s: %s" $key ((tpl $value $) | quote) }}
{{- end }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}
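Since annotation values are now rendered through `tpl`, overrides may embed template expressions; a sketch with a made-up hostname:

```yaml
# Illustrative fluent-bit ingress values; the annotation value is templated
# at render time.
ingress:
  enabled: true
  annotations:
    external-dns.alpha.kubernetes.io/hostname: "logs-{{ .Release.Namespace }}.example.com"
```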

View File

@ -17,6 +17,11 @@ spec:
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if (eq .Values.kind "DaemonSet") }}
{{- with .Values.service.internalTrafficPolicy }}
internalTrafficPolicy: {{ . }}
{{- end }}
{{- end }}
{{- if (eq .Values.service.type "LoadBalancer")}}
{{- with .Values.service.loadBalancerClass}}
loadBalancerClass: {{ . }}
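The new field only renders when the chart runs as a DaemonSet; a sketch of values that keep traffic to the service on the local node:

```yaml
# Illustrative fluent-bit values: with one pod per node, serve the service
# only from the node-local endpoint.
kind: DaemonSet
service:
  internalTrafficPolicy: Local
```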

View File

@ -13,7 +13,7 @@ spec:
jobLabel: app.kubernetes.io/instance
endpoints:
- port: http
path: /api/v1/metrics/prometheus
path: {{ default "/api/v2/metrics/prometheus" .Values.serviceMonitor.path }}
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
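The endpoint path now defaults to the v2 metrics API but remains overridable via `serviceMonitor.path`; for example, to pin the previous v1 path:

```yaml
# Illustrative fluent-bit values: keep scraping the old metrics path.
serviceMonitor:
  enabled: true
  path: /api/v1/metrics/prometheus
```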

View File

@ -5,16 +5,19 @@ metadata:
name: "{{ include "fluent-bit.fullname" . }}-test-connection"
namespace: {{ default .Release.Namespace .Values.testFramework.namespace }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
helm.sh/chart: {{ include "fluent-bit.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
annotations:
"helm.sh/hook": test-success
helm.sh/hook: test
helm.sh/hook-delete-policy: hook-succeeded
spec:
containers:
- name: wget
image: {{ include "fluent-bit.image" .Values.testFramework.image | quote }}
imagePullPolicy: {{ .Values.testFramework.image.pullPolicy }}
command: ['wget']
args: ['{{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}']
command: ["sh"]
args: ["-c", "wget -O- {{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}"]
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}

View File

@ -12,7 +12,7 @@ image:
# Set to "-" to not use the default value
tag:
digest:
pullPolicy: Always
pullPolicy: IfNotPresent
testFramework:
enabled: true
@ -91,6 +91,7 @@ securityContext: {}
service:
type: ClusterIP
port: 2020
internalTrafficPolicy:
loadBalancerClass:
loadBalancerSourceRanges: []
labels: {}
@ -128,7 +129,7 @@ serviceMonitor:
# scheme: ""
# tlsConfig: {}
## Beare in mind if youn want to collec metrics from a different port
## Bear in mind if you want to collect metrics from a different port
## you will need to configure the new ports on the extraPorts property.
additionalEndpoints: []
# - port: metrics
@ -418,7 +419,7 @@ config:
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
# This allows adding more files with arbitary filenames to /fluent-bit/etc/conf by providing key/value pairs.
# This allows adding more files with arbitrary filenames to /fluent-bit/etc/conf by providing key/value pairs.
# The key becomes the filename, the value becomes the file content.
extraFiles: {}
# upstream.conf: |

View File

@ -12,4 +12,4 @@ name: fluentd
sources:
- https://github.com/fluent/fluentd/
- https://github.com/fluent/fluentd-kubernetes-daemonset
version: 0.5.0
version: 0.5.2

View File

@ -90,3 +90,15 @@ Name of the configMap used for additional configuration files; allows users to o
{{ printf "%s-%s" "fluentd-config" ( include "fluentd.shortReleaseName" . ) }}
{{- end -}}
{{- end -}}
{{/*
HPA apiVersion according to the k8s version
Check legacy first so helm template / kustomize will default to the latest version
*/}}
{{- define "fluentd.hpa.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "autoscaling/v2beta2") (semverCompare "<1.23-0" .Capabilities.KubeVersion.GitVersion) -}}
autoscaling/v2beta2
{{- else -}}
autoscaling/v2
{{- end -}}
{{- end -}}

View File

@ -1,5 +1,5 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta2
apiVersion: {{ include "fluentd.hpa.apiVersion" . }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "fluentd.fullname" . }}

View File

@ -1,4 +1,11 @@
{{/*
Target the very simple case where
fluentd is deployed with the default values
If the fluentd config is overridden and the metrics server removed
this will fail.
*/}}
{{- if .Values.testFramework.enabled }}
{{ if empty .Values.service.ports }}
apiVersion: v1
kind: Pod
metadata:
@ -11,7 +18,14 @@ spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "fluentd.fullname" . }}:{{ .Values.service.port }}']
command:
- sh
- -c
- |
set -e
# Give fluentd some time to start up
while :; do nc -vz {{ include "fluentd.fullname" . }}:24231 && break; sleep 1; done
wget '{{ include "fluentd.fullname" . }}:24231/metrics'
restartPolicy: Never
{{ end }}
{{- end }}

View File

@ -321,6 +321,14 @@ fileConfigs:
emit_unmatched_lines true
</source>
# expose metrics in prometheus format
<source>
@type prometheus
bind 0.0.0.0
port 24231
metrics_path /metrics
</source>
02_filters.conf: |-
<label @KUBERNETES>
<match kubernetes.var.log.containers.fluentd**>
@ -378,6 +386,8 @@ fileConfigs:
path ""
user elastic
password changeme
# Don't wait for elastic to start up.
verify_es_version_at_startup false
</match>
</label>
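A quick way to check the newly exposed metrics endpoint from inside the cluster is a throwaway curl pod (namespace and service name are illustrative):

```console
kubectl -n logging run metrics-check --rm -it --restart=Never \
  --image=curlimages/curl -- curl -s http://fluentd:24231/metrics | head
```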

View File

@ -1,32 +1,38 @@
diff -tubrN charts/fluentd/templates/fluentd-configurations-cm.yaml charts/fluentd.zdt/templates/fluentd-configurations-cm.yaml
--- charts/fluentd/templates/fluentd-configurations-cm.yaml 2021-02-12 18:13:04.000000000 +0100
+++ charts/fluentd.zdt/templates/fluentd-configurations-cm.yaml 2021-03-09 17:54:34.904992401 +0100
@@ -7,7 +7,7 @@
diff -rtuN charts/fluentd.orig/templates/fluentd-configurations-cm.yaml charts/fluentd/templates/fluentd-configurations-cm.yaml
--- charts/fluentd.orig/templates/fluentd-configurations-cm.yaml 2024-04-08 11:00:03.030515998 +0000
+++ charts/fluentd/templates/fluentd-configurations-cm.yaml 2024-04-08 11:00:03.040516045 +0000
@@ -9,7 +9,7 @@
data:
{{- range $key, $value := .Values.fileConfigs }}
{{$key }}: |-
- {{- $value | nindent 4 }}
+ {{- (tpl $value $) | nindent 4 }}
{{- end }}
{{- end }}
---
diff -tubrN charts/fluentd/templates/tests/test-connection.yaml charts/fluentd.zdt/templates/tests/test-connection.yaml
--- charts/fluentd/templates/tests/test-connection.yaml 2021-02-12 18:13:04.000000000 +0100
+++ charts/fluentd.zdt/templates/tests/test-connection.yaml 2021-03-09 17:54:34.904992401 +0100
@@ -1,3 +1,4 @@
diff -rtuN charts/fluentd.orig/templates/tests/test-connection.yaml charts/fluentd/templates/tests/test-connection.yaml
--- charts/fluentd.orig/templates/tests/test-connection.yaml 2024-04-08 11:00:03.030515998 +0000
+++ charts/fluentd/templates/tests/test-connection.yaml 2024-04-08 11:03:16.254774985 +0000
@@ -4,6 +4,7 @@
If the fluentd config is overridden and the metrics server removed
this will fail.
*/}}
+{{- if .Values.testFramework.enabled }}
{{ if empty .Values.service.ports }}
apiVersion: v1
kind: Pod
metadata:
@@ -13,3 +14,4 @@
command: ['wget']
args: ['{{ include "fluentd.fullname" . }}:{{ .Values.service.port }}']
@@ -26,4 +27,5 @@
while :; do nc -vz {{ include "fluentd.fullname" . }}:24231 && break; sleep 1; done
wget '{{ include "fluentd.fullname" . }}:24231/metrics'
restartPolicy: Never
-{{ end }}
\ No newline at end of file
+{{ end }}
+{{- end }}
diff -tubrN charts/fluentd/values.yaml charts/fluentd.zdt/values.yaml
--- charts/fluentd/values.yaml 2021-02-12 18:13:04.000000000 +0100
+++ charts/fluentd.zdt/values.yaml 2021-03-09 17:54:34.908325735 +0100
@@ -12,6 +12,9 @@
diff -rtuN charts/fluentd.orig/values.yaml charts/fluentd/values.yaml
--- charts/fluentd.orig/values.yaml 2024-04-08 11:00:03.030515998 +0000
+++ charts/fluentd/values.yaml 2024-04-08 11:00:03.040516045 +0000
@@ -13,6 +13,9 @@
pullPolicy: "IfNotPresent"
tag: ""

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
type: application
version: 0.9.5
version: 0.9.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -19,14 +19,14 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: kube-prometheus-stack
version: 54.2.2
version: 57.2.0
repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter
version: 4.9.0
version: 4.9.1
repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-adapter.enabled
- name: prometheus-pushgateway
version: 2.4.2
version: 2.8.0
repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-pushgateway.enabled
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-metrics
![Version: 0.9.5](https://img.shields.io/badge/Version-0.9.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.9.6](https://img.shields.io/badge/Version-0.9.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
@ -19,9 +19,9 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://prometheus-community.github.io/helm-charts | kube-prometheus-stack | 54.2.2 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 4.9.0 |
| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.4.2 |
| https://prometheus-community.github.io/helm-charts | kube-prometheus-stack | 57.2.0 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 4.9.1 |
| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.8.0 |
## Values
@ -177,29 +177,30 @@ Kubernetes: `>= 1.26.0`
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | |
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| kube-prometheus-stack.prometheusOperator.resources.limits.memory | string | `"64Mi"` | |
| kube-prometheus-stack.prometheusOperator.resources.requests.cpu | string | `"20m"` | |
| kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"32Mi"` | |
| kube-prometheus-stack.prometheusOperator.resources.limits.memory | string | `"128Mi"` | |
| kube-prometheus-stack.prometheusOperator.resources.requests.cpu | string | `"10m"` | |
| kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"64Mi"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| prometheus-adapter.enabled | bool | `true` | |
| prometheus-adapter.logLevel | int | `1` | |
| prometheus-adapter.metricsRelistInterval | string | `"3m"` | |
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| prometheus-adapter.prometheus.url | string | `"http://metrics-kube-prometheus-st-prometheus"` | |
| prometheus-adapter.rules.default | bool | `false` | |
| prometheus-adapter.rules.resource.cpu.containerLabel | string | `"container"` | |
| prometheus-adapter.rules.resource.cpu.containerQuery | string | `"sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}[5m])) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.nodeQuery | string | `"sum(1 - irate(node_cpu_seconds_total{mode=\"idle\"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.containerQuery | string | `"sum by (<<.GroupBy>>) (\n irate (\n container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!=\"\",pod!=\"\"}[120s]\n )\n)\n"` | |
| prometheus-adapter.rules.resource.cpu.nodeQuery | string | `"sum(1 - irate(node_cpu_seconds_total{<<.LabelMatchers>>, mode=\"idle\"}[120s])) by (<<.GroupBy>>)\n"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.instance.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.namespace.resource | string | `"namespace"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.node.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.pod.resource | string | `"pod"` | |
| prometheus-adapter.rules.resource.memory.containerLabel | string | `"container"` | |
| prometheus-adapter.rules.resource.memory.containerQuery | string | `"sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.memory.nodeQuery | string | `"sum(node_memory_MemTotal_bytes{job=\"node-exporter\",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job=\"node-exporter\",<<.LabelMatchers>>}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.memory.containerQuery | string | `"sum by (<<.GroupBy>>) (\n container_memory_working_set_bytes{<<.LabelMatchers>>,container!=\"\",pod!=\"\",container!=\"POD\"}\n)\n"` | |
| prometheus-adapter.rules.resource.memory.nodeQuery | string | `"sum(node_memory_MemTotal_bytes{<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)\n"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.instance.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.namespace.resource | string | `"namespace"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.node.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.pod.resource | string | `"pod"` | |
| prometheus-adapter.rules.resource.window | string | `"5m"` | |
| prometheus-adapter.rules.resource.window | string | `"2m"` | |
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| prometheus-pushgateway.enabled | bool | `false` | |

View File

@ -0,0 +1,5 @@
root = true

[files/dashboards/*.json]
indent_size = 2
indent_style = space

View File

@ -26,3 +26,4 @@ ci/
kube-prometheus-*.tgz
unittests/
files/dashboards/

View File

@ -7,7 +7,7 @@ annotations:
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
apiVersion: v2
appVersion: v0.69.1
appVersion: v0.72.0
dependencies:
- condition: crds.enabled
name: crds
@ -16,19 +16,19 @@ dependencies:
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 5.15.*
version: 5.18.*
- condition: nodeExporter.enabled
name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts
version: 4.24.*
version: 4.32.*
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
version: 7.0.*
version: 7.3.*
- condition: windowsMonitoring.enabled
name: prometheus-windows-exporter
repository: https://prometheus-community.github.io/helm-charts
version: 0.1.*
version: 0.3.*
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
and Prometheus rules combined with documentation and scripts to provide easy to
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
@ -49,6 +49,8 @@ maintainers:
name: gkarthiks
- email: kube-prometheus-stack@sisti.pt
name: GMartinez-Sisti
- email: github@jkroepke.de
name: jkroepke
- email: scott@r6by.com
name: scottrigby
- email: miroslav.hadzhiev@gmail.com
@ -60,4 +62,4 @@ sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 54.2.2
version: 57.2.0

View File

@ -82,6 +82,63 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 56.x to 57.x
This version upgrades Prometheus-Operator to v0.72.0
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 55.x to 56.x
This version upgrades Prometheus-Operator to v0.71.0, Prometheus to 2.49.1
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.71.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 54.x to 55.x
This version upgrades Prometheus-Operator to v0.70.0
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.70.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 53.x to 54.x
Grafana Helm Chart has bumped to version 7

View File

@ -1,133 +1,112 @@
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.69.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.72.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.11.1
operator.prometheus.io/version: 0.69.1
controller-gen.kubebuilder.io/version: v0.13.0
operator.prometheus.io/version: 0.72.0
argocd.argoproj.io/sync-options: ServerSideApply=true
creationTimestamp: null
name: prometheusrules.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
categories:
- prometheus-operator
- prometheus-operator
kind: PrometheusRule
listKind: PrometheusRuleList
plural: prometheusrules
shortNames:
- promrule
- promrule
singular: prometheusrule
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: PrometheusRule defines recording and alerting rules for a Prometheus
instance
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired alerting rule definitions for Prometheus.
properties:
groups:
description: Content of Prometheus rule file
items:
description: RuleGroup is a list of sequentially evaluated recording
and alerting rules.
properties:
interval:
description: Interval determines how often rules in the group
are evaluated.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
limit:
description: Limit the number of alerts an alerting rule and
series a recording rule can produce. Limit is supported starting
with Prometheus >= 2.31 and Thanos Ruler >= 0.24.
type: integer
name:
description: Name of the rule group.
minLength: 1
type: string
partial_response_strategy:
description: 'PartialResponseStrategy is only used by ThanosRuler
and will be ignored by Prometheus instances. More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response'
pattern: ^(?i)(abort|warn)?$
type: string
rules:
description: List of alerting and recording rules.
items:
description: 'Rule describes an alerting or recording rule
See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules)
rule'
properties:
alert:
description: Name of the alert. Must be a valid label
value. Only one of `record` and `alert` must be set.
type: string
annotations:
additionalProperties:
- name: v1
schema:
openAPIV3Schema:
description: PrometheusRule defines recording and alerting rules for a Prometheus instance
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of desired alerting rule definitions for Prometheus.
properties:
groups:
description: Content of Prometheus rule file
items:
description: RuleGroup is a list of sequentially evaluated recording and alerting rules.
properties:
interval:
description: Interval determines how often rules in the group are evaluated.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
limit:
description: Limit the number of alerts an alerting rule and series a recording rule can produce. Limit is supported starting with Prometheus >= 2.31 and Thanos Ruler >= 0.24.
type: integer
name:
description: Name of the rule group.
minLength: 1
type: string
partial_response_strategy:
description: 'PartialResponseStrategy is only used by ThanosRuler and will be ignored by Prometheus instances. More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response'
pattern: ^(?i)(abort|warn)?$
type: string
rules:
description: List of alerting and recording rules.
items:
description: 'Rule describes an alerting or recording rule See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) rule'
properties:
alert:
description: Name of the alert. Must be a valid label value. Only one of `record` and `alert` must be set.
type: string
description: Annotations to add to each alert. Only valid
for alerting rules.
type: object
expr:
anyOf:
- type: integer
- type: string
description: PromQL expression to evaluate.
x-kubernetes-int-or-string: true
for:
description: Alerts are considered firing once they have
been returned for this long.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
keep_firing_for:
description: KeepFiringFor defines how long an alert will
continue firing after the condition that triggered it
has cleared.
minLength: 1
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
labels:
additionalProperties:
annotations:
additionalProperties:
type: string
description: Annotations to add to each alert. Only valid for alerting rules.
type: object
expr:
anyOf:
- type: integer
- type: string
description: PromQL expression to evaluate.
x-kubernetes-int-or-string: true
for:
description: Alerts are considered firing once they have been returned for this long.
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
description: Labels to add or overwrite.
type: object
record:
description: Name of the time series to output to. Must
be a valid metric name. Only one of `record` and `alert`
must be set.
type: string
required:
- expr
type: object
type: array
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
type: object
required:
- spec
type: object
served: true
storage: true
keep_firing_for:
description: KeepFiringFor defines how long an alert will continue firing after the condition that triggered it has cleared.
minLength: 1
pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
type: string
labels:
additionalProperties:
type: string
description: Labels to add or overwrite.
type: object
record:
description: Name of the time series to output to. Must be a valid metric name. Only one of `record` and `alert` must be set.
type: string
required:
- expr
type: object
type: array
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
type: object
required:
- spec
type: object
served: true
storage: true

View File

@ -1,15 +1,15 @@
annotations:
artifacthub.io/license: AGPL-3.0-only
artifacthub.io/license: Apache-2.0
artifacthub.io/links: |
- name: Chart Source
url: https://github.com/grafana/helm-charts
- name: Upstream Project
url: https://github.com/grafana/grafana
apiVersion: v2
appVersion: 10.1.5
appVersion: 10.4.0
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
home: https://grafana.com
icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116
keywords:
- monitoring
- metric
@ -30,4 +30,4 @@ sources:
- https://github.com/grafana/grafana
- https://github.com/grafana/helm-charts
type: application
version: 7.0.8
version: 7.3.7

View File

@ -48,7 +48,7 @@ This version requires Helm >= 3.1.0.
### To 7.0.0
For consistency with other Helm charts, the `global.image.registry` parameter was renamed
to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action
is required on upgrade. If you were previously setting `global.image.registry`, you will
need to instead set `global.imageRegistry`.
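As a minimal before/after sketch (the registry value is a placeholder):

```yaml
# chart < 7.0.0
global:
  image:
    registry: registry.example.com

# chart >= 7.0.0
global:
  imageRegistry: registry.example.com
```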
@ -136,6 +136,8 @@ need to instead set `global.imageRegistry`.
| `enableServiceLinks` | Inject Kubernetes services as environment variables. | `true` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
| `extraVolumes` | Additional Grafana server volumes | `[]` |
| `automountServiceAccountToken` | Mount the service account token on the grafana pod. Mandatory if sidecars are enabled | `true` |
| `createConfigmap` | Enable creating the grafana configmap | `true` |
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` |
| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
@ -160,7 +162,7 @@ need to instead set `global.imageRegistry`.
| `lifecycleHooks` | Lifecycle hooks for postStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` |
| `sidecar.image.registry` | Sidecar image registry | `quay.io` |
| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` |
| `sidecar.image.tag` | Sidecar image tag | `1.24.6` |
| `sidecar.image.tag` | Sidecar image tag | `1.26.0` |
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
| `sidecar.resources` | Sidecar resources | `{}` |
@ -174,7 +176,7 @@ need to instead set `global.imageRegistry`.
| `sidecar.alerts.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` |
| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
| `sidecar.alerts.initAlerts` | Set to true to deploy the alerts sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` |
| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
@ -222,7 +224,7 @@ need to instead set `global.imageRegistry`.
| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` |
| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` |
| `serviceAccount.automountServiceAccountToken` | Automount the service account token on all pods where this service account is used | `false` |
| `serviceAccount.annotations` | ServiceAccount annotations | |
| `serviceAccount.create` | Create service account | `true` |
| `serviceAccount.labels` | ServiceAccount labels | `{}` |
@ -315,24 +317,35 @@ ingress:
path: "/grafana"
```
### Example of extraVolumeMounts
### Example of extraVolumeMounts and extraVolumes
Volume can be type persistentVolumeClaim or hostPath but not both at same time.
If neither existingClaim or hostPath argument is given then type is emptyDir.
Configure additional volumes with `extraVolumes` and volume mounts with `extraVolumeMounts`.
Example for `extraVolumeMounts` and corresponding `extraVolumes`:
```yaml
- extraVolumeMounts:
extraVolumeMounts:
- name: plugins
mountPath: /var/lib/grafana/plugins
subPath: configs/grafana/plugins
existingClaim: existing-grafana-claim
readOnly: false
- name: dashboards
mountPath: /var/lib/grafana/dashboards
hostPath: /usr/shared/grafana/dashboards
readOnly: false
extraVolumes:
- name: plugins
existingClaim: existing-grafana-claim
- name: dashboards
hostPath: /usr/shared/grafana/dashboards
```
Volumes default to `emptyDir`. Set to `persistentVolumeClaim`,
`hostPath`, `csi`, or `configMap` for other types. For a
`persistentVolumeClaim`, specify an existing claim name with
`existingClaim`.
## Import dashboards
There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method:
@ -544,9 +557,61 @@ delete_notifiers:
# default org_id: 1
```
## Provision alert rules, contact points, notification policies and notification templates
## Sidecar for alerting resources
There are two methods to provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method:
If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana
pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with
a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written
to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated, however, deletions are a little more complicated (see below).
This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/).
To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)).
You can use either JSON or YAML format.
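For example, the current rule set can be exported in the file-provisioning format with a call such as the following (host and token are placeholders; the `/export` endpoint is assumed to be available in your Grafana version):

```console
curl -s -H "Authorization: Bearer $GRAFANA_API_TOKEN" \
  "https://grafana.example.com/api/v1/provisioning/alert-rules/export?format=yaml"
```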
Example config for an alert rule:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: sample-grafana-alert
labels:
grafana_alert: "1"
data:
k8s-alert.yml: |-
apiVersion: 1
groups:
- orgId: 1
name: k8s-alert
[...]
```
Deleting provisioned alert rules is a two-step process: first delete the configmap which defined the alert rule,
then create a configuration which deletes the alert rule.
Example deletion configuration:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: delete-sample-grafana-alert
namespace: monitoring
labels:
grafana_alert: "1"
data:
delete-k8s-alert.yml: |-
apiVersion: 1
deleteRules:
- orgId: 1
uid: 16624780-6564-45dc-825c-8bded4ad92d3
```
## Statically provision alerting resources
If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly you could use the `alerting` config option instead of the sidecar option above.
This grabs the alerting config and applies it statically when the Helm chart is rendered.
There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method:
```yaml
alerting:
@ -576,13 +641,14 @@ alerting:
title: '{{ `{{ template "default.title" . }}` }}'
```
There are two possibilities:
The two possibilities for static alerting resource provisioning are:
* Inlining the file contents as described in the example `values.yaml` and the official [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/).
* Importing a file using a relative path starting from the chart root directory.
* Inlining the file contents as shown for contact points in the above example.
* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example.
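A sketch of the file-import variant, assuming a rules file shipped inside the chart (the path is hypothetical and resolved relative to the chart root):

```yaml
alerting:
  rules.yaml:
    file: alerting/rules.yaml
```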
### Important notes on file provisioning
* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning.
* The chart supports importing YAML and JSON files.
* The filename must be unique, otherwise one volume mount will overwrite the other.
* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped.

View File

@ -0,0 +1,171 @@
{{/*
Generate config map data
*/}}
{{- define "grafana.configData" -}}
{{ include "grafana.assertNoLeakedSecrets" . }}
{{- $files := .Files }}
{{- $root := . -}}
{{- with .Values.plugins }}
plugins: {{ join "," . }}
{{- end }}
grafana.ini: |
{{- range $elem, $elemVal := index .Values "grafana.ini" }}
{{- if not (kindIs "map" $elemVal) }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else if kindIs "string" $elemVal }}
{{ $elem }} = {{ tpl $elemVal $ }}
{{- else }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- range $key, $value := index .Values "grafana.ini" }}
{{- if kindIs "map" $value }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else if kindIs "string" $elemVal }}
{{ $elem }} = {{ tpl $elemVal $ }}
{{- else }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.datasources }}
{{- if not (hasKey $value "secret") }}
{{ $key }}: |
{{- tpl (toYaml $value | nindent 2) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.notifiers }}
{{- if not (hasKey $value "secret") }}
{{ $key }}: |
{{- toYaml $value | nindent 2 }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.alerting }}
{{- if (hasKey $value "file") }}
{{ $key }}:
{{- toYaml ( $files.Get $value.file ) | nindent 2 }}
{{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}}
{{/* will be stored inside secret generated by "configSecret.yaml"*/}}
{{- else }}
{{ $key }}: |
{{- tpl (toYaml $value | nindent 2) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.dashboardProviders }}
{{ $key }}: |
{{- toYaml $value | nindent 2 }}
{{- end }}
{{- if .Values.dashboards }}
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{ $dashboardProviders := .Values.dashboardProviders }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -skf \
--connect-timeout 60 \
--max-time 60 \
{{- if not $value.b64content }}
{{- if not $value.acceptHeader }}
-H "Accept: application/json" \
{{- else }}
-H "Accept: {{ $value.acceptHeader }}" \
{{- end }}
{{- if $value.token }}
-H "Authorization: token {{ $value.token }}" \
{{- end }}
{{- if $value.bearerToken }}
-H "Authorization: Bearer {{ $value.bearerToken }}" \
{{- end }}
{{- if $value.basic }}
-H "Authorization: Basic {{ $value.basic }}" \
{{- end }}
{{- if $value.gitlabToken }}
-H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \
{{- end }}
-H "Content-Type: application/json;charset=UTF-8" \
{{- end }}
{{- $dpPath := "" -}}
{{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }}
{{- if eq $kd.name $provider }}
{{- $dpPath = $kd.options.path }}
{{- end }}
{{- end }}
{{- if $value.url }}
"{{ $value.url }}" \
{{- else }}
"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \
{{- end }}
{{- if $value.datasource }}
{{- if kindIs "string" $value.datasource }}
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \
{{- end }}
{{- if kindIs "slice" $value.datasource }}
{{- range $value.datasource }}
| sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \
{{- end }}
{{- end }}
{{- end }}
{{- if $value.b64content }}
| base64 -d \
{{- end }}
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json"
{{ end }}
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{/*
Generate dashboard json config map data
*/}}
{{- define "grafana.configDashboardProviderData" -}}
provider.yaml: |-
apiVersion: 1
providers:
- name: '{{ .Values.sidecar.dashboards.provider.name }}'
orgId: {{ .Values.sidecar.dashboards.provider.orgid }}
{{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
folder: '{{ .Values.sidecar.dashboards.provider.folder }}'
{{- end }}
type: {{ .Values.sidecar.dashboards.provider.type }}
disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}
allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}
updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }}
options:
foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}
{{- end -}}
{{- define "grafana.secretsData" -}}
{{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}
admin-user: {{ .Values.adminUser | b64enc | quote }}
{{- if .Values.adminPassword }}
admin-password: {{ .Values.adminPassword | b64enc | quote }}
{{- else }}
admin-password: {{ include "grafana.password" . }}
{{- end }}
{{- end }}
{{- if not .Values.ldap.existingSecret }}
ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }}
{{- end }}
{{- end -}}

View File

@ -225,3 +225,54 @@ Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific
{{- end }}
{{- $secretFound}}
{{- end -}}
{{/*
Checks whether the user is attempting to store secrets in plaintext
in the grafana.ini configmap
*/}}
{{/* grafana.assertNoLeakedSecrets checks for sensitive keys in values */}}
{{- define "grafana.assertNoLeakedSecrets" -}}
{{- $sensitiveKeysYaml := `
sensitiveKeys:
- path: ["database", "password"]
- path: ["smtp", "password"]
- path: ["security", "secret_key"]
- path: ["security", "admin_password"]
- path: ["auth.basic", "password"]
- path: ["auth.ldap", "bind_password"]
- path: ["auth.google", "client_secret"]
- path: ["auth.github", "client_secret"]
- path: ["auth.gitlab", "client_secret"]
- path: ["auth.generic_oauth", "client_secret"]
- path: ["auth.okta", "client_secret"]
- path: ["auth.azuread", "client_secret"]
- path: ["auth.grafana_com", "client_secret"]
- path: ["auth.grafananet", "client_secret"]
- path: ["azure", "user_identity_client_secret"]
- path: ["unified_alerting", "ha_redis_password"]
- path: ["metrics", "basic_auth_password"]
- path: ["external_image_storage.s3", "secret_key"]
- path: ["external_image_storage.webdav", "password"]
- path: ["external_image_storage.azure_blob", "account_key"]
` | fromYaml -}}
{{- if $.Values.assertNoLeakedSecrets -}}
{{- $grafanaIni := index .Values "grafana.ini" -}}
{{- range $_, $secret := $sensitiveKeysYaml.sensitiveKeys -}}
{{- $currentMap := $grafanaIni -}}
{{- $shouldContinue := true -}}
{{- range $index, $elem := $secret.path -}}
{{- if and $shouldContinue (hasKey $currentMap $elem) -}}
{{- if eq (len $secret.path) (add1 $index) -}}
{{- if not (regexMatch "\\$(?:__(?:env|file|vault))?{[^}]+}" (index $currentMap $elem)) -}}
{{- fail (printf "Sensitive key '%s' should not be defined explicitly in values. Use variable expansion instead. You can disable this client-side validation by changing the value of assertNoLeakedSecrets." (join "." $secret.path)) -}}
{{- end -}}
{{- else -}}
{{- $currentMap = index $currentMap $elem -}}
{{- end -}}
{{- else -}}
{{- $shouldContinue = false -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
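A values sketch that passes this check by deferring the secret to runtime via variable expansion (the environment variable name is a placeholder):

```yaml
grafana.ini:
  smtp:
    enabled: true
    # a literal password here would trip assertNoLeakedSecrets;
    # $__env{...} is expanded by Grafana at startup instead
    password: $__env{GF_SMTP_PASSWORD}
```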

View File

@ -5,7 +5,7 @@
schedulerName: "{{ . }}"
{{- end }}
serviceAccountName: {{ include "grafana.serviceAccountName" . }}
automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 2 }}
@ -14,6 +14,13 @@ securityContext:
hostAliases:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if .Values.dnsPolicy }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- end }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
@ -169,7 +176,7 @@ initContainers:
mountPath: "/etc/grafana/provisioning/alerting"
{{- with .Values.sidecar.alerts.extraMounts }}
{{- toYaml . | trim | nindent 6 }}
{{- end }}
{{- end }}
{{- end }}
{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }}
- name: {{ include "grafana.name" . }}-init-sc-datasources
@ -411,7 +418,7 @@ containers:
mountPath: "/etc/grafana/provisioning/alerting"
{{- with .Values.sidecar.alerts.extraMounts }}
{{- toYaml . | trim | nindent 6 }}
{{- end }}
{{- end }}
{{- end}}
{{- if .Values.sidecar.dashboards.enabled }}
- name: {{ include "grafana.name" . }}-sc-dashboard
@ -427,6 +434,11 @@ containers:
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
{{- range $key, $value := .Values.sidecar.datasources.envValueFrom }}
- name: {{ $key | quote }}
valueFrom:
{{- tpl (toYaml $value) $ | nindent 10 }}
{{- end }}
{{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }}
- name: IGNORE_ALREADY_PROCESSED
value: "true"
@ -898,26 +910,47 @@ containers:
{{- end }}
{{- end }}
{{- with .Values.datasources }}
{{- $datasources := . }}
{{- range (keys . | sortAlpha) }}
{{- if (or (hasKey (index $datasources .) "secret")) }} {{/*check if current datasource should be handled as secret */}}
- name: config-secret
mountPath: "/etc/grafana/provisioning/datasources/{{ . }}"
subPath: {{ . | quote }}
{{- else }}
- name: config
mountPath: "/etc/grafana/provisioning/datasources/{{ . }}"
subPath: {{ . | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.notifiers }}
{{- $notifiers := . }}
{{- range (keys . | sortAlpha) }}
{{- if (or (hasKey (index $notifiers .) "secret")) }} {{/*check if current notifier should be handled as secret */}}
- name: config-secret
mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}"
subPath: {{ . | quote }}
{{- else }}
- name: config
mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}"
subPath: {{ . | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.alerting }}
{{- $alertingmap := .}}
{{- range (keys . | sortAlpha) }}
{{- if (or (hasKey (index $.Values.alerting .) "secret") (hasKey (index $.Values.alerting .) "secretFile")) }} {{/*check if current alerting entry should be handled as secret */}}
- name: config-secret
mountPath: "/etc/grafana/provisioning/alerting/{{ . }}"
subPath: {{ . | quote }}
{{- else }}
- name: config
mountPath: "/etc/grafana/provisioning/alerting/{{ . }}"
subPath: {{ . | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.dashboardProviders }}
{{- range (keys . | sortAlpha) }}
- name: config
@ -1051,11 +1084,17 @@ containers:
- secretRef:
name: {{ tpl .name $ }}
optional: {{ .optional | default false }}
{{- if .prefix }}
prefix: {{ tpl .prefix $ }}
{{- end }}
{{- end }}
{{- range .Values.envFromConfigMaps }}
- configMapRef:
name: {{ tpl .name $ }}
optional: {{ .optional | default false }}
{{- if .prefix }}
prefix: {{ tpl .prefix $ }}
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.livenessProbe }}
@ -1097,6 +1136,12 @@ volumes:
- name: config
configMap:
name: {{ include "grafana.fullname" . }}
{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}}
{{- if and .Values.createConfigmap $createConfigSecret }}
- name: config-secret
secret:
secretName: {{ include "grafana.fullname" . }}-config-secret
{{- end }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ tpl .name $root }}
configMap:
@ -1230,10 +1275,13 @@ volumes:
{{ toYaml .hostPath | nindent 6 }}
{{- else if .csi }}
csi:
{{- toYaml .data | nindent 6 }}
{{- toYaml .csi | nindent 6 }}
{{- else if .configMap }}
configMap:
{{- toYaml .configMap | nindent 6 }}
{{- else if .emptyDir }}
emptyDir:
{{- toYaml .emptyDir | nindent 6 }}
{{- else }}
emptyDir: {}
{{- end }}
@ -1246,4 +1294,3 @@ volumes:
{{- tpl (toYaml .) $root | nindent 2 }}
{{- end }}
{{- end }}

View File

@ -25,13 +25,13 @@ stringData:
{{- range $key, $value := .Values.datasources }}
{{- if (hasKey $value "secret") }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value | nindent 4) $root }}
{{- tpl (toYaml $value.secret | nindent 4) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.notifiers }}
{{- if (hasKey $value "secret") }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value | nindent 4) $root }}
{{- tpl (toYaml $value.secret | nindent 4) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.alerting }}
@ -40,4 +40,4 @@ stringData:
{{- tpl (toYaml $value.secret | nindent 4) $root }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -11,19 +11,5 @@ metadata:
name: {{ include "grafana.fullname" . }}-config-dashboards
namespace: {{ include "grafana.namespace" . }}
data:
provider.yaml: |-
apiVersion: 1
providers:
- name: '{{ .Values.sidecar.dashboards.provider.name }}'
orgId: {{ .Values.sidecar.dashboards.provider.orgid }}
{{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
folder: '{{ .Values.sidecar.dashboards.provider.folder }}'
{{- end }}
type: {{ .Values.sidecar.dashboards.provider.type }}
disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}
allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}
updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }}
options:
foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }}
path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}
{{- include "grafana.configDashboardProviderData" . | nindent 2 }}
{{- end }}

View File

@ -1,6 +1,4 @@
{{- if .Values.createConfigmap }}
{{- $files := .Files }}
{{- $root := . -}}
apiVersion: v1
kind: ConfigMap
metadata:
@ -13,132 +11,5 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
data:
{{- with .Values.plugins }}
plugins: {{ join "," . }}
{{- end }}
grafana.ini: |
{{- range $elem, $elemVal := index .Values "grafana.ini" }}
{{- if not (kindIs "map" $elemVal) }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else if kindIs "string" $elemVal }}
{{ $elem }} = {{ tpl $elemVal $ }}
{{- else }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- range $key, $value := index .Values "grafana.ini" }}
{{- if kindIs "map" $value }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else if kindIs "string" $elemVal }}
{{ $elem }} = {{ tpl $elemVal $ }}
{{- else }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.datasources }}
{{- if not (hasKey $value "secret") }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value | nindent 4) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.notifiers }}
{{- if not (hasKey $value "secret") }}
{{- $key | nindent 2 }}: |
{{- toYaml $value | nindent 4 }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.alerting }}
{{- if (hasKey $value "file") }}
{{- $key | nindent 2 }}:
{{- toYaml ( $files.Get $value.file ) | nindent 4}}
{{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}}
{{/* will be stored inside secret generated by "configSecret.yaml"*/}}
{{- else }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value | nindent 4) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- $key | nindent 2 }}: |
{{- toYaml $value | nindent 4 }}
{{- end }}
{{- if .Values.dashboards }}
download_dashboards.sh: |
#!/usr/bin/env sh
set -euf
{{- if .Values.dashboardProviders }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- range $value.providers }}
mkdir -p {{ .options.path }}
{{- end }}
{{- end }}
{{- end }}
{{ $dashboardProviders := .Values.dashboardProviders }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
curl -skf \
--connect-timeout 60 \
--max-time 60 \
{{- if not $value.b64content }}
{{- if not $value.acceptHeader }}
-H "Accept: application/json" \
{{- else }}
-H "Accept: {{ $value.acceptHeader }}" \
{{- end }}
{{- if $value.token }}
-H "Authorization: token {{ $value.token }}" \
{{- end }}
{{- if $value.bearerToken }}
-H "Authorization: Bearer {{ $value.bearerToken }}" \
{{- end }}
{{- if $value.basic }}
-H "Authorization: Basic {{ $value.basic }}" \
{{- end }}
{{- if $value.gitlabToken }}
-H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \
{{- end }}
-H "Content-Type: application/json;charset=UTF-8" \
{{- end }}
{{- $dpPath := "" -}}
{{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }}
{{- if eq $kd.name $provider }}
{{- $dpPath = $kd.options.path }}
{{- end }}
{{- end }}
{{- if $value.url }}
"{{ $value.url }}" \
{{- else }}
"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \
{{- end }}
{{- if $value.datasource }}
{{- if kindIs "string" $value.datasource }}
| sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \
{{- end }}
{{- if kindIs "slice" $value.datasource }}
{{- range $value.datasource }}
| sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \
{{- end }}
{{- end }}
{{- end }}
{{- if $value.b64content }}
| base64 -d \
{{- end }}
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json"
{{ end }}
{{- end }}
{{- end }}
{{- end }}
{{- include "grafana.configData" . | nindent 2 }}
{{- end }}

View File

@ -33,14 +33,16 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/config: {{ include "grafana.configData" . | sha256sum }}
{{- if .Values.dashboards }}
checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }}
checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }}
{{- end }}
checksum/sc-dashboard-provider-config: {{ include "grafana.configDashboardProviderData" . | sha256sum }}
{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
checksum/secret: {{ include "grafana.secretsData" . | sha256sum }}
{{- end }}
{{- if .Values.envRenderSecret }}
checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }}
checksum/secret-env: {{ tpl (toYaml .Values.envRenderSecret) . | sha256sum }}
{{- end }}
kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
{{- with .Values.podAnnotations }}

View File

@ -34,7 +34,7 @@ spec:
rules:
{{- if .Values.ingress.hosts }}
{{- range .Values.ingress.hosts }}
- host: {{ tpl . $ }}
- host: {{ tpl . $ | quote }}
http:
paths:
{{- with $extraPaths }}

View File

@ -12,15 +12,5 @@ metadata:
{{- end }}
type: Opaque
data:
{{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}
admin-user: {{ .Values.adminUser | b64enc | quote }}
{{- if .Values.adminPassword }}
admin-password: {{ .Values.adminPassword | b64enc | quote }}
{{- else }}
admin-password: {{ include "grafana.password" . }}
{{- end }}
{{- end }}
{{- if not .Values.ldap.existingSecret }}
ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }}
{{- end }}
{{- include "grafana.secretsData" . | nindent 2 }}
{{- end }}

View File

@ -21,10 +21,13 @@ spec:
clusterIP: {{ . }}
{{- end }}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
type: LoadBalancer
{{- with .Values.service.loadBalancerIP }}
loadBalancerIP: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerClass }}
loadBalancerClass: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}

View File

@ -1,7 +1,7 @@
{{- if .Values.serviceAccount.create }}
{{- $root := . -}}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.serviceAccount.autoMount | default .Values.serviceAccount.automountServiceAccountToken }}
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
@ -10,7 +10,7 @@ metadata:
{{- end }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- tpl (toYaml . | nindent 4) $root }}
{{- tpl (toYaml . | nindent 4) $ }}
{{- end }}
name: {{ include "grafana.serviceAccountName" . }}
namespace: {{ include "grafana.namespace" . }}

View File

@ -12,7 +12,7 @@ metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- tpl (toYaml . | nindent 4) $ }}
{{- end }}
spec:
endpoints:

View File

@ -38,16 +38,22 @@ serviceAccount:
nameTest:
## ServiceAccount labels.
labels: {}
## Service account annotations. Can be templated.
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
autoMount: true
## Service account annotations. Can be templated.
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
## autoMount is deprecated in favor of automountServiceAccountToken
# autoMount: false
automountServiceAccountToken: false
replicas: 1
## Create a headless service for the deployment
headlessService: false
## Should the service account be auto mounted on the pod
automountServiceAccountToken: true
## Create HorizontalPodAutoscaler object for deployment type
#
autoscaling:
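
Note: autoMount is deprecated in favor of automountServiceAccountToken; the ServiceAccount template above still honors a truthy autoMount and falls back to the new key otherwise, so a migrated values sketch is simply:

serviceAccount:
  create: true
  automountServiceAccountToken: false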
@ -116,6 +122,16 @@ testFramework:
imagePullPolicy: IfNotPresent
securityContext: {}
# dns configuration for pod
dnsPolicy: ~
dnsConfig: {}
# nameservers:
# - 8.8.8.8
# options:
# - name: ndots
# value: "2"
# - name: edns0
securityContext:
runAsNonRoot: true
runAsUser: 472
@ -197,6 +213,9 @@ gossipPortName: gossip
service:
enabled: true
type: ClusterIP
loadBalancerIP: ""
loadBalancerClass: ""
loadBalancerSourceRanges: []
port: 80
targetPort: 3000
# targetPort: 4181 To be used with a proxy extraContainer
@ -477,6 +496,7 @@ envRenderSecret: {}
## Name is templated.
envFromSecrets: []
## - name: secret-name
## prefix: prefix
## optional: true
## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
@ -485,6 +505,7 @@ envFromSecrets: []
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
envFromConfigMaps: []
## - name: configmap-name
## prefix: prefix
## optional: true
# Inject Kubernetes services as environment variables.
@ -530,15 +551,22 @@ extraVolumeMounts: []
# - name: extra-volume-0
# mountPath: /mnt/volume0
# readOnly: true
# existingClaim: volume-claim
# - name: extra-volume-1
# mountPath: /mnt/volume1
# readOnly: true
# hostPath: /usr/shared/
# - name: grafana-secrets
# mountPath: /mnt/volume2
# csi: true
# data:
## Additional Grafana server volumes
extraVolumes: []
# - name: extra-volume-0
# existingClaim: volume-claim
# - name: extra-volume-1
# hostPath:
# path: /usr/shared/
# type: ""
# - name: grafana-secrets
# csi:
# driver: secrets-store.csi.k8s.io
# readOnly: true
# volumeAttributes:
@ -811,7 +839,7 @@ sidecar:
# -- The Docker registry
registry: quay.io
repository: kiwigrid/k8s-sidecar
tag: 1.25.2
tag: 1.26.1
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
@ -944,6 +972,7 @@ sidecar:
enabled: false
# Additional environment variables for the datasourcessidecar
env: {}
envValueFrom: {}
# Do not reprocess already processed unchanged resources on k8s API reconnect.
# ignoreAlreadyProcessed: true
# label that the configmaps with datasources are marked with
@ -975,8 +1004,8 @@ sidecar:
# Absolute path to shell script to execute after a datasource got reloaded
script: null
skipReload: false
# Deploy the datasource sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any datasources defined at startup time.
# Deploy the datasources sidecar as an initContainer.
initDatasources: false
# Sets the size limit of the datasource sidecar emptyDir volume
sizeLimit: {}
@ -1280,3 +1309,13 @@ extraObjects: []
# data:
# - key: grafana-admin-password
# name: adminPassword
# assertNoLeakedSecrets is a helper function defined in _helpers.tpl that checks if secret
# values are not exposed in the rendered grafana.ini configmap. It is enabled by default.
#
# To pass values into grafana.ini without exposing them in a configmap, use variable expansion:
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#variable-expansion
#
# Alternatively, if you wish to allow secret values to be exposed in the rendered grafana.ini configmap,
# you can disable this check by setting assertNoLeakedSecrets to false.
assertNoLeakedSecrets: true
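
As a sketch of the variable-expansion approach referenced above (the file path is illustrative), secret values can be resolved by Grafana at runtime instead of being rendered into the configmap:

grafana.ini:
  database:
    # expanded by Grafana when reading the config, so the rendered
    # configmap never contains the plaintext value
    password: $__file{/etc/secrets/db_password}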


@ -4,7 +4,7 @@ annotations:
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
appVersion: 2.10.1
appVersion: 2.11.0
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
@ -23,4 +23,4 @@ name: kube-state-metrics
sources:
- https://github.com/kubernetes/kube-state-metrics/
type: application
version: 5.15.2
version: 5.18.0


@ -49,10 +49,10 @@ spec:
{{- toYaml . | nindent 6 }}
{{- end }}
containers:
{{- $httpPort := ternary 9090 (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled}}
{{- $servicePort := ternary 9090 (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled}}
{{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}}
- name: {{ template "kube-state-metrics.name" . }}
{{- if .Values.autosharding.enabled }}
{{- if .Values.autosharding.enabled }}
env:
- name: POD_NAME
valueFrom:
@ -67,7 +67,7 @@ spec:
{{- if .Values.extraArgs }}
{{- .Values.extraArgs | toYaml | nindent 8 }}
{{- end }}
- --port={{ $httpPort }}
- --port={{ $servicePort }}
{{- if .Values.collectors }}
- --resources={{ .Values.collectors | join "," }}
{{- end }}
@ -115,10 +115,10 @@ spec:
{{- if .Values.selfMonitor.telemetryPort }}
- --telemetry-port={{ $telemetryPort }}
{{- end }}
{{- end }}
{{- if .Values.customResourceState.enabled }}
- --custom-resource-state-config-file=/etc/customresourcestate/config.yaml
{{- end }}
{{- end }}
{{- if or (.Values.kubeconfig.enabled) (.Values.customResourceState.enabled) (.Values.volumeMounts) }}
volumeMounts:
{{- if .Values.kubeconfig.enabled }}
@ -147,17 +147,41 @@ spec:
{{- end }}
{{- end }}
livenessProbe:
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
httpGet:
{{- if .Values.hostNetwork }}
host: 127.0.0.1
{{- end }}
httpHeaders:
{{- range $_, $header := .Values.livenessProbe.httpGet.httpHeaders }}
- name: {{ $header.name }}
value: {{ $header.value }}
{{- end }}
path: /healthz
port: {{ $httpPort }}
initialDelaySeconds: 5
timeoutSeconds: 5
port: {{ $servicePort }}
scheme: {{ upper .Values.livenessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
readinessProbe:
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
httpGet:
{{- if .Values.hostNetwork }}
host: 127.0.0.1
{{- end }}
httpHeaders:
{{- range $_, $header := .Values.readinessProbe.httpGet.httpHeaders }}
- name: {{ $header.name }}
value: {{ $header.value }}
{{- end }}
path: /
port: {{ $httpPort }}
initialDelaySeconds: 5
timeoutSeconds: 5
port: {{ $servicePort }}
scheme: {{ upper .Values.readinessProbe.httpGet.scheme }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
{{- if .Values.resources }}
resources:
{{ toYaml .Values.resources | indent 10 }}
@ -173,7 +197,7 @@ spec:
{{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }}
{{- end }}
- --secure-listen-address=:{{ .Values.service.port | default 8080}}
- --upstream=http://127.0.0.1:{{ $httpPort }}/
- --upstream=http://127.0.0.1:{{ $servicePort }}/
- --proxy-endpoints-port=8888
- --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
volumeMounts:
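
For reference, Sprig's ternary takes (trueValue, falseValue, condition), so the renamed $servicePort helper above evaluates as in this sketch:

{{/* 9090 when kubeRBACProxy.enabled is true, otherwise service.port (default 8080) */}}
{{- $servicePort := ternary 9090 (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled }}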


@ -10,6 +10,8 @@ metadata:
annotations:
{{ toYaml .Values.serviceAccount.annotations | indent 4 }}
{{- end }}
{{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }}
{{- end }}
{{- end -}}


@ -37,7 +37,10 @@ autosharding:
replicas: 1
# Change the deployment strategy when autosharding is disabled
# Change the deployment strategy when autosharding is disabled.
# ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
# The default is "RollingUpdate" as per Kubernetes defaults.
# During a release, 'RollingUpdate' can lead to two running instances for a short period of time while 'Recreate' can create a small gap in data.
# updateStrategy: Recreate
# Number of old history to retain to allow rollback
@ -96,7 +99,7 @@ kubeRBACProxy:
image:
registry: quay.io
repository: brancz/kube-rbac-proxy
tag: v0.14.0
tag: v0.16.0
sha: ""
pullPolicy: IfNotPresent
@ -108,7 +111,12 @@ kubeRBACProxy:
## Specify security settings for a Container
## Allows overrides and additional options compared to (Pod) securityContext
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
containerSecurityContext: {}
containerSecurityContext:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
@ -245,6 +253,7 @@ securityContext:
## Allows overrides and additional options compared to (Pod) securityContext
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
containerSecurityContext:
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
capabilities:
drop:
@ -454,3 +463,27 @@ containers: []
initContainers: []
# - name: crd-sidecar
# image: kiwigrid/k8s-sidecar:latest
## Liveness probe
##
livenessProbe:
failureThreshold: 3
httpGet:
httpHeaders: []
scheme: http
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
## Readiness probe
##
readinessProbe:
failureThreshold: 3
httpGet:
httpHeaders: []
scheme: http
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
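
Since both probes are now value-driven, a minimal override sketch in values.yaml (numbers and header are illustrative) could be:

livenessProbe:
  initialDelaySeconds: 10
  failureThreshold: 5
readinessProbe:
  httpGet:
    httpHeaders:
      - name: X-Probe
        value: kube-state-metrics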


@ -22,4 +22,4 @@ name: prometheus-node-exporter
sources:
- https://github.com/prometheus/node_exporter/
type: application
version: 4.24.0
version: 4.32.0


@ -183,3 +183,20 @@ labelNameLengthLimit: {{ . }}
labelValueLengthLimit: {{ . }}
{{- end }}
{{- end }}
{{/* Sets sidecar volumeMounts */}}
{{- define "prometheus-node-exporter.sidecarVolumeMounts" -}}
{{- range $_, $mount := $.Values.sidecarVolumeMount }}
- name: {{ $mount.name }}
mountPath: {{ $mount.mountPath }}
readOnly: {{ $mount.readOnly }}
{{- end }}
{{- range $_, $mount := $.Values.sidecarHostVolumeMounts }}
- name: {{ $mount.name }}
mountPath: {{ $mount.mountPath }}
readOnly: {{ $mount.readOnly }}
{{- if $mount.mountPropagation }}
mountPropagation: {{ $mount.mountPropagation }}
{{- end }}
{{- end }}
{{- end }}


@ -40,8 +40,11 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "prometheus-node-exporter.serviceAccountName" . }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
containers:
{{- $servicePort := ternary 8100 .Values.service.port .Values.kubeRBACProxy.enabled }}
{{- $servicePort := ternary .Values.kubeRBACProxy.port .Values.service.port .Values.kubeRBACProxy.enabled }}
- name: node-exporter
image: {{ include "prometheus-node-exporter.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
@ -50,7 +53,7 @@ spec:
- --path.sysfs=/host/sys
{{- if .Values.hostRootFsMount.enabled }}
- --path.rootfs=/host/root
{{- if semverCompare ">=1.4.0" (default .Chart.AppVersion .Values.image.tag) }}
{{- if semverCompare ">=1.4.0-0" (coalesce .Values.version .Values.image.tag .Chart.AppVersion) }}
- --path.udev.data=/host/root/run/udev/data
{{- end }}
{{- end }}
@ -124,12 +127,24 @@ spec:
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.terminationMessageParams.enabled }}
{{- with .Values.terminationMessageParams }}
terminationMessagePath: {{ .terminationMessagePath }}
terminationMessagePolicy: {{ .terminationMessagePolicy }}
{{- end }}
{{- end }}
volumeMounts:
- name: proc
mountPath: /host/proc
{{- with .Values.hostProcFsMount.mountPropagation }}
mountPropagation: {{ . }}
{{- end }}
readOnly: true
- name: sys
mountPath: /host/sys
{{- with .Values.hostSysFsMount.mountPropagation }}
mountPropagation: {{ . }}
{{- end }}
readOnly: true
{{- if .Values.hostRootFsMount.enabled }}
- name: root
@ -160,24 +175,10 @@ spec:
- name: {{ .name }}
mountPath: {{ .mountPath }}
{{- end }}
{{- with .Values.sidecars }}
{{- toYaml . | nindent 8 }}
{{- if or $.Values.sidecarVolumeMount $.Values.sidecarHostVolumeMounts }}
volumeMounts:
{{- range $_, $mount := $.Values.sidecarVolumeMount }}
- name: {{ $mount.name }}
mountPath: {{ $mount.mountPath }}
readOnly: {{ $mount.readOnly }}
{{- end }}
{{- range $_, $mount := $.Values.sidecarHostVolumeMounts }}
- name: {{ $mount.name }}
mountPath: {{ $mount.mountPath }}
readOnly: {{ $mount.readOnly }}
{{- if $mount.mountPropagation }}
mountPropagation: {{ $mount.mountPropagation }}
{{- end }}
{{- end }}
{{- end }}
{{- range .Values.sidecars }}
{{- $overwrites := dict "volumeMounts" (concat (include "prometheus-node-exporter.sidecarVolumeMounts" $ | fromYamlArray) (.volumeMounts | default list) | default list) }}
{{- $defaults := dict "image" (include "prometheus-node-exporter.image" $) "securityContext" $.Values.containerSecurityContext "imagePullPolicy" $.Values.image.pullPolicy }}
- {{- toYaml (merge $overwrites . $defaults) | nindent 10 }}
{{- end }}
{{- if .Values.kubeRBACProxy.enabled }}
- name: kube-rbac-proxy
@ -187,7 +188,7 @@ spec:
{{- end }}
- --secure-listen-address=:{{ .Values.service.port}}
- --upstream=http://127.0.0.1:{{ $servicePort }}/
- --proxy-endpoints-port=8888
- --proxy-endpoints-port={{ .Values.kubeRBACProxy.proxyEndpointsPort }}
- --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
volumeMounts:
- name: kube-rbac-proxy-config
@ -200,19 +201,38 @@ spec:
{{- end }}
ports:
- containerPort: {{ .Values.service.port}}
name: "http"
- containerPort: 8888
name: {{ .Values.kubeRBACProxy.portName }}
{{- if .Values.kubeRBACProxy.enableHostPort }}
hostPort: {{ .Values.service.port }}
{{- end }}
- containerPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort }}
{{- if .Values.kubeRBACProxy.enableProxyEndpointsHostPort }}
hostPort: {{ .Values.kubeRBACProxy.proxyEndpointsPort }}
{{- end }}
name: "http-healthz"
readinessProbe:
httpGet:
scheme: HTTPS
port: 8888
port: {{ .Values.kubeRBACProxy.proxyEndpointsPort }}
path: healthz
initialDelaySeconds: 5
timeoutSeconds: 5
{{- if .Values.kubeRBACProxy.resources }}
resources:
{{ toYaml .Values.kubeRBACProxy.resources | nindent 12 }}
{{- toYaml .Values.kubeRBACProxy.resources | nindent 12 }}
{{- end }}
{{- if .Values.terminationMessageParams.enabled }}
{{- with .Values.terminationMessageParams }}
terminationMessagePath: {{ .terminationMessagePath }}
terminationMessagePolicy: {{ .terminationMessagePolicy }}
{{- end }}
{{- end }}
{{- with .Values.kubeRBACProxy.env }}
env:
{{- range $key, $value := $.Values.kubeRBACProxy.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.kubeRBACProxy.containerSecurityContext }}
securityContext:
@ -237,6 +257,9 @@ spec:
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.restartPolicy }}
restartPolicy: {{ . }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
@ -257,6 +280,9 @@ spec:
- name: {{ $mount.name }}
hostPath:
path: {{ $mount.hostPath }}
{{- with $mount.type }}
type: {{ . }}
{{- end }}
{{- end }}
{{- range $_, $mount := .Values.sidecarVolumeMount }}
- name: {{ $mount.name }}
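
Note on the sidecar merge above: Sprig's merge gives the left-most map precedence, so the chart-computed volumeMounts win, then the user-supplied sidecar entry, then the image/imagePullPolicy/securityContext defaults. A values sketch:

sidecars:
  - name: nvidia-dcgm-exporter
    image: nvidia/dcgm-exporter:1.4.3
    volumeMounts:                 # concatenated with the sidecarVolumeMount entries
      - name: collector-textfiles
        mountPath: /run/prometheus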


@ -39,14 +39,17 @@ global:
# The requests are served through the same service but requests are HTTPS.
kubeRBACProxy:
enabled: false
## Set environment variables as name/value pairs
env: {}
# VARIABLE: value
image:
registry: quay.io
repository: brancz/kube-rbac-proxy
tag: v0.15.0
tag: v0.16.0
sha: ""
pullPolicy: IfNotPresent
# List of additional cli arguments to configure kube-rbac-prxy
# List of additional cli arguments to configure kube-rbac-proxy
# for example: --tls-cipher-suites, --log-file, etc.
# all the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage
extraArgs: []
@ -56,6 +59,19 @@ kubeRBACProxy:
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
containerSecurityContext: {}
# Specify the port used for the Node exporter container (upstream port)
port: 8100
# Specify the name of the container port
portName: http
# Configure a hostPort. If true, hostPort will be enabled in the container and set to service.port.
enableHostPort: false
# Configure Proxy Endpoints Port
# This is the port being probed for readiness
proxyEndpointsPort: 8888
# Configure a hostPort. If true, hostPort will be enabled in the container and set to proxyEndpointsPort.
enableProxyEndpointsHostPort: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
@ -259,6 +275,10 @@ resources: {}
# cpu: 100m
# memory: 30Mi
# Specify the container restart policy passed to the Node Exporter container
# Possible Values: Always (default)|OnFailure|Never
restartPolicy: null
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
@ -310,6 +330,16 @@ hostRootFsMount:
# https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
mountPropagation: HostToContainer
# Mount the node's proc file system (/proc) at /host/proc in the container
hostProcFsMount:
# Possible values are None, HostToContainer, and Bidirectional
mountPropagation: ""
# Mount the node's sys file system (/sys) at /host/sys in the container
hostSysFsMount:
# Possible values are None, HostToContainer, and Bidirectional
mountPropagation: ""
## Assign a group of affinity scheduling rules
##
affinity: {}
@ -354,10 +384,23 @@ nodeSelector:
kubernetes.io/os: linux
# kubernetes.io/arch: amd64
# Specify grace period for graceful termination of pods. Defaults to 30 if null or not specified
terminationGracePeriodSeconds: null
tolerations:
- effect: NoSchedule
operator: Exists
# Enable or disable container termination message settings
# https://kubernetes.io/docs/tasks/debug/debug-application/determine-reason-pod-failure/
terminationMessageParams:
enabled: false
# If enabled, specify the path for termination messages
terminationMessagePath: /dev/termination-log
# If enabled, specify the policy for termination messages
terminationMessagePolicy: File
## Assign a PriorityClassName to pods if set
# priorityClassName: ""
@ -372,6 +415,8 @@ extraArgs: []
extraHostVolumeMounts: []
# - name: <mountName>
# hostPath: <hostPath>
# https://kubernetes.io/docs/concepts/storage/volumes/#hostpath-volume-types
# type: "" (Default)|DirectoryOrCreate|Directory|FileOrCreate|File|Socket|CharDevice|BlockDevice
# mountPath: <mountPath>
# readOnly: true|false
# mountPropagation: None|HostToContainer|Bidirectional
@ -388,18 +433,21 @@ secrets: []
##
namespaceOverride: ""
## Additional containers for export metrics to text file
## Additional containers for export metrics to text file; fields image,imagePullPolicy,securityContext take default value from main container
##
sidecars: []
## - name: nvidia-dcgm-exporter
## image: nvidia/dcgm-exporter:1.4.3
# - name: nvidia-dcgm-exporter
# image: nvidia/dcgm-exporter:1.4.3
# volumeMounts:
# - name: tmp
# mountPath: /tmp
## Volume for sidecar containers
##
sidecarVolumeMount: []
## - name: collector-textfiles
## mountPath: /run/prometheus
## readOnly: false
# - name: collector-textfiles
# mountPath: /run/prometheus
# readOnly: false
## Additional mounts from the host to sidecar containers
##
@ -478,3 +526,6 @@ extraManifests: []
# name: prometheus-extra
# data:
# extra-data: "value"
# Override version of app, required if image.tag is defined and does not follow semver
version: ""


@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 0.22.0
appVersion: 0.25.1
description: A Helm chart for prometheus windows-exporter
home: https://github.com/prometheus-community/windows_exporter/
keywords:
@ -14,4 +14,4 @@ name: prometheus-windows-exporter
sources:
- https://github.com/prometheus-community/windows_exporter/
type: application
version: 0.1.2
version: 0.3.1


@ -49,7 +49,7 @@ spec:
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- --config.file=%CONTAINER_SANDBOX_MOUNT_POINT%/config.yml
- --collector.textfile.directory=%CONTAINER_SANDBOX_MOUNT_POINT%
- --collector.textfile.directories=%CONTAINER_SANDBOX_MOUNT_POINT%
- --web.listen-address=:{{ .Values.service.port }}
{{- with .Values.extraArgs }}
{{- toYaml . | nindent 12 }}


@ -24,10 +24,15 @@ The longest name that gets created adds and extra 37 characters, so truncation s
{{- end -}}
{{- end -}}
{{/* Fullname suffixed with operator */}}
{{/* Fullname suffixed with -operator */}}
{{/* Adding 9 to 26 truncation of kube-prometheus-stack.fullname */}}
{{- define "kube-prometheus-stack.operator.fullname" -}}
{{- if .Values.prometheusOperator.fullnameOverride -}}
{{- .Values.prometheusOperator.fullnameOverride | trunc 35 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-operator" (include "kube-prometheus-stack.fullname" .) -}}
{{- end }}
{{- end }}
{{/* Prometheus custom resource instance name */}}
{{- define "kube-prometheus-stack.prometheus.crname" -}}
@ -91,6 +96,15 @@ heritage: {{ $.Release.Service | quote }}
{{- end -}}
{{- end -}}
{{/* Create the name of kube-prometheus-stack service account to use */}}
{{- define "kube-prometheus-stack.operator.admissionWebhooks.serviceAccountName" -}}
{{- if .Values.prometheusOperator.serviceAccount.create -}}
{{ default (printf "%s-webhook" (include "kube-prometheus-stack.operator.fullname" .)) .Values.prometheusOperator.admissionWebhooks.deployment.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.prometheusOperator.admissionWebhooks.deployment.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/* Create the name of prometheus service account to use */}}
{{- define "kube-prometheus-stack.prometheus.serviceAccountName" -}}
{{- if .Values.prometheus.serviceAccount.create -}}
@ -140,6 +154,17 @@ Use the grafana namespace override for multi-namespace deployments in combined c
{{- end -}}
{{- end -}}
{{/*
Allow kube-state-metrics job name to be overridden
*/}}
{{- define "kube-prometheus-stack-kube-state-metrics.name" -}}
{{- if index .Values "kube-state-metrics" "nameOverride" -}}
{{- index .Values "kube-state-metrics" "nameOverride" -}}
{{- else -}}
{{- print "kube-state-metrics" -}}
{{- end -}}
{{- end -}}
{{/*
Use the kube-state-metrics namespace override for multi-namespace deployments in combined charts
*/}}
@ -277,3 +302,14 @@ global:
{{- end }}
{{- end }}
{{- end -}}
{{- define "kube-prometheus-stack.operator.admission-webhook.dnsNames" }}
{{- $fullname := include "kube-prometheus-stack.operator.fullname" . }}
{{- $namespace := include "kube-prometheus-stack.namespace" . }}
{{- $fullname }}
{{ $fullname }}.{{ $namespace }}.svc
{{- if .Values.prometheusOperator.admissionWebhooks.deployment.enabled }}
{{ $fullname }}-webhook
{{ $fullname }}-webhook.{{ $namespace }}.svc
{{- end }}
{{- end }}
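
Assuming the operator fullname renders as kps-operator in namespace monitoring and the webhook deployment is enabled, the dnsNames helper above would emit (sketch):

kps-operator
kps-operator.monitoring.svc
kps-operator-webhook
kps-operator-webhook.monitoring.svc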


@ -31,6 +31,7 @@ spec:
replicas: {{ .Values.alertmanager.alertmanagerSpec.replicas }}
listenLocal: {{ .Values.alertmanager.alertmanagerSpec.listenLocal }}
serviceAccountName: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }}
automountServiceAccountToken: {{ .Values.alertmanager.alertmanagerSpec.automountServiceAccountToken }}
{{- if .Values.alertmanager.alertmanagerSpec.externalUrl }}
externalUrl: "{{ tpl .Values.alertmanager.alertmanagerSpec.externalUrl . }}"
{{- else if and .Values.alertmanager.ingress.enabled .Values.alertmanager.ingress.hosts }}


@ -14,7 +14,7 @@ metadata:
namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- if .Values.alertmanager.ingress.annotations }}
annotations:
{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }}
{{- tpl (toYaml .Values.alertmanager.ingress.annotations) . | nindent 4 }}
{{- end }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-alertmanager
@ -31,7 +31,7 @@ spec:
rules:
{{- if .Values.alertmanager.ingress.hosts }}
{{- range $host := .Values.alertmanager.ingress.hosts }}
- host: {{ tpl $host $ }}
- host: {{ tpl $host $ | quote }}
http:
paths:
{{- range $p := $paths }}


@ -25,7 +25,7 @@ items:
{{- end }}
{{- if $ingressValues.annotations }}
annotations:
{{ toYaml $ingressValues.annotations | indent 8 }}
{{- tpl (toYaml $ingressValues.annotations) $ | nindent 8 }}
{{- end }}
spec:
{{- if $apiIsStable }}


@ -58,6 +58,11 @@ spec:
alertmanager: {{ template "kube-prometheus-stack.alertmanager.crname" . }}
{{- if .Values.alertmanager.service.sessionAffinity }}
sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }}
{{- end }}
{{- if eq .Values.alertmanager.service.sessionAffinity "ClientIP" }}
sessionAffinityConfig:
clientIP:
timeoutSeconds: {{ .Values.alertmanager.service.sessionAffinityConfig.clientIP.timeoutSeconds }}
{{- end }}
type: "{{ .Values.alertmanager.service.type }}"
{{- end }}
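
A minimal values sketch for the new ClientIP affinity handling (10800 seconds is the Kubernetes default):

alertmanager:
  service:
    sessionAffinity: ClientIP
    sessionAffinityConfig:
      clientIP:
        timeoutSeconds: 10800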


@ -52,7 +52,12 @@ spec:
{{- if .Values.alertmanager.serviceMonitor.proxyUrl }}
proxyUrl: {{ .Values.alertmanager.serviceMonitor.proxyUrl}}
{{- end }}
scheme: http
{{- if .Values.alertmanager.serviceMonitor.scheme }}
scheme: {{ .Values.alertmanager.serviceMonitor.scheme }}
{{- end }}
{{- if .Values.alertmanager.serviceMonitor.tlsConfig }}
tlsConfig: {{- toYaml .Values.alertmanager.serviceMonitor.tlsConfig | nindent 6 }}
{{- end }}
path: "/metrics"
{{- if .Values.alertmanager.serviceMonitor.metricRelabelings }}
metricRelabelings: {{- tpl (toYaml .Values.alertmanager.serviceMonitor.metricRelabelings | nindent 6) . }}
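
With scheme and tlsConfig now configurable, a values sketch for scraping Alertmanager over TLS (illustrative, not hardened):

alertmanager:
  serviceMonitor:
    scheme: https
    tlsConfig:
      insecureSkipVerify: true   # prefer a proper CA bundle in production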


@ -1,4 +1,4 @@
{{- if and .Values.coreDns.enabled .Values.kubernetesServiceMonitors.enabled }}
{{- if and .Values.coreDns.enabled .Values.coreDns.service.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: v1
kind: Service
metadata:
@ -11,7 +11,7 @@ metadata:
spec:
clusterIP: None
ports:
- name: http-metrics
- name: {{ .Values.coreDns.serviceMonitor.port }}
port: {{ .Values.coreDns.service.port }}
protocol: TCP
targetPort: {{ .Values.coreDns.service.targetPort }}


@ -1,9 +1,13 @@
{{- if and .Values.coreDns.enabled .Values.kubernetesServiceMonitors.enabled }}
{{- if and .Values.coreDns.enabled .Values.coreDns.serviceMonitor.enabled .Values.kubernetesServiceMonitors.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-coredns
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
namespace: kube-system
{{- else }}
namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-coredns
{{- with .Values.coreDns.serviceMonitor.additionalLabels }}
@ -11,17 +15,21 @@ metadata:
{{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
jobLabel: jobLabel
jobLabel: {{ .Values.coreDns.serviceMonitor.jobLabel }}
{{- include "servicemonitor.scrapeLimits" .Values.coreDns.serviceMonitor | nindent 2 }}
selector:
{{- if .Values.coreDns.serviceMonitor.selector }}
{{ tpl (toYaml .Values.coreDns.serviceMonitor.selector | nindent 4) . }}
{{- else }}
matchLabels:
app: {{ template "kube-prometheus-stack.name" . }}-coredns
release: {{ $.Release.Name | quote }}
{{- end }}
namespaceSelector:
matchNames:
- "kube-system"
endpoints:
- port: http-metrics
- port: {{ .Values.coreDns.serviceMonitor.port }}
{{- if .Values.coreDns.serviceMonitor.interval}}
interval: {{ .Values.coreDns.serviceMonitor.interval }}
{{- end }}
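
The namespace switch above means the monitor is created next to its target (kube-system here) when namespace selectors are ignored; a minimal values sketch:

prometheus:
  prometheusSpec:
    ignoreNamespaceSelectors: true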


@ -3,7 +3,11 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-apiserver
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
namespace: default
{{- else }}
namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-apiserver
{{- with .Values.kubeApiServer.serviceMonitor.additionalLabels }}


@ -14,7 +14,7 @@ subsets:
- ip: {{ . }}
{{- end }}
ports:
- name: http-metrics
- name: {{ .Values.kubeControllerManager.serviceMonitor.port }}
{{- $kubeControllerManagerDefaultInsecurePort := 10252 }}
{{- $kubeControllerManagerDefaultSecurePort := 10257 }}
port: {{ include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . $kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.port) }}


@ -11,7 +11,7 @@ metadata:
spec:
clusterIP: None
ports:
- name: http-metrics
- name: {{ .Values.kubeControllerManager.serviceMonitor.port }}
{{- $kubeControllerManagerDefaultInsecurePort := 10252 }}
{{- $kubeControllerManagerDefaultSecurePort := 10257 }}
port: {{ include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . $kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.port) }}


@ -3,7 +3,11 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
namespace: kube-system
{{- else }}
namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
{{- with .Values.kubeControllerManager.serviceMonitor.additionalLabels }}
@ -11,17 +15,21 @@ metadata:
{{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
jobLabel: jobLabel
jobLabel: {{ .Values.kubeControllerManager.serviceMonitor.jobLabel }}
{{- include "servicemonitor.scrapeLimits" .Values.kubeControllerManager.serviceMonitor | nindent 2 }}
selector:
{{- if .Values.kubeControllerManager.serviceMonitor.selector }}
{{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.selector | nindent 4) . }}
{{- else }}
matchLabels:
app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager
release: {{ $.Release.Name | quote }}
{{- end }}
namespaceSelector:
matchNames:
- "kube-system"
endpoints:
- port: http-metrics
- port: {{ .Values.kubeControllerManager.serviceMonitor.port }}
{{- if .Values.kubeControllerManager.serviceMonitor.interval }}
interval: {{ .Values.kubeControllerManager.serviceMonitor.interval }}
{{- end }}


@ -3,7 +3,11 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-dns
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
namespace: kube-system
{{- else }}
namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-kube-dns
{{- with .Values.kubeDns.serviceMonitor.additionalLabels }}
@ -11,12 +15,16 @@ metadata:
{{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
jobLabel: jobLabel
jobLabel: {{ .Values.kubeDns.serviceMonitor.jobLabel }}
{{- include "servicemonitor.scrapeLimits" .Values.kubeDns.serviceMonitor | nindent 2 }}
selector:
{{- if .Values.kubeDns.serviceMonitor.selector }}
{{ tpl (toYaml .Values.kubeDns.serviceMonitor.selector | nindent 4) . }}
{{- else }}
matchLabels:
app: {{ template "kube-prometheus-stack.name" . }}-kube-dns
release: {{ $.Release.Name | quote }}
{{- end }}
namespaceSelector:
matchNames:
- "kube-system"


@ -14,7 +14,7 @@ subsets:
- ip: {{ . }}
{{- end }}
ports:
- name: http-metrics
- name: {{ .Values.kubeEtcd.serviceMonitor.port }}
port: {{ .Values.kubeEtcd.service.port }}
protocol: TCP
{{- end }}


@ -11,7 +11,7 @@ metadata:
spec:
clusterIP: None
ports:
- name: http-metrics
- name: {{ .Values.kubeEtcd.serviceMonitor.port }}
port: {{ .Values.kubeEtcd.service.port }}
protocol: TCP
targetPort: {{ .Values.kubeEtcd.service.targetPort }}


@ -3,7 +3,11 @@ apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd
{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }}
namespace: kube-system
{{- else }}
namespace: {{ template "kube-prometheus-stack.namespace" . }}
{{- end }}
labels:
app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd
{{- with .Values.kubeEtcd.serviceMonitor.additionalLabels }}
@ -11,17 +15,21 @@ metadata:
{{- end }}
{{ include "kube-prometheus-stack.labels" . | indent 4 }}
spec:
jobLabel: jobLabel
jobLabel: {{ .Values.kubeEtcd.serviceMonitor.jobLabel }}
{{- include "servicemonitor.scrapeLimits" .Values.kubeEtcd.serviceMonitor | nindent 4 }}
selector:
{{- if .Values.kubeEtcd.serviceMonitor.selector }}
{{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.selector | nindent 4) . }}
{{- else }}
matchLabels:
app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd
release: {{ $.Release.Name | quote }}
{{- end }}
namespaceSelector:
matchNames:
- "kube-system"
endpoints:
- port: http-metrics
- port: {{ .Values.kubeEtcd.serviceMonitor.port }}
{{- if .Values.kubeEtcd.serviceMonitor.interval }}
interval: {{ .Values.kubeEtcd.serviceMonitor.interval }}
{{- end }}


@ -14,7 +14,7 @@ subsets:
- ip: {{ . }}
{{- end }}
ports:
- name: http-metrics
- name: {{ .Values.kubeProxy.serviceMonitor.port }}
port: {{ .Values.kubeProxy.service.port }}
protocol: TCP
{{- end }}


@ -11,7 +11,7 @@ metadata:
spec:
clusterIP: None
ports:
- name: http-metrics
- name: {{ .Values.kubeProxy.serviceMonitor.port }}
port: {{ .Values.kubeProxy.service.port }}
protocol: TCP
targetPort: {{ .Values.kubeProxy.service.targetPort }}

Some files were not shown because too many files have changed in this diff.