# kubezero/charts/kubezero-metrics/values.yaml

prometheus-operator:
  alertmanager:
    enabled: false
  coreDns:
    enabled: false
  kubeApiServer:
    enabled: false
  kubeControllerManager:
    enabled: false
  kubeDns:
    enabled: false
  kubeEtcd:
    enabled: false
  kubeProxy:
    enabled: false
  kubeScheduler:
    enabled: false
  kubeStateMetrics:
    enabled: false
  kubelet:
    enabled: false
  nodeExporter:
    enabled: true
  grafana:
    enabled: false
  prometheus:
    enabled: false
  defaultRules:
    create: false
  prometheusOperator:
    enabled: true
    image:
      tag: v0.41.0
    prometheusConfigReloaderImage:
      tag: v0.41.0
    # Run on controller nodes
    tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/master: ""
    # Argo takes care of CRDs
    manageCrds: false
    createCustomResource: true
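    # Illustrative check, not a chart value: the operator expects the monitoring.coreos.com
    # CRDs (Prometheus, ServiceMonitor, PrometheusRule, Alertmanager, ...) to be applied by Argo;
    # a quick way to verify they are present, assuming kubectl access:
    #   kubectl get crd | grep monitoring.coreos.com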
    # The operator has TLS support starting with 0.39, but the chart does not yet support CAConfig and the operator flags,
    # see: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/webhook.md#deploying-the-admission-webhook
    # Until then we disable both, as the patching interferes with Argo anyway
    tlsProxy:
      enabled: false
    admissionWebhooks:
      enabled: false
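    # Sketch of what re-enabling could look like once the chart exposes the CAConfig/operator flags;
    # key names are assumed from the upstream prometheus-operator chart and are not verified for this version:
    #tlsProxy:
    #  enabled: true
    #admissionWebhooks:
    #  enabled: true
    #  patch:
    #    enabled: true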
    #namespaces:
    #  releaseNamespace: true
    #  additional:
    #  - kube-system

# Metrics adapter
prometheus-adapter:
  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
  nodeSelector:
    node-role.kubernetes.io/master: ""
  # Basic rules for the HPA to work, replacing Heapster; taken from the kube-prometheus project
  # https://github.com/coreos/kube-prometheus/blob/master/manifests/prometheus-adapter-configMap.yaml
  rules:
    resource:
      cpu:
        containerQuery: sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>)
        nodeQuery: sum(1 - irate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
        resources:
          overrides:
            node:
              resource: node
            namespace:
              resource: namespace
            pod:
              resource: pod
        containerLabel: container
      memory:
        containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}) by (<<.GroupBy>>)
        nodeQuery: sum(node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}) by (<<.GroupBy>>)
        resources:
          overrides:
            instance:
              resource: node
            namespace:
              resource: namespace
            pod:
              resource: pod
        containerLabel: container
      window: 5m
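  # Illustrative only, not chart values: with these resource rules served by the adapter,
  # a plain CPU-based HPA works without Heapster. "my-app" is a placeholder deployment name.
  #   apiVersion: autoscaling/v2beta2
  #   kind: HorizontalPodAutoscaler
  #   metadata:
  #     name: my-app
  #   spec:
  #     scaleTargetRef:
  #       apiVersion: apps/v1
  #       kind: Deployment
  #       name: my-app
  #     minReplicas: 1
  #     maxReplicas: 5
  #     metrics:
  #       - type: Resource
  #         resource:
  #           name: cpu
  #           target:
  #             type: Utilization
  #             averageUtilization: 80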