kubezero/charts/kubezero-metrics/values.yaml

grafana:
  istio:
    enabled: false
    ipBlocks: []
    url: ""
    gateway: istio-system/ingressgateway
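    # Example of exposing Grafana through the Istio ingress gateway
    # (the hostname below is a placeholder, not part of this chart):
    # enabled: true
    # url: grafana.example.com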

prometheus:
  istio:
    enabled: false
    url: ""
    gateway: istio-system/ingressgateway

prometheus-operator:
  defaultRules:
    create: true

  coreDns:
    enabled: true
  kubeDns:
    enabled: false
  kubeApiServer:
    enabled: true
  kubeStateMetrics:
    enabled: true
  kubeProxy:
    enabled: true
  kubeEtcd:
    enabled: true
    # 2381 is the plain HTTP metrics port kubeadm configures via --listen-metrics-urls
    service:
      port: 2381
      targetPort: 2381
  kubeControllerManager:
    enabled: true
    # 10257 is the controller-manager's secure HTTPS port; its serving cert is
    # self-signed, hence insecureSkipVerify below
    service:
      port: 10257
      targetPort: 10257
    serviceMonitor:
      https: true
      insecureSkipVerify: true
  kubeScheduler:
    enabled: true
    # 10259 is the scheduler's secure HTTPS port; same self-signed cert caveat as above
    service:
      port: 10259
      targetPort: 10259
    serviceMonitor:
      https: true
      insecureSkipVerify: true
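  # NB: kubeadm-based clusters bind the controller-manager and scheduler to
  # 127.0.0.1 by default; scraping the HTTPS ports above typically also requires
  # setting --bind-address=0.0.0.0 on their static pods (an assumption about the
  # cluster setup, not something this chart configures).
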
  kubelet:
    enabled: true
    serviceMonitor:
      # removed with 1.18, but still required for all container metrics ??
      cAdvisor: true

  prometheusOperator:
    enabled: true
    #image:
    #  tag: v0.41.0
    #prometheusConfigReloaderImage:
    #  tag: v0.41.0

    # Run on controller nodes
    tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/master: ""

    # Argo takes care of CRDs
    manageCrds: false
    createCustomResource: true

    # The operator supports TLS starting with 0.39, but the chart does not yet
    # expose the CAConfig and operator flags, see:
    # https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/webhook.md#deploying-the-admission-webhook
    # Until then we disable both, as the certificate patching interferes with Argo anyway.
    tlsProxy:
      enabled: false
    admissionWebhooks:
      enabled: false

    namespaces:
      releaseNamespace: true
      additional:
      - kube-system
      - logging

  nodeExporter:
    enabled: true
    serviceMonitor:
      relabelings:
      # copy the scraped pod's node name into a `node` label, so node-exporter
      # metrics can be joined with other per-node metrics
      - sourceLabels: [__meta_kubernetes_pod_node_name]
        separator: ;
        regex: ^(.*)$
        targetLabel: node
        replacement: $1
        action: replace

  prometheus:
    enabled: true
    prometheusSpec:
      retention: 8d
      portName: http-prometheus
      resources:
        requests:
          memory: 512Mi

      # each Prometheus replica gets its own PVC from this template
      storageSpec:
        volumeClaimTemplate:
          spec:
            storageClassName: ebs-sc-gp2-xfs
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 8Gi

  # Custom Grafana tweaks
  # - persistence, plugins, auth
  grafana:
    enabled: true
    persistence:
      enabled: true
      size: 4Gi
      storageClassName: ebs-sc-gp2-xfs
    plugins:
    - grafana-piechart-panel
    service:
      portName: http-grafana
    initChownData:
      enabled: false
    testFramework:
      enabled: false

  # Todo
  alertmanager:
    enabled: false

# Metrics adapter
prometheus-adapter:
  prometheus:
    # NB: "operato" is not a typo; the prometheus-operator chart truncates its fullname
    url: http://metrics-prometheus-operato-prometheus
  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
  nodeSelector:
    node-role.kubernetes.io/master: ""
  # Basic rules for the HPA to work, replacing heapster; taken from the kube-prometheus project:
  # https://github.com/coreos/kube-prometheus/blob/master/manifests/prometheus-adapter-configMap.yaml
  # <<.LabelMatchers>> and <<.GroupBy>> are placeholders the adapter fills in at query time.
  rules:
    default: false
    resource:
      cpu:
        containerQuery: sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>)
        nodeQuery: sum(1 - irate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
        resources:
          overrides:
            node:
              resource: node
            namespace:
              resource: namespace
            pod:
              resource: pod
        containerLabel: container
      memory:
        containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}) by (<<.GroupBy>>)
        nodeQuery: sum(node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}) by (<<.GroupBy>>)
        resources:
          overrides:
            node:
              resource: node
            namespace:
              resource: namespace
            pod:
              resource: pod
        containerLabel: container
      window: 5m
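
# A minimal sketch of an HPA consuming the resource metrics served by the
# adapter above (all names and targets are hypothetical, not part of this chart):
#
# apiVersion: autoscaling/v2beta2
# kind: HorizontalPodAutoscaler
# metadata:
#   name: myapp
# spec:
#   scaleTargetRef:
#     apiVersion: apps/v1
#     kind: Deployment
#     name: myapp
#   minReplicas: 1
#   maxReplicas: 4
#   metrics:
#   - type: Resource
#     resource:
#       name: cpu
#       target:
#         type: Utilization
#         averageUtilization: 80
#
# Once the adapter is running, `kubectl top nodes` and `kubectl top pods` should
# also return data, served through the metrics.k8s.io API.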