# Standalone istio exposure toggles for the bundled Grafana UI.
grafana:
  istio:
    enabled: false
    url: ""
    gateway: ingressgateway.istio-system.svc.cluster.local

# Standalone istio exposure toggles for the Prometheus UI.
prometheus:
  istio:
    enabled: false
    url: ""
    gateway: ingressgateway.istio-system.svc.cluster.local

# Values passed through to the upstream prometheus-operator chart.
prometheus-operator:
  defaultRules:
    create: true

  # Per-component scrape toggles; managed control planes hide most of these.
  coreDns:
    enabled: true
  kubeApiServer:
    enabled: true
  kubeControllerManager:
    enabled: false
  kubeDns:
    enabled: false
  kubeEtcd:
    enabled: false
  kubeProxy:
    enabled: false
  kubeScheduler:
    enabled: false
  kubeStateMetrics:
    enabled: true
  kubelet:
    enabled: true

  prometheusOperator:
    enabled: true
    # image:
    #   tag: v0.41.0
    # prometheusConfigReloaderImage:
    #   tag: v0.41.0

    # Run on controller nodes
    tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/master: ""

    # Argo takes care of CRDs
    manageCrds: false
    createCustomResource: true

    # Operator has TLS support starting 0.39, but chart does not support CAConfig and operator flags yet
    # see: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/webhook.md#deploying-the-admission-webhook
    # Until then we disable them as the patching interferes with Argo anyways
    tlsProxy:
      enabled: false
    admissionWebhooks:
      enabled: false

    # Watch the release namespace plus kube-system for monitoring CRs.
    namespaces:
      releaseNamespace: true
      additional:
        - kube-system

  nodeExporter:
    enabled: true

  prometheus:
    enabled: true
    prometheusSpec:
      retention: 8d
      portName: http-prometheus
      resources:
        requests:
          memory: 512Mi
      storageSpec:
        volumeClaimTemplate:
          spec:
            storageClassName: ebs-sc-gp2-xfs
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 8Gi

  # Custom Grafana tweaks
  # - persistence, plugins, auth
  grafana:
    enabled: true
    persistence:
      enabled: true
      size: 4Gi
      storageClassName: ebs-sc-gp2-xfs
    plugins:
      - grafana-piechart-panel
    service:
      portName: http-grafana
    initChownData:
      enabled: false
    testFramework:
      enabled: false

  # Todo
  alertmanager:
    enabled: false

# Metrics adapter
prometheus-adapter:
  # Run on controller nodes
  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
  nodeSelector:
    node-role.kubernetes.io/master: ""
  # Basic rules for HPA to work replacing heapster, taken from kube-prometheus project
  # https://github.com/coreos/kube-prometheus/blob/master/manifests/prometheus-adapter-configMap.yaml
  # NOTE(review): the chart only consumes these queries under `rules.resource`;
  # the original flattened text showed `resource:` without a visible `rules:`
  # parent — confirm against the deployed chart version.
  rules:
    resource:
      cpu:
        containerQuery: sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>)
        nodeQuery: sum(1 - irate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
        resources:
          overrides:
            node:
              resource: node
            namespace:
              resource: namespace
            pod:
              resource: pod
        containerLabel: container
      memory:
        containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}) by (<<.GroupBy>>)
        nodeQuery: sum(node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}) by (<<.GroupBy>>)
        resources:
          overrides:
            instance:
              resource: node
            namespace:
              resource: namespace
            pod:
              resource: pod
        containerLabel: container
      # Evaluation window for the resource metrics API.
      window: 5m