New Prometheus upstream chart, cert-manager version bump, logging fixes, multi-PV support for EFS #29

Merged
stefan merged 13 commits from master into stable 2020-10-27 17:48:34 +00:00
15 changed files with 130 additions and 160 deletions

View File

@@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
name: kubezero-argo-cd
version: 0.5.6
version: 0.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -15,6 +15,6 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: argo-cd
version: 2.8.0
version: 2.9.3
repository: https://argoproj.github.io/argo-helm
kubeVersion: ">= 1.17.0"
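
Stripped of the diff interleaving, the dependency block of kubezero-argo-cd now resolves to the following (the kubezero-lib entry sits just above the hunk shown):

```yaml
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.3"
    repository: https://zero-down-time.github.io/kubezero/
  - name: argo-cd
    version: 2.9.3
    repository: https://argoproj.github.io/argo-helm
kubeVersion: ">= 1.17.0"
```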

View File

@@ -1,6 +1,6 @@
# kubezero-argo-cd
![Version: 0.5.3](https://img.shields.io/badge/Version-0.5.3-informational?style=flat-square)
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square)
KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
@@ -14,11 +14,11 @@ KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Appl
## Requirements
Kubernetes: `>= 1.16.0`
Kubernetes: `>= 1.17.0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 2.7.0 |
| https://argoproj.github.io/argo-helm | argo-cd | 2.9.3 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Values
@@ -26,45 +26,35 @@ Kubernetes: `>= 1.16.0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.controller.args.appResyncPeriod | string | `"300"` | |
| argo-cd.controller.args.operationProcessors | string | `"2"` | |
| argo-cd.controller.args.statusProcessors | string | `"4"` | |
| argo-cd.controller.args.operationProcessors | string | `"4"` | |
| argo-cd.controller.args.statusProcessors | string | `"8"` | |
| argo-cd.controller.logFormat | string | `"json"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| argo-cd.controller.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.controller.resources.limits.memory | string | `"1536Mi"` | |
| argo-cd.controller.resources.requests.cpu | string | `"100m"` | |
| argo-cd.controller.resources.requests.memory | string | `"256Mi"` | |
| argo-cd.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.global.image.tag | string | `"v1.7.5"` | |
| argo-cd.global.image.tag | string | `"v1.7.8"` | |
| argo-cd.installCRDs | bool | `false` | |
| argo-cd.istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| argo-cd.istio.gateway | string | `"istio-system/ingressgateway"` | Name of the Istio gateway to add the VirtualService to |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.redis.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.redis.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.redis.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.repoServer.logFormat | string | `"json"` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.repoServer.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| argo-cd.repoServer.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.repoServer.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.repoServer.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.server.config."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.server.config.url | string | `"argocd.example.com"` | ArgoCD hostname to be exposed via Istio |
| argo-cd.server.extraArgs[0] | string | `"--insecure"` | |
| argo-cd.server.logFormat | string | `"json"` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| argo-cd.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argo-cd.server.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.server.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kubezero.global.defaultDestination | object | `{"server":"https://kubernetes.default.svc"}` | Destination cluster |
| kubezero.global.defaultSource.pathPrefix | string | `""` | optional path prefix within repoURL to support eg. remote subtrees |
| kubezero.global.defaultSource.repoURL | string | `"https://github.com/zero-down-time/kubezero"` | default repository for argocd applications |
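
The escaped `resource.customizations` string in the table above is hard to read; unescaped and with its original indentation restored, it is the Lua health assessment for cert-manager Certificate resources (content identical to the table entry):

```yaml
argo-cd:
  server:
    config:
      resource.customizations: |
        cert-manager.io/Certificate:
          # Lua script for customizing the health status assessment
          health.lua: |
            hs = {}
            if obj.status ~= nil then
              if obj.status.conditions ~= nil then
                for i, condition in ipairs(obj.status.conditions) do
                  if condition.type == "Ready" and condition.status == "False" then
                    hs.status = "Degraded"
                    hs.message = condition.message
                    return hs
                  end
                  if condition.type == "Ready" and condition.status == "True" then
                    hs.status = "Healthy"
                    hs.message = condition.message
                    return hs
                  end
                end
              end
            end
            hs.status = "Progressing"
            hs.message = "Waiting for certificate"
            return hs
```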

View File

@@ -31,14 +31,14 @@ argo-cd:
global:
image:
tag: v1.7.7
tag: v1.7.8
controller:
args:
statusProcessors: "4"
operationProcessors: "2"
statusProcessors: "8"
operationProcessors: "4"
appResyncPeriod: "300"
# logFormat: json
logFormat: json
metrics:
enabled: false
@@ -48,22 +48,16 @@ argo-cd:
additionalLabels:
release: metrics
# controller to masters
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
resources:
limits:
# limits:
# cpu: 500m
memory: 1536Mi
# memory: 2048Mi
requests:
cpu: 100m
memory: 256Mi
repoServer:
# logFormat: json
logFormat: json
metrics:
enabled: false
serviceMonitor:
@@ -72,14 +66,8 @@ argo-cd:
additionalLabels:
release: metrics
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
server:
# logFormat: json
logFormat: json
config:
# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
url: argocd.example.com
@@ -123,18 +111,10 @@ argo-cd:
extraArgs:
- --insecure
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
redis:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# redis:
# We might want to try to keep redis close to the controller
# affinity:
dex:
enabled: false
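
Net result of this file after the hunks above, rendered as plain values (master-node pinning via nodeSelector/tolerations is dropped, processor counts are doubled, and JSON logging is now enabled rather than commented out):

```yaml
argo-cd:
  global:
    image:
      tag: v1.7.8
  controller:
    args:
      statusProcessors: "8"
      operationProcessors: "4"
      appResyncPeriod: "300"
    logFormat: json
  repoServer:
    logFormat: json
  server:
    logFormat: json
  dex:
    enabled: false
```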

View File

@@ -19,7 +19,7 @@ KubeZero Umbrella Chart for aws-efs-csi-driver
## Requirements
Kubernetes: `>=1.17.0-0`
Kubernetes: `>=1.16.0-0`
| Repository | Name | Version |
|------------|------|---------|
@@ -36,13 +36,6 @@ Details also see: [Reserve PV](https://kubernetes.io/docs/concepts/storage/persi
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| PersistentVolumes[0].claimRef.name | string | `"foo-pvc"` | |
| PersistentVolumes[0].claimRef.namespace | string | `"foo"` | |
| PersistentVolumes[0].mountOptions[0] | string | `"tls"` | |
| PersistentVolumes[0].name | string | `"example-pv"` | |
| PersistentVolumes[0].volumeHandle | string | `"<efs-id>:/path"` | |
| PersistentVolumes[1].name | string | `"example-pv2"` | |
| PersistentVolumes[1].volumeHandle | string | `"<efs-id>:/path2"` | |
| StorageClass.create | bool | `true` | |
| StorageClass.default | bool | `false` | |
| aws-efs-csi-driver.nodeSelector | object | `{}` | |
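
Reassembled from the value rows above, the `PersistentVolumes` list that the new multi-PV support consumes looks like this (`<efs-id>` stays a placeholder for the actual EFS filesystem id):

```yaml
PersistentVolumes:
  - name: example-pv
    volumeHandle: "<efs-id>:/path"
    mountOptions:
      - tls
    claimRef:
      name: foo-pvc
      namespace: foo
  - name: example-pv2
    volumeHandle: "<efs-id>:/path2"
```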

View File

@@ -1,3 +1,4 @@
{{- if .Values.PersistentVolume }}
{{- if .Values.PersistentVolume.create }}
apiVersion: v1
kind: PersistentVolume
@@ -17,3 +18,4 @@ spec:
driver: efs.csi.aws.com
volumeHandle: {{ .Values.PersistentVolume.EfsId }}
{{- end }}
{{- end }}
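
With the added outer guard, this legacy template only renders when the deprecated `PersistentVolume` block is defined at all; a minimal sketch of values that would still activate it during the transition period:

```yaml
# Deprecated single-PV path, removed with the next release
PersistentVolume:
  create: true
  Name: kubezero-efs-pv   # default name injected by the parent chart
  EfsId: "<efs-id>"       # placeholder, as in the chart's own examples
```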

View File

@@ -19,7 +19,7 @@ StorageClass:
# volumeHandle: "<efs-id>:/path2"
# Deprecated and removed with next release
PersistentVolume:
create: false
EfsId: ""
Name: ""
# PersistentVolume:
# create: false
# EfsId: ""
# Name: ""

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.3.6
version: 0.4.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -15,6 +15,6 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: cert-manager
version: 0.15.1
version: 1.0.3
repository: https://charts.jetstack.io
kubeVersion: ">= 1.16.0"

View File

@@ -1,6 +1,6 @@
# kubezero-cert-manager
![Version: 0.3.6](https://img.shields.io/badge/Version-0.3.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for cert-manager
@@ -18,7 +18,7 @@ Kubernetes: `>= 1.16.0`
| Repository | Name | Version |
|------------|------|---------|
| https://charts.jetstack.io | cert-manager | 0.15.1 |
| https://charts.jetstack.io | cert-manager | 1.0.3 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## AWS - IAM Role

View File

@@ -1,6 +1,6 @@
# kubezero-logging
![Version: 0.3.9](https://img.shields.io/badge/Version-0.3.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.2.1](https://img.shields.io/badge/AppVersion-1.2.1-informational?style=flat-square)
![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.2.1](https://img.shields.io/badge/AppVersion-1.2.1-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@@ -61,11 +61,12 @@ Kubernetes: `>= 1.16.0`
| es.prometheus | bool | `false` | |
| es.s3Snapshot.enabled | bool | `false` | |
| es.s3Snapshot.iamrole | string | `""` | |
| fluent-bit.config.filters | string | `"[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/etc/functions.lua\n call reassemble_cri_logs\n\n[FILTER]\n Name kubernetes\n Match kube.*\n Merge_Log On\n Keep_Log Off\n K8S-Logging.Parser On\n K8S-Logging.Exclude On\n\n[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/etc/functions.lua\n call dedot\n"` | |
| fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n Parser cri\n Tag kube.*\n Mem_Buf_Limit 16MB\n Skip_Long_Lines On\n Refresh_Interval 10\n Exclude_Path *.gz,*.zip\n DB /var/log/flb_kube.db\n DB.Sync Normal\n[INPUT]\n Name tail\n Path /var/log/kubernetes/audit.log\n Parser json\n Tag audit.api-server\n Mem_Buf_Limit 8MB\n Skip_Long_Lines On\n DB /var/log/flb_kube_audit.db\n DB.Sync Normal\n"` | |
| fluent-bit.config.lua | string | `"function dedot(tag, timestamp, record)\n if record[\"kubernetes\"] == nil then\n return 0, 0, 0\n end\n dedot_keys(record[\"kubernetes\"][\"annotations\"])\n dedot_keys(record[\"kubernetes\"][\"labels\"])\n return 1, timestamp, record\nend\n\nfunction dedot_keys(map)\n if map == nil then\n return\n end\n local new_map = {}\n local changed_keys = {}\n for k, v in pairs(map) do\n local dedotted = string.gsub(k, \"%.\", \"_\")\n if dedotted ~= k then\n new_map[dedotted] = v\n changed_keys[k] = true\n end\n end\n for k in pairs(changed_keys) do\n map[k] = nil\n end\n for k, v in pairs(new_map) do\n map[k] = v\n end\nend\n\nlocal reassemble_state = {}\n\nfunction reassemble_cri_logs(tag, timestamp, record)\n -- IMPORTANT: reassemble_key must be unique for each parser stream\n -- otherwise entries from different sources will get mixed up.\n -- Either make sure that your parser tags satisfy this or construct\n -- reassemble_key some other way\n local reassemble_key = tag\n -- if partial line, accumulate\n if record.logtag == 'P' then\n reassemble_state[reassemble_key] = reassemble_state[reassemble_key] or \"\" .. record.message\n return -1, 0, 0\n end\n -- otherwise it's a full line, concatenate with accumulated partial lines if any\n record.message = reassemble_state[reassemble_key] or \"\" .. (record.message or \"\")\n reassemble_state[reassemble_key] = nil\n return 1, timestamp, record\nend\n"` | |
| fluent-bit.config.outputs | string | `"[OUTPUT]\n Match *\n Name forward\n Host logging-fluentd\n Port 24224\n"` | |
| fluent-bit.config.service | string | `"[SERVICE]\n Flush 5\n Daemon Off\n Log_Level warn\n Parsers_File parsers.conf\n Parsers_File custom_parsers.conf\n HTTP_Server On\n HTTP_Listen 0.0.0.0\n HTTP_Port 2020\n"` | |
| fluent-bit.config.customParsers | string | `"[PARSER]\n Name cri-log\n Format regex\n Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$\n Time_Key time\n Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n"` | |
| fluent-bit.config.filters | string | `"[FILTER]\n Name lua\n Match cri.*\n script /fluent-bit/etc/functions.lua\n call reassemble_cri_logs\n\n[FILTER]\n Name kubernetes\n Match cri.*\n Merge_Log On\n Merge_Log_Key kube\n Kube_Tag_Prefix cri.var.log.containers.\n Keep_Log Off\n K8S-Logging.Parser Off\n K8S-Logging.Exclude Off\n\n[FILTER]\n Name rewrite_tag\n Match cri.*\n Emitter_Name kube_tag_rewriter\n Rule logtag F kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false\n\n[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/etc/functions.lua\n call nest_k8s_ns\n"` | |
| fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n Parser cri-log\n Tag cri.*\n Mem_Buf_Limit 16MB\n Skip_Long_Lines On\n Refresh_Interval 10\n DB /var/log/flb_kube.db\n DB.Sync Normal\n"` | |
| fluent-bit.config.lua | string | `"local reassemble_state = {}\n\nfunction reassemble_cri_logs(tag, timestamp, record)\n local reassemble_key = tag\n if record.logtag == 'P' then\n reassemble_state[reassemble_key] = reassemble_state[reassemble_key] or \"\" .. record.log\n return -1, 0, 0\n end\n record.log = reassemble_state[reassemble_key] or \"\" .. (record.log or \"\")\n reassemble_state[reassemble_key] = nil\n return 1, timestamp, record\nend\n\nfunction nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` | |
| fluent-bit.config.outputs | string | `"[OUTPUT]\n Match *\n Name forward\n Host logging-fluentd\n Port 24224\n Shared_Key cloudbender\n Send_options true\n Require_ack_response true\n"` | |
| fluent-bit.config.service | string | `"[SERVICE]\n Flush 1\n Daemon Off\n Log_Level warn\n Parsers_File parsers.conf\n Parsers_File custom_parsers.conf\n HTTP_Server On\n HTTP_Listen 0.0.0.0\n HTTP_Port 2020\n"` | |
| fluent-bit.enabled | bool | `false` | |
| fluent-bit.serviceMonitor.enabled | bool | `true` | |
| fluent-bit.serviceMonitor.namespace | string | `"monitoring"` | |
@@ -73,7 +74,7 @@ Kubernetes: `>= 1.16.0`
| fluent-bit.test.enabled | bool | `false` | |
| fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| fluentd.configMaps."filter.conf" | string | `"<filter disabled.kube.**>\n @type parser\n key_name message\n remove_key_name_field true\n reserve_data true\n # inject_key_prefix message_json.\n emit_invalid_record_to_error false\n <parse>\n @type json\n </parse>\n</filter>\n"` | |
| fluentd.configMaps."filter.conf" | string | `"<filter disabled.kube.**>\n @type parser\n key_name message\n remove_key_name_field true\n reserve_data true\n reserve_time true\n # inject_key_prefix message_json.\n emit_invalid_record_to_error false\n <parse>\n @type json\n </parse>\n</filter>\n"` | |
| fluentd.configMaps."forward-input.conf" | string | `"<source>\n @type forward\n port 24224\n bind 0.0.0.0\n skip_invalid_event true\n send_keepalive_packet true\n <security>\n self_hostname \"#{ENV['HOSTNAME']}\"\n shared_key \"#{ENV['FLUENTD_SHARED_KEY']}\"\n </security>\n</source>\n"` | |
| fluentd.configMaps."general.conf" | string | `"<label @FLUENT_LOG>\n <match **>\n @type null\n </match>\n</label>\n<source>\n @type http\n port 9880\n bind 0.0.0.0\n keepalive_timeout 30\n</source>\n<source>\n @type monitor_agent\n bind 0.0.0.0\n port 24220\n tag fluentd.monitor.metrics\n</source>\n"` | |
| fluentd.configMaps."output.conf" | string | `"<match **>\n @id elasticsearch\n @type elasticsearch\n @log_level info\n include_tag_key true\n id_key id\n remove_keys id\n\n # KubeZero pipeline incl. GeoIP etc.\n # pipeline fluentd\n\n host \"#{ENV['OUTPUT_HOST']}\"\n port \"#{ENV['OUTPUT_PORT']}\"\n scheme \"#{ENV['OUTPUT_SCHEME']}\"\n ssl_version \"#{ENV['OUTPUT_SSL_VERSION']}\"\n ssl_verify \"#{ENV['OUTPUT_SSL_VERIFY']}\"\n user \"#{ENV['OUTPUT_USER']}\"\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n # reload_on_failure true\n request_timeout 15s\n suppress_type_name true\n\n <buffer tag>\n @type file_single\n path /var/log/fluentd-buffers/kubernetes.system.buffer\n flush_mode interval\n flush_thread_count 2\n flush_interval 30s\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 60m\n overflow_action drop_oldest_chunk\n </buffer>\n</match>\n"` | |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for prometheus-operator
type: application
version: 0.1.4
version: 0.2.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -15,10 +15,10 @@ dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: prometheus-operator
version: 9.3.1
repository: https://kubernetes-charts.storage.googleapis.com/
- name: kube-prometheus-stack
version: 10.1.3
repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter
version: 2.5.0
repository: https://kubernetes-charts.storage.googleapis.com/
version: 2.7.0
repository: https://prometheus-community.github.io/helm-charts
kubeVersion: ">= 1.16.0"
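
After the move off the deprecated Google Helm repository and the upstream rename from prometheus-operator to kube-prometheus-stack, the dependency list resolves to:

```yaml
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.3"
    repository: https://zero-down-time.github.io/kubezero/
  - name: kube-prometheus-stack
    version: 10.1.3
    repository: https://prometheus-community.github.io/helm-charts
  - name: prometheus-adapter
    version: 2.7.0
    repository: https://prometheus-community.github.io/helm-charts
```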

View File

@@ -1,6 +1,6 @@
# kubezero-metrics
![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for prometheus-operator
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.16.0`
| Repository | Name | Version |
|------------|------|---------|
| https://kubernetes-charts.storage.googleapis.com/ | prometheus-adapter | 2.5.0 |
| https://kubernetes-charts.storage.googleapis.com/ | prometheus-operator | 9.3.1 |
| https://prometheus-community.github.io/helm-charts | kube-prometheus-stack | 10.0.1 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 2.7.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Values
@@ -30,12 +30,70 @@ Kubernetes: `>= 1.16.0`
| grafana.istio.gateway | string | `"istio-system/ingressgateway"` | |
| grafana.istio.ipBlocks | list | `[]` | |
| grafana.istio.url | string | `""` | |
| kube-prometheus-stack.alertmanager.enabled | bool | `false` | |
| kube-prometheus-stack.coreDns.enabled | bool | `true` | |
| kube-prometheus-stack.defaultRules.create | bool | `true` | |
| kube-prometheus-stack.grafana.enabled | bool | `true` | |
| kube-prometheus-stack.grafana.initChownData.enabled | bool | `false` | |
| kube-prometheus-stack.grafana.persistence.enabled | bool | `true` | |
| kube-prometheus-stack.grafana.persistence.size | string | `"4Gi"` | |
| kube-prometheus-stack.grafana.persistence.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| kube-prometheus-stack.grafana.plugins[0] | string | `"grafana-piechart-panel"` | |
| kube-prometheus-stack.grafana.service.portName | string | `"http-grafana"` | |
| kube-prometheus-stack.grafana.testFramework.enabled | bool | `false` | |
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | |
| kube-prometheus-stack.kubeControllerManager.service.targetPort | int | `10257` | |
| kube-prometheus-stack.kubeControllerManager.serviceMonitor.https | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.serviceMonitor.insecureSkipVerify | bool | `true` | |
| kube-prometheus-stack.kubeDns.enabled | bool | `false` | |
| kube-prometheus-stack.kubeEtcd.enabled | bool | `true` | |
| kube-prometheus-stack.kubeEtcd.service.port | int | `2381` | |
| kube-prometheus-stack.kubeEtcd.service.targetPort | int | `2381` | |
| kube-prometheus-stack.kubeProxy.enabled | bool | `true` | |
| kube-prometheus-stack.kubeScheduler.enabled | bool | `true` | |
| kube-prometheus-stack.kubeScheduler.service.port | int | `10259` | |
| kube-prometheus-stack.kubeScheduler.service.targetPort | int | `10259` | |
| kube-prometheus-stack.kubeScheduler.serviceMonitor.https | bool | `true` | |
| kube-prometheus-stack.kubeScheduler.serviceMonitor.insecureSkipVerify | bool | `true` | |
| kube-prometheus-stack.kubeStateMetrics.enabled | bool | `true` | |
| kube-prometheus-stack.kubelet.enabled | bool | `true` | |
| kube-prometheus-stack.kubelet.serviceMonitor.cAdvisor | bool | `true` | |
| kube-prometheus-stack.nodeExporter.enabled | bool | `true` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].action | string | `"replace"` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].regex | string | `"^(.*)$"` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].replacement | string | `"$1"` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].separator | string | `";"` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].sourceLabels[0] | string | `"__meta_kubernetes_pod_node_name"` | |
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].targetLabel | string | `"node"` | |
| kube-prometheus-stack.prometheus.enabled | bool | `true` | |
| kube-prometheus-stack.prometheus.prometheusSpec.portName | string | `"http-prometheus"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.cpu | string | `"1000m"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.memory | string | `"3Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.cpu | string | `"500m"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.memory | string | `"1Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.retention | string | `"8d"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled | bool | `false` | |
| kube-prometheus-stack.prometheusOperator.createCustomResource | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.manageCrds | bool | `false` | |
| kube-prometheus-stack.prometheusOperator.namespaces.additional[0] | string | `"kube-system"` | |
| kube-prometheus-stack.prometheusOperator.namespaces.additional[1] | string | `"logging"` | |
| kube-prometheus-stack.prometheusOperator.namespaces.releaseNamespace | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| kube-prometheus-stack.prometheusOperator.tlsProxy.enabled | bool | `false` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| prometheus-adapter.prometheus.url | string | `"http://metrics-prometheus-operato-prometheus"` | |
| prometheus-adapter.prometheus.url | string | `"http://metrics-kube-prometheus-st-prometheus"` | |
| prometheus-adapter.rules.default | bool | `false` | |
| prometheus-adapter.rules.resource.cpu.containerLabel | string | `"container"` | |
| prometheus-adapter.rules.resource.cpu.containerQuery | string | `"sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}[5m])) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.nodeQuery | string | `"sum(1 - irate(node_cpu_seconds_total{mode=\"idle\"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.containerQuery | string | `"sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}[3m])) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.nodeQuery | string | `"sum(1 - irate(node_cpu_seconds_total{mode=\"idle\"}[3m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.namespace.resource | string | `"namespace"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.node.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.pod.resource | string | `"pod"` | |
@@ -45,67 +103,9 @@ Kubernetes: `>= 1.16.0`
| prometheus-adapter.rules.resource.memory.resources.overrides.namespace.resource | string | `"namespace"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.node.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.pod.resource | string | `"pod"` | |
| prometheus-adapter.rules.resource.window | string | `"5m"` | |
| prometheus-adapter.rules.resource.window | string | `"3m"` | |
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| prometheus-operator.alertmanager.enabled | bool | `false` | |
| prometheus-operator.coreDns.enabled | bool | `true` | |
| prometheus-operator.defaultRules.create | bool | `true` | |
| prometheus-operator.grafana.enabled | bool | `true` | |
| prometheus-operator.grafana.initChownData.enabled | bool | `false` | |
| prometheus-operator.grafana.persistence.enabled | bool | `true` | |
| prometheus-operator.grafana.persistence.size | string | `"4Gi"` | |
| prometheus-operator.grafana.persistence.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| prometheus-operator.grafana.plugins[0] | string | `"grafana-piechart-panel"` | |
| prometheus-operator.grafana.service.portName | string | `"http-grafana"` | |
| prometheus-operator.grafana.testFramework.enabled | bool | `false` | |
| prometheus-operator.kubeApiServer.enabled | bool | `true` | |
| prometheus-operator.kubeControllerManager.enabled | bool | `true` | |
| prometheus-operator.kubeControllerManager.service.port | int | `10257` | |
| prometheus-operator.kubeControllerManager.service.targetPort | int | `10257` | |
| prometheus-operator.kubeControllerManager.serviceMonitor.https | bool | `true` | |
| prometheus-operator.kubeControllerManager.serviceMonitor.insecureSkipVerify | bool | `true` | |
| prometheus-operator.kubeDns.enabled | bool | `false` | |
| prometheus-operator.kubeEtcd.enabled | bool | `true` | |
| prometheus-operator.kubeEtcd.service.port | int | `2381` | |
| prometheus-operator.kubeEtcd.service.targetPort | int | `2381` | |
| prometheus-operator.kubeProxy.enabled | bool | `true` | |
| prometheus-operator.kubeScheduler.enabled | bool | `true` | |
| prometheus-operator.kubeScheduler.service.port | int | `10259` | |
| prometheus-operator.kubeScheduler.service.targetPort | int | `10259` | |
| prometheus-operator.kubeScheduler.serviceMonitor.https | bool | `true` | |
| prometheus-operator.kubeScheduler.serviceMonitor.insecureSkipVerify | bool | `true` | |
| prometheus-operator.kubeStateMetrics.enabled | bool | `true` | |
| prometheus-operator.kubelet.enabled | bool | `true` | |
| prometheus-operator.kubelet.serviceMonitor.cAdvisor | bool | `true` | |
| prometheus-operator.nodeExporter.enabled | bool | `true` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].action | string | `"replace"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].regex | string | `"^(.*)$"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].replacement | string | `"$1"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].separator | string | `";"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].sourceLabels[0] | string | `"__meta_kubernetes_pod_node_name"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].targetLabel | string | `"node"` | |
| prometheus-operator.prometheus.enabled | bool | `true` | |
| prometheus-operator.prometheus.prometheusSpec.portName | string | `"http-prometheus"` | |
| prometheus-operator.prometheus.prometheusSpec.resources.limits.cpu | string | `"1000m"` | |
| prometheus-operator.prometheus.prometheusSpec.resources.limits.memory | string | `"3Gi"` | |
| prometheus-operator.prometheus.prometheusSpec.resources.requests.cpu | string | `"500m"` | |
| prometheus-operator.prometheus.prometheusSpec.resources.requests.memory | string | `"1Gi"` | |
| prometheus-operator.prometheus.prometheusSpec.retention | string | `"8d"` | |
| prometheus-operator.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
| prometheus-operator.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
| prometheus-operator.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| prometheus-operator.prometheusOperator.admissionWebhooks.enabled | bool | `false` | |
| prometheus-operator.prometheusOperator.createCustomResource | bool | `true` | |
| prometheus-operator.prometheusOperator.enabled | bool | `true` | |
| prometheus-operator.prometheusOperator.manageCrds | bool | `false` | |
| prometheus-operator.prometheusOperator.namespaces.additional[0] | string | `"kube-system"` | |
| prometheus-operator.prometheusOperator.namespaces.additional[1] | string | `"logging"` | |
| prometheus-operator.prometheusOperator.namespaces.releaseNamespace | bool | `true` | |
| prometheus-operator.prometheusOperator.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| prometheus-operator.prometheusOperator.tlsProxy.enabled | bool | `false` | |
| prometheus-operator.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
| prometheus-operator.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| prometheus.istio.enabled | bool | `false` | |
| prometheus.istio.gateway | string | `"istio-system/ingressgateway"` | |
| prometheus.istio.url | string | `""` | |

View File

@@ -31,5 +31,5 @@ spec:
http:
- route:
- destination:
host: metrics-prometheus-operato-prometheus
host: metrics-kube-prometheus-st-prometheus
{{- end }}

View File

@@ -11,7 +11,7 @@ prometheus:
url: ""
gateway: istio-system/ingressgateway
prometheus-operator:
kube-prometheus-stack:
defaultRules:
create: true
@@ -60,9 +60,9 @@ prometheus-operator:
prometheusOperator:
enabled: true
#image:
# tag: v0.41.0
# tag: v0.42.1
#prometheusConfigReloaderImage:
# tag: v0.41.0
# tag: v0.42.1
# Run on controller nodes
tolerations:
@@ -113,7 +113,7 @@ prometheus-operator:
cpu: "500m"
limits:
memory: "3Gi"
cpu: "1000m"
# cpu: "1000m"
storageSpec:
volumeClaimTemplate:
@@ -148,7 +148,7 @@ prometheus-operator:
# Metrics adapter
prometheus-adapter:
prometheus:
url: http://metrics-prometheus-operato-prometheus
url: http://metrics-kube-prometheus-st-prometheus
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@@ -160,8 +160,8 @@ prometheus-adapter:
default: false
resource:
cpu:
containerQuery: sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>)
nodeQuery: sum(1 - irate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
containerQuery: sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[3m])) by (<<.GroupBy>>)
nodeQuery: sum(1 - irate(node_cpu_seconds_total{mode="idle"}[3m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
resources:
overrides:
node:
@@ -183,4 +183,4 @@ prometheus-adapter:
pod:
resource: pod
containerLabel: container
window: 5m
window: 3m
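
Since the upstream chart was renamed, existing user overrides must move from the old `prometheus-operator:` top-level key to `kube-prometheus-stack:`, and the Helm-generated Prometheus service name changes with it; a minimal migration sketch (the retention override is just an example value taken from this chart's defaults):

```yaml
kube-prometheus-stack:      # formerly: prometheus-operator
  prometheus:
    prometheusSpec:
      retention: 8d
prometheus-adapter:
  prometheus:
    # formerly: http://metrics-prometheus-operato-prometheus
    url: http://metrics-kube-prometheus-st-prometheus
```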

View File

@@ -44,4 +44,4 @@ Kubernetes: `>= 1.16.0`
| platform | string | `"aws"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.2.1](https://github.com/norwoodj/helm-docs/releases/v1.2.1)
Autogenerated from chart metadata using [helm-docs v1.4.0](https://github.com/norwoodj/helm-docs/releases/v1.4.0)

View File

@@ -78,17 +78,21 @@ kubezero:
aws-efs-csi-driver:
enabled: {{ index .Values "aws-efs-csi-driver" "enabled" }}
values:
{{- with index .Values "aws-efs-csi-driver" "nodeSelector" }}
aws-efs-csi-driver:
nodeSelector:
node.kubernetes.io/csi.efs.fs: {{ index .Values "aws-efs-csi-driver" "EfsId" }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with index .Values "aws-efs-csi-driver" "PersistentVolumes" }}
PersistentVolumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if index .Values "aws-efs-csi-driver" "EfsId" }}
PersistentVolume:
create: true
EfsId: {{ index .Values "aws-efs-csi-driver" "EfsId" }}
Name: {{ default "kubezero-efs-pv" ( index .Values "aws-efs-csi-driver" "PVName" ) }}
{{- end }}
{{- end }}
istio:
@@ -120,9 +124,9 @@ kubezero:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
{{- if index .Values "metrics" "prometheus-operator" }}
prometheus-operator:
{{- with index .Values "metrics" "prometheus-operator" }}
{{- if index .Values "metrics" "kube-prometheus-stack" }}
kube-prometheus-stack:
{{- with index .Values "metrics" "kube-prometheus-stack" }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
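
A sketch of parent-chart values that exercise the new template paths above (the EFS filesystem id is a hypothetical placeholder; `PVName` falls back to `kubezero-efs-pv` when unset):

```yaml
aws-efs-csi-driver:
  enabled: true
  EfsId: fs-0123456789abcdef0        # hypothetical example id
  nodeSelector:
    node.kubernetes.io/csi.efs.fs: fs-0123456789abcdef0
  PersistentVolumes:
    - name: example-pv
      volumeHandle: "fs-0123456789abcdef0:/path"
metrics:
  kube-prometheus-stack:
    prometheus:
      prometheusSpec:
        retention: 8d
```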