Patch release 2.20.2 #48

Merged
stefan merged 6 commits from master into stable 2021-08-12 19:51:02 +00:00
21 changed files with 147 additions and 49 deletions

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-aws-ebs-csi-driver
description: KubeZero Umbrella Chart for aws-ebs-csi-driver
type: application
version: 0.6.3
version: 0.6.4
appVersion: 1.2.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

View File

@@ -1,6 +1,6 @@
# kubezero-aws-ebs-csi-driver
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.2.3](https://img.shields.io/badge/AppVersion-1.2.3-informational?style=flat-square)
![Version: 0.6.4](https://img.shields.io/badge/Version-0.6.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.2.4](https://img.shields.io/badge/AppVersion-1.2.4-informational?style=flat-square)
KubeZero Umbrella Chart for aws-ebs-csi-driver
@@ -23,7 +23,7 @@ Kubernetes: `>= 1.18.0`
| Repository | Name | Version |
|------------|------|---------|
| | aws-ebs-csi-driver | 1.2.3 |
| | aws-ebs-csi-driver | 1.2.4 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## IAM Role
@@ -50,6 +50,9 @@ This class is by default also set as default storage class.
| aws-ebs-csi-driver.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-ebs-csi-driver.enableVolumeSnapshot | bool | `true` | |
| aws-ebs-csi-driver.node.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.node.tolerations[0].key | string | `"kubezero-workergroup"` | |
| aws-ebs-csi-driver.node.tolerations[0].operator | string | `"Exists"` | |
| aws-ebs-csi-driver.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| aws-ebs-csi-driver.storageClasses[0].allowVolumeExpansion | bool | `true` | |
| aws-ebs-csi-driver.storageClasses[0].name | string | `"ebs-sc-gp2-xfs"` | |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-lib
description: KubeZero helm library - common helm functions and blocks
type: library
version: 0.1.3
version: 0.1.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -1,14 +1,3 @@
{{- /*
Common set of labels
*/ -}}
{{- define "kubezero-lib.labels" -}}
helm.sh/chart: {{ include "kubezero-lib.chart" . }}
app.kubernetes.io/name: {{ include "kubezero-lib.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: kubezero
{{- end -}}
{{- /*
Common naming functions
*/ -}}
@@ -32,3 +21,22 @@ Common naming functions
{{- define "kubezero-lib.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "kubezero-lib.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubezero-lib.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- /*
Common set of labels
*/ -}}
{{- define "kubezero-lib.labels" -}}
helm.sh/chart: {{ include "kubezero-lib.chart" . }}
{{ include "kubezero-lib.selectorLabels" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.7.5
version: 0.7.6
appVersion: 1.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

View File

@@ -1,6 +1,6 @@
# kubezero-logging
![Version: 0.7.5](https://img.shields.io/badge/Version-0.7.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
![Version: 0.7.6](https://img.shields.io/badge/Version-0.7.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@@ -111,7 +111,7 @@ Kubernetes: `>= 1.18.0`
| fluentd.env[1].valueFrom.secretKeyRef.name | string | `"logging-es-elastic-user"` | |
| fluentd.fileConfigs."00_system.conf" | string | `"<system>\n root_dir /var/log/fluentd\n log_level info\n ignore_repeated_log_interval 60s\n ignore_same_log_interval 60s\n workers 2\n</system>"` | |
| fluentd.fileConfigs."01_sources.conf" | string | `"<source>\n @type http\n @label @KUBERNETES\n port 9880\n bind 0.0.0.0\n keepalive_timeout 30\n</source>\n\n<source>\n @type forward\n @label @KUBERNETES\n port 24224\n bind 0.0.0.0\n # skip_invalid_event true\n send_keepalive_packet true\n <security>\n self_hostname \"#{ENV['HOSTNAME']}\"\n shared_key {{ .Values.shared_key }}\n </security>\n</source>"` | |
| fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops eg. ES has issues etc.\n # discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | |
| fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops eg. ES has issues etc.\n # discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n # Exclude current fluent-bit multiline noise\n <filter kube.logging.fluent-bit>\n @type grep\n <exclude>\n key log\n pattern /could not append content to multiline context/\n </exclude>\n </filter>\n\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | |
| fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id out_es\n @type elasticsearch\n # @log_level debug\n include_tag_key true\n id_key id\n remove_keys id\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n hosts \"{{ .Values.output.host }}\"\n port 9200\n scheme http\n user elastic\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 60s\n suppress_type_name true\n slow_flush_log_threshold 55.0\n\n # Retry failed bulk requests\n # https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types\n unrecoverable_error_types [\"out_of_memory_error\"]\n bulk_message_request_threshold 2097152\n\n <buffer>\n @type file\n\n flush_mode interval\n flush_thread_count 1\n flush_interval 30s\n\n chunk_limit_size 8MB\n total_limit_size 2GB\n\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 6h\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | |
| fluentd.image.repository | string | `"fluent/fluentd-kubernetes-daemonset"` | |
| fluentd.image.tag | string | `"v1-debian-elasticsearch"` | |

View File

@@ -156,6 +156,15 @@ fluentd:
@label @FLUENT_LOG
</match>
# Exclude current fluent-bit multiline noise
<filter kube.logging.fluent-bit>
@type grep
<exclude>
key log
pattern /could not append content to multiline context/
</exclude>
</filter>
<match **>
@type relabel
@label @DISPATCH

View File

@@ -14,7 +14,7 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
version: ">= 0.1.4"
repository: https://zero-down-time.github.io/kubezero/
- name: kube-prometheus-stack
version: 17.0.3

View File

@@ -21,3 +21,5 @@
- https://grafana.com/api/dashboards/9578/revisions/4/download
## Prometheus
- https://grafana.com/api/dashboards/3662/revisions/2/download
## AlertManager SNS Forwarder
- https://github.com/DataReply/alertmanager-sns-forwarder

View File

@@ -7,5 +7,5 @@ metadata:
{{ include "kubezero-lib.labels" $ | indent 4 }}
data:
script.sh: |-
#!/bin/sh
find /tmp/dashboards -name "*.gz" -exec gunzip -f -k {} \;
#!/bin/sh
find /tmp/dashboards -name "*.gz" -exec gunzip -f -k {} \;

View File

@@ -5,6 +5,9 @@ kube-prometheus-stack:
defaultRules:
create: true
#additionalRuleLabels:
# clusterName: myTestCluster
# awsRegion: eu-central-1
coreDns:
enabled: true
@@ -199,6 +202,64 @@ kube-prometheus-stack:
# externalUrl:
logFormat: json
# for none AWS cluster or if SNS AlertHub should NOT be used, remove sns-forwarder by overwriting containers eg.:
# containers: []
# Add sns-forwarder to AlertManager pod, see: https://github.com/DataReply/alertmanager-sns-forwarder
# uses the alertmanager serviceaccount to assume IAM role, requires annotation: kubezero.com/sns_forwarder_arn_prefix to point to SNSAlertHub
# eg: "arn:aws:sns:eu-central-1:123456789012:"
#containers:
#- name: alertmanager-sns-forwarder
# image: datareply/alertmanager-sns-forwarder:latest
# imagePullPolicy: Always
# env:
# - name: SNS_FORWARDER_ARN_PREFIX
# valueFrom:
# fieldRef:
# fieldPath: metadata.annotations['kubezero.com/sns_forwarder_ARN_PREFIX']
# - name: AWS_ROLE_ARN
# valueFrom:
# fieldRef:
# fieldPath: metadata.annotations['kubezero.com/sns_forwarder_AWS_ROLE_ARN']
# - name: AWS_WEB_IDENTITY_TOKEN_FILE
# value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
# - name: AWS_STS_REGIONAL_ENDPOINTS
# value: regional
# volumeMounts:
# - name: aws-token
# mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
# readOnly: true
# resources:
# limits:
# memory: 64Mi
# cpu: 100m
# requests:
# cpu: 25m
# memory: 32Mi
# ports:
# - containerPort: 9087
# name: webhook-port
# livenessProbe:
# httpGet:
# path: /health
# port: webhook-port
# initialDelaySeconds: 30
# timeoutSeconds: 10
# readinessProbe:
# httpGet:
# path: /health
# port: webhook-port
# initialDelaySeconds: 10
# timeoutSeconds: 10
#volumes:
#- name: aws-token
# projected:
# sources:
# - serviceAccountToken:
# path: token
# expirationSeconds: 86400
# audience: "sts.amazonaws.com"
# Metrics adapter
prometheus-adapter:
enabled: true

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.2.1
version: 0.2.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -0,0 +1,22 @@
{{- if .Values.nats.istio.enabled }}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: {{ .Release.Name }}-nats
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
hosts:
- {{ .Values.nats.istio.url }}
gateways:
- {{ .Values.nats.istio.gateway }}
tcp:
- match:
- port: 1883
route:
- destination:
host: nats
port:
number: 1883
{{- end }}

View File

@@ -18,6 +18,11 @@ nats:
mqtt:
enabled: false
istio:
enabled: false
# url: "mq.example.com"
gateway: istio-ingress/private-ingressgateway
# rabbitmq
rabbitmq:
enabled: false

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-redis
description: KubeZero Umbrella Chart for Redis HA
type: application
version: 0.3.0
version: 0.3.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -15,7 +15,7 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: redis
version: 12.1.1
version: 14.8.8
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
- name: redis-cluster

View File

@@ -22,7 +22,7 @@ spec:
address:
socket_address:
address: {{ $.Release.Name }}-{{ $i }}.{{ $.Release.Name }}-headless.{{ $.Release.Namespace }}.svc.cluster.local
port_value: {{ index $.Values "redis-cluster" "redisPort" }}
port_value: 6379
{{- end }}
cluster_type:
name: envoy.clusters.redis

View File

@@ -13,7 +13,7 @@ spec:
match:
context: GATEWAY
listener:
name: 0.0.0.0_{{ index .Values "redis-cluster" "redisPort" }}
name: 0.0.0.0_{{ default 6379 .Values.istio.port }}
filterChain:
filter:
name: "envoy.filters.network.tcp_proxy"

View File

@@ -21,6 +21,6 @@ spec:
{{- end }}
to:
- operation:
ports: ["{{ default 6379 .Values.redis.redisPort }}"]
ports: ["{{ default 6379 .Values.istio.port }}"]
{{- end }}
{{- end }}

View File

@@ -13,10 +13,10 @@ spec:
- {{ .Values.istio.gateway }}
tcp:
- match:
- port: {{ default 6379 .Values.redis.redisPort }}
- port: {{ default 6379 .Values.istio.port }}
route:
- destination:
host: redis-headless
host: redis-cluster-headless
port:
number: {{ default 6379 .Values.redis.redisPort }}
number: 6379
{{- end }}

View File

@@ -1,12 +1,13 @@
redis:
enabled: false
redisPort: 6379
architecture: standalone
cluster:
slaveCount: 0
replica:
replicaCount: 0
usePassword: false
auth:
enabled: false
master:
persistence:
@@ -20,14 +21,11 @@ redis:
enabled: false
serviceMonitor:
enabled: false
# extraArgs:
# redis.addr: "redis://localhost:6379"
redis-cluster:
enabled: false
redisPort: 6379
usePassword: false
cluster:

View File

@@ -15,17 +15,12 @@ istio-ingress:
istio-ingressgateway:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with index .Values "istio-ingress" "public" "dnsNames" }}
# Legacy
dnsNames:
{{- toYaml . | nindent 2 }}
certificates:
{{- with index .Values "istio-ingress" "public" "dnsNames" }}
- name: ingress-cert
dnsNames:
{{- toYaml . | nindent 4 }}
{{- end }}
# New multi cert gateway
{{- range $cert := (index .Values "istio-ingress" "public" "certificates") }}
- name: {{ $cert.name }}
dnsNames:
@@ -44,17 +39,12 @@ istio-private-ingress:
istio-ingressgateway:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with index .Values "istio-ingress" "private" "dnsNames" }}
# Legacy
dnsNames:
{{- toYaml . | nindent 2 }}
certificates:
{{- with index .Values "istio-ingress" "private" "dnsNames" }}
- name: private-ingress-cert
dnsNames:
{{- toYaml . | nindent 4 }}
{{- end }}
# New multi cert gateway
{{- range $cert := (index .Values "istio-ingress" "private" "certificates") }}
- name: {{ $cert.name }}
dnsNames: