master #9

Merged
stefan merged 96 commits from master into stable 2020-08-24 11:15:28 +00:00
124 changed files with 8346 additions and 8214 deletions

15
CHANGES.md Normal file
View File

@ -0,0 +1,15 @@
# CFN / Platform
- Kubernetes updated to 1.17
- kube-proxy now uses ipvs
- metrics support for kube-proxy (see the sketch below)
- no more reliance on a custom resource for S3 buckets
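
The two kube-proxy items boil down to two settings; a minimal sketch of the corresponding upstream `KubeProxyConfiguration` (values illustrative, not the exact rendered config):

```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs                          # switch the proxier from iptables to ipvs
metricsBindAddress: 0.0.0.0:10249   # expose /metrics beyond localhost for scraping
```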
# Kubezero
- fully automated one-command bootstrap incl. all KubeZero components
- migrated from kube-prometheus to the prometheus-operator Helm charts for metrics
- latest Grafana incl. persistence
- kube-prometheus adapter improvements / customizations
- integrated EFS CSI driver into Kubezero
- Prometheus itself can be exposed via Istio ingress on demand to ease development of custom metrics (see the sketch below)
- backup script to export all cert-manager items between clusters
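
A hedged sketch of what exposing Prometheus through the Istio ingress could look like (hostname and service name are illustrative, not the actual chart output):

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: prometheus
  namespace: monitoring
spec:
  gateways:
    - istio-system/ingressgateway      # the shared KubeZero gateway
  hosts:
    - prometheus.example.com           # illustrative hostname
  http:
    - route:
        - destination:
            host: prometheus-operated  # headless service created by the operator
            port:
              number: 9090
```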

View File

@ -1,31 +0,0 @@
# Calico CNI
Current top-level still contains the deprecated Canal implementation.
It will be removed once the new AWS config is tested and rolled out to all existing clusters.
## AWS
Calico is set up based on the upstream calico-vxlan config from
`https://docs.projectcalico.org/v3.15/manifests/calico-vxlan.yaml`
Changes:
- VxLAN set to Always to not expose cluster communication to VPC
-> EC2 SecurityGroups still apply and only need to allow UDP 4789 for VxLAN traffic (see the sketch below)
-> No need to disable source/destination check on EC2 instances
-> Prepared for optional WireGuard encryption for all inter node traffic
- MTU set to 8941 (AWS jumbo-frame MTU 9001 minus 60, leaving headroom for optional WireGuard)
- Removed migration init-container
- Disable BGP and BIRD health checks
- Set FELIX log level to warning
- Enable Prometheus metrics
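
A hedged CloudFormation sketch of the single extra rule implied by the SecurityGroup note above (logical resource names are illustrative):

```yaml
NodeToNodeVxlan:
  Type: AWS::EC2::SecurityGroupIngress
  Properties:
    GroupId: !Ref NodeSecurityGroup               # assumed shared node SG
    SourceSecurityGroupId: !Ref NodeSecurityGroup
    IpProtocol: udp
    FromPort: 4789
    ToPort: 4789
    Description: Calico VxLAN overlay between nodes
```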
## Prometheus
See: https://grafana.com/grafana/dashboards/12175

View File

@ -1,101 +0,0 @@
--- calico-vxlan.yaml 2020-07-03 15:32:40.740506882 +0100
+++ calico.yaml 2020-07-03 15:27:47.651499841 +0100
@@ -10,13 +10,13 @@
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
- calico_backend: "bird"
+ calico_backend: "vxlan"
# Configure the MTU to use for workload interfaces and tunnels.
# - If Wireguard is enabled, set to your network MTU - 60
# - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
# - Otherwise, if IPIP is enabled, set to your network MTU - 20
# - Otherwise, if not using any encapsulation, set to your network MTU.
- veth_mtu: "1410"
+ veth_mtu: "8941"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
@@ -3451,29 +3451,6 @@
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
- # This container performs upgrade from host-local IPAM to calico-ipam.
- # It can be deleted if this is a fresh installation, or if you have already
- # upgraded to use calico-ipam.
- - name: upgrade-ipam
- image: calico/cni:v3.15.0
- command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
- env:
- - name: KUBERNETES_NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: CALICO_NETWORKING_BACKEND
- valueFrom:
- configMapKeyRef:
- name: calico-config
- key: calico_backend
- volumeMounts:
- - mountPath: /var/lib/cni/networks
- name: host-local-net-dir
- - mountPath: /host/opt/cni/bin
- name: cni-bin-dir
- securityContext:
- privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
@@ -3545,7 +3522,7 @@
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
- value: "k8s,bgp"
+ value: "k8s,kubeadm"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
@@ -3554,7 +3531,7 @@
value: "Never"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
- value: "CrossSubnet"
+ value: "Always"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
@@ -3595,9 +3572,17 @@
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
- value: "info"
+ value: "Warning"
+ - name: FELIX_LOGSEVERITYFILE
+ value: "Warning"
+ - name: FELIX_LOGSEVERITYSYS
+ value: ""
- name: FELIX_HEALTHENABLED
value: "true"
+ - name: FELIX_PROMETHEUSGOMETRICSENABLED
+ value: "false"
+ - name: FELIX_PROMETHEUSMETRICSENABLED
+ value: "true"
securityContext:
privileged: true
resources:
@@ -3608,7 +3593,6 @@
command:
- /bin/calico-node
- -felix-live
- - -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
@@ -3617,7 +3601,6 @@
command:
- /bin/calico-node
- -felix-ready
- - -bird-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,8 +0,0 @@
namespace: kube-system
resources:
- canal.yaml
patchesStrategicMerge:
- logging.yaml
- prometheus.yaml

View File

@ -1,16 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: canal
spec:
template:
spec:
containers:
- name: calico-node
env:
- name: FELIX_LOGSEVERITYSCREEN
value: "Warning"
- name: FELIX_LOGSEVERITYFILE
value: "Warning"
- name: FELIX_LOGSEVERITYSYS
value: ""

View File

@ -1,14 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: canal
spec:
template:
spec:
containers:
- name: calico-node
env:
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "false"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"

View File

@ -1,50 +0,0 @@
--- canal.yaml.orig 2020-07-02 16:56:37.279169481 +0100
+++ canal.yaml 2020-07-02 16:56:37.285169542 +0100
@@ -5,7 +5,6 @@
apiVersion: v1
metadata:
name: canal-config
- namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
@@ -3438,7 +3437,6 @@
apiVersion: apps/v1
metadata:
name: canal
- namespace: kube-system
labels:
k8s-app: canal
spec:
@@ -3683,7 +3681,6 @@
kind: ServiceAccount
metadata:
name: canal
- namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
@@ -3692,7 +3689,6 @@
kind: Deployment
metadata:
name: calico-kube-controllers
- namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
@@ -3706,7 +3702,6 @@
template:
metadata:
name: calico-kube-controllers
- namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
@@ -3741,7 +3736,6 @@
kind: ServiceAccount
metadata:
name: calico-kube-controllers
- namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml

1
charts/fluent-bit Symbolic link
View File

@ -0,0 +1 @@
../../helm-charts/charts/fluent-bit

View File

@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
name: kubezero-argo-cd
version: 0.3.5
version: 0.4.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -13,9 +13,9 @@ maintainers:
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: argo-cd
version: 2.5.0
version: 2.6.0
repository: https://argoproj.github.io/argo-helm
kubeVersion: ">= 1.16.0"

View File

@ -2,7 +2,7 @@ kubezero-argo-cd
================
KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
Current chart version is `0.3.5`
Current chart version is `0.4.1`
Source code can be found [here](https://kubezero.com)
@ -10,29 +10,45 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 2.5.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://argoproj.github.io/argo-helm | argo-cd | 2.6.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.controller.args.appResyncPeriod | string | `"300"` | |
| argo-cd.controller.args.operationProcessors | string | `"1"` | |
| argo-cd.controller.args.statusProcessors | string | `"2"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| argo-cd.controller.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.installCRDs | bool | `false` | |
| argo-cd.istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| argo-cd.istio.gateway | string | `"ingressgateway.istio-system.svc.cluster.local"` | Name of the Istio gateway to add the VirtualService to |
| argo-cd.istio.gateway | string | `"istio-system/ingressgateway"` | Name of the Istio gateway to add the VirtualService to |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.redis.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.redis.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.redis.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.repoServer.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| argo-cd.repoServer.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.repoServer.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.repoServer.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.server.config."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.server.config.url | string | `"argocd.example.com"` | ArgoCD hostname to be exposed via Istio |
| argo-cd.server.extraArgs[0] | string | `"--insecure"` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| argo-cd.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argo-cd.server.tolerations[0].effect | string | `"NoSchedule"` | |
@ -41,3 +57,7 @@ Source code can be found [here](https://kubezero.com)
| kubezero.global.defaultSource.pathPrefix | string | `""` | optional path prefix within repoURL to support eg. remote subtrees |
| kubezero.global.defaultSource.repoURL | string | `"https://github.com/zero-down-time/kubezero"` | default repository for argocd applications |
| kubezero.global.defaultSource.targetRevision | string | `"HEAD"` | default tracking of repoURL |
## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

View File

@ -0,0 +1,14 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

View File

@ -14,12 +14,13 @@ spec:
helm:
values: |
{{- toYaml .Values.kubezero | nindent 8 }}
destination:
server: {{ .Values.kubezero.global.defaultDestination.server }}
namespace: argocd
{{- if .Values.kubezero.global.syncPolicy }}
syncPolicy:
automated:
prune: true
selfHeal: false
{{- toYaml .Values.kubezero.global.syncPolicy | nindent 4 }}
{{- end }}

View File

@ -1,25 +1,26 @@
{{- if index .Values "argo-cd" "istio" "enabled" }}
{{- if index .Values "argo-cd" "istio" "ipBlocks" }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
name: argocd-allow-only
name: argocd-deny-not-in-ipblocks
namespace: istio-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
selector:
matchLabels:
app: istio-ingressgateway
action: DENY
rules:
{{- if index .Values "argo-cd" "istio" "ipBlocks" }}
- from:
- source:
ipBlocks:
notIpBlocks:
{{- with index .Values "argo-cd" "istio" "ipBlocks" }}
{{- . | toYaml | nindent 8 }}
{{- end }}
to:
- operation:
hosts: ["{{ index .Values "argo-cd" "server" "config" "url" }}"]
{{- else }}
- {}
{{- end }}
{{- end }}
{{- end }}

View File

@ -24,6 +24,12 @@ spec:
server: https://kubernetes.default.svc
- namespace: istio-system
server: https://kubernetes.default.svc
- namespace: monitoring
server: https://kubernetes.default.svc
- namespace: elastic-system
server: https://kubernetes.default.svc
- namespace: logging
server: https://kubernetes.default.svc
clusterResourceWhitelist:
- group: '*'

View File

@ -15,6 +15,11 @@ kubezero:
# kubezero.global.defaultSource.pathPrefix -- optional path prefix within repoURL to support eg. remote subtrees
pathPrefix: ''
# syncPolicy, for details see: https://argoproj.github.io/argo-cd/user-guide/auto_sync
#syncPolicy:
# automated:
# prune: true
argo-cd:
installCRDs: false
@ -24,8 +29,21 @@ argo-cd:
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
# Run Argo on the controllers
controller:
args:
statusProcessors: "2"
operationProcessors: "1"
appResyncPeriod: "300"
metrics:
enabled: false
serviceMonitor:
enabled: true
namespace: monitoring
additionalLabels:
release: metrics
# controller to masters
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
@ -33,6 +51,14 @@ argo-cd:
effect: NoSchedule
repoServer:
metrics:
enabled: false
serviceMonitor:
enabled: true
namespace: monitoring
additionalLabels:
release: metrics
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
@ -44,10 +70,43 @@ argo-cd:
# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
url: argocd.example.com
resource.customizations: |
cert-manager.io/Certificate:
# Lua script for customizing the health status assessment
health.lua: |
hs = {}
if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
if condition.type == "Ready" and condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
end
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for certificate"
return hs
# Rename former https port to grpc, works with istio + insecure
service:
servicePortHttpsName: grpc
metrics:
enabled: false
serviceMonitor:
enabled: true
namespace: monitoring
additionalLabels:
release: metrics
extraArgs:
- --insecure
nodeSelector:
@ -70,5 +129,5 @@ argo-cd:
# argo-cd.istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
enabled: false
# argo-cd.istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: ingressgateway.istio-system.svc.cluster.local
gateway: istio-system/ingressgateway
ipBlocks: []

View File

@ -2,7 +2,8 @@ apiVersion: v2
name: kubezero-aws-ebs-csi-driver
description: KubeZero Umbrella Chart for aws-ebs-csi-driver
type: application
version: 0.1.1
version: 0.3.1
appVersion: 0.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
sources:
@ -17,6 +18,6 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
kubeVersion: ">= 1.16.0"

View File

@ -2,7 +2,7 @@ kubezero-aws-ebs-csi-driver
===========================
KubeZero Umbrella Chart for aws-ebs-csi-driver
Current chart version is `0.1.1`
Current chart version is `0.3.1`
Source code can be found [here](https://kubezero.com)
@ -10,7 +10,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## IAM Role
If you use kiam or kube2iam and restrict access on nodes running this controller please adjust:

View File

@ -1,16 +1,16 @@
apiVersion: v1
appVersion: "0.5.0"
name: aws-ebs-csi-driver
appVersion: 0.6.0
description: A Helm chart for AWS EBS CSI Driver
version: 0.3.0
kubeVersion: ">=1.13.0-0"
home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver
sources:
- https://github.com/kubernetes-sigs/aws-ebs-csi-driver
keywords:
- aws
- ebs
- csi
- aws
- ebs
- csi
kubeVersion: '>=1.13.0-0'
maintainers:
- name: leakingtapan
email: chengpan@amazon.com
- email: chengpan@amazon.com
name: leakingtapan
name: aws-ebs-csi-driver
sources:
- https://github.com/kubernetes-sigs/aws-ebs-csi-driver
version: 0.5.0

View File

@ -35,13 +35,24 @@ Create chart name and version as used by the chart label.
Common labels
*/}}
{{- define "aws-ebs-csi-driver.labels" -}}
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
{{ include "aws-ebs-csi-driver.selectorLabels" . }}
{{- if ne .Release.Name "kustomize" }}
helm.sh/chart: {{ include "aws-ebs-csi-driver.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{- end -}}
{{/*
Common selector labels
*/}}
{{- define "aws-ebs-csi-driver.selectorLabels" -}}
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
{{- if ne .Release.Name "kustomize" }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- end -}}
{{/*
@ -53,6 +64,6 @@ Convert the `--extra-volume-tags` command line arg from a map.
{{- $noop := printf "%s=%s" $key $value | append $result.pairs | set $result "pairs" -}}
{{- end -}}
{{- if gt (len $result.pairs) 0 -}}
- --extra-volume-tags={{- join "," $result.pairs -}}
{{- printf "%s=%s" "- --extra-volume-tags" (join "," $result.pairs) -}}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,20 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-attacher-role
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]

View File

@ -0,0 +1,35 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-provisioner-role
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]

View File

@ -0,0 +1,31 @@
{{- if .Values.enableVolumeResizing }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-resizer-role
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
{{- end}}

View File

@ -0,0 +1,35 @@
{{- if .Values.enableVolumeSnapshot }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-role
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if .Values.enableVolumeSnapshot }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-snapshotter-role
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
{{- end }}

View File

@ -0,0 +1,15 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-attacher-binding
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-attacher-role
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,15 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-provisioner-binding
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,18 @@
{{- if .Values.enableVolumeResizing }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-resizer-binding
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-resizer-role
apiGroup: rbac.authorization.k8s.io
{{- end}}

View File

@ -0,0 +1,18 @@
{{- if .Values.enableVolumeSnapshot }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshot-controller-binding
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-snapshot-controller-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if .Values.enableVolumeSnapshot }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshotter-binding
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -4,25 +4,26 @@ apiVersion: apps/v1
metadata:
name: ebs-csi-controller
namespace: kube-system
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
app: ebs-csi-controller
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "aws-ebs-csi-driver.labels" . | nindent 8 }}
{{- if .Values.podAnnotations }}
annotations: {{ toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
nodeSelector:
beta.kubernetes.io/os: linux
kubernetes.io/os: linux
kubernetes.io/arch: amd64
{{- with .Values.nodeSelector }}
{{ toYaml . | indent 8 }}
{{- end }}
@ -38,12 +39,18 @@ spec:
{{- end }}
containers:
- name: ebs-plugin
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
{{- if ne .Release.Name "kustomize" }}
- controller
{{ else }}
# - {all,controller,node} # specify the driver mode
{{- end }}
- --endpoint=$(CSI_ENDPOINT)
{{ include "aws-ebs-csi-driver.extra-volume-tags" . }}
{{- if .Values.extraVolumeTags }}
{{- include "aws-ebs-csi-driver.extra-volume-tags" . | nindent 12 }}
{{- end }}
- --logtostderr
- --v=5
env:

View File

@ -2,6 +2,8 @@ apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: ebs.csi.aws.com
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
spec:
attachRequired: true
podInfoOnMount: false

View File

@ -4,24 +4,34 @@ apiVersion: apps/v1
metadata:
name: ebs-csi-node
namespace: kube-system
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
app: ebs-csi-node
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- include "aws-ebs-csi-driver.labels" . | nindent 8 }}
{{- if .Values.node.podAnnotations }}
annotations: {{ toYaml .Values.node.podAnnotations | nindent 8 }}
{{- end }}
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
nodeSelector:
beta.kubernetes.io/os: linux
kubernetes.io/os: linux
kubernetes.io/arch: amd64
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
@ -33,7 +43,7 @@ spec:
- name: ebs-plugin
securityContext:
privileged: true
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
args:
- node
- --endpoint=$(CSI_ENDPOINT)

View File

@ -1,251 +0,0 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-attacher-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-attacher-role
apiGroup: rbac.authorization.k8s.io
{{- if .Values.enableVolumeSnapshot }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-snapshotter-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshot-controller-binding
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-snapshot-controller-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-leaderelection
namespace: kube-system
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: snapshot-controller-leaderelection
namespace: kube-system
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: Role
name: snapshot-controller-leaderelection
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- if .Values.enableVolumeResizing }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-resizer-role
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-resizer-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-resizer-role
apiGroup: rbac.authorization.k8s.io
{{- end}}

View File

@ -0,0 +1,15 @@
{{- if .Values.enableVolumeSnapshot }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-leaderelection
namespace: kube-system
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{- end }}

View File

@ -0,0 +1,19 @@
{{- if .Values.enableVolumeSnapshot }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: snapshot-controller-leaderelection
namespace: kube-system
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: Role
name: snapshot-controller-leaderelection
apiGroup: rbac.authorization.k8s.io
{{- end }}

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-csi-controller-sa
namespace: kube-system
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.controller.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
{{- if eq .Release.Name "kustomize" }}
#Enable if EKS IAM for SA is used
#annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::586565787010:role/ebs-csi-role
{{- end }}

View File

@ -1,18 +1,13 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-csi-controller-sa
namespace: kube-system
{{- with .Values.serviceAccount.controller.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.enableVolumeSnapshot }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-snapshot-controller
namespace: kube-system
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.snapshot.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -5,21 +5,25 @@ apiVersion: apps/v1
metadata:
name: ebs-snapshot-controller
namespace: kube-system
labels:
{{- include "aws-ebs-csi-driver.labels" . | nindent 4 }}
spec:
serviceName: ebs-snapshot-controller
replicas: 1
selector:
matchLabels:
app: ebs-snapshot-controller
{{- include "aws-ebs-csi-driver.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
app: ebs-snapshot-controller
{{- include "aws-ebs-csi-driver.labels" . | nindent 8 }}
spec:
serviceAccount: ebs-snapshot-controller
serviceAccountName: ebs-snapshot-controller
containers:
- name: snapshot-controller
image: quay.io/k8scsi/snapshot-controller:v2.0.1
image: quay.io/k8scsi/snapshot-controller:v2.1.1
args:
- --v=5
- --leader-election=false

View File

@ -6,7 +6,7 @@ replicaCount: 2
image:
repository: amazon/aws-ebs-csi-driver
tag: "v0.5.0"
tag: "v0.6.0"
pullPolicy: IfNotPresent
sidecars:
@ -18,7 +18,7 @@ sidecars:
tag: "v1.2.0"
snapshotterImage:
repository: quay.io/k8scsi/csi-snapshotter
tag: "v2.0.1"
tag: "v2.1.1"
livenessProbeImage:
repository: quay.io/k8scsi/livenessprobe
tag: "v1.1.0"

View File

@ -1,6 +1,8 @@
#!/bin/bash
set -ex
# Upstream doesn't have a proper Helm repo yet, so we just download the latest release and stuff it into charts
REPO="kubernetes-sigs/aws-ebs-csi-driver"
LATEST_RELEASE=$(curl -sL -s https://api.github.com/repos/${REPO}/releases | grep '"tag_name":' | cut -d'"' -f4 | grep -v -E "(alpha|beta|rc)" | sort -t"." -k 1,1 -k 2,2 -k 3,3 -k 4,4 | tail -n 1)

View File

@ -1,7 +1,7 @@
apiVersion: v2
name: kubezero-aws-efs-csi-driver
description: KubeZero Umbrella Chart for aws-efs-csi-driver
version: 0.1.0
version: 0.1.1
appVersion: 1.0.0
kubeVersion: ">=1.16.0-0"
home: https://kubezero.com
@ -18,7 +18,7 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
# Once they properly update upstream
# - name: aws-ebs-csi-driver

View File

@ -2,7 +2,7 @@ kubezero-aws-efs-csi-driver
===========================
KubeZero Umbrella Chart for aws-efs-csi-driver
Current chart version is `0.1.0`
Current chart version is `0.1.1`
Source code can be found [here](https://kubezero.com)
@ -10,7 +10,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Storage Class
Optionally creates the *efs-cs* storage class.
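
A minimal sketch of such a class, assuming the standard EFS CSI provisioner name:

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: efs-cs
provisioner: efs.csi.aws.com
```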

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-calico
description: KubeZero Umbrella Chart for Calico
type: application
version: 0.1.7
appVersion: 3.15
version: 0.1.9
appVersion: 3.15.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -13,6 +13,6 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
kubeVersion: ">= 1.16.0"

View File

@ -2,7 +2,7 @@ kubezero-calico
===============
KubeZero Umbrella Chart for Calico
Current chart version is `0.1.7`
Current chart version is `0.1.9`
Source code can be found [here](https://kubezero.com)
@ -10,7 +10,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## KubeZero default configuration

View File

@ -322,10 +322,6 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
{{- if .Values.migration }}
# Only run Calico on nodes that have been migrated.
projectcalico.org/node-network-during-migration: calico
{{- end }}
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
@ -345,7 +341,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.15.0
image: calico/cni:v3.15.1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@ -381,7 +377,7 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.15.0
image: calico/pod2daemon-flexvol:v3.15.1
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
@ -392,7 +388,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.15.0
image: calico/node:v3.15.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@ -594,7 +590,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.15.0
image: calico/kube-controllers:v3.15.1
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS

View File

@ -1,4 +1,5 @@
{{- if .Values.prometheus }}
apiVersion: v1
kind: Service
metadata:
labels:

View File

@ -5,7 +5,7 @@ metadata:
name: calico-node
labels:
k8s-app: calico-node
prometheus: kube-prometheus
release: metrics
spec:
jobLabel: k8s-app
selector:

View File

@ -0,0 +1,34 @@
# Once a pod is running:
# kubectl -n NAME-SPACE-TO-TEST exec -it pod/POD_NAME /bin/sh
apiVersion: apps/v1
kind: Deployment
metadata:
name: netshoot
namespace: kube-system
labels:
app: netshoot
spec:
replicas: 2
strategy:
type: Recreate
selector:
matchLabels:
app: netshoot
template:
metadata:
labels:
app: netshoot
spec:
containers:
- name: netshoot
image: nicolaka/netshoot
imagePullPolicy: Always
command:
- /bin/sleep
args:
- "3600"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.3.5
version: 0.3.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -12,7 +12,7 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: cert-manager
version: 0.15.1

View File

@ -2,7 +2,7 @@ kubezero-cert-manager
=====================
KubeZero Umbrella Chart for cert-manager
Current chart version is `0.3.5`
Current chart version is `0.3.6`
Source code can be found [here](https://kubezero.com)
@ -11,7 +11,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://charts.jetstack.io | cert-manager | 0.15.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## AWS - IAM Role
If you use kiam or kube2iam and restrict access on nodes running cert-manager please adjust:

View File

@ -0,0 +1,7 @@
#!/bin/bash
kubectl get -A -o yaml issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml
echo '---' >> cert-manager-backup.yaml
kubectl get -A -o yaml secrets --field-selector type=kubernetes.io/tls >> cert-manager-backup.yaml
echo '---' >> cert-manager-backup.yaml
kubectl get -o yaml secrets -n cert-manager letsencrypt-dns-prod >> cert-manager-backup.yaml
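# Restore sketch: on the target cluster (assuming cert-manager and its CRDs
# are already installed and the kubectl context points at that cluster),
# replay the export with:
#   kubectl apply -f cert-manager-backup.yaml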

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-istio
description: KubeZero Umbrella Chart for Istio
type: application
version: 0.2.0
appVersion: 1.6.5
version: 0.2.3
appVersion: 1.6.7
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -13,7 +13,7 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: istio-operator
version: ">= 1.6"

View File

@ -5,7 +5,7 @@ KubeZero Umbrella Chart for Istio
Installs Istio Operator and KubeZero Istio profile
Current chart version is `0.2.0`
Current chart version is `0.2.3`
Source code can be found [here](https://kubezero.com)
@ -14,7 +14,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| | istio-operator | >= 1.6 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## KubeZero default configuration
- mapped istio-operator to run on the controller nodes only
@ -24,11 +24,12 @@ Source code can be found [here](https://kubezero.com)
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| ingress.autoscaleEnabled | bool | `false` | |
| ingress.private | bool | `true` | |
| ingress.private.enabled | bool | `true` | |
| ingress.private.nodeSelector | string | `"31080_31443_30671_30672_31224"` | |
| ingress.replicaCount | int | `2` | |
| ingress.type | string | `"NodePort"` | |
| istio-operator.hub | string | `"docker.io/istio"` | |
| istio-operator.tag | string | `"1.6.5"` | |
| istio-operator.tag | string | `"1.6.7"` | |
| istiod.autoscaleEnabled | bool | `false` | |
| istiod.replicaCount | int | `1` | |

View File

@ -0,0 +1,7 @@
#!/bin/bash
# First delete old 1.4
kubectl delete -f ingress-gateway.yaml
kubectl delete -f istio.yaml
kubectl delete -f istio-init.yaml
kubectl delete -f namespace.yaml

View File

@ -4,6 +4,8 @@ kind: Certificate
metadata:
name: public-ingress-cert
namespace: istio-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
secretName: public-ingress-cert
issuerRef:

View File

@ -3,6 +3,8 @@ kind: Gateway
metadata:
name: ingressgateway
namespace: istio-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
selector:
istio: ingressgateway
@ -27,12 +29,15 @@ spec:
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
credentialName: public-ingress-cert
{{- if .Values.ingress.private.enabled }}
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: private-ingressgateway
namespace: istio-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
selector:
istio: private-ingressgateway
@ -56,9 +61,22 @@ spec:
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
credentialName: public-ingress-cert
- port:
number: 5672
name: amqp
protocol: TCP
hosts:
- "*"
- port:
number: 5671
name: amqps
protocol: TCP
hosts:
- "*"
- port:
number: 24224
name: fluentd-forward
protocol: TCP
hosts:
- "*"
{{- end }}

View File

@ -1,9 +1,11 @@
{{- if .Values.ingress.private }}
{{- if .Values.ingress.private.enabled }}
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
name: kubezero-istio-private-ingress
namespace: istio-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
profile: empty
components:
@ -28,25 +30,40 @@ spec:
name: istio-private-ingressgateway
{{- end }}
env:
# https://github.com/istio/istio/issues/26524
#- name: TERMINATION_DRAIN_DURATION_SECONDS
# value: "60"
- name: ISTIO_META_HTTP10
value: '"1"'
- name: ISTIO_META_ROUTER_MODE
value: standard
- name: ISTIO_META_IDLE_TIMEOUT
value: "3600s"
{{- if eq .Values.ingress.type "NodePort" }}
nodeSelector:
node.kubernetes.io/ingress.private: "31080_31443_30671_30672_31224"
node.kubernetes.io/ingress.private: "{{ .Values.ingress.private.nodeSelector }}"
{{- end }}
resources:
limits:
cpu: 2000m
# cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
memory: 64Mi
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
overlays:
- apiVersion: apps/v1
kind: Deployment
name: istio-private-ingressgateway
patches:
- path: spec.template.spec.containers.[name:istio-proxy].lifecycle
value: {"preStop": {"exec": {"command": ["sh", "-c", "curl -X POST http://localhost:15000/healthcheck/fail && sleep 30"]}}}
- path: spec.template.spec.terminationGracePeriodSeconds
value: 90
values:
gateways:
istio-ingressgateway:
@ -63,6 +80,11 @@ spec:
values: istio-private-ingressgateway
type: {{ default "NodePort" .Values.ingress.type }}
ports:
- name: http-status
port: 15021
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31021
{{- end }}
- name: http2
port: 80
{{- if eq .Values.ingress.type "NodePort" }}
@ -73,21 +95,21 @@ spec:
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31443
{{- end }}
- name: amqp
port: 5672
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30672
{{- end }}
- name: amqps
port: 5671
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30671
{{- end }}
- name: fluentd-forward
port: 24224
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31224
{{- end }}
- name: amqps
port: 5671
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31671
{{- end }}
- name: amqp
port: 5672
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31672
{{- end }}
sds:
enabled: true
image: node-agent-k8s

View File

@ -3,6 +3,8 @@ kind: IstioOperator
metadata:
name: kubezero-istio
namespace: istio-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
profile: empty
addonComponents:
@ -32,25 +34,40 @@ spec:
name: istio-ingressgateway
{{- end }}
env:
# https://github.com/istio/istio/issues/26524
#- name: TERMINATION_DRAIN_DURATION_SECONDS
# value: "60"
- name: ISTIO_META_HTTP10
value: '"1"'
- name: ISTIO_META_ROUTER_MODE
value: standard
- name: ISTIO_META_IDLE_TIMEOUT
value: "3600s"
{{- if eq .Values.ingress.type "NodePort" }}
nodeSelector:
node.kubernetes.io/ingress.public: "30080_30443"
{{- end }}
resources:
limits:
cpu: 2000m
# cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
memory: 64Mi
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
overlays:
- apiVersion: apps/v1
kind: Deployment
name: istio-ingressgateway
patches:
- path: spec.template.spec.containers.[name:istio-proxy].lifecycle
value: {"preStop": {"exec": {"command": ["sh", "-c", "curl -X POST http://localhost:15000/healthcheck/fail && sleep 30"]}}}
- path: spec.template.spec.terminationGracePeriodSeconds
value: 90
name: istio-ingressgateway
pilot:
enabled: true
@ -95,6 +112,11 @@ spec:
values: istio-ingressgateway
type: {{ default "NodePort" .Values.ingress.type }}
ports:
- name: http-status
port: 15021
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30021
{{- end }}
- name: http2
port: 80
{{- if eq .Values.ingress.type "NodePort" }}

View File

@ -4,3 +4,4 @@ metadata:
name: istio-system
labels:
istio-injection: disabled
{{ include "kubezero-lib.labels" . | indent 4 }}

View File

@ -1,7 +1,7 @@
#!/bin/bash
set -ex
ISTIO_VERSION=1.6.5
ISTIO_VERSION=1.6.7
NAME="istio-$ISTIO_VERSION"
URL="https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istio-${ISTIO_VERSION}-linux-amd64.tar.gz"

View File

@ -1,15 +0,0 @@
#!/bin/bash
# First delete old 1.4
kubectl delete -f ingress-gateway.yaml
kubectl delete -f istio.yaml
kubectl delete -f istio-init.yaml
kubectl delete -f namespace.yaml
# Now we need to install the new Istio Operator via KubeZero
# deploy the CR for 1.6
kubectl apply -f istio-1.6.yaml
# add the additional private ingress gateway as a dedicated CR
kubectl apply -f istio-1.6-private-ingress.yaml

View File

@ -6,10 +6,12 @@ ingress:
autoscaleEnabled: false
replicaCount: 2
type: NodePort
private: true
private:
enabled: true
nodeSelector: "31080_31443_30671_30672_31224"
#dnsNames:
#- "*.example.com"
istio-operator:
hub: docker.io/istio
tag: 1.6.5
tag: 1.6.7

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-kiam
description: KubeZero Umbrella Chart for Kiam
type: application
version: 0.2.6
version: 0.2.8
appVersion: 3.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -13,7 +13,7 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: kiam
version: 5.8.1

View File

@ -2,7 +2,7 @@ kubezero-kiam
=============
KubeZero Umbrella Chart for Kiam
Current chart version is `0.2.6`
Current chart version is `0.2.8`
Source code can be found [here](https://kubezero.com)
@ -11,7 +11,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://uswitch.github.io/kiam-helm-charts/charts/ | kiam | 5.8.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## KubeZero default configuration
We run agents on the controllers as well, so that e.g. the EBS CSI controller and other control-plane workloads can assume IAM roles.
@ -25,7 +25,8 @@ The required certificates for Kiam server and agents are provided by a local cer
[KubeZero cert-manager](../kubezero-cert-manager/README.md)
## Metadata restrictions
Required for the *csi ebs plugin* and most likely various others assuming basic AWS information.
Some services require access to basic AWS instance information. One example is the `aws-ebs-csi` controller.
By default all access to the meta-data service is blocked, except for:
- `/latest/meta-data/instance-id`
- `/latest/dynamic/instance-identity/document`
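
A hedged values sketch of how this whitelist can be expressed via the kiam agent's `--whitelist-route-regexp` flag (the exact key layout depends on the kiam chart version):

```yaml
kiam:
  agent:
    extraArgs:
      # allow only the two routes listed above; everything else stays blocked
      whitelist-route-regexp: "^/latest/(meta-data/instance-id|dynamic/instance-identity/document)$"
```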
@ -40,6 +41,8 @@ Required for the *csi ebs plugin* and most likely various others assuming basic
| kiam.agent.image.tag | string | `"v3.6"` | |
| kiam.agent.log.level | string | `"warn"` | |
| kiam.agent.prometheus.servicemonitor.enabled | bool | `false` | |
| kiam.agent.prometheus.servicemonitor.interval | string | `"30s"` | |
| kiam.agent.prometheus.servicemonitor.labels.release | string | `"metrics"` | |
| kiam.agent.sslCertHostPath | string | `"/etc/ssl/certs"` | |
| kiam.agent.tlsCerts.caFileName | string | `"ca.crt"` | |
| kiam.agent.tlsCerts.certFileName | string | `"tls.crt"` | |
@ -56,6 +59,8 @@ Required for the *csi ebs plugin* and most likely various others assuming basic
| kiam.server.log.level | string | `"warn"` | |
| kiam.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| kiam.server.prometheus.servicemonitor.enabled | bool | `false` | |
| kiam.server.prometheus.servicemonitor.interval | string | `"30s"` | |
| kiam.server.prometheus.servicemonitor.labels.release | string | `"metrics"` | |
| kiam.server.service.port | int | `6444` | |
| kiam.server.service.targetPort | int | `6444` | |
| kiam.server.sslCertHostPath | string | `"/etc/ssl/certs"` | |
@ -76,3 +81,5 @@ Required for the *csi ebs plugin* and most likely various others assuming basic
## Resources
- https://github.com/uswitch/kiam
- https://www.bluematador.com/blog/iam-access-in-kubernetes-kube2iam-vs-kiam
- [Grafana Dashboard](https://raw.githubusercontent.com/uswitch/kiam/master/docs/dashboard-prom.json)
![Kiam overview](./kiam_architecure.png)

View File

@ -19,7 +19,8 @@ The required certificates for Kiam server and agents are provided by a local cer
[KubeZero cert-manager](../kubezero-cert-manager/README.md)
## Metadata restrictions
Required for the *csi ebs plugin* and most likely various others assuming basic AWS information.
Some services require access to basic AWS instance information. One example is the `aws-ebs-csi` controller.
By default all access to the meta-data service is blocked, except for:
- `/latest/meta-data/instance-id`
- `/latest/dynamic/instance-identity/document`
@ -34,3 +35,5 @@ Required for the *csi ebs plugin* and most likely various others assuming basic
## Resources
- https://github.com/uswitch/kiam
- https://www.bluematador.com/blog/iam-access-in-kubernetes-kube2iam-vs-kiam
- [Grafana Dashboard](https://raw.githubusercontent.com/uswitch/kiam/master/docs/dashboard-prom.json)
![Kiam overview](./kiam_architecure.png)

Binary file not shown (new image, 43 KiB).

View File

@ -26,6 +26,9 @@ kiam:
prometheus:
servicemonitor:
enabled: false
interval: 30s
labels:
release: metrics
log:
level: warn
@@ -51,6 +54,9 @@ kiam:
prometheus:
servicemonitor:
enabled: false
interval: 30s
labels:
release: metrics
log:
level: warn
# extraEnv:


@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-lib
description: KubeZero helm library - common helm functions and blocks
type: library
version: 0.1.2
version: 0.1.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:


@@ -2,7 +2,32 @@
Common set of labels
*/ -}}
{{- define "kubezero-lib.labels" -}}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
helm.sh/chart: {{ include "kubezero-lib.chart" . }}
app.kubernetes.io/name: {{ include "kubezero-lib.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}
{{- /*
Common naming functions
*/ -}}
{{- define "kubezero-lib.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "kubezero-lib.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- define "kubezero-lib.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
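For reference, a chart that depends on kubezero-lib consumes these helpers like this (the ConfigMap is just an illustrative resource; the pattern matches the templates later in this PR):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "kubezero-lib.fullname" . }}-example   # hypothetical resource
  namespace: {{ .Release.Namespace }}
  labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
data:
  example: "value"
```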


@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-local-volume-provisioner
description: KubeZero Umbrella Chart for local-static-provisioner
type: application
version: 0.0.1
version: 0.1.0
appVersion: 2.3.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -13,6 +13,6 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
kubeVersion: ">= 1.16.0"


@@ -4,7 +4,7 @@ KubeZero Umbrella Chart for local-static-provisioner
Provides persistent volumes backed by local volumes, eg. additional SSDs or spindles.
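For illustration, a claim against one of the provisioned local storage classes could look like this (`local-sc-xfs` is borrowed from the logging example values; adjust to your setup):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: fast-local-data            # hypothetical claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-sc-xfs   # class name assumed
  resources:
    requests:
      storage: 16Gi
```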
Current chart version is `0.0.1`
Current chart version is `0.1.0`
Source code can be found [here](https://kubezero.com)
@@ -12,7 +12,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## KubeZero default configuration


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,30 @@
apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.3.1
appVersion: 1.2.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- elasticsearch
- kibana
- fluentd
- fluent-bit
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: fluentd
version: 2.5.1
repository: https://kubernetes-charts.storage.googleapis.com/
condition: fluentd.enabled
- name: fluent-bit
version: 0.6.3
repository: https://zero-down-time.github.io/kubezero/
# repository: https://fluent.github.io/helm-charts
condition: fluent-bit.enabled
kubeVersion: ">= 1.16.0"


@@ -0,0 +1,106 @@
kubezero-logging
================
KubeZero Umbrella Chart for complete EFK stack
Current chart version is `0.3.1`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://kubernetes-charts.storage.googleapis.com/ | fluentd | 2.5.1 |
| https://zero-down-time.github.io/kubezero/ | fluent-bit | 0.6.3 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Changes from upstream
### ECK
- Operator mapped to controller nodes
### ES
- SSL disabled (TODO: provide cluster certs and set up Kibana/Fluentd to use HTTPS incl. client certs)
- Installed Plugins:
- repository-s3
- elasticsearch-prometheus-exporter
- [Cross AZ Zone awareness](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-advanced-node-scheduling.html#k8s-availability-zone-awareness) is implemented via nodeSets
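A values sketch for zone-aware nodeSets (zone names and the storage class are illustrative; the fields mirror what the Elasticsearch template in this chart consumes):

```yaml
es:
  nodeSets:
    - name: zone-a
      count: 2
      zone: us-west-2a          # becomes node.attr.zone and a nodeAffinity pin
      storage:
        size: 512Gi
        class: ebs-sc-gp2-xfs   # illustrative storage class
    - name: zone-b
      count: 2
      zone: us-west-2b
      storage:
        size: 512Gi
        class: ebs-sc-gp2-xfs
```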
### Kibana
- increased the Elasticsearch request timeout to 3 minutes
## Manual tasks ATM
- Install index template
- set up Kibana
- create `logstash-*` Index Pattern
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| elastic_password | string | `""` | |
| es.nodeSets | list | `[]` | |
| es.prometheus | bool | `false` | |
| es.s3Snapshot.enabled | bool | `false` | |
| es.s3Snapshot.iamrole | string | `""` | |
| fluent-bit.config.customParsers | string | `"[PARSER]\n # http://rubular.com/r/tjUt3Awgg4\n Name cri\n Format regex\n Regex ^(?<time>[^ ]+) (?<stream>stdout|stderr) (?<logtag>[^ ]*) (?<log>.*)$\n Time_Key time\n Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n # Decode_Field_As json log\n"` | |
| fluent-bit.config.filters | string | `"[FILTER]\n Name kubernetes\n Match kube.*\n Merge_Log On\n Keep_Log Off\n K8S-Logging.Parser On\n K8S-Logging.Exclude On\n\n[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/etc/functions.lua\n call dedot\n"` | |
| fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n Parser cri\n Tag kube.*\n Mem_Buf_Limit 5MB\n Skip_Long_Lines On\n Refresh_Interval 10\n DB /var/log/flb_kube.db\n DB.Sync Normal\n"` | |
| fluent-bit.config.lua | string | `"function dedot(tag, timestamp, record)\n if record[\"kubernetes\"] == nil then\n return 0, 0, 0\n end\n dedot_keys(record[\"kubernetes\"][\"annotations\"])\n dedot_keys(record[\"kubernetes\"][\"labels\"])\n return 1, timestamp, record\nend\n\nfunction dedot_keys(map)\n if map == nil then\n return\n end\n local new_map = {}\n local changed_keys = {}\n for k, v in pairs(map) do\n local dedotted = string.gsub(k, \"%.\", \"_\")\n if dedotted ~= k then\n new_map[dedotted] = v\n changed_keys[k] = true\n end\n end\n for k in pairs(changed_keys) do\n map[k] = nil\n end\n for k, v in pairs(new_map) do\n map[k] = v\n end\nend\n"` | |
| fluent-bit.config.outputs | string | `"[OUTPUT]\n Match *\n Name forward\n Host fluentd\n Port 24224\n tls on\n tls.verify off\n Shared_Key cloudbender\n"` | |
| fluent-bit.config.service | string | `"[SERVICE]\n Flush 5\n Daemon Off\n Log_Level warn\n Parsers_File parsers.conf\n Parsers_File custom_parsers.conf\n HTTP_Server On\n HTTP_Listen 0.0.0.0\n HTTP_Port 2020\n"` | |
| fluent-bit.enabled | bool | `true` | |
| fluent-bit.serviceMonitor.enabled | bool | `true` | |
| fluent-bit.serviceMonitor.namespace | string | `"monitoring"` | |
| fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | |
| fluent-bit.test.enabled | bool | `false` | |
| fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| fluentd.configMaps."forward-input.conf" | string | `"<source>\n @type forward\n port 24224\n bind 0.0.0.0\n skip_invalid_event true\n <transport tls>\n cert_path /mnt/fluentd-certs/tls.crt\n private_key_path /mnt/fluentd-certs/tls.key\n </transport>\n <security>\n self_hostname \"#{ENV['HOSTNAME']}\"\n shared_key \"#{ENV['FLUENTD_SHARED_KEY']}\"\n </security>\n</source>\n"` | |
| fluentd.configMaps."output.conf" | string | `"<match **>\n @id elasticsearch\n @type elasticsearch\n @log_level info\n include_tag_key true\n id_key id\n remove_keys id\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n host \"#{ENV['OUTPUT_HOST']}\"\n port \"#{ENV['OUTPUT_PORT']}\"\n scheme \"#{ENV['OUTPUT_SCHEME']}\"\n ssl_version \"#{ENV['OUTPUT_SSL_VERSION']}\"\n ssl_verify \"#{ENV['OUTPUT_SSL_VERIFY']}\"\n user \"#{ENV['OUTPUT_USER']}\"\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n logstash_format true\n reload_connections false\n reconnect_on_error true\n reload_on_failure true\n request_timeout 15s\n\n <buffer>\n @type file\n path /var/log/fluentd-buffers/kubernetes.system.buffer\n flush_mode interval\n flush_thread_count 2\n flush_interval 5s\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 60m\n retry_max_interval 30\n chunk_limit_size \"#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}\"\n queue_limit_length \"#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}\"\n overflow_action drop_oldest_chunk\n </buffer>\n</match>\n"` | |
| fluentd.enabled | bool | `false` | |
| fluentd.env.OUTPUT_SSL_VERIFY | string | `"false"` | |
| fluentd.env.OUTPUT_USER | string | `"elastic"` | |
| fluentd.extraEnvVars[0].name | string | `"OUTPUT_PASSWORD"` | |
| fluentd.extraEnvVars[0].valueFrom.secretKeyRef.key | string | `"elastic"` | |
| fluentd.extraEnvVars[0].valueFrom.secretKeyRef.name | string | `"logging-es-elastic-user"` | |
| fluentd.extraEnvVars[1].name | string | `"FLUENTD_SHARED_KEY"` | |
| fluentd.extraEnvVars[1].valueFrom.secretKeyRef.key | string | `"shared_key"` | |
| fluentd.extraEnvVars[1].valueFrom.secretKeyRef.name | string | `"logging-fluentd-secret"` | |
| fluentd.extraVolumeMounts[0].mountPath | string | `"/mnt/fluentd-certs"` | |
| fluentd.extraVolumeMounts[0].name | string | `"fluentd-certs"` | |
| fluentd.extraVolumeMounts[0].readOnly | bool | `true` | |
| fluentd.extraVolumes[0].name | string | `"fluentd-certs"` | |
| fluentd.extraVolumes[0].secret.secretName | string | `"fluentd-certificate"` | |
| fluentd.istio.enabled | bool | `false` | |
| fluentd.metrics.enabled | bool | `false` | |
| fluentd.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| fluentd.metrics.serviceMonitor.enabled | bool | `true` | |
| fluentd.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| fluentd.output.host | string | `"logging-es-http"` | |
| fluentd.plugins.enabled | bool | `false` | |
| fluentd.plugins.pluginsList | string | `nil` | |
| fluentd.replicaCount | int | `2` | |
| fluentd.service.ports[0].containerPort | int | `24224` | |
| fluentd.service.ports[0].name | string | `"tcp-forward"` | |
| fluentd.service.ports[0].protocol | string | `"TCP"` | |
| fluentd.service.ports[1].containerPort | int | `9880` | |
| fluentd.service.ports[1].name | string | `"http-fluentd"` | |
| fluentd.service.ports[1].protocol | string | `"TCP"` | |
| fluentd.shared_key | string | `"cloudbender"` | |
| kibana.count | int | `1` | |
| kibana.istio.enabled | bool | `false` | |
| kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
| kibana.istio.url | string | `""` | |
| version | string | `"7.8.1"` | |
## Resources:
- https://www.elastic.co/downloads/elastic-cloud-kubernetes
- https://github.com/elastic/cloud-on-k8s


@@ -0,0 +1,41 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
## Changes from upstream
### ECK
- Operator mapped to controller nodes
### ES
- SSL disabled (TODO: provide cluster certs and set up Kibana/Fluentd to use HTTPS incl. client certs)
- Installed Plugins:
- repository-s3
- elasticsearch-prometheus-exporter
- [Cross AZ Zone awareness](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-advanced-node-scheduling.html#k8s-availability-zone-awareness) is implemented via nodeSets
### Kibana
- increased the Elasticsearch request timeout to 3 minutes
## Manual tasks ATM
- Install index template
- set up Kibana
- create `logstash-*` Index Pattern
{{ template "chart.valuesSection" . }}
## Resources:
- https://www.elastic.co/downloads/elastic-cloud-kubernetes
- https://github.com/elastic/cloud-on-k8s


@@ -0,0 +1,5 @@
#!/bin/bash
# We only need to delete the ServiceMonitor and VirtualService; the rest is taken over by the new chart and we don't lose data
kubectl delete -n logging VirtualService kibana-logging
kubectl delete -n logging ServiceMonitor es-logging

File diff suppressed because it is too large.


@@ -0,0 +1,6 @@
resources:
- all-in-one.yaml
# map operator to controller nodes
patchesStrategicMerge:
- map-operator.yaml


@@ -0,0 +1,14 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elastic-operator
spec:
template:
spec:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule


@@ -0,0 +1,7 @@
#!/bin/bash
ECK_VERSION=1.2.1
curl -o all-in-one.yaml https://download.elastic.co/downloads/eck/${ECK_VERSION}/all-in-one.yaml
kubectl kustomize . > ../templates/eck-operator.yaml

File diff suppressed because it is too large.


@@ -0,0 +1,91 @@
{{- if .Values.es.nodeSets }}
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: {{ template "kubezero-lib.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
version: {{ .Values.version }}
nodeSets:
{{- range .Values.es.nodeSets }}
- name: {{ .name }}
config:
node.master: true
node.data: true
node.ingest: true
node.ml: false
{{- if $.Values.es.prometheus }}
prometheus.indices: false
{{- end }}
{{- if .zone }}
node.attr.zone: {{ .zone }}
cluster.routing.allocation.awareness.attributes: zone
{{- end }}
podTemplate:
{{- if $.Values.es.s3Snapshot.iamrole }}
metadata:
annotations:
iam.amazonaws.com/role: {{ $.Values.es.s3Snapshot.iamrole }}
{{- end }}
spec:
{{- if or $.Values.es.prometheus $.Values.es.s3Snapshot.enabled }}
initContainers:
- name: install-plugins
command:
- sh
- -c
- |
{{- if $.Values.es.s3Snapshot.enabled }}
bin/elasticsearch-plugin install --batch repository-s3;
{{- end }}
{{- if $.Values.es.prometheus }}
bin/elasticsearch-plugin install --batch https://github.com/vvanholl/elasticsearch-prometheus-exporter/releases/download/{{ $.Values.version }}.0/prometheus-exporter-{{ $.Values.version }}.0.zip;
{{- end }}
{{- end }}
containers:
- name: elasticsearch
resources:
requests:
cpu: 100m
memory: 2500Mi
limits:
memory: 4Gi
env:
- name: ES_JAVA_OPTS
value: "-Xms2g -Xmx2g"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }}
topologyKey: kubernetes.io/hostname
{{- if .zone }}
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: failure-domain.beta.kubernetes.io/zone
operator: In
values:
- {{ .zone }}
{{- end }}
count: {{ .count }}
volumeClaimTemplates:
- metadata:
name: elasticsearch-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .storage.size }}
storageClassName: {{ .storage.class }}
{{- end }}
http:
tls:
selfSignedCertificate:
disabled: true
{{- end }}


@@ -0,0 +1,18 @@
{{- if .Values.kibana.istio.enabled }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: {{ template "kubezero-lib.fullname" . }}-kibana
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
hosts:
- {{ .Values.kibana.istio.url }}
gateways:
- {{ default "istio-system/ingressgateway" .Values.kibana.istio.gateway }}
http:
- route:
- destination:
host: {{ template "kubezero-lib.fullname" . }}-kb-http
{{- end }}


@@ -0,0 +1,36 @@
{{- if .Values.es.nodeSets }}
# Only deploy Kibana if we have a local ES cluster
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
name: {{ template "kubezero-lib.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
version: {{ .Values.version }}
count: {{ .Values.kibana.count }}
elasticsearchRef:
name: {{ template "kubezero-lib.fullname" . }}
namespace: {{ .Release.Namespace }}
config:
elasticsearch.requestTimeout: 180000
elasticsearch.shardTimeout: 180000
#xpack.monitoring.enabled: false
#xpack.monitoring.ui.enabled: false
#xpack.ml.enabled: false
podTemplate:
spec:
containers:
- name: kibana
resources:
requests:
memory: 1Gi
cpu: 100m
limits:
memory: 2Gi
http:
tls:
selfSignedCertificate:
disabled: true
{{- end }}


@@ -0,0 +1,16 @@
{{- if .Values.es.nodeSets }}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: {{ template "kubezero-lib.fullname" $ }}-es-elastic-user
  namespace: {{ .Release.Namespace }}
  labels:
    common.k8s.elastic.co/type: elasticsearch
    elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }}
{{ include "kubezero-lib.labels" . | indent 4 }}
data:
username: {{ "elastic" | b64enc | quote }}
elastic: {{ .Values.elastic_password | b64enc | quote }}
{{- end }}


@@ -0,0 +1,28 @@
{{- if .Values.es.prometheus }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "kubezero-lib.fullname" . }}-es
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
release: metrics
spec:
endpoints:
- basicAuth:
username:
name: {{ template "kubezero-lib.fullname" $ }}-es-elastic-user
key: username
password:
name: {{ template "kubezero-lib.fullname" $ }}-es-elastic-user
key: elastic
port: http
path: /_prometheus/metrics
selector:
matchExpressions:
- key: elasticsearch.k8s.elastic.co/statefulset-name
operator: DoesNotExist
matchLabels:
common.k8s.elastic.co/type: elasticsearch
elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }}
{{- end }}


@@ -0,0 +1,16 @@
{{- if .Values.fluentd.enabled }}
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: fluentd-ingress-cert
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
secretName: fluentd-certificate
issuerRef:
name: letsencrypt-dns-prod
kind: ClusterIssuer
dnsNames:
- "{{ .Values.fluentd.url }}"
{{- end }}


@@ -0,0 +1,28 @@
{{- if .Values.fluentd.istio.enabled }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: {{ template "kubezero-lib.fullname" $ }}-fluentd
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
gateways:
- {{ .Values.fluentd.istio.gateway }}
hosts:
- {{ .Values.fluentd.url }}
tcp:
- match:
- port: 24224
route:
- destination:
host: {{ template "kubezero-lib.fullname" $ }}-fluentd
port:
number: 24224
http:
- route:
- destination:
host: {{ template "kubezero-lib.fullname" $ }}-fluentd
port:
number: 9880
{{- end }}


@@ -0,0 +1,12 @@
{{- if .Values.fluentd.enabled }}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
name: {{ template "kubezero-lib.fullname" $ }}-fluentd-secret
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
data:
shared_key: {{ .Values.fluentd.shared_key | b64enc | quote }}
{{- end }}


@@ -0,0 +1,34 @@
# Default values for zdt-logging.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# This is for backwards compatibility with older zdt-logging setup
fullnameOverride: logging
# Version for ElasticSearch and Kibana have to match so we define it at top-level
version: 7.6.0
elastic_password: "dsfsfs" # super_secret_elastic_password
es:
nodeSets:
- name: default-zone-0
count: 2
storage:
size: 512Gi
class: ebs-sc-gp2-xfs
zone: us-west-2a
s3Snapshot:
enabled: true
iamrole: "dfsf" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
prometheus: true
kibana:
istio:
enabled: true
url: kibana.example.com
gateway: istio-system/private-ingressgateway
fluentd:
enabled: true


@@ -0,0 +1,8 @@
fluent-bit:
enabled: true
metrics:
enabled: true
url: fluentd.example.com


@@ -0,0 +1,11 @@
fluentd:
enabled: true
metrics:
enabled: true
url: fluentd.example.com
istio:
enabled: true
gateway: istio-system/private-ingressgateway


@@ -0,0 +1,292 @@
# use this for backwards compatibility
# fullnameOverride: ""
# Version for ElasticSearch and Kibana have to match so we define it at top-level
version: 7.8.1
elastic_password: "" # super_secret_elastic_password
es:
nodeSets: []
#- count: 2
# storage:
# size: 16Gi
# class: local-sc-xfs
# zone: us-west-2a
s3Snapshot:
enabled: false
iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
prometheus: false
kibana:
count: 1
#servicename: kibana.example.com
istio:
enabled: false
gateway: "istio-system/ingressgateway"
url: "" # kibana.example.com
fluentd:
enabled: false
image:
repository: quay.io/fluentd_elasticsearch/fluentd
tag: v2.9.0
istio:
enabled: false
# broken as of 2.5.1 ;-(
# useStatefulSet: true
replicaCount: 2
plugins:
enabled: false
pluginsList:
#- fluent-plugin-detect-exceptions
#- fluent-plugin-s3
#- fluent-plugin-grok-parser
#persistence:
# enabled: true
# storageClass: "ebs-sc-gp2-xfs"
# accessMode: ReadWriteOnce
# size: 4Gi
service:
ports:
- name: tcp-forward
protocol: TCP
containerPort: 24224
- name: http-fluentd
protocol: TCP
containerPort: 9880
metrics:
enabled: false
serviceMonitor:
enabled: true
additionalLabels:
release: metrics
namespace: monitoring
output:
host: logging-es-http
shared_key: "cloudbender"
env:
OUTPUT_USER: elastic
OUTPUT_SSL_VERIFY: "false"
extraEnvVars:
- name: OUTPUT_PASSWORD
valueFrom:
secretKeyRef:
name: logging-es-elastic-user
key: elastic
- name: FLUENTD_SHARED_KEY
valueFrom:
secretKeyRef:
name: logging-fluentd-secret
key: shared_key
extraVolumes:
- name: fluentd-certs
secret:
secretName: fluentd-certificate
extraVolumeMounts:
- name: fluentd-certs
mountPath: /mnt/fluentd-certs
readOnly: true
configMaps:
forward-input.conf: |
<source>
@type forward
port 24224
bind 0.0.0.0
skip_invalid_event true
<transport tls>
cert_path /mnt/fluentd-certs/tls.crt
private_key_path /mnt/fluentd-certs/tls.key
</transport>
<security>
self_hostname "#{ENV['HOSTNAME']}"
shared_key "#{ENV['FLUENTD_SHARED_KEY']}"
</security>
</source>
output.conf: |
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
id_key id
remove_keys id
# KubeZero pipeline incl. GeoIP etc.
pipeline fluentd
host "#{ENV['OUTPUT_HOST']}"
port "#{ENV['OUTPUT_PORT']}"
scheme "#{ENV['OUTPUT_SCHEME']}"
ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
ssl_verify "#{ENV['OUTPUT_SSL_VERIFY']}"
user "#{ENV['OUTPUT_USER']}"
password "#{ENV['OUTPUT_PASSWORD']}"
logstash_format true
reload_connections false
reconnect_on_error true
reload_on_failure true
request_timeout 30s
suppress_type_name true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
flush_thread_count 2
flush_interval 5s
flush_at_shutdown true
retry_type exponential_backoff
retry_timeout 60m
retry_max_interval 30
chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
overflow_action drop_oldest_chunk
</buffer>
</match>
# filter.conf: |
# <filter auth system.auth>
# @type parser
# key_name message
# reserve_data true
# reserve_time true
# <parse>
# @type grok
#
# # SSH
# <grok>
# pattern %{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user )?%{DATA:system.auth.user} from %{IPORHOST:system.auth.ip} port %{NUMBER:system.auth.port} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?
# </grok>
# <grok>
# pattern %{DATA:system.auth.ssh.event} user %{DATA:system.auth.user} from %{IPORHOST:system.auth.ip}
# </grok>
#
# # sudo
# <grok>
# pattern \s*%{DATA:system.auth.user} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}
# </grok>
#
# # Users
# <grok>
# pattern new group: name=%{DATA:system.auth.groupadd.name}, GID=%{NUMBER:system.auth.groupadd.gid}
# </grok>
# <grok>
# pattern new user: name=%{DATA:system.auth.useradd.name}, UID=%{NUMBER:system.auth.useradd.uid}, GID=%{NUMBER:system.auth.useradd.gid}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$
# </grok>
#
# <grok>
# pattern %{GREEDYDATA:message}
# </grok>
# </parse>
# </filter>
fluent-bit:
enabled: false
test:
enabled: false
config:
outputs: |
[OUTPUT]
Match *
Name forward
Host logging-fluentd
Port 24224
tls on
tls.verify off
Shared_Key cloudbender
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
Parser cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
Refresh_Interval 10
DB /var/log/flb_kube.db
DB.Sync Normal
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
[FILTER]
Name lua
Match kube.*
script /fluent-bit/etc/functions.lua
call dedot
service: |
[SERVICE]
Flush 5
Daemon Off
Log_Level warn
Parsers_File parsers.conf
Parsers_File custom_parsers.conf
HTTP_Server On
HTTP_Listen 0.0.0.0
HTTP_Port 2020
lua: |
function dedot(tag, timestamp, record)
if record["kubernetes"] == nil then
return 0, 0, 0
end
dedot_keys(record["kubernetes"]["annotations"])
dedot_keys(record["kubernetes"]["labels"])
return 1, timestamp, record
end
function dedot_keys(map)
if map == nil then
return
end
local new_map = {}
local changed_keys = {}
for k, v in pairs(map) do
local dedotted = string.gsub(k, "%.", "_")
if dedotted ~= k then
new_map[dedotted] = v
changed_keys[k] = true
end
end
for k in pairs(changed_keys) do
map[k] = nil
end
for k, v in pairs(new_map) do
map[k] = v
end
end
serviceMonitor:
enabled: true
namespace: monitoring
selector:
release: metrics
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,24 @@
apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for prometheus-operator
type: application
version: 0.1.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- prometheus
- grafana
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: prometheus-operator
version: 9.3.0
repository: https://kubernetes-charts.storage.googleapis.com/
- name: prometheus-adapter
version: 2.5.0
repository: https://kubernetes-charts.storage.googleapis.com/
kubeVersion: ">= 1.16.0"


@@ -0,0 +1,98 @@
kubezero-metrics
================
KubeZero Umbrella Chart for prometheus-operator
Current chart version is `0.1.3`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://kubernetes-charts.storage.googleapis.com/ | prometheus-adapter | 2.5.0 |
| https://kubernetes-charts.storage.googleapis.com/ | prometheus-operator | 9.3.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| grafana.istio.enabled | bool | `false` | |
| grafana.istio.gateway | string | `"istio-system/ingressgateway"` | |
| grafana.istio.ipBlocks | list | `[]` | |
| grafana.istio.url | string | `""` | |
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| prometheus-adapter.prometheus.url | string | `"http://metrics-prometheus-operato-prometheus"` | |
| prometheus-adapter.rules.default | bool | `false` | |
| prometheus-adapter.rules.resource.cpu.containerLabel | string | `"container"` | |
| prometheus-adapter.rules.resource.cpu.containerQuery | string | `"sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}[5m])) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.nodeQuery | string | `"sum(1 - irate(node_cpu_seconds_total{mode=\"idle\"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.namespace.resource | string | `"namespace"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.node.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.cpu.resources.overrides.pod.resource | string | `"pod"` | |
| prometheus-adapter.rules.resource.memory.containerLabel | string | `"container"` | |
| prometheus-adapter.rules.resource.memory.containerQuery | string | `"sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.memory.nodeQuery | string | `"sum(node_memory_MemTotal_bytes{job=\"node-exporter\",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job=\"node-exporter\",<<.LabelMatchers>>}) by (<<.GroupBy>>)"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.namespace.resource | string | `"namespace"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.node.resource | string | `"node"` | |
| prometheus-adapter.rules.resource.memory.resources.overrides.pod.resource | string | `"pod"` | |
| prometheus-adapter.rules.resource.window | string | `"5m"` | |
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| prometheus-operator.alertmanager.enabled | bool | `false` | |
| prometheus-operator.coreDns.enabled | bool | `true` | |
| prometheus-operator.defaultRules.create | bool | `true` | |
| prometheus-operator.grafana.enabled | bool | `true` | |
| prometheus-operator.grafana.initChownData.enabled | bool | `false` | |
| prometheus-operator.grafana.persistence.enabled | bool | `true` | |
| prometheus-operator.grafana.persistence.size | string | `"4Gi"` | |
| prometheus-operator.grafana.persistence.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| prometheus-operator.grafana.plugins[0] | string | `"grafana-piechart-panel"` | |
| prometheus-operator.grafana.service.portName | string | `"http-grafana"` | |
| prometheus-operator.grafana.testFramework.enabled | bool | `false` | |
| prometheus-operator.kubeApiServer.enabled | bool | `true` | |
| prometheus-operator.kubeControllerManager.enabled | bool | `true` | |
| prometheus-operator.kubeControllerManager.service.port | int | `10257` | |
| prometheus-operator.kubeControllerManager.service.targetPort | int | `10257` | |
| prometheus-operator.kubeControllerManager.serviceMonitor.https | bool | `true` | |
| prometheus-operator.kubeControllerManager.serviceMonitor.insecureSkipVerify | bool | `true` | |
| prometheus-operator.kubeDns.enabled | bool | `false` | |
| prometheus-operator.kubeEtcd.enabled | bool | `false` | |
| prometheus-operator.kubeProxy.enabled | bool | `true` | |
| prometheus-operator.kubeScheduler.enabled | bool | `true` | |
| prometheus-operator.kubeScheduler.service.port | int | `10259` | |
| prometheus-operator.kubeScheduler.service.targetPort | int | `10259` | |
| prometheus-operator.kubeScheduler.serviceMonitor.https | bool | `true` | |
| prometheus-operator.kubeScheduler.serviceMonitor.insecureSkipVerify | bool | `true` | |
| prometheus-operator.kubeStateMetrics.enabled | bool | `true` | |
| prometheus-operator.kubelet.enabled | bool | `true` | |
| prometheus-operator.kubelet.serviceMonitor.cAdvisor | bool | `true` | |
| prometheus-operator.nodeExporter.enabled | bool | `true` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].action | string | `"replace"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].regex | string | `"^(.*)$"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].replacement | string | `"$1"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].separator | string | `";"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].sourceLabels[0] | string | `"__meta_kubernetes_pod_node_name"` | |
| prometheus-operator.nodeExporter.serviceMonitor.relabelings[0].targetLabel | string | `"node"` | |
| prometheus-operator.prometheus.enabled | bool | `true` | |
| prometheus-operator.prometheus.prometheusSpec.portName | string | `"http-prometheus"` | |
| prometheus-operator.prometheus.prometheusSpec.resources.requests.memory | string | `"512Mi"` | |
| prometheus-operator.prometheus.prometheusSpec.retention | string | `"8d"` | |
| prometheus-operator.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
| prometheus-operator.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"8Gi"` | |
| prometheus-operator.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
| prometheus-operator.prometheusOperator.admissionWebhooks.enabled | bool | `false` | |
| prometheus-operator.prometheusOperator.createCustomResource | bool | `true` | |
| prometheus-operator.prometheusOperator.enabled | bool | `true` | |
| prometheus-operator.prometheusOperator.manageCrds | bool | `false` | |
| prometheus-operator.prometheusOperator.namespaces.additional[0] | string | `"kube-system"` | |
| prometheus-operator.prometheusOperator.namespaces.additional[1] | string | `"logging"` | |
| prometheus-operator.prometheusOperator.namespaces.releaseNamespace | bool | `true` | |
| prometheus-operator.prometheusOperator.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| prometheus-operator.prometheusOperator.tlsProxy.enabled | bool | `false` | |
| prometheus-operator.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
| prometheus-operator.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| prometheus.istio.enabled | bool | `false` | |
| prometheus.istio.gateway | string | `"istio-system/ingressgateway"` | |
| prometheus.istio.url | string | `""` | |


@@ -0,0 +1,10 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}

Some files were not shown because too many files have changed in this diff.