Merge all Kubezero updates for end of July release #8

Merged
stefan merged 35 commits from master into stable 2020-07-27 21:28:55 +00:00
92 changed files with 10398 additions and 3324 deletions

View File

@ -1,16 +1,57 @@
# KubeZero - ZeroDownTime Kubernetes platform
KubeZero - Zero Down Time Kubernetes platform
========================
KubeZero is a pre-configured collection of components deployed onto a bare Kubernetes cluster.
All chosen components are 100% organic OpenSource.
## Abstract
KubeZero is a mildly opinionated collection of Kubernetes components to be deployed on a bare Kubernetes cluster.
All components are 100% organic OpenSource.
# Design goals
## Quickstart
- Cloud provider agnostic, bare-metal / self-hosted possible
- No vendor lock in
- No closed source solutions
- No premium services / subscriptions required
- Staying as close to upstream projects as possible
- Minimal custom code
- Work within each community / give back
## Architecture
The kubezero-app is the root application Helm chart.
This also implements the *umbrella chart* pattern in order to inject custom values into upstream charts.
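For illustration, a minimal sketch of the *umbrella chart* pattern (chart name, version and repository below are hypothetical, not the actual kubezero-app manifest):

```
# Chart.yaml of a hypothetical umbrella chart
apiVersion: v2
name: kubezero-example
version: 0.1.0
dependencies:
  - name: upstream-chart                      # the wrapped upstream chart
    version: 1.2.3
    repository: https://charts.example.com
```

Custom values are then injected under the dependency's key in the umbrella chart's values.yaml, e.g. `upstream-chart: {replicaCount: 2}`.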
# Components
## Components
## Network / CNI
- Calico using VxLAN as default backend
## Certificate management
- cert-manager incl. a local self-signed cluster CA
## Metrics / Alerting
- Prometheus / Grafana
## Logging
- Fluent-bit
- Fluentd
- ElasticSearch
- Kibana
## Dashboard
- see ArgoCD
## Storage
- EBS external CSI storage provider
- EFS external CSI storage provider
- LocalVolumes
- LocalPath
## Ingress
- AWS Network Load Balancer
- Istio providing Public and Private Envoy proxies
- HTTP(s) and TCP support
- Real client source IPs available
## Service Mesh (optional)
# KubeZero vs. EKS
## Controller nodes used for various admin controllers
## KIAM incl. blocked access to meta-data service
### ArgoCD

View File

@ -1,5 +0,0 @@
nameSpace: kube-system
resources:
- local-sc-xfs.yaml
- local-volume-provisioner.yaml

View File

@ -1,136 +0,0 @@
---
# Source: provisioner/templates/provisioner.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: local-provisioner-config
namespace: kube-system
labels:
heritage: "Helm"
release: "RELEASE-NAME"
chart: provisioner-2.3.3
data:
storageClassMap: |
local-sc-xfs:
hostDir: /mnt/disks
mountDir: /mnt/disks
---
# Source: provisioner/templates/provisioner-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-storage-admin
namespace: kube-system
labels:
heritage: "Helm"
release: "RELEASE-NAME"
chart: provisioner-2.3.3
---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-storage-provisioner-node-clusterrole
labels:
heritage: "Helm"
release: "RELEASE-NAME"
chart: provisioner-2.3.3
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-storage-provisioner-pv-binding
labels:
heritage: "Helm"
release: "RELEASE-NAME"
chart: provisioner-2.3.3
subjects:
- kind: ServiceAccount
name: local-storage-admin
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:persistent-volume-provisioner
apiGroup: rbac.authorization.k8s.io
---
# Source: provisioner/templates/provisioner-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-storage-provisioner-node-binding
labels:
heritage: "Helm"
release: "RELEASE-NAME"
chart: provisioner-2.3.3
subjects:
- kind: ServiceAccount
name: local-storage-admin
namespace: kube-system
roleRef:
kind: ClusterRole
name: local-storage-provisioner-node-clusterrole
apiGroup: rbac.authorization.k8s.io
---
# Source: provisioner/templates/provisioner.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: local-volume-provisioner
namespace: kube-system
labels:
app: local-volume-provisioner
heritage: "Helm"
release: "RELEASE-NAME"
chart: provisioner-2.3.3
spec:
selector:
matchLabels:
app: local-volume-provisioner
template:
metadata:
labels:
app: local-volume-provisioner
spec:
serviceAccountName: local-storage-admin
nodeSelector:
node.kubernetes.io/localVolume: present
containers:
- image: "quay.io/external_storage/local-volume-provisioner:v2.3.3"
name: provisioner
securityContext:
privileged: true
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: JOB_CONTAINER_IMAGE
value: "quay.io/external_storage/local-volume-provisioner:v2.3.3"
volumeMounts:
- mountPath: /etc/provisioner/config
name: provisioner-config
readOnly: true
- mountPath: /dev
name: provisioner-dev
- mountPath: /mnt/disks
name: local-sc-xfs
mountPropagation: "HostToContainer"
volumes:
- name: provisioner-config
configMap:
name: local-provisioner-config
- name: provisioner-dev
hostPath:
path: /dev
- name: local-sc-xfs
hostPath:
path: /mnt/disks

View File

@ -1,5 +0,0 @@
#!/bin/bash
# get chart and render yaml
git clone --depth=1 https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner.git
helm template ./sig-storage-local-static-provisioner/helm/provisioner -f values.yaml --namespace kube-system > local-volume-provisioner.yaml

View File

@ -1,11 +0,0 @@
common:
namespace: kube-system
classes:
- name: local-sc-xfs
hostDir: /mnt/disks
daemonset:
nodeSelector:
node.kubernetes.io/localVolume: present
prometheus:
operator:
enabled: false

View File

@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
name: kubezero-argo-cd
version: 0.3.1
version: 0.3.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -2,7 +2,7 @@ kubezero-argo-cd
================
KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
Current chart version is `0.3.1`
Current chart version is `0.3.5`
Source code can be found [here](https://kubezero.com)
@ -24,6 +24,7 @@ Source code can be found [here](https://kubezero.com)
| argo-cd.installCRDs | bool | `false` | |
| argo-cd.istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| argo-cd.istio.gateway | string | `"ingressgateway.istio-system.svc.cluster.local"` | Name of the Istio gateway to add the VirtualService to |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.redis.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.redis.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.redis.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |

View File

@ -0,0 +1,25 @@
{{- if index .Values "argo-cd" "istio" "enabled" }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
name: argocd-allow-only
namespace: istio-system
spec:
selector:
matchLabels:
app: istio-ingressgateway
rules:
{{- if index .Values "argo-cd" "istio" "ipBlocks" }}
- from:
- source:
ipBlocks:
{{- with index .Values "argo-cd" "istio" "ipBlocks" }}
{{- . | toYaml | nindent 8 }}
{{- end }}
to:
- operation:
hosts: ["{{ index .Values "argo-cd" "server" "config" "url" }}"]
{{- else }}
- {}
{{- end }}
{{- end }}
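As a usage sketch, the chart values below (the CIDR is a placeholder) restrict access to the ArgoCD host derived from `argo-cd.server.config.url`:

```
argo-cd:
  istio:
    enabled: true
    ipBlocks:
      - "203.0.113.0/24"   # hypothetical office CIDR
```

With `ipBlocks` left empty, the template falls back to the allow-all rule `- {}`.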

View File

@ -20,6 +20,10 @@ spec:
server: https://kubernetes.default.svc
- namespace: cert-manager
server: https://kubernetes.default.svc
- namespace: istio-operator
server: https://kubernetes.default.svc
- namespace: istio-system
server: https://kubernetes.default.svc
clusterResourceWhitelist:
- group: '*'

View File

@ -71,3 +71,4 @@ argo-cd:
enabled: false
# argo-cd.istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: ingressgateway.istio-system.svc.cluster.local
ipBlocks: []

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-aws-ebs-csi-driver
description: KubeZero Umbrella Chart for aws-ebs-csi-driver
type: application
version: 0.1.0
version: 0.1.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
sources:

View File

@ -2,7 +2,7 @@ kubezero-aws-ebs-csi-driver
===========================
KubeZero Umbrella Chart for aws-ebs-csi-driver
Current chart version is `0.1.0`
Current chart version is `0.1.1`
Source code can be found [here](https://kubezero.com)
@ -20,13 +20,15 @@ podAnnotations:
```
## Storage Classes
Provides the *ebs-sc-gp2-xfs* storage class for gp2, encrypted and XFS.
This class is also set as default.
By default it also creates the *ebs-sc-gp2-xfs* storage class for gp2, encrypted and XFS.
This class is also set as the default storage class (controlled by `StorageClass.default`).
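A minimal PVC sketch using this class (claim name and size are illustrative):

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-example        # hypothetical claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ebs-sc-gp2-xfs
  resources:
    requests:
      storage: 10Gi
```

As the class uses `WaitForFirstConsumer`, the volume is only provisioned once a pod consuming the claim is scheduled.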
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| StorageClass.create | bool | `true` | |
| StorageClass.default | bool | `true` | |
| aws-ebs-csi-driver.enableVolumeResizing | bool | `false` | |
| aws-ebs-csi-driver.enableVolumeScheduling | bool | `true` | |
| aws-ebs-csi-driver.enableVolumeSnapshot | bool | `false` | |

View File

@ -15,7 +15,7 @@ podAnnotations:
```
## Storage Classes
Provides the *ebs-sc-gp2-xfs* storage class for gp2, encrypted and XFS.
This class is also set as default.
By default it also creates the *ebs-sc-gp2-xfs* storage class for gp2, encrypted and XFS.
This class is also set as the default storage class (controlled by `StorageClass.default`).
{{ template "chart.valuesSection" . }}

View File

@ -1,11 +1,14 @@
{{- if .Values.StorageClass.create }}
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: ebs-sc-gp2-xfs
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
{{- if .Values.StorageClass.default }}
annotations:
storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
parameters:
@ -16,7 +19,7 @@ parameters:
allowVolumeExpansion: true
{{- end }}
{{- range .Values.storageClassZones }}
{{- range .Values.StorageClass.Zones }}
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
@ -39,3 +42,4 @@ allowedTopologies:
values:
- {{ . }}
{{- end }}
{{- end }}

View File

@ -19,3 +19,7 @@ aws-ebs-csi-driver:
# aws-ebs-csi-driver.extraVolumeTags -- Optional tags to be added to each EBS volume
extraVolumeTags: {}
# Name: KubeZero-Cluster
StorageClass:
create: true
default: true
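The renamed `StorageClass.Zones` list (see the template change above) optionally creates one zone-bound class per AZ; a sketch with hypothetical zones:

```
StorageClass:
  create: true
  default: true
  Zones:
    - eu-central-1a        # hypothetical availability zones
    - eu-central-1b
```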

View File

@ -0,0 +1,26 @@
apiVersion: v2
name: kubezero-aws-efs-csi-driver
description: KubeZero Umbrella Chart for aws-efs-csi-driver
version: 0.1.0
appVersion: 1.0.0
kubeVersion: ">=1.16.0-0"
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
sources:
- https://github.com/Zero-Down-Time/kubezero
- https://github.com/kubernetes-sigs/aws-efs-csi-driver
keywords:
- kubezero
- aws
- efs
- csi
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
# Once they properly update upstream
# - name: aws-ebs-csi-driver
# version: 1.0.0
# repository: https://kubernetes-sigs.github.io/aws-efs-csi-driver

View File

@ -0,0 +1,27 @@
commit 42a8ce1f587f10aa896ece3edbb7d31b627447a0
Author: Stefan Reimer <stefan@zero-downtime.net>
Date: Fri Jul 24 15:05:14 2020 +0100
Actually use nodeSelector for the daemonset, to allow selecting only workers having the EFS mounted
diff --git a/helm/templates/daemonset.yaml b/helm/templates/daemonset.yaml
index 7fcfc1e..bfe3496 100644
--- a/helm/templates/daemonset.yaml
+++ b/helm/templates/daemonset.yaml
@@ -22,12 +22,15 @@ spec:
spec:
nodeSelector:
beta.kubernetes.io/os: linux
+ {{- with .Values.nodeSelector }}
+ {{- . | toYaml | nindent 8 }}
+ {{- end }}
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
{{- with .Values.node.tolerations }}
-{{ toYaml . | indent 8 }}
+ {{- . | toYaml | nindent 8 }}
{{- end }}
containers:
- name: efs-plugin

View File

@ -0,0 +1,28 @@
kubezero-aws-efs-csi-driver
===========================
KubeZero Umbrella Chart for aws-efs-csi-driver
Current chart version is `0.1.0`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## Storage Class
Optionally creates the *efs-sc* storage class.
Could also be made the default storage class if requested.
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| PersistentVolume.EfsId | string | `""` | |
| PersistentVolume.Name | string | `""` | |
| PersistentVolume.create | bool | `false` | |
| StorageClass.create | bool | `true` | |
| StorageClass.default | bool | `false` | |
| aws-efs-csi-driver.nodeSelector | object | `{}` | |
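As a sketch, values wiring the optional storage class and PersistentVolume to a hypothetical EFS filesystem (id and label value are placeholders):

```
aws-efs-csi-driver:
  nodeSelector:
    node.kubernetes.io/csi.efs.fs: fs-12345678   # hypothetical EFS id
StorageClass:
  create: true
  default: false
PersistentVolume:
  create: true
  EfsId: fs-12345678
  Name: kubezero-efs-pv
```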

View File

@ -0,0 +1,14 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
## Storage Class
Optionally creates the *efs-sc* storage class.
Could also be made the default storage class if requested.
{{ template "chart.valuesSection" . }}

View File

@ -0,0 +1,15 @@
apiVersion: v1
appVersion: "1.0.0"
name: aws-efs-csi-driver
description: A Helm chart for AWS EFS CSI Driver
version: 0.1.0
kubeVersion: ">=1.14.0-0"
home: https://github.com/kubernetes-sigs/aws-efs-csi-driver
sources:
- https://github.com/kubernetes-sigs/aws-efs-csi-driver
keywords:
- aws
- efs
- csi
maintainers:
- name: leakingtapan

View File

@ -0,0 +1,3 @@
To verify that aws-efs-csi-driver has started, run:
kubectl get pod -n kube-system -l "app.kubernetes.io/name={{ include "aws-efs-csi-driver.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"

View File

@ -0,0 +1,45 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-efs-csi-driver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-efs-csi-driver.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-efs-csi-driver.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-efs-csi-driver.labels" -}}
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
helm.sh/chart: {{ include "aws-efs-csi-driver.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

View File

@ -0,0 +1,6 @@
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: efs.csi.aws.com
spec:
attachRequired: false

View File

@ -0,0 +1,117 @@
# Node Service
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: efs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: efs-csi-node
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: efs-csi-node
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.node.podAnnotations }}
annotations: {{ toYaml .Values.node.podAnnotations | nindent 8 }}
{{- end }}
spec:
nodeSelector:
beta.kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{- . | toYaml | nindent 8 }}
{{- end }}
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
{{- with .Values.node.tolerations }}
{{- . | toYaml | nindent 8 }}
{{- end }}
containers:
- name: efs-plugin
securityContext:
privileged: true
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
args:
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: efs-state-dir
mountPath: /var/run/efs
- name: efs-utils-config
mountPath: /etc/amazon/efs
ports:
- name: healthz
containerPort: 9809
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 2
failureThreshold: 5
- name: cs-driver-registrar
image: {{ printf "%s:%s" .Values.sidecars.nodeDriverRegistrarImage.repository .Values.sidecars.nodeDriverRegistrarImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/efs.csi.aws.com/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-probe
image: {{ printf "%s:%s" .Values.sidecars.livenessProbeImage.repository .Values.sidecars.livenessProbeImage.tag }}
args:
- --csi-address=/csi/csi.sock
- --health-port=9809
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/efs.csi.aws.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: efs-state-dir
hostPath:
path: /var/run/efs
type: DirectoryOrCreate
- name: efs-utils-config
hostPath:
path: /etc/amazon/efs
type: DirectoryOrCreate

View File

@ -0,0 +1,46 @@
# Default values for aws-efs-csi-driver.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 2
image:
repository: amazon/aws-efs-csi-driver
tag: "v1.0.0"
pullPolicy: IfNotPresent
sidecars:
livenessProbeImage:
repository: quay.io/k8scsi/livenessprobe
tag: "v2.0.0"
nodeDriverRegistrarImage:
repository: quay.io/k8scsi/csi-node-driver-registrar
tag: "v1.3.0"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
node:
podAnnotations: {}
tolerations: []

View File

@ -0,0 +1,19 @@
{{- if .Values.PersistentVolume.create }}
apiVersion: v1
kind: PersistentVolume
metadata:
name: {{ default "kubezero-efs-pv" .Values.PersistentVolume.Name }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
capacity:
# EFS is elastic; the storage value is required by the Kubernetes API but not enforced by the driver
storage: 5Gi
volumeMode: Filesystem
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: efs-sc
csi:
driver: efs.csi.aws.com
volumeHandle: {{ .Values.PersistentVolume.EfsId }}
{{- end }}

View File

@ -0,0 +1,13 @@
{{- if .Values.StorageClass.create }}
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: efs-sc
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
{{- if .Values.StorageClass.default }}
annotations:
storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
provisioner: efs.csi.aws.com
{{- end }}

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -ex
REPO="kubernetes-sigs/aws-efs-csi-driver"
# NB: this is a lexical sort on dot-separated fields; fine while version components stay single-digit
LATEST_RELEASE=$(curl -sL https://api.github.com/repos/${REPO}/releases | grep '"tag_name":' | cut -d'"' -f4 | grep -v -E "(alpha|beta|rc)" | sort -t"." -k 1,1 -k 2,2 -k 3,3 -k 4,4 | tail -n 1)
URL="https://github.com/${REPO}/releases/download/${LATEST_RELEASE}/helm-chart.tgz"
rm -rf charts && mkdir -p charts/aws-efs-csi-driver
curl -sL "$URL" | tar xz -C charts/aws-efs-csi-driver --strip-components=1

View File

@ -0,0 +1,12 @@
aws-efs-csi-driver:
nodeSelector: {}
# node.kubernetes.io/csi.efs.fs: EFS-FS_ID
StorageClass:
create: true
default: false
PersistentVolume:
create: false
EfsId: ""
Name: ""

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-calico
description: KubeZero Umbrella Chart for Calico
type: application
version: 0.1.4
version: 0.1.7
appVersion: 3.15
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

View File

@ -2,7 +2,7 @@ kubezero-calico
===============
KubeZero Umbrella Chart for Calico
Current chart version is `0.1.3`
Current chart version is `0.1.7`
Source code can be found [here](https://kubezero.com)
@ -34,6 +34,17 @@ The setup is based on the upstream calico-vxlan config from
- Set FELIX log level to warning
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| customIPPool | object | `{}` | |
| installCRDs | bool | `false` | |
| loglevel | string | `"Warning"` | |
| migration | bool | `false` | |
| mtu | int | `8941` | |
| network | string | `"vxlan"` | |
| prometheus | bool | `false` | |
## Resources

View File

@ -29,6 +29,7 @@ The setup is based on the upstream calico-vxlan config from
- Set FELIX log level to warning
{{ template "chart.valuesSection" . }}
## Resources

File diff suppressed because it is too large

View File

@ -487,12 +487,14 @@ spec:
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 3
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
periodSeconds: 10
timeoutSeconds: 3
volumeMounts:
- mountPath: /lib/modules
name: lib-modules

File diff suppressed because it is too large

View File

@ -1,3 +1,4 @@
{{- if .Values.customIPPool }}
apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
@ -9,3 +10,4 @@ spec:
natOutgoing: true
nodeSelector: all()
vxlanMode: Always
{{- end }}

View File

@ -1,4 +1,4 @@
migration: false
installCRDs: false
network: vxlan
@ -7,3 +7,9 @@ mtu: 8941
loglevel: Warning
prometheus: false
customIPPool: {}
# Soon to be removed
migration: false

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.3.4
version: 0.3.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -2,7 +2,7 @@ kubezero-cert-manager
=====================
KubeZero Umbrella Chart for cert-manager
Current chart version is `0.3.4`
Current chart version is `0.3.5`
Source code can be found [here](https://kubezero.com)

View File

@ -2,6 +2,7 @@
{{- if .Values.localCA.selfsigning }}
# KubeZero / Local cluster CA
# The resources are serialized via waves in Argo
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
@ -9,6 +10,8 @@ metadata:
namespace: kube-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
annotations:
argocd.argoproj.io/sync-wave: "10"
spec:
selfSigned: {}
---
@ -19,6 +22,8 @@ metadata:
namespace: kube-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
annotations:
argocd.argoproj.io/sync-wave: "11"
spec:
secretName: kubezero-ca-tls
commonName: "kubezero-local-ca"
@ -50,6 +55,8 @@ metadata:
namespace: kube-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
annotations:
argocd.argoproj.io/sync-wave: "12"
spec:
ca:
secretName: kubezero-ca-tls

View File

@ -0,0 +1,20 @@
apiVersion: v2
name: kubezero-istio
description: KubeZero Umbrella Chart for Istio
type: application
version: 0.2.0
appVersion: 1.6.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- istio
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
- name: istio-operator
version: ">= 1.6"
kubeVersion: ">= 1.16.0"

View File

@ -0,0 +1,37 @@
kubezero-istio
==============
KubeZero Umbrella Chart for Istio
Installs Istio Operator and KubeZero Istio profile
Current chart version is `0.2.0`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| | istio-operator | >= 1.6 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## KubeZero default configuration
- mapped istio-operator to run on the controller nodes only
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| ingress.autoscaleEnabled | bool | `false` | |
| ingress.private | bool | `true` | |
| ingress.replicaCount | int | `2` | |
| ingress.type | string | `"NodePort"` | |
| istio-operator.hub | string | `"docker.io/istio"` | |
| istio-operator.tag | string | `"1.6.5"` | |
| istiod.autoscaleEnabled | bool | `false` | |
| istiod.replicaCount | int | `1` | |
## Resources
- https://istio.io/latest/docs/setup/install/standalone-operator/

View File

@ -0,0 +1,20 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
Installs Istio Operator and KubeZero Istio profile
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
## KubeZero default configuration
- mapped istio-operator to run on the controller nodes only
{{ template "chart.valuesSection" . }}
## Resources
- https://istio.io/latest/docs/setup/install/standalone-operator/

View File

@ -0,0 +1,12 @@
apiVersion: v1
name: istio-operator
version: 1.6.0
tillerVersion: ">=2.7.2"
description: Helm chart for deploying Istio operator
keywords:
- istio
- operator
sources:
- http://github.com/istio/istio/operator
engine: gotpl
icon: https://istio.io/favicons/android-192x192.png

View File

@ -0,0 +1,113 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: istio-operator
rules:
# istio groups
- apiGroups:
- authentication.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- config.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- install.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- networking.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- rbac.istio.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- security.istio.io
resources:
- '*'
verbs:
- '*'
# k8s groups
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- '*'
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions.apiextensions.k8s.io
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- apps
- extensions
resources:
- daemonsets
- deployments
- deployments/finalizers
- ingresses
- replicasets
- statefulsets
verbs:
- '*'
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- create
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
- clusterroles
- roles
- rolebindings
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- events
- namespaces
- pods
- persistentvolumeclaims
- secrets
- services
- serviceaccounts
verbs:
- '*'
---

View File

@ -0,0 +1,13 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: istio-operator
subjects:
- kind: ServiceAccount
name: istio-operator
namespace: {{.Values.operatorNamespace}}
roleRef:
kind: ClusterRole
name: istio-operator
apiGroup: rbac.authorization.k8s.io
---

View File

@ -0,0 +1,46 @@
# SYNC WITH manifests/charts/base/files
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: istiooperators.install.istio.io
spec:
group: install.istio.io
names:
kind: IstioOperator
plural: istiooperators
singular: istiooperator
shortNames:
- iop
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values.
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase.
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: 'Specification of the desired state of the istio control plane resource.
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
type: object
status:
description: 'Status describes each of istio control plane component status at the current time.
0 means NONE, 1 means UPDATING, 2 means HEALTHY, 3 means ERROR, 4 means RECONCILING.
More info: https://github.com/istio/api/blob/master/operator/v1alpha1/istio.operator.v1alpha1.pb.html &
https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
type: object
versions:
- name: v1alpha1
served: true
storage: true
---

View File

@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: {{.Values.operatorNamespace}}
name: istio-operator
spec:
replicas: 1
selector:
matchLabels:
name: istio-operator
template:
metadata:
labels:
name: istio-operator
spec:
serviceAccountName: istio-operator
nodeSelector:
kubernetes.io/os: linux
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: istio-operator
image: {{.Values.hub}}/operator:{{.Values.tag}}
command:
- operator
- server
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: 200m
memory: 256Mi
requests:
cpu: 50m
memory: 128Mi
env:
- name: WATCH_NAMESPACE
value: {{.Values.istioNamespace}}
- name: LEADER_ELECTION_NAMESPACE
value: {{.Values.operatorNamespace}}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: {{.Values.operatorNamespace}}
---

View File

@ -0,0 +1,8 @@
apiVersion: v1
kind: Namespace
metadata:
name: {{.Values.operatorNamespace}}
labels:
istio-operator-managed: Reconcile
istio-injection: disabled
---

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
namespace: {{.Values.operatorNamespace}}
labels:
name: istio-operator
name: istio-operator
spec:
ports:
- name: http-metrics
port: 8383
targetPort: 8383
selector:
name: istio-operator
---

View File

@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: {{.Values.operatorNamespace}}
name: istio-operator
---

View File

@ -0,0 +1,4 @@
hub: gcr.io/istio-testing
tag: 1.6-dev
operatorNamespace: istio-operator
istioNamespace: istio-system

View File

@ -0,0 +1,17 @@
diff --git a/charts/kubezero-istio/charts/istio-operator/templates/deployment.yaml b/charts/kubezero-istio/charts/istio-operator/templates/deployment.yaml
index 5ef7848..8350dd5 100644
--- a/charts/kubezero-istio/charts/istio-operator/templates/deployment.yaml
+++ b/charts/kubezero-istio/charts/istio-operator/templates/deployment.yaml
@@ -14,6 +14,12 @@ spec:
name: istio-operator
spec:
serviceAccountName: istio-operator
+ nodeSelector:
+ kubernetes.io/os: linux
+ node-role.kubernetes.io/master: ""
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
containers:
- name: istio-operator
image: {{.Values.hub}}/operator:{{.Values.tag}}

View File

@ -0,0 +1,14 @@
{{- if .Values.ingress.dnsNames }}
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: public-ingress-cert
namespace: istio-system
spec:
secretName: public-ingress-cert
issuerRef:
name: letsencrypt-dns-prod
kind: ClusterIssuer
dnsNames:
{{ toYaml .Values.ingress.dnsNames | indent 4 }}
{{- end }}

View File

@ -0,0 +1,64 @@
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: ingressgateway
namespace: istio-system
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP2
hosts:
- "*"
tls:
httpsRedirect: true
- port:
number: 443
name: https
protocol: HTTPS
hosts:
- "*"
tls:
mode: SIMPLE
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
credentialName: public-ingress-cert
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: private-ingressgateway
namespace: istio-system
spec:
selector:
istio: private-ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP2
hosts:
- "*"
tls:
httpsRedirect: true
- port:
number: 443
name: https
protocol: HTTPS
hosts:
- "*"
tls:
mode: SIMPLE
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
credentialName: public-ingress-cert
- port:
number: 24224
name: fluentd-forward
protocol: TCP
hosts:
- "*"

File diff suppressed because it is too large

View File

@ -0,0 +1,111 @@
{{- if .Values.ingress.private }}
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
name: kubezero-istio-private-ingress
namespace: istio-system
spec:
profile: empty
components:
ingressGateways:
- name: istio-private-ingressgateway
enabled: true
namespace: istio-system
k8s:
replicaCount: {{ .Values.ingress.replicaCount }}
{{- if .Values.ingress.autoscaleEnabled }}
hpaSpec:
maxReplicas: 5
metrics:
- resource:
name: cpu
targetAverageUtilization: 80
type: Resource
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: istio-private-ingressgateway
{{- end }}
env:
- name: ISTIO_META_HTTP10
value: '"1"'
- name: ISTIO_META_ROUTER_MODE
value: standard
{{- if eq .Values.ingress.type "NodePort" }}
nodeSelector:
node.kubernetes.io/ingress.private: "31080_31443_30671_30672_31224"
{{- end }}
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
values:
gateways:
istio-ingressgateway:
autoscaleEnabled: {{ .Values.ingress.autoscaleEnabled }}
externalTrafficPolicy: Local
labels:
app: istio-private-ingressgateway
istio: private-ingressgateway
meshExpansionPorts: []
podAntiAffinityLabelSelector:
- key: app
operator: In
topologyKey: kubernetes.io/hostname
values: istio-private-ingressgateway
type: {{ default "NodePort" .Values.ingress.type }}
ports:
- name: http2
port: 80
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31080
{{- end }}
- name: https
port: 443
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31443
{{- end }}
- name: amqp
port: 5672
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30672
{{- end }}
- name: amqps
port: 5671
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30671
{{- end }}
- name: fluentd-forward
port: 24224
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31224
{{- end }}
sds:
enabled: true
image: node-agent-k8s
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
secretVolumes:
- mountPath: /etc/istio/ingressgateway-certs
name: ingressgateway-certs
secretName: istio-ingressgateway-certs
- mountPath: /etc/istio/ingressgateway-ca-certs
name: ingressgateway-ca-certs
secretName: istio-ingressgateway-ca-certs
global:
jwtPolicy: first-party-jwt
{{- end }}

View File

@ -0,0 +1,142 @@
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
name: kubezero-istio
namespace: istio-system
spec:
profile: empty
addonComponents:
prometheus:
enabled: false
components:
citadel:
enabled: false
galley:
enabled: false
ingressGateways:
- enabled: true
k8s:
replicaCount: {{ .Values.ingress.replicaCount }}
{{- if .Values.ingress.autoscaleEnabled }}
hpaSpec:
maxReplicas: 5
metrics:
- resource:
name: cpu
targetAverageUtilization: 80
type: Resource
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: istio-ingressgateway
{{- end }}
env:
- name: ISTIO_META_HTTP10
value: '"1"'
- name: ISTIO_META_ROUTER_MODE
value: standard
{{- if eq .Values.ingress.type "NodePort" }}
nodeSelector:
node.kubernetes.io/ingress.public: "30080_30443"
{{- end }}
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
name: istio-ingressgateway
pilot:
enabled: true
k8s:
replicaCount: {{ .Values.istiod.replicaCount }}
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
resources:
requests:
cpu: 100m
memory: 128Mi
policy:
enabled: true
k8s:
replicaCount: {{ .Values.istiod.replicaCount }}
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
sidecarInjector:
enabled: false
telemetry:
enabled: false
values:
gateways:
istio-ingressgateway:
autoscaleEnabled: {{ .Values.ingress.autoscaleEnabled }}
externalTrafficPolicy: Local
labels:
app: istio-ingressgateway
istio: ingressgateway
meshExpansionPorts: []
podAntiAffinityLabelSelector:
- key: app
operator: In
topologyKey: kubernetes.io/hostname
values: istio-ingressgateway
type: {{ default "NodePort" .Values.ingress.type }}
ports:
- name: http2
port: 80
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30080
{{- end }}
- name: https
port: 443
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30443
{{- end }}
sds:
enabled: true
image: node-agent-k8s
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
secretVolumes:
- mountPath: /etc/istio/ingressgateway-certs
name: ingressgateway-certs
secretName: istio-ingressgateway-certs
- mountPath: /etc/istio/ingressgateway-ca-certs
name: ingressgateway-ca-certs
secretName: istio-ingressgateway-ca-certs
meshConfig:
accessLogFile: /dev/stdout
disablePolicyChecks: false
global:
jwtPolicy: first-party-jwt
omitSidecarInjectorConfigMap: true
proxy:
accessLogEncoding: JSON
autoInject: disabled
envoyStatsd:
enabled: false
useMCP: false
pilot:
sidecar: false
autoscaleEnabled: false
mixer:
policy:
autoscaleEnabled: false

View File

@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: istio-system
labels:
istio-injection: disabled

View File

@ -0,0 +1,21 @@
#!/bin/bash
set -ex
ISTIO_VERSION=1.6.5
NAME="istio-$ISTIO_VERSION"
URL="https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istio-${ISTIO_VERSION}-linux-amd64.tar.gz"
curl -sL "$URL" | tar xz
# Now let's extract what we need
rm -rf charts/istio-operator
cp -r istio-${ISTIO_VERSION}/manifests/charts/istio-operator charts
rm -rf istio-${ISTIO_VERSION}
# Apply our patch
patch -i istio-operator.patch -p3
# Extract base / CRDs from istioctl into a plain manifest to work around the chicken-and-egg problem with CRDs
istioctl manifest generate --set profile=empty --set components.base.enabled=true > templates/istio-base.yaml

View File

@ -0,0 +1,15 @@
#!/bin/bash
# First delete old 1.4
kubectl delete -f ingress-gateway.yaml
kubectl delete -f istio.yaml
kubectl delete -f istio-init.yaml
kubectl delete -f namespace.yaml
# Now we need to install the new Istio Operator via KubeZero
# deploy the CR for 1.6
kubectl apply -f istio-1.6.yaml
# Add the additional private ingress gateway as a dedicated CR
kubectl apply -f istio-1.6-private-ingress.yaml

View File

@ -0,0 +1,15 @@
istiod:
autoscaleEnabled: false
replicaCount: 1
ingress:
autoscaleEnabled: false
replicaCount: 2
type: NodePort
private: true
#dnsNames:
#- "*.example.com"
istio-operator:
hub: docker.io/istio
tag: 1.6.5
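Uncommenting `ingress.dnsNames` enables the `public-ingress-cert` Certificate shown above; a sketch with a placeholder domain (assumes the `letsencrypt-dns-prod` ClusterIssuer exists):

```
ingress:
  dnsNames:
    - "*.example.com"
```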

View File

@ -2,7 +2,8 @@ apiVersion: v2
name: kubezero-kiam
description: KubeZero Umbrella Chart for Kiam
type: application
version: 0.2.5
version: 0.2.6
appVersion: 3.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -2,7 +2,7 @@ kubezero-kiam
=============
KubeZero Umbrella Chart for Kiam
Current chart version is `0.2.5`
Current chart version is `0.2.6`
Source code can be found [here](https://kubezero.com)
@ -37,7 +37,7 @@ Required for the *csi ebs plugin* and most likely various others assuming basic
| kiam.agent.gatewayTimeoutCreation | string | `"5s"` | |
| kiam.agent.host.interface | string | `"cali+"` | |
| kiam.agent.host.iptables | bool | `false` | |
| kiam.agent.image.tag | string | `"v3.6-rc1"` | |
| kiam.agent.image.tag | string | `"v3.6"` | |
| kiam.agent.log.level | string | `"warn"` | |
| kiam.agent.prometheus.servicemonitor.enabled | bool | `false` | |
| kiam.agent.sslCertHostPath | string | `"/etc/ssl/certs"` | |
@ -52,7 +52,7 @@ Required for the *csi ebs plugin* and most likely various others assuming basic
| kiam.server.assumeRoleArn | string | `""` | kiam server IAM role to assume, required as we run the agents next to the servers normally, e.g. arn:aws:iam::123456789012:role/kiam-server-role |
| kiam.server.deployment.enabled | bool | `true` | |
| kiam.server.deployment.replicas | int | `1` | |
| kiam.server.image.tag | string | `"v3.6-rc1"` | |
| kiam.server.image.tag | string | `"v3.6"` | |
| kiam.server.log.level | string | `"warn"` | |
| kiam.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| kiam.server.prometheus.servicemonitor.enabled | bool | `false` | |

View File

@ -1,7 +1,7 @@
kiam:
server:
image:
tag: "v3.6-rc1"
tag: "v3.6"
# kiam.server.assumeRoleArn -- kiam server IAM role to assume, required as we run the agents next to the servers normally, e.g. arn:aws:iam::123456789012:role/kiam-server-role
assumeRoleArn: ''
useHostNetwork: true
@ -31,7 +31,7 @@ kiam:
agent:
image:
tag: "v3.6-rc1"
tag: "v3.6"
gatewayTimeoutCreation: "5s"
updateStrategy: RollingUpdate
# IP tables set on each node at boot, see CloudBender
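For a working deployment `kiam.server.assumeRoleArn` still needs to be set; a sketch using the example ARN from the chart docs:

```
kiam:
  server:
    assumeRoleArn: "arn:aws:iam::123456789012:role/kiam-server-role"   # example account/role
```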

View File

@ -0,0 +1,18 @@
apiVersion: v2
name: kubezero-local-volume-provisioner
description: KubeZero Umbrella Chart for local-static-provisioner
type: application
version: 0.0.1
appVersion: 2.3.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- local-static-provisioner
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
kubeVersion: ">= 1.16.0"

View File

@ -0,0 +1,24 @@
kubezero-local-volume-provisioner
=================================
KubeZero Umbrella Chart for local-static-provisioner
Provides persistent volumes backed by local volumes, e.g. additional SSDs or spindles.
Current chart version is `0.0.1`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## KubeZero default configuration
- add nodeSelector to only install on nodes actually having ephemeral local storage
- provide matching storage class to expose mounted disks under `/mnt/disks`
## Resources
- https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner
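As a sketch, the node selector KubeZero previously set statically (cf. the removed manifests above) limits the daemonset to labelled nodes; depending on how the umbrella chart nests values, this may need to sit under the subchart's key:

```
daemonset:
  nodeSelector:
    node.kubernetes.io/localVolume: present
```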

View File

@ -1,12 +1,19 @@
# local-volume-provisioner
Provides persistent volumes backed by local volumes, e.g. additional SSDs or spindles.
{{ template "chart.header" . }}
{{ template "chart.description" . }}
As the upstream Helm chart is not published in a chart repository, we extract it and store it locally as the base for kustomize.
See `update.sh`.
Provides persistent volumes backed by local volumes, e.g. additional SSDs or spindles.
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
## KubeZero default configuration
## Kustomizations
- add nodeSelector to only install on nodes actually having ephemeral local storage
- provide matching storage class to expose mounted disks under `/mnt/disks`
## Resources
- https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner.git
- https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner

View File

@ -0,0 +1,8 @@
apiVersion: v1
appVersion: 2.3.4
description: local provisioner chart
keywords:
- storage
- local
name: local-static-provisioner
version: 3.0.0

View File

@ -0,0 +1 @@
provisioner installed

View File

@ -0,0 +1,42 @@
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "provisioner.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Expand the name of the chart.
*/}}
{{- define "provisioner.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "provisioner.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "provisioner.serviceAccountName" -}}
{{- if .Values.common.serviceAccount.create -}}
{{ default (include "provisioner.fullname" .) .Values.common.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.common.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,52 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "provisioner.fullname" . }}-config
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
data:
{{- if .Values.daemonset.nodeLabels }}
nodeLabelsForPV: |
{{- range $label := .Values.daemonset.nodeLabels }}
- {{$label}}
{{- end }}
{{- end }}
{{- if .Values.common.useAlphaAPI }}
useAlphaAPI: "true"
{{- end }}
{{- if .Values.common.setPVOwnerRef }}
setPVOwnerRef: "true"
{{- end }}
{{- if .Values.common.useJobForCleaning }}
useJobForCleaning: "yes"
{{- end}}
{{- if .Values.common.useNodeNameOnly }}
useNodeNameOnly: "true"
{{- end }}
{{- if .Values.common.minResyncPeriod }}
minResyncPeriod: {{ .Values.common.minResyncPeriod | quote }}
{{- end}}
storageClassMap: |
{{- range $classConfig := .Values.classes }}
{{ $classConfig.name }}:
hostDir: {{ $classConfig.hostDir }}
mountDir: {{ if $classConfig.mountDir }} {{- $classConfig.mountDir -}} {{ else }} {{- $classConfig.hostDir -}} {{ end }}
{{- if $classConfig.blockCleanerCommand }}
blockCleanerCommand:
{{- range $val := $classConfig.blockCleanerCommand }}
- "{{ $val -}}"{{- end}}
{{- end }}
{{- if $classConfig.volumeMode }}
volumeMode: {{ $classConfig.volumeMode }}
{{- end }}
{{- if $classConfig.fsType }}
fsType: {{ $classConfig.fsType }}
{{- end }}
{{- if $classConfig.namePattern }}
namePattern: "{{ $classConfig.namePattern }}"
{{- end }}
{{- end }}

View File

@ -0,0 +1,93 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
serviceAccountName: {{ template "provisioner.serviceAccountName" . }}
{{- if .Values.daemonset.priorityClassName }}
priorityClassName: {{.Values.daemonset.priorityClassName}}
{{- end }}
{{- if .Values.daemonset.nodeSelector }}
nodeSelector:
{{ .Values.daemonset.nodeSelector | toYaml | trim | indent 8 }}
{{- end }}
{{- if .Values.daemonset.tolerations }}
tolerations:
{{ .Values.daemonset.tolerations | toYaml | trim | indent 8 }}
{{- end }}
{{- if .Values.daemonset.affinity }}
affinity:
{{ .Values.daemonset.affinity | toYaml | trim | indent 8 }}
{{- end }}
containers:
- image: "{{ .Values.daemonset.image }}"
{{- if .Values.daemonset.imagePullPolicy }}
imagePullPolicy: {{ .Values.daemonset.imagePullPolicy | quote }}
{{- end }}
name: provisioner
securityContext:
privileged: true
{{- if .Values.daemonset.resources }}
resources:
{{ .Values.daemonset.resources | toYaml | trim | indent 12 }}
{{- end }}
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: JOB_CONTAINER_IMAGE
value: "{{ .Values.daemonset.image }}"
{{- if .Values.daemonset.kubeConfigEnv }}
- name: KUBECONFIG
value: {{.Values.daemonset.kubeConfigEnv}}
{{- end }}
{{- if .Values.serviceMonitor.enabled }}
ports:
- containerPort: 8080
name: metrics
{{- end }}
volumeMounts:
- mountPath: /etc/provisioner/config
name: provisioner-config
readOnly: true
- mountPath: /dev
name: provisioner-dev
{{- range $classConfig := .Values.classes }}
- mountPath: {{ if $classConfig.mountDir }} {{- $classConfig.mountDir -}} {{ else }} {{- $classConfig.hostDir -}} {{ end }}
name: {{ $classConfig.name }}
mountPropagation: "HostToContainer"
{{- end }}
volumes:
- name: provisioner-config
configMap:
name: {{ template "provisioner.fullname" . }}-config
- name: provisioner-dev
hostPath:
path: /dev
{{- range $classConfig := .Values.classes }}
- name: {{ $classConfig.name }}
hostPath:
path: {{ $classConfig.hostDir }}
{{- end }}

View File

@ -0,0 +1,36 @@
{{- if .Values.common.rbac.pspEnabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "provisioner.fullname" . }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
allowPrivilegeEscalation: true
allowedHostPaths:
- pathPrefix: /dev
{{- range $classConfig := .Values.classes }}
- pathPrefix: {{ $classConfig.hostDir }}
{{- end }}
fsGroup:
rule: RunAsAny
privileged: true
requiredDropCapabilities:
- ALL
runAsUser:
ranges:
- max: 65535
min: 1
rule: MustRunAs
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- configMap
- secret
- hostPath
{{- end }}

View File

@ -0,0 +1,131 @@
{{- if .Values.common.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "provisioner.fullname" . }}-pv-binding
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ template "provisioner.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: system:persistent-volume-provisioner
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "provisioner.fullname" . }}-node-clusterrole
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "provisioner.fullname" . }}-node-binding
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ template "provisioner.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ template "provisioner.fullname" . }}-node-clusterrole
apiGroup: rbac.authorization.k8s.io
{{- if .Values.common.useJobForCleaning }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "provisioner.fullname" . }}-jobs-role
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
- 'batch'
resources:
- jobs
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "provisioner.fullname" . }}-jobs-rolebinding
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ template "provisioner.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ template "provisioner.fullname" . }}-jobs-role
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- if .Values.common.rbac.pspEnabled }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "provisioner.fullname" . }}-psp-role
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
- policy
resources:
- podsecuritypolicies
resourceNames:
- {{ template "provisioner.fullname" . }}
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "provisioner.fullname" . }}-psp-rolebinding
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ template "provisioner.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ template "provisioner.fullname" . }}-psp-role
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}

View File

@ -0,0 +1,12 @@
{{- if .Values.common.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "provisioner.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,53 @@
{{- if .Values.serviceMonitor.enabled }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: 8080
name: metrics
selector:
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "provisioner.fullname" . }}
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
helm.sh/chart: {{ template "provisioner.chart" . }}
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- range $k, $v := .Values.serviceMonitor.additionalLabels }}
{{ $k }}: {{ $v | quote }}
{{- end }}
spec:
jobLabel: app.kubernetes.io/name
endpoints:
- port: metrics
interval: {{ .Values.serviceMonitor.interval }}
scheme: http
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ toYaml .Values.serviceMonitor.relabelings | indent 4 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "provisioner.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
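Both the metrics Service and the ServiceMonitor above are gated on `serviceMonitor.enabled`. A minimal values sketch to enable scraping; the `release: metrics` label is an assumed example and must match whatever selector your Prometheus Operator installation uses:

serviceMonitor:
  enabled: true
  interval: 30s
  additionalLabels:
    release: metrics   # assumed label, match your Prometheus selector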

View File

@ -0,0 +1,28 @@
{{- $release := .Release }}
{{- $chart := .Chart }}
{{- range $val := .Values.classes }}
{{- if $val.storageClass }}
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: {{ $val.name }}
{{- if kindIs "map" $val.storageClass }}
{{- if $val.storageClass.isDefaultClass }}
annotations:
storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
{{- end }}
labels:
app.kubernetes.io/managed-by: {{ $release.Service | quote }}
app.kubernetes.io/instance: {{ $release.Name | quote }}
helm.sh/chart: {{ replace "+" "_" $chart.Version | printf "%s-%s" $chart.Name }}
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
{{- if kindIs "map" $val.storageClass }}
reclaimPolicy: {{ $val.storageClass.reclaimPolicy | default "Delete" }}
{{- else }}
reclaimPolicy: Delete
{{- end }}
{{- end }}
{{- end }}
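Given a class entry with `storageClass: true`, the non-map branch applies: no default-class annotation is emitted and the reclaim policy falls back to Delete. The loop above would then render roughly the following object (release and chart labels are illustrative):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-disks
  labels:
    app.kubernetes.io/managed-by: "Helm"
    app.kubernetes.io/instance: "RELEASE-NAME"
    helm.sh/chart: provisioner-2.3.4
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete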

View File

@ -0,0 +1,162 @@
#
# Common options.
#
common:
#
# Defines whether to generate rbac roles
#
rbac:
# rbac.create: `true` if rbac resources should be created
create: true
# rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
pspEnabled: false
#
# Defines whether to generate a serviceAccount
#
serviceAccount:
# serviceAccount.create: Whether to create a service account or not
create: true
# serviceAccount.name: The name of the service account to create or use
name: ""
#
# The beta PV.NodeAffinity field is used by default. If running against a
# pre-1.10 k8s version, the `useAlphaAPI` flag must be enabled in the configMap.
#
useAlphaAPI: false
#
# Indicates if PVs should be dependents of the owner Node.
#
setPVOwnerRef: false
#
# The provisioner cleans volumes in-process by default. If set to true, it
# will use Jobs to clean instead.
#
useJobForCleaning: false
#
# The provisioner name contains Node.UID by default. If set to true, the
# provisioner name will only use Node.Name.
#
useNodeNameOnly: false
#
# Resync period in reflectors will be random between minResyncPeriod and
# 2*minResyncPeriod. Default: 5m0s.
#
#minResyncPeriod: 5m0s
#
# Configure storage classes.
#
classes:
- name: fast-disks # Defines the name of the storage class.
# Path on the host where local volumes of this storage class are mounted
# under.
hostDir: /mnt/fast-disks
# Optionally specify the mount path of local volumes. By default, the same
# path as hostDir is used inside the container.
# mountDir: /mnt/fast-disks
# The volume mode of the created PersistentVolume object. Defaults to
# Filesystem if not specified.
volumeMode: Filesystem
# Filesystem type to mount.
# It applies only when the source path is a block device,
# and the desired volume mode is Filesystem.
# Must be a filesystem type supported by the host operating system.
fsType: ext4
# File name pattern to discover. By default, all file names are discovered.
namePattern: "*"
blockCleanerCommand:
# Do a quick reset of the block device during its cleanup.
# - "/scripts/quick_reset.sh"
# or use dd to zero out the block device in two iterations by uncommenting these lines:
# - "/scripts/dd_zero.sh"
# - "2"
# or run the shred utility for 2 iterations:
- "/scripts/shred.sh"
- "2"
# or use the blkdiscard utility by uncommenting the line below.
# - "/scripts/blkdiscard.sh"
# Uncomment to create the storage class object with its default configuration.
# storageClass: true
# Uncomment to create the storage class object and configure it.
# storageClass:
# reclaimPolicy: Delete # Available reclaim policies: Delete/Retain; defaults to Delete.
# isDefaultClass: true # set as the default class
#
# Configure DaemonSet for provisioner.
#
daemonset:
#
# Defines the provisioner's image name, including the container registry.
#
image: quay.io/external_storage/local-volume-provisioner:v2.3.4
#
# Defines the image pull policy; see the Kubernetes documentation for available values.
#
#imagePullPolicy: Always
#
# Defines the name of the Pod Priority Class to use with the provisioner DaemonSet.
#
# Note that if you want to make it critical, specify "system-cluster-critical"
# or "system-node-critical" and deploy in kube-system namespace.
# Ref: https://k8s.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical
#
#priorityClassName: system-node-critical
# If configured, nodeSelector will add a nodeSelector field to the DaemonSet PodSpec.
#
# NodeSelector constraint for local-volume-provisioner scheduling to nodes.
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
#
# If configured, kubeConfigEnv specifies the environment variable that points to the kubeconfig file on the node.
# kubeConfigEnv: KUBECONFIG
#
# List of node labels to be copied to the PVs created by the provisioner, in the following format:
#
# nodeLabels:
# - failure-domain.beta.kubernetes.io/zone
# - failure-domain.beta.kubernetes.io/region
#
# If configured, tolerations will add a toleration field to the DaemonSet PodSpec.
#
# Node tolerations for local-volume-provisioner scheduling to nodes with taints.
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
#
# If configured, affinity will add an affinity field to the DaemonSet PodSpec.
# Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
#
# If configured, resources will set the requests/limits fields on the DaemonSet PodSpec.
# Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
resources: {}
# limits:
# memory: "512Mi"
# cpu: "1000m"
# requests:
# memory: "32Mi"
# cpu: "10m"
#
# Configure Prometheus monitoring
#
serviceMonitor:
enabled: false
## Interval at which Prometheus scrapes the provisioner
interval: 10s
# Namespace Prometheus is installed in; defaults to the release namespace
namespace:
## Defaults to what's used if you follow the CoreOS [Prometheus Install Instructions](https://github.com/coreos/prometheus-operator/tree/master/helm#tldr)
## [Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus/templates/prometheus.yaml#L65)
## [Kube Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/kube-prometheus/values.yaml#L298)
additionalLabels: {}
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
#
# Override the default chart name or release name
#
nameOverride: ""
fullnameOverride: ""
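As a worked example of overriding these defaults, a hypothetical values file could define an additional xfs-backed class that zeroes devices with dd and retains released volumes:

classes:
  - name: slow-disks            # hypothetical class name
    hostDir: /mnt/slow-disks
    volumeMode: Filesystem
    fsType: xfs
    blockCleanerCommand:
      - "/scripts/dd_zero.sh"
      - "2"
    storageClass:
      reclaimPolicy: Retain     # keep PVs after release instead of cleaning them up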

View File

@ -0,0 +1,10 @@
#!/bin/bash
# Fetch the subchart sources until an upstream Helm repository is available
rm -rf charts/local-volume-provisioner && mkdir -p charts/local-volume-provisioner
git clone --depth=1 https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner.git
cp -r ./sig-storage-local-static-provisioner/helm/provisioner/* charts/local-volume-provisioner
rm -rf sig-storage-local-static-provisioner

View File

@ -0,0 +1,12 @@
local-static-provisioner:
common:
namespace: kube-system
classes:
- name: local-sc-xfs
hostDir: /mnt/disks
daemonset:
nodeSelector:
node.kubernetes.io/localVolume: present
prometheus:
operator:
enabled: false
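With the nodeSelector above, the provisioner DaemonSet only schedules onto nodes carrying the matching label, so storage nodes have to be labeled accordingly; a sketch of the expected Node metadata (the node name is a placeholder):

apiVersion: v1
kind: Node
metadata:
  name: worker-1                # placeholder node name
  labels:
    node.kubernetes.io/localVolume: present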

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero ArgoCD Application - Root App of Apps chart of KubeZero
type: application
version: 0.3.5
version: 0.3.10
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -2,7 +2,7 @@ kubezero
========
KubeZero ArgoCD Application - Root App of Apps chart of KubeZero
Current chart version is `0.3.1`
Current chart version is `0.3.10`
Source code can be found [here](https://kubezero.com)
@ -17,12 +17,16 @@ Source code can be found [here](https://kubezero.com)
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| aws-ebs-csi-driver.enabled | bool | `false` | |
| aws-efs-csi-driver.enabled | bool | `false` | |
| calico.enabled | bool | `true` | |
| calico.type | string | `"kustomize"` | |
| calico.values.migration | bool | `false` | |
| cert-manager.enabled | bool | `true` | |
| global.defaultDestination.server | string | `"https://kubernetes.default.svc"` | |
| global.defaultSource.pathPrefix | string | `""` | |
| global.defaultSource.repoURL | string | `"https://github.com/zero-down-time/kubezero"` | |
| global.defaultSource.targetRevision | string | `"HEAD"` | |
| istio.enabled | bool | `false` | |
| kiam.enabled | bool | `false` | |
| local-volume-provisioner.enabled | bool | `false` | |
| platform | string | `"aws"` | |

View File

@ -0,0 +1,6 @@
{{- if index .Values "aws-efs-csi-driver" "enabled" }}
{{ template "kubezero-app.app" dict "root" . "name" "aws-efs-csi-driver" "type" "helm" }}
syncPolicy:
automated:
prune: true
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if .Values.istio.enabled }}
{{ template "kubezero-app.app" dict "root" . "name" "istio" "type" "helm" }}
syncPolicy:
automated:
prune: true
ignoreDifferences:
- group: apiextensions.k8s.io
kind: CustomResourceDefinition
jsonPointers:
- /metadata/labels
- /spec/additionalPrinterColumns
- group: admissionregistration.k8s.io
kind: ValidatingWebhookConfiguration
jsonPointers:
- /webhooks/0/clientConfig/caBundle
- /webhooks/0/failurePolicy
{{- end }}

View File

@ -1,5 +1,5 @@
{{- if index .Values "local-volume-provisioner" "enabled" }}
{{ template "kubezero-app.app" dict "root" . "name" "local-volume-provisioner" "type" "kustomize" }}
{{ template "kubezero-app.app" dict "root" . "name" "local-volume-provisioner" "type" "helm" }}
syncPolicy:
automated:
prune: true

View File

@ -27,3 +27,9 @@ kiam:
aws-ebs-csi-driver:
enabled: true
aws-efs-csi-driver:
enabled: true
istio:
enabled: true

View File

@ -32,3 +32,9 @@ kiam:
aws-ebs-csi-driver:
enabled: false
aws-efs-csi-driver:
enabled: false
istio:
enabled: false

View File

@ -10,7 +10,7 @@ function wait_for() {
$@ && break
[ $TRIES -eq 100 ] && return 1
let TRIES=$TRIES+1
sleep 3
sleep 3
done
}
@ -58,14 +58,18 @@ EOF
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set istio.enabled=false --set prometheus.enabled=false > generated-values.yaml
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
echo "Install Istio / kube-prometheus manually for now, before proceeding! <Any key to continue>"
# Install Istio if enabled, but keep ArgoCD istio support disabled for now in case
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set argo-cd.istio.enabled=false > generated-values.yaml
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
echo "Install kube-prometheus and logging manually for now, before proceeding! <Any key to continue>"
read
# Todo: Now we need to wait until everything is synced and healthy ... argocd CLI or kubectl?
# Wait for aws-ebs or kiam to be fully ready, or for all pods to be running?
# Todo:
# - integrate Istio
# - integrate Prometheus-Grafana
# - integrate ES based logging
# Finally we could enable the actual config and deploy all
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml > generated-values.yaml

View File

@ -10,17 +10,13 @@ kubezero:
migration: {{ default false .Values.calico.migration }}
prometheus: false
# prometheus: {{ .Values.prometheus.enabled }}
{{- if .Values.calico.network }}
network: {{ .Values.calico.network }}
{{- end }}
network: {{ default "vxlan" .Values.calico.network }}
mtu: {{ default "8941" .Values.calico.mtu }}
cert-manager:
enabled: {{ index .Values "cert-manager" "enabled" }}
{{- if not .Values.bootstrap }}
values:
{{- if .Values.bootstrap }}
localCA:
enabled: false
{{- else }}
{{- if .Values.aws }}
{{- if eq .Values.platform "aws" }}
cert-manager:
podAnnotations:
iam.amazonaws.com/role: "{{ index .Values "cert-manager" "IamArn" }}"
@ -36,12 +32,17 @@ kubezero:
{{- . | toYaml | nindent 14 }}
{{- end }}
dns01:
{{- if .Values.aws }}
{{- if eq .Values.platform "aws" }}
route53:
region: {{ .Values.region }}
{{- else }}
{{- with index .Values "cert-manager" "solvers" }}
{{- . | toYaml | nindent 14 }}
{{- end }}
{{- end }}
{{- if .Values.aws }}
# AWS only components
{{- if eq .Values.platform "aws" }}
aws-ebs-csi-driver:
enabled: {{ index .Values "aws-ebs-csi-driver" "enabled" }}
values:
@ -63,6 +64,21 @@ kubezero:
servicemonitor:
enabled: {{ .Values.prometheus.enabled }}
{{- end }}
istio:
enabled: {{ .Values.istio.enabled }}
values:
istiod:
replicaCount: {{ ternary 2 1 .Values.HighAvailableControlplane }}
ingress:
private: {{ .Values.istio.ingress.private }}
type: {{ .Values.istio.ingress.type }}
replicaCount: {{ default 2 .Values.istio.ingress.replicaCount }}
dnsNames:
{{- with .Values.istio.ingress.dnsNames }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- end }}
argo-cd:
@ -74,8 +90,9 @@ argo-cd:
configs:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if not .Values.bootstrap }}
{{- if and ( not .Values.bootstrap ) .Values.istio.enabled }}
istio:
enabled: {{ .Values.istio.enabled }}
gateway: private-ingressgateway.istio-system.svc.cluster.local
{{- with index .Values "argo-cd" "istio" }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
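The istio section of this template reads its settings from the chart's top-level values; a sketch of matching input values, where the type and dnsNames entries are placeholders:

istio:
  enabled: true
  ingress:
    private: true
    type: nlb                   # placeholder, set to whatever the ingress chart expects
    replicaCount: 2
    dnsNames:
      - "*.example.com"         # placeholder domain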

View File

@ -28,3 +28,4 @@ prometheus:
argo-cd:
server: {}
gateway: private-ingressgateway.istio-system.svc.cluster.local