master #3

Merged
stefan merged 31 commits from master into stable 2020-06-29 15:53:02 +00:00
67 changed files with 4759 additions and 228 deletions

View File

@@ -1,2 +1,4 @@
# Ignore sub-charts
charts/*/charts/*
charts/kubezero-lib
deploy

Quickstart.md Normal file
View File

@@ -0,0 +1,87 @@
# Quickstart
---
# CloudBender
## Prepare Config
- check config/kube/kube-control-plane.yaml
- check config/kube/kube-workers.yaml
## Deploy Control Plane
- `cloudbender sync kube-control-plane`
## Get kubectl config
- get the admin.conf from S3 and store it in your local `~/.kube` folder, eg. as sketched below
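A minimal sketch, assuming the AWS CLI is configured and `<BUCKET>/<PATH>` is replaced with wherever your CloudBender stack stores the file:
```
aws s3 cp s3://<BUCKET>/<PATH>/admin.conf ~/.kube/config
chmod 600 ~/.kube/config
```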
## Verify controller nodes
- Verify all controller nodes have the expected version and are *Ready*, eg. via `kubectl get nodes`
## Deploy Worker group
- `cloudbender sync kube-workers`
## Verify all nodes
- Verify all nodes incl. workers have the expected version and are *Ready*, eg. via `kubectl get nodes`
---
# KubeZero
## Prepare Config
- check values.yaml
The easiest way to get the ARNs for the various IAM roles is the CloudBender outputs command:
`cloudbender outputs config/kube-control-plane.yaml`
## Deploy KubeZero Helm chart
`./deploy.sh`
## Verify ArgoCD
At this stage there is no support for any kind of Ingress yet. To reach the ArgoCD API, port-forward from localhost via:
`kubectl port-forward svc/kubezero-argocd-server -n argocd 8080:443`
Next, download the argo-cd CLI; installation details for the various OSes can be found at https://argoproj.github.io/argo-cd/cli_installation/
Finally, log in to argo-cd via `argocd login localhost:8080` using the *admin* user and the password set in values.yaml earlier.
List all Argo applications via: `argocd app list`.
Currently it is very likely that you need to manually trigger sync runs for `cert-manager` as well as `kiam`,
eg. `argocd app sync cert-manager`
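Putting the steps above together, a possible session (app names as shown by `argocd app list`):
```
kubectl port-forward svc/kubezero-argocd-server -n argocd 8080:443 &
argocd login localhost:8080 --username admin
argocd app list
argocd app sync cert-manager kiam
argocd app wait cert-manager kiam --health
```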
# Only proceed further once all Argo Applications show healthy !!
## WIP - not yet integrated into KubeZero
### EFS CSI
To deploy the EFS CSI driver, the backing EFS filesystem needs to be in place ahead of time. This is easy to do by enabling the EFS functionality in the worker CloudBender stack.
- retrieve the EFS filesystem id: `cloudbender outputs config/kube-control-worker.yaml` and look for *EfsFileSystemId*
- update values.yaml in the `aws-efs-csi` artifact folder as well as efs_pv.yaml
- execute `deploy.sh`; the whole sequence is sketched below
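A hedged sketch of the above, assuming the outputs list *EfsFileSystemId* as a key/value pair and the yaml files carry an `<EFS_ID>` placeholder:
```
cd aws-efs-csi   # adjust to wherever the artifact folder lives
EFS_ID=$(cloudbender outputs config/kube-control-worker.yaml | awk '/EfsFileSystemId/ {print $NF}')
sed -i "s/<EFS_ID>/${EFS_ID}/" values.yaml efs_pv.yaml
./deploy.sh
```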
### Istio
Istio is currently pinned to version 1.4.X, as this is the last version supporting installation via helm charts.
Until Istio is integrated into KubeZero and upgraded to 1.6, we have to install it manually.
- adjust values.yaml
- update domain in `ingress-certificate.yaml`
- execute `update.sh`
- execute `deploy.sh`
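Condensed, the manual install looks like this (the artifact folder name is an assumption):
```
cd istio   # adjust to wherever the Istio artifacts live
# after adjusting values.yaml and the domain in ingress-certificate.yaml:
./update.sh
./deploy.sh
```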
### Logging
To deploy fluentbit, the only required adjustment is `fluentd_host=<LOG_HOST>` in the kustomization.yaml; see the sketch after this list.
- deploy the namespace for logging via `deploy.sh`
- deploy fluentbit via `kubectl apply -k fluentbit`
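A minimal sketch, assuming the kustomization.yaml lives in the `fluentbit` folder and `logs.example.com` stands in for your log host:
```
sed -i 's/<LOG_HOST>/logs.example.com/' fluentbit/kustomization.yaml
./deploy.sh                  # creates the logging namespace
kubectl apply -k fluentbit   # deploys fluentbit
```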
### Prometheus / Grafana
The only adjustment required is the ingress routing config in istio-service.yaml. Adjust as needed before executing:
`deploy.sh`
# Demo / own apps
- Add your own application to ArgoCD via the cli, eg.:
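For example, with a hypothetical repository and app name:
```
argocd app create demo-app \
  --repo https://github.com/<you>/<repo>.git \
  --path deploy \
  --dest-server https://kubernetes.default.svc \
  --dest-namespace demo
argocd app sync demo-app
```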

View File

@@ -14,4 +14,3 @@ This also implements the *umbrella chart* pattern in order to inject custom values
## Components
### ArgoCD

View File

@@ -9,4 +9,4 @@ See eg: `https://github.com/kubernetes-sigs/kustomize/issues/1351`
## Upgrade
See: https://docs.projectcalico.org/maintenance/kubernetes-upgrade
`curl https://docs.projectcalico.org/manifests/canal.yaml -O`
`curl https://docs.projectcalico.org/manifests/canal.yaml -O && patch < remove-namespace.patch`

View File

@@ -150,6 +150,8 @@ spec:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
shortNames:
- gnp
---
apiVersion: apiextensions.k8s.io/v1beta1
@@ -238,6 +240,19 @@ spec:
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: kubecontrollersconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: KubeControllersConfiguration
plural: kubecontrollersconfigurations
singular: kubecontrollersconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
@@ -267,6 +282,89 @@ spec:
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# kube-controllers manages hostendpoints.
- apiGroups: ["crd.projectcalico.org"]
resources:
- hostendpoints
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
# KubeControllersConfiguration is where it gets its config
- apiGroups: ["crd.projectcalico.org"]
resources:
- kubecontrollersconfigurations
verbs:
# read its own config
- get
# create a default if none exists
- create
# update status
- update
# watch for changes
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
@@ -479,7 +577,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.13.3
image: calico/cni:v3.14.1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@@ -515,7 +613,7 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.13.3
image: calico/pod2daemon-flexvol:v3.14.1
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
@@ -526,7 +624,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.13.3
image: calico/node:v3.14.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@@ -683,10 +781,64 @@ metadata:
name: canal
---
# Source: calico/templates/calico-etcd-secrets.yaml
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.14.1
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
# Source: calico/templates/calico-kube-controllers.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
---
# Source: calico/templates/calico-etcd-secrets.yaml
---
# Source: calico/templates/calico-typha.yaml

View File

@@ -0,0 +1,50 @@
--- canal.yaml.orig 2020-06-03 15:39:41.972295775 +0100
+++ canal.yaml 2020-06-03 15:39:59.718477177 +0100
@@ -5,7 +5,6 @@
apiVersion: v1
metadata:
name: canal-config
- namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
@@ -536,7 +535,6 @@
apiVersion: apps/v1
metadata:
name: canal
- namespace: kube-system
labels:
k8s-app: canal
spec:
@@ -781,7 +779,6 @@
kind: ServiceAccount
metadata:
name: canal
- namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
@@ -790,7 +787,6 @@
kind: Deployment
metadata:
name: calico-kube-controllers
- namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
@@ -804,7 +800,6 @@
template:
metadata:
name: calico-kube-controllers
- namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
@@ -841,7 +836,6 @@
kind: ServiceAccount
metadata:
name: calico-kube-controllers
- namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: kubezero-app
description: KubeZero ArgoCD Application - Root chart of the KubeZero
type: application
version: 0.2.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/logo_small.png
keywords:
- kubezero
- argocd
- gitops
maintainers:
- name: Quarky9
kubeVersion: ">= 1.16.0"

View File

@@ -1,21 +0,0 @@
kubezeroApp
===========
KubeZero ArgoCD Application - Root chart of the KubeZero
Current chart version is `0.1.4`
Source code can be found [here](https://kubezero.com)
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| calico.enabled | bool | `false` | |
| certManager.enabled | bool | `false` | |
| defaultDestination.server | string | `"https://kubernetes.default.svc"` | |
| defaultSource.pathPrefix | string | `""` | optional path prefix within repoURL to support eg. remote subtrees |
| defaultSource.repoURL | string | `"https://github.com/zero-down-time/kubezero"` | default repository for argocd applications |
| defaultSource.targetRevision | string | `"HEAD"` | default tracking of repoURL |
| localVolumeProvisioner.enabled | bool | `false` | |

View File

@@ -1,3 +0,0 @@
{{- if .Values.calico.enabled }}
{{ template "kubezero.app" dict "root" . "name" "calico" "type" "kustomize" "retain" true }}
{{- end }}

View File

@@ -1,3 +0,0 @@
{{- if index .Values "kiam" "enabled" }}
{{ template "kubezero.app" dict "root" . "name" "kiam" "type" "helm" }}
{{- end }}

View File

@@ -1,3 +0,0 @@
{{- if index .Values "local-volume-provisioner" "enabled" }}
{{ template "kubezero.app" dict "root" . "name" "local-volume-provisioner" "type" "kustomize" }}
{{- end }}

View File

@@ -1,26 +0,0 @@
global:
defaultDestination:
server: https://kubernetes.default.svc
# This repoURL is used as a base for all the applications' repoURLs
# Setting this to eg. a private git repo incl. the use of pathPrefix allows kubezero to be
# integrated into any repository as a git subtree if for example public internet access is unavailable
defaultSource:
# defaultSource.repoURL -- default repository for argocd applications
repoURL: https://github.com/zero-down-time/kubezero
# defaultSource.targetRevision -- default tracking of repoURL
targetRevision: HEAD
# defaultSource.pathPrefix -- optional path prefix within repoURL to support eg. remote subtrees
pathPrefix: ''
calico:
enabled: false
local-volume-provisioner:
enabled: false
cert-manager:
enabled: false
kiam:
enabled: false

View File

@@ -0,0 +1,21 @@
apiVersion: v2
description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
name: kubezero-argo-cd
version: 0.3.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- argocd
- gitops
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
- name: argo-cd
version: 2.3.2
repository: https://argoproj.github.io/argo-helm
kubeVersion: ">= 1.16.0"

View File

@@ -0,0 +1,42 @@
kubezero-argo-cd
================
KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
Current chart version is `0.3.0`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 2.3.2 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.controller.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.installCRDs | bool | `false` | |
| argo-cd.istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| argo-cd.istio.gateway | string | `"ingressgateway.istio-system.svc.cluster.local"` | Name of the Istio gateway to add the VirtualService to |
| argo-cd.redis.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.redis.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.redis.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.repoServer.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.repoServer.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.repoServer.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.server.config.url | string | `"argocd.example.com"` | ArgoCD hostname to be exposed via Istio |
| argo-cd.server.extraArgs[0] | string | `"--insecure"` | |
| argo-cd.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argo-cd.server.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.server.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kubezero.global.defaultDestination | object | `{"server":"https://kubernetes.default.svc"}` | Destination cluster |
| kubezero.global.defaultSource.pathPrefix | string | `""` | optional path prefix within repoURL to support eg. remote subtrees |
| kubezero.global.defaultSource.repoURL | string | `"https://github.com/zero-down-time/kubezero"` | default repository for argocd applications |
| kubezero.global.defaultSource.targetRevision | string | `"HEAD"` | default tracking of repoURL |

View File

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kubezero
namespace: argocd
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
project: kubezero
source:
repoURL: {{ .Values.kubezero.global.defaultSource.repoURL }}
targetRevision: {{ .Values.kubezero.global.defaultSource.targetRevision }}
path: {{ .Values.kubezero.global.defaultSource.pathPrefix}}charts/kubezero
helm:
values: |
{{- toYaml .Values.kubezero | nindent 8 }}
destination:
server: {{ .Values.kubezero.global.defaultDestination.server }}
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: false

View File

@@ -1,18 +1,13 @@
{{- if .Values.istio.enabled }}
{{- if index .Values "argo-cd" "istio" "enabled" }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: argocd-server
labels:
app.kubernetes.io/name: {{ .Chart.Name }}-argocd-virtualservice
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: argocd
app.kubernetes.io/component: server
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
gateways:
- {{ .Values.istio.gateway }}
- {{ index .Values "argo-cd" "istio" "gateway" }}
hosts:
- {{ index .Values "argo-cd" "server" "config" "url" }}
http:

View File

@@ -4,10 +4,7 @@ metadata:
name: kubezero
namespace: argocd
labels:
app.kubernetes.io/name: {{ .Chart.Name }}-project
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
description: KubeZero - ZeroDownTime Kubernetes Platform

View File

@@ -0,0 +1,73 @@
kubezero:
global:
# kubezero.global.defaultDestination -- Destination cluster
defaultDestination:
server: https://kubernetes.default.svc
# This repoURL is used as a base for all the applications' repoURLs
# Setting this to eg. a private git repo incl. the use of pathPrefix allows kubezero to be
# integrated into any repository as a git subtree if for example public internet access is unavailable
defaultSource:
# kubezero.global.defaultSource.repoURL -- default repository for argocd applications
repoURL: https://github.com/zero-down-time/kubezero
# kubezero.global.defaultSource.targetRevision -- default tracking of repoURL
targetRevision: HEAD
# kubezero.global.defaultSource.pathPrefix -- optional path prefix within repoURL to support eg. remote subtrees
pathPrefix: ''
argo-cd:
installCRDs: false
#configs:
# secret:
# `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
# Run Argo on the controllers
controller:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
repoServer:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
server:
config:
# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
url: argocd.example.com
# Rename former https port to grpc, works with istio + insecure
service:
servicePortHttpsName: grpc
extraArgs:
- --insecure
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
redis:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
dex:
enabled: false
istio:
# argo-cd.istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
enabled: false
# argo-cd.istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: ingressgateway.istio-system.svc.cluster.local

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,22 @@
apiVersion: v2
name: kubezero-aws-ebs-csi-driver
description: KubeZero Umbrella Chart for aws-ebs-csi-driver
type: application
version: 0.1.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
sources:
- https://github.com/kubernetes-sigs/aws-ebs-csi-driver
- https://github.com/Zero-Down-Time/kubezero
keywords:
- kubezero
- aws
- ebs
- csi
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
kubeVersion: ">= 1.16.0"

View File

@@ -0,0 +1,38 @@
kubezero-aws-ebs-csi-driver
===========================
KubeZero Umbrella Chart for aws-ebs-csi-driver
Current chart version is `0.1.0`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## IAM Role
If you use kiam or kube2iam and restrict access on the nodes running this controller, please adjust:
```
podAnnotations:
iam.amazonaws.com/role: <ROLE>
```
## Storage Classes
Provides the *ebs-sc-gp2-xfs* storage class for gp2, encrypted and XFS.
This class is also set as default.
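A PVC using this default class might look like the following sketch (applied inline to stay self-contained):
```
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-xfs
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: ebs-sc-gp2-xfs
  resources:
    requests:
      storage: 10Gi
EOF
```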
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| aws-ebs-csi-driver.enableVolumeResizing | bool | `false` | |
| aws-ebs-csi-driver.enableVolumeScheduling | bool | `true` | |
| aws-ebs-csi-driver.enableVolumeSnapshot | bool | `false` | |
| aws-ebs-csi-driver.extraVolumeTags | object | `{}` | Optional tags to be added to each EBS volume |
| aws-ebs-csi-driver.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| aws-ebs-csi-driver.podAnnotations | object | `{}` | iam.amazonaws.com/role: <IAM role ARN> to assume |
| aws-ebs-csi-driver.replicaCount | int | `1` | |
| aws-ebs-csi-driver.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-ebs-csi-driver.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |

View File

@@ -0,0 +1,21 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
## IAM Role
If you use kiam or kube2iam and restrict access on the nodes running this controller, please adjust:
```
podAnnotations:
iam.amazonaws.com/role: <ROLE>
```
## Storage Classes
Provides the *ebs-sc-gp2-xfs* storage class for gp2, encrypted and XFS.
This class is also set as default.
{{ template "chart.valuesSection" . }}

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,16 @@
apiVersion: v1
appVersion: "0.5.0"
name: aws-ebs-csi-driver
description: A Helm chart for AWS EBS CSI Driver
version: 0.3.0
kubeVersion: ">=1.13.0-0"
home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver
sources:
- https://github.com/kubernetes-sigs/aws-ebs-csi-driver
keywords:
- aws
- ebs
- csi
maintainers:
- name: leakingtapan
email: chengpan@amazon.com

View File

@@ -0,0 +1,3 @@
To verify that aws-ebs-csi-driver has started, run:
kubectl get pod -n kube-system -l "app.kubernetes.io/name={{ include "aws-ebs-csi-driver.name" . }},app.kubernetes.io/instance={{ .Release.Name }}"

View File

@@ -0,0 +1,58 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aws-ebs-csi-driver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aws-ebs-csi-driver.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aws-ebs-csi-driver.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aws-ebs-csi-driver.labels" -}}
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
helm.sh/chart: {{ include "aws-ebs-csi-driver.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Convert the `--extra-volume-tags` command line arg from a map.
*/}}
{{- define "aws-ebs-csi-driver.extra-volume-tags" -}}
{{- $result := dict "pairs" (list) -}}
{{- range $key, $value := .Values.extraVolumeTags -}}
{{- $noop := printf "%s=%s" $key $value | append $result.pairs | set $result "pairs" -}}
{{- end -}}
{{- if gt (len $result.pairs) 0 -}}
- --extra-volume-tags={{- join "," $result.pairs -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: ebs.csi.aws.com
spec:
attachRequired: true
podInfoOnMount: false

View File

@@ -0,0 +1,108 @@
# Node Service
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ebs-csi-node
namespace: kube-system
spec:
selector:
matchLabels:
app: ebs-csi-node
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: ebs-csi-node
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.node.podAnnotations }}
annotations: {{ toYaml .Values.node.podAnnotations | nindent 8 }}
{{- end }}
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
{{- with .Values.node.tolerations }}
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: ebs-plugin
securityContext:
privileged: true
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
args:
- node
- --endpoint=$(CSI_ENDPOINT)
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:/csi/csi.sock
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: device-dir
mountPath: /dev
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
- name: node-driver-registrar
image: {{ printf "%s:%s" .Values.sidecars.nodeDriverRegistrarImage.repository .Values.sidecars.nodeDriverRegistrarImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=5
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"]
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-probe
image: {{ printf "%s:%s" .Values.sidecars.livenessProbeImage.repository .Values.sidecars.livenessProbeImage.tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: plugin-dir
mountPath: /csi
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/ebs.csi.aws.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory

View File

@@ -0,0 +1,151 @@
# Controller Service
kind: Deployment
apiVersion: apps/v1
metadata:
name: ebs-csi-controller
namespace: kube-system
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: ebs-csi-controller
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: ebs-csi-controller
app.kubernetes.io/name: {{ include "aws-ebs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
annotations: {{ toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
nodeSelector:
beta.kubernetes.io/os: linux
{{- with .Values.nodeSelector }}
{{ toYaml . | indent 8 }}
{{- end }}
serviceAccountName: ebs-csi-controller-sa
priorityClassName: system-cluster-critical
{{- with .Values.affinity }}
affinity: {{ toYaml . | nindent 8 }}
{{- end }}
tolerations:
- operator: Exists
{{- with .Values.tolerations }}
{{ toYaml . | indent 8 }}
{{- end }}
containers:
- name: ebs-plugin
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- controller
- --endpoint=$(CSI_ENDPOINT)
{{ include "aws-ebs-csi-driver.extra-volume-tags" . }}
- --logtostderr
- --v=5
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: aws-secret
key: key_id
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: aws-secret
key: access_key
optional: true
{{- if .Values.region }}
- name: AWS_REGION
value: {{ .Values.region }}
{{- end }}
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
ports:
- name: healthz
containerPort: 9808
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
failureThreshold: 5
{{- with .Values.resources }}
resources: {{ toYaml . | nindent 12 }}
{{- end }}
- name: csi-provisioner
image: {{ printf "%s:%s" .Values.sidecars.provisionerImage.repository .Values.sidecars.provisionerImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
{{- if .Values.enableVolumeScheduling }}
- --feature-gates=Topology=true
{{- end}}
- --enable-leader-election
- --leader-election-type=leases
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: {{ printf "%s:%s" .Values.sidecars.attacherImage.repository .Values.sidecars.attacherImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --v=5
- --leader-election=true
- --leader-election-type=leases
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{{- if .Values.enableVolumeSnapshot }}
- name: csi-snapshotter
image: {{ printf "%s:%s" .Values.sidecars.snapshotterImage.repository .Values.sidecars.snapshotterImage.tag }}
args:
- --csi-address=$(ADDRESS)
- --leader-election=true
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{{- end }}
{{- if .Values.enableVolumeResizing }}
- name: csi-resizer
image: {{ printf "%s:%s" .Values.sidecars.resizerImage.repository .Values.sidecars.resizerImage.tag }}
imagePullPolicy: Always
args:
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
{{- end }}
- name: liveness-probe
image: {{ printf "%s:%s" .Values.sidecars.livenessProbeImage.repository .Values.sidecars.livenessProbeImage.tag }}
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
volumes:
- name: socket-dir
emptyDir: {}

View File

@@ -0,0 +1,251 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-attacher-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-attacher-role
apiGroup: rbac.authorization.k8s.io
{{- if .Values.enableVolumeSnapshot }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-snapshotter-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-snapshot-controller-binding
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-snapshot-controller-role
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-snapshot-controller-leaderelection
namespace: kube-system
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: snapshot-controller-leaderelection
namespace: kube-system
subjects:
- kind: ServiceAccount
name: ebs-snapshot-controller
namespace: kube-system
roleRef:
kind: Role
name: snapshot-controller-leaderelection
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- if .Values.enableVolumeResizing }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-external-resizer-role
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: ebs-csi-resizer-binding
subjects:
- kind: ServiceAccount
name: ebs-csi-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: ebs-external-resizer-role
apiGroup: rbac.authorization.k8s.io
{{- end}}

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-csi-controller-sa
namespace: kube-system
{{- with .Values.serviceAccount.controller.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ebs-snapshot-controller
namespace: kube-system
{{- with .Values.serviceAccount.snapshot.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if .Values.enableVolumeSnapshot }}
#Snapshot controller
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: ebs-snapshot-controller
namespace: kube-system
spec:
serviceName: ebs-snapshot-controller
replicas: 1
selector:
matchLabels:
app: ebs-snapshot-controller
template:
metadata:
labels:
app: ebs-snapshot-controller
spec:
serviceAccount: ebs-snapshot-controller
containers:
- name: snapshot-controller
image: quay.io/k8scsi/snapshot-controller:v2.0.1
args:
- --v=5
- --leader-election=false
{{- end }}

View File

@@ -0,0 +1,86 @@
# Default values for aws-ebs-csi-driver.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 2
image:
repository: amazon/aws-ebs-csi-driver
tag: "v0.5.0"
pullPolicy: IfNotPresent
sidecars:
provisionerImage:
repository: quay.io/k8scsi/csi-provisioner
tag: "v1.5.0"
attacherImage:
repository: quay.io/k8scsi/csi-attacher
tag: "v1.2.0"
snapshotterImage:
repository: quay.io/k8scsi/csi-snapshotter
tag: "v2.0.1"
livenessProbeImage:
repository: quay.io/k8scsi/livenessprobe
tag: "v1.1.0"
resizerImage:
repository: quay.io/k8scsi/csi-resizer
tag: "v0.3.0"
nodeDriverRegistrarImage:
repository: quay.io/k8scsi/csi-node-driver-registrar
tag: "v1.1.0"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
# True if enable volume scheduling for dynamic volume provisioning
enableVolumeScheduling: false
# True if enable volume resizing
enableVolumeResizing: false
# True if enable volume snapshot
enableVolumeSnapshot: false
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
# Extra volume tags to attach to each dynamically provisioned volume.
# ---
# extraVolumeTags:
# key1: value1
# key2: value2
extraVolumeTags: {}
# AWS region to use. If not specified then the region will be looked up via the AWS EC2 metadata
# service.
# ---
# region: us-east-1
region: ""
node:
podAnnotations: {}
tolerations: []
serviceAccount:
controller:
annotations: {}
snapshot:
annotations: {}

View File

@@ -0,0 +1,10 @@
{{- if index .Values "aws-ebs-csi-driver" "enableVolumeSnapshot" }}
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
name: csi-aws-vsc
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
driver: ebs.csi.aws.com
deletionPolicy: Delete
{{- end }}

View File

@@ -0,0 +1,41 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: ebs-sc-gp2-xfs
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
parameters:
csi.storage.k8s.io/fstype: xfs
type: gp2
encrypted: "true"
{{- if index .Values "aws-ebs-csi-driver" "enableVolumeResizing" }}
allowVolumeExpansion: true
{{- end }}
{{- range .Values.storageClassZones }}
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: ebs-sc-gp2-xfs-{{ . }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
parameters:
csi.storage.k8s.io/fstype: xfs
type: gp2
encrypted: "true"
{{- if index .Values "aws-ebs-csi-driver" "enableVolumeResizing" }}
allowVolumeExpansion: true
{{- end }}
allowedTopologies:
- matchLabelExpressions:
- key: failure-domain.beta.kubernetes.io/zone
values:
- {{ . }}
{{- end }}

View File

@@ -0,0 +1,10 @@
#!/bin/bash
set -ex
REPO="kubernetes-sigs/aws-ebs-csi-driver"
LATEST_RELEASE=$(curl -sL https://api.github.com/repos/${REPO}/releases | grep '"tag_name":' | cut -d'"' -f4 | grep -v -E "(alpha|beta|rc)" | sort -t"." -k 1,1 -k 2,2 -k 3,3 -k 4,4 | tail -n 1)
URL="https://github.com/${REPO}/releases/download/${LATEST_RELEASE}/helm-chart.tgz"
rm -rf charts/aws-ebs-csi-driver
curl -sL "$URL" | tar xz -C charts

View File

@@ -0,0 +1,21 @@
aws-ebs-csi-driver:
replicaCount: 1
enableVolumeScheduling: true
enableVolumeResizing: false
enableVolumeSnapshot: false
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# aws-ebs-csi-driver.podAnnotations -- iam.amazonaws.com/role: <IAM role ARN> to assume
podAnnotations: {}
# iam.amazonaws.com/role: ''
# aws-ebs-csi-driver.extraVolumeTags -- Optional tags to be added to each EBS volume
extraVolumeTags: {}
# Name: KubeZero-Cluster

View File

@@ -2,16 +2,19 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.3.2
version: 0.3.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/logo_small.png
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- cert-manager
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
- name: cert-manager
version: 0.15.0
version: 0.15.1
repository: https://charts.jetstack.io
kubeVersion: ">= 1.16.0"

View File

@@ -0,0 +1,47 @@
kubezero-cert-manager
=====================
KubeZero Umbrella Chart for cert-manager
Current chart version is `0.3.4`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://charts.jetstack.io | cert-manager | 0.15.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## AWS - IAM Role
If you use kiam or kube2iam and restrict access on the nodes running cert-manager, please adjust:
```
cert-manager.podAnnotations:
iam.amazonaws.com/role: <ROLE>
```
## Resolver Secrets
If your resolvers need additional secrets, like CloudFlare API tokens etc., make sure to provide these secrets separately, matching your defined issuers.
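For example, a CloudFlare API token secret might be created like this (hypothetical names; they have to match the `secretRef` of your issuer):
```
kubectl -n kube-system create secret generic cloudflare-api-token \
  --from-literal=api-token=<CF_API_TOKEN>
```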
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.ingressShim.defaultIssuerKind | string | `"ClusterIssuer"` | |
| cert-manager.ingressShim.defaultIssuerName | string | `"letsencrypt-dns-prod"` | |
| cert-manager.installCRDs | bool | `true` | |
| cert-manager.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| cert-manager.podAnnotations | object | `{}` | "iam.amazonaws.com/roleIAM:" role ARN the cert-manager might use via kiam eg."arn:aws:iam::123456789012:role/certManagerRoleArn" |
| cert-manager.prometheus.servicemonitor.enabled | bool | `false` | |
| cert-manager.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.webhook.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| clusterIssuer | object | `{}` | |
| localCA.enabled | bool | `true` | |
| localCA.selfsigning | bool | `true` | |

View File

@@ -0,0 +1,20 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
## AWS - IAM Role
If you use kiam or kube2iam and restrict access on the nodes running cert-manager, please adjust:
```
cert-manager.podAnnotations:
iam.amazonaws.com/role: <ROLE>
```
## Resolver Secrets
If your resolvers need additional secrets, like CloudFlare API tokens etc., make sure to provide these secrets separately, matching your defined issuers.
{{ template "chart.valuesSection" . }}

View File

@@ -7,6 +7,8 @@ kind: Issuer
metadata:
name: kubezero-selfsigning-issuer
namespace: kube-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
selfSigned: {}
---
@@ -15,6 +17,8 @@ kind: Certificate
metadata:
name: kubezero-local-ca
namespace: kube-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
secretName: kubezero-ca-tls
commonName: "kubezero-local-ca"
@@ -31,6 +35,8 @@ kind: Secret
metadata:
name: kubezero-ca-tls
namespace: kube-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
data:
tls.crt: {{ .Values.localCA.ca.crt | b64enc }}
tls.key: {{ .Values.localCA.ca.key | b64enc }}
@@ -42,6 +48,8 @@ kind: Issuer
metadata:
name: kubezero-local-ca-issuer
namespace: kube-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
ca:
secretName: kubezero-ca-tls

View File

@@ -3,6 +3,8 @@ apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: {{ .Values.clusterIssuer.name }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
acme:
server: {{ .Values.clusterIssuer.server }}

View File

@@ -45,5 +45,6 @@ cert-manager:
prometheus:
servicemonitor:
enabled: false
#podAnnotations:
# iam.amazonaws.com/role: "INSERT_CLOUDFORMATION_OUTPUT_CertManagerRoleArn"
# cert-manager.podAnnotations -- "iam.amazonaws.com/roleIAM:" role ARN the cert-manager might use via kiam eg."arn:aws:iam::123456789012:role/certManagerRoleArn"
podAnnotations: {}
# iam.amazonaws.com/role: ""

View File

@@ -2,15 +2,18 @@ apiVersion: v2
name: kubezero-kiam
description: KubeZero Umbrella Chart for Kiam
type: application
version: 0.1.0
version: 0.2.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/logo_small.png
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- kiam
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
- name: kiam
version: 5.7.0
repository: https://uswitch.github.io/kiam-helm-charts/charts/

View File

@@ -0,0 +1,78 @@
kubezero-kiam
=============
KubeZero Umbrella Chart for Kiam
Current chart version is `0.2.4`
Source code can be found [here](https://kubezero.com)
## Chart Requirements
| Repository | Name | Version |
|------------|------|---------|
| https://uswitch.github.io/kiam-helm-charts/charts/ | kiam | 5.7.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## KubeZero default configuration
We run agents on the controllers as well, so that eg. the EBS CSI controller and others can assume IAM roles.
This means we need to run the kiam containers on the controllers using `hostnetwork: true`.
Therefore we also change the default port from 443 to 6444 so as not to collide with the api-server port on the controllers.
Make sure any firewall rules between controllers and workers are adjusted accordingly; a quick check is sketched below.
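A simple reachability check from a worker node (the controller address is a placeholder):
```
nc -zv <controller-ip> 6444
```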
## Kiam Certificates
The required certificates for Kiam server and agents are provided by a local cert-manager, which is configured to have a cluster local self-signing CA as part of the KubeZero platform.
[Kiam TLS Config](https://github.com/uswitch/kiam/blob/master/docs/TLS.md#cert-manager)
[KubeZero cert-manager](../kubezero-cert-manager/README.md)
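To verify the issued certificates, check for the TLS secrets (names per the chart values; kube-system assumed as the release namespace):
```
kubectl -n kube-system get secret kiam-server-tls kiam-agent-tls
```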
## Metadata restrictions
Required for the *csi ebs plugin* and most likely various other services relying on basic AWS metadata.
- `/latest/meta-data/instance-id`
- `/latest/dynamic/instance-identity/document`
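A hedged way to test the restrictions from inside the cluster, using a throwaway curl pod:
```
kubectl run metadata-test --rm -it --restart=Never --image=curlimages/curl --command -- \
  curl -s http://169.254.169.254/latest/meta-data/instance-id
```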
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| kiam.agent.gatewayTimeoutCreation | string | `"5s"` | |
| kiam.agent.host.interface | string | `"cali+"` | |
| kiam.agent.host.iptables | bool | `false` | |
| kiam.agent.image.tag | string | `"v3.6-rc1"` | |
| kiam.agent.log.level | string | `"warn"` | |
| kiam.agent.prometheus.servicemonitor.enabled | bool | `false` | |
| kiam.agent.sslCertHostPath | string | `"/etc/ssl/certs"` | |
| kiam.agent.tlsCerts.caFileName | string | `"ca.crt"` | |
| kiam.agent.tlsCerts.certFileName | string | `"tls.crt"` | |
| kiam.agent.tlsCerts.keyFileName | string | `"tls.key"` | |
| kiam.agent.tlsSecret | string | `"kiam-agent-tls"` | |
| kiam.agent.tolerations[0].effect | string | `"NoSchedule"` | |
| kiam.agent.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kiam.agent.updateStrategy | string | `"RollingUpdate"` | |
| kiam.agent.whiteListRouteRegexp | string | `"^/latest/(meta-data/instance-id|dynamic)"` | |
| kiam.server.assumeRoleArn | string | `""` | kiam server IAM role to assume, required as we run the agents next to the servers normally, eg. arn:aws:iam::123456789012:role/kiam-server-role |
| kiam.server.deployment.enabled | bool | `true` | |
| kiam.server.deployment.replicas | int | `1` | |
| kiam.server.image.tag | string | `"v3.6-rc1"` | |
| kiam.server.log.level | string | `"warn"` | |
| kiam.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| kiam.server.prometheus.servicemonitor.enabled | bool | `false` | |
| kiam.server.service.port | int | `6444` | |
| kiam.server.service.targetPort | int | `6444` | |
| kiam.server.sslCertHostPath | string | `"/etc/ssl/certs"` | |
| kiam.server.tlsCerts.caFileName | string | `"ca.crt"` | |
| kiam.server.tlsCerts.certFileName | string | `"tls.crt"` | |
| kiam.server.tlsCerts.keyFileName | string | `"tls.key"` | |
| kiam.server.tlsSecret | string | `"kiam-server-tls"` | |
| kiam.server.tolerations[0].effect | string | `"NoSchedule"` | |
| kiam.server.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kiam.server.updateStrategy | string | `"RollingUpdate"` | |
| kiam.server.useHostNetwork | bool | `true` | |
## Debugging
- Verify the iptables rules the kiam agent sets on each host:
`iptables -L -t nat -n --line-numbers`
`iptables -t nat -D PREROUTING <wrong rule>`
## Resources
- https://github.com/uswitch/kiam
- https://www.bluematador.com/blog/iam-access-in-kubernetes-kube2iam-vs-kiam

View File

@@ -0,0 +1,36 @@
{{ template "chart.header" . }}
{{ template "chart.description" . }}
{{ template "chart.versionLine" . }}
{{ template "chart.sourceLinkLine" . }}
{{ template "chart.requirementsSection" . }}
## KubeZero default configuration
We run agents on the controllers as well, so that eg. the EBS CSI controller and others can assume IAM roles.
This means we need to run the kiam containers on the controllers using `hostnetwork: true`.
Therefore we also change the default port from 443 to 6444 so as not to collide with the api-server port on the controllers.
Make sure any firewall rules between controllers and workers are adjusted accordingly.
## Kiam Certificates
The required certificates for Kiam server and agents are provided by a local cert-manager, which is configured to have a cluster local self-signing CA as part of the KubeZero platform.
[Kiam TLS Config](https://github.com/uswitch/kiam/blob/master/docs/TLS.md#cert-manager)
[KubeZero cert-manager](../kubezero-cert-manager/README.md)
## Metadata restrictions
Required for the *csi ebs plugin* and most likely various other services relying on basic AWS metadata.
- `/latest/meta-data/instance-id`
- `/latest/dynamic/instance-identity/document`
{{ template "chart.valuesSection" . }}
## Debugging
- Verify the iptables rules the kiam agent sets on each host:
`iptables -L -t nat -n --line-numbers`
`iptables -t nat -D PREROUTING <wrong rule>`
## Resources
- https://github.com/uswitch/kiam
- https://www.bluematador.com/blog/iam-access-in-kubernetes-kube2iam-vs-kiam

File diff suppressed because it is too large

View File

@@ -0,0 +1,32 @@
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: kiam-agent
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
secretName: kiam-agent-tls
issuerRef:
name: kubezero-local-ca-issuer
usages:
- "any"
dnsNames:
- "kiam-agent"
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: kiam-server
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
secretName: kiam-server-tls
issuerRef:
name: kubezero-local-ca-issuer
usages:
- "any"
dnsNames:
- "localhost"
- "kiam-server"
ipAddresses:
- "127.0.0.1"

View File

@@ -0,0 +1,62 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
name: kiam-namespace-annotate
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
name: kiam-namespace-annotate
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- update
- patch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
name: kiam-namespace-annotate
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kiam-namespace-annotate
subjects:
- kind: ServiceAccount
name: kiam-namespace-annotate
namespace: kube-system
---
apiVersion: batch/v1
kind: Job
metadata:
name: kiam-kube-system-ns-annotation
namespace: kube-system
annotations:
argocd.argoproj.io/hook: Sync
argocd.argoproj.io/hook-delete-policy: HookSucceeded
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
template:
spec:
serviceAccountName: kiam-namespace-annotate
containers:
- name: kubectl
image: "bitnami/kubectl:latest"
imagePullPolicy: "IfNotPresent"
command:
- /bin/sh
- -c
- kubectl annotate --overwrite namespace kube-system 'iam.amazonaws.com/permitted=.*'
restartPolicy: Never

View File

@@ -1,15 +1,23 @@
kiam:
server:
# assumeRoleArn: <INSERT_CLOUDFORMATION_OUTPUT_KiamServerRoleArn>
image:
tag: "v3.6-rc1"
# kiam.server.assumeRoleArn -- kiam server IAM role to assume, required as we run the agents next to the servers normally, eg. arn:aws:iam::123456789012:role/kiam-server-role
assumeRoleArn: ''
useHostNetwork: true
sslCertHostPath: /etc/ssl/certs
tlsSecret: kiam-server-tls
tlsCerts:
certFileName: tls.crt
keyFileName: tls.key
caFileName: ca.crt
service:
port: 6444
targetPort: 6444
deployment:
enabled: true
replicas: 2
replicas: 1
updateStrategy: RollingUpdate
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@@ -18,23 +26,33 @@ kiam:
prometheus:
servicemonitor:
enabled: false
# log:
# level: warn
log:
level: warn
agent:
image:
tag: "v3.6-rc1"
gatewayTimeoutCreation: "5s"
updateStrategy: RollingUpdate
# IP tables set on each node at boot, see CloudBender
host:
iptables: true
iptables: false
interface: "cali+"
whiteListRouteRegexp: '^/latest/(meta-data/instance-id|dynamic)'
sslCertHostPath: /etc/ssl/certs
tlsSecret: kiam-agent-tls
tlsCerts:
certFileName: tls.crt
keyFileName: tls.key
caFileName: ca.crt
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
prometheus:
servicemonitor:
enabled: false
# log:
# level: warn
log:
level: warn
# extraEnv:
# - name: GRPC_GO_LOG_SEVERITY_LEVEL
# value: "info"

View File

@ -0,0 +1,12 @@
apiVersion: v2
name: kubezero-lib
description: KubeZero helm library - common helm functions and blocks
type: library
version: 0.1.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
maintainers:
- name: Quarky9
kubeVersion: ">= 1.16.0"

View File

@ -0,0 +1,11 @@
kubezero-lib
============
KubeZero helm library - common helm functions and blocks
Current chart version is `0.1.2`
Source code can be found [here](https://kubezero.com)
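A chart consumes the library by declaring it as a dependency in its `Chart.yaml`, the same stanza the kubezero chart itself uses; a sketch:

```sh
# Append the library dependency to a consuming chart's Chart.yaml
cat >> Chart.yaml <<'EOF'
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.1"
    repository: https://zero-down-time.github.io/kubezero/
EOF
```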

View File

@ -0,0 +1,8 @@
{{- /*
Common set of labels
*/ -}}
{{- define "kubezero-lib.labels" -}}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: kubezero
{{- end -}}

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -1,9 +1,10 @@
apiVersion: v2
description: KubeZero Helm chart to install Zero Down Time Kubernetes platform
name: kubezero
version: 0.2.5
description: KubeZero ArgoCD Application - Root App of Apps chart of KubeZero
type: application
version: 0.3.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/logo_small.png
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- argocd
@ -11,7 +12,7 @@ keywords:
maintainers:
- name: Quarky9
dependencies:
- name: argo-cd
version: 2.3.2
repository: https://argoproj.github.io/argo-helm
- name: kubezero-lib
version: ">= 0.1.1"
repository: https://zero-down-time.github.io/kubezero/
kubeVersion: ">= 1.16.0"

View File

@ -1,8 +1,8 @@
kubezero
========
KubeZero Helm chart to install Zero Down Time Kubernetes platform
KubeZero ArgoCD Application - Root App of Apps chart of KubeZero
Current chart version is `0.2.0`
Current chart version is `0.3.0`
Source code can be found [here](https://kubezero.com)
@ -10,29 +10,18 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 2.2.13 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
## Chart Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.controller.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.installCRDs | bool | `false` | |
| argo-cd.redis.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.redis.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.redis.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.repoServer.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.repoServer.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.repoServer.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| argo-cd.server.config.url | string | `"argocd.example.com"` | ArgoCD hostname to be exposed via Istio |
| argo-cd.server.extraArgs[0] | string | `"--insecure"` | |
| argo-cd.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argo-cd.server.tolerations[0].effect | string | `"NoSchedule"` | |
| argo-cd.server.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| istio.gateway | string | `"ingressgateway.istio-system.svc.cluster.local"` | Name of the Istio gateway to add the VirtualService to |
| kubezero | object | `{}` | Kubezero configuration, values.yaml please see kubezeroApp |
| aws-ebs-csi-driver.enabled | bool | `false` | |
| calico.enabled | bool | `false` | |
| cert-manager.enabled | bool | `false` | |
| global.defaultDestination.server | string | `"https://kubernetes.default.svc"` | |
| global.defaultSource.pathPrefix | string | `""` | |
| global.defaultSource.repoURL | string | `"https://github.com/zero-down-time/kubezero"` | |
| global.defaultSource.targetRevision | string | `"HEAD"` | |
| kiam.enabled | bool | `false` | |
| local-volume-provisioner.enabled | bool | `false` | |
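For a manual bootstrap outside of `deploy.sh`, a hypothetical values override enabling individual components could be rendered and applied like this (file name and flow are illustrative; the keys follow the table above):

```sh
# Enable the optional components for the root app-of-apps
cat > my-values.yaml <<'EOF'
calico:
  enabled: true
cert-manager:
  enabled: true
kiam:
  enabled: true
aws-ebs-csi-driver:
  enabled: true
EOF

# Render the root chart once and hand the result to the cluster;
# from then on ArgoCD reconciles the individual applications
helm template kubezero charts/kubezero -f my-values.yaml | kubectl apply -f -
```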

View File

@ -1,14 +1,11 @@
{{- define "kubezero.app" }}
{{- define "kubezero-app.app" }}
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: {{ .name | lower }}
namespace: argocd
labels:
app.kubernetes.io/name: {{ .name }}
helm.sh/chart: {{ .root.Chart.Name }}-{{ .root.Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .root.Release.Service }}
app.kubernetes.io/part-of: kubezero
{{ include "kubezero-lib.labels" .root | indent 4 }}
{{- if not .retain }}
finalizers:
- resources-finalizer.argocd.argoproj.io
@ -38,5 +35,7 @@ spec:
syncPolicy:
automated:
prune: true
selfHeal: false
{{- if .selfheal }}
selfHeal: true
{{- end }}
{{- end }}

View File

@ -1,32 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kubezero
namespace: argocd
labels:
app.kubernetes.io/name: kubezero
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
project: kubezero
source:
repoURL: {{ .Values.global.defaultSource.repoURL }}
targetRevision: {{ .Values.global.defaultSource.targetRevision }}
path: {{ .Values.global.defaultSource.pathPrefix}}charts/kubezero-app
helm:
values: |
global:
{{- toYaml .Values.global | nindent 10 }}
{{- if .Values.kubezero }}
{{- toYaml .Values.kubezero | nindent 8 }}
{{- end }}
destination:
server: {{ .Values.global.defaultDestination.server }}
namespace: argocd
syncPolicy:
automated:
prune: true
selfHeal: false

View File

@ -0,0 +1,3 @@
{{- if index .Values "aws-ebs-csi-driver" "enabled" }}
{{ template "kubezero-app.app" dict "root" . "name" "aws-ebs-csi-driver" "type" "helm" }}
{{- end }}
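Note the `index` form: it is required because the key `aws-ebs-csi-driver` contains dashes and cannot be accessed as `.Values.aws-ebs-csi-driver`. To render just this template and confirm the toggle works (release name and chart path are illustrative):

```sh
# --show-only (-s) limits output to the single app template
helm template kubezero charts/kubezero-app \
  --set aws-ebs-csi-driver.enabled=true \
  -s templates/aws-ebs-csi-driver.yaml
```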

View File

@ -0,0 +1,3 @@
{{- if .Values.calico.enabled }}
{{ template "kubezero-app.app" dict "root" . "name" "calico" "type" "kustomize" "retain" true }}
{{- end }}

View File

@ -1,10 +1,12 @@
{{- if index .Values "cert-manager" "enabled" }}
{{ template "kubezero.app" dict "root" . "name" "cert-manager" "type" "helm" "namespace" "cert-manager" }}
{{ template "kubezero-app.app" dict "root" . "name" "cert-manager" "type" "helm" "namespace" "cert-manager" "selfheal" "true" }}
---
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager
{{- if index .Values "kiam" "enabled" }}
annotations:
iam.amazonaws.com/permitted: ".*CertManagerRole.*"
{{- end }}
{{- end }}

View File

@ -0,0 +1,3 @@
{{- if index .Values "kiam" "enabled" }}
{{ template "kubezero-app.app" dict "root" . "name" "kiam" "type" "helm" }}
{{- end }}

View File

@ -0,0 +1,3 @@
{{- if index .Values "local-volume-provisioner" "enabled" }}
{{ template "kubezero-app.app" dict "root" . "name" "local-volume-provisioner" "type" "kustomize" }}
{{- end }}

View File

@ -24,3 +24,6 @@ cert-manager:
kiam:
enabled: true
aws-ebs-csi-driver:
enabled: true

View File

@ -13,61 +13,17 @@ global:
# defaultSource.pathPrefix -- optional path prefix within repoURL to support eg. remote subtrees
pathPrefix: ''
# kubezero -- Kubezero configuration, values.yaml please see kubezeroApp
kubezero: {}
calico:
enabled: true
argo-cd:
installCRDs: false
cert-manager:
enabled: true
#configs:
# secret:
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
# Run Argo on the controllers
controller:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
repoServer:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
server:
config:
# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
url: argocd.example.com
# Rename former https port to grpc, works with istio + insecure
service:
servicePortHttpsName: grpc
extraArgs:
- --insecure
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
redis:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
dex:
enabled: false
istio:
# istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
local-volume-provisioner:
enabled: false
kiam:
enabled: false
aws-ebs-csi-driver:
enabled: false
# istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: ingressgateway.istio-system.svc.cluster.local

View File

@ -17,7 +17,7 @@ helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/
for dir in $(find $SRCROOT/charts -mindepth 1 -maxdepth 1 -type d);
do
rm -rf $dir/charts $dir/Chart.lock
# rm -rf $dir/charts $dir/Chart.lock
name=$(basename $dir)