feat: introduce storage module with openEBS LVM as first member

This commit is contained in:
Stefan Reimer 2021-07-21 13:26:56 +02:00
parent 99b42e2781
commit b6c1cefa31
9 changed files with 124 additions and 5 deletions

View File

@ -0,0 +1,18 @@
apiVersion: v2
name: kubezero-storage
description: KubeZero umbrella chart for all things storage, eg. openEBS
type: application
version: 0.1.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  # was "gemini" -- a leftover from the kubezero-timecapsule chart this file
  # was copied from; this chart packages openEBS, not the gemini backup operator
  - openebs
maintainers:
  - name: Quarky9
dependencies:
  # Upstream openEBS umbrella chart; the condition gates the whole dependency
  # and individual sub-components are switched off in values.yaml.
  - name: openebs
    version: 2.11.2
    repository: https://openebs.github.io/charts
    condition: openebs.enabled
kubeVersion: ">= 1.18.0"

View File

@ -0,0 +1,33 @@
# kubezero-timecapsule
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things backup
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 | | |
## Requirements
Kubernetes: `>= 1.18.0`
| Repository | Name | Version |
|------------|------|---------|
| | gemini | 0.0.6 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| gemini.resources.limits.cpu | string | `"400m"` | |
| gemini.resources.limits.memory | string | `"128Mi"` | |
| gemini.resources.requests.cpu | string | `"20m"` | |
| gemini.resources.requests.memory | string | `"32Mi"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.5.0](https://github.com/norwoodj/helm-docs/releases/v1.5.0)

View File

@ -0,0 +1,11 @@
# StorageClass backed by the openEBS LVM LocalPV CSI driver.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-lvm
# Delay volume provisioning until a pod is scheduled, so the logical volume
# is carved out on the node the workload actually lands on.
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
parameters:
  storage: lvm
  # Use any volume group whose name matches "openebs.*" on the chosen node.
  vgpattern: "openebs.*"
  fsType: xfs
provisioner: local.csi.openebs.io

View File

@ -0,0 +1,10 @@
#!/bin/bash
# Chart vendoring helper for kubezero-storage.
set -ex
# Upstream chart version to vendor; matches the openebs dependency
# pinned in Chart.yaml (2.11.2).
export VERSION=2.11.2
# NOTE(review): the commented commands below still reference the gemini
# chart and an istiod patch -- they look like copy-paste leftovers from
# another chart's update script; confirm and replace before re-enabling.
#rm -rf charts/gemini
#helm pull fairwinds-stable/gemini --untar --untardir charts
# Patch for istiod to control plane
#patch -p0 -i run-on-controller.patch --no-backup-if-mismatch

View File

@ -0,0 +1,40 @@
# Values for the kubezero-storage umbrella chart.
# Everything under "openebs:" is passed through to the upstream openEBS chart.
openebs:
  enabled: true

  # Disable the classic openEBS control-plane components -- KubeZero only
  # uses the LVM LocalPV CSI driver (lvm-localpv) for now.
  apiserver:
    enabled: false
  provisioner:
    enabled: false
  localprovisioner:
    enabled: false
  ndm:
    enabled: false
  ndmOperator:
    enabled: false
  snapshotOperator:
    enabled: false
  webhook:
    enabled: false

  # Google Analytics ??
  analytics:
    enabled: false

  # We only use LVM for now
  lvm-localpv:
    enabled: true
    # Node plugin: run only on nodes explicitly labelled for openEBS LVM,
    # and tolerate the KubeZero workergroup taint.
    lvmNode:
      nodeSelector:
        node.kubernetes.io/lvm: "openebs"
      tolerations:
        - key: kubezero-workergroup
          effect: NoSchedule
          operator: Exists
    # Controller: pin to control-plane nodes and tolerate the master taint.
    lvmController:
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      nodeSelector:
        node-role.kubernetes.io/master: ""

View File

@ -186,10 +186,6 @@ function kiam-pre() {
# Post-install hook for the kiam artifact: blocks until the kiam-agent
# daemonset exists and has finished rolling out.
# NOTE(review): per the diff header this commit removes the two namespace
# annotation lines below -- presumably that permission is handled elsewhere
# now; confirm before relying on this hook to grant kiam access.
function kiam-post() {
  # wait_for is a helper defined elsewhere in this script -- polls until
  # the given command succeeds.
  wait_for 'kubectl get daemonset -n kube-system kiam-agent'
  kubectl rollout status daemonset -n kube-system kiam-agent
  # Make sure kube-system and cert-manager are allowed to kiam
  kubectl annotate --overwrite namespace kube-system 'iam.amazonaws.com/permitted=.*'
  kubectl annotate --overwrite namespace cert-manager 'iam.amazonaws.com/permitted=.*CertManagerRole.*'
}

View File

@ -1,6 +1,6 @@
{{- if not .Values.argo }}
{{- $artifacts := list "calico" "cert-manager" "kiam" "aws-node-termination-handler" "aws-ebs-csi-driver" "aws-efs-csi-driver" "local-volume-provisioner" "local-path-provisioner" "istio" "istio-ingress" "metrics" "logging" "argocd" "timecapsule" }}
{{- $artifacts := list "calico" "cert-manager" "kiam" "aws-node-termination-handler" "aws-ebs-csi-driver" "aws-efs-csi-driver" "local-volume-provisioner" "local-path-provisioner" "istio" "istio-ingress" "metrics" "logging" "argocd" "timecapsule" "storage" }}
{{- if .Values.global }}
global:

View File

@ -0,0 +1,7 @@
{{- /* Per-chart value overrides for the storage artifact; intentionally empty for now. */}}
{{- define "storage-values" }}
{{- end }}
{{- /* ArgoCD-specific overrides for the storage artifact; intentionally empty for now. */}}
{{- define "storage-argo" }}
{{- end }}
{{- /* Render the artifact via the shared kubezero-app.app helper (defined in the library chart). */}}
{{ include "kubezero-app.app" . }}

View File

@ -26,6 +26,10 @@ aws-node-termination-handler:
timecapsule:
enabled: false
storage:
enabled: false
crds: true
local-volume-provisioner:
enabled: false