feat: release kubezero 1.20.11-4

Stefan Reimer 2021-11-11 14:53:23 +01:00
parent 7980f88f8a
commit c1758008d9
14 changed files with 80 additions and 250 deletions


@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Bootstrap and ArgoCD Root App of Apps chart
 type: application
-version: 1.20.8-14
+version: 1.20.11-4
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:


@@ -1,6 +1,6 @@
 # kubezero
-![Version: 1.20.8-13](https://img.shields.io/badge/Version-1.20.8--13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.20.11-4](https://img.shields.io/badge/Version-1.20.11--4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero - Bootstrap and ArgoCD Root App of Apps chart
@@ -27,28 +27,23 @@ Kubernetes: `>= 1.18.0`
 | HighAvailableControlplane | bool | `false` | |
 | addons.enabled | bool | `false` | |
 | addons.targetRevision | string | `"0.1.0"` | |
-| argocd.crds | bool | `true` | |
 | argocd.enabled | bool | `false` | |
 | argocd.istio.enabled | bool | `false` | |
 | argocd.namespace | string | `"argocd"` | |
-| argocd.targetRevision | string | `"0.8.4"` | |
+| argocd.targetRevision | string | `"0.8.7"` | |
-| aws-ebs-csi-driver.crds | bool | `true` | |
 | aws-ebs-csi-driver.enabled | bool | `false` | |
 | aws-ebs-csi-driver.targetRevision | string | `"0.6.4"` | |
 | aws-efs-csi-driver.enabled | bool | `false` | |
 | aws-efs-csi-driver.targetRevision | string | `"0.4.2"` | |
-| calico.crds | bool | `true` | |
 | calico.enabled | bool | `false` | |
 | calico.retain | bool | `true` | |
 | calico.targetRevision | string | `"0.2.2"` | |
-| cert-manager.crds | bool | `true` | |
 | cert-manager.enabled | bool | `false` | |
 | cert-manager.namespace | string | `"cert-manager"` | |
 | cert-manager.targetRevision | string | `"0.7.3"` | |
 | istio-ingress.enabled | bool | `false` | |
 | istio-ingress.namespace | string | `"istio-ingress"` | |
 | istio-ingress.targetRevision | string | `"0.7.5"` | |
-| istio.crds | bool | `true` | |
 | istio.enabled | bool | `false` | |
 | istio.namespace | string | `"istio-system"` | |
 | istio.targetRevision | string | `"0.7.5"` | |
@@ -58,17 +53,14 @@ Kubernetes: `>= 1.18.0`
 | kubezero.gitSync | object | `{}` | |
 | kubezero.repoURL | string | `"https://zero-down-time.github.io/kubezero"` | |
 | kubezero.server | string | `"https://kubernetes.default.svc"` | |
-| logging.crds | bool | `true` | |
 | logging.enabled | bool | `false` | |
 | logging.namespace | string | `"logging"` | |
-| logging.targetRevision | string | `"0.7.14"` | |
+| logging.targetRevision | string | `"0.7.16"` | |
-| metrics.crds | bool | `true` | |
 | metrics.enabled | bool | `false` | |
 | metrics.istio.grafana | object | `{}` | |
 | metrics.istio.prometheus | object | `{}` | |
 | metrics.namespace | string | `"monitoring"` | |
 | metrics.targetRevision | string | `"0.5.4"` | |
-| storage.crds | bool | `true` | |
 | storage.enabled | bool | `false` | |
 | storage.targetRevision | string | `"0.3.2"` | |


@@ -5,7 +5,6 @@ ACTION=$1
 ARTIFACTS=($(echo $2 | tr "," "\n"))
 CLUSTER=$3
 LOCATION=${4:-""}
-KUBEZERO_VERSION=${5:-""}

 which yq || { echo "yq not found!"; exit 1; }
 which helm || { echo "helm not found!"; exit 1; }
@@ -16,8 +15,6 @@ echo $helm_version | grep -qe "^v3.[5-9]" || { echo "Helm version >= 3.5 require
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1"
 KUBE_VERSION="--kube-version $(kubectl version -o json | jq -r .serverVersion.gitVersion)"

-[ -n "$KUBEZERO_VERSION" ] && KUBEZERO_VERSION="--version $KUBEZERO_VERSION --devel"

 TMPDIR=$(mktemp -d kubezero.XXX)
 [ -z "$DEBUG" ] && trap 'rm -rf $TMPDIR' ERR EXIT
@@ -61,17 +58,21 @@ function delete_ns() {
 # Extract crds via helm calls and apply delta=crds only
 function _crds() {
-  helm template $(chart_location $chart) -n $namespace --name-template $release --skip-crds --set ${release}.installCRDs=false -f $TMPDIR/values.yaml $API_VERSIONS $KUBE_VERSION > $TMPDIR/helm-no-crds.yaml
-  helm template $(chart_location $chart) -n $namespace --name-template $release --include-crds --set ${release}.installCRDs=true -f $TMPDIR/values.yaml $API_VERSIONS $KUBE_VERSION > $TMPDIR/helm-crds.yaml
+  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds --set ${module}.installCRDs=false -f $TMPDIR/values.yaml $API_VERSIONS $KUBE_VERSION > $TMPDIR/helm-no-crds.yaml
+  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds --set ${module}.installCRDs=true -f $TMPDIR/values.yaml $API_VERSIONS $KUBE_VERSION > $TMPDIR/helm-crds.yaml
   diff -e $TMPDIR/helm-no-crds.yaml $TMPDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $TMPDIR/crds.yaml
-  [ -s $TMPDIR/crds.yaml ] && kubectl apply -f $TMPDIR/crds.yaml
+
+  # Only apply if there are actually any crds
+  if [ -s $TMPDIR/crds.yaml ]; then
+    kubectl apply -f $TMPDIR/crds.yaml
+  fi
 }

 # helm template | kubectl apply -f -
 # confine to one namespace if possible
 function apply(){
-  helm template $(chart_location $chart) -n $namespace --name-template $release --skip-crds -f $TMPDIR/values.yaml $API_VERSIONS $KUBE_VERSION $@ > $TMPDIR/helm.yaml
+  helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $TMPDIR/values.yaml $API_VERSIONS $KUBE_VERSION $@ > $TMPDIR/helm.yaml

   # If resources are in more than ONE $namespace, apply without restrictions
   nr_ns=$(grep -e '^  namespace:' $TMPDIR/helm.yaml | sed "s/\"//g" | sort | uniq | wc -l)
@@ -85,26 +86,30 @@ function apply(){
 function _helm() {
   local action=$1
+  local module=$2

-  local chart="kubezero-$2"
-  local release=$2
-  local namespace=$(get_namespace $2)
+  local chart="kubezero-${module}"
+  local namespace=$(yq r $TMPDIR/kubezero/templates/${module}.yaml spec.destination.namespace)
+  local targetRevision="--version $(yq r $TMPDIR/kubezero/templates/${module}.yaml spec.source.targetRevision)"
+
+  yq r $TMPDIR/kubezero/templates/${module}.yaml 'spec.source.helm.values' > $TMPDIR/values.yaml

   if [ $action == "crds" ]; then
-    declare -F ${release}-crds && ${release}-crds
-    declare -F ${release}-crds || _crds
+    # Allow custom CRD handling
+    declare -F ${module}-crds && ${module}-crds || _crds

   elif [ $action == "apply" ]; then
     # namespace must exist prior to apply
     create_ns $namespace

     # Optional pre hook
-    declare -F ${release}-pre && ${release}-pre
+    declare -F ${module}-pre && ${module}-pre

     apply

     # Optional post hook
-    declare -F ${release}-post && ${release}-post
+    declare -F ${module}-post && ${module}-post

   elif [ $action == "delete" ]; then
     apply
@@ -117,39 +122,6 @@ function _helm() {
 }

-function is_enabled() {
-  local chart=$1
-
-  local enabled=$(yq r $TMPDIR/kubezero.yaml ${chart}.enabled)
-
-  if [ "$enabled" == "true" ]; then
-    # slice values for this chart only from kubezero.yaml
-    yq r $TMPDIR/kubezero.yaml ${chart}.values > $TMPDIR/values.yaml
-    return 0
-  fi
-  return 1
-}
-
-function has_crds() {
-  local chart=$1
-
-  local enabled=$(yq r $TMPDIR/kubezero.yaml ${chart}.crds)
-
-  [ "$enabled" == "true" ] && return 0
-  return 1
-}
-
-function get_namespace() {
-  local namespace=$(yq r $TMPDIR/kubezero.yaml ${1}.namespace)
-  [ -z "$namespace" ] && echo "kube-system" || echo $namespace
-}
-
-function update_kubezero_argo() {
-  helm template $(chart_location kubezero) -f ${VALUES%%,} --set installKubeZero=true $KUBEZERO_VERSION > $TMPDIR/kubezero-argocd.yaml
-  kubectl apply -f $TMPDIR/kubezero-argocd.yaml
-}
-
 ################
 # cert-manager #
 ################
@@ -193,42 +165,37 @@ function metrics-pre() {
 }

+##########
 ## MAIN ##
-# First lets generate kubezero.yaml, either plain values.yaml or application.yaml for ArgoCD
-if [ -f $CLUSTER/kubezero/application.yaml ]; then
-  yq r $CLUSTER/kubezero/application.yaml 'spec.source.helm.values' > $TMPDIR/_argovalues.yaml
-  VALUES=$TMPDIR/_argovalues.yaml
-else
-  VALUES="$(find $CLUSTER -name '*.yaml' | sort | tr '\n' ',')"
+##########
+if [ ! -f $CLUSTER/kubezero/application.yaml ]; then
+  echo "Cannot find cluster config!"
+  exit 1
 fi

-helm template $(chart_location kubezero) -f ${VALUES%%,} $KUBEZERO_VERSION > $TMPDIR/kubezero.yaml
+KUBEZERO_VERSION=$(yq r $CLUSTER/kubezero/application.yaml 'spec.source.targetRevision')
+
+# Extract all kubezero values from argo app
+yq r $CLUSTER/kubezero/application.yaml 'spec.source.helm.values' > $TMPDIR/values.yaml
+
+# Render all enabled Kubezero modules
+helm template $(chart_location kubezero) -f $TMPDIR/values.yaml --version $KUBEZERO_VERSION --devel --output-dir $TMPDIR

-# Resolve all the all enabled artifacts in order of their appearance
+# Resolve all the all enabled artifacts
 if [ ${ARTIFACTS[0]} == "all" ]; then
-  ARTIFACTS=($(yq r -p p $TMPDIR/kubezero.yaml "*.enabled" | awk -F "." '{print $1}'))
+  ARTIFACTS=($(ls $TMPDIR/kubezero/templates | sed -e 's/.yaml//g'))
 fi

 echo "Artifacts: ${ARTIFACTS[@]}"

-if [ $1 == "deploy" ]; then
+if [ $1 == "apply" -o $1 == "crds" ]; then
   for t in ${ARTIFACTS[@]}; do
-    is_enabled $t && _helm apply $t || true
-  done
-
-# If artifact enabled and has crds install
-elif [ $1 == "crds" ]; then
-  for t in ${ARTIFACTS[@]}; do
-    is_enabled $t && has_crds $t && _helm crds $t || true
+    _helm $1 $t || true
   done

 # Delete in reverse order, continue even if errors
 elif [ $1 == "delete" ]; then
   set +e
   for (( idx=${#ARTIFACTS[@]}-1 ; idx>=0 ; idx-- )) ; do
-    is_enabled ${ARTIFACTS[idx]} && _helm delete ${ARTIFACTS[idx]} || true
+    _helm delete ${ARTIFACTS[idx]} || true
   done
-
-# Update ArgoCD Kubezero app
-elif [ $1 == "argo" -a $2 == 'kubezero' ]; then
-  update_kubezero_argo
 fi
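
For orientation only, not part of this commit: after the refactor the script expects an action, a comma-separated module list (or "all"), and a cluster directory that contains kubezero/application.yaml. A minimal invocation sketch, assuming the script is called bootstrap.sh and the cluster config lives under clusters/mycluster (both placeholders):

    # hypothetical invocations; script name and cluster path are examples only
    ./bootstrap.sh crds all clusters/mycluster                 # extract and apply CRDs for every rendered module
    ./bootstrap.sh apply metrics,logging clusters/mycluster    # template and apply the selected modules, in order
    ./bootstrap.sh delete all clusters/mycluster               # delete all modules in reverse order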


@@ -1,13 +0,0 @@
-apiVersion: v2
-name: kubezero-git-sync
-description: KubeZero Git Sync Argo Application to track cluster values via git
-type: application
-version: 0.1
-home: https://kubezero.com
-icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
-keywords:
-  - kubezero
-  - argocd
-maintainers:
-  - name: Quarky9
-kubeVersion: ">= 1.18.0"


@@ -1,26 +0,0 @@
-# kubezero-git-sync
-
-![Version: 0.1](https://img.shields.io/badge/Version-0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
-
-KubeZero Git Sync Argo Application to track cluster values via git
-
-**Homepage:** <https://kubezero.com>
-
-## Maintainers
-
-| Name | Email | Url |
-| ---- | ------ | --- |
-| Quarky9 | | |
-
-## Requirements
-
-Kubernetes: `>= 1.18.0`
-
-## Values
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| kubezero.version | string | `"1.20.8-9"` | |
-
-----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.5.0](https://github.com/norwoodj/helm-docs/releases/v1.5.0)


@@ -1,29 +0,0 @@
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: kubezero
-  namespace: argocd
-spec:
-  project: kubezero
-
-  source:
-    repoURL: https://zero-down-time.github.io/kubezero
-    chart: kubezero
-    targetRevision: {{ .Values.kubezero.version }}
-    helm:
-      parameters:
-        # We use this to detect if we are called from ArgoCD
-        - name: argocdAppName
-          value: $ARGOCD_APP_NAME
-        # This breaks the recursion, otherwise we install another kubezero project and app
-        - name: installKubeZero
-          value: "false"
-      values: |
-        {{- toYaml .Values | nindent 8 }}
-
-  destination:
-    server: https://kubernetes.default.svc
-    namespace: argocd
-
-  syncPolicy:
-    automated:
-      prune: true


@@ -1,2 +0,0 @@
-kubezero:
-  version: 1.20.8-9


@@ -1,7 +1,7 @@
 {{- define "kubezero-app.app" }}
 {{- $name := regexReplaceAll "kubezero/templates/([a-z-]*)..*" .Template.Name "${1}" }}
-{{- if and .Values.argocdAppName ( index .Values $name "enabled" ) }}
+{{- if index .Values $name "enabled" }}
 apiVersion: argoproj.io/v1alpha1
 kind: Application
 metadata:


@ -13,10 +13,47 @@ argo-cd:
{{- with index .Values "argocd" "server" }} {{- with index .Values "argocd" "server" }}
{{- toYaml . | nindent 4 }} {{- toYaml . | nindent 4 }}
{{- end }} {{- end }}
additionalProjects:
- name: kubezero
namespace: argocd
description: KubeZero - ZeroDownTime Kubernetes Platform
sourceRepos:
- {{ .Values.kubezero.repoURL }}
{{- with .Values.kubezero.gitSync.repoURL }}
- {{ . }}
{{- end }}
destinations:
- namespace: '*'
server: https://kubernetes.default.svc
clusterResourceWhitelist:
- group: '*'
kind: '*'
additionalApplications:
- name: kubezero-git-sync
namespace: argocd
project: kubezero
source:
repoURL: {{ .Values.kubezero.gitSync.repoURL }}
targetRevision: {{ .Values.kubezero.gitSync.targetRevision }}
path: {{ .Values.kubezero.gitSync.path }}
directory:
recurse: true
destination:
server: https://kubernetes.default.svc
namespace: argocd
{{- with .Values.kubezero.syncPolicy }}
syncPolicy:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with index .Values "argocd" "configs" }} {{- with index .Values "argocd" "configs" }}
configs: configs:
{{- toYaml . | nindent 4 }} {{- toYaml . | nindent 4 }}
{{- end }} {{- end }}
{{- if and ( index .Values "argocd" "istio" "enabled" ) .Values.istio.enabled }} {{- if and ( index .Values "argocd" "istio" "enabled" ) .Values.istio.enabled }}
istio: istio:
{{- with index .Values "argocd" "istio" }} {{- with index .Values "argocd" "istio" }}
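
For illustration only, not part of this commit: the additionalProjects/additionalApplications block above is driven by the kubezero.gitSync and kubezero.syncPolicy values. A minimal sketch of what a cluster might set, with placeholder repository, revision, and path:

    kubezero:
      gitSync:
        repoURL: https://git.example.com/myorg/cluster-config.git   # placeholder
        targetRevision: HEAD                                        # placeholder
        path: clusters/mycluster                                    # placeholder
      syncPolicy:
        automated:
          prune: true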


@@ -1,17 +0,0 @@
-{{- if and ( not .Values.argocdAppName ) ( not .Values.installKubeZero ) }}
-# if no ArgoCD is used, only render the global values.yaml for all kubezero modules
-{{- $artifacts := list "addons" "calico" "cert-manager" "kiam" "aws-ebs-csi-driver" "aws-efs-csi-driver" "istio" "istio-ingress" "metrics" "logging" "argocd" "storage" }}
-
-{{- range $artifacts }}
-{{- if index $.Values . }}
-{{ . }}:
-  enabled: {{ index $.Values . "enabled" }}
-  namespace: {{ default "kube-system" ( index $.Values . "namespace" ) }}
-  crds: {{ default false ( index $.Values . "crds" ) }}
-  values:
-    {{- include (print . "-values") $ | nindent 4 }}
-{{- end }}
-{{- end }}
-{{- end }}


@@ -1,71 +0,0 @@
-{{- if .Values.installKubeZero }}
-# Add KubeZero app and project
-apiVersion: argoproj.io/v1alpha1
-kind: AppProject
-metadata:
-  name: kubezero
-  namespace: argocd
-  labels:
-    {{- include "kubezero-lib.labels" . | nindent 4 }}
-spec:
-  description: KubeZero - ZeroDownTime Kubernetes Platform
-
-  # Add the git repo used for git-sync
-  sourceRepos:
-  - {{ .Values.kubezero.repoURL }}
-  {{ with .Values.kubezero.gitSync.repoURL }}
-  - {{ . }}
-  {{- end }}
-
-  # platform namespaces in the local cluster
-  destinations:
-  - namespace: argocd
-    server: https://kubernetes.default.svc
-  - namespace: kube-system
-    server: https://kubernetes.default.svc
-  - namespace: cert-manager
-    server: https://kubernetes.default.svc
-  - namespace: istio-system
-    server: https://kubernetes.default.svc
-  - namespace: istio-ingress
-    server: https://kubernetes.default.svc
-  - namespace: monitoring
-    server: https://kubernetes.default.svc
-  - namespace: elastic-system
-    server: https://kubernetes.default.svc
-  - namespace: logging
-    server: https://kubernetes.default.svc
-
-  clusterResourceWhitelist:
-  - group: '*'
-    kind: '*'
-
----
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: kubezero-git-sync
-  namespace: argocd
-  labels:
-    {{- include "kubezero-lib.labels" . | nindent 4 }}
-spec:
-  project: kubezero
-
-  source:
-    repoURL: {{ .Values.kubezero.gitSync.repoURL }}
-    targetRevision: {{ .Values.kubezero.gitSync.targetRevision }}
-    path: {{ .Values.kubezero.gitSync.path }}
-    directory:
-      recurse: true
-
-  destination:
-    server: https://kubernetes.default.svc
-    namespace: argocd
-
-  {{- with .Values.kubezero.syncPolicy }}
-  syncPolicy:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-{{- end }}


@@ -12,13 +12,11 @@ addons:
 calico:
   enabled: false
-  crds: true
   retain: true
   targetRevision: 0.2.2

 cert-manager:
   enabled: false
-  crds: true
   namespace: cert-manager
   targetRevision: 0.7.3
@@ -28,12 +26,10 @@ kiam:
 storage:
   enabled: false
-  crds: true
   targetRevision: 0.3.2

 aws-ebs-csi-driver:
   enabled: false
-  crds: true
   targetRevision: 0.6.4

 aws-efs-csi-driver:
@@ -42,7 +38,6 @@ aws-efs-csi-driver:
 istio:
   enabled: false
-  crds: true
   namespace: istio-system
   targetRevision: 0.7.5
@@ -53,7 +48,6 @@ istio-ingress:
 metrics:
   enabled: false
-  crds: true
   namespace: monitoring
   targetRevision: 0.5.4
   istio:
@@ -62,14 +56,12 @@ metrics:
 logging:
   enabled: false
-  crds: true
   namespace: logging
   targetRevision: 0.7.16

 argocd:
   enabled: false
-  crds: true
   namespace: argocd
-  targetRevision: 0.8.4
+  targetRevision: 0.8.7
   istio:
     enabled: false