New ArgoCD and bootstrap fixes #4

Merged
stefan merged 4 commits from master into stable 2020-06-30 16:11:30 +00:00
11 changed files with 221 additions and 6 deletions

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
 name: kubezero-argo-cd
-version: 0.3.0
+version: 0.3.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -16,6 +16,6 @@ dependencies:
     version: ">= 0.1.1"
     repository: https://zero-down-time.github.io/kubezero/
   - name: argo-cd
-    version: 2.3.2
+    version: 2.5.0
     repository: https://argoproj.github.io/argo-helm
 kubeVersion: ">= 1.16.0"

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-kiam
 description: KubeZero Umbrella Chart for Kiam
 type: application
-version: 0.2.4
+version: 0.2.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -15,6 +15,6 @@ dependencies:
     version: ">= 0.1.1"
     repository: https://zero-down-time.github.io/kubezero/
   - name: kiam
-    version: 5.7.0
+    version: 5.8.1
     repository: https://uswitch.github.io/kiam-helm-charts/charts/
 kubeVersion: ">= 1.16.0"

@@ -60,3 +60,6 @@ spec:
         - -c
         - kubectl annotate --overwrite namespace kube-system 'iam.amazonaws.com/permitted=.*'
       restartPolicy: Never
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master

deploy/.gitignore (new file)

@@ -0,0 +1 @@
generated-values.yaml

deploy/.helmignore (new file)

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

deploy/Chart.yaml (new file)

@@ -0,0 +1,6 @@
apiVersion: v2
name: deploy
description: "This chart is only used to generate the values.yaml for KubeZero !! Once something like https://github.com/helm/helm/pull/6876 gets merged this will be removed !"
type: application
version: 0.0.1
appVersion: 1.16.0
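
This helper chart is never installed into the cluster; it is only rendered with helm template to turn the cluster configuration into the values file for kubezero-argo-cd. A minimal sketch of that step, mirroring the bootstrap invocation in deploy.sh below (running from the repo root and having values.yaml plus cloudbender.yaml in the current directory are assumptions, not something this PR prescribes):

# assumption: cwd holds values.yaml and cloudbender.yaml, deploy/ is this helper chart
helm template deploy/ -f values.yaml -f cloudbender.yaml --set bootstrap=true > generated-values.yaml
helm install -n argocd kubezero kubezero/kubezero-argo-cd --create-namespace -f generated-values.yaml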

deploy/argocd_password.py (new executable file)

@@ -0,0 +1,13 @@
#!/usr/bin/env python3

import bcrypt
import random
import string

chars_fixed = string.ascii_letters + string.digits
passwd = "".join(random.choice(chars_fixed) for x in range(15))

salt = bcrypt.gensalt()
hashed = bcrypt.hashpw(passwd.encode('utf-8'), salt)

print("{}:{}".format(passwd, hashed.decode('utf-8')))
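
The script prints the plaintext password and its bcrypt hash joined by a colon; deploy.sh below keeps the plaintext only as a comment via ${_argo_passwd%%:*} and stores the hash as argocdServerAdminPassword via ${_argo_passwd##*:} (safe because a bcrypt hash contains no colon). A sketch of a run, with made-up output:

./argocd_password.py
# prints something like:  Ab3xY9kQ2mT7RwZ:$2b$12$<rest of the bcrypt hash>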

deploy/deploy.sh (new executable file)

@@ -0,0 +1,72 @@
#!/bin/bash
set -e

DEPLOY_DIR=$( dirname $( realpath $0 ))

# Waits for max 300s and retries
function wait_for() {
  local TRIES=0
  while true; do
    $@ && break
    [ $TRIES -eq 100 ] && return 1
    let TRIES=$TRIES+1
    sleep 3
  done
}

helm repo add kubezero https://zero-down-time.github.io/kubezero
helm repo update

# Determine if we bootstrap or update
helm list -n argocd -f kubezero -q | grep -q kubezero && rc=$? || rc=$?

if [ $rc -eq 0 ]; then
  helm template $DEPLOY_DIR -f values.yaml -f kubezero.yaml > generated-values.yaml
  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml

else
  # During bootstrap we first generate a minimal values.yaml to prevent various deadlocks

  # Generate ArgoCD password if not in values.yaml yet and add it
  grep -q argocdServerAdminPassword values.yaml && rc=$? || rc=$?
  if [ $rc -ne 0 ]; then
    _argo_date="$(date -u --iso-8601=seconds)"
    _argo_passwd="$($DEPLOY_DIR/argocd_password.py)"

    cat <<EOF >> values.yaml
configs:
  secret:
    # ArgoCD password: ${_argo_passwd%%:*} Please move to secure location !
    argocdServerAdminPassword: "${_argo_passwd##*:}"
    argocdServerAdminPasswordMtime: "$_argo_date"
EOF
  fi

  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set bootstrap=true > generated-values.yaml

  # Deploy initial argo-cd
  helm install -n argocd kubezero kubezero/kubezero-argo-cd --create-namespace -f generated-values.yaml

  # Wait for argocd-server to be running
  kubectl rollout status deployment -n argocd kubezero-argocd-server

  # Now wait for cert-manager to be bootstrapped
  echo "Waiting for cert-manager to be deployed..."
  wait_for kubectl get deployment -n cert-manager cert-manager-webhook 2>/dev/null 1>&2
  kubectl rollout status deployment -n cert-manager cert-manager-webhook

  # Now lets get kiam and cert-manager to work as they depend on each other, keep advanced options still disabled though
  # - istio, prometheus
  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set istio.enabled=false --set prometheus.enabled=false > generated-values.yaml
  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml

  exit 0

  # Todo: Now we need to wait till all is synced and healthy ... argocd cli or kubectl ?
  # Wait for aws-ebs or kiam to be all ready, or all pods running ?

  # Todo:
  # - integrate Istio
  # - integrate Prometheus-Grafana

  # Finally we could enable the actual config and deploy all
  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml > generated-values.yaml
  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
fi
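
A first run against a fresh cluster might look like the following sketch; the per-cluster directory and the checkout path are hypothetical, while values.yaml, cloudbender.yaml and kubezero.yaml are the file names the script actually expects in the current working directory:

cd clusters/test-cluster/            # hypothetical directory holding values.yaml and cloudbender.yaml
/path/to/kubezero/deploy/deploy.sh   # bootstraps ArgoCD; once the release exists, a re-run takes the upgrade path using kubezero.yaml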

@@ -0,0 +1,73 @@
kubezero:
  {{- if .Values.global }}
  globals:
    {{- toYaml .Values.global | nindent 4 }}
  {{- end }}
  calico:
    enabled: {{ .Values.calico.enabled }}
  cert-manager:
    enabled: {{ index .Values "cert-manager" "enabled" }}
    values:
      {{- if .Values.bootstrap }}
      localCA:
        enabled: false
      {{- else }}
      {{- if .Values.aws }}
      cert-manager:
        podAnnotations:
          iam.amazonaws.com/role: "{{ index .Values "cert-manager" "IamArn" }}"
      {{- end }}
      clusterIssuer:
        name: letsencrypt-dns-prod
        server: https://acme-v02.api.letsencrypt.org/directory
        email: {{ index .Values "cert-manager" "email" }}
        solvers:
        - selector:
            dnsZones:
              {{- with index .Values "cert-manager" "dnsZones" }}
              {{- . | toYaml | nindent 14 }}
              {{- end }}
          dns01:
            {{- if .Values.aws }}
            route53:
              region: {{ .Values.region }}
            {{- end }}
  {{- if .Values.aws }}
  aws-ebs-csi-driver:
    enabled: {{ index .Values "aws-ebs-csi-driver" "enabled" }}
    values:
      aws-ebs-csi-driver:
        replicaCount: {{ ternary 2 1 .Values.HighAvailableControlplane }}
        podAnnotations:
          iam.amazonaws.com/role: "{{ index .Values "aws-ebs-csi-driver" "IamArn" }}"
        extraVolumeTags:
          Name: {{ .Values.ClusterName }}
  kiam:
    enabled: {{ .Values.kiam.enabled }}
    values:
      kiam:
        server:
          assumeRoleArn: "{{ .Values.kiam.IamArn }}"
          deployment:
            replicas: {{ ternary 2 1 .Values.HighAvailableControlplane }}
          prometheus:
            servicemonitor:
              enabled: {{ .Values.prometheus.enabled }}
  {{- end }}
  {{- end }}

argo-cd:
  {{- with index .Values "argo-cd" "server" }}
  server:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- with index .Values "argo-cd" "configs" }}
  configs:
    {{- toYaml . | nindent 4 }}
  {{- end }}

{{- if not .Values.bootstrap }}
istio:
  enabled: {{ .Values.istio.enabled }}
  gateway: private-ingressgateway.istio-system.svc.cluster.local
{{- end }}
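
The cluster-specific overrides this template consumes (cloudbender.yaml during bootstrap, kubezero.yaml for later upgrades) are not part of this PR. Purely as a sketch, inferred from the keys read above and using placeholder values, such a file could be created like this:

# hypothetical cloudbender.yaml -- shape inferred from the template above, all values are placeholders
cat > cloudbender.yaml <<'EOF'
region: eu-central-1
cert-manager:
  IamArn: "arn:aws:iam::123456789012:role/example-cert-manager"
  email: admin@example.com
  dnsZones:
  - example.com
aws-ebs-csi-driver:
  IamArn: "arn:aws:iam::123456789012:role/example-ebs-csi"
kiam:
  IamArn: "arn:aws:iam::123456789012:role/example-kiam"
argo-cd:
  server: {}
EOF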

deploy/values.yaml (new file)

@@ -0,0 +1,26 @@
ClusterName: Test-cluster
Domain: example.com

aws: true
HighAvailableControlplane: false

calico:
  enabled: true

cert-manager:
  enabled: true
  IamArn: ""

aws-ebs-csi-driver:
  enabled: true
  IamArn: ""

kiam:
  enabled: true
  IamArn: ""

istio:
  enabled: false

prometheus:
  enabled: false

@@ -17,8 +17,6 @@ helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/
for dir in $(find $SRCROOT/charts -mindepth 1 -maxdepth 1 -type d);
do
  # rm -rf $dir/charts $dir/Chart.lock
  name=$(basename $dir)
  if [ $(helm dep list $dir 2>/dev/null| wc -l) -gt 1 ]