feat: various tweaks for new ZDT CI flow, version bumps
commit 69106a0391 (parent ec0b6c6ce9)
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.2.4
+version: 0.3.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -0,0 +1,30 @@
+{{- if .Values.awsController.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kubezero-aws-controller
+  namespace: kube-system
+  labels:
+    app: kubezero-aws-controller
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kubezero-aws-controller
+  template:
+    metadata:
+      labels:
+        app: kubezero-aws-controller
+    spec:
+      containers:
+      - name: kubezero-aws-controller
+        image: "{{ .Values.awsController.image.name }}:{{ .Values.awsController.image.tag }}"
+        imagePullPolicy: Always
+      serviceAccountName: kubezero-aws-controller
+      hostNetwork: true
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+{{- end }}
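
To sanity-check the new controller manifests, the chart can be rendered locally. A minimal sketch, assuming the chart sits at charts/kubezero-addons and this template is templates/awsController/deployment.yaml; the image name and tag mirror the awsController Makefile further down:

    # Render just the awsController Deployment with the feature enabled
    helm template kubezero-addons ./charts/kubezero-addons \
      --set awsController.enabled=true \
      --set awsController.image.name=public.ecr.aws/zero-downtime/kubezero-controller \
      --set awsController.image.tag=v1.0.6-aws \
      --show-only templates/awsController/deployment.yaml
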
charts/kubezero-addons/templates/awsController/rbac.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
+{{- if .Values.awsController.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kubezero-aws-controller
+  namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kubezero-aws-controller
+rules:
+- apiGroups: [""]
+  resources: ["pods", "nodes"]
+  verbs: ["*"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubezero-aws-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubezero-aws-controller
+subjects:
+- kind: ServiceAccount
+  name: kubezero-aws-controller
+  namespace: kube-system
+{{- end }}
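
Once applied, the ServiceAccount's effective permissions can be spot-checked with kubectl impersonation; a small sketch:

    # Both should print "yes" given the ClusterRole above (pods/nodes, all verbs)
    kubectl auth can-i list nodes \
      --as=system:serviceaccount:kube-system:kubezero-aws-controller
    kubectl auth can-i delete pods --all-namespaces \
      --as=system:serviceaccount:kube-system:kubezero-aws-controller
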
@@ -15,7 +15,7 @@ spec:
     spec:
       hostNetwork: true
      containers:
-      - image: public.ecr.aws/zero-downtime/fuse-device-plugin:v1.0-r1
+      - image: public.ecr.aws/zero-downtime/fuse-device-plugin:v1.1.0
        # imagePullPolicy: Always
        name: fuse-device-plugin
        securityContext:
@@ -9,6 +9,13 @@ clusterBackup:
   password: ""
   extraEnv: []
 
+tenjin:
+  enabled: false
+
+  image:
+    name: public.ecr.aws/zero-downtime/kubezero-tenjin
+    tag: v0.1.0
+
 aws-node-termination-handler:
   enabled: false
 
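
The new tenjin block ships disabled; it can be toggled per cluster at install time instead of editing the committed values. A sketch, with the release name and namespace as placeholders:

    # Enable tenjin without touching values.yaml
    helm upgrade --install kubezero-addons ./charts/kubezero-addons \
      --namespace kube-system \
      --set tenjin.enabled=true \
      --set tenjin.image.tag=v0.1.0
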
@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
 name: kubezero-argocd
-version: 0.9.2
+version: 0.9.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -5,7 +5,7 @@ metadata:
   name: argocd-server
   namespace: {{ $.Release.Namespace }}
   labels:
-{{ include "kubezero-lib.labels" . | indent 4 }}
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
 spec:
   gateways:
     - {{ .Values.istio.gateway }}
@@ -32,9 +32,7 @@ argo-cd:
       gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
       gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
       gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
-      [git.zero-downtime.net]:22000 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6C8EjqbFGTU/HJhwed7CSVd5o4cusQk++GnQb4gZ9RGySsm/Gfi2VtcphMQcwzQYfjwlQAcMFikQYTD0XjEqmxQ17Q0+XRdLr6svWMXLuvqkWIvke+b1AAnJhNj9dZl7jSZcLgZedUEAMIMEd7dQC+ggrBF0dzMxf+PmMwtPtpkeZ53+JyHoswtolV4qrGJy7xgQZjyABtwe0Cy/J8gVonvzPYhyl2Eh5z4KXY7MumtBSCv79G4PHNaBI7d59GmnNQCcZaxHykHEDPPr+ymZlhSoe+OBkbr5m7zCRMHWuQuz4mR/4wd1kvVReRTLr8ZJQc0cyNHHVJGj05QkAeBBv
-      [git.zero-downtime.net]:22000 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBytYYdGzh0Y9iwx4hxgjiEwbxLxBdIaMbfpkB6zqiDqmMtVDprjZfRlDTew25+p+Hki7/xJPQRYqB6RR0fuTwo=
-      [git.zero-downtime.net]:22000 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHv6s7N5aiiP9yt2y5EpGZxENxUtUdbWyWJsAgjHBCZZ
+      git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
 
   controller:
     args:
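
Pinned entries like the ones above are usually captured with ssh-keyscan; a sketch for re-verifying the switch from the [git.zero-downtime.net]:22000 alias to the plain hostname (key types and output order may differ):

    # Fetch current host keys for comparison against the pinned values
    ssh-keyscan -t rsa,ecdsa,ed25519 gitlab.com
    ssh-keyscan -t rsa git.zero-downtime.net
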
@@ -25,6 +25,10 @@ Kubernetes: `>= 1.20.0`
 | https://gocd.github.io/helm-chart | gocd | 1.39.4 |
 
 # Jenkins
+- default build retention 10 builds, 32days
+- memory request 1.25GB
+- dark theme
+- trivy scanner incl. HTML reporting and publisher
 
 # goCD
 
@@ -14,6 +14,10 @@
 {{ template "chart.requirementsSection" . }}
 
 # Jenkins
+- default build retention 10 builds, 32days
+- memory request 1.25GB
+- dark theme
+- trivy scanner incl. HTML reporting and publisher
 
 # goCD
 
@@ -2,11 +2,12 @@ apiVersion: v2
 name: kubezero-lib
 description: KubeZero helm library - common helm functions and blocks
 type: library
-version: 0.1.4
+version: 0.1.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
   - kubezero
 maintainers:
-  - name: Quarky9
-kubeVersion: ">= 1.18.0"
+  - name: Stefan Reimer
+    email: stefan@zero-downtime.net
+kubeVersion: ">= 1.20.0"
@@ -10,6 +10,9 @@ Common naming functions
 {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
 {{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if .subchart }}
+{{- $name = default .subchart .Values.nameOverride -}}
+{{- end -}}
 {{- if contains $name .Release.Name -}}
 {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.21.8-5
+version: 1.21.8-7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -1,11 +1,8 @@
 {{- define "logging-values" }}
-{{- if or .Values.logging.es .Values.logging.kibana }}
-eck-operator:
-  enabled: true
 {{- with index .Values "logging" "eck-operator" }}
+eck-operator:
   {{- toYaml . | nindent 2 }}
 {{- end }}
-{{- end }}
 
 {{- if .Values.logging.elastic_password }}
 elastic_password: {{ .Values.logging.elastic_password }}
@@ -27,7 +27,7 @@ kiam:
 
 storage:
   enabled: false
-  targetRevision: 0.4.3
+  targetRevision: 0.5.2
   aws-ebs-csi-driver:
     enabled: false
   aws-efs-csi-driver:
@@ -211,27 +211,16 @@ elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
 
   render_kubeadm
 
-  if [[ "$1" =~ "^(recover|join)$" ]]; then
+  if [[ "$1" =~ "^(bootstrap)$" ]]; then
+    # Create all certs during bootstrap
+    _kubeadm init phase certs all
+
+  else
     # Recert certificates for THIS node
     rm -f ${HOSTFS}/etc/kubernetes/pki/etcd/peer.* ${HOSTFS}/etc/kubernetes/pki/etcd/server.* ${HOSTFS}/etc/kubernetes/pki/apiserver.*
     _kubeadm init phase certs etcd-server
     _kubeadm init phase certs etcd-peer
     _kubeadm init phase certs apiserver
-
-    # Restore only etcd for disaster recovery
-    if [[ "$1" =~ "^(recover)$" ]]; then
-      etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
-        --name $ETCD_NODENAME \
-        --data-dir="${HOSTFS}/var/lib/etcd" \
-        --initial-cluster-token etcd-${CLUSTERNAME} \
-        --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
-        --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
-    fi
-
-  # Create all certs during bootstrap
-  else
-    _kubeadm init phase certs all
-
   fi
 
   pre_kubeadm
@@ -329,7 +318,7 @@ elif [ "$1" == 'backup' ]; then
   restic snapshots || restic init
   restic backup ${WORKDIR} -H $CLUSTERNAME --tag $VERSION
 
-  echo "Backup complete"
+  echo "Backup complete."
 
   # Remove backups from previous versions
   restic forget --keep-tag $VERSION --prune
@@ -355,6 +344,16 @@ elif [ "$1" == 'restore' ]; then
   # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
   cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
 
+  etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
+    --name $ETCD_NODENAME \
+    --data-dir="${HOSTFS}/var/lib/etcd" \
+    --initial-cluster-token etcd-${CLUSTERNAME} \
+    --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
+    --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
+
+  echo "Backup restored."
+
+
 elif [ "$1" == 'debug_shell' ]; then
   echo "Entering debug shell"
   /bin/sh
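
The restore path above consumes a snapshot produced by the matching etcdctl save. A sketch using the same variables as the script; the endpoint port and kubeadm cert paths are assumptions:

    # Counterpart to the restore: take the snapshot that 'restore' consumes
    etcdctl snapshot save ${HOSTFS}/etc/kubernetes/etcd_snapshot \
      --endpoints https://${ETCD_NODENAME}:2379 \
      --cacert ${HOSTFS}/etc/kubernetes/pki/etcd/ca.crt \
      --cert ${HOSTFS}/etc/kubernetes/pki/etcd/server.crt \
      --key ${HOSTFS}/etc/kubernetes/pki/etcd/server.key
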
@@ -6,6 +6,9 @@ set -x
 kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
 kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
 
+# Migrate ZeroDownTime helm repo
+kubectl patch appproject kubezero -n argocd --type=json -p='[{"op": "replace", "path": "/spec/sourceRepos/0", "value": "https://cdn.zero-downtime.net/charts" }]'
+
 # Delete EBS and EFS Deployments and Daemonsets as we cannot change the labels while moving them to storage.
 # This will NOT affect provisioned volumes
 kubectl delete deployment ebs-csi-controller -n kube-system
@@ -17,13 +17,13 @@ yq eval -i 'del(.calico) | del(.kiam)' _values.yaml
 # Move storage into module
 yq eval -i '.storage.enabled=true' _values.yaml
 
-[ $(yq eval 'has(".aws-ebs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-ebs-csi-driver=.aws-ebs-csi-driver' _values.yaml
-[ $(yq eval 'has(".aws-efs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-efs-csi-driver=.aws-efs-csi-driver' _values.yaml
+[ $(yq eval 'has("aws-ebs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-ebs-csi-driver=.aws-ebs-csi-driver' _values.yaml
+[ $(yq eval 'has("aws-efs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-efs-csi-driver=.aws-efs-csi-driver' _values.yaml
 
 # Finally remove old helm apps
 yq eval -i 'del(.aws-ebs-csi-driver) | del(.aws-efs-csi-driver)' _values.yaml
 
 # merge _values.yaml back
-yq eval -i '.spec.source.helm.values |= strload("_values.yaml")' $YAML
+yq eval -Pi '.spec.source.helm.values |= strload("_values.yaml")' $YAML
 
 rm -f _values.yaml
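
The quoting fix matters because yq v4's has() takes a literal key name, not a path expression: has(".aws-ebs-csi-driver") looked for a key literally named ".aws-ebs-csi-driver" and always returned false, so the storage settings were never migrated. A quick demonstration (the -P added alongside merely pretty-prints the merged YAML):

    printf 'aws-ebs-csi-driver:\n  enabled: true\n' > /tmp/t.yaml
    yq eval 'has(".aws-ebs-csi-driver")' /tmp/t.yaml   # false (old, broken form)
    yq eval 'has("aws-ebs-csi-driver")' /tmp/t.yaml    # true  (fixed form)
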
containers/awsController/Dockerfile (new file, 9 lines)
@@ -0,0 +1,9 @@
+ARG SHELL_OPERATOR_VERSION
+
+FROM flant/shell-operator:v${SHELL_OPERATOR_VERSION}
+
+RUN apk upgrade -U -a && \
+    apk --no-cache add \
+    aws-cli
+
+ADD hooks /hooks
containers/awsController/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
+SHELL_OPERATOR_VERSION ?= 1.0.6
+REGISTRY := public.ecr.aws/zero-downtime
+REPOSITORY := kubezero-controller
+TAG := $(REPOSITORY):v$(SHELL_OPERATOR_VERSION)-aws
+
+.PHONY: build push clean scan
+
+all: build push
+
+build:
+	podman build --rm --squash-all --build-arg SHELL_OPERATOR_VERSION=$(SHELL_OPERATOR_VERSION) -t $(TAG) .
+
+push:
+	aws ecr-public get-login-password --region us-east-1 | podman login --username AWS --password-stdin $(REGISTRY)
+	podman tag $(TAG) $(REGISTRY)/$(TAG)
+	podman push $(REGISTRY)/$(TAG)
+
+clean:
+	podman image prune -f
+
+scan:
+	podman system service&
+	sleep 5; trivy $(TAG)
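
Typical usage of the new Makefile; SHELL_OPERATOR_VERSION can be overridden on the command line, and podman plus valid ECR Public credentials are assumed:

    make build SHELL_OPERATOR_VERSION=1.0.6   # builds kubezero-controller:v1.0.6-aws
    make push                                 # logs in to public ECR, tags, and pushes
    make scan                                 # runs trivy against the local image
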
@@ -6,6 +6,10 @@
 kubectl get pods --all-namespaces -o json | jq '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | "kubectl delete pods \(.metadata.name) -n \(.metadata.namespace)"' | xargs -n 1 bash -c
 `
 
+## cleanup stuck namespace
+`for ns in $(kubectl get ns --field-selector status.phase=Terminating -o jsonpath='{.items[*].metadata.name}'); do kubectl get ns $ns -ojson | jq '.spec.finalizers = []' | kubectl replace --raw "/api/v1/namespaces/$ns/finalize" -f -; done
+`
+
 ## Cleanup old replicasets
 `kubectl get rs --all-namespaces | awk {' if ($3 == 0 && $4 == 0) system("kubectl delete rs "$2" --namespace="$1)'}`
 