diff --git a/charts/kubezero-addons/Chart.yaml b/charts/kubezero-addons/Chart.yaml
index f34399c..d6c810e 100644
--- a/charts/kubezero-addons/Chart.yaml
+++ b/charts/kubezero-addons/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.2.4
+version: 0.3.1
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
diff --git a/charts/kubezero-addons/templates/awsController/deployment.yaml b/charts/kubezero-addons/templates/awsController/deployment.yaml
new file mode 100644
index 0000000..5047fc3
--- /dev/null
+++ b/charts/kubezero-addons/templates/awsController/deployment.yaml
@@ -0,0 +1,30 @@
+{{- if .Values.awsController.enabled }}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kubezero-aws-controller
+  namespace: kube-system
+  labels:
+    app: kubezero-aws-controller
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kubezero-aws-controller
+  template:
+    metadata:
+      labels:
+        app: kubezero-aws-controller
+    spec:
+      containers:
+      - name: kubezero-aws-controller
+        image: "{{ .Values.awsController.image.name }}:{{ .Values.awsController.image.tag }}"
+        imagePullPolicy: Always
+      serviceAccountName: kubezero-aws-controller
+      hostNetwork: true
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+{{- end }}
diff --git a/charts/kubezero-addons/templates/awsController/rbac.yaml b/charts/kubezero-addons/templates/awsController/rbac.yaml
new file mode 100644
index 0000000..b2ac261
--- /dev/null
+++ b/charts/kubezero-addons/templates/awsController/rbac.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.awsController.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kubezero-aws-controller
+  namespace: kube-system
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kubezero-aws-controller
+rules:
+- apiGroups: [""]
+  resources: ["pods", "nodes"]
+  verbs: ["*"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: kubezero-aws-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubezero-aws-controller
+subjects:
+  - kind: ServiceAccount
+    name: kubezero-aws-controller
+    namespace: kube-system
+{{- end }}
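A note on wiring this up: the deployment template above reads `.Values.awsController.enabled` and `.Values.awsController.image.*`, which this PR's values.yaml hunk further down does not define. A minimal sketch of enabling the controller at deploy time; the image name and tag here are assumptions inferred from the containers/awsController Makefile later in this diff, not values shipped with the chart:

```sh
# Sketch: enable the optional AWS controller when installing the addons chart.
# Image coordinates are assumed from the containers/awsController Makefile.
helm upgrade --install kubezero-addons charts/kubezero-addons \
  --namespace kube-system \
  --set awsController.enabled=true \
  --set awsController.image.name=public.ecr.aws/zero-downtime/kubezero-controller \
  --set awsController.image.tag=v1.0.6-aws
```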
diff --git a/charts/kubezero-addons/templates/device-plugins/fuse-device-plugin.yaml b/charts/kubezero-addons/templates/device-plugins/fuse-device-plugin.yaml
index 8deebd1..053c283 100644
--- a/charts/kubezero-addons/templates/device-plugins/fuse-device-plugin.yaml
+++ b/charts/kubezero-addons/templates/device-plugins/fuse-device-plugin.yaml
@@ -15,7 +15,7 @@ spec:
     spec:
       hostNetwork: true
      containers:
-      - image: public.ecr.aws/zero-downtime/fuse-device-plugin:v1.0-r1
+      - image: public.ecr.aws/zero-downtime/fuse-device-plugin:v1.1.0
        # imagePullPolicy: Always
         name: fuse-device-plugin
         securityContext:
diff --git a/charts/kubezero-addons/values.yaml b/charts/kubezero-addons/values.yaml
index d89943f..a65731d 100644
--- a/charts/kubezero-addons/values.yaml
+++ b/charts/kubezero-addons/values.yaml
@@ -9,6 +9,13 @@ clusterBackup:
   password: ""
   extraEnv: []
 
+tenjin:
+  enabled: false
+
+  image:
+    name: public.ecr.aws/zero-downtime/kubezero-tenjin
+    tag: v0.1.0
+
 aws-node-termination-handler:
   enabled: false
 
diff --git a/charts/kubezero-argocd/Chart.yaml b/charts/kubezero-argocd/Chart.yaml
index f44732b..2387ef5 100644
--- a/charts/kubezero-argocd/Chart.yaml
+++ b/charts/kubezero-argocd/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
 name: kubezero-argocd
-version: 0.9.2
+version: 0.9.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
diff --git a/charts/kubezero-argocd/templates/istio-service.yaml b/charts/kubezero-argocd/templates/istio-service.yaml
index 0ef393c..156befe 100644
--- a/charts/kubezero-argocd/templates/istio-service.yaml
+++ b/charts/kubezero-argocd/templates/istio-service.yaml
@@ -5,7 +5,7 @@ metadata:
   name: argocd-server
   namespace: {{ $.Release.Namespace }}
   labels:
-{{ include "kubezero-lib.labels" . | indent 4 }}
+  {{- include "kubezero-lib.labels" . | nindent 4 }}
 spec:
   gateways:
   - {{ .Values.istio.gateway }}
diff --git a/charts/kubezero-argocd/values.yaml b/charts/kubezero-argocd/values.yaml
index 081d4b0..34b6a8a 100644
--- a/charts/kubezero-argocd/values.yaml
+++ b/charts/kubezero-argocd/values.yaml
@@ -32,9 +32,7 @@ argo-cd:
       gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
       gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
       gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
-      [git.zero-downtime.net]:22000 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6C8EjqbFGTU/HJhwed7CSVd5o4cusQk++GnQb4gZ9RGySsm/Gfi2VtcphMQcwzQYfjwlQAcMFikQYTD0XjEqmxQ17Q0+XRdLr6svWMXLuvqkWIvke+b1AAnJhNj9dZl7jSZcLgZedUEAMIMEd7dQC+ggrBF0dzMxf+PmMwtPtpkeZ53+JyHoswtolV4qrGJy7xgQZjyABtwe0Cy/J8gVonvzPYhyl2Eh5z4KXY7MumtBSCv79G4PHNaBI7d59GmnNQCcZaxHykHEDPPr+ymZlhSoe+OBkbr5m7zCRMHWuQuz4mR/4wd1kvVReRTLr8ZJQc0cyNHHVJGj05QkAeBBv
-      [git.zero-downtime.net]:22000 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBytYYdGzh0Y9iwx4hxgjiEwbxLxBdIaMbfpkB6zqiDqmMtVDprjZfRlDTew25+p+Hki7/xJPQRYqB6RR0fuTwo=
-      [git.zero-downtime.net]:22000 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHv6s7N5aiiP9yt2y5EpGZxENxUtUdbWyWJsAgjHBCZZ
+      git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
 
   controller:
     args:
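The knownHosts change above replaces the three port-22000 entries for git.zero-downtime.net with a single standard-port ssh-rsa key. A quick way to re-verify such a pinned entry against the live host, using stock OpenSSH tooling; the hostname is taken from the diff:

```sh
# Fetch the current RSA host key; compare the output against the pinned entry.
# (For a non-default port, ssh-keyscan -p 22000 would emit the bracketed
# [host]:22000 form that this change removes.)
ssh-keyscan -t rsa git.zero-downtime.net 2>/dev/null
```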
diff --git a/charts/kubezero-ci/README.md b/charts/kubezero-ci/README.md
index 52e84c8..258a727 100644
--- a/charts/kubezero-ci/README.md
+++ b/charts/kubezero-ci/README.md
@@ -25,6 +25,10 @@ Kubernetes: `>= 1.20.0`
 
 | https://gocd.github.io/helm-chart | gocd | 1.39.4 |
 
 # Jenkins
+- default build retention 10 builds, 32 days
+- memory request 1.25GB
+- dark theme
+- trivy scanner incl. HTML reporting and publisher
 
 # goCD
diff --git a/charts/kubezero-ci/README.md.gotmpl b/charts/kubezero-ci/README.md.gotmpl
index 97513a6..6940f8f 100644
--- a/charts/kubezero-ci/README.md.gotmpl
+++ b/charts/kubezero-ci/README.md.gotmpl
@@ -14,14 +14,18 @@
 {{ template "chart.requirementsSection" . }}
 
 # Jenkins
-
+- default build retention 10 builds, 32 days
+- memory request 1.25GB
+- dark theme
+- trivy scanner incl. HTML reporting and publisher
+
 # goCD
-
+
 # Gitea
 
 ## OpenSSH 8.8 RSA disabled
 - https://github.com/go-gitea/gitea/issues/17798
 
-## Resources
+## Resources
 
 {{ template "chart.valuesSection" . }}
diff --git a/charts/kubezero-lib/Chart.yaml b/charts/kubezero-lib/Chart.yaml
index 035cd66..8b6df41 100644
--- a/charts/kubezero-lib/Chart.yaml
+++ b/charts/kubezero-lib/Chart.yaml
@@ -2,11 +2,12 @@ apiVersion: v2
 name: kubezero-lib
 description: KubeZero helm library - common helm functions and blocks
 type: library
-version: 0.1.4
+version: 0.1.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
   - kubezero
 maintainers:
-  - name: Quarky9
-kubeVersion: ">= 1.18.0"
+  - name: Stefan Reimer
+    email: stefan@zero-downtime.net
+kubeVersion: ">= 1.20.0"
diff --git a/charts/kubezero-lib/templates/_helpers.tpl b/charts/kubezero-lib/templates/_helpers.tpl
index 76c3dbe..1c706d8 100644
--- a/charts/kubezero-lib/templates/_helpers.tpl
+++ b/charts/kubezero-lib/templates/_helpers.tpl
@@ -10,6 +10,9 @@ Common naming functions
 {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
 {{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if .subchart }}
+{{- $name = default .subchart .Values.nameOverride -}}
+{{- end -}}
 {{- if contains $name .Release.Name -}}
 {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
diff --git a/charts/kubezero/Chart.yaml b/charts/kubezero/Chart.yaml
index 6867fde..d6ccd4b 100644
--- a/charts/kubezero/Chart.yaml
+++ b/charts/kubezero/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.21.8-5
+version: 1.21.8-7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
diff --git a/charts/kubezero/templates/logging.yaml b/charts/kubezero/templates/logging.yaml
index b42bbe1..26d26b4 100644
--- a/charts/kubezero/templates/logging.yaml
+++ b/charts/kubezero/templates/logging.yaml
@@ -1,10 +1,7 @@
 {{- define "logging-values" }}
-{{- if or .Values.logging.es .Values.logging.kibana }}
+{{- with index .Values "logging" "eck-operator" }}
 eck-operator:
-  enabled: true
-  {{- with index .Values "logging" "eck-operator" }}
 {{- toYaml . | nindent 2 }}
-  {{- end }}
 {{- end }}
 
 {{- if .Values.logging.elastic_password }}
diff --git a/charts/kubezero/values.yaml b/charts/kubezero/values.yaml
index d822477..265a605 100644
--- a/charts/kubezero/values.yaml
+++ b/charts/kubezero/values.yaml
@@ -27,7 +27,7 @@ kiam:
 
 storage:
   enabled: false
-  targetRevision: 0.4.3
+  targetRevision: 0.5.2
   aws-ebs-csi-driver:
     enabled: false
   aws-efs-csi-driver:
diff --git a/containers/admin/v1.21/kubezero.sh b/containers/admin/v1.21/kubezero.sh
index 0bf2254..68e3852 100755
--- a/containers/admin/v1.21/kubezero.sh
+++ b/containers/admin/v1.21/kubezero.sh
@@ -211,27 +211,16 @@ elif [[ "$1" =~ "^(bootstrap|recover|join)$" ]]; then
 
   render_kubeadm
 
-  if [[ "$1" =~ "^(recover|join)$" ]]; then
+  if [[ "$1" =~ "^(bootstrap)$" ]]; then
+    # Create all certs during bootstrap
+    _kubeadm init phase certs all
+  else
     # Recert certificates for THIS node
     rm -f ${HOSTFS}/etc/kubernetes/pki/etcd/peer.* ${HOSTFS}/etc/kubernetes/pki/etcd/server.* ${HOSTFS}/etc/kubernetes/pki/apiserver.*
     _kubeadm init phase certs etcd-server
     _kubeadm init phase certs etcd-peer
     _kubeadm init phase certs apiserver
-
-    # Restore only etcd for desaster recovery
-    if [[ "$1" =~ "^(recover)$" ]]; then
-      etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
-        --name $ETCD_NODENAME \
-        --data-dir="${HOSTFS}/var/lib/etcd" \
-        --initial-cluster-token etcd-${CLUSTERNAME} \
-        --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
-        --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
-    fi
-
-    # Create all certs during bootstrap
-  else
-    _kubeadm init phase certs all
   fi
 
   pre_kubeadm
@@ -329,7 +318,7 @@ elif [ "$1" == 'backup' ]; then
   restic snapshots || restic init
   restic backup ${WORKDIR} -H $CLUSTERNAME --tag $VERSION
 
-  echo "Backup complete"
+  echo "Backup complete."
 
   # Remove backups from previous versions
   restic forget --keep-tag $VERSION --prune
@@ -355,6 +344,16 @@ elif [ "$1" == 'restore' ]; then
   # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
   cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
 
+  etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
+    --name $ETCD_NODENAME \
+    --data-dir="${HOSTFS}/var/lib/etcd" \
+    --initial-cluster-token etcd-${CLUSTERNAME} \
+    --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
+    --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
+
+  echo "Backup restored."
+
+
 elif [ "$1" == 'debug_shell' ]; then
   echo "Entering debug shell"
   /bin/sh
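The restore path above now replays the etcd snapshot into ${HOSTFS}/var/lib/etcd as part of the `restore` command instead of the `recover` cert path. A minimal sketch for sanity-checking the snapshot file before restoring it; `etcdctl snapshot status` is a stock etcdctl subcommand, and the file path is the one the script uses:

```sh
# Print hash, revision, total keys and size of the snapshot before replaying it.
etcdctl snapshot status ${HOSTFS}/etc/kubernetes/etcd_snapshot --write-out=table
```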
diff --git a/containers/admin/v1.21/kubezero_121.sh b/containers/admin/v1.21/kubezero_121.sh
index a70f3b0..737a859 100755
--- a/containers/admin/v1.21/kubezero_121.sh
+++ b/containers/admin/v1.21/kubezero_121.sh
@@ -3,8 +3,11 @@ set -x
 
 # Allow EFS and EBS Argo apps to be deleted without removing things like storageClasses etc.
 # all to be replaced by kubezero-storage
-kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
-kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+kubectl patch application aws-ebs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+kubectl patch application aws-efs-csi-driver -n argocd --type=json -p='[{"op": "remove", "path": "/metadata/finalizers"}]'
+
+# Migrate ZeroDownTime helm repo
+kubectl patch appproject kubezero -n argocd --type=json -p='[{"op": "replace", "path": "/spec/sourceRepos/0", "value": "https://cdn.zero-downtime.net/charts" }]'
 
 # Delete EBS and EFS Deployments and Daemonsets as we cannot change the lables while moving them to storage.
 # This will NOT affect provisioned volumes
diff --git a/containers/admin/v1.21/migrate_argo.sh b/containers/admin/v1.21/migrate_argo.sh
index 32738af..ddb1289 100755
--- a/containers/admin/v1.21/migrate_argo.sh
+++ b/containers/admin/v1.21/migrate_argo.sh
@@ -17,13 +17,13 @@ yq eval -i 'del(.calico) | del(.kiam)' _values.yaml
 
 # Move storage into module
 yq eval -i '.storage.enabled=true' _values.yaml
-[ $(yq eval 'has(".aws-ebs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-ebs-csi-driver=.aws-ebs-csi-driver' _values.yaml
-[ $(yq eval 'has(".aws-efs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-efs-csi-driver=.aws-efs-csi-driver' _values.yaml
+[ $(yq eval 'has("aws-ebs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-ebs-csi-driver=.aws-ebs-csi-driver' _values.yaml
+[ $(yq eval 'has("aws-efs-csi-driver")' _values.yaml) == "true" ] && yq eval -i '.storage.aws-efs-csi-driver=.aws-efs-csi-driver' _values.yaml
 
 # Finally remove old helm apps
 yq eval -i 'del(.aws-ebs-csi-driver) | del(.aws-efs-csi-driver)' _values.yaml
 
 # merge _values.yaml back
-yq eval -i '.spec.source.helm.values |= strload("_values.yaml")' $YAML
+yq eval -Pi '.spec.source.helm.values |= strload("_values.yaml")' $YAML
 
 rm -f _values.yaml
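The migrate_argo.sh fix above matters because in yq v4 `has()` takes a plain key name, not a path expression: `has(".aws-ebs-csi-driver")` looks for a key literally named `.aws-ebs-csi-driver`, leading dot included, and so never matched. A small illustration of the two forms, using a hypothetical scratch file:

```sh
# Hypothetical scratch file to show the difference in has() semantics.
cat > /tmp/demo.yaml <<'EOF'
aws-ebs-csi-driver:
  enabled: true
EOF

yq eval 'has("aws-ebs-csi-driver")' /tmp/demo.yaml    # true  - plain key name
yq eval 'has(".aws-ebs-csi-driver")' /tmp/demo.yaml   # false - no key literally named ".aws-ebs-csi-driver"
```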
diff --git a/containers/awsController/Dockerfile b/containers/awsController/Dockerfile
new file mode 100644
index 0000000..7d9f596
--- /dev/null
+++ b/containers/awsController/Dockerfile
@@ -0,0 +1,9 @@
+ARG SHELL_OPERATOR_VERSION
+
+FROM flant/shell-operator:v${SHELL_OPERATOR_VERSION}
+
+RUN apk upgrade -U -a && \
+    apk --no-cache add \
+    aws-cli
+
+ADD hooks /hooks
diff --git a/containers/awsController/Makefile b/containers/awsController/Makefile
new file mode 100644
index 0000000..e9eeba6
--- /dev/null
+++ b/containers/awsController/Makefile
@@ -0,0 +1,23 @@
+SHELL_OPERATOR_VERSION ?= 1.0.6
+REGISTRY := public.ecr.aws/zero-downtime
+REPOSITORY := kubezero-controller
+TAG := $(REPOSITORY):v$(SHELL_OPERATOR_VERSION)-aws
+
+.PHONY: build push clean scan
+
+all: build push
+
+build:
+	podman build --rm --squash-all --build-arg SHELL_OPERATOR_VERSION=$(SHELL_OPERATOR_VERSION) -t $(TAG) .
+
+push:
+	aws ecr-public get-login-password --region us-east-1 | podman login --username AWS --password-stdin $(REGISTRY)
+	podman tag $(TAG) $(REGISTRY)/$(TAG)
+	podman push $(REGISTRY)/$(TAG)
+
+clean:
+	podman image prune -f
+
+scan:
+	podman system service&
+	sleep 5; trivy $(TAG)
diff --git a/docs/notes.md b/docs/notes.md
index 1a181de..d777880 100644
--- a/docs/notes.md
+++ b/docs/notes.md
@@ -6,6 +6,10 @@
 kubectl get pods --all-namespaces -o json | jq '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | "kubectl delete pods \(.metadata.name) -n \(.metadata.namespace)"' | xargs -n 1 bash -c
 `
 
+## Cleanup stuck namespaces
+`for ns in $(kubectl get ns --field-selector status.phase=Terminating -o jsonpath='{.items[*].metadata.name}'); do kubectl get ns $ns -ojson | jq '.spec.finalizers = []' | kubectl replace --raw "/api/v1/namespaces/$ns/finalize" -f -; done
+`
+
 ## Cleanup old replicasets
 `kubectl get rs --all-namespaces | awk {' if ($3 == 0 && $4 == 0) system("kubectl delete rs "$2" --namespace="$1)'}`
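The stuck-namespace one-liner added to docs/notes.md is dense; the same operation for a single namespace, unrolled for readability. The namespace name here is a placeholder:

```sh
# Hypothetical namespace "my-ns" stuck in Terminating: clear its finalizers
# via the /finalize subresource, which a plain kubectl edit cannot touch.
ns=my-ns
kubectl get ns "$ns" -o json \
  | jq '.spec.finalizers = []' \
  | kubectl replace --raw "/api/v1/namespaces/$ns/finalize" -f -
```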