First draft of v1.25.8

Stefan Reimer 2023-04-12 11:14:31 +00:00
parent e64388d582
commit 70d03338e4
63 changed files with 9910 additions and 227 deletions

View File

@ -1,9 +1,9 @@
ARG ALPINE_VERSION=3.16
ARG ALPINE_VERSION=3.17
FROM alpine:${ALPINE_VERSION}
ARG ALPINE_VERSION
ARG KUBE_VERSION=1.24
ARG KUBE_VERSION=1.25
RUN cd /etc/apk/keys && \
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@ -18,19 +18,19 @@ RUN cd /etc/apk/keys && \
bash \
python3 \
py3-yaml \
restic \
helm \
cri-tools@kubezero \
kubeadm@kubezero~=${KUBE_VERSION} \
kubectl@kubezero~=${KUBE_VERSION} \
etcdhelper@kubezero \
etcd-ctl@edge-testing \
restic@edge-community \
helm@edge-community
etcd-ctl@edge-testing
RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
mkdir -p /var/lib/kubezero
ADD admin/kubezero.sh admin/libhelm.sh admin/migrate_argo_values.py /usr/bin
ADD admin/libhelm.sh admin/v${KUBE_VERSION}/* /var/lib/kubezero
ADD admin/libhelm.sh admin/pre-upgrade.sh /var/lib/kubezero
ADD charts/kubeadm /charts/kubeadm
ADD charts/kubezero /charts/kubezero
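The bumped ALPINE_VERSION and KUBE_VERSION defaults can be overridden at build time. A minimal local-build sketch (the image tag is an assumption, not part of this commit):

```bash
# Build the admin/upgrade image against Alpine 3.17 and the 1.25 package stream;
# both --build-arg values simply override the defaults declared above.
docker build \
  --build-arg ALPINE_VERSION=3.17 \
  --build-arg KUBE_VERSION=1.25 \
  -t kubezero-admin:v1.25.8-dev .
```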

View File

@ -8,26 +8,17 @@ import yaml
def migrate(values):
"""Actual changes here"""
# ClusterBackup is enabled on AWS anyways, same with cluster-autoscaler
if "aws" in values["global"]:
deleteKey(values["addons"], "clusterBackup")
deleteKey(values["addons"], "cluster-autoscaler")
# Move additional prometheus labels to better config tree
try:
labels = {}
for c in values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"]["additionalAlertRelabelConfigs"]:
labels[c["target_label"]] = c["replacement"]
# Remove calico and multus
deleteKey(values["network"], "calico")
deleteKey(values["network"], "multus")
# ArgoCD helm changes
if "argocd" in values:
if "server" in values["argocd"]:
if not "configs" in values["argocd"]:
values["argocd"]["configs"] = {}
if not "cm" in values["argocd"]["configs"]:
values["argocd"]["configs"]["cm"] = {}
values["argocd"]["configs"]["cm"]["url"] = values["argocd"]["server"]["config"][
"url"
]
deleteKey(values["argocd"], "server")
values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"]["externalLabels"] = labels
deleteKey(values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"], "additionalAlertRelabelConfigs")
except KeyError:
pass
return values
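To preview what the new migration does to a running cluster's configuration, the current values can be piped through the script inside the admin container (the Dockerfile above installs it to /usr/bin). This is a sketch only: it assumes migrate_argo_values.py acts as a stdin-to-stdout filter and that the kubezero-values ConfigMap stores its payload under a values.yaml key.

```bash
# Dump the current KubeZero values and run them through the migration
kubectl get cm kubezero-values -n kube-system -o jsonpath='{.data.values\.yaml}' > /tmp/current-values.yaml
migrate_argo_values.py < /tmp/current-values.yaml > /tmp/migrated-values.yaml

# Inspect what would change, e.g. the alert relabel configs moving to externalLabels
diff -u /tmp/current-values.yaml /tmp/migrated-values.yaml || true
```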

View File

@ -1,7 +1,7 @@
#!/bin/bash -e
#VERSION="latest"
VERSION="v1.24"
VERSION="v1.25"
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
@ -148,38 +148,27 @@ argo_used && disable_argo
#all_nodes_upgrade ""
# Cleanup
# Remove calico CRDs
kubectl delete -f https://git.zero-downtime.net/ZeroDownTime/kubezero/raw/tag/v1.23.11/charts/kubezero-network/charts/calico/crds/crds.yaml || true
# delete old kubelet configs
for cm in $(kubectl get cm -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete cm $cm -n kube-system; done
for rb in $(kubectl get rolebindings -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete rolebindings $rb -n kube-system; done
control_plane_upgrade kubeadm_upgrade
echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
# Remove calico
#kubectl delete deployment calico-kube-controllers -n kube-system || true
#kubectl delete daemonset calico-node -n kube-system || true
#kubectl delete network-attachment-definitions calico -n kube-system || true
# Remove previous cilium config as the helm options are additive only -> the upgrade would otherwise fail
kubectl delete configmap cilium-config -n kube-system || true
control_plane_upgrade "apply_network, apply_addons, apply_storage"
kubectl rollout restart daemonset/kube-multus-ds -n kube-system
kubectl rollout restart daemonset/cilium -n kube-system
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
echo "Applying remaining KubeZero modules..."
# delete argocd deployments as various immutable fields changed, also the redis restart fails otherwise
kubectl delete deployment argocd-redis -n argocd || true
kubectl delete deployment argocd-repo-server -n argocd || true
kubectl delete statefulset argocd-application-controller -n argocd || true
# Delete prometheus-push gateway due to label changes
kubectl delete deploy -l app=prometheus-pushgateway -n monitoring || true
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"
# Final step is to commit the new argocd kubezero app

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubeadm
description: KubeZero Kubeadm cluster config
type: application
version: 1.24.9
version: 1.25.8
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -11,4 +11,4 @@ keywords:
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
kubeVersion: ">= 1.24.0"
kubeVersion: ">= 1.25.0"

View File

@ -1,6 +1,6 @@
# kubeadm
![Version: 1.24.9](https://img.shields.io/badge/Version-1.24.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 1.25.8](https://img.shields.io/badge/Version-1.25.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Kubeadm cluster config
@ -14,7 +14,7 @@ KubeZero Kubeadm cluster config
## Requirements
Kubernetes: `>= 1.24.0`
Kubernetes: `>= 1.25.0`
## Values

View File

@ -1,6 +1,6 @@
{{- /* Feature gates for all control plane components */ -}}
{{- define "kubeadm.featuregates" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "CronJobTimeZone" "NodeOutOfServiceVolumeDetach" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "NodeOutOfServiceVolumeDetach" }}
{{- if eq .return "csv" }}
{{- range $key := $gates }}
{{- $key }}=true,

View File

@ -12,13 +12,3 @@ spec:
memory: 128Mi
nodeSelector:
node-role.kubernetes.io/control-plane: ""
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: "kubernetes.io/hostname"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.7.3
version: 0.7.4
appVersion: v1.24
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -24,7 +24,7 @@ dependencies:
repository: https://kubernetes-sigs.github.io/external-dns/
condition: external-dns.enabled
- name: cluster-autoscaler
version: 9.21.0
version: 9.24.0
repository: https://kubernetes.github.io/autoscaler
condition: cluster-autoscaler.enabled
- name: nvidia-device-plugin
@ -33,7 +33,7 @@ dependencies:
repository: https://nvidia.github.io/k8s-device-plugin
condition: nvidia-device-plugin.enabled
- name: sealed-secrets
version: 2.7.1
version: 2.7.3
repository: https://bitnami-labs.github.io/sealed-secrets
condition: sealed-secrets.enabled
- name: aws-node-termination-handler

View File

@ -1,6 +1,6 @@
# kubezero-addons
![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
![Version: 0.7.4](https://img.shields.io/badge/Version-0.7.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
KubeZero umbrella chart for various optional cluster addons
@ -20,9 +20,9 @@ Kubernetes: `>= 1.24.0`
|------------|------|---------|
| | aws-eks-asg-rolling-update-handler | 1.2.7 |
| | aws-node-termination-handler | 0.20.1 |
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.7.1 |
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.7.3 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.24.0 |
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.13.0 |
# MetalLB
@ -105,6 +105,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| cluster-autoscaler.enabled | bool | `false` | |
| cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
| cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
| cluster-autoscaler.image.tag | string | `"v1.24.0"` | |
| cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cluster-autoscaler.podDisruptionBudget | bool | `false` | |
| cluster-autoscaler.prometheusRule.enabled | bool | `false` | |
@ -139,6 +140,10 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/instance-type"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"g5.xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[10] | string | `"g4dn.4xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[11] | string | `"g4dn.8xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[12] | string | `"g4dn.12xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[13] | string | `"g4dn.16xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[1] | string | `"g5.2xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[2] | string | `"g5.4xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[3] | string | `"g5.8xlarge"` | |
@ -146,6 +151,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[5] | string | `"g5.16xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[6] | string | `"g5.24xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[7] | string | `"g5.48xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[8] | string | `"g4dn.xlarge"` | |
| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[9] | string | `"g4dn.2xlarge"` | |
| nvidia-device-plugin.enabled | bool | `false` | |
| nvidia-device-plugin.tolerations[0].effect | string | `"NoSchedule"` | |
| nvidia-device-plugin.tolerations[0].key | string | `"nvidia.com/gpu"` | |
@ -155,7 +162,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| nvidia-device-plugin.tolerations[1].operator | string | `"Exists"` | |
| sealed-secrets.enabled | bool | `false` | |
| sealed-secrets.fullnameOverride | string | `"sealed-secrets-controller"` | |
| sealed-secrets.keyrenewperiod | int | `0` | |
| sealed-secrets.keyrenewperiod | string | `"0"` | |
| sealed-secrets.metrics.serviceMonitor.enabled | bool | `false` | |
| sealed-secrets.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| sealed-secrets.resources.limits.memory | string | `"128Mi"` | |

View File

@ -31,7 +31,7 @@ sealed-secrets:
fullnameOverride: sealed-secrets-controller
# Disable auto keyrotation for now
keyrenewperiod: 0
keyrenewperiod: "0"
resources:
requests:
@ -179,10 +179,18 @@ nvidia-device-plugin:
- g5.24xlarge
- g5.48xlarge
- g4dn.xlarge
- g4dn.2xlarge
- g4dn.4xlarge
- g4dn.8xlarge
- g4dn.12xlarge
- g4dn.16xlarge
cluster-autoscaler:
enabled: false
image:
tag: v1.24.0
autoDiscovery:
clusterName: ""
awsRegion: "us-west-2"

View File

@ -68,16 +68,16 @@ argo-cd:
createSecret: false
knownHosts:
data:
ssh_known_hosts: |
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
data:
ssh_known_hosts: |
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
params:
controller.status.processors: "10"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.5.25
version: 0.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -22,16 +22,15 @@ dependencies:
repository: https://gocd.github.io/helm-chart
condition: gocd.enabled
- name: gitea
version: 6.0.5
version: 7.0.4
repository: https://dl.gitea.io/charts/
condition: gitea.enabled
- name: jenkins
version: 4.2.20
version: 4.3.2
repository: https://charts.jenkins.io
condition: jenkins.enabled
- name: trivy
version: 0.4.17
version: 0.6.0
repository: https://aquasecurity.github.io/helm-charts/
condition: trivy.enabled
kubeVersion: ">= 1.20.0"
kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-ci
![Version: 0.5.24](https://img.shields.io/badge/Version-0.5.24-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI
@ -14,14 +14,14 @@ KubeZero umbrella chart for all things CI
## Requirements
Kubernetes: `>= 1.20.0`
Kubernetes: `>= 1.24.0`
| Repository | Name | Version |
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 |
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.6.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 4.2.17 |
| https://dl.gitea.io/charts/ | gitea | 6.0.5 |
| https://charts.jenkins.io | jenkins | 4.3.2 |
| https://dl.gitea.io/charts/ | gitea | 7.0.4 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 |
# Jenkins
@ -82,6 +82,10 @@ Kubernetes: `>= 1.20.0`
| jenkins.agent.image | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.podName | string | `"podman-aws"` | |
| jenkins.agent.podRetention | string | `"Default"` | |
| jenkins.agent.resources.limits.cpu | string | `""` | |
| jenkins.agent.resources.limits.memory | string | `""` | |
| jenkins.agent.resources.requests.cpu | string | `""` | |
| jenkins.agent.resources.requests.memory | string | `""` | |
| jenkins.agent.showRawYaml | bool | `false` | |
| jenkins.agent.tag | string | `"v0.4.1"` | |
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
@ -92,18 +96,18 @@ Kubernetes: `>= 1.20.0`
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3743.v1fa_4c724c3b_7"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3883.v4d70a_a_a_df034"` | |
| jenkins.controller.installPlugins[10] | string | `"build-discarder:139.v05696a_7fe240"` | |
| jenkins.controller.installPlugins[11] | string | `"dark-theme:262.v0202a_4c8fb_6a"` | |
| jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.206.v7ce2cf7b_0c8b"` | |
| jenkins.controller.installPlugins[11] | string | `"dark-theme:302.vf069cb_e01486"` | |
| jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.209.v862c6e5fb_1ef"` | |
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
| jenkins.controller.installPlugins[2] | string | `"git:4.14.3"` | |
| jenkins.controller.installPlugins[2] | string | `"git:5.0.0"` | |
| jenkins.controller.installPlugins[3] | string | `"basic-branch-build-strategies:71.vc1421f89888e"` | |
| jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:144.vf3924feb_7e35"` | |
| jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.28"` | |
| jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:154.v52138b_cb_557e"` | |
| jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.31"` | |
| jenkins.controller.installPlugins[6] | string | `"configuration-as-code:1569.vb_72405b_80249"` | |
| jenkins.controller.installPlugins[7] | string | `"antisamy-markup-formatter:155.v795fb_8702324"` | |
| jenkins.controller.installPlugins[8] | string | `"prometheus:2.0.11"` | |
| jenkins.controller.installPlugins[8] | string | `"prometheus:2.1.1"` | |
| jenkins.controller.installPlugins[9] | string | `"htmlpublisher:1.31"` | |
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |

View File

@ -121,19 +121,19 @@ jenkins:
numToKeepStr: "10"
installPlugins:
- kubernetes:3802.vb_b_600831fcb_3
- kubernetes:3883.v4d70a_a_a_df034
- workflow-aggregator:581.v0c46fa_697ffd
- git:5.0.0
- basic-branch-build-strategies:71.vc1421f89888e
- pipeline-graph-view:144.vf3924feb_7e35
- pipeline-stage-view:2.28
- pipeline-graph-view:154.v52138b_cb_557e
- pipeline-stage-view:2.31
- configuration-as-code:1569.vb_72405b_80249
- antisamy-markup-formatter:155.v795fb_8702324
- prometheus:2.1.0
- prometheus:2.1.1
- htmlpublisher:1.31
- build-discarder:139.v05696a_7fe240
- dark-theme:262.v0202a_4c8fb_6a
- kubernetes-credentials-provider:1.208.v128ee9800c04
- dark-theme:302.vf069cb_e01486
- kubernetes-credentials-provider:1.209.v862c6e5fb_1ef
serviceAccountAgent:
create: true
@ -231,7 +231,7 @@ jenkins:
trivy:
enabled: false
image:
tag: 0.35.0
tag: 0.37.3
persistence:
enabled: true
size: 1Gi

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio-gateway
description: KubeZero Umbrella Chart for Istio gateways
type: application
version: 0.9.0
version: 0.9.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -14,9 +14,9 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.5"
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: gateway
version: 1.16.1
version: 1.16.3
repository: https://istio-release.storage.googleapis.com/charts
kubeVersion: ">= 1.24.0"

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.16.1
appVersion: 1.16.3
description: Helm chart for deploying Istio gateways
icon: https://istio.io/latest/favicons/android-192x192.png
keywords:
@ -9,4 +9,4 @@ name: gateway
sources:
- http://github.com/istio/istio
type: application
version: 1.16.1
version: 1.16.3

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio
description: KubeZero Umbrella Chart for Istio
type: application
version: 0.9.0
version: 0.9.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,10 +16,10 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: base
version: 1.16.1
version: 1.16.3
repository: https://istio-release.storage.googleapis.com/charts
- name: istiod
version: 1.16.1
version: 1.16.3
repository: https://istio-release.storage.googleapis.com/charts
- name: kiali-server
version: "1.60.0"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.8.4
version: 0.8.5
appVersion: 1.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -27,6 +27,6 @@ dependencies:
version: 0.3.9
condition: fluentd.enabled
- name: fluent-bit
version: 0.20.6
version: 0.24.0
condition: fluent-bit.enabled
kubeVersion: ">= 1.24.0"

View File

@ -1,13 +1,13 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Additional upstream config option added"
description: "Updated Fluent Bit image to v2.0.9."
apiVersion: v1
appVersion: 1.9.7
appVersion: 2.0.9
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
icon: https://fluentbit.io/assets/img/logo1-default.png
icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/fluentd/fluentbit/icon/fluentbit-icon-color.svg
keywords:
- logging
- fluent-bit
@ -24,4 +24,4 @@ maintainers:
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.20.6
version: 0.24.0

View File

@ -31,7 +31,7 @@ First, you should add your Lua scripts to `luaScripts` in values.yaml, for examp
```yaml
luaScripts:
filter_example.lua: |
filter_example.lua: |
function filter_name(tag, timestamp, record)
-- put your lua code here.
end

View File

@ -11,6 +11,9 @@ priorityClassName: {{ .Values.priorityClassName }}
securityContext:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
hostNetwork: {{ .Values.hostNetwork }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- with .Values.dnsConfig }}

View File

@ -19,7 +19,7 @@ rules:
- get
- list
- watch
{{- if .Values.podSecurityPolicy.create }}
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) }}
- apiGroups:
- policy
resources:

View File

@ -20,6 +20,9 @@ spec:
updateStrategy:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
annotations:

View File

@ -23,6 +23,9 @@ spec:
selector:
matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template:
metadata:
annotations:

View File

@ -1,4 +1,4 @@
{{- if .Values.podSecurityPolicy.create }}
{{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
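With the added semverCompare guard, PSP objects are only rendered for clusters below 1.25, matching the upstream API removal. A quick offline check (the vendored chart path is an assumption):

```bash
# On a 1.24 control plane the PodSecurityPolicy is still emitted ...
helm template charts/kubezero-logging/charts/fluent-bit \
  --set podSecurityPolicy.create=true --kube-version 1.24.0 \
  | grep 'kind: PodSecurityPolicy'

# ... while on 1.25 the template is skipped entirely
helm template charts/kubezero-logging/charts/fluent-bit \
  --set podSecurityPolicy.create=true --kube-version 1.25.8 \
  | grep 'kind: PodSecurityPolicy' || echo "PSP not rendered"
```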

View File

@ -16,6 +16,15 @@ spec:
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if (eq .Values.service.type "LoadBalancer")}}
{{- with .Values.service.loadBalancerClass}}
loadBalancerClass: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges}}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http

View File

@ -36,6 +36,16 @@ spec:
relabelings:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.serviceMonitor.scheme }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- end }}
{{- if .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.serviceMonitor.tlsConfig | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.additionalEndpoints }}
{{- toYaml . | nindent 4 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}

View File

@ -0,0 +1,38 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.autoscaling.vpa.enabled }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.autoscaling.vpa.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
resourcePolicy:
containerPolicies:
- containerName: {{ .Chart.Name }}
{{- with .Values.autoscaling.vpa.controlledResources }}
controlledResources:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.maxAllowed }}
maxAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.minAllowed }}
minAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: {{ .Values.kind }}
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.autoscaling.vpa.updatePolicy }}
updatePolicy:
{{- with .Values.autoscaling.vpa.updatePolicy.updateMode }}
updateMode: {{ . }}
{{- end }}
{{- end }}
{{- end }}
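The new VerticalPodAutoscaler manifest is double-gated: the autoscaling.k8s.io/v1 API has to be present on the cluster and autoscaling.vpa.enabled has to be set. When templating offline, the API can be advertised via --api-versions; the chart path and flag values below are illustrative, not prescribed by the chart:

```bash
helm template charts/kubezero-logging/charts/fluent-bit \
  --set kind=Deployment \
  --set autoscaling.vpa.enabled=true \
  --api-versions autoscaling.k8s.io/v1/VerticalPodAutoscaler \
  | grep -A3 'kind: VerticalPodAutoscaler'
```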

View File

@ -13,7 +13,7 @@ image:
pullPolicy: Always
testFramework:
enabled: false
enabled: true
image:
repository: busybox
pullPolicy: Always
@ -32,6 +32,11 @@ rbac:
create: true
nodeAccess: false
# Configure podsecuritypolicy
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
# PSP is deprecated and removed as of Kubernetes 1.25
# See: https://kubernetes.io/blog/2022/08/23/kubernetes-v1-25-release/#pod-security-changes
# We automatically disable PSP if Kubernetes version is 1.25 or higher
podSecurityPolicy:
create: false
annotations: {}
@ -78,6 +83,8 @@ securityContext: {}
service:
type: ClusterIP
port: 2020
loadBalancerClass:
loadBalancerSourceRanges: []
labels: {}
# nodePort: 30020
# clusterIP: 172.16.10.1
@ -111,6 +118,35 @@ serviceMonitor:
# targetLabel: nodename
# replacement: $1
# action: replace
# scheme: ""
# tlsConfig: {}
## Bear in mind that if you want to collect metrics from a different port,
## you will need to configure the new ports on the extraPorts property.
additionalEndpoints: []
# - port: metrics
# path: /metrics
# interval: 10s
# scrapeTimeout: 10s
# scheme: ""
# tlsConfig: {}
# # metric relabel configs to apply to samples before ingestion.
# #
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# # relabel configs to apply to samples after ingestion.
# #
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
prometheusRule:
enabled: false
@ -177,6 +213,28 @@ ingress:
## only available if kind is Deployment
autoscaling:
vpa:
enabled: false
annotations: {}
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
updatePolicy:
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
enabled: false
minReplicas: 1
maxReplicas: 3
@ -222,6 +280,14 @@ podAnnotations: {}
podLabels: {}
## How long (in seconds) a Pod needs to be stable before progressing the deployment
##
minReadySeconds:
## How long (in seconds) a pod may take to exit (useful with lifecycle hooks to ensure lb deregistration is done)
##
terminationGracePeriodSeconds:
priorityClassName: ""
env: []
@ -274,7 +340,7 @@ networkPolicy:
luaScripts: {}
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file
config:
service: |
[SERVICE]
@ -332,15 +398,8 @@ config:
Retry_Limit False
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers
## This configuration is deprecated, please use `extraFiles` instead.
upstream: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: |
@ -354,6 +413,14 @@ config:
# This allows adding more files with arbitrary filenames to /fluent-bit/etc by providing key/value pairs.
# The key becomes the filename, the value becomes the file content.
extraFiles: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
# example.conf: |
# [OUTPUT]
# Name example

View File

@ -1,24 +0,0 @@
diff -tubr charts/fluent-bit/templates/tests/test-connection.yaml charts/fluent-bit.zdt/templates/tests/test-connection.yaml
--- charts/fluent-bit/templates/tests/test-connection.yaml 2021-06-22 12:25:10.000000000 +0200
+++ charts/fluent-bit.zdt/templates/tests/test-connection.yaml 2021-05-17 12:09:02.724057438 +0200
@@ -1,3 +1,4 @@
+{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
@@ -18,3 +19,4 @@
{{- toYaml . | nindent 4 }}
{{- end }}
restartPolicy: Never
+{{- end }}
diff -tubr charts/fluent-bit/values.yaml charts/fluent-bit.zdt/values.yaml
--- charts/fluent-bit/values.yaml 2021-06-22 12:25:10.000000000 +0200
+++ charts/fluent-bit.zdt/values.yaml 2021-07-19 10:23:01.383398153 +0200
@@ -12,6 +12,7 @@
# tag:
testFramework:
+ enabled: false
image:
repository: busybox
pullPolicy: Always

View File

@ -19,7 +19,7 @@ yq eval -Mi 'del(.dependencies)' charts/eck-operator/Chart.yaml
rm -rf charts/fluent-bit
curl -L -s -o - https://github.com/fluent/helm-charts/releases/download/fluent-bit-${FLUENT_BIT_VERSION}/fluent-bit-${FLUENT_BIT_VERSION}.tgz | tar xfz - -C charts
patch -i fluent-bit.patch -p0 --no-backup-if-mismatch
# patch -i fluent-bit.patch -p0 --no-backup-if-mismatch
# FluentD

View File

@ -246,7 +246,10 @@ fluent-bit:
image:
#repository: public.ecr.aws/zero-downtime/fluent-bit
tag: 1.9.8
tag: 2.0.10
testFramework:
enabled: false
serviceMonitor:
enabled: false
@ -276,8 +279,8 @@ fluent-bit:
tls: false
input:
memBufLimit: 4MB
refreshInterval: 10
memBufLimit: 16MB
refreshInterval: 5
logLevel: info
flushInterval: 5
@ -300,16 +303,19 @@ fluent-bit:
[INPUT]
Name tail
Path /var/log/containers/*.log
# Exclude ourselves to avoid current error spam, https://github.com/fluent/fluent-bit/issues/5769
Exclude_Path *logging-fluent-bit*
multiline.parser cri
Tag cri.*
Skip_Long_Lines On
Skip_Empty_Lines On
DB /var/log/flb_kube.db
DB.Sync Normal
DB.locking true
# Buffer_Max_Size 1M
{{- with .Values.config.input }}
Mem_Buf_Limit {{ default "4MB" .memBufLimit }}
Refresh_Interval {{ default 10 .refreshInterval }}
Mem_Buf_Limit {{ default "16MB" .memBufLimit }}
Refresh_Interval {{ default 5 .refreshInterval }}
{{- end }}
filters: |

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.3.4
version: 0.3.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,6 +1,6 @@
# kubezero-mq
![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.3.5](https://img.shields.io/badge/Version-0.3.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
@ -20,7 +20,7 @@ Kubernetes: `>= 1.20.0`
|------------|------|---------|
| | nats | 0.8.4 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.bitnami.com/bitnami | rabbitmq | 11.1.5 |
| https://charts.bitnami.com/bitnami | rabbitmq | 11.3.2 |
| https://charts.bitnami.com/bitnami | rabbitmq-cluster-operator | 3.1.4 |
## Values
@ -52,8 +52,10 @@ Kubernetes: `>= 1.20.0`
| rabbitmq.clustering.forceBoot | bool | `false` | |
| rabbitmq.enabled | bool | `false` | |
| rabbitmq.hosts | list | `[]` | hostnames of rabbitmq services, used for Istio and TLS |
| rabbitmq.istio.amqp | bool | `false` | |
| rabbitmq.istio.enabled | bool | `false` | |
| rabbitmq.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| rabbitmq.istio.mqtt | bool | `false` | |
| rabbitmq.istio.mqtts | bool | `false` | |
| rabbitmq.metrics.enabled | bool | `false` | |
| rabbitmq.metrics.serviceMonitor.enabled | bool | `true` | |

View File

@ -25,6 +25,15 @@ spec:
host: rabbitmq
port:
number: 5672
{{- if .Values.rabbitmq.istio.amqp }}
- match:
- port: 5672
route:
- destination:
host: rabbitmq
port:
number: 5672
{{- end }}
{{- if .Values.rabbitmq.istio.mqtts }}
- match:
- port: 8883

View File

@ -56,6 +56,7 @@ rabbitmq:
istio:
enabled: false
gateway: istio-ingress/private-ingressgateway
amqp: false
mqtt: false
mqtts: false

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network
description: KubeZero umbrella chart for all things network
type: application
version: 0.4.2
version: 0.4.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,14 +16,14 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.5"
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: cilium
version: 1.12.5
version: 1.13.1
repository: https://helm.cilium.io/
condition: cilium.enabled
- name: metallb
version: 0.13.7
version: 0.13.9
repository: https://metallb.github.io/metallb
condition: metallb.enabled
kubeVersion: ">= 1.24.0"
kubeVersion: ">= 1.25.0"

View File

@ -18,7 +18,7 @@ metallb:
multus:
enabled: false
tag: "v3.9.2"
tag: "v3.9.3"
clusterNetwork: "cilium"
defaultNetworks: []

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-storage
description: KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
type: application
version: 0.7.5
version: 0.8.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -20,19 +20,23 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: lvm-localpv
version: 1.0.0
version: 1.0.1
condition: lvm-localpv.enabled
# we patch: repository: https://openebs.github.io/lvm-localpv
- name: gemini
version: 1.0.0
condition: gemini.enabled
# repository: https://charts.fairwinds.com/stable
- name: aws-ebs-csi-driver
version: 2.14.1
version: 2.17.2
condition: aws-ebs-csi-driver.enabled
repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver
- name: aws-efs-csi-driver
version: 2.3.2
version: 2.4.1
condition: aws-efs-csi-driver.enabled
# repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver
- name: gemini
version: 2.0.0
condition: gemini.enabled
# repository: https://charts.fairwinds.com/stable
- name: k8up
version: 4.2.0
condition: k8up.enabled
repository: https://k8up-io.github.io/k8up
kubeVersion: ">= 1.24.0"

View File

@ -1,6 +1,6 @@
# kubezero-storage
![Version: 0.7.5](https://img.shields.io/badge/Version-0.7.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.1](https://img.shields.io/badge/Version-0.8.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things storage incl. AWS EBS/EFS, openEBS-lvm, gemini
@ -20,8 +20,9 @@ Kubernetes: `>= 1.24.0`
|------------|------|---------|
| | aws-efs-csi-driver | 2.3.2 |
| | gemini | 1.0.0 |
| | lvm-localpv | 1.0.0 |
| | lvm-localpv | 1.0.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://k8up-io.github.io/k8up | k8up | 4.2.0 |
| https://kubernetes-sigs.github.io/aws-ebs-csi-driver | aws-ebs-csi-driver | 2.14.1 |
## Values
@ -102,6 +103,18 @@ Kubernetes: `>= 1.24.0`
| gemini.resources.limits.memory | string | `"128Mi"` | |
| gemini.resources.requests.cpu | string | `"20m"` | |
| gemini.resources.requests.memory | string | `"32Mi"` | |
| k8up.enabled | bool | `false` | |
| k8up.k8up.enableLeaderElection | bool | `false` | |
| k8up.metrics.serviceMonitor.enabled | bool | `true` | |
| k8up.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| k8up.replicaCount | int | `1` | |
| k8up.resources.limits.memory | string | `"256Mi"` | |
| k8up.resources.requests.cpu | string | `"20m"` | |
| k8up.resources.requests.memory | string | `"32Mi"` | |
| k8up.tolerations[0].effect | string | `"NoSchedule"` | |
| k8up.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| k8up.tolerations[1].effect | string | `"NoSchedule"` | |
| k8up.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| lvm-localpv.analytics.enabled | bool | `false` | |
| lvm-localpv.enabled | bool | `false` | |
| lvm-localpv.lvmController.logLevel | int | `2` | |

View File

@ -1,4 +1,25 @@
# Helm chart
# v2.4.1
* Bump app/driver version to `v1.5.4`
# v2.4.0
* Bump app/driver version to `v1.5.3`
# v2.3.9
* Bump app/driver version to `v1.5.2`
# v2.3.8
* Bump app/driver version to `v1.5.1`
# v2.3.7
* Bump app/driver version to `v1.5.0`
# v2.3.6
* Bump app/driver version to `v1.4.9`
# v2.3.5
* Bump app/driver version to `v1.4.8`
# v2.3.4
* Bump app/driver version to `v1.4.7`
# v2.3.3
* Bump app/driver version to `v1.4.6`
# v2.3.2
* Bump app/driver version to `v1.4.5`

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.4.5
appVersion: 1.5.4
description: A Helm chart for AWS EFS CSI Driver
home: https://github.com/kubernetes-sigs/aws-efs-csi-driver
keywords:
@ -15,4 +15,4 @@ maintainers:
name: aws-efs-csi-driver
sources:
- https://github.com/kubernetes-sigs/aws-efs-csi-driver
version: 2.3.2
version: 2.4.1

View File

@ -15,7 +15,7 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.controller.updateStrategy }}
strategy:
type: {{ . }}
{{ toYaml . | nindent 4 }}
{{- end }}
template:
metadata:

View File

@ -11,6 +11,10 @@ spec:
app: efs-csi-node
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.node.updateStrategy }}
updateStrategy:
{{ toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
labels:

View File

@ -11,26 +11,26 @@ useFIPS: false
image:
repository: amazon/aws-efs-csi-driver
tag: "v1.4.5"
tag: "v1.5.4"
pullPolicy: IfNotPresent
sidecars:
livenessProbe:
image:
repository: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe
tag: v2.2.0-eks-1-18-13
tag: v2.8.0-eks-1-25-latest
pullPolicy: IfNotPresent
resources: {}
nodeDriverRegistrar:
image:
repository: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar
tag: v2.1.0-eks-1-18-13
tag: v2.6.2-eks-1-25-latest
pullPolicy: IfNotPresent
resources: {}
csiProvisioner:
image:
repository: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner
tag: v2.1.1-eks-1-18-13
tag: v3.3.0-eks-1-25-latest
pullPolicy: IfNotPresent
resources: {}
@ -68,6 +68,7 @@ controller:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
updateStrategy: {}
tolerations: []
affinity: {}
# Specifies whether a service account should be created
@ -112,6 +113,10 @@ node:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
updateStrategy: {}
# Override default strategy (RollingUpdate) to speed up deployment.
# This can be useful if helm timeouts are observed.
# type: OnDelete
tolerations:
- operator: Exists
# Specifies whether a service account should be created

View File

@ -1,9 +1,9 @@
apiVersion: v1
appVersion: "1.0"
appVersion: "2.0"
description: Automated backup and restore of PersistentVolumes using the VolumeSnapshot
API
maintainers:
- email: robertb@fairwinds.com
name: rbren
name: gemini
version: 1.0.0
version: 2.0.0

View File

@ -16,12 +16,22 @@ See the [Gemini README](https://github.com/FairwindsOps/gemini) for more informa
## Installation
```bash
helm repo add fairwinds-stable https://charts.fairwinds.com/stable
helm install gemini fairwinds-stable/gemini --namespace gemini
helm install gemini fairwinds-stable/gemini --namespace gemini --create-namespace
```
## Requirements
Your cluster must support the [VolumeSnapshot API](https://kubernetes.io/docs/concepts/storage/volume-snapshots/)
## Upgrading to V2
Version 2.0 of Gemini updates the CRD from `v1beta1` to `v1`. There are no substantial
changes, but `v1` adds better support for PersistentVolumeClaims on Kubernetes 1.25.
If you want to keep the v1beta1 CRD available, you can run:
```
kubectl apply -f https://raw.githubusercontent.com/FairwindsOps/gemini/main/pkg/types/snapshotgroup/v1beta1/crd-with-beta1.yaml
```
before upgrading, and add `--skip-crds` when running `helm install`.
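Put together, a cautious upgrade of an existing install could look like the following sketch (release name and namespace taken from the install example above):

```bash
# Keep the old v1beta1 CRD available, then upgrade without letting Helm manage CRDs
kubectl apply -f https://raw.githubusercontent.com/FairwindsOps/gemini/main/pkg/types/snapshotgroup/v1beta1/crd-with-beta1.yaml
helm upgrade gemini fairwinds-stable/gemini --namespace gemini --skip-crds
```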
## Values
| Key | Type | Default | Description |

View File

@ -16,10 +16,21 @@ See the [Gemini README](https://github.com/FairwindsOps/gemini) for more informa
## Installation
```bash
helm repo add fairwinds-stable https://charts.fairwinds.com/stable
helm install gemini fairwinds-stable/gemini --namespace gemini
helm install gemini fairwinds-stable/gemini --namespace gemini --create-namespace
```
## Requirements
Your cluster must support the [VolumeSnapshot API](https://kubernetes.io/docs/concepts/storage/volume-snapshots/)
## Upgrading to V2
Version 2.0 of Gemini updates the CRD from `v1beta1` to `v1`. There are no substantial
changes, but `v1` adds better support for PersistentVolumeClaims on Kubernetes 1.25.
If you want to keep the v1beta1 CRD available, you can run:
```
kubectl apply -f https://raw.githubusercontent.com/FairwindsOps/gemini/main/pkg/types/snapshotgroup/v1beta1/crd-with-beta1.yaml
```
before upgrading, and add `--skip-crds` when running `helm install`.
{{ template "chart.valuesSection" . }}

View File

@ -0,0 +1,87 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: snapshotgroups.gemini.fairwinds.com
spec:
group: gemini.fairwinds.com
names:
plural: snapshotgroups
singular: snapshotgroup
kind: SnapshotGroup
listKind: SnapshotGroupList
scope: Namespaced
versions:
- name: v1
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
persistentVolumeClaim:
type: object
properties:
claimName:
description: PersistentVolumeClaim name to backup
type: string
spec:
description: PersistentVolumeClaim spec to create and backup
type: object
properties:
storageClassName:
type: string
accessModes:
type: array
items:
type: string
volumeName:
type: string
resources:
type: object
properties:
requests:
type: object
additionalProperties: true
limits:
type: object
properties:
storageClassName:
type: string
additionalProperties: true
selector:
type: object
properties:
matchLabels:
type: object
additionalProperties: true
matchExpressions:
type: array
items:
type: object
additionalProperties: true
schedule:
type: array
items:
type: object
properties:
every:
description: Interval for creating new backups
type: string
keep:
description: Number of historical backups to keep
type: integer
template:
type: object
properties:
spec:
description: VolumeSnapshot spec
type: object
properties:
volumeSnapshotClassName:
description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
type: string
conversion:
strategy: None

View File

@ -20,4 +20,4 @@ maintainers:
name: lvm-localpv
sources:
- https://github.com/openebs/lvm-localpv
version: 1.0.0
version: 1.0.1

View File

@ -131,7 +131,7 @@ spec:
emptyDir: {}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.lvmController.nodeSelector }}
nodeSelector:

View File

@ -140,7 +140,7 @@ spec:
type: Directory
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.lvmNode.nodeSelector }}
nodeSelector:

File diff suppressed because it is too large

View File

@ -1,17 +1,6 @@
diff -tuNr charts/aws-efs-csi-driver.orig/templates/controller-deployment.yaml charts/aws-efs-csi-driver/templates/controller-deployment.yaml
--- charts/aws-efs-csi-driver.orig/templates/controller-deployment.yaml 2022-10-11 20:57:20.000000000 +0200
+++ charts/aws-efs-csi-driver/templates/controller-deployment.yaml 2022-10-12 22:40:48.100658738 +0200
@@ -13,6 +13,10 @@
app: efs-csi-controller
app.kubernetes.io/name: {{ include "aws-efs-csi-driver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
+ {{- with .Values.controller.updateStrategy }}
+ strategy:
+ type: {{ . }}
+ {{- end }}
template:
metadata:
labels:
@@ -69,9 +73,14 @@
- name: AWS_USE_FIPS_ENDPOINT
value: "true"

View File

@ -18,7 +18,7 @@
"subdir": "contrib/mixin"
}
},
"version": "9e3966fbce6dccd2271b7ade588fefeb4ca7b247",
"version": "5872b80ed5e1aca4f6bb1d9a00b60e24826b9631",
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
},
{
@ -28,7 +28,7 @@
"subdir": "grafonnet"
}
},
"version": "30280196507e0fe6fa978a3e0eaca3a62844f817",
"version": "f0b70307b8e5f12236b277883d998af129a8211f",
"sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc="
},
{
@ -38,7 +38,7 @@
"subdir": "grafana-builder"
}
},
"version": "d68f9a6e0b1af7c4c4056dc2b43fb8f3bac01f43",
"version": "713f3cae6766cbcbadce69074cb88c5538c8cb5c",
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
},
{
@ -58,7 +58,7 @@
"subdir": "lib/promgrafonnet"
}
},
"version": "3c386687c1f8ceb6b79ff887c4a934e9cee1b90a",
"version": "eed459199703c969afc318ea55b9361ae48180a7",
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
},
{

View File

@ -1,7 +1,7 @@
#!/bin/bash
set -ex
helm dependencies update
#helm dependencies update
### Gemini
rm -rf charts/gemini
@ -9,6 +9,10 @@ helm pull fairwinds-stable/gemini --untar --untardir charts
# Patch to run gemini on controller nodes
patch -p0 -i gemini.patch --no-backup-if-mismatch
# k8up
VERSION=$(yq eval '.dependencies[] | select(.name=="k8up") | .version' Chart.yaml)
curl -L -s -o crds/k8up.yaml https://github.com/k8up-io/k8up/releases/download/k8up-${VERSION}/k8up-crd.yaml
### openEBS
VERSION=$(yq eval '.dependencies[] | select(.name=="lvm-localpv") | .version' Chart.yaml)
helm repo add openebs-lvmlocalpv https://openebs.github.io/lvm-localpv || true

View File

@ -51,6 +51,33 @@ lvm-localpv:
prometheus:
enabled: false
k8up:
enabled: false
replicaCount: 1
k8up:
enableLeaderElection: false
metrics:
serviceMonitor:
enabled: true
resources:
requests:
memory: 32Mi
cpu: 20m
limits:
memory: 256Mi
#cpu: 400m
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/control-plane: ""
gemini:
enabled: false
# verbosity: 1
@ -66,7 +93,6 @@ gemini:
memory: 128Mi
cpu: 400m
aws-ebs-csi-driver:
enabled: false

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
version: 1.24.9
version: 1.25.8
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -13,6 +13,6 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.5"
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts
kubeVersion: ">= 1.24.0"
kubeVersion: ">= 1.25.0"

View File

@ -32,7 +32,7 @@ Kubernetes: `>= 1.24.0`
| addons.external-dns.enabled | bool | `false` | |
| addons.forseti.enabled | bool | `false` | |
| addons.sealed-secrets.enabled | bool | `false` | |
| addons.targetRevision | string | `"0.7.3"` | |
| addons.targetRevision | string | `"0.7.4"` | |
| argocd.argocd-image-updater.enabled | bool | `false` | |
| argocd.enabled | bool | `false` | |
| argocd.istio.enabled | bool | `false` | |
@ -76,9 +76,9 @@ Kubernetes: `>= 1.24.0`
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | |
| storage.gemini.enabled | bool | `false` | |
| storage.k8up.enabled | bool | `false` | |
| storage.snapshotController.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.7.4"` | |
| storage.velero.enabled | bool | `false` | |
| storage.targetRevision | string | `"0.8.0"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)

View File

@ -12,13 +12,13 @@ gemini:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- with .Values.storage.velero }}
velero:
{{- with .Values.storage.k8up }}
k8up:
{{- toYaml . | nindent 2 }}
{{- end }}
snapshotController:
enabled: {{ default false (or (index .Values "storage" "velero" "enabled") (index .Values "storage" "gemini" "enabled")) }}
enabled: {{ default false (index .Values "storage" "gemini" "enabled") }}
aws-ebs-csi-driver:
enabled: {{ default false (index .Values "storage" "aws-ebs-csi-driver" "enabled")}}

View File

@ -10,7 +10,7 @@ global:
addons:
enabled: true
targetRevision: 0.7.3
targetRevision: 0.7.4
external-dns:
enabled: false
forseti:
@ -29,7 +29,7 @@ addons:
network:
enabled: true
retain: true
targetRevision: 0.4.2
targetRevision: 0.4.3
cilium:
cluster: {}
@ -40,12 +40,12 @@ cert-manager:
storage:
enabled: false
targetRevision: 0.7.4
targetRevision: 0.8.1
aws-ebs-csi-driver:
enabled: false
aws-efs-csi-driver:
enabled: false
velero:
k8up:
enabled: false
gemini:
enabled: false
@ -55,13 +55,13 @@ storage:
istio:
enabled: false
namespace: istio-system
targetRevision: 0.9.0
targetRevision: 0.9.1
istio-ingress:
enabled: false
chart: kubezero-istio-gateway
namespace: istio-ingress
targetRevision: 0.9.0
targetRevision: 0.9.1
gateway:
service: {}
@ -69,7 +69,7 @@ istio-private-ingress:
enabled: false
chart: kubezero-istio-gateway
namespace: istio-ingress
targetRevision: 0.9.0
targetRevision: 0.9.1
gateway:
service: {}
@ -84,7 +84,7 @@ metrics:
logging:
enabled: false
namespace: logging
targetRevision: 0.8.4
targetRevision: 0.8.5
argocd:
enabled: false

View File

@ -0,0 +1,19 @@
#!/bin/bash
# set -x
LOC=$(cd $1; pwd -P)
TMP=$(mktemp -d)
cd $TMP
for f in $(grep -Rl "kind: SealedSecret" $LOC); do
echo "Re-encrypting: $f"
csplit -z -s $f -f secret- -b %02d.yaml '/^---$/' '{*}'
for s in secret-*.yaml; do
kubeseal --re-encrypt -f $s -w new -o yaml && cat new >> $f.new && rm new $s
echo "---" >> $f.new
done
head -n -1 $f.new > $f && rm $f.new
done
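
A usage sketch for this new helper, assuming it is saved as scripts/reencrypt.sh (the filename is not shown in this diff) and that kubeseal can reach the in-cluster sealed-secrets-controller:

```bash
# Re-encrypt every SealedSecret manifest under the given directory in place,
# e.g. after the controller's sealing key has been rotated
./scripts/reencrypt.sh clusters/myenv

# Review the rewritten manifests before committing them
git diff -- clusters/myenv
```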