Merge pull request 'Release v1.25' (#52) from v1.25 into master

Reviewed-on: #52
commit 5bcf945213
Author: Stefan Reimer
Date:   2023-05-26 17:38:29 +00:00
277 changed files with 17580 additions and 1112 deletions

View File

@@ -1,11 +1,22 @@
 # Parse version from latest git semver tag
-BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-GIT_TAG=$(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
-TAG ?= $(shell echo $(GIT_TAG) | awk -F '-' '{ print $$1 "-" $$2 }' | sed -e 's/-$$//')
+GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+GIT_TAG := $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
+TAG := $(GIT_TAG)
+# append branch name to tag if NOT main nor master
+ifeq (,$(filter main master, $(GIT_BRANCH)))
+# If branch is substring of tag, omit branch name
+ifeq ($(findstring $(GIT_BRANCH), $(GIT_TAG)),)
+# only append branch name if not equal tag
+ifneq ($(GIT_TAG), $(GIT_BRANCH))
+TAG = $(GIT_TAG)-$(GIT_BRANCH)
+endif
+endif
+endif
 ARCH := amd64
 ALL_ARCHS := amd64 arm64
+_ARCH = $(or $(filter $(ARCH),$(ALL_ARCHS)),$(error $$ARCH [$(ARCH)] must be exactly one of "$(ALL_ARCHS)"))
+# EXTRA_TAGS supposed to be set at the caller, eg. $(shell echo $(TAG) | awk -F '.' '{ print $$1 "." $$2 }')
 ifneq ($(TRIVY_REMOTE),)
 TRIVY_OPTS := --server $(TRIVY_REMOTE)
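The new TAG logic only suffixes the branch name for feature branches. Here is the same decision traced in plain shell, using a hypothetical tag and branch (both values are assumptions for illustration, not taken from this repo):

```bash
#!/bin/sh
# Assumed example values: last semver tag and current branch of a checkout
GIT_TAG="v1.25.0-1-g5bcf945"
GIT_BRANCH="v1.25"

TAG="$GIT_TAG"
case "$GIT_BRANCH" in
  main|master) ;;  # release branches keep the bare tag
  *)
    # append the branch only if it is neither a substring of nor equal to the tag
    if [ "${GIT_TAG#*"$GIT_BRANCH"}" = "$GIT_TAG" ] && [ "$GIT_TAG" != "$GIT_BRANCH" ]; then
      TAG="$GIT_TAG-$GIT_BRANCH"
    fi
    ;;
esac
echo "$TAG"  # -> v1.25.0-1-g5bcf945, since "v1.25" already occurs in the tag
```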
@@ -22,28 +33,30 @@ help: ## Show Help
 grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
 build: ## Build the app
-buildah build --rm --layers -t $(IMAGE):$(TAG)-$(ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(ARCH) --platform linux/$(ARCH) .
+buildah build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .
 test: rm-test-image ## Execute Dockerfile.test
 test -f Dockerfile.test && \
-{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(ARCH) . && \
-podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH)-test; } || \
+{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(_ARCH) . && \
+podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(_ARCH)-test; } || \
 echo "No Dockerfile.test found, skipping test"
 scan: ## Scan image using trivy
-echo "Scanning $(IMAGE):$(TAG)-$(ARCH) using Trivy $(TRIVY_REMOTE)"
-trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(ARCH)
+echo "Scanning $(IMAGE):$(TAG)-$(_ARCH) using Trivy $(TRIVY_REMOTE)"
+trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(_ARCH)
 # first tag and push all actual images
 # create new manifest for each tag and add all available TAG-ARCH before pushing
 push: ecr-login ## push images to registry
 for t in $(TAG) latest $(EXTRA_TAGS); do \
-buildah tag $(IMAGE):$(TAG)-$(ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(ARCH); \
+echo "Tagging image with $(REGISTRY)/$(IMAGE):$${t}-$(ARCH)"; \
+buildah tag $(IMAGE):$(TAG)-$(_ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(_ARCH); \
 buildah manifest rm $(IMAGE):$$t || true; \
 buildah manifest create $(IMAGE):$$t; \
 for a in $(ALL_ARCHS); do \
 buildah manifest add $(IMAGE):$$t $(REGISTRY)/$(IMAGE):$(TAG)-$$a; \
 done; \
+echo "Pushing manifest $(IMAGE):$$t"; \
 buildah manifest push --all $(IMAGE):$$t docker://$(REGISTRY)/$(IMAGE):$$t; \
 done
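For each tag, `push` now logs what it tags, then rebuilds a multi-arch manifest list from every per-arch image before pushing. The same flow in isolation, with assumed values for `IMAGE`, `TAG` and `REGISTRY` (the registry name is hypothetical):

```bash
#!/bin/bash
# Hypothetical values; substitute your own image and registry
IMAGE=kubezero-admin TAG=v1.25.8 REGISTRY=public.ecr.aws/example

for t in "$TAG" latest; do
  buildah tag "$IMAGE:$TAG-amd64" "$REGISTRY/$IMAGE:$t-amd64"
  buildah manifest rm "$IMAGE:$t" || true   # start from a clean manifest list
  buildah manifest create "$IMAGE:$t"
  for a in amd64 arm64; do                  # one entry per architecture
    buildah manifest add "$IMAGE:$t" "$REGISTRY/$IMAGE:$TAG-$a"
  done
  buildah manifest push --all "$IMAGE:$t" "docker://$REGISTRY/$IMAGE:$t"
done
```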
@@ -58,13 +71,13 @@ rm-remote-untagged: ## delete all remote untagged images
 [ -n "$$IMAGE_IDS" ] && aws ecr-public batch-delete-image --repository-name $(IMAGE) --region $(REGION) --image-ids $$IMAGE_IDS || echo "No image to remove"
 rm-image:
-test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(ARCH) > /dev/null
-test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH))" || echo "Error: Removing image failed"
+test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH) > /dev/null
+test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || echo "Error: Removing image failed"
 # Ensure we run the tests by removing any previous runs
 rm-test-image:
-test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH)-test)" || podman image rm -f $(IMAGE):$(TAG)-$(ARCH)-test > /dev/null
-test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH)-test)" || echo "Error: Removing test image failed"
+test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH)-test > /dev/null
+test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || echo "Error: Removing test image failed"
 ci-pull-upstream: ## pull latest shared .ci subtree
 git stash && git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash && git stash pop

View File

@@ -7,12 +7,14 @@ def call(Map config=[:]) {
       label 'podman-aws-trivy'
     }
   }
   stages {
     stage('Prepare') {
-      // get tags
       steps {
-        sh 'git fetch -q --tags ${GIT_URL} +refs/heads/${BRANCH_NAME}:refs/remotes/origin/${BRANCH_NAME}'
+        // pull tags
+        withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
+          sh 'git fetch -q --tags ${GIT_URL}'
+        }
+        sh 'make prepare || true'
       }
     }
@@ -36,8 +38,7 @@ def call(Map config=[:]) {
       TRIVY_OUTPUT = "reports/trivy.html"
     }
     steps {
-      sh 'mkdir -p reports'
-      sh 'make scan'
+      sh 'mkdir -p reports && make scan'
       publishHTML target: [
         allowMissing: true,
         alwaysLinkToLastBuild: true,
@@ -59,10 +60,11 @@ def call(Map config=[:]) {
       }
     }
-    // Push to ECR
+    // Push to container registry, skip if PR
     stage('Push') {
+      when { not { changeRequest() } }
       steps {
-        sh 'make ecr-login push'
+        sh 'make push'
       }
     }

View File

@@ -1,9 +1,9 @@
-ARG ALPINE_VERSION=3.16
-FROM alpine:${ALPINE_VERSION}
+ARG ALPINE_VERSION=3.17
+FROM docker.io/alpine:${ALPINE_VERSION}
 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.24
+ARG KUBE_VERSION=1.25
 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
@@ -18,19 +18,19 @@ RUN cd /etc/apk/keys && \
     bash \
     python3 \
     py3-yaml \
+    restic \
+    helm \
     cri-tools@kubezero \
     kubeadm@kubezero~=${KUBE_VERSION} \
     kubectl@kubezero~=${KUBE_VERSION} \
     etcdhelper@kubezero \
-    etcd-ctl@edge-testing \
-    restic@edge-community \
-    helm@edge-community
+    etcd-ctl@edge-testing
 RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
     mkdir -p /var/lib/kubezero
 ADD admin/kubezero.sh admin/libhelm.sh admin/migrate_argo_values.py /usr/bin
-ADD admin/libhelm.sh admin/v${KUBE_VERSION}/* /var/lib/kubezero
+ADD admin/libhelm.sh admin/pre-upgrade.sh /var/lib/kubezero
 ADD charts/kubeadm /charts/kubeadm
 ADD charts/kubezero /charts/kubezero
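The package list pins `kubeadm`/`kubectl` to the image's Kubernetes minor via apk's tagged-repository syntax (`pkg@tag~=version`, exactly as used in the Dockerfile above). Roughly how that resolves, with an assumed repository URL for the `@kubezero` tag:

```bash
# The '@kubezero' tag must be declared in /etc/apk/repositories first
# (the URL below is an assumption for illustration):
echo "@kubezero https://cdn.zero-downtime.net/alpine/v3.17/kubezero" >> /etc/apk/repositories
apk update
# '~=1.25' fuzzy-matches any 1.25.x version from the tagged repository
apk add kubeadm@kubezero~=1.25 kubectl@kubezero~=1.25
```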

View File

@@ -3,7 +3,8 @@ IMAGE := kubezero-admin
 REGION := us-east-1
 # Also tag as Kubernetes major version
-EXTRA_TAGS = $(shell echo $(TAG) | awk -F '.' '{ print $$1 "." $$2 }')
+MY_TAG = $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
+EXTRA_TAGS = $(shell echo $(MY_TAG) | awk -F '.' '{ print $$1 "." $$2 }')
 include .ci/podman.mk
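`EXTRA_TAGS` derives a rolling Kubernetes-minor tag from the full semver tag, so images are reachable both as e.g. `v1.25.8` and `v1.25`. The awk step in isolation:

```bash
# Splitting on '.', the first two fields of "v1.25.8" are "v1" and "25"
echo "v1.25.8" | awk -F '.' '{ print $1 "." $2 }'
# -> v1.25
```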

View File

@@ -19,7 +19,7 @@ KubeZero is a Kubernetes distribution providing an integrated container platform
 # Version / Support Matrix
 KubeZero releases track the same *minor* version of Kubernetes.
-Any 1.24.X-Y release of Kubezero supports any Kubernetes cluster 1.24.X.
+Any 1.26.X-Y release of Kubezero supports any Kubernetes cluster 1.26.X.
 KubeZero is distributed as a collection of versioned Helm charts, allowing custom upgrade schedules and module versions as needed.
@@ -28,15 +28,15 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custo
 gantt
     title KubeZero Support Timeline
     dateFormat YYYY-MM-DD
-    section 1.23
-    beta :123b, 2022-08-01, 2022-09-01
-    release :after 123b, 2023-02-01
     section 1.24
     beta :124b, 2022-11-14, 2022-12-31
     release :after 124b, 2023-06-01
     section 1.25
     beta :125b, 2023-03-01, 2023-03-31
     release :after 125b, 2023-08-01
+    section 1.26
+    beta :126b, 2023-06-01, 2023-06-30
+    release :after 126b, 2023-10-01
 ```

 [Upstream release policy](https://kubernetes.io/releases/)
@@ -44,7 +44,7 @@ gantt
 # Components
 ## OS
-- all nodes are based on Alpine V3.16
+- all nodes are based on Alpine V3.17
 - 2 GB encrypted root filesystem
 - no 3rd party dependencies at boot ( other than container registries )
 - minimal attack surface
@@ -73,10 +73,8 @@ gantt
 - support for [Inf1 instances](https://aws.amazon.com/ec2/instance-types/inf1/) part of [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/).
 ## Network
+- Cilium using Geneve encapsulation, incl. increased MTU allowing flexible / more containers per worker node compared to eg. AWS VPC CNI
 - Multus support for multiple network interfaces per pod, eg. additional AWS CNI
-- Calico using VxLAN incl. increased MTU
-allows flexible / more containers per worker node compared to eg. AWS VPC CNI
-- isolates container traffic from VPC by using VxLAN overlay
 - no restrictions on IP space / sizing from the underlying VPC architecture
 ## Storage
@@ -95,7 +93,7 @@ allows flexible / more containers per worker node compared to eg. AWS VPC CNI
 - optional full service mesh
 ## Metrics
-- Prometheus support for all components
+- Prometheus support for all components, incl. out of cluster EC2 instances (node_exporter)
 - automated service discovery allowing instant access to common workload metrics
 - pre-configured Grafana dashboards and alerts
 - Alertmanager events via SNSAlertHub to Slack, Google, Matrix, etc.

View File

@@ -161,7 +161,7 @@ control_plane_node() {
   else
     # restore latest backup
-    retry 10 60 30 restic restore latest --no-lock -t / #Review: Use latest no matter what for now: --tag $KUBE_VERSION_MINOR
+    retry 10 60 30 restic restore latest --no-lock -t / # --tag $KUBE_VERSION_MINOR

     # Make last etcd snapshot available
     cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
@@ -214,30 +214,16 @@ control_plane_node() {
       sleep 3
     done

-    # if we are NOT member already, flush etcd to be able to join
+    # see if we are a former member and remove our former self if so
     MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
-    # Failsafe / etcd on ephmeral: we were a member but our dataset is missing
-    # -> remove former self so we can re-join
-    if [ -n "$MY_ID" -a ! -d ${HOSTFS}/var/lib/etcd/member ]; then
-      # Remove former self first
     [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
-      MY_ID=""
-    fi

-    if [ -z "$MY_ID" ]; then
-      # flush etcd data directory from restore
+    # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
     rm -rf ${HOSTFS}/var/lib/etcd/member

     # Announce new etcd member and capture ETCD_INITIAL_CLUSTER, retry needed in case another node joining causes temp quorum loss
     ETCD_ENVS=$(retry 12 5 5 etcdctl member add $ETCD_NODENAME --peer-urls="https://${ETCD_NODENAME}:2380" --endpoints=$etcd_endpoints)
     export $(echo "$ETCD_ENVS" | grep ETCD_INITIAL_CLUSTER= | sed -e 's/"//g')
-    else
-      # build initial_cluster string from running cluster
-      _cluster=$(etcdctl member list --endpoints=$etcd_endpoints -w json | jq -r '.members[] | "\(.name)=\(.peerURLs[]),"')
-      export ETCD_INITIAL_CLUSTER=$(echo ${_cluster%%,} | sed -e 's/ //g')
-    fi

     # Patch kubeadm-values.yaml and re-render to get etcd manifest patched
     yq eval -i '.etcd.state = "existing"
@@ -358,7 +344,6 @@ backup() {
   cp -r ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}

   # Backup via restic
-  restic snapshots || restic init
   restic backup ${WORKDIR} -H $CLUSTERNAME --tag $CLUSTER_VERSION

   echo "Backup complete."

View File

@@ -1,7 +1,7 @@
 #!/bin/bash

 # Simulate well-known CRDs being available
-API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1"
+API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget"

 # Waits for max 300s and retries
 function wait_for() {
@@ -166,6 +166,9 @@ function _helm() {
     render
     kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?

+    # Try again without server-side, review with 1.26, required for cert-manager during 1.25
+    [ $rc -ne 0 ] && kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?
+
     # Optional post hook
     declare -F ${module}-post && ${module}-post
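The `&& rc=$? || rc=$?` idiom captures the exit code without tripping `set -e`; the new fallback then repeats the apply client-side when server-side apply is rejected. The pattern in isolation (hardcoding `apply` where the script uses `$action`):

```bash
# Prefer server-side apply; fall back to client-side if it fails
# (needed for cert-manager during the 1.25 cycle per the comment above)
kubectl apply -f "$WORKDIR/helm.yaml" --server-side --force-conflicts && rc=$? || rc=$?
[ $rc -ne 0 ] && kubectl apply -f "$WORKDIR/helm.yaml" && rc=$? || rc=$?
```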

View File

@@ -8,26 +8,10 @@ import yaml
 def migrate(values):
     """Actual changes here"""

-    # ClusterBackup is enabled on AWS anyways, same with cluster-autoscaler
-    if "aws" in values["global"]:
-        deleteKey(values["addons"], "clusterBackup")
-        deleteKey(values["addons"], "cluster-autoscaler")
-
-    # Remove calico and multus
-    deleteKey(values["network"], "calico")
-    deleteKey(values["network"], "multus")
-
-    # ArgoCD helm changes
-    if "argocd" in values:
-        if "server" in values["argocd"]:
-            if not "configs" in values["argocd"]:
-                values["argocd"]["configs"] = {}
-            if not "cm" in values["argocd"]["configs"]:
-                values["argocd"]["configs"]["cm"] = {}
-            values["argocd"]["configs"]["cm"]["url"] = values["argocd"]["server"]["config"][
-                "url"
-            ]
-            deleteKey(values["argocd"], "server")
+    # Remove various keys as they have been merged into the metrics template
+    deleteKey(values["metrics"]['kube-prometheus-stack']["alertmanager"]["alertmanagerSpec"], "podMetadata")
+    deleteKey(values["metrics"]['kube-prometheus-stack']["alertmanager"], "config")
+    deleteKey(values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"], "externalLabels")

     return values
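For ad-hoc testing, the same key removals can be expressed with `yq` against a values file on disk; this is only an illustration (file name assumed), the actual migration stays in Python via `deleteKey`:

```bash
yq -i '
  del(.metrics."kube-prometheus-stack".alertmanager.alertmanagerSpec.podMetadata) |
  del(.metrics."kube-prometheus-stack".alertmanager.config) |
  del(.metrics."kube-prometheus-stack".prometheus.prometheusSpec.externalLabels)
' kubezero-values.yaml
```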

View File

@@ -1,7 +1,7 @@
 #!/bin/bash -e

 #VERSION="latest"
-VERSION="v1.24"
+VERSION="v1.25"
 ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
@@ -10,7 +10,6 @@ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
 [ -n "$DEBUG" ] && set -x

 all_nodes_upgrade() {
   CMD="$1"
@@ -148,40 +147,37 @@ argo_used && disable_argo
 #all_nodes_upgrade ""

+# Cleanup
+# Remove calico CRDs
+kubectl delete -f https://git.zero-downtime.net/ZeroDownTime/kubezero/raw/tag/v1.23.11/charts/kubezero-network/charts/calico/crds/crds.yaml 2>/dev/null || true
+kubectl delete servicemonitor calico-node -n kube-system 2>/dev/null || true
+
+# delete old kubelet configs
+for cm in $(kubectl get cm -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete cm $cm -n kube-system; done
+for rb in $(kubectl get rolebindings -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete rolebindings $rb -n kube-system; done
+
 control_plane_upgrade kubeadm_upgrade

 echo "Adjust kubezero values as needed:"
 # shellcheck disable=SC2015
 argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system

-# Remove calico
-#kubectl delete deployment calico-kube-controllers -n kube-system || true
-#kubectl delete daemonset calico-node -n kube-system || true
-#kubectl delete network-attachment-definitions calico -n kube-system || true
-
-# Remove previous cilium config as the helm options are additive only -> fail
-kubectl delete configmap cilium-config -n kube-system || true
-
 control_plane_upgrade "apply_network, apply_addons, apply_storage"

-kubectl rollout restart daemonset/kube-multus-ds -n kube-system
-kubectl rollout restart daemonset/cilium -n kube-system
-
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning

 echo "Applying remaining KubeZero modules..."

-# delete argocd deployments as various immutable things changed, also redis restart fails otherwise
-kubectl delete deployment argocd-redis -n argocd || true
-kubectl delete deployment argocd-repo-server -n argocd || true
-kubectl delete statefulset argocd-application-controller -n argocd || true
-
-# Delete prometheus-push gateway due to label changes
-kubectl delete deploy -l app=prometheus-pushgateway -n monitoring || true
-
 control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"

+# Trigger backup of upgraded cluster state
+kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$VERSION -n kube-system
+while true; do
+  kubectl wait --for=condition=complete job/kubezero-backup-$VERSION -n kube-system 2>/dev/null && kubectl delete job kubezero-backup-$VERSION -n kube-system && break
+  sleep 1
+done
+
 # Final step is to commit the new argocd kubezero app
 kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.24.9
+version: 1.25.8
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -11,4 +11,4 @@ keywords:
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
-kubeVersion: ">= 1.24.0"
+kubeVersion: ">= 1.25.0"

View File

@@ -1,6 +1,6 @@
 # kubeadm
-![Version: 1.24.9](https://img.shields.io/badge/Version-1.24.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.25.8](https://img.shields.io/badge/Version-1.25.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero Kubeadm cluster config
@@ -14,7 +14,7 @@ KubeZero Kubeadm cluster config
 ## Requirements

-Kubernetes: `>= 1.24.0`
+Kubernetes: `>= 1.25.0`

 ## Values

View File

@@ -1,6 +1,6 @@
 {{- /* Feature gates for all control plane components */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "CronJobTimeZone" "NodeOutOfServiceVolumeDetach" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "NodeOutOfServiceVolumeDetach" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,

View File

@@ -12,13 +12,3 @@ spec:
       memory: 128Mi
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""
-  affinity:
-    podAntiAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
-            matchExpressions:
-              - key: k8s-app
-                operator: In
-                values:
-                  - kube-dns
-          topologyKey: "kubernetes.io/hostname"

View File

@@ -1,6 +1,6 @@
 {{- if .Values.api.awsIamAuth.enabled }}
-kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
 metadata:
   name: aws-iam-authenticator
 rules:
@@ -51,8 +51,8 @@ metadata:
   namespace: kube-system
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   name: aws-iam-authenticator
   namespace: kube-system

View File

@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.7.3
-appVersion: v1.24
+version: 0.7.5
+appVersion: v1.25
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -20,28 +20,28 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: external-dns
-    version: 1.11.0
+    version: 1.12.2
     repository: https://kubernetes-sigs.github.io/external-dns/
     condition: external-dns.enabled
   - name: cluster-autoscaler
-    version: 9.21.0
+    version: 9.28.0
     repository: https://kubernetes.github.io/autoscaler
     condition: cluster-autoscaler.enabled
   - name: nvidia-device-plugin
-    version: 0.13.0
+    version: 0.14.0
     # https://github.com/NVIDIA/k8s-device-plugin
    repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
   - name: sealed-secrets
-    version: 2.7.1
+    version: 2.8.1
     repository: https://bitnami-labs.github.io/sealed-secrets
     condition: sealed-secrets.enabled
   - name: aws-node-termination-handler
-    version: 0.20.1
+    version: 0.21.0
     # repository: https://aws.github.io/eks-charts
     condition: aws-node-termination-handler.enabled
   - name: aws-eks-asg-rolling-update-handler
-    version: 1.2.7
+    version: 1.3.0
     # repository: https://twin.github.io/helm-charts
     condition: aws-eks-asg-rolling-update-handler.enabled
-kubeVersion: ">= 1.24.0"
+kubeVersion: ">= 1.25.0"

View File

@@ -1,6 +1,6 @@
 # kubezero-addons
-![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
+![Version: 0.7.5](https://img.shields.io/badge/Version-0.7.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.25](https://img.shields.io/badge/AppVersion-v1.25-informational?style=flat-square)

 KubeZero umbrella chart for various optional cluster addons
@@ -14,16 +14,16 @@ KubeZero umbrella chart for various optional cluster addons
 ## Requirements

-Kubernetes: `>= 1.24.0`
+Kubernetes: `>= 1.25.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| | aws-eks-asg-rolling-update-handler | 1.2.7 |
-| | aws-node-termination-handler | 0.20.1 |
-| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.7.1 |
-| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
-| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
-| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.13.0 |
+| | aws-eks-asg-rolling-update-handler | 1.3.0 |
+| | aws-node-termination-handler | 0.21.0 |
+| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.8.1 |
+| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.12.2 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.28.0 |
+| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.14.0 |

 # MetalLB
@@ -103,8 +103,11 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.autoDiscovery.clusterName | string | `""` | |
 | cluster-autoscaler.awsRegion | string | `"us-west-2"` | |
 | cluster-autoscaler.enabled | bool | `false` | |
+| cluster-autoscaler.extraArgs.balance-similar-node-groups | bool | `true` | |
+| cluster-autoscaler.extraArgs.ignore-taint | string | `"node.cilium.io/agent-not-ready"` | |
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
+| cluster-autoscaler.image.tag | string | `"v1.25.1"` | |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` | |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` | |
@@ -139,6 +142,10 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/instance-type"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"g5.xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[10] | string | `"g4dn.4xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[11] | string | `"g4dn.8xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[12] | string | `"g4dn.12xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[13] | string | `"g4dn.16xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[1] | string | `"g5.2xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[2] | string | `"g5.4xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[3] | string | `"g5.8xlarge"` | |
@@ -146,6 +153,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[5] | string | `"g5.16xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[6] | string | `"g5.24xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[7] | string | `"g5.48xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[8] | string | `"g4dn.xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[9] | string | `"g4dn.2xlarge"` | |
 | nvidia-device-plugin.enabled | bool | `false` | |
 | nvidia-device-plugin.tolerations[0].effect | string | `"NoSchedule"` | |
 | nvidia-device-plugin.tolerations[0].key | string | `"nvidia.com/gpu"` | |
@@ -155,7 +164,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | nvidia-device-plugin.tolerations[1].operator | string | `"Exists"` | |
 | sealed-secrets.enabled | bool | `false` | |
 | sealed-secrets.fullnameOverride | string | `"sealed-secrets-controller"` | |
-| sealed-secrets.keyrenewperiod | int | `0` | |
+| sealed-secrets.keyrenewperiod | string | `"0"` | |
 | sealed-secrets.metrics.serviceMonitor.enabled | bool | `false` | |
 | sealed-secrets.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | sealed-secrets.resources.limits.memory | string | `"128Mi"` | |

View File

@@ -5,4 +5,4 @@ home: https://github.com/TwiN/aws-eks-asg-rolling-update-handler
 maintainers:
   - name: TwiN
 name: aws-eks-asg-rolling-update-handler
-version: 1.2.7
+version: 1.3.0

View File

@@ -0,0 +1,13 @@
+# aws-eks-asg-rolling-update-handler
+
+## Configuration
+The following table lists the configurable parameters of the aws-eks-asg-rolling-update-handler chart and their default values.
+
+| Parameters | Description | Required | Default |
+|:-----------|:------------|:---------|:------------|
+| environmentVars | environment variables for aws-eks-asg-rolling-update-handler container, available variables are listed [here](https://github.com/TwiN/aws-eks-asg-rolling-update-handler/blob/master/README.md#usage) | yes |`[{"name":"CLUSTER_NAME","value":"cluster-name"}]`|
+| replicaCount | Number of aws-eks-asg-rolling-update-handler replicas | yes |`1` |
+| image.repository | Image repository | yes | `twinproduction/aws-eks-asg-rolling-update-handler` |
+| image.tag | image tag | yes | `v1.4.3` |
+| image.pullPolicy | Image pull policy | yes | `IfNotPresent` |
+| resources | CPU/memory resource requests/limits | no | `{}` |
+| podAnnotations | Annotations to add to the aws-eks-asg-rolling-update-handler pod configuration | no | `{}` |

View File

@@ -15,6 +15,10 @@ spec:
     metadata:
       labels:
 {{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 8 }}
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
     spec:
       automountServiceAccountToken: true
       serviceAccountName: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
@@ -25,11 +29,11 @@ spec:
           image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
           imagePullPolicy: {{ .Values.image.pullPolicy }}
           env:
            {{- toYaml .Values.environmentVars | nindent 12 }}
           {{- with .Values.resources }}
           resources:
             {{- toYaml . | nindent 12 }}
           {{- end }}
           volumeMounts:
           - name: aws-token
             mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
@@ -52,5 +56,5 @@ spec:
         {{- end }}
       {{- with .Values.imagePullSecrets }}
       imagePullSecrets:
         {{- toYaml . | nindent 8 }}
       {{- end }}

View File

@@ -2,7 +2,7 @@ replicaCount: 1

 image:
   repository: twinproduction/aws-eks-asg-rolling-update-handler
-  tag: v1.4.3
+  tag: v1.7.0
   pullPolicy: IfNotPresent

 #imagePullSecrets:
@@ -22,6 +22,17 @@ environmentVars:
 #- name: ENVIRONMENT
 #  value: ""

+resources: {}
+#  limits:
+#    cpu: 0.3
+#    memory: 100Mi
+#  requests:
+#    cpu: 0.1
+#    memory: 50Mi
+
+podAnnotations: {}
+#  prometheus.io/port: "8080"
+#  prometheus.io/scrape: "true"
+
 serviceAccount:
   create: true
   #name: aws-eks-asg-rolling-update-handler

View File

@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.18.1
+appVersion: 1.19.0
 description: A Helm chart for the AWS Node Termination Handler.
 home: https://github.com/aws/eks-charts
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -22,4 +22,4 @@ sources:
   - https://github.com/aws/aws-node-termination-handler/
   - https://github.com/aws/eks-charts/
 type: application
-version: 0.20.1
+version: 0.21.0

View File

@@ -56,7 +56,7 @@ The configuration in this table applies to all AWS Node Termination Handler modes.
 | `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `nil` |
 | `serviceAccount.annotations` | Annotations to add to the service account. | `{}` |
 | `rbac.create` | If `true`, create the RBAC resources. | `true` |
-| `rbac.pspEnabled` | If `true`, create a pod security policy resource. | `true` |
+| `rbac.pspEnabled` | If `true`, create a pod security policy resource. Note: `PodSecurityPolicy`s will not be created when Kubernetes version is 1.25 or later. | `true` |
 | `customLabels` | Labels to add to all resource metadata. | `{}` |
 | `podLabels` | Labels to add to the pod. | `{}` |
 | `podAnnotations` | Annotations to add to the pod. | `{}` |
@@ -123,7 +123,7 @@ The configuration in this table applies to AWS Node Termination Handler in queue-processor mode.

 The configuration in this table applies to AWS Node Termination Handler in IMDS mode.

 | Parameter | Description | Default |
-| -------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
+| -------------------------------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|
 | `targetNodeOs` | Space separated list of node OS's to target (e.g. `"linux"`, `"windows"`, `"linux windows"`). Windows support is **EXPERIMENTAL**. | `"linux"` |
 | `linuxPodLabels` | Labels to add to each Linux pod. | `{}` |
 | `windowsPodLabels` | Labels to add to each Windows pod. | `{}` |
@@ -152,10 +152,10 @@ The configuration in this table applies to AWS Node Termination Handler in IMDS
 | `windowsTolerations` | Override `daemonsetTolerations` for the Linux DaemonSet. | `[]` |
 | `enableProbesServer` | If `true`, start an http server exposing `/healthz` endpoint for probes. | `false` |
 | `metadataTries` | The number of times to try requesting metadata. | `3` |
-| `enableSpotInterruptionDraining` | If `true`, drain nodes when the spot interruption termination notice is received. | `true` |
-| `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. This is **EXPERIMENTAL**. | `false` |
-| `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. | `false` |
-| `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. | `false` |
+| `enableSpotInterruptionDraining` | If `true`, drain nodes when the spot interruption termination notice is received. Only used in IMDS mode. | `true` |
+| `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. Only used in IMDS mode. | `true` |
+| `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. Only used in IMDS mode. | `false` |
+| `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. Only used in IMDS mode. | `false` |

 ### Testing Configuration

View File

@@ -146,14 +146,6 @@ spec:
           - name: WEBHOOK_TEMPLATE
             value: {{ .Values.webhookTemplate | quote }}
           {{- end }}
-          - name: ENABLE_SPOT_INTERRUPTION_DRAINING
-            value: {{ .Values.enableSpotInterruptionDraining | quote }}
-          - name: ENABLE_SCHEDULED_EVENT_DRAINING
-            value: {{ .Values.enableScheduledEventDraining | quote }}
-          - name: ENABLE_REBALANCE_MONITORING
-            value: {{ .Values.enableRebalanceMonitoring | quote }}
-          - name: ENABLE_REBALANCE_DRAINING
-            value: {{ .Values.enableRebalanceDraining | quote }}
           - name: ENABLE_SQS_TERMINATION_DRAINING
             value: "true"
           {{- with .Values.awsRegion }}

View File

@@ -1,4 +1,4 @@
-{{- if .Values.rbac.pspEnabled }}
+{{- if and (.Values.rbac.pspEnabled) (semverCompare "<1.25-0" .Capabilities.KubeVersion.GitVersion) }}
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:

View File

@@ -23,7 +23,7 @@ serviceAccount:
 rbac:
   # Specifies whether RBAC resources should be created
   create: true
-  # Specifies if PodSecurityPolicy resources should be created
+  # Specifies if PodSecurityPolicy resources should be created. PodSecurityPolicy will not be created when Kubernetes version is 1.25 or later.
   pspEnabled: true

 customLabels: {}
@@ -259,22 +259,22 @@ daemonsetTolerations:
 linuxTolerations: []
 windowsTolerations: []

-# If the probes server is running for the Daemonset
+# If the probes server is running.
 enableProbesServer: false

 # Total number of times to try making the metadata request before failing.
 metadataTries: 3

-# enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received
+# enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received. Only used in IMDS mode.
 enableSpotInterruptionDraining: true

-# enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event
-enableScheduledEventDraining: false
+# enableScheduledEventDraining If false, do not drain nodes before the maintenance window starts for an EC2 instance scheduled event. Only used in IMDS mode.
+enableScheduledEventDraining: true

-# enableRebalanceMonitoring If true, cordon nodes when the rebalance recommendation notice is received
+# enableRebalanceMonitoring If true, cordon nodes when the rebalance recommendation notice is received. Only used in IMDS mode.
 enableRebalanceMonitoring: false

-# enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received
+# enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received. Only used in IMDS mode.
 enableRebalanceDraining: false

 # ---------------------------------------------------------------------------------------------------------------------
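The four `enable*` toggles only matter in IMDS mode, where the handler polls the instance-metadata service for termination signals. Roughly the endpoints involved (well-known IMDS paths; IMDSv2 token handling omitted for brevity):

```bash
# Spot interruption notice (enableSpotInterruptionDraining)
curl -s http://169.254.169.254/latest/meta-data/spot/instance-action
# Scheduled maintenance events (enableScheduledEventDraining)
curl -s http://169.254.169.254/latest/meta-data/events/maintenance/scheduled
# Rebalance recommendation (enableRebalanceMonitoring / enableRebalanceDraining)
curl -s http://169.254.169.254/latest/meta-data/events/recommendations/rebalance
```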

View File

@@ -1,16 +1,10 @@
 diff -tuNr charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml
---- charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml	2022-12-16 13:10:26.049272371 +0000
-+++ charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml	2022-12-16 15:56:00.880666339 +0000
-@@ -25,7 +25,31 @@
-           image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
-           imagePullPolicy: {{ .Values.image.pullPolicy }}
-           env:
--{{- toYaml .Values.environmentVars | nindent 12 }}
-+          {{- toYaml .Values.environmentVars | nindent 12 }}
-+          {{- with .Values.resources }}
-+          resources:
-+            {{- toYaml . | nindent 12 }}
-+          {{- end }}
+--- charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml	2023-04-12 15:49:08.744242462 +0000
++++ charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml	2023-04-12 15:55:44.399489809 +0000
+@@ -34,6 +34,26 @@
+            resources:
+ {{- toYaml . | nindent 12 }}
+           {{- end }}
 +          volumeMounts:
 +          - name: aws-token
 +            mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"

View File

@@ -31,7 +31,7 @@ sealed-secrets:
   fullnameOverride: sealed-secrets-controller

   # Disable auto keyrotation for now
-  keyrenewperiod: 0
+  keyrenewperiod: "0"

   resources:
     requests:
@@ -179,10 +179,18 @@ nvidia-device-plugin:
             - g5.24xlarge
             - g5.48xlarge
             - g4dn.xlarge
+            - g4dn.2xlarge
+            - g4dn.4xlarge
+            - g4dn.8xlarge
+            - g4dn.12xlarge
+            - g4dn.16xlarge

 cluster-autoscaler:
   enabled: false

+  image:
+    tag: v1.25.1
+
   autoDiscovery:
     clusterName: ""
   awsRegion: "us-west-2"
@@ -201,6 +209,8 @@ cluster-autoscaler:
   extraArgs:
     scan-interval: 30s
     skip-nodes-with-local-storage: false
+    balance-similar-node-groups: true
+    ignore-taint: "node.cilium.io/agent-not-ready"

   #securityContext:
   #  runAsNonRoot: true

View File

@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero ArgoCD - config, branding, image-updater (optional)
 name: kubezero-argocd
-version: 0.11.2
+version: 0.12.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,13 +17,13 @@ dependencies:
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: argo-cd
-    version: 5.16.10
+    version: 5.28.2
     repository: https://argoproj.github.io/argo-helm
   - name: argocd-apps
-    version: 0.0.6
+    version: 0.0.9
     repository: https://argoproj.github.io/argo-helm
   - name: argocd-image-updater
-    version: 0.8.1
+    version: 0.8.5
     repository: https://argoproj.github.io/argo-helm
     condition: argocd-image-updater.enabled
-kubeVersion: ">= 1.24.0"
+kubeVersion: ">= 1.25.0"

View File

@ -1,6 +1,6 @@
# kubezero-argocd # kubezero-argocd
![Version: 0.11.2](https://img.shields.io/badge/Version-0.11.2-informational?style=flat-square) ![Version: 0.12.0](https://img.shields.io/badge/Version-0.12.0-informational?style=flat-square)
KubeZero ArgoCD - config, branding, image-updater (optional) KubeZero ArgoCD - config, branding, image-updater (optional)
@ -14,13 +14,13 @@ KubeZero ArgoCD - config, branding, image-updater (optional)
## Requirements ## Requirements
Kubernetes: `>= 1.24.0` Kubernetes: `>= 1.25.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 5.16.10 | | https://argoproj.github.io/argo-helm | argo-cd | 5.28.2 |
| https://argoproj.github.io/argo-helm | argocd-apps | 0.0.6 | | https://argoproj.github.io/argo-helm | argocd-apps | 0.0.9 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.8.1 | | https://argoproj.github.io/argo-helm | argocd-image-updater | 0.8.5 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
## Values ## Values
@ -30,12 +30,12 @@ Kubernetes: `>= 1.24.0`
| argo-cd.applicationSet.enabled | bool | `false` | | | argo-cd.applicationSet.enabled | bool | `false` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | | | argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | int | `300` | | | argo-cd.configs.cm."timeout.reconciliation" | int | `300` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.24 - Release notes"` | | | argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.25 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | | | argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | | | argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.24"` | | | argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.25"` | |
| argo-cd.configs.cm.url | string | `"argocd.example.com"` | | | argo-cd.configs.cm.url | string | `"argocd.example.com"` | |
| argo-cd.configs.knownHosts.data.ssh_known_hosts | string | `"bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==\ngithub.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf\ngitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngit.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==\n"` | | | argo-cd.configs.knownHosts.data.ssh_known_hosts | string | `"bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE=\nbitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO\nbitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M=\ngithub.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf\ngitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngit.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==\n"` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | | | argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | | | argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | | | argo-cd.configs.params."server.enable.gzip" | bool | `true` | |


@ -29,8 +29,8 @@ argo-cd:
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); } .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
cm: cm:
ui.bannercontent: "KubeZero v1.24 - Release notes" ui.bannercontent: "KubeZero v1.25 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.24" ui.bannerurl: "https://kubezero.com/releases/v1.25"
ui.bannerpermanent: "true" ui.bannerpermanent: "true"
ui.bannerposition: "bottom" ui.bannerposition: "bottom"


@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-auth name: kubezero-auth
description: KubeZero umbrella chart for all things Authentication and Identity management description: KubeZero umbrella chart for all things Authentication and Identity management
type: application type: application
version: 0.3.4 version: 0.3.5
appVersion: 20.0.2 appVersion: 21.1.1
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -20,4 +20,4 @@ dependencies:
version: 11.8.1 version: 11.8.1
repository: https://charts.bitnami.com/bitnami repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled condition: postgresql.enabled
kubeVersion: ">= 1.20.0" kubeVersion: ">= 1.25.0"


@ -1,6 +1,6 @@
# kubezero-auth # kubezero-auth
![Version: 0.3.4](https://img.shields.io/badge/Version-0.3.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 20.0.2](https://img.shields.io/badge/AppVersion-20.0.2-informational?style=flat-square) ![Version: 0.3.5](https://img.shields.io/badge/Version-0.3.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 21.1.1](https://img.shields.io/badge/AppVersion-21.1.1-informational?style=flat-square)
KubeZero umbrella chart for all things Authentication and Identity management KubeZero umbrella chart for all things Authentication and Identity management
@ -14,7 +14,7 @@ KubeZero umbrella chart for all things Authentication and Identity management
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.25.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|


@ -4,20 +4,20 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
annotations: annotations:
app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000 app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels: labels:
app.kubernetes.io/name: keycloak-operator app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 20.0.2 app.kubernetes.io/version: 21.1.1
name: keycloak-operator name: keycloak-operator
--- ---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
annotations: annotations:
app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000 app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels: labels:
app.kubernetes.io/name: keycloak-operator app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 20.0.2 app.kubernetes.io/version: 21.1.1
name: keycloak-operator name: keycloak-operator
spec: spec:
ports: ports:
@ -26,7 +26,7 @@ spec:
targetPort: 8080 targetPort: 8080
selector: selector:
app.kubernetes.io/name: keycloak-operator app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 20.0.2 app.kubernetes.io/version: 21.1.1
type: ClusterIP type: ClusterIP
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
@ -179,24 +179,24 @@ apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
annotations: annotations:
app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000 app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels: labels:
app.kubernetes.io/name: keycloak-operator app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 20.0.2 app.kubernetes.io/version: 21.1.1
name: keycloak-operator name: keycloak-operator
spec: spec:
replicas: 1 replicas: 1
selector: selector:
matchLabels: matchLabels:
app.kubernetes.io/name: keycloak-operator app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 20.0.2 app.kubernetes.io/version: 21.1.1
template: template:
metadata: metadata:
annotations: annotations:
app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000 app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels: labels:
app.kubernetes.io/name: keycloak-operator app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 20.0.2 app.kubernetes.io/version: 21.1.1
spec: spec:
containers: containers:
- env: - env:
@ -205,8 +205,8 @@ spec:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: OPERATOR_KEYCLOAK_IMAGE - name: OPERATOR_KEYCLOAK_IMAGE
value: quay.io/keycloak/keycloak:20.0.2 value: quay.io/keycloak/keycloak:21.1.1
image: quay.io/keycloak/keycloak-operator:20.0.2 image: quay.io/keycloak/keycloak-operator:21.1.1
imagePullPolicy: Always imagePullPolicy: Always
livenessProbe: livenessProbe:
failureThreshold: 3 failureThreshold: 3


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager description: KubeZero Umbrella Chart for cert-manager
type: application type: application
version: 0.9.3 version: 0.9.4
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -16,6 +16,6 @@ dependencies:
version: ">= 0.1.6" version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: cert-manager - name: cert-manager
version: 1.9.1 version: 1.11.1
repository: https://charts.jetstack.io repository: https://charts.jetstack.io
kubeVersion: ">= 1.24.0" kubeVersion: ">= 1.25.0"


@ -1,6 +1,6 @@
# kubezero-cert-manager # kubezero-cert-manager
![Version: 0.9.3](https://img.shields.io/badge/Version-0.9.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.4](https://img.shields.io/badge/Version-0.9.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for cert-manager KubeZero Umbrella Chart for cert-manager
@ -14,12 +14,12 @@ KubeZero Umbrella Chart for cert-manager
## Requirements ## Requirements
Kubernetes: `>= 1.24.0` Kubernetes: `>= 1.25.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jetstack.io | cert-manager | 1.9.1 | | https://charts.jetstack.io | cert-manager | 1.11.1 |
## AWS - OIDC IAM roles ## AWS - OIDC IAM roles


@ -18,7 +18,7 @@
"subdir": "contrib/mixin" "subdir": "contrib/mixin"
} }
}, },
"version": "f1842b6ecf67a8102766cc914eaa2a8c7ad97314", "version": "9d2cda4e44a26f064d8578e258bbba2fc3cd5b73",
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc=" "sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
}, },
{ {
@ -28,7 +28,7 @@
"subdir": "grafonnet" "subdir": "grafonnet"
} }
}, },
"version": "30280196507e0fe6fa978a3e0eaca3a62844f817", "version": "f0b70307b8e5f12236b277883d998af129a8211f",
"sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc=" "sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc="
}, },
{ {
@ -38,7 +38,7 @@
"subdir": "grafana-builder" "subdir": "grafana-builder"
} }
}, },
"version": "15484ab1cb78eb7588e6b79ac52fc04e63f552b4", "version": "e0b90a4435817ad642d8d049e7dd975264cb960e",
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0=" "sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
}, },
{ {
@ -58,7 +58,7 @@
"subdir": "lib/promgrafonnet" "subdir": "lib/promgrafonnet"
} }
}, },
"version": "3c386687c1f8ceb6b79ff887c4a934e9cee1b90a", "version": "eed459199703c969afc318ea55b9361ae48180a7",
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps=" "sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
}, },
{ {


@ -1,3 +1,4 @@
{{- if index .Values "cert-manager" "prometheus" "servicemonitor" "enabled" }}
apiVersion: monitoring.coreos.com/v1 apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule kind: PrometheusRule
metadata: metadata:
@ -51,3 +52,4 @@ spec:
labels: labels:
severity: critical severity: critical
{{- end }}


@ -23,6 +23,8 @@ cert-manager:
leaderElection: leaderElection:
namespace: "cert-manager" namespace: "cert-manager"
#enableCertificateOwnerRef: true
# On AWS enable Projected Service Accounts to assume IAM role # On AWS enable Projected Service Accounts to assume IAM role
#extraEnv: #extraEnv:
#- name: AWS_ROLE_ARN #- name: AWS_ROLE_ARN


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci name: kubezero-ci
description: KubeZero umbrella chart for all things CI description: KubeZero umbrella chart for all things CI
type: application type: application
version: 0.5.25 version: 0.6.2
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -22,16 +22,15 @@ dependencies:
repository: https://gocd.github.io/helm-chart repository: https://gocd.github.io/helm-chart
condition: gocd.enabled condition: gocd.enabled
- name: gitea - name: gitea
version: 6.0.5 version: 8.2.0
repository: https://dl.gitea.io/charts/ repository: https://dl.gitea.io/charts/
condition: gitea.enabled condition: gitea.enabled
- name: jenkins - name: jenkins
version: 4.2.20 version: 4.3.20
repository: https://charts.jenkins.io repository: https://charts.jenkins.io
condition: jenkins.enabled condition: jenkins.enabled
- name: trivy - name: trivy
version: 0.4.17 version: 0.7.0
repository: https://aquasecurity.github.io/helm-charts/ repository: https://aquasecurity.github.io/helm-charts/
condition: trivy.enabled condition: trivy.enabled
kubeVersion: ">= 1.24.0"
kubeVersion: ">= 1.20.0"


@ -1,6 +1,6 @@
# kubezero-ci # kubezero-ci
![Version: 0.5.24](https://img.shields.io/badge/Version-0.5.24-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.6.2](https://img.shields.io/badge/Version-0.6.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI KubeZero umbrella chart for all things CI
@ -14,14 +14,14 @@ KubeZero umbrella chart for all things CI
## Requirements ## Requirements
Kubernetes: `>= 1.20.0` Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 | | https://aquasecurity.github.io/helm-charts/ | trivy | 0.7.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 4.2.17 | | https://charts.jenkins.io | jenkins | 4.3.20 |
| https://dl.gitea.io/charts/ | gitea | 6.0.5 | | https://dl.gitea.io/charts/ | gitea | 8.2.0 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 | | https://gocd.github.io/helm-chart | gocd | 1.40.8 |
# Jenkins # Jenkins
@ -34,8 +34,10 @@ Kubernetes: `>= 1.20.0`
# Gitea # Gitea
## OpenSSH 8.8 RSA disabled # Verdaccio
- https://github.com/go-gitea/gitea/issues/17798
## Authentication sealed-secret
```htpasswd -n -b -B -C 4 <username> <password> | kubeseal --raw --namespace verdaccio --name verdaccio-htpasswd```
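The `kubeseal --raw` output is a single encrypted string that then goes into a SealedSecret manifest. A minimal sketch, assuming the entry is stored under an `htpasswd` key (the key name and the `AgB...` value are placeholders, not something the command above dictates):

```yaml
# Hypothetical SealedSecret carrying the encrypted htpasswd entry;
# name and namespace match the kubeseal flags above, the key name is assumed.
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  name: verdaccio-htpasswd
  namespace: verdaccio
spec:
  encryptedData:
    htpasswd: AgB...  # paste the kubeseal --raw output here
```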
## Resources ## Resources
@ -82,6 +84,10 @@ Kubernetes: `>= 1.20.0`
| jenkins.agent.image | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | | | jenkins.agent.image | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.podName | string | `"podman-aws"` | | | jenkins.agent.podName | string | `"podman-aws"` | |
| jenkins.agent.podRetention | string | `"Default"` | | | jenkins.agent.podRetention | string | `"Default"` | |
| jenkins.agent.resources.limits.cpu | string | `""` | |
| jenkins.agent.resources.limits.memory | string | `""` | |
| jenkins.agent.resources.requests.cpu | string | `""` | |
| jenkins.agent.resources.requests.memory | string | `""` | |
| jenkins.agent.showRawYaml | bool | `false` | | | jenkins.agent.showRawYaml | bool | `false` | |
| jenkins.agent.tag | string | `"v0.4.1"` | | | jenkins.agent.tag | string | `"v0.4.1"` | |
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | | | jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
@ -92,18 +98,18 @@ Kubernetes: `>= 1.20.0`
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | | | jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | | | jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | | | jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3743.v1fa_4c724c3b_7"` | | | jenkins.controller.installPlugins[0] | string | `"kubernetes:3923.v294a_d4250b_91"` | |
| jenkins.controller.installPlugins[10] | string | `"build-discarder:139.v05696a_7fe240"` | | | jenkins.controller.installPlugins[10] | string | `"build-discarder:139.v05696a_7fe240"` | |
| jenkins.controller.installPlugins[11] | string | `"dark-theme:262.v0202a_4c8fb_6a"` | | | jenkins.controller.installPlugins[11] | string | `"dark-theme:315.va_22e7d692ea_a"` | |
| jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.206.v7ce2cf7b_0c8b"` | | | jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.211.vc236a_f5a_2f3c"` | |
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | | | jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
| jenkins.controller.installPlugins[2] | string | `"git:4.14.3"` | | | jenkins.controller.installPlugins[2] | string | `"git:5.0.2"` | |
| jenkins.controller.installPlugins[3] | string | `"basic-branch-build-strategies:71.vc1421f89888e"` | | | jenkins.controller.installPlugins[3] | string | `"basic-branch-build-strategies:71.vc1421f89888e"` | |
| jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:144.vf3924feb_7e35"` | | | jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:183.v9e27732d970f"` | |
| jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.28"` | | | jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.32"` | |
| jenkins.controller.installPlugins[6] | string | `"configuration-as-code:1569.vb_72405b_80249"` | | | jenkins.controller.installPlugins[6] | string | `"configuration-as-code:1625.v27444588cc3d"` | |
| jenkins.controller.installPlugins[7] | string | `"antisamy-markup-formatter:155.v795fb_8702324"` | | | jenkins.controller.installPlugins[7] | string | `"antisamy-markup-formatter:159.v25b_c67cd35fb_"` | |
| jenkins.controller.installPlugins[8] | string | `"prometheus:2.0.11"` | | | jenkins.controller.installPlugins[8] | string | `"prometheus:2.2.2"` | |
| jenkins.controller.installPlugins[9] | string | `"htmlpublisher:1.31"` | | | jenkins.controller.installPlugins[9] | string | `"htmlpublisher:1.31"` | |
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | | | jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | | | jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
@ -128,7 +134,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.serviceAccountAgent.create | bool | `true` | | | jenkins.serviceAccountAgent.create | bool | `true` | |
| jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | | | jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
| trivy.enabled | bool | `false` | | | trivy.enabled | bool | `false` | |
| trivy.image.tag | string | `"0.35.0"` | | | trivy.image.tag | string | `"0.39.1"` | |
| trivy.persistence.enabled | bool | `true` | | | trivy.persistence.enabled | bool | `true` | |
| trivy.persistence.size | string | `"1Gi"` | | | trivy.persistence.size | string | `"1Gi"` | |
| trivy.rbac.create | bool | `false` | | | trivy.rbac.create | bool | `false` | |


@ -23,8 +23,10 @@
# Gitea # Gitea
## OpenSSH 8.8 RSA disabled # Verdaccio
- https://github.com/go-gitea/gitea/issues/17798
## Authentication sealed-secret
```htpasswd -n -b -B -C 4 <username> <password> | kubeseal --raw --namespace verdaccio --name verdaccio-htpasswd```
## Resources ## Resources


@ -121,19 +121,19 @@ jenkins:
numToKeepStr: "10" numToKeepStr: "10"
installPlugins: installPlugins:
- kubernetes:3802.vb_b_600831fcb_3 - kubernetes:3923.v294a_d4250b_91
- workflow-aggregator:581.v0c46fa_697ffd - workflow-aggregator:581.v0c46fa_697ffd
- git:5.0.0 - git:5.0.2
- basic-branch-build-strategies:71.vc1421f89888e - basic-branch-build-strategies:71.vc1421f89888e
- pipeline-graph-view:144.vf3924feb_7e35 - pipeline-graph-view:183.v9e27732d970f
- pipeline-stage-view:2.28 - pipeline-stage-view:2.32
- configuration-as-code:1569.vb_72405b_80249 - configuration-as-code:1625.v27444588cc3d
- antisamy-markup-formatter:155.v795fb_8702324 - antisamy-markup-formatter:159.v25b_c67cd35fb_
- prometheus:2.1.0 - prometheus:2.2.2
- htmlpublisher:1.31 - htmlpublisher:1.31
- build-discarder:139.v05696a_7fe240 - build-discarder:139.v05696a_7fe240
- dark-theme:262.v0202a_4c8fb_6a - dark-theme:315.va_22e7d692ea_a
- kubernetes-credentials-provider:1.208.v128ee9800c04 - kubernetes-credentials-provider:1.211.vc236a_f5a_2f3c
serviceAccountAgent: serviceAccountAgent:
create: true create: true
@ -231,7 +231,7 @@ jenkins:
trivy: trivy:
enabled: false enabled: false
image: image:
tag: 0.35.0 tag: 0.39.1
persistence: persistence:
enabled: true enabled: true
size: 1Gi size: 1Gi


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio-gateway name: kubezero-istio-gateway
description: KubeZero Umbrella Chart for Istio gateways description: KubeZero Umbrella Chart for Istio gateways
type: application type: application
version: 0.9.0 version: 0.10.0
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -14,9 +14,9 @@ maintainers:
email: stefan@zero-downtime.net email: stefan@zero-downtime.net
dependencies: dependencies:
- name: kubezero-lib - name: kubezero-lib
version: ">= 0.1.5" version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: gateway - name: gateway
version: 1.16.1 version: 1.17.2
repository: https://istio-release.storage.googleapis.com/charts repository: https://istio-release.storage.googleapis.com/charts
kubeVersion: ">= 1.24.0" kubeVersion: ">= 1.25.0"


@ -1,6 +1,6 @@
# kubezero-istio-gateway # kubezero-istio-gateway
![Version: 0.9.0](https://img.shields.io/badge/Version-0.9.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.10.0](https://img.shields.io/badge/Version-0.10.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio gateways KubeZero Umbrella Chart for Istio gateways
@ -16,12 +16,12 @@ Installs Istio Ingress Gateways, requires kubezero-istio to be installed !
## Requirements ## Requirements
Kubernetes: `>= 1.24.0` Kubernetes: `>= 1.25.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://istio-release.storage.googleapis.com/charts | gateway | 1.16.1 | | https://istio-release.storage.googleapis.com/charts | gateway | 1.17.2 |
## Values ## Values


@ -1,5 +1,5 @@
apiVersion: v2 apiVersion: v2
appVersion: 1.16.1 appVersion: 1.17.2
description: Helm chart for deploying Istio gateways description: Helm chart for deploying Istio gateways
icon: https://istio.io/latest/favicons/android-192x192.png icon: https://istio.io/latest/favicons/android-192x192.png
keywords: keywords:
@ -9,4 +9,4 @@ name: gateway
sources: sources:
- http://github.com/istio/istio - http://github.com/istio/istio
type: application type: application
version: 1.16.1 version: 1.17.2


@ -49,6 +49,9 @@ spec:
- name: istio-proxy - name: istio-proxy
# "auto" will be populated at runtime by the mutating webhook. See https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#customizing-injection # "auto" will be populated at runtime by the mutating webhook. See https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#customizing-injection
image: auto image: auto
{{- with .Values.imagePullPolicy }}
imagePullPolicy: {{ . }}
{{- end }}
securityContext: securityContext:
{{- if .Values.containerSecurityContext }} {{- if .Values.containerSecurityContext }}
{{- toYaml .Values.containerSecurityContext | nindent 12 }} {{- toYaml .Values.containerSecurityContext | nindent 12 }}


@ -191,6 +191,10 @@
"networkGateway": { "networkGateway": {
"type": "string" "type": "string"
}, },
"imagePullPolicy": {
"type": "string",
"enum": ["", "Always", "IfNotPresent", "Never"]
},
"imagePullSecrets": { "imagePullSecrets": {
"type": "array", "type": "array",
"items": { "items": {


@ -90,4 +90,8 @@ affinity: {}
# If specified, the gateway will act as a network gateway for the given network. # If specified, the gateway will act as a network gateway for the given network.
networkGateway: "" networkGateway: ""
# Specify image pull policy if default behavior isn't desired.
# Default behavior: Always for :latest images, otherwise IfNotPresent
imagePullPolicy: ""
imagePullSecrets: [] imagePullSecrets: []
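As a usage sketch, overriding the new knob in a values file might look like this (the policy value is just an illustrative example, not a chart default):

```yaml
# Pin the pull behavior of the injected istio-proxy image explicitly.
imagePullPolicy: IfNotPresent
```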


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio name: kubezero-istio
description: KubeZero Umbrella Chart for Istio description: KubeZero Umbrella Chart for Istio
type: application type: application
version: 0.9.0 version: 0.10.0
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords: keywords:
@ -16,13 +16,13 @@ dependencies:
version: ">= 0.1.6" version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/ repository: https://cdn.zero-downtime.net/charts/
- name: base - name: base
version: 1.16.1 version: 1.17.2
repository: https://istio-release.storage.googleapis.com/charts repository: https://istio-release.storage.googleapis.com/charts
- name: istiod - name: istiod
version: 1.16.1 version: 1.17.2
repository: https://istio-release.storage.googleapis.com/charts repository: https://istio-release.storage.googleapis.com/charts
- name: kiali-server - name: kiali-server
version: "1.60.0" version: "1.66.0"
repository: https://kiali.org/helm-charts repository: https://kiali.org/helm-charts
condition: kiali-server.enabled condition: kiali-server.enabled
kubeVersion: ">= 1.24.0" kubeVersion: ">= 1.25.0"


@ -1,6 +1,6 @@
# kubezero-istio # kubezero-istio
![Version: 0.9.0](https://img.shields.io/badge/Version-0.9.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.10.0](https://img.shields.io/badge/Version-0.10.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio KubeZero Umbrella Chart for Istio
@ -16,14 +16,14 @@ Installs the Istio control plane
## Requirements ## Requirements
Kubernetes: `>= 1.24.0` Kubernetes: `>= 1.25.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://istio-release.storage.googleapis.com/charts | base | 1.16.1 | | https://istio-release.storage.googleapis.com/charts | base | 1.17.2 |
| https://istio-release.storage.googleapis.com/charts | istiod | 1.16.1 | | https://istio-release.storage.googleapis.com/charts | istiod | 1.17.2 |
| https://kiali.org/helm-charts | kiali-server | 1.60.0 | | https://kiali.org/helm-charts | kiali-server | 1.66.0 |
## Values ## Values


@ -1,22 +1,22 @@
# Revision 148 = 1.16 # Revision 162 = 1.17
configmap: grafana-dashboards configmap: grafana-dashboards
gzip: true gzip: true
folder: Istio folder: Istio
condition: '.Values.istiod.telemetry.enabled' condition: '.Values.istiod.telemetry.enabled'
dashboards: dashboards:
- name: istio-control-plane - name: istio-control-plane
url: https://grafana.com/api/dashboards/7645/revisions/148/download url: https://grafana.com/api/dashboards/7645/revisions/162/download
tags: tags:
- Istio - Istio
- name: istio-mesh - name: istio-mesh
url: https://grafana.com/api/dashboards/7639/revisions/148/download url: https://grafana.com/api/dashboards/7639/revisions/162/download
tags: tags:
- Istio - Istio
- name: istio-service - name: istio-service
url: https://grafana.com/api/dashboards/7636/revisions/148/download url: https://grafana.com/api/dashboards/7636/revisions/162/download
tags: tags:
- Istio - Istio
- name: istio-workload - name: istio-workload
url: https://grafana.com/api/dashboards/7630/revisions/148/download url: https://grafana.com/api/dashboards/7630/revisions/162/download
tags: tags:
- Istio - Istio

File diff suppressed because one or more lines are too long


@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack description: KubeZero Umbrella Chart for complete EFK stack
type: application type: application
version: 0.8.4 version: 0.8.6
appVersion: 1.6.0 appVersion: 1.6.0
home: https://kubezero.com home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -27,6 +27,6 @@ dependencies:
version: 0.3.9 version: 0.3.9
condition: fluentd.enabled condition: fluentd.enabled
- name: fluent-bit - name: fluent-bit
version: 0.20.6 version: 0.24.0
condition: fluent-bit.enabled condition: fluent-bit.enabled
kubeVersion: ">= 1.24.0" kubeVersion: ">= 1.24.0"


@ -1,6 +1,6 @@
# kubezero-logging # kubezero-logging
![Version: 0.8.4](https://img.shields.io/badge/Version-0.8.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square) ![Version: 0.8.6](https://img.shields.io/badge/Version-0.8.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack KubeZero Umbrella Chart for complete EFK stack
@ -19,7 +19,7 @@ Kubernetes: `>= 1.24.0`
| Repository | Name | Version | | Repository | Name | Version |
|------------|------|---------| |------------|------|---------|
| | eck-operator | 2.4.0 | | | eck-operator | 2.4.0 |
| | fluent-bit | 0.20.6 | | | fluent-bit | 0.24.0 |
| | fluentd | 0.3.9 | | | fluentd | 0.3.9 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 | | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
@ -72,9 +72,9 @@ Kubernetes: `>= 1.24.0`
| fluent-bit.config.customParsers | string | `"[PARSER]\n Name cri-log\n Format regex\n Regex ^(?<time>.+) (?<stream>stdout|stderr) (?<logtag>F|P) (?<log>.*)$\n Time_Key time\n Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n"` | | | fluent-bit.config.customParsers | string | `"[PARSER]\n Name cri-log\n Format regex\n Regex ^(?<time>.+) (?<stream>stdout|stderr) (?<logtag>F|P) (?<log>.*)$\n Time_Key time\n Time_Format %Y-%m-%dT%H:%M:%S.%L%z\n"` | |
| fluent-bit.config.filters | string | `"[FILTER]\n Name parser\n Match cri.*\n Parser cri-log\n Key_Name log\n\n[FILTER]\n Name kubernetes\n Match cri.*\n Merge_Log On\n Merge_Log_Key kube\n Kube_Tag_Prefix cri.var.log.containers.\n Keep_Log Off\n K8S-Logging.Parser Off\n K8S-Logging.Exclude Off\n Kube_Meta_Cache_TTL 3600s\n Buffer_Size 0\n #Use_Kubelet true\n\n{{- if index .Values \"config\" \"extraRecords\" }}\n\n[FILTER]\n Name record_modifier\n Match cri.*\n {{- range $k,$v := index .Values \"config\" \"extraRecords\" }}\n Record {{ $k }} {{ $v }}\n {{- end }}\n{{- end }}\n\n[FILTER]\n Name rewrite_tag\n Match cri.*\n Emitter_Name kube_tag_rewriter\n Rule $kubernetes['pod_id'] .* kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false\n\n[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/scripts/kubezero.lua\n call nest_k8s_ns\n"` | | | fluent-bit.config.filters | string | `"[FILTER]\n Name parser\n Match cri.*\n Parser cri-log\n Key_Name log\n\n[FILTER]\n Name kubernetes\n Match cri.*\n Merge_Log On\n Merge_Log_Key kube\n Kube_Tag_Prefix cri.var.log.containers.\n Keep_Log Off\n K8S-Logging.Parser Off\n K8S-Logging.Exclude Off\n Kube_Meta_Cache_TTL 3600s\n Buffer_Size 0\n #Use_Kubelet true\n\n{{- if index .Values \"config\" \"extraRecords\" }}\n\n[FILTER]\n Name record_modifier\n Match cri.*\n {{- range $k,$v := index .Values \"config\" \"extraRecords\" }}\n Record {{ $k }} {{ $v }}\n {{- end }}\n{{- end }}\n\n[FILTER]\n Name rewrite_tag\n Match cri.*\n Emitter_Name kube_tag_rewriter\n Rule $kubernetes['pod_id'] .* kube.$kubernetes['namespace_name'].$kubernetes['container_name'] false\n\n[FILTER]\n Name lua\n Match kube.*\n script /fluent-bit/scripts/kubezero.lua\n call nest_k8s_ns\n"` | |
| fluent-bit.config.flushInterval | int | `5` | | | fluent-bit.config.flushInterval | int | `5` | |
| fluent-bit.config.input.memBufLimit | string | `"4MB"` | | | fluent-bit.config.input.memBufLimit | string | `"16MB"` | |
| fluent-bit.config.input.refreshInterval | int | `10` | | | fluent-bit.config.input.refreshInterval | int | `5` | |
| fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n multiline.parser cri\n Tag cri.*\n Skip_Long_Lines On\n DB /var/log/flb_kube.db\n DB.Sync Normal\n DB.locking true\n # Buffer_Max_Size 1M\n {{- with .Values.config.input }}\n Mem_Buf_Limit {{ default \"4MB\" .memBufLimit }}\n Refresh_Interval {{ default 10 .refreshInterval }}\n {{- end }}\n"` | | | fluent-bit.config.inputs | string | `"[INPUT]\n Name tail\n Path /var/log/containers/*.log\n # Exclude ourselves to current error spam, https://github.com/fluent/fluent-bit/issues/5769\n Exclude_Path *logging-fluent-bit*\n multiline.parser cri\n Tag cri.*\n Skip_Long_Lines On\n Skip_Empty_Lines On\n DB /var/log/flb_kube.db\n DB.Sync Normal\n DB.locking true\n # Buffer_Max_Size 1M\n {{- with .Values.config.input }}\n Mem_Buf_Limit {{ default \"16MB\" .memBufLimit }}\n Refresh_Interval {{ default 5 .refreshInterval }}\n {{- end }}\n"` | |
| fluent-bit.config.logLevel | string | `"info"` | | | fluent-bit.config.logLevel | string | `"info"` | |
| fluent-bit.config.output.host | string | `"logging-fluentd"` | | | fluent-bit.config.output.host | string | `"logging-fluentd"` | |
| fluent-bit.config.output.sharedKey | string | `"cloudbender"` | | | fluent-bit.config.output.sharedKey | string | `"cloudbender"` | |
@ -90,13 +90,14 @@ Kubernetes: `>= 1.24.0`
| fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/var/lib/containers/logs"` | | | fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/var/lib/containers/logs"` | |
| fluent-bit.daemonSetVolumes[1].name | string | `"newlog"` | | | fluent-bit.daemonSetVolumes[1].name | string | `"newlog"` | |
| fluent-bit.enabled | bool | `false` | | | fluent-bit.enabled | bool | `false` | |
| fluent-bit.image.tag | string | `"1.9.8"` | | | fluent-bit.image.tag | string | `"2.0.10"` | |
| fluent-bit.luaScripts."kubezero.lua" | string | `"function nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` | | | fluent-bit.luaScripts."kubezero.lua" | string | `"function nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` | |
| fluent-bit.resources.limits.memory | string | `"64Mi"` | | | fluent-bit.resources.limits.memory | string | `"64Mi"` | |
| fluent-bit.resources.requests.cpu | string | `"20m"` | | | fluent-bit.resources.requests.cpu | string | `"20m"` | |
| fluent-bit.resources.requests.memory | string | `"32Mi"` | | | fluent-bit.resources.requests.memory | string | `"32Mi"` | |
| fluent-bit.serviceMonitor.enabled | bool | `false` | | | fluent-bit.serviceMonitor.enabled | bool | `false` | |
| fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | | | fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | |
| fluent-bit.testFramework.enabled | bool | `false` | |
| fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | | | fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[0].operator | string | `"Exists"` | | | fluent-bit.tolerations[0].operator | string | `"Exists"` | |
| fluentd.dashboards.enabled | bool | `false` | | | fluentd.dashboards.enabled | bool | `false` | |
@ -111,7 +112,7 @@ Kubernetes: `>= 1.24.0`
| fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops eg. ES has issues etc.\n # discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n # Exclude current fluent-bit multiline noise\n <filter kube.logging.fluent-bit>\n @type grep\n <exclude>\n key log\n pattern /could not append content to multiline context/\n </exclude>\n </filter>\n\n # Generate Hash ID to break endless loop for already ingested events during retries\n <filter **>\n @type elasticsearch_genid\n use_entire_record true\n </filter>\n\n # Route through DISPATCH for Prometheus metrics\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | | | fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops eg. ES has issues etc.\n # discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n # Exclude current fluent-bit multiline noise\n <filter kube.logging.fluent-bit>\n @type grep\n <exclude>\n key log\n pattern /could not append content to multiline context/\n </exclude>\n </filter>\n\n # Generate Hash ID to break endless loop for already ingested events during retries\n <filter **>\n @type elasticsearch_genid\n use_entire_record true\n </filter>\n\n # Route through DISPATCH for Prometheus metrics\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | |
| fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id out_es\n @type elasticsearch\n # @log_level debug\n include_tag_key true\n\n id_key _hash\n remove_keys _hash\n write_operation create\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n hosts \"{{ .Values.output.host }}\"\n port 9200\n scheme http\n user elastic\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 300s\n slow_flush_log_threshold 55.0\n\n #with_transporter_log true\n\n verify_es_version_at_startup false\n default_elasticsearch_version 7\n suppress_type_name true\n\n # Retry failed bulk requests\n # https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types\n unrecoverable_error_types [\"out_of_memory_error\"]\n bulk_message_request_threshold 1048576\n\n <buffer>\n @type file\n\n flush_mode interval\n flush_thread_count 2\n flush_interval 10s\n\n chunk_limit_size 2MB\n total_limit_size 1GB\n\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 6h\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | | | fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id out_es\n @type elasticsearch\n # @log_level debug\n include_tag_key true\n\n id_key _hash\n remove_keys _hash\n write_operation create\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n hosts \"{{ .Values.output.host }}\"\n port 9200\n scheme http\n user elastic\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 300s\n slow_flush_log_threshold 55.0\n\n #with_transporter_log true\n\n verify_es_version_at_startup false\n default_elasticsearch_version 7\n suppress_type_name true\n\n # Retry failed bulk requests\n # https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types\n unrecoverable_error_types [\"out_of_memory_error\"]\n bulk_message_request_threshold 1048576\n\n <buffer>\n @type file\n\n flush_mode interval\n flush_thread_count 2\n flush_interval 10s\n\n chunk_limit_size 2MB\n total_limit_size 1GB\n\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 6h\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | |
| fluentd.image.repository | string | `"public.ecr.aws/zero-downtime/fluentd-concenter"` | | | fluentd.image.repository | string | `"public.ecr.aws/zero-downtime/fluentd-concenter"` | |
| fluentd.image.tag | string | `"v1.14.2"` | | | fluentd.image.tag | string | `"v1.16.0"` | |
| fluentd.istio.enabled | bool | `false` | | | fluentd.istio.enabled | bool | `false` | |
| fluentd.kind | string | `"Deployment"` | | | fluentd.kind | string | `"Deployment"` | |
| fluentd.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | | | fluentd.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |


@ -1,13 +1,13 @@
annotations: annotations:
artifacthub.io/changes: | artifacthub.io/changes: |
- kind: changed - kind: changed
description: "Additional upstream config option added" description: "Updated Fluent Bit image to v2.0.9."
apiVersion: v1 apiVersion: v1
appVersion: 1.9.7 appVersion: 2.0.9
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
family operating systems. family operating systems.
home: https://fluentbit.io/ home: https://fluentbit.io/
icon: https://fluentbit.io/assets/img/logo1-default.png icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/fluentd/fluentbit/icon/fluentbit-icon-color.svg
keywords: keywords:
- logging - logging
- fluent-bit - fluent-bit
@ -24,4 +24,4 @@ maintainers:
name: fluent-bit name: fluent-bit
sources: sources:
- https://github.com/fluent/fluent-bit/ - https://github.com/fluent/fluent-bit/
version: 0.20.6 version: 0.24.0


@ -11,6 +11,9 @@ priorityClassName: {{ .Values.priorityClassName }}
securityContext: securityContext:
{{- toYaml . | nindent 2 }} {{- toYaml . | nindent 2 }}
{{- end }} {{- end }}
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
hostNetwork: {{ .Values.hostNetwork }} hostNetwork: {{ .Values.hostNetwork }}
dnsPolicy: {{ .Values.dnsPolicy }} dnsPolicy: {{ .Values.dnsPolicy }}
{{- with .Values.dnsConfig }} {{- with .Values.dnsConfig }}


@ -19,7 +19,7 @@ rules:
- get - get
- list - list
- watch - watch
{{- if .Values.podSecurityPolicy.create }} {{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) }}
- apiGroups: - apiGroups:
- policy - policy
resources: resources:


@ -20,6 +20,9 @@ spec:
updateStrategy: updateStrategy:
{{- toYaml . | nindent 4 }} {{- toYaml . | nindent 4 }}
{{- end }} {{- end }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template: template:
metadata: metadata:
annotations: annotations:


@ -23,6 +23,9 @@ spec:
selector: selector:
matchLabels: matchLabels:
{{- include "fluent-bit.selectorLabels" . | nindent 6 }} {{- include "fluent-bit.selectorLabels" . | nindent 6 }}
{{- with .Values.minReadySeconds }}
minReadySeconds: {{ . }}
{{- end }}
template: template:
metadata: metadata:
annotations: annotations:


@ -1,4 +1,4 @@
{{- if .Values.podSecurityPolicy.create }} {{- if and .Values.podSecurityPolicy.create (semverCompare "<=1.25-0" .Capabilities.KubeVersion.GitVersion) -}}
apiVersion: policy/v1beta1 apiVersion: policy/v1beta1
kind: PodSecurityPolicy kind: PodSecurityPolicy
metadata: metadata:


@ -16,6 +16,15 @@ spec:
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }} {{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }} clusterIP: {{ .Values.service.clusterIP }}
{{- end }} {{- end }}
{{- if (eq .Values.service.type "LoadBalancer")}}
{{- with .Values.service.loadBalancerClass}}
loadBalancerClass: {{ . }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges}}
loadBalancerSourceRanges:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
ports: ports:
- port: {{ .Values.service.port }} - port: {{ .Values.service.port }}
targetPort: http targetPort: http


@ -36,6 +36,16 @@ spec:
relabelings: relabelings:
{{- toYaml . | nindent 8 }} {{- toYaml . | nindent 8 }}
{{- end }} {{- end }}
{{- if .Values.serviceMonitor.scheme }}
scheme: {{ .Values.serviceMonitor.scheme }}
{{- end }}
{{- if .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.serviceMonitor.tlsConfig | nindent 8 }}
{{- end }}
{{- with .Values.serviceMonitor.additionalEndpoints }}
{{- toYaml . | nindent 4 }}
{{- end }}
namespaceSelector: namespaceSelector:
matchNames: matchNames:
- {{ .Release.Namespace }} - {{ .Release.Namespace }}


@ -0,0 +1,38 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.autoscaling.vpa.enabled }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
name: {{ include "fluent-bit.fullname" . }}
labels:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.autoscaling.vpa.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
resourcePolicy:
containerPolicies:
- containerName: {{ .Chart.Name }}
{{- with .Values.autoscaling.vpa.controlledResources }}
controlledResources:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.maxAllowed }}
maxAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.minAllowed }}
minAllowed:
{{- toYaml . | nindent 8 }}
{{- end }}
targetRef:
apiVersion: apps/v1
kind: {{ .Values.kind }}
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.autoscaling.vpa.updatePolicy }}
updatePolicy:
{{- with .Values.autoscaling.vpa.updatePolicy.updateMode }}
updateMode: {{ . }}
{{- end }}
{{- end }}
{{- end }}
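The object above only renders when the VerticalPodAutoscaler CRD is installed and the feature is switched on. A minimal values sketch matching the new `autoscaling.vpa` block (the resource bounds are illustrative assumptions):

```yaml
autoscaling:
  vpa:
    enabled: true
    controlledResources: ["cpu", "memory"]
    minAllowed:
      memory: 32Mi   # assumed floor
    maxAllowed:
      memory: 128Mi  # assumed ceiling
    updatePolicy:
      updateMode: "Off"  # recommendations only, no automatic evictions
```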


@ -13,7 +13,7 @@ image:
pullPolicy: Always pullPolicy: Always
testFramework: testFramework:
enabled: false enabled: true
image: image:
repository: busybox repository: busybox
pullPolicy: Always pullPolicy: Always
@ -32,6 +32,11 @@ rbac:
create: true create: true
nodeAccess: false nodeAccess: false
# Configure podsecuritypolicy
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
# as of Kubernetes 1.25, PSP has been removed
# See: https://kubernetes.io/blog/2022/08/23/kubernetes-v1-25-release/#pod-security-changes
# We automatically disable PSP if Kubernetes version is 1.25 or higher
podSecurityPolicy: podSecurityPolicy:
create: false create: false
annotations: {} annotations: {}
@ -78,6 +83,8 @@ securityContext: {}
service: service:
type: ClusterIP type: ClusterIP
port: 2020 port: 2020
loadBalancerClass:
loadBalancerSourceRanges: []
labels: {} labels: {}
# nodePort: 30020 # nodePort: 30020
# clusterIP: 172.16.10.1 # clusterIP: 172.16.10.1
@ -111,6 +118,35 @@ serviceMonitor:
# targetLabel: nodename # targetLabel: nodename
# replacement: $1 # replacement: $1
# action: replace # action: replace
# scheme: ""
# tlsConfig: {}
## Bear in mind that if you want to collect metrics from a different port
## you will need to configure the new ports on the extraPorts property.
additionalEndpoints: []
# - port: metrics
# path: /metrics
# interval: 10s
# scrapeTimeout: 10s
# scheme: ""
# tlsConfig: {}
# # metric relabel configs to apply to samples before ingestion.
# #
# metricRelabelings:
# - sourceLabels: [__meta_kubernetes_service_label_cluster]
# targetLabel: cluster
# regex: (.*)
# replacement: ${1}
# action: replace
# # relabel configs to apply to samples after ingestion.
# #
# relabelings:
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
prometheusRule: prometheusRule:
enabled: false enabled: false
@ -177,6 +213,28 @@ ingress:
## only available if kind is Deployment ## only available if kind is Deployment
autoscaling: autoscaling:
vpa:
enabled: false
annotations: {}
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi
updatePolicy:
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto
enabled: false enabled: false
minReplicas: 1 minReplicas: 1
maxReplicas: 3 maxReplicas: 3
@ -222,6 +280,14 @@ podAnnotations: {}
podLabels: {} podLabels: {}
## How long (in seconds) a pod needs to be stable before progressing the deployment
##
minReadySeconds:
## How long (in seconds) a pod may take to exit (useful with lifecycle hooks to ensure lb deregistration is done)
##
terminationGracePeriodSeconds:
priorityClassName: "" priorityClassName: ""
env: [] env: []
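As a usage sketch, the two new timing knobs combine like this (both values assumed for illustration):

```yaml
minReadySeconds: 10                # pod must stay Ready this long before the rollout proceeds
terminationGracePeriodSeconds: 60  # headroom for lifecycle hooks / LB deregistration on shutdown
```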
@ -274,7 +340,7 @@ networkPolicy:
luaScripts: {} luaScripts: {}
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file ## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file
config: config:
service: | service: |
[SERVICE] [SERVICE]
@ -332,15 +398,8 @@ config:
Retry_Limit False Retry_Limit False
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers ## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers
## This configuration is deprecated, please use `extraFiles` instead.
upstream: {} upstream: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
## https://docs.fluentbit.io/manual/pipeline/parsers ## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: | customParsers: |
@ -354,6 +413,14 @@ config:
# This allows adding more files with arbitrary filenames to /fluent-bit/etc by providing key/value pairs. # This allows adding more files with arbitrary filenames to /fluent-bit/etc by providing key/value pairs.
# The key becomes the filename, the value becomes the file content. # The key becomes the filename, the value becomes the file content.
extraFiles: {} extraFiles: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
# example.conf: | # example.conf: |
# [OUTPUT] # [OUTPUT]
# Name example # Name example


@ -3,7 +3,7 @@ gzip: true
folder: Logging folder: Logging
dashboards: dashboards:
- name: fluent-logging - name: fluent-logging
url: https://grafana.com/api/dashboards/7752/revisions/4/download url: https://grafana.com/api/dashboards/7752/revisions/6/download
#url: https://grafana.com/api/dashboards/13042/revisions/2/download #url: https://grafana.com/api/dashboards/13042/revisions/2/download
tags: tags:
- fluentd - fluentd


@ -1,24 +0,0 @@
diff -tubr charts/fluent-bit/templates/tests/test-connection.yaml charts/fluent-bit.zdt/templates/tests/test-connection.yaml
--- charts/fluent-bit/templates/tests/test-connection.yaml 2021-06-22 12:25:10.000000000 +0200
+++ charts/fluent-bit.zdt/templates/tests/test-connection.yaml 2021-05-17 12:09:02.724057438 +0200
@@ -1,3 +1,4 @@
+{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
@@ -18,3 +19,4 @@
{{- toYaml . | nindent 4 }}
{{- end }}
restartPolicy: Never
+{{- end }}
diff -tubr charts/fluent-bit/values.yaml charts/fluent-bit.zdt/values.yaml
--- charts/fluent-bit/values.yaml 2021-06-22 12:25:10.000000000 +0200
+++ charts/fluent-bit.zdt/values.yaml 2021-07-19 10:23:01.383398153 +0200
@@ -12,6 +12,7 @@
# tag:
testFramework:
+ enabled: false
image:
repository: busybox
pullPolicy: Always
File diff suppressed because one or more lines are too long
@@ -19,7 +19,7 @@ yq eval -Mi 'del(.dependencies)' charts/eck-operator/Chart.yaml
rm -rf charts/fluent-bit
curl -L -s -o - https://github.com/fluent/helm-charts/releases/download/fluent-bit-${FLUENT_BIT_VERSION}/fluent-bit-${FLUENT_BIT_VERSION}.tgz | tar xfz - -C charts
-patch -i fluent-bit.patch -p0 --no-backup-if-mismatch
+# patch -i fluent-bit.patch -p0 --no-backup-if-mismatch
# FluentD
@@ -48,7 +48,7 @@ fluentd:
enabled: false
image:
repository: public.ecr.aws/zero-downtime/fluentd-concenter
-tag: v1.14.2
+tag: v1.16.0
istio:
enabled: false
@@ -246,7 +246,10 @@ fluent-bit:
image:
#repository: public.ecr.aws/zero-downtime/fluent-bit
-tag: 1.9.8
+tag: 2.0.10
testFramework:
enabled: false
serviceMonitor:
enabled: false
@@ -276,8 +279,8 @@ fluent-bit:
tls: false
input:
-memBufLimit: 4MB
+memBufLimit: 16MB
-refreshInterval: 10
+refreshInterval: 5
logLevel: info
flushInterval: 5
@@ -300,16 +303,19 @@ fluent-bit:
[INPUT]
Name tail
Path /var/log/containers/*.log
# Exclude ourselves due to current error spam, https://github.com/fluent/fluent-bit/issues/5769
Exclude_Path *logging-fluent-bit*
multiline.parser cri
Tag cri.*
Skip_Long_Lines On
Skip_Empty_Lines On
DB /var/log/flb_kube.db
DB.Sync Normal
DB.locking true
# Buffer_Max_Size 1M
{{- with .Values.config.input }}
-Mem_Buf_Limit {{ default "4MB" .memBufLimit }}
+Mem_Buf_Limit {{ default "16MB" .memBufLimit }}
-Refresh_Interval {{ default 10 .refreshInterval }}
+Refresh_Interval {{ default 5 .refreshInterval }}
{{- end }}
filters: |
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
type: application
-version: 0.8.9
+version: 0.9.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -19,16 +19,16 @@ dependencies:
repository: https://cdn.zero-downtime.net/charts/
# https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
- name: kube-prometheus-stack
-version: 43.2.0
+version: 45.27.2
# Switch back to upstream once all alerts are fixed eg. etcd gpcr
# repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter
-version: 3.5.0
+version: 4.1.1
repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-adapter.enabled
- name: prometheus-pushgateway
-version: 2.0.2
+version: 2.1.3
# Switch back to upstream once namespaces are supported
repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-pushgateway.enabled
-kubeVersion: ">= 1.24.0"
+kubeVersion: ">= 1.25.0"
@@ -1,6 +1,6 @@
# kubezero-metrics
-![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.9.2](https://img.shields.io/badge/Version-0.9.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
@@ -14,14 +14,14 @@ KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all
## Requirements
-Kubernetes: `>= 1.24.0`
+Kubernetes: `>= 1.25.0`
| Repository | Name | Version |
|------------|------|---------|
-| | kube-prometheus-stack | 43.2.0 |
+| | kube-prometheus-stack | 45.27.2 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.5.0 |
+| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 4.1.1 |
-| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.0.2 |
+| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.1.3 |
## Values
@@ -155,7 +155,7 @@ Kubernetes: `>= 1.24.0`
| kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.relabelings[0].replacement | string | `"$1"` | |
| kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.relabelings[0].separator | string | `";"` | |
| kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.relabelings[0].sourceLabels[0] | string | `"__meta_kubernetes_pod_node_name"` | |
-| kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.relabelings[0].targetLabel | string | `"node"` | |
+| kube-prometheus-stack.prometheus-node-exporter.prometheus.monitor.relabelings[0].targetLabel | string | `"instance"` | |
| kube-prometheus-stack.prometheus-node-exporter.resources.requests.cpu | string | `"20m"` | |
| kube-prometheus-stack.prometheus-node-exporter.resources.requests.memory | string | `"16Mi"` | |
| kube-prometheus-stack.prometheus.enabled | bool | `true` | |
@@ -218,3 +218,5 @@ Kubernetes: `>= 1.24.0`
- https://grafana.com/api/dashboards/3662/revisions/2/download
## AlertManager SNS Forwarder
- https://github.com/DataReply/alertmanager-sns-forwarder
## docker-registry
- https://github.com/lstn/misc-grafana-dashboards/blob/master/dashboards/docker-registry.json
@@ -23,3 +23,5 @@
- https://grafana.com/api/dashboards/3662/revisions/2/download
## AlertManager SNS Forwarder
- https://github.com/DataReply/alertmanager-sns-forwarder
## docker-registry
- https://github.com/lstn/misc-grafana-dashboards/blob/master/dashboards/docker-registry.json
@@ -7,20 +7,20 @@ annotations:
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
apiVersion: v2
-appVersion: 0.61.1
+appVersion: v0.65.1
dependencies:
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
-version: 4.24.*
+version: 5.5.*
- condition: nodeExporter.enabled
name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts
-version: 4.8.*
+version: 4.16.*
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
-version: 6.48.*
+version: 6.56.*
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
and Prometheus rules combined with documentation and scripts to provide easy to
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
@@ -52,4 +52,4 @@ sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
-version: 43.2.0
+version: 45.27.2
@@ -80,6 +80,44 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 44.x to 45.x
This version upgrades Prometheus-Operator to v0.63.0, Prometheus to v2.43.0 and Thanos to v0.30.2.
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 43.x to 44.x
This version upgrades Prometheus-Operator to v0.62.0, Prometheus to v2.41.0 and Thanos to v0.30.1.
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
If you have explicitly set `prometheusOperator.admissionWebhooks.failurePolicy`, this value is now always used even when `.prometheusOperator.admissionWebhooks.patch.enabled` is `true` (the default).
The values for `prometheusOperator.image.tag` & `prometheusOperator.prometheusConfigReloader.image.tag` are now empty by default and the Chart.yaml `appVersion` field is used instead.
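In practice this means values like the following now take effect as written; a minimal sketch (`Fail` is an illustrative failurePolicy value, and the empty tags fall back to the chart's `appVersion`):

```yaml
prometheusOperator:
  admissionWebhooks:
    failurePolicy: Fail   # honored even when patch.enabled is true (the default)
  image:
    tag: ""               # empty: the Chart.yaml appVersion is used
  prometheusConfigReloader:
    image:
      tag: ""             # empty: the Chart.yaml appVersion is used
```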
### From 42.x to 43.x
This version upgrades Prometheus-Operator to v0.61.1, Prometheus to v2.40.5 and Thanos to v0.29.0.
@@ -1,5 +1,5 @@
apiVersion: v2
-appVersion: 9.3.1
+appVersion: 9.5.1
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
@@ -19,4 +19,4 @@ name: grafana
sources:
- https://github.com/grafana/grafana
type: application
-version: 6.48.0
+version: 6.56.2
@@ -87,6 +87,7 @@ This version requires Helm >= 3.1.0.
| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` |
| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` |
| `ingress.tls` | Ingress TLS configuration | `[]` |
| `ingress.ingressClassName` | Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 | `""` |
| `resources` | CPU/Memory resource requests/limits | `{}` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
@@ -146,7 +147,7 @@ This version requires Helm >= 3.1.0.
| `podPortName` | Name of the grafana port on the pod | `grafana` |
| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` |
| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` |
-| `sidecar.image.tag` | Sidecar image tag | `1.19.2` |
+| `sidecar.image.tag` | Sidecar image tag | `1.22.0` |
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
| `sidecar.resources` | Sidecar resources | `{}` |
@@ -216,11 +217,12 @@ This version requires Helm >= 3.1.0.
| `rbac.create` | Create and use RBAC resources | `true` |
| `rbac.namespaced` | Creates Role and RoleBinding instead of the default ClusterRole and ClusterRoleBindings for the grafana instance | `false` |
| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creation - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` |
-| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` |
+| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` |
-| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` |
+| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` |
| `rbac.extraRoleRules` | Additional rules to add to the Role | [] |
| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] |
| `command` | Define command to be executed by grafana container at startup | `nil` |
| `args` | Define additional args if command is used | `nil` |
| `testFramework.enabled` | Whether to create test-related resources | `true` |
| `testFramework.image` | `test-framework` image repository. | `bats/bats` |
| `testFramework.tag` | `test-framework` image tag. | `v1.4.1` |
@@ -250,6 +252,7 @@ This version requires Helm >= 3.1.0.
| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` |
| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` |
| `imageRenderer.env` | extra env-vars for image-renderer | `{}` |
| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` |
| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` |
| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
@@ -276,11 +279,10 @@ This version requires Helm >= 3.1.0.
| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` |
| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's definition version below 1.13 doesn't have the enableServiceLinks option | `false` |
### Example ingress with path
With grafana 6.3 and above
```yaml
grafana.ini:
server:
@@ -397,9 +399,41 @@ filters out the ones with a label as defined in `sidecar.datasources.label`. The
those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
the data sources in grafana can be imported.
To reload datasources in Grafana each time their config changes, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://<svc-name>.<namespace>.svc.cluster.local/api/admin/provisioning/datasources/reload`.
Secrets are recommended over configmaps for this use case because datasources usually contain private
data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
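A minimal values sketch for the reload behaviour described above (the service name and namespace in `reloadURL` are placeholders):

```yaml
sidecar:
  datasources:
    enabled: true
    label: grafana_datasource
    skipReload: false
    reloadURL: "http://my-grafana.monitoring.svc.cluster.local/api/admin/provisioning/datasources/reload"
```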
Example values to add a postgres datasource as a kubernetes secret:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: grafana-datasources
labels:
grafana_datasource: 'true' # default value for: sidecar.datasources.label
stringData:
pg-db.yaml: |-
apiVersion: 1
datasources:
- name: My pg db datasource
type: postgres
url: my-postgresql-db:5432
user: db-readonly-user
secureJsonData:
password: 'SUperSEcretPa$$word'
jsonData:
database: my_database
sslmode: 'disable' # disable/require/verify-ca/verify-full
maxOpenConns: 0 # Grafana v5.4+
maxIdleConns: 2 # Grafana v5.4+
connMaxLifetime: 14400 # Grafana v5.4+
postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10
timescaledb: false
# <bool> allow users to edit datasources from the UI.
editable: false
```
Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file):
```yaml
@@ -491,6 +525,51 @@ delete_notifiers:
# default org_id: 1
```
## Provision alert rules, contact points, notification policies and notification templates
There are two methods to provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method:
```yaml
alerting:
team1-alert-rules.yaml:
file: alerting/team1/rules.yaml
team2-alert-rules.yaml:
file: alerting/team2/rules.yaml
team3-alert-rules.yaml:
file: alerting/team3/rules.yaml
notification-policies.yaml:
file: alerting/shared/notification-policies.yaml
notification-templates.yaml:
file: alerting/shared/notification-templates.yaml
contactpoints.yaml:
apiVersion: 1
contactPoints:
- orgId: 1
name: Slack channel
receivers:
- uid: default-receiver
type: slack
settings:
# Webhook URL to be filled in
url: ""
# We need to escape double curly braces for the tpl function.
text: '{{ `{{ template "default.message" . }}` }}'
title: '{{ `{{ template "default.title" . }}` }}'
```
There are two possibilities:
* Inlining the file contents as described in the example `values.yaml` and the official [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/).
* Importing a file using a relative path starting from the chart root directory.
### Important notes on file provisioning
* The chart supports importing YAML and JSON files.
* The filename must be unique, otherwise one volume mount will overwrite the other.
* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped.
* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance.
* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases.
## How to serve Grafana with a path prefix (/grafana)
In order to serve Grafana with a prefix (e.g., <http://example.com/grafana>), add the following to your values.yaml.
@@ -598,6 +677,9 @@ grafana.ini:
unified_alerting:
enabled: true
ha_peers: {{ Name }}-headless:9094
ha_listen_address: ${POD_IP}:9094
ha_advertise_address: ${POD_IP}:9094
alerting:
enabled: false
```
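The `${POD_IP}` placeholder works because the chart injects the pod IP into the container environment via the downward API (see the pod template changes further below); roughly:

```yaml
env:
  - name: POD_IP
    valueFrom:
      fieldRef:
        fieldPath: status.podIP
```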
@@ -1,6 +1,7 @@
1. Get your '{{ .Values.adminUser }}' user password by running:
-kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo
2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
@@ -68,7 +68,7 @@ Common labels
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
-app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
+app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.extraLabels }}
@@ -91,7 +91,7 @@ Common labels
helm.sh/chart: {{ include "grafana.chart" . }}
{{ include "grafana.imageRenderer.selectorLabels" . }}
{{- if or .Chart.AppVersion .Values.image.tag }}
-app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}
+app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
@@ -145,10 +145,12 @@ Return the appropriate apiVersion for ingress.
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}}
{{- define "grafana.hpa.apiVersion" -}}
-{{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }}
+{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }}
-{{- print "autoscaling/v2beta1" }}
-{{- else }}
{{- print "autoscaling/v2" }}
{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }}
{{- print "autoscaling/v2beta2" }}
{{- else }}
{{- print "autoscaling/v2beta1" }}
{{- end }}
{{- end }}
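On a cluster that serves the `autoscaling/v2` API, the helper above therefore renders the HPA header as follows (a sketch of the output; the resource name is illustrative):

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: my-grafana
```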
@@ -763,7 +763,13 @@ containers:
{{- range .Values.command }}
- {{ . | quote }}
{{- end }}
-{{- end}}
+{{- end }}
{{- if .Values.args }}
args:
{{- range .Values.args }}
- {{ . | quote }}
{{- end }}
{{- end }}
{{- with .Values.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 6 }}
@@ -780,7 +786,7 @@ containers:
{{- range .Values.extraConfigmapMounts }}
- name: {{ tpl .name $root }}
mountPath: {{ tpl .mountPath $root }}
-subPath: {{ (tpl .subPath $root) | default "" }}
+subPath: {{ tpl (.subPath | default "") $root }}
readOnly: {{ .readOnly }}
{{- end }}
- name: storage
@@ -878,7 +884,17 @@ containers:
- name: {{ .Values.podPortName }}
containerPort: {{ .Values.service.targetPort }}
protocol: TCP
- name: {{ .Values.gossipPortName }}-tcp
containerPort: 9094
protocol: TCP
- name: {{ .Values.gossipPortName }}-udp
containerPort: 9094
protocol: UDP
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
{{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: GF_SECURITY_ADMIN_USER
valueFrom:
@@ -1128,8 +1144,7 @@ volumes:
path: {{ .hostPath }}
{{- else if .csi }}
csi:
-data:
-{{- toYaml .data | nindent 8 }}
+data: {{- toYaml .data | nindent 6 }}
{{- else }}
emptyDir: {}
{{- end }}
@@ -1,4 +1,4 @@
-{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }}
+{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingRole) }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -9,9 +9,9 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "grafana.fullname" . }}-clusterrole
-{{- if or .Values.sidecar.dashboards.enabled (or .Values.rbac.extraClusterRoleRules (or .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled)) }}
+{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }}
rules:
-{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }}
+{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]
@@ -1,4 +1,4 @@
-{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}
+{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -1,4 +1,5 @@
{{- if .Values.createConfigmap }}
{{- $files := .Files }}
{{- $root := . -}}
apiVersion: v1
kind: ConfigMap
@@ -53,9 +54,14 @@ data:
{{- end }}
{{- range $key, $value := .Values.alerting }}
{{- if (hasKey $value "file") }}
{{- $key | nindent 2 }}:
{{- toYaml ( $files.Get $value.file ) | nindent 4}}
{{- else }}
{{- $key | nindent 2 }}: |
{{- tpl (toYaml $value | nindent 4) $root }}
{{- end }}
{{- end }}
{{- range $key, $value := .Values.dashboardProviders }}
{{- $key | nindent 2 }}: |
@@ -81,12 +87,19 @@ data:
--connect-timeout 60 \
--max-time 60 \
{{- if not $value.b64content }}
{{- if not $value.acceptHeader }}
-H "Accept: application/json" \
{{- else }}
-H "Accept: {{ $value.acceptHeader }}" \
{{- end }}
{{- if $value.token }}
-H "Authorization: token {{ $value.token }}" \
{{- end }}
{{- if $value.bearerToken }}
-H "Authorization: Bearer {{ $value.bearerToken }}" \
{{- end }}
{{- if $value.basic }}
-H "Authorization: Basic {{ $value.basic }}" \
{{- end }}
{{- if $value.gitlabToken }}
-H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \
@@ -42,6 +42,7 @@ spec:
{{- if .Values.envRenderSecret }}
checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }}
{{- end }}
kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -17,7 +17,6 @@ spec:
{{- include "grafana.selectorLabels" . | nindent 4 }}
type: ClusterIP
ports:
-- protocol: TCP
+- name: {{ .Values.gossipPortName }}-tcp
-port: 3000
+port: 9094
-targetPort: {{ .Values.service.targetPort }}
{{- end }}
@@ -26,7 +26,7 @@ spec:
- type: Resource
resource:
name: memory
-{{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }}
+{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.autoscaling.targetMemory }}
{{- else }}
target:
@@ -38,7 +38,7 @@ spec:
- type: Resource
resource:
name: cpu
-{{- if semverCompare "<1.23-0" .Capabilities.KubeVersion.Version }}
+{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.autoscaling.targetCPU }}
{{- else }}
target:
@@ -46,4 +46,7 @@ spec:
averageUtilization: {{ .Values.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.autoscaling.behavior }}
behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }}
{{- end }}
{{- end }}
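The new `autoscaling.behavior` pass-through accepts the standard HPA `behavior` stanza; a minimal values sketch (the numbers are illustrative):

```yaml
autoscaling:
  enabled: true
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
        - type: Pods
          value: 1
          periodSeconds: 60
```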
@@ -15,7 +15,9 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }}
replicas: {{ .Values.imageRenderer.replicas }}
{{- end }}
revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }}
selector:
matchLabels:
@@ -86,6 +88,15 @@ spec:
env:
- name: HTTP_PORT
value: {{ .Values.imageRenderer.service.targetPort | quote }}
{{- if .Values.imageRenderer.serviceMonitor.enabled }}
- name: ENABLE_METRICS
value: "true"
{{- end }}
{{- range $key, $value := .Values.imageRenderer.envValueFrom }}
- name: {{ $key | quote }}
valueFrom:
{{- tpl (toYaml $value) $ | nindent 16 }}
{{- end }}
{{- range $key, $value := .Values.imageRenderer.env }}
- name: {{ $key | quote }}
value: {{ $value | quote }}
@@ -0,0 +1,47 @@
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }}
apiVersion: {{ include "grafana.hpa.apiVersion" . }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer
namespace: {{ include "grafana.namespace" . }}
labels:
app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer
helm.sh/chart: {{ include "grafana.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "grafana.fullname" . }}-image-renderer
minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }}
maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }}
metrics:
{{- if .Values.imageRenderer.autoscaling.targetMemory }}
- type: Resource
resource:
name: memory
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }}
{{- end }}
{{- end }}
{{- if .Values.imageRenderer.autoscaling.targetCPU }}
- type: Resource
resource:
name: cpu
{{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }}
targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }}
{{- else }}
target:
type: Utilization
averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }}
{{- end }}
{{- end }}
{{- if .Values.imageRenderer.autoscaling.behavior }}
behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }}
{{- end }}
{{- end }}
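Enabling the renderer HPA from values could look like this (a sketch; the targets mirror the defaults added to values.yaml further down):

```yaml
imageRenderer:
  enabled: true
  autoscaling:
    enabled: true
    minReplicas: 1
    maxReplicas: 5
    targetCPU: "60"
```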
@@ -24,13 +24,16 @@ spec:
from:
- namespaceSelector:
matchLabels:
-name: {{ include "grafana.namespace" . }}
+kubernetes.io/metadata.name: {{ include "grafana.namespace" . }}
-- podSelector:
+podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 14 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 14 }}
{{- end }}
{{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}}
{{ toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }}
@@ -61,10 +64,13 @@ spec:
protocol: TCP
# talk only to grafana
- ports:
-- port: {{ .Values.service.port }}
+- port: {{ .Values.service.targetPort }}
protocol: TCP
to:
-- podSelector:
+- namespaceSelector:
matchLabels:
name: {{ include "grafana.namespace" . }}
podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 14 }}
{{- with .Values.podLabels }}
@@ -0,0 +1,48 @@
{{- if .Values.imageRenderer.serviceMonitor.enabled }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "grafana.fullname" . }}-image-renderer
{{- if .Values.imageRenderer.serviceMonitor.namespace }}
namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }}
{{- else }}
namespace: {{ include "grafana.namespace" . }}
{{- end }}
labels:
{{- include "grafana.imageRenderer.labels" . | nindent 4 }}
{{- with .Values.imageRenderer.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: {{ .Values.imageRenderer.service.portName }}
{{- with .Values.imageRenderer.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: true
path: {{ .Values.imageRenderer.serviceMonitor.path }}
scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }}
{{- with .Values.imageRenderer.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.imageRenderer.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
jobLabel: "{{ .Release.Name }}-image-renderer"
selector:
matchLabels:
{{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ include "grafana.namespace" . }}
{{- with .Values.imageRenderer.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
@@ -12,7 +12,7 @@ metadata:
{{- end }}
{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }}
rules:
-{{- if .Values.rbac.pspEnabled }}
+{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
@@ -41,4 +41,8 @@ spec:
namespaceSelector:
matchNames:
- {{ include "grafana.namespace" . }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
@@ -31,6 +31,7 @@ spec:
{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- end }}
kubectl.kubernetes.io/default-container: {{ .Chart.Name }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -17,8 +17,8 @@ rbac:
create: true
## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
# useExistingRole: name-of-some-(cluster)role
-pspEnabled: true
+pspEnabled: false
-pspUseAppArmor: true
+pspUseAppArmor: false
namespaced: false
extraRoleRules: []
# - apiGroups: []
@@ -52,6 +52,7 @@ autoscaling:
maxReplicas: 5
targetCPU: "60"
targetMemory: ""
behavior: {}
## See `kubectl explain poddisruptionbudget.spec` for more
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
@@ -83,7 +84,7 @@ livenessProbe:
# schedulerName: "default-scheduler"
image:
-repository: grafana/grafana
+repository: docker.io/grafana/grafana
# Overrides the Grafana image tag whose default is the chart appVersion
tag: ""
sha: ""
@@ -99,17 +100,23 @@ image:
testFramework:
enabled: true
-image: "bats/bats"
+image: docker.io/bats/bats
tag: "v1.4.1"
imagePullPolicy: IfNotPresent
securityContext: {}
securityContext:
runAsNonRoot: true
runAsUser: 472
runAsGroup: 472
fsGroup: 472
-containerSecurityContext: {}
+containerSecurityContext:
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
# Enable creating the grafana configmap
createConfigmap: true
@@ -136,7 +143,7 @@ extraLabels: {}
# priorityClassName:
downloadDashboardsImage:
-repository: curlimages/curl
+repository: docker.io/curlimages/curl
tag: 7.85.0
sha: ""
pullPolicy: IfNotPresent
@@ -145,7 +152,13 @@ downloadDashboards:
env: {}
envFromSecret: ""
resources: {}
-securityContext: {}
+securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
envValueFrom: {}
# ENV_NAME:
# configMapKeyRef:
@@ -159,7 +172,7 @@ downloadDashboards:
# podLabels: {}
podPortName: grafana
gossipPortName: gossip
## Deployment annotations
# annotations: {}
@@ -193,6 +206,7 @@ serviceMonitor:
tlsConfig: {}
scrapeTimeout: 30s
relabelings: []
targetLabels: []
extraExposePorts: []
# - name: keycloak
@@ -344,7 +358,7 @@ initChownData:
## initChownData container image
##
image:
-repository: busybox
+repository: docker.io/library/busybox
tag: "1.31.1"
sha: ""
pullPolicy: IfNotPresent
@@ -362,7 +376,11 @@ initChownData:
securityContext:
runAsNonRoot: false
runAsUser: 0
seccompProfile:
type: RuntimeDefault
capabilities:
add:
- CHOWN
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
@@ -382,6 +400,14 @@ admin:
# - "sh"
# - "/run.sh"
## Optionally define args if command is used
## Needed if using `hashicorp/envconsul` to manage secrets
## By default no arguments are set
# args:
# - "-secret"
# - "secret/grafana"
# - "./grafana"
## Extra environment variables that will be passed onto deployment pods
##
## to provide grafana with access to CloudWatch on AWS EKS:
@@ -663,6 +692,10 @@ dashboards: {}
# local-dashboard-bitbucket:
# url: https://example.com/repository/test-bitbucket.json
# bearerToken: ''
# local-dashboard-azure:
# url: https://example.com/repository/test-azure.json
# basic: ''
# acceptHeader: '*/*'
## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, not in both.
@@ -754,7 +787,7 @@ smtp:
sidecar:
image:
repository: quay.io/kiwigrid/k8s-sidecar
-tag: 1.21.0
+tag: 1.22.0
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
@@ -764,7 +797,13 @@ sidecar:
# requests:
# cpu: 50m
# memory: 50Mi
-securityContext: {}
+securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
# skipTlsVerify Set to true to skip tls verification for kube api calls
# skipTlsVerify: true
enableUniqueFilenames: false
@@ -1008,9 +1047,16 @@ imageRenderer:
# Enable the image-renderer deployment & service
enabled: false
replicas: 1
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPU: "60"
targetMemory: ""
behavior: {}
image:
# image-renderer Image repository
-repository: grafana/grafana-image-renderer
+repository: docker.io/grafana/grafana-image-renderer
# image-renderer Image tag
tag: latest
# image-renderer Image sha (optional)
@@ -1023,12 +1069,29 @@ imageRenderer:
# RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
# RENDERING_MODE: clustered
# IGNORE_HTTPS_ERRORS: true
## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
## Renders in container spec as:
## env:
## ...
## - name: <key>
## valueFrom:
## <value rendered as YAML>
envValueFrom: {}
# ENV_NAME:
# configMapKeyRef:
# name: configmap-name
# key: value_key
# image-renderer deployment serviceAccount
serviceAccountName: ""
# image-renderer deployment securityContext
securityContext: {}
# image-renderer deployment container securityContext
containerSecurityContext:
seccompProfile:
type: RuntimeDefault
capabilities:
drop: ['ALL']
allowPrivilegeEscalation: false
@@ -1047,6 +1110,23 @@ imageRenderer:
targetPort: 8081
# Adds the appProtocol field to the image-renderer service. This allows working with istio protocol selection. Ex: "http" or "tcp"
appProtocol: ""
serviceMonitor:
## If true, a ServiceMonitor CRD is created for a prometheus operator
## https://github.com/coreos/prometheus-operator
##
enabled: false
path: /metrics
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
labels: {}
interval: 1m
scheme: http
tlsConfig: {}
scrapeTimeout: 30s
relabelings: []
# See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels
targetLabels: []
# - targetLabel1
# - targetLabel2
# If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
grafanaProtocol: http
# In case a sub_path is used this needs to be added to the image renderer callback
@@ -1060,6 +1140,8 @@ imageRenderer:
limitIngress: true
# Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
limitEgress: false
# Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled)
extraIngressSelectors: []
resources: {}
# limits:
# cpu: 100m
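Putting the new image-renderer knobs together, a values sketch that scrapes the renderer and admits the scraper through the network policy (the namespace label is illustrative):

```yaml
imageRenderer:
  enabled: true
  serviceMonitor:
    enabled: true   # also injects ENABLE_METRICS=true into the deployment
  networkPolicy:
    limitIngress: true
    extraIngressSelectors:
      - namespaceSelector:
          matchLabels:
            kubernetes.io/metadata.name: monitoring
```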
@@ -1,5 +1,5 @@
apiVersion: v2
-appVersion: 2.7.0
+appVersion: 2.8.2
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
@@ -18,4 +18,4 @@ name: kube-state-metrics
sources:
- https://github.com/kubernetes/kube-state-metrics/
type: application
-version: 4.24.0
+version: 5.5.0
@@ -2,14 +2,15 @@
 Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics).
-## Get Repo Info
+## Get Repository Info
+<!-- textlint-disable -->
 ```console
 helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
 helm repo update
 ```
 _See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+<!-- textlint-enable -->
 ## Install Chart
@@ -43,20 +44,19 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
 You can upgrade in-place:
-1. [get repo info](#get-repo-info)
-1. [upgrade](#upgrading-chart) your existing release name using the new chart repo
+1. [get repository info](#get-repository-info)
+1. [upgrade](#upgrading-chart) your existing release name using the new chart repository
 ## Upgrading to v3.0.0
 v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application-side.
 The upgraded chart now has the following changes:
 * Dropped support for helm v2 (helm v3 or later is required)
 * collectors key was renamed to resources
 * namespace key was renamed to namespaces
@@ -65,4 +65,21 @@ See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_h
 helm show values prometheus-community/kube-state-metrics
 ```
-You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.
+
+### kube-rbac-proxy
+
+You can enable `kube-state-metrics` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy one RBAC proxy container per endpoint (metrics & telemetry).
+To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kube-state-metrics-read
+rules:
+  - apiGroups: [ "" ]
+    resources: ["services/kube-state-metrics"]
+    verbs:
+      - get
+```
+
+See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details.
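To make the README snippet above usable end to end, a client identity could be wired up roughly as follows; the `metrics-reader` names and the `monitoring` namespace are hypothetical, and the `ClusterRole` referenced is the one from the snippet above:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-reader           # hypothetical client identity
  namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-reader
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-state-metrics-read  # the ClusterRole shown above
subjects:
  - kind: ServiceAccount
    name: metrics-reader
    namespace: monitoring
```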

[File: kube-state-metrics/templates/NOTES.txt]

@@ -8,3 +8,16 @@ In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-s
 They are served either as plaintext or protobuf depending on the Accept header.
 They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.
+
+{{- if .Values.kubeRBACProxy.enabled}}
+kube-rbac-proxy endpoint protection is enabled:
+- Metrics endpoints are now HTTPS
+- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions:
+```
+rules:
+  - apiGroups: [ "" ]
+    resources: ["services/{{ template "kube-state-metrics.fullname" . }}"]
+    verbs:
+      - get
+```
+{{- end }}
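Since kube-rbac-proxy serves a self-signed certificate by default, a Prometheus-operator scrape against the protected endpoint might look like the sketch below. Treat it as an assumption: the ServiceMonitor name is hypothetical, `bearerTokenFile` is deprecated in newer prometheus-operator releases, and the selector labels must match your release:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: kube-state-metrics        # hypothetical
spec:
  endpoints:
    - port: http
      scheme: https
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      tlsConfig:
        insecureSkipVerify: true  # the proxy cert is self-signed by default
  selector:
    matchLabels:
      app.kubernetes.io/name: kube-state-metrics
```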

[File: kube-state-metrics/templates/_helpers.tpl]

@@ -77,9 +77,13 @@ release: {{ .Release.Name }}
 Selector labels
 */}}
 {{- define "kube-state-metrics.selectorLabels" }}
+{{- if .Values.selectorOverride }}
+{{ toYaml .Values.selectorOverride }}
+{{- else }}
 app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }}
 app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end }}
+{{- end }}
 {{/* Sets default scrape limits for servicemonitor */}}
 {{- define "servicemonitor.scrapeLimits" -}}
@@ -99,3 +103,54 @@ labelNameLengthLimit: {{ . }}
 labelValueLengthLimit: {{ . }}
 {{- end }}
 {{- end -}}
+
+{{/*
+Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets})
+*/}}
+{{- define "kube-state-metrics.imagePullSecrets" -}}
+{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }}
+  {{- if eq (typeOf .) "map[string]interface {}" }}
+- {{ toYaml . | trim }}
+  {{- else }}
+- name: {{ . }}
+  {{- end }}
+{{- end }}
+{{- end -}}
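The helper merges the global and chart-local lists and accepts entries in either shape, as the `typeOf` branch shows. A values sketch with both forms (secret names are illustrative):

```yaml
global:
  imagePullSecrets:
    - company-registry        # plain string entry, expanded to `name: ...`
imagePullSecrets:
  - name: team-registry       # map entry, passed through verbatim
```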
+
+{{/*
+The image to use for kube-state-metrics
+*/}}
+{{- define "kube-state-metrics.image" -}}
+{{- if .Values.image.sha }}
+{{- if .Values.global.imageRegistry }}
+{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }}
+{{- else }}
+{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }}
+{{- end }}
+{{- else }}
+{{- if .Values.global.imageRegistry }}
+{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
+{{- else }}
+{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }}
+{{- end }}
+{{- end }}
+{{- end }}
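A worked example of the helper's precedence, assuming illustrative values: `global.imageRegistry` wins over `image.registry`, an empty tag falls back to `v<appVersion>` (v2.8.2 per the Chart.yaml change above), and a digest is appended when `image.sha` is set. Note that the new helper prints `@%s` verbatim, so unlike the old template the sha value itself now carries the `sha256:` prefix. The `kubeRBACProxy.image` helper below follows the same pattern for its own values path:

```yaml
global:
  imageRegistry: mirror.example.com      # assumption: a private mirror
image:
  registry: registry.k8s.io
  repository: kube-state-metrics/kube-state-metrics
  tag: ""                                # empty -> v2.8.2
  sha: "sha256:a1b2c3..."                # illustrative digest
# Rendered:
# mirror.example.com/kube-state-metrics/kube-state-metrics:v2.8.2@sha256:a1b2c3...
```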
+
+{{/*
+The image to use for kubeRBACProxy
+*/}}
+{{- define "kubeRBACProxy.image" -}}
+{{- if .Values.kubeRBACProxy.image.sha }}
+{{- if .Values.global.imageRegistry }}
+{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }}
+{{- else }}
+{{- printf "%s/%s:%s@%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }}
+{{- end }}
+{{- else }}
+{{- if .Values.global.imageRegistry }}
+{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }}
+{{- else }}
+{{- printf "%s/%s:%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }}
+{{- end }}
+{{- end }}
+{{- end }}

[File: kube-state-metrics/templates/deployment.yaml]

@@ -40,6 +40,8 @@ spec:
       priorityClassName: {{ .Values.priorityClassName }}
       {{- end }}
       containers:
+      {{- $httpPort := ternary 9090 (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled}}
+      {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}}
      - name: {{ template "kube-state-metrics.name" . }}
        {{- if .Values.autosharding.enabled }}
        env:
@@ -56,9 +58,7 @@
        {{- if .Values.extraArgs }}
        {{- .Values.extraArgs | toYaml | nindent 8 }}
        {{- end }}
-        {{- if .Values.service.port }}
-        - --port={{ .Values.service.port | default 8080}}
-        {{- end }}
+        - --port={{ $httpPort }}
        {{- if .Values.collectors }}
        - --resources={{ .Values.collectors | join "," }}
        {{- end }}
@@ -96,11 +96,16 @@
        {{- if .Values.kubeconfig.enabled }}
        - --kubeconfig=/opt/k8s/.kube/config
        {{- end }}
+        {{- if .Values.kubeRBACProxy.enabled }}
+        - --telemetry-host=127.0.0.1
+        - --telemetry-port={{ $telemetryPort }}
+        {{- else }}
        {{- if .Values.selfMonitor.telemetryHost }}
        - --telemetry-host={{ .Values.selfMonitor.telemetryHost }}
        {{- end }}
        {{- if .Values.selfMonitor.telemetryPort }}
-        - --telemetry-port={{ .Values.selfMonitor.telemetryPort | default 8081 }}
+        - --telemetry-port={{ $telemetryPort }}
+        {{- end }}
        {{- end }}
        {{- if or (.Values.kubeconfig.enabled) (.Values.volumeMounts) }}
        volumeMounts:
@@ -114,28 +119,26 @@
          {{- end }}
        {{- end }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
-        {{- if .Values.image.sha }}
-        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}"
-        {{- else }}
-        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
-        {{- end }}
+        image: {{ include "kube-state-metrics.image" . }}
+        {{- if eq .Values.kubeRBACProxy.enabled false }}
        ports:
        - containerPort: {{ .Values.service.port | default 8080}}
          name: "http"
        {{- if .Values.selfMonitor.enabled }}
-        - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
+        - containerPort: {{ $telemetryPort }}
          name: "metrics"
        {{- end }}
+        {{- end }}
        livenessProbe:
          httpGet:
            path: /healthz
-            port: {{ .Values.service.port | default 8080}}
+            port: {{ $httpPort }}
          initialDelaySeconds: 5
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            path: /
-            port: {{ .Values.service.port | default 8080}}
+            port: {{ $httpPort }}
          initialDelaySeconds: 5
          timeoutSeconds: 5
        {{- if .Values.resources }}
@@ -146,9 +149,87 @@
        securityContext:
{{ toYaml .Values.containerSecurityContext | indent 10 }}
        {{- end }}
-      {{- if .Values.imagePullSecrets }}
+      {{- if .Values.kubeRBACProxy.enabled }}
+      - name: kube-rbac-proxy-http
+        args:
+        {{- if .Values.kubeRBACProxy.extraArgs }}
+        {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }}
+        {{- end }}
+        - --secure-listen-address=:{{ .Values.service.port | default 8080}}
+        - --upstream=http://127.0.0.1:{{ $httpPort }}/
+        - --proxy-endpoints-port=8888
+        - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
+        volumeMounts:
+        - name: kube-rbac-proxy-config
+          mountPath: /etc/kube-rbac-proxy-config
+        {{- with .Values.kubeRBACProxy.volumeMounts }}
+        {{- toYaml . | nindent 10 }}
+        {{- end }}
+        imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }}
+        image: {{ include "kubeRBACProxy.image" . }}
+        ports:
+        - containerPort: {{ .Values.service.port | default 8080}}
+          name: "http"
+        - containerPort: 8888
+          name: "http-healthz"
+        readinessProbe:
+          httpGet:
+            scheme: HTTPS
+            port: 8888
+            path: healthz
+          initialDelaySeconds: 5
+          timeoutSeconds: 5
+        {{- if .Values.kubeRBACProxy.resources }}
+        resources:
+{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }}
+        {{- end }}
+        {{- if .Values.kubeRBACProxy.containerSecurityContext }}
+        securityContext:
+{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }}
+        {{- end }}
+      {{- if .Values.selfMonitor.enabled }}
+      - name: kube-rbac-proxy-telemetry
+        args:
+        {{- if .Values.kubeRBACProxy.extraArgs }}
+        {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }}
+        {{- end }}
+        - --secure-listen-address=:{{ .Values.selfMonitor.telemetryPort | default 8081 }}
+        - --upstream=http://127.0.0.1:{{ $telemetryPort }}/
+        - --proxy-endpoints-port=8889
+        - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
+        volumeMounts:
+        - name: kube-rbac-proxy-config
+          mountPath: /etc/kube-rbac-proxy-config
+        {{- with .Values.kubeRBACProxy.volumeMounts }}
+        {{- toYaml . | nindent 10 }}
+        {{- end }}
+        imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }}
+        image: {{ include "kubeRBACProxy.image" . }}
+        ports:
+        - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
+          name: "metrics"
+        - containerPort: 8889
+          name: "metrics-healthz"
+        readinessProbe:
+          httpGet:
+            scheme: HTTPS
+            port: 8889
+            path: healthz
+          initialDelaySeconds: 5
+          timeoutSeconds: 5
+        {{- if .Values.kubeRBACProxy.resources }}
+        resources:
+{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }}
+        {{- end }}
+        {{- if .Values.kubeRBACProxy.containerSecurityContext }}
+        securityContext:
+{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }}
+        {{- end }}
+      {{- end }}
+      {{- end }}
+      {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }}
      imagePullSecrets:
-{{ toYaml .Values.imagePullSecrets | indent 8 }}
+{{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
@@ -166,13 +247,18 @@
      topologySpreadConstraints:
{{ toYaml .Values.topologySpreadConstraints | indent 8 }}
      {{- end }}
-      {{- if or (.Values.kubeconfig.enabled) (.Values.volumes) }}
+      {{- if or (.Values.kubeconfig.enabled) (.Values.volumes) (.Values.kubeRBACProxy.enabled) }}
      volumes:
      {{- if .Values.kubeconfig.enabled}}
        - name: kubeconfig
          secret:
            secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig
      {{- end }}
+      {{- if .Values.kubeRBACProxy.enabled}}
+        - name: kube-rbac-proxy-config
+          configMap:
+            name: {{ template "kube-state-metrics.fullname" . }}-rbac-config
+      {{- end }}
      {{- if .Values.volumes }}
{{ toYaml .Values.volumes | indent 8 }}
      {{- end }}
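Reading the `ternary` logic above together with the proxy containers: with `kubeRBACProxy.enabled: true`, kube-state-metrics itself retreats to localhost-only ports 9090/9091, and the proxies take over the published ports. A sketch of the resulting wiring, assuming default values:

```yaml
kubeRBACProxy:
  enabled: true
# Rendered container layout (derived from the template above, defaults assumed):
#   kube-state-metrics         --port=9090 --telemetry-host=127.0.0.1 --telemetry-port=9091
#   kube-rbac-proxy-http       --secure-listen-address=:8080 --upstream=http://127.0.0.1:9090/
#   kube-rbac-proxy-telemetry  --secure-listen-address=:8081 --upstream=http://127.0.0.1:9091/
# The published service ports (8080/8081) stay stable while the metrics
# endpoints move behind TLS and RBAC.
```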

[File: kube-state-metrics/templates/networkpolicy.yaml]

@@ -0,0 +1,43 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  labels:
+    {{- include "kube-state-metrics.labels" . | indent 4 }}
+  name: {{ template "kube-state-metrics.fullname" . }}
+  namespace: {{ template "kube-state-metrics.namespace" . }}
+  {{- if .Values.annotations }}
+  annotations:
+    {{ toYaml .Values.annotations | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.networkPolicy.egress }}
+  ## Deny all egress by default
+  egress:
+  {{- toYaml .Values.networkPolicy.egress | nindent 4 }}
+  {{- end }}
+  ingress:
+  {{- if .Values.networkPolicy.ingress }}
+  {{- toYaml .Values.networkPolicy.ingress | nindent 4 }}
+  {{- else }}
+  ## Allow ingress on default ports by default
+  - ports:
+    - port: {{ .Values.service.port | default 8080 }}
+      protocol: TCP
+    {{- if .Values.selfMonitor.enabled }}
+    {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}}
+    - port: {{ $telemetryPort }}
+      protocol: TCP
+    {{- end }}
+  {{- end }}
+  podSelector:
+  {{- if .Values.networkPolicy.podSelector }}
+  {{- toYaml .Values.networkPolicy.podSelector | nindent 4 }}
+  {{- else }}
+    matchLabels:
+    {{- include "kube-state-metrics.selectorLabels" . | indent 6 }}
+  {{- end }}
+  policyTypes:
+  - Ingress
+  - Egress
+{{- end }}
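A hedged values sketch for this new template: enabling the policy keeps the default allow-on-metrics-ports ingress, while supplying `ingress` replaces it entirely, per the `if/else` above. The `prometheus` pod labels are assumptions:

```yaml
networkPolicy:
  enabled: true
  # Optional: replace the default ingress with an explicit allow list
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app.kubernetes.io/name: prometheus   # assumed scraper labels
      ports:
        - port: 8080
          protocol: TCP
```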

Some files were not shown because too many files have changed in this diff.