Merge pull request 'v1.24' (#51) from v1.24 into master
Reviewed-on: ZeroDownTime/kubezero#51
Commit: e64388d582
--- a/.ci/podman.mk
+++ b/.ci/podman.mk
@@ -1,12 +1,14 @@
 # Parse version from latest git semver tag
-GTAG=$(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
-TAG ?= $(shell echo $(GTAG) | awk -F '-' '{ print $$1 "-" $$2 }' | sed -e 's/-$$//')
+BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+GIT_TAG=$(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
+TAG ?= $(shell echo $(GIT_TAG) | awk -F '-' '{ print $$1 "-" $$2 }' | sed -e 's/-$$//')
 ARCH := amd64
+ALL_ARCHS := amd64 arm64

 # EXTRA_TAGS supposed to be set at the caller, eg. $(shell echo $(TAG) | awk -F '.' '{ print $$1 "." $$2 }')

 ifneq ($(TRIVY_REMOTE),)
 	TRIVY_OPTS := --server $(TRIVY_REMOTE)
 endif

 .SILENT: ; # no need for @
@@ -20,46 +22,48 @@ help: ## Show Help
 	grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

 build: ## Build the app
-	docker build --rm -t $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(ARCH) --platform linux/$(ARCH) .
+	buildah build --rm --layers -t $(IMAGE):$(TAG)-$(ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(ARCH) --platform linux/$(ARCH) .

 test: rm-test-image ## Execute Dockerfile.test
 	test -f Dockerfile.test && \
-	{ docker build --rm -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(ARCH) . && \
-	docker run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH)-test; } || \
+	{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(ARCH) . && \
+	podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH)-test; } || \
 	echo "No Dockerfile.test found, skipping test"

 scan: ## Scan image using trivy
-	echo "Scanning $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH) using Trivy $(TRIVY_REMOTE)"
-	trivy image $(TRIVY_OPTS) $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH)
+	echo "Scanning $(IMAGE):$(TAG)-$(ARCH) using Trivy $(TRIVY_REMOTE)"
+	trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(ARCH)

-# We create new manifest and add TAG-ARCH image
-# if manigest exists already, get it and add TAG-ARCH to eg. add arm64 to existing amd64
-push: ## push images to registry
-	for t in $(TAG) latest $(EXTRA_TAGS); \
-	do echo "creating and pushing: $$t"; \
-	docker tag $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(ARCH) && \
-	docker push $(REGISTRY)/$(IMAGE):$${t}-$(ARCH); \
-	podman manifest exists $(IMAGE):$$t || podman manifest create $(IMAGE):$$t; \
-	buildah manifest add $(IMAGE):$$t $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH) && docker manifest push $(IMAGE):$$t $(REGISTRY)/$(IMAGE):$$t; \
-	done
+# first tag and push all actual images
+# create new manifest for each tag and add all available TAG-ARCH before pushing
+push: ecr-login ## push images to registry
+	for t in $(TAG) latest $(EXTRA_TAGS); do \
+	buildah tag $(IMAGE):$(TAG)-$(ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(ARCH); \
+	buildah manifest rm $(IMAGE):$$t || true; \
+	buildah manifest create $(IMAGE):$$t; \
+	for a in $(ALL_ARCHS); do \
+	buildah manifest add $(IMAGE):$$t $(REGISTRY)/$(IMAGE):$(TAG)-$$a; \
+	done; \
+	buildah manifest push --all $(IMAGE):$$t docker://$(REGISTRY)/$(IMAGE):$$t; \
+	done

 ecr-login: ## log into AWS ECR public
-	aws ecr-public get-login-password --region $(REGION) | docker login --username AWS --password-stdin $(REGISTRY)
+	aws ecr-public get-login-password --region $(REGION) | podman login --username AWS --password-stdin $(REGISTRY)

 clean: rm-test-image rm-image ## delete local built container and test images

 rm-remote-untagged: ## delete all remote untagged images
 	echo "Removing all untagged images from $(IMAGE) in $(REGION)"
 	IMAGE_IDS=$$(for image in $$(aws ecr-public describe-images --repository-name $(IMAGE) --region $(REGION) --output json | jq -r '.imageDetails[] | select(.imageTags | not ).imageDigest'); do echo -n "imageDigest=$$image "; done) ; \
 	[ -n "$$IMAGE_IDS" ] && aws ecr-public batch-delete-image --repository-name $(IMAGE) --region $(REGION) --image-ids $$IMAGE_IDS || echo "No image to remove"

 rm-image:
-	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH))" || docker image rm -f $(IMAGE):$(TAG)-$(ARCH) > /dev/null
+	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(ARCH) > /dev/null
 	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH))" || echo "Error: Removing image failed"

 # Ensure we run the tests by removing any previous runs
 rm-test-image:
-	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH)-test)" || docker image rm -f $(IMAGE):$(TAG)-$(ARCH)-test > /dev/null
+	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH)-test)" || podman image rm -f $(IMAGE):$(TAG)-$(ARCH)-test > /dev/null
 	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH)-test)" || echo "Error: Removing test image failed"

 ci-pull-upstream: ## pull latest shared .ci subtree
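The reworked push target now tags every per-arch image first, then rebuilds one manifest list per tag from scratch and pushes it with all architectures attached. A minimal sketch of what a single loop iteration expands to — all names below are hypothetical stand-ins for the Makefile's IMAGE, REGISTRY, TAG and ALL_ARCHS variables:

```sh
# Hypothetical expansion of one push-loop iteration (IMAGE=kubezero-admin,
# REGISTRY=public.ecr.aws/zero-downtime, TAG=v1.24.0, ALL_ARCHS="amd64 arm64"):
buildah tag kubezero-admin:v1.24.0-amd64 public.ecr.aws/zero-downtime/kubezero-admin:v1.24.0-amd64
buildah manifest rm kubezero-admin:v1.24.0 || true    # drop any stale local manifest list
buildah manifest create kubezero-admin:v1.24.0
# add each arch; the arm64 image is expected to have been pushed by the arm64 CI run
buildah manifest add kubezero-admin:v1.24.0 public.ecr.aws/zero-downtime/kubezero-admin:v1.24.0-amd64
buildah manifest add kubezero-admin:v1.24.0 public.ecr.aws/zero-downtime/kubezero-admin:v1.24.0-arm64
# push the manifest list together with all referenced images
buildah manifest push --all kubezero-admin:v1.24.0 docker://public.ecr.aws/zero-downtime/kubezero-admin:v1.24.0
```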
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,12 +3,13 @@ ARG ALPINE_VERSION=3.16
 FROM alpine:${ALPINE_VERSION}

 ARG ALPINE_VERSION
-ARG KUBE_VERSION=1.23
+ARG KUBE_VERSION=1.24

 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
     echo "@kubezero https://cdn.zero-downtime.net/alpine/v${ALPINE_VERSION}/kubezero" >> /etc/apk/repositories && \
-    echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
+    echo "@edge-testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
+    echo "@edge-community http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories && \
     apk upgrade -U -a --no-cache && \
     apk --no-cache add \
       jq \
@@ -21,9 +22,9 @@ RUN cd /etc/apk/keys && \
       kubeadm@kubezero~=${KUBE_VERSION} \
       kubectl@kubezero~=${KUBE_VERSION} \
       etcdhelper@kubezero \
-      etcd-ctl@testing \
-      restic@testing \
-      helm@testing
+      etcd-ctl@edge-testing \
+      restic@edge-community \
+      helm@edge-community

 RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
     mkdir -p /var/lib/kubezero
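With KUBE_VERSION bumped to 1.24 and exposed as a build argument, the image can also be rebuilt against a different package stream without editing the Dockerfile. A hedged example invocation mirroring the Makefile's build target (tag and image name are illustrative, not taken from CI):

```sh
# Illustrative local build pinned to the 1.24 kubezero packages
buildah build --rm --layers -t kubezero-admin:v1.24.0-amd64 \
  --build-arg TAG=v1.24.0 --build-arg KUBE_VERSION=1.24 \
  --build-arg ARCH=amd64 --platform linux/amd64 .
```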
--- a/README.md
+++ b/README.md
@@ -5,11 +5,11 @@ KubeZero is a Kubernetes distribution providing an integrated container platform
 # Design philosophy

 - Cloud provider agnostic, bare-metal/self-hosted
-- Focus on security and simplicity before feature bloat
+- Focus on security and simplicity over feature creep
-- No vendor lock in, most components are optional and could be exchanged
+- No vendor lock in, most components are optional and could be easily exchanged
 - Organic Open Source / open and permissive licenses over closed-source solutions
 - No premium services / subscriptions required
-- Staying and contributing back to upstream projects as much as possible
+- Staying up to date and contributing back to upstream projects, like alpine-cloud-images and others
 - Corgi approved :dog:

@@ -19,7 +19,7 @@ KubeZero is a Kubernetes distribution providing an integrated container platform
 # Version / Support Matrix
 KubeZero releases track the same *minor* version of Kubernetes.
-Any 1.21.X-Y release of Kubezero supports any Kubernetes cluster 1.21.X.
+Any 1.24.X-Y release of Kubezero supports any Kubernetes cluster 1.24.X.

 KubeZero is distributed as a collection of versioned Helm charts, allowing custom upgrade schedules and module versions as needed.

@@ -28,15 +28,15 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custo
 gantt
     title KubeZero Support Timeline
     dateFormat  YYYY-MM-DD
-    section 1.22
-    beta      :122b, 2022-05-01, 2022-06-01
-    release   :after 122b, 2022-10-28
     section 1.23
     beta      :123b, 2022-08-01, 2022-09-01
-    release   :after 123b, 2023-02-28
+    release   :after 123b, 2023-02-01
     section 1.24
-    beta      :124b, 2022-12-01, 2022-12-31
-    release   :after 124b, 2023-05-01
+    beta      :124b, 2022-11-14, 2022-12-31
+    release   :after 124b, 2023-06-01
+    section 1.25
+    beta      :125b, 2023-03-01, 2023-03-31
+    release   :after 125b, 2023-08-01
 ```

 [Upstream release policy](https://kubernetes.io/releases/)

@@ -44,7 +44,7 @@ gantt
 # Components

 ## OS
-- all nodes are based on Alpine V3.15
+- all nodes are based on Alpine V3.16
 - 2 GB encrypted root filesystem
 - no 3rd party dependencies at boot ( other than container registries )
 - minimal attack surface
@@ -56,7 +56,7 @@ render_kubeadm() {
     cat ${WORKDIR}/kubeadm/templates/${f}Configuration.yaml >> ${HOSTFS}/etc/kubernetes/kubeadm.yaml
   done

-  # hack to "uncloack" the json patches after they go processed by helm
+  # "uncloak" the json patches after they got processed by helm
   for s in apiserver controller-manager scheduler; do
     yq eval '.json' ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml > /tmp/_tmp.yaml && \
     mv /tmp/_tmp.yaml ${WORKDIR}/kubeadm/templates/patches/kube-${s}1\+json.yaml

@@ -65,11 +65,6 @@ render_kubeadm() {

 parse_kubezero() {
-  # remove with 1.24
-  if [ ! -f ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml ]; then
-    [ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] && cp ${HOSTFS}/etc/kubernetes/kubezero.yaml ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
-  fi
-
   export CLUSTERNAME=$(yq eval '.global.clusterName // .clusterName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
   export HIGHAVAILABLE=$(yq eval '.global.highAvailable // .highAvailable // "false"' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)
   export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml)

@@ -328,6 +323,21 @@ apply_module() {
 }

+
+delete_module() {
+  MODULES=$1
+
+  get_kubezero_values
+
+  # Always use embedded kubezero chart
+  helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
+
+  for t in $MODULES; do
+    _helm delete $t
+  done
+
+  echo "Deleted KubeZero modules: $MODULES. Potential CRDs must be removed manually."
+}
+
 # backup etcd + /etc/kubernetes/pki
 backup() {
   # Display all ENVs, careful this exposes the password !

@@ -382,7 +392,8 @@ for t in $@; do
     bootstrap) control_plane_node bootstrap;;
     join) control_plane_node join;;
     restore) control_plane_node restore;;
-    apply_*) apply_module ${t##apply_};;
+    apply_*) apply_module "${t##apply_}";;
+    delete_*) delete_module "${t##delete_}";;
     backup) backup;;
     debug_shell) debug_shell;;
     *) echo "Unknown command: '$t'";;
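The dispatcher now accepts delete_<module> commands next to apply_<module>, routed to the new delete_module(). A hypothetical session — the entrypoint name is assumed; it is whichever admin script contains the case statement above:

```sh
# Illustrative only: apply the network module, then tear logging down again
./kubezero.sh apply_network
./kubezero.sh delete_logging
# -> "Deleted KubeZero modules: logging. Potential CRDs must be removed manually."
```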
@@ -108,6 +108,7 @@ function _crds() {

   # Only apply if there are actually any crds
   if [ -s $WORKDIR/crds.yaml ]; then
+    [ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
     kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
   fi
 }

@@ -115,7 +116,7 @@ function _crds() {

 # helm template | kubectl apply -f -
 # confine to one namespace if possible
-function apply() {
+function render() {
   helm template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ \
     | python3 -c '
 #!/usr/bin/python3

@@ -128,8 +129,6 @@ for manifest in yaml.safe_load_all(sys.stdin):
     manifest["metadata"]["namespace"] = sys.argv[1]
   print("---")
   print(yaml.dump(manifest))' $namespace > $WORKDIR/helm.yaml
-
-  kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
 }

@@ -164,13 +163,15 @@ function _helm() {
     # Optional pre hook
     declare -F ${module}-pre && ${module}-pre

-    apply
+    render
+    kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?

     # Optional post hook
     declare -F ${module}-post && ${module}-post

   elif [ $action == "delete" ]; then
-    apply
+    render
+    kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?

     # Delete dedicated namespace if not kube-system
     [ -n "$DELETE_NS" ] && delete_ns $namespace
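After this refactor render() only materializes the templated manifests into $WORKDIR/helm.yaml, and each branch of _helm runs kubectl itself with action-appropriate flags (server-side apply only on the apply path). A minimal sketch of the apply path, assuming $module, $action and $WORKDIR are set up by _helm as in the diff:

```sh
render                       # helm template ... > $WORKDIR/helm.yaml
kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts \
  && rc=$? || rc=$?          # record the exit code without aborting under set -e
```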
@@ -8,32 +8,34 @@ import yaml
 def migrate(values):
     """Actual changes here"""

-    # migrate ClusterName to clusterName
-    if "ClusterName" in values:
-        values["clusterName"] = values["ClusterName"]
-        values.pop("ClusterName")
-
-    # Remove HighAvailableControlplane
-    try:
-        values["global"]["highAvailable"] = values["HighAvailableControlplane"]
-        values.pop("HighAvailableControlplane")
-    except KeyError:
-        pass
-
-    # Create new clusterwide cloudprovider data if possible
-    # IamArn: arn:aws:iam::<ACCOUNT_ID>:role/<REGION>.<CLUSTERNAME>.cert-manager
-    try:
-        if values["cert-manager"]["IamArn"]:
-            account_id = values["cert-manager"]["IamArn"].split(":")[4]
-            region = values["cert-manager"]["IamArn"].split(":")[5].split('.')[0].split('/')[1]
-            if "global" not in values:
-                values["global"] = {}
-            if "aws" not in values["global"]:
-                values["global"]["aws"] = {}
-
-            values["global"]["aws"]["region"] = region
-            values["global"]["aws"]["accountId"] = account_id
-
+    # ClusterBackup is enabled on AWS anyways, same with cluster-autoscaler
+    if "aws" in values["global"]:
+        deleteKey(values["addons"], "clusterBackup")
+        deleteKey(values["addons"], "cluster-autoscaler")
+
+    # Remove calico and multus
+    deleteKey(values["network"], "calico")
+    deleteKey(values["network"], "multus")
+
+    # ArgoCD helm changes
+    if "argocd" in values:
+        if "server" in values["argocd"]:
+            if not "configs" in values["argocd"]:
+                values["argocd"]["configs"] = {}
+            if not "cm" in values["argocd"]["configs"]:
+                values["argocd"]["configs"]["cm"] = {}
+            values["argocd"]["configs"]["cm"]["url"] = values["argocd"]["server"]["config"][
+                "url"
+            ]
+            deleteKey(values["argocd"], "server")
+
+    return values
+
+
+def deleteKey(values, key):
+    """Delete key from dictionary if exists"""
+    try:
+        values.pop(key)
     except KeyError:
         pass

@@ -44,9 +46,11 @@ class MyDumper(yaml.Dumper):
     """
     Required to add additional indent for arrays to match yq behaviour to reduce noise in diffs
     """

     def increase_indent(self, flow=False, indentless=False):
         return super(MyDumper, self).increase_indent(flow, False)


 def str_presenter(dumper, data):
     if len(data.splitlines()) > 1:  # check for multiline string
         return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")

@@ -93,5 +97,5 @@ yaml.dump(
     default_flow_style=False,
     indent=2,
     sort_keys=False,
-    Dumper=MyDumper
+    Dumper=MyDumper,
 )
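Since migrate() transforms the whole values document, the script runs as a plain stdin-to-stdout filter; this is exactly how the new admin/v1.24/pre-upgrade.sh below invokes it:

```sh
# Old values in, v1.24-shaped values out (paths as used by pre-upgrade.sh)
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
```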
@ -1,11 +1,12 @@
|
|||||||
#!/bin/bash -e
|
#!/bin/bash -e
|
||||||
|
|
||||||
VERSION="v1.23"
|
|
||||||
#VERSION="latest"
|
#VERSION="latest"
|
||||||
|
VERSION="v1.24"
|
||||||
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
|
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
|
||||||
|
|
||||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||||
. $SCRIPT_DIR/libhelm.sh
|
# shellcheck disable=SC1091
|
||||||
|
. "$SCRIPT_DIR"/libhelm.sh
|
||||||
|
|
||||||
[ -n "$DEBUG" ] && set -x
|
[ -n "$DEBUG" ] && set -x
|
||||||
|
|
||||||
@ -38,6 +39,9 @@ spec:
|
|||||||
- key: node-role.kubernetes.io/master
|
- key: node-role.kubernetes.io/master
|
||||||
operator: Exists
|
operator: Exists
|
||||||
effect: NoSchedule
|
effect: NoSchedule
|
||||||
|
- key: node-role.kubernetes.io/control-plane
|
||||||
|
operator: Exists
|
||||||
|
effect: NoSchedule
|
||||||
initContainers:
|
initContainers:
|
||||||
- name: node-upgrade
|
- name: node-upgrade
|
||||||
image: busybox
|
image: busybox
|
||||||
@ -75,7 +79,7 @@ EOF
|
|||||||
control_plane_upgrade() {
|
control_plane_upgrade() {
|
||||||
TASKS="$1"
|
TASKS="$1"
|
||||||
|
|
||||||
echo "Deploy cluster admin task: $TASK"
|
echo "Deploy cluster admin task: $TASKS"
|
||||||
cat <<EOF | kubectl apply -f -
|
cat <<EOF | kubectl apply -f -
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Pod
|
kind: Pod
|
||||||
@ -120,6 +124,10 @@ spec:
|
|||||||
node-role.kubernetes.io/control-plane: ""
|
node-role.kubernetes.io/control-plane: ""
|
||||||
tolerations:
|
tolerations:
|
||||||
- key: node-role.kubernetes.io/master
|
- key: node-role.kubernetes.io/master
|
||||||
|
operator: Exists
|
||||||
|
effect: NoSchedule
|
||||||
|
- key: node-role.kubernetes.io/control-plane
|
||||||
|
operator: Exists
|
||||||
effect: NoSchedule
|
effect: NoSchedule
|
||||||
restartPolicy: Never
|
restartPolicy: Never
|
||||||
EOF
|
EOF
|
||||||
@ -138,47 +146,42 @@ waitSystemPodsRunning
|
|||||||
|
|
||||||
argo_used && disable_argo
|
argo_used && disable_argo
|
||||||
|
|
||||||
all_nodes_upgrade "nsenter -m/hostproc/1/ns/mnt mount --make-shared /sys/fs/cgroup; nsenter -m/hostproc/1/ns/mnt mount --make-shared /sys; nsenter -r/host /usr/bin/podman image prune -a -f;"
|
#all_nodes_upgrade ""
|
||||||
|
|
||||||
control_plane_upgrade kubeadm_upgrade
|
control_plane_upgrade kubeadm_upgrade
|
||||||
|
|
||||||
echo "Adjust kubezero values as needed: (eg. set cilium cluster id and ensure no IP space overlap !!):"
|
echo "Adjust kubezero values as needed:"
|
||||||
|
# shellcheck disable=SC2015
|
||||||
argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
|
argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
|
||||||
|
|
||||||
# Remove multus DS due to label changes, if this fails:
|
# Remove calico
|
||||||
# kubezero-network $ helm template . --set multus.enabled=true | kubectl apply -f -
|
#kubectl delete deployment calico-kube-controllers -n kube-system || true
|
||||||
kubectl delete ds kube-multus-ds -n kube-system || true
|
#kubectl delete daemonset calico-node -n kube-system || true
|
||||||
|
#kubectl delete network-attachment-definitions calico -n kube-system || true
|
||||||
|
|
||||||
# Required due to chart upgrade to 4.X part of prometheus-stack 40.X
|
# Remove previous cilium config as the helm options are additive only -> fail
|
||||||
kubectl delete daemonset metrics-prometheus-node-exporter -n monitoring || true
|
kubectl delete configmap cilium-config -n kube-system || true
|
||||||
|
|
||||||
# AWS EBS CSI driver change their fsGroupPolicy
|
|
||||||
kubectl delete CSIDriver ebs.csi.aws.com || true
|
|
||||||
|
|
||||||
# Delete external-dns deployment as upstream changed strategy to 'recreate'
|
|
||||||
kubectl delete deployment addons-external-dns -n kube-system || true
|
|
||||||
|
|
||||||
control_plane_upgrade "apply_network, apply_addons, apply_storage"
|
control_plane_upgrade "apply_network, apply_addons, apply_storage"
|
||||||
|
|
||||||
kubectl rollout restart daemonset/calico-node -n kube-system
|
|
||||||
kubectl rollout restart daemonset/cilium -n kube-system
|
|
||||||
kubectl rollout restart daemonset/kube-multus-ds -n kube-system
|
kubectl rollout restart daemonset/kube-multus-ds -n kube-system
|
||||||
|
kubectl rollout restart daemonset/cilium -n kube-system
|
||||||
|
|
||||||
echo "Checking that all pods in kube-system are running ..."
|
echo "Checking that all pods in kube-system are running ..."
|
||||||
waitSystemPodsRunning
|
waitSystemPodsRunning
|
||||||
|
|
||||||
echo "Applying remaining KubeZero modules..."
|
echo "Applying remaining KubeZero modules..."
|
||||||
|
|
||||||
# Delete outdated cert-manager CRDs, otherwise serverside apply will fail
|
# delete argocd deployments as various immutable things changed, also redis restart fails otherwise
|
||||||
for c in certificaterequests.cert-manager.io certificates.cert-manager.io challenges.acme.cert-manager.io clusterissuers.cert-manager.io issuers.cert-manager.io orders.acme.cert-manager.io; do
|
kubectl delete deployment argocd-redis -n argocd || true
|
||||||
kubectl delete crd $c
|
kubectl delete deployment argocd-repo-server -n argocd || true
|
||||||
done
|
kubectl delete statefulset argocd-application-controller -n argocd || true
|
||||||
|
|
||||||
|
# Delete prometheus-push gateway due to label changes
|
||||||
|
kubectl delete deploy -l app=prometheus-pushgateway -n monitoring || true
|
||||||
|
|
||||||
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"
|
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"
|
||||||
|
|
||||||
# delete legace ArgCD controller which is now a statefulSet
|
|
||||||
kubectl delete deployment argocd-application-controller -n argocd || true
|
|
||||||
|
|
||||||
# Final step is to commit the new argocd kubezero app
|
# Final step is to commit the new argocd kubezero app
|
||||||
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP
|
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP
|
||||||
|
|
||||||
@ -186,6 +189,6 @@ echo "Please commit $ARGO_APP as the updated kubezero/application.yaml for your
|
|||||||
echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."
|
echo "Then head over to ArgoCD for this cluster and sync all KubeZero modules to apply remaining upgrades."
|
||||||
|
|
||||||
echo "<Return> to continue and re-enable ArgoCD:"
|
echo "<Return> to continue and re-enable ArgoCD:"
|
||||||
read
|
read -r
|
||||||
|
|
||||||
argo_used && enable_argo
|
argo_used && enable_argo
|
||||||
|
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-# Migrate addons and network values from local kubeadm-values.yaml on controllers into CM
-# - enable cilium
-
-# Create emtpy CM if not exists yet
-kubectl get cm -n kube-system kubezero-values || \
-  kubectl create configmap -n kube-system kubezero-values
-
-kubectl get cm -n kube-system kubeadm-values || \
-  kubectl create configmap -n kube-system kubeadm-values
-
-# tweak local kubeadm for upgrade later on
-yq eval -i '.global.clusterName = strenv(CLUSTERNAME) |
-  .global.highAvailable = env(HIGHAVAILABLE)' \
-  ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml
-
-# extract network
-yq e '.network |
-  .cilium.enabled = true |
-  .calico.enabled = true |
-  .multus.enabled = true |
-  .multus.defaultNetworks = ["cilium"] |
-  .cilium.cluster.name = strenv(CLUSTERNAME) |
-  {"network": .}' ${HOSTFS}/etc/kubernetes/kubeadm-values.yaml > $WORKDIR/network-values.yaml
-
-# get current argo cd values
-kubectl get application kubezero -n argocd -o yaml | yq '.spec.source.helm.values' > ${WORKDIR}/argo-values.yaml
-
-# merge all into new CM and set new minimal addons
-yq ea '. as $item ireduce ({}; . * $item ) |
-  .global.clusterName = strenv(CLUSTERNAME) |
-  .global.highAvailable = env(HIGHAVAILABLE) |
-  .addons.clusterBackup.image.tag = "v1.23" ' ${WORKDIR}/network-values.yaml $WORKDIR/argo-values.yaml > $WORKDIR/kubezero-pre-values.yaml
-
-# tumble new config through migrate.py
-cat $WORKDIR/kubezero-pre-values.yaml | migrate_argo_values.py > $WORKDIR/kubezero-values.yaml
-
-# Update kubezero-values CM
-kubectl get cm -n kube-system kubezero-values -o=yaml | \
-  yq e '.data."values.yaml" |= load_str("/tmp/kubezero/kubezero-values.yaml")' | \
-  kubectl replace -f -
-
-# update argo app
-kubectl get application kubezero -n argocd -o yaml | \
-  kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
-  yq '.spec.source.helm.values |= load_str("/tmp/kubezero/kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
-  kubectl apply -f -
-
-# finally remove annotation to allow argo to sync again
-kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'

--- /dev/null
+++ b/admin/v1.24/pre-upgrade.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# get current values, argo app over cm
+get_kubezero_values
+
+# tumble new config through migrate.py
+migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
+
+# Update kubezero-values CM
+kubectl get cm -n kube-system kubezero-values -o=yaml | \
+  yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
+  kubectl replace -f -
+
+# update argo app
+kubectl get application kubezero -n argocd -o yaml | \
+  kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
+  yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
+  kubectl apply -f -
+
+# finally remove annotation to allow argo to sync again
+kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
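Note the pattern in the Argo app update above: the chart version is exported only for the duration of the yq invocation, so strenv() can splice it into targetRevision. Stripped to its essentials — the input file name here is a hypothetical example:

```sh
kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
  yq '.spec.source.targetRevision = strenv(kubezero_chart_version)' application.yaml
```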
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.23.11
+version: 1.24.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -11,4 +11,4 @@ keywords:
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
@@ -1,6 +1,6 @@
 # kubeadm

-![Version: 1.23.10](https://img.shields.io/badge/Version-1.23.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.24.9](https://img.shields.io/badge/Version-1.24.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero Kubeadm cluster config

@@ -14,7 +14,7 @@ KubeZero Kubeadm cluster config

 ## Requirements

-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`

 ## Values

@@ -44,7 +44,7 @@ Kubernetes: `>= 1.24.0`
 ## Resources

 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
-- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
+- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
 - https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
 - https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
|
|||||||
## Resources
|
## Resources
|
||||||
|
|
||||||
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
|
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/
|
||||||
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2
|
- https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
|
||||||
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
|
- https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3
|
||||||
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
|
- https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/kubelet/config/v1beta1/types.go
|
||||||
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
|
- https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/
|
||||||
|
@@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-featureGates:
-  UnversionedKubeletConfigMap: true
+#featureGates:
+#  NonGracefulFailover: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16
@@ -17,8 +17,8 @@ protectKernelDefaults: {{ .Values.protectKernelDefaults }}
 tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256]
 featureGates:
   {{- include "kubeadm.featuregates" ( dict "return" "map" ) | nindent 2 }}
-# Minimal unit is 50m per pod
-podsPerCore: 20
+# Minimal unit is 40m per pod
+podsPerCore: 25
 # cpuCFSQuotaPeriod: 10ms
 # Basic OS incl. crio
 systemReserved:
@@ -1,6 +1,6 @@
 {{- /* Feature gates for all control plane components */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "KubeletCredentialProviders"}}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "CronJobTimeZone" "NodeOutOfServiceVolumeDetach" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
@@ -1,4 +1,4 @@
-apiVersion: kubelet.config.k8s.io/v1alpha1
+apiVersion: kubelet.config.k8s.io/v1beta1
 kind: CredentialProviderConfig
 providers:
   - name: amazon-ecr-credential-helper
@@ -112,10 +112,12 @@ spec:
       tolerations:
       - effect: NoSchedule
         key: node-role.kubernetes.io/master
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane

       containers:
       - name: aws-iam-authenticator
-        image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.9
+        image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.11
         args:
         - server
         - --backend-mode=CRD,MountedFile
@@ -131,7 +133,7 @@ spec:

         resources:
           requests:
-            memory: 20Mi
+            memory: 32Mi
             cpu: 10m
           limits:
             memory: 64Mi
@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.6.3
+version: 0.7.3
-appVersion: v1.23.11
+appVersion: v1.24
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -11,16 +11,14 @@ keywords:
   - fuse-device-plugin
   - neuron-device-plugin
   - nvidia-device-plugin
-  - aws-node-termination-handler
+  - cluster-autoscaler
+  - sealed-secrets
   - external-dns
+  - aws-node-termination-handler
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
 dependencies:
-  - name: aws-node-termination-handler
-    version: 0.18.5
-    # repository: https://aws.github.io/eks-charts
-    condition: aws-node-termination-handler.enabled
   - name: external-dns
     version: 1.11.0
     repository: https://kubernetes-sigs.github.io/external-dns/
@@ -30,8 +28,20 @@ dependencies:
     repository: https://kubernetes.github.io/autoscaler
     condition: cluster-autoscaler.enabled
   - name: nvidia-device-plugin
-    version: 0.12.2
+    version: 0.13.0
     # https://github.com/NVIDIA/k8s-device-plugin
     repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
-kubeVersion: ">= 1.20.0"
+  - name: sealed-secrets
+    version: 2.7.1
+    repository: https://bitnami-labs.github.io/sealed-secrets
+    condition: sealed-secrets.enabled
+  - name: aws-node-termination-handler
+    version: 0.20.1
+    # repository: https://aws.github.io/eks-charts
+    condition: aws-node-termination-handler.enabled
+  - name: aws-eks-asg-rolling-update-handler
+    version: 1.2.7
+    # repository: https://twin.github.io/helm-charts
+    condition: aws-eks-asg-rolling-update-handler.enabled
+kubeVersion: ">= 1.24.0"
@ -1,6 +1,6 @@
|
|||||||
# kubezero-addons
|
# kubezero-addons
|
||||||
|
|
||||||
![Version: 0.6.3](https://img.shields.io/badge/Version-0.6.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.11](https://img.shields.io/badge/AppVersion-v1.23.11-informational?style=flat-square)
|
![Version: 0.7.3](https://img.shields.io/badge/Version-0.7.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
|
||||||
|
|
||||||
KubeZero umbrella chart for various optional cluster addons
|
KubeZero umbrella chart for various optional cluster addons
|
||||||
|
|
||||||
@ -14,14 +14,16 @@ KubeZero umbrella chart for various optional cluster addons
|
|||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
Kubernetes: `>= 1.20.0`
|
Kubernetes: `>= 1.24.0`
|
||||||
|
|
||||||
| Repository | Name | Version |
|
| Repository | Name | Version |
|
||||||
|------------|------|---------|
|
|------------|------|---------|
|
||||||
| | aws-node-termination-handler | 0.18.5 |
|
| | aws-eks-asg-rolling-update-handler | 1.2.7 |
|
||||||
|
| | aws-node-termination-handler | 0.20.1 |
|
||||||
|
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.7.1 |
|
||||||
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
|
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
|
||||||
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
|
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
|
||||||
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.12.2 |
|
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.13.0 |
|
||||||
|
|
||||||
# MetalLB
|
# MetalLB
|
||||||
|
|
||||||
@ -39,6 +41,34 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
|||||||
|
|
||||||
| Key | Type | Default | Description |
|
| Key | Type | Default | Description |
|
||||||
|-----|------|---------|-------------|
|
|-----|------|---------|-------------|
|
||||||
|
| aws-eks-asg-rolling-update-handler.enabled | bool | `false` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[0].name | string | `"CLUSTER_NAME"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[0].value | string | `""` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[1].name | string | `"AWS_REGION"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[1].value | string | `"us-west-2"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[2].name | string | `"EXECUTION_INTERVAL"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[2].value | string | `"60"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[3].name | string | `"METRICS"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[3].value | string | `"true"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[4].name | string | `"EAGER_CORDONING"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[4].value | string | `"true"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[5].name | string | `"SLOW_MODE"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[5].value | string | `"true"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[6].name | string | `"AWS_ROLE_ARN"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[6].value | string | `""` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[7].name | string | `"AWS_WEB_IDENTITY_TOKEN_FILE"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[7].value | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/token"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[8].name | string | `"AWS_STS_REGIONAL_ENDPOINTS"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.environmentVars[8].value | string | `"regional"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.7.0"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.resources.limits.memory | string | `"128Mi"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.resources.requests.cpu | string | `"10m"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.resources.requests.memory | string | `"32Mi"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| aws-eks-asg-rolling-update-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| aws-node-termination-handler.deleteLocalData | bool | `true` | |
|
| aws-node-termination-handler.deleteLocalData | bool | `true` | |
|
||||||
| aws-node-termination-handler.emitKubernetesEvents | bool | `true` | |
|
| aws-node-termination-handler.emitKubernetesEvents | bool | `true` | |
|
||||||
| aws-node-termination-handler.enableProbesServer | bool | `true` | |
|
| aws-node-termination-handler.enableProbesServer | bool | `true` | |
|
||||||
@ -54,7 +84,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
|||||||
| aws-node-termination-handler.fullnameOverride | string | `"aws-node-termination-handler"` | |
|
| aws-node-termination-handler.fullnameOverride | string | `"aws-node-termination-handler"` | |
|
||||||
| aws-node-termination-handler.ignoreDaemonSets | bool | `true` | |
|
| aws-node-termination-handler.ignoreDaemonSets | bool | `true` | |
|
||||||
| aws-node-termination-handler.jsonLogging | bool | `true` | |
|
| aws-node-termination-handler.jsonLogging | bool | `true` | |
|
||||||
| aws-node-termination-handler.managedAsgTag | string | `"aws-node-termination-handler/managed"` | "aws-node-termination-handler/${ClusterName}" |
|
| aws-node-termination-handler.logFormatVersion | int | `2` | |
|
||||||
|
| aws-node-termination-handler.managedTag | string | `"aws-node-termination-handler/managed"` | "aws-node-termination-handler/${ClusterName}" |
|
||||||
| aws-node-termination-handler.metadataTries | int | `0` | |
|
| aws-node-termination-handler.metadataTries | int | `0` | |
|
||||||
| aws-node-termination-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
| aws-node-termination-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
| aws-node-termination-handler.podMonitor.create | bool | `false` | |
|
| aws-node-termination-handler.podMonitor.create | bool | `false` | |
|
||||||
@ -63,6 +94,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
|||||||
| aws-node-termination-handler.taintNode | bool | `true` | |
|
| aws-node-termination-handler.taintNode | bool | `true` | |
|
||||||
| aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
|
| aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| aws-node-termination-handler.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| aws-node-termination-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| aws-node-termination-handler.useProviderId | bool | `true` | |
|
| aws-node-termination-handler.useProviderId | bool | `true` | |
|
||||||
| awsNeuron.enabled | bool | `false` | |
|
| awsNeuron.enabled | bool | `false` | |
|
||||||
| awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
|
| awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
|
||||||
@ -80,30 +113,22 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
|||||||
| cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
|
| cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
|
||||||
| cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
|
| cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| cluster-autoscaler.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| cluster-autoscaler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| clusterBackup.enabled | bool | `false` | |
|
| clusterBackup.enabled | bool | `false` | |
|
||||||
| clusterBackup.extraEnv | list | `[]` | |
|
| clusterBackup.extraEnv | list | `[]` | |
|
||||||
| clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
|
| clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
|
||||||
| clusterBackup.password | string | `""` | /etc/cloudbender/clusterBackup.passphrase |
|
| clusterBackup.password | string | `""` | /etc/cloudbender/clusterBackup.passphrase |
|
||||||
| clusterBackup.repository | string | `""` | s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup |
|
| clusterBackup.repository | string | `""` | s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup |
|
||||||
| external-dns.enabled | bool | `false` | |
|
| external-dns.enabled | bool | `false` | |
|
||||||
| external-dns.env[0] | object | `{"name":"AWS_ROLE_ARN","value":""}` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.externalDNS" |
|
|
||||||
| external-dns.env[1].name | string | `"AWS_WEB_IDENTITY_TOKEN_FILE"` | |
|
|
||||||
| external-dns.env[1].value | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/token"` | |
|
|
||||||
| external-dns.env[2].name | string | `"AWS_STS_REGIONAL_ENDPOINTS"` | |
|
|
||||||
| external-dns.env[2].value | string | `"regional"` | |
|
|
||||||
| external-dns.extraVolumeMounts[0].mountPath | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/"` | |
|
|
||||||
| external-dns.extraVolumeMounts[0].name | string | `"aws-token"` | |
|
|
||||||
| external-dns.extraVolumeMounts[0].readOnly | bool | `true` | |
|
|
||||||
| external-dns.extraVolumes[0].name | string | `"aws-token"` | |
|
|
||||||
| external-dns.extraVolumes[0].projected.sources[0].serviceAccountToken.audience | string | `"sts.amazonaws.com"` | |
|
|
||||||
| external-dns.extraVolumes[0].projected.sources[0].serviceAccountToken.expirationSeconds | int | `86400` | |
|
|
||||||
| external-dns.extraVolumes[0].projected.sources[0].serviceAccountToken.path | string | `"token"` | |
|
|
||||||
| external-dns.interval | string | `"3m"` | |
|
| external-dns.interval | string | `"3m"` | |
|
||||||
| external-dns.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
| external-dns.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
| external-dns.provider | string | `"inmemory"` | |
|
| external-dns.provider | string | `"inmemory"` | |
|
||||||
| external-dns.sources[0] | string | `"service"` | |
|
| external-dns.sources[0] | string | `"service"` | |
|
||||||
| external-dns.tolerations[0].effect | string | `"NoSchedule"` | |
|
| external-dns.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| external-dns.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| external-dns.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| external-dns.triggerLoopOnEvent | bool | `true` | |
|
| external-dns.triggerLoopOnEvent | bool | `true` | |
|
||||||
| forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" |
|
| forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" |
|
||||||
| forseti.aws.region | string | `""` | |
|
| forseti.aws.region | string | `""` | |
|
||||||
@ -128,3 +153,15 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| nvidia-device-plugin.tolerations[1].effect | string | `"NoSchedule"` | |
| nvidia-device-plugin.tolerations[1].key | string | `"kubezero-workergroup"` | |
| nvidia-device-plugin.tolerations[1].operator | string | `"Exists"` | |
+| sealed-secrets.enabled | bool | `false` | |
+| sealed-secrets.fullnameOverride | string | `"sealed-secrets-controller"` | |
+| sealed-secrets.keyrenewperiod | int | `0` | |
+| sealed-secrets.metrics.serviceMonitor.enabled | bool | `false` | |
+| sealed-secrets.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
+| sealed-secrets.resources.limits.memory | string | `"128Mi"` | |
+| sealed-secrets.resources.requests.cpu | string | `"10m"` | |
+| sealed-secrets.resources.requests.memory | string | `"24Mi"` | |
+| sealed-secrets.tolerations[0].effect | string | `"NoSchedule"` | |
+| sealed-secrets.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| sealed-secrets.tolerations[1].effect | string | `"NoSchedule"` | |
+| sealed-secrets.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
@ -0,0 +1,8 @@
apiVersion: v2
description: Handles rolling upgrades for AWS ASGs for EKS by replacing outdated nodes
  by new nodes.
home: https://github.com/TwiN/aws-eks-asg-rolling-update-handler
maintainers:
- name: TwiN
name: aws-eks-asg-rolling-update-handler
version: 1.2.7
@ -0,0 +1,31 @@
{{/*
Create a default app name.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.name" -}}
{{- .Chart.Name -}}
{{- end -}}

{{/*
Create a default namespace.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.namespace" -}}
{{- .Release.Namespace -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "aws-eks-asg-rolling-update-handler.labels" -}}
app.kubernetes.io/name: {{ include "aws-eks-asg-rolling-update-handler.name" . }}
{{- end -}}

{{/*
Create the name of the service account to use.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aws-eks-asg-rolling-update-handler.name" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
@ -0,0 +1,15 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
roleRef:
  kind: ClusterRole
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
    namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
@ -0,0 +1,41 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
rules:
  - apiGroups:
      - "*"
    resources:
      - "*"
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "*"
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
      - update
      - patch
  - apiGroups:
      - "*"
    resources:
      - pods/eviction
    verbs:
      - get
      - list
      - create
  - apiGroups:
      - "*"
    resources:
      - pods
    verbs:
      - get
      - list
@ -0,0 +1,56 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 6 }}
  template:
    metadata:
      labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 8 }}
    spec:
      automountServiceAccountToken: true
      serviceAccountName: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
      restartPolicy: Always
      dnsPolicy: Default
      containers:
        - name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
          image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            {{- toYaml .Values.environmentVars | nindent 12 }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: aws-token
              mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
              readOnly: true
      volumes:
        - name: aws-token
          projected:
            sources:
              - serviceAccountToken:
                  path: token
                  expirationSeconds: 86400
                  audience: "sts.amazonaws.com"
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
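The `aws-token` projected volume above is what lets the handler assume an IAM role without relying on the EKS pod identity webhook: the kubelet issues a service-account token scoped to the `sts.amazonaws.com` audience, and the AWS SDK exchanges it via `AssumeRoleWithWebIdentity` once the standard web-identity variables are set. A minimal sketch of the matching `environmentVars` (the role ARN is a placeholder; the token path matches the mount above):

    environmentVars:
      - name: AWS_ROLE_ARN
        value: "arn:aws:iam::123456789012:role/ruh-example"  # hypothetical role ARN
      - name: AWS_WEB_IDENTITY_TOKEN_FILE
        value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
      - name: AWS_STS_REGIONAL_ENDPOINTS
        value: "regional"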
@ -0,0 +1,13 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
  namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{ end }}
@ -0,0 +1,28 @@
replicaCount: 1

image:
  repository: twinproduction/aws-eks-asg-rolling-update-handler
  tag: v1.4.3
  pullPolicy: IfNotPresent

#imagePullSecrets:
#- imagePullSecret

environmentVars:
  - name: CLUSTER_NAME
    value: "cluster-name" # REPLACE THIS WITH THE NAME OF YOUR EKS CLUSTER
  #- name: AUTO_SCALING_GROUP_NAMES
  #  value: "asg-1,asg-2,asg-3" # REPLACE THESE VALUES FOR THE NAMES OF THE ASGs, if CLUSTER_NAME is provided, this is ignored
  #- name: IGNORE_DAEMON_SETS
  #  value: "true"
  #- name: DELETE_LOCAL_DATA
  #  value: "true"
  #- name: AWS_REGION
  #  value: us-west-2
  #- name: ENVIRONMENT
  #  value: ""

serviceAccount:
  create: true
  #name: aws-eks-asg-rolling-update-handler
  annotations: {}
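These are the upstream chart defaults; KubeZero wires the handler up through the kubezero-addons values further below. A minimal override to switch it on could look like this (the cluster name is a placeholder):

    aws-eks-asg-rolling-update-handler:
      enabled: true
      environmentVars:
        - name: CLUSTER_NAME
          value: "my-cluster"  # hypothetical EKS cluster name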
@ -1,5 +1,5 @@
apiVersion: v2
-appVersion: 1.16.5
+appVersion: 1.18.1
description: A Helm chart for the AWS Node Termination Handler.
home: https://github.com/aws/eks-charts
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@ -22,4 +22,4 @@ sources:
- https://github.com/aws/aws-node-termination-handler/
- https://github.com/aws/eks-charts/
type: application
-version: 0.18.5
+version: 0.20.1
@ -70,6 +70,7 @@ The configuration in this table applies to all AWS Node Termination Handler modes.
| `extraEnv` | Additional environment variables for the _aws-node-termination-handler_ container. | `[]` |
| `probes` | The Kubernetes liveness probe configuration. | _See values.yaml_ |
| `logLevel` | Sets the log level (`info`, `debug`, or `error`) | `info` |
+| `logFormatVersion` | Sets the log format version. Available versions: 1, 2. Version 1 refers to the format that has been used through v1.17.3. Version 2 offers more detail for the "event kind" and "reason", especially when operating in Queue Processor mode. | `1` |
| `jsonLogging` | If `true`, use JSON-formatted logs instead of human readable logs. | `false` |
| `enablePrometheusServer` | If `true`, start an http server exposing `/metrics` endpoint for _Prometheus_. | `false` |
| `prometheusServerPort` | Replaces the default HTTP port for exposing _Prometheus_ metrics. | `9092` |
@ -82,6 +83,7 @@ The configuration in this table applies to all AWS Node Termination Handler modes.
| `podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used, which defaults to 30 seconds if not specified for the pod. | `-1` |
| `nodeTerminationGracePeriod` | Period of time in seconds given to each node to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` |
| `emitKubernetesEvents` | If `true`, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event. More information [here](https://github.com/aws/aws-node-termination-handler/blob/main/docs/kubernetes_events.md). | `false` |
+| `completeLifecycleActionDelaySeconds` | Pause after draining the node before completing the EC2 Autoscaling lifecycle action. This may be helpful if Pods on the node have Persistent Volume Claims. | `-1` |
| `kubernetesEventsExtraAnnotations` | A comma-separated list of `key=value` extra annotations to attach to all emitted Kubernetes events (e.g. `first=annotation,sample.annotation/number=two`). | `""` |
| `webhookURL` | Posts event data to URL upon instance interruption action. | `""` |
| `webhookURLSecretName` | Pass the webhook URL as a Secret using the key `webhookurl`. | `""` |
@ -110,9 +112,11 @@ The configuration in this table applies to AWS Node Termination Handler in queue processor mode.
| `awsRegion` | If specified, use the AWS region for AWS API calls, else NTH will try to find the region through the `AWS_REGION` environment variable, IMDS, or the specified queue URL. | `""` |
| `queueURL` | Listens for messages on the specified SQS queue URL. | `""` |
| `workers` | The maximum amount of parallel event processors to handle concurrent events. | `10` |
-| `checkASGTagBeforeDraining` | If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. If `false`, disables calls to the ASG API. | `true` |
-| `managedAsgTag` | The node tag to check if `checkASGTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` |
+| `checkTagBeforeDraining` | If `true`, check that the instance is tagged with the `managedTag` before draining the node. | `true` |
+| `managedTag` | The node tag to check if `checkTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` |
+| `checkASGTagBeforeDraining` | [DEPRECATED](Use `checkTagBeforeDraining` instead) If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. If `false`, disables calls to the ASG API. | `true` |
+| `managedAsgTag` | [DEPRECATED](Use `managedTag` instead) The node tag to check if `checkASGTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` |
| `useProviderId` | If `true`, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname. | `false` |
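Given the deprecations above, queue-processor values should move to the new keys; a minimal sketch using the defaults shipped by the chart:

    checkTagBeforeDraining: true
    managedTag: "aws-node-termination-handler/managed"
    # deprecated spellings, still honored for now:
    #checkASGTagBeforeDraining: true
    #managedAsgTag: "aws-node-termination-handler/managed"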

### IMDS Mode Configuration

@ -81,6 +81,8 @@ spec:
            value: {{ .Values.logLevel | quote }}
          - name: JSON_LOGGING
            value: {{ .Values.jsonLogging | quote }}
+          - name: LOG_FORMAT_VERSION
+            value: {{ .Values.logFormatVersion | quote }}
          - name: ENABLE_PROMETHEUS_SERVER
            value: {{ .Values.enablePrometheusServer | quote }}
          - name: PROMETHEUS_SERVER_PORT
@ -151,6 +153,9 @@ spec:
            value: "false"
          - name: UPTIME_FROM_FILE
            value: {{ .Values.procUptimeFile | quote }}
+          {{- with .Values.extraEnv }}
+          {{- toYaml . | nindent 12 }}
+          {{- end }}
          {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }}
          ports:
          {{- if .Values.enableProbesServer }}
@ -52,7 +52,7 @@ spec:
      {{- end }}
      containers:
        - name: aws-node-termination-handler
-          {{- with .Values.securityContext }}
+          {{- with unset .Values.securityContext "runAsUser" }}
          securityContext:
            {{- toYaml . | nindent 12 }}
          {{- end }}
@ -81,6 +81,8 @@ spec:
            value: {{ .Values.logLevel | quote }}
          - name: JSON_LOGGING
            value: {{ .Values.jsonLogging | quote }}
+          - name: LOG_FORMAT_VERSION
+            value: {{ .Values.logFormatVersion | quote }}
          - name: ENABLE_PROMETHEUS_SERVER
            value: {{ .Values.enablePrometheusServer | quote }}
          - name: PROMETHEUS_SERVER_PORT
@ -149,6 +151,9 @@ spec:
            value: {{ .Values.enableRebalanceDraining | quote }}
          - name: ENABLE_SQS_TERMINATION_DRAINING
            value: "false"
+          {{- with .Values.extraEnv }}
+          {{- toYaml . | nindent 12 }}
+          {{- end }}
          {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }}
          ports:
          {{- if .Values.enableProbesServer }}
@ -78,14 +78,22 @@ spec:
            value: {{ .Values.logLevel | quote }}
          - name: JSON_LOGGING
            value: {{ .Values.jsonLogging | quote }}
+          - name: LOG_FORMAT_VERSION
+            value: {{ .Values.logFormatVersion | quote }}
          - name: ENABLE_PROMETHEUS_SERVER
            value: {{ .Values.enablePrometheusServer | quote }}
          - name: PROMETHEUS_SERVER_PORT
            value: {{ .Values.prometheusServerPort | quote }}
+          # [DEPRECATED] Use CHECK_TAG_BEFORE_DRAINING instead
          - name: CHECK_ASG_TAG_BEFORE_DRAINING
            value: {{ .Values.checkASGTagBeforeDraining | quote }}
+          - name: CHECK_TAG_BEFORE_DRAINING
+            value: {{ .Values.checkTagBeforeDraining | quote }}
+          # [DEPRECATED] Use MANAGED_TAG instead
          - name: MANAGED_ASG_TAG
            value: {{ .Values.managedAsgTag | quote }}
+          - name: MANAGED_TAG
+            value: {{ .Values.managedTag | quote }}
          - name: USE_PROVIDER_ID
            value: {{ .Values.useProviderId | quote }}
          - name: DRY_RUN
@ -106,6 +114,8 @@ spec:
            value: {{ .Values.nodeTerminationGracePeriod | quote }}
          - name: EMIT_KUBERNETES_EVENTS
            value: {{ .Values.emitKubernetesEvents | quote }}
+          - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
+            value: {{ .Values.completeLifecycleActionDelaySeconds | quote }}
          {{- with .Values.kubernetesEventsExtraAnnotations }}
          - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS
            value: {{ . | quote }}
|
|||||||
# Set the log level
|
# Set the log level
|
||||||
logLevel: info
|
logLevel: info
|
||||||
|
|
||||||
|
# Set the log format version
|
||||||
|
logFormatVersion: 1
|
||||||
|
|
||||||
# Log messages in JSON format
|
# Log messages in JSON format
|
||||||
jsonLogging: false
|
jsonLogging: false
|
||||||
|
|
||||||
@ -100,6 +103,9 @@ nodeTerminationGracePeriod: 120
|
|||||||
# emitKubernetesEvents If true, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event
|
# emitKubernetesEvents If true, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event
|
||||||
emitKubernetesEvents: false
|
emitKubernetesEvents: false
|
||||||
|
|
||||||
|
# completeLifecycleActionDelaySeconds will pause for the configured duration after draining the node before completing the EC2 Autoscaling lifecycle action. This may be helpful if Pods on the node have Persistent Volume Claims.
|
||||||
|
completeLifecycleActionDelaySeconds: -1
|
||||||
|
|
||||||
# kubernetesEventsExtraAnnotations A comma-separated list of key=value extra annotations to attach to all emitted Kubernetes events
|
# kubernetesEventsExtraAnnotations A comma-separated list of key=value extra annotations to attach to all emitted Kubernetes events
|
||||||
# Example: "first=annotation,sample.annotation/number=two"
|
# Example: "first=annotation,sample.annotation/number=two"
|
||||||
kubernetesEventsExtraAnnotations: ""
|
kubernetesEventsExtraAnnotations: ""
|
||||||
@ -170,13 +176,18 @@ queueURL: ""
|
|||||||
# The maximum amount of parallel event processors to handle concurrent events
|
# The maximum amount of parallel event processors to handle concurrent events
|
||||||
workers: 10
|
workers: 10
|
||||||
|
|
||||||
# If true, check that the instance is tagged with "aws-node-termination-handler/managed" as the key before draining the node
|
# [DEPRECATED] Use checkTagBeforeDraining instead
|
||||||
# If false, disables calls to ASG API.
|
|
||||||
checkASGTagBeforeDraining: true
|
checkASGTagBeforeDraining: true
|
||||||
|
|
||||||
# The tag to ensure is on a node if checkASGTagBeforeDraining is true
|
# If true, check that the instance is tagged with "aws-node-termination-handler/managed" as the key before draining the node
|
||||||
|
checkTagBeforeDraining: true
|
||||||
|
|
||||||
|
# [DEPRECATED] Use managedTag instead
|
||||||
managedAsgTag: "aws-node-termination-handler/managed"
|
managedAsgTag: "aws-node-termination-handler/managed"
|
||||||
|
|
||||||
|
# The tag to ensure is on a node if checkTagBeforeDraining is true
|
||||||
|
managedTag: "aws-node-termination-handler/managed"
|
||||||
|
|
||||||
# If true, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname.
|
# If true, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname.
|
||||||
useProviderId: false
|
useProviderId: false
|
||||||
|
|
||||||
charts/kubezero-addons/ruh.patch (new file, 36 lines)
@ -0,0 +1,36 @@
diff -tuNr charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml
--- charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml	2022-12-16 13:10:26.049272371 +0000
+++ charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml	2022-12-16 15:56:00.880666339 +0000
@@ -25,7 +25,31 @@
           image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
           imagePullPolicy: {{ .Values.image.pullPolicy }}
           env:
-{{- toYaml .Values.environmentVars | nindent 12 }}
+            {{- toYaml .Values.environmentVars | nindent 12 }}
+          {{- with .Values.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: aws-token
+              mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+              readOnly: true
+      volumes:
+        - name: aws-token
+          projected:
+            sources:
+              - serviceAccountToken:
+                  path: token
+                  expirationSeconds: 86400
+                  audience: "sts.amazonaws.com"
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.imagePullSecrets }}
       imagePullSecrets:
         {{- toYaml . | nindent 8 }}
@ -6,6 +6,7 @@ metadata:
  namespace: kube-system
spec:
  schedule: "0 * * * *"
+  concurrencyPolicy: "Replace"
  jobTemplate:
    spec:
      backoffLimit: 1
@ -56,5 +57,7 @@ spec:
          tolerations:
          - key: node-role.kubernetes.io/master
            effect: NoSchedule
+          - key: node-role.kubernetes.io/control-plane
+            effect: NoSchedule
          restartPolicy: Never
{{- end }}
@ -71,6 +71,8 @@ spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
+      - key: node-role.kubernetes.io/control-plane
+        effect: NoSchedule
      volumes:
      - name: aws-token
        projected:
@ -4,6 +4,7 @@ set -ex
helm repo update

NTH_VERSION=$(yq eval '.dependencies[] | select(.name=="aws-node-termination-handler") | .version' Chart.yaml)
+RUH_VERSION=$(yq eval '.dependencies[] | select(.name=="aws-eks-asg-rolling-update-handler") | .version' Chart.yaml)

rm -rf charts/aws-node-termination-handler
helm pull eks/aws-node-termination-handler --untar --untardir charts --version $NTH_VERSION
@ -11,4 +12,8 @@ helm pull eks/aws-node-termination-handler --untar --untardir charts --version $NTH_VERSION
# diff -tuNr charts/aws-node-termination-handler.orig charts/aws-node-termination-handler > nth.patch
patch -p0 -i nth.patch --no-backup-if-mismatch

+rm -rf charts/aws-eks-asg-rolling-update-handler
+helm pull twin/aws-eks-asg-rolling-update-handler --untar --untardir charts --version $RUH_VERSION
+patch -p0 -i ruh.patch --no-backup-if-mismatch
+
helm dep update
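The two yq lookups in this script assume matching entries in the chart's dependencies list. A sketch of their likely shape (versions taken from the charts in this commit; the repository URLs are assumptions that must match the locally configured helm repo aliases `eks` and `twin`):

    dependencies:
      - name: aws-node-termination-handler
        version: 0.20.1
        repository: https://aws.github.io/eks-charts   # assumed repo URL behind the "eks" alias
      - name: aws-eks-asg-rolling-update-handler
        version: 1.2.7
        repository: https://twin.github.io/helm-charts # assumed repo URL behind the "twin" alias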
@ -24,16 +24,82 @@ forseti:
    # -- "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti"
    iamRoleArn: ""

+sealed-secrets:
+  enabled: false
+
+  # ensure kubeseal default values match
+  fullnameOverride: sealed-secrets-controller
+
+  # Disable auto keyrotation for now
+  keyrenewperiod: 0
+
+  resources:
+    requests:
+      cpu: 10m
+      memory: 24Mi
+    limits:
+      memory: 128Mi
+
+  metrics:
+    serviceMonitor:
+      enabled: false
+
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+  tolerations:
+    - key: node-role.kubernetes.io/master
+      effect: NoSchedule
+    - key: node-role.kubernetes.io/control-plane
+      effect: NoSchedule
+
+aws-eks-asg-rolling-update-handler:
+  enabled: false
+  image:
+    tag: v1.7.0
+
+  environmentVars:
+    - name: CLUSTER_NAME
+      value: ""
+    - name: AWS_REGION
+      value: us-west-2
+    - name: EXECUTION_INTERVAL
+      value: "60"
+    - name: METRICS
+      value: "true"
+    - name: EAGER_CORDONING
+      value: "true"
+    # Only disable if all services have PDBs across AZs
+    - name: SLOW_MODE
+      value: "true"
+    - name: AWS_ROLE_ARN
+      value: ""
+    - name: AWS_WEB_IDENTITY_TOKEN_FILE
+      value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+    - name: AWS_STS_REGIONAL_ENDPOINTS
+      value: "regional"
+
+  resources:
+    requests:
+      cpu: 10m
+      memory: 32Mi
+    limits:
+      memory: 128Mi
+
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+  tolerations:
+    - key: node-role.kubernetes.io/master
+      effect: NoSchedule
+    - key: node-role.kubernetes.io/control-plane
+      effect: NoSchedule
+
aws-node-termination-handler:
  enabled: false

  fullnameOverride: "aws-node-termination-handler"

-  #image:
-  #  tag: v1.14.1
-
  # -- "aws-node-termination-handler/${ClusterName}"
-  managedAsgTag: "aws-node-termination-handler/managed"
+  managedTag: "aws-node-termination-handler/managed"

  useProviderId: true
  enableSqsTerminationDraining: true
@ -63,10 +129,13 @@ aws-node-termination-handler:
    create: false

  jsonLogging: true
+  logFormatVersion: 2

  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
+  - key: node-role.kubernetes.io/control-plane
+    effect: NoSchedule
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
|
|||||||
- g5.16xlarge
|
- g5.16xlarge
|
||||||
- g5.24xlarge
|
- g5.24xlarge
|
||||||
- g5.48xlarge
|
- g5.48xlarge
|
||||||
|
- g4dn.xlarge
|
||||||
|
|
||||||
cluster-autoscaler:
|
cluster-autoscaler:
|
||||||
enabled: false
|
enabled: false
|
||||||
@ -140,6 +210,8 @@ cluster-autoscaler:
|
|||||||
tolerations:
|
tolerations:
|
||||||
- key: node-role.kubernetes.io/master
|
- key: node-role.kubernetes.io/master
|
||||||
effect: NoSchedule
|
effect: NoSchedule
|
||||||
|
- key: node-role.kubernetes.io/control-plane
|
||||||
|
effect: NoSchedule
|
||||||
|
|
||||||
# On AWS enable Projected Service Accounts to assume IAM role
|
# On AWS enable Projected Service Accounts to assume IAM role
|
||||||
#extraEnv:
|
#extraEnv:
|
||||||
@ -170,6 +242,8 @@ external-dns:
|
|||||||
tolerations:
|
tolerations:
|
||||||
- key: node-role.kubernetes.io/master
|
- key: node-role.kubernetes.io/master
|
||||||
effect: NoSchedule
|
effect: NoSchedule
|
||||||
|
- key: node-role.kubernetes.io/control-plane
|
||||||
|
effect: NoSchedule
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
node-role.kubernetes.io/control-plane: ""
|
node-role.kubernetes.io/control-plane: ""
|
||||||
|
|
||||||
@ -179,24 +253,3 @@ external-dns:
  #- istio-gateway

  provider: inmemory
-
-  extraVolumes:
-  - name: aws-token
-    projected:
-      sources:
-      - serviceAccountToken:
-          path: token
-          expirationSeconds: 86400
-          audience: "sts.amazonaws.com"
-  extraVolumeMounts:
-  - name: aws-token
-    mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
-    readOnly: true
-  env:
-    # -- "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.externalDNS"
-  - name: AWS_ROLE_ARN
-    value: ""
-  - name: AWS_WEB_IDENTITY_TOKEN_FILE
-    value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
-  - name: AWS_STS_REGIONAL_ENDPOINTS
-    value: "regional"
@ -1,21 +1,29 @@
apiVersion: v2
-description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
+description: KubeZero ArgoCD - config, branding, image-updater (optional)
name: kubezero-argocd
-version: 0.10.2
+version: 0.11.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- argocd
-- gitops
+- argocd-image-updater
maintainers:
- name: Stefan Reimer
  email: stefan@zero-downtime.net
+# Url: https://github.com/argoproj/argo-helm/tree/main/charts
dependencies:
- name: kubezero-lib
-  version: ">= 0.1.5"
+  version: ">= 0.1.6"
  repository: https://cdn.zero-downtime.net/charts/
- name: argo-cd
-  version: 4.10.9
+  version: 5.16.10
  repository: https://argoproj.github.io/argo-helm
+- name: argocd-apps
+  version: 0.0.6
+  repository: https://argoproj.github.io/argo-helm
+- name: argocd-image-updater
+  version: 0.8.1
+  repository: https://argoproj.github.io/argo-helm
+  condition: argocd-image-updater.enabled
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
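The argocd-image-updater dependency is gated by its `condition`, so the sub-chart only renders when enabled in values:

    argocd-image-updater:
      enabled: true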
@ -1,8 +1,8 @@
# kubezero-argocd

-![Version: 0.10.2](https://img.shields.io/badge/Version-0.10.2-informational?style=flat-square)
+![Version: 0.11.2](https://img.shields.io/badge/Version-0.11.2-informational?style=flat-square)

-KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
+KubeZero ArgoCD - config, branding, image-updater (optional)

**Homepage:** <https://kubezero.com>
@ -14,42 +14,57 @@ KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application

## Requirements

-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`

| Repository | Name | Version |
|------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 4.10.9 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
+| https://argoproj.github.io/argo-helm | argo-cd | 5.16.10 |
+| https://argoproj.github.io/argo-helm | argocd-apps | 0.0.6 |
+| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.8.1 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.applicationSet.enabled | bool | `false` | |
+| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
+| argo-cd.configs.cm."timeout.reconciliation" | int | `300` | |
+| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.24 - Release notes"` | |
+| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
+| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
+| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.24"` | |
+| argo-cd.configs.cm.url | string | `"argocd.example.com"` | |
| argo-cd.configs.knownHosts.data.ssh_known_hosts | string | `"bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==\ngithub.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf\ngitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngit.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==\n"` | |
+| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
+| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
+| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
+| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
-| argo-cd.controller.args.appResyncPeriod | string | `"300"` | |
-| argo-cd.controller.args.operationProcessors | string | `"4"` | |
-| argo-cd.controller.args.statusProcessors | string | `"8"` | |
-| argo-cd.controller.logFormat | string | `"json"` | |
+| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.resources.requests.cpu | string | `"100m"` | |
| argo-cd.controller.resources.requests.memory | string | `"256Mi"` | |
| argo-cd.dex.enabled | bool | `false` | |
-| argo-cd.global | string | `nil` | |
-| argo-cd.installCRDs | bool | `false` | |
+| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.notifications.enabled | bool | `false` | |
-| argo-cd.repoServer.logFormat | string | `"json"` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
-| argo-cd.server.config."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
-| argo-cd.server.config.url | string | `"argocd.example.com"` | ArgoCD hostname to be exposed via Istio |
-| argo-cd.server.extraArgs[0] | string | `"--insecure"` | |
-| argo-cd.server.logFormat | string | `"json"` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
+| argocd-apps.applications | list | `[]` | |
+| argocd-apps.projects | list | `[]` | |
+| argocd-image-updater.authScripts.enabled | bool | `true` | |
+| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
+| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |
+| argocd-image-updater.config.argocd.plaintext | bool | `true` | |
+| argocd-image-updater.enabled | bool | `false` | |
+| argocd-image-updater.fullnameOverride | string | `"argocd-image-updater"` | |
+| argocd-image-updater.metrics.enabled | bool | `false` | |
+| argocd-image-updater.metrics.serviceMonitor.enabled | bool | `true` | |
+| argocd-image-updater.sshConfig.config | string | `"Host *\n PubkeyAcceptedAlgorithms +ssh-rsa\n HostkeyAlgorithms +ssh-rsa\n"` | |
| istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| istio.gateway | string | `"istio-ingress/ingressgateway"` | Name of the Istio gateway to add the VirtualService to |
| istio.ipBlocks | list | `[]` | |
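The UI banner and branding now live under `argo-cd.configs.cm`; a values override pointing users at different release notes might look like this (banner text and URL are illustrative):

    argo-cd:
      configs:
        cm:
          ui.bannercontent: "KubeZero v1.25 - Release notes"   # hypothetical banner text
          ui.bannerurl: "https://kubezero.com/releases/v1.25"  # hypothetical URL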
@ -19,7 +19,7 @@ spec:
        {{- toYaml .Values.istio.ipBlocks | nindent 8 }}
    to:
    - operation:
-        hosts: [{{ index .Values "argo-cd" "server" "config" "url" | quote }}]
+        hosts: [{{ index .Values "argo-cd" "configs" "cm" "url" | quote }}]
  when:
  - key: connection.sni
    values:
@ -10,7 +10,7 @@ spec:
  gateways:
  - {{ .Values.istio.gateway }}
  hosts:
-  - {{ index .Values "argo-cd" "server" "config" "url" }}
+  - {{ index .Values "argo-cd" "configs" "cm" "url" }}
  http:
  - name: grpc
    match:
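Both Istio templates now resolve the ArgoCD hostname from the argo-cd 5.x values layout (`configs.cm.url`) rather than the retired `server.config.url`, so a deployment sets the exposed host in one place:

    argo-cd:
      configs:
        cm:
          url: argocd.example.org  # hypothetical hostname served by the Istio gateway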
@ -6,9 +6,11 @@ istio:
  gateway: istio-ingress/ingressgateway
  ipBlocks: []

-argo-cd:
-  installCRDs: false
+argocd-apps:
+  projects: []
+  applications: []
+
+argo-cd:
  #configs:
  #  secret:
  #    `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
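The new argocd-apps sub-chart accepts declarative projects and Applications; the schema belongs to the upstream argocd-apps chart, so treat the following as an assumed sketch (name, repo URL, and namespaces are placeholders):

    argocd-apps:
      applications:
        - name: kubezero
          namespace: argocd
          project: default
          source:
            repoURL: https://git.example.com/org/gitops.git  # hypothetical repo
            targetRevision: HEAD
            path: .
          destination:
            server: https://kubernetes.default.svc
            namespace: argocd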
@ -16,62 +18,26 @@ argo-cd:
|
|||||||
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
|
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
|
||||||
|
|
||||||
global:
|
global:
|
||||||
|
logging:
|
||||||
|
format: json
|
||||||
# image:
|
# image:
|
||||||
# tag: v2.1.6
|
# tag: v2.1.6
|
||||||
|
|
||||||
configs:
|
configs:
|
||||||
secret:
|
styles: |
|
||||||
createSecret: false
|
.sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
|
||||||
knownHosts:
|
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
|
||||||
data:
|
|
||||||
ssh_known_hosts: |
|
|
||||||
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
|
|
||||||
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
|
||||||
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
|
|
||||||
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
|
|
||||||
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
|
|
||||||
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
|
|
||||||
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
|
|
||||||
git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
|
|
||||||
|
|
||||||
controller:
|
cm:
|
||||||
args:
|
ui.bannercontent: "KubeZero v1.24 - Release notes"
|
||||||
statusProcessors: "8"
|
ui.bannerurl: "https://kubezero.com/releases/v1.24"
|
||||||
operationProcessors: "4"
|
ui.bannerpermanent: "true"
|
||||||
appResyncPeriod: "300"
|
ui.bannerposition: "bottom"
|
||||||
logFormat: json
|
|
||||||
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
serviceMonitor:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
resources:
|
|
||||||
# limits:
|
|
||||||
# cpu: 500m
|
|
||||||
# memory: 2048Mi
|
|
||||||
requests:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 256Mi
|
|
||||||
|
|
||||||
repoServer:
|
|
||||||
logFormat: json
|
|
||||||
metrics:
|
|
||||||
enabled: false
|
|
||||||
serviceMonitor:
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
server:
|
|
||||||
logFormat: json
|
|
||||||
config:
|
|
||||||
#ui.bannercontent: "KubeZero Release 1.20.8-12 incl. ArgoCD 2.1 -> Release notes"
|
|
||||||
#ui.bannerurl: "https://blog.argoproj.io/argo-cd-v2-1-first-release-candidate-is-ready-c1aab7795638"
|
|
||||||
|
|
||||||
# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
|
# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
|
||||||
url: argocd.example.com
|
url: argocd.example.com
|
||||||
|
|
||||||
#repositories: |
|
timeout.reconciliation: 300
|
||||||
# - url: https://cdn.zero-downtime.net/charts
|
|
||||||
|
|
||||||
resource.customizations: |
|
resource.customizations: |
|
||||||
cert-manager.io/Certificate:
|
cert-manager.io/Certificate:
|
||||||
@@ -98,17 +64,56 @@ argo-cd:
             hs.message = "Waiting for certificate"
             return hs

-  # Rename former https port to grpc, works with istio + insecure
-  service:
-    servicePortHttpsName: grpc
+    secret:
+      createSecret: false
+
+    knownHosts:
+      data:
+        ssh_known_hosts: |
+          bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
+          github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
+          github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
+          github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
+          gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
+          gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
+          gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
+          git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
+
+    params:
+      controller.status.processors: "10"
+      controller.operation.processors: "5"
+
+      server.insecure: true
+      server.enable.gzip: true
+
+  controller:
     metrics:
       enabled: false
       serviceMonitor:
         enabled: true
-  extraArgs:
-    - --insecure
+    resources:
+      # limits:
+      #   cpu: 500m
+      #   memory: 2048Mi
+      requests:
+        cpu: 100m
+        memory: 256Mi
+
+  repoServer:
+    metrics:
+      enabled: false
+      serviceMonitor:
+        enabled: true
+
+  server:
+    # Rename former https port to grpc, works with istio + insecure
+    service:
+      servicePortHttpsName: grpc
+    metrics:
+      enabled: false
+      serviceMonitor:
+        enabled: true

   # redis:
   # We might want to try to keep redis close to the controller

@@ -122,3 +127,33 @@ argo-cd:

   notifications:
     enabled: false
+
+argocd-image-updater:
+  enabled: false
+
+  # Unify all ArgoCD pieces under the same argocd namespace
+  fullnameOverride: argocd-image-updater
+
+  config:
+    argocd:
+      plaintext: true
+
+  metrics:
+    enabled: false
+    serviceMonitor:
+      enabled: true
+
+  authScripts:
+    enabled: true
+    scripts:
+      ecr-login.sh: |
+        #!/bin/sh
+        aws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d
+      ecr-public-login.sh: |
+        #!/bin/sh
+        aws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d
+
+  sshConfig:
+    config: |
+      Host *
+        PubkeyAcceptedAlgorithms +ssh-rsa
+        HostkeyAlgorithms +ssh-rsa
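A quick way to sanity-check the image-updater ECR auth scripts above before wiring them into the chart: run the same AWS CLI call locally and confirm it yields a `user:password` pair. This is a minimal sketch, assuming AWS credentials and `AWS_REGION` are already set in your environment; the file name and echo output are illustrative only.

```sh
#!/bin/sh
# Smoke test for the ecr-login.sh logic above: the decoded authorization
# token is a "AWS:<password>" style user:password pair.
set -eu

token=$(aws ecr --region "$AWS_REGION" get-authorization-token \
  --output text --query 'authorizationData[].authorizationToken' | base64 -d)

# Everything before the first ':' is the registry username (normally "AWS").
echo "registry user: ${token%%:*}"
```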
@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-auth
 description: KubeZero umbrella chart for all things Authentication and Identity management
 type: application
-version: 0.2.4
-appVersion: 19.0.1
+version: 0.3.4
+appVersion: 20.0.2
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -14,7 +14,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: postgresql
    version: 11.8.1
@@ -1,6 +1,6 @@
 # kubezero-auth

-![Version: 0.2.4](https://img.shields.io/badge/Version-0.2.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 19.0.1](https://img.shields.io/badge/AppVersion-19.0.1-informational?style=flat-square)
+![Version: 0.3.4](https://img.shields.io/badge/Version-0.3.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 20.0.2](https://img.shields.io/badge/AppVersion-20.0.2-informational?style=flat-square)

 KubeZero umbrella chart for all things Authentication and Identity management

@@ -18,7 +18,7 @@ Kubernetes: `>= 1.20.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
 | https://charts.bitnami.com/bitnami | postgresql | 11.8.1 |

 # Keycloak
@@ -26,6 +26,8 @@ Kubernetes: `>= 1.20.0`
 ## Operator

 https://github.com/keycloak/keycloak/tree/main/operator
+https://github.com/aerogear/keycloak-metrics-spi
+https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keycloak/templates

 ## Resources

@@ -41,6 +43,8 @@ https://github.com/keycloak/keycloak/tree/main/operator
 | keycloak.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | keycloak.istio.url | string | `""` | |
 | keycloak.metrics.enabled | bool | `false` | |
+| keycloak.podDisruptionBudget.minAvailable | int | `1` | |
+| keycloak.replicas | int | `1` | |
 | postgresql.auth.database | string | `"keycloak"` | |
 | postgresql.auth.existingSecret | string | `"kubezero-auth-postgresql"` | |
 | postgresql.auth.username | string | `"keycloak"` | |
@@ -18,6 +18,8 @@
 ## Operator

 https://github.com/keycloak/keycloak/tree/main/operator
+https://github.com/aerogear/keycloak-metrics-spi
+https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keycloak/templates

 ## Resources

@@ -938,6 +938,8 @@ spec:
                 type: array
               type: object
             type: array
+          otpPolicyCodeReusable:
+            type: boolean
           clientProfiles:
             type: object
             x-kubernetes-preserve-unknown-fields: true
@@ -19,46 +19,59 @@ spec:
          properties:
            spec:
              properties:
-                hostname:
-                  description: |-
-                    Hostname for the Keycloak server.
-                    The special value `INSECURE-DISABLE` disables the hostname strict resolution.
-                  type: string
                instances:
                  description: Number of Keycloak instances in HA mode. Default is 1.
                  type: integer
-                serverConfiguration:
-                  description: |-
-                    Configuration of the Keycloak server.
-                    expressed as a keys (reference: https://www.keycloak.org/server/all-config) and values that can be either direct values or references to secrets.
-                  items:
-                    properties:
-                      secret:
-                        properties:
-                          optional:
-                            type: boolean
-                          key:
-                            type: string
-                          name:
-                            type: string
-                        type: object
-                      value:
-                        type: string
-                      name:
-                        type: string
-                    type: object
-                  type: array
-                tlsSecret:
-                  description: |-
-                    A secret containing the TLS configuration for HTTPS. Reference: https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets.
-                    The special value `INSECURE-DISABLE` disables https.
-                  type: string
-                disableDefaultIngress:
-                  description: Disable the default ingress.
-                  type: boolean
-                image:
-                  description: Custom Keycloak image to be used.
-                  type: string
+                transaction:
+                  description: In this section you can find all properties related to
+                    the settings of transaction behavior.
+                  properties:
+                    xaEnabled:
+                      description: Determine whether Keycloak should use a non-XA datasource
+                        in case the database does not support XA transactions.
+                      type: boolean
+                  type: object
+                http:
+                  description: In this section you can configure Keycloak features related
+                    to HTTP and HTTPS
+                  properties:
+                    httpPort:
+                      description: The used HTTP port.
+                      type: integer
+                    tlsSecret:
+                      description: "A secret containing the TLS configuration for HTTPS.\
+                        \ Reference: https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets."
+                      type: string
+                    httpsPort:
+                      description: The used HTTPS port.
+                      type: integer
+                    httpEnabled:
+                      description: Enables the HTTP listener.
+                      type: boolean
+                  type: object
+                hostname:
+                  description: In this section you can configure Keycloak hostname and
+                    related properties.
+                  properties:
+                    hostname:
+                      description: Hostname for the Keycloak server.
+                      type: string
+                    strict:
+                      description: Disables dynamically resolving the hostname from
+                        request headers.
+                      type: boolean
+                    strictBackchannel:
+                      description: By default backchannel URLs are dynamically resolved
+                        from request headers to allow internal and external applications.
+                      type: boolean
+                    admin:
+                      description: The hostname for accessing the administration console.
+                      type: string
+                    adminUrl:
+                      description: "Set the base URL for accessing the administration\
+                        \ console, including scheme, host, port and path"
+                      type: string
+                  type: object
                unsupported:
                  description: |-
                    In this section you can configure podTemplate advanced features, not production-ready, and not supported settings.
@@ -2765,9 +2778,123 @@ spec:
                    type: object
                  type: object
                type: object
-              required:
-                - hostname
-                - tlsSecret
+                ingress:
+                  description: |-
+                    The deployment is, by default, exposed through a basic ingress.
+                    You can change this behaviour by setting the enabled property to false.
+                  properties:
+                    enabled:
+                      type: boolean
+                  type: object
+                image:
+                  description: Custom Keycloak image to be used.
+                  type: string
+                imagePullSecrets:
+                  description: Secret(s) that might be used when pulling an image from
+                    a private container image registry or repository.
+                  items:
+                    properties:
+                      name:
+                        type: string
+                    type: object
+                  type: array
+                additionalOptions:
+                  description: |-
+                    Configuration of the Keycloak server.
+                    expressed as a keys (reference: https://www.keycloak.org/server/all-config) and values that can be either direct values or references to secrets.
+                  items:
+                    properties:
+                      secret:
+                        properties:
+                          optional:
+                            type: boolean
+                          key:
+                            type: string
+                          name:
+                            type: string
+                        type: object
+                      value:
+                        type: string
+                      name:
+                        type: string
+                    type: object
+                  type: array
+                db:
+                  description: In this section you can find all properties related to
+                    connect to a database.
+                  properties:
+                    passwordSecret:
+                      description: The reference to a secret holding the password of
+                        the database user.
+                      properties:
+                        optional:
+                          type: boolean
+                        key:
+                          type: string
+                        name:
+                          type: string
+                      type: object
+                    usernameSecret:
+                      description: The reference to a secret holding the username of
+                        the database user.
+                      properties:
+                        optional:
+                          type: boolean
+                        key:
+                          type: string
+                        name:
+                          type: string
+                      type: object
+                    port:
+                      description: "Sets the port of the default JDBC URL of the chosen\
+                        \ vendor. If the `url` option is set, this option is ignored."
+                      type: integer
+                    schema:
+                      description: The database schema to be used.
+                      type: string
+                    host:
+                      description: "Sets the hostname of the default JDBC URL of the\
+                        \ chosen vendor. If the `url` option is set, this option is\
+                        \ ignored."
+                      type: string
+                    url:
+                      description: "The full database JDBC URL. If not provided, a default\
+                        \ URL is set based on the selected database vendor. For instance,\
+                        \ if using 'postgres', the default JDBC URL would be 'jdbc:postgresql://localhost/keycloak'. "
+                      type: string
+                    poolInitialSize:
+                      description: The initial size of the connection pool.
+                      type: integer
+                    poolMaxSize:
+                      description: The maximum size of the connection pool.
+                      type: integer
+                    vendor:
+                      description: The database vendor.
+                      type: string
+                    database:
+                      description: "Sets the database name of the default JDBC URL of\
+                        \ the chosen vendor. If the `url` option is set, this option\
+                        \ is ignored."
+                      type: string
+                    poolMinSize:
+                      description: The minimal size of the connection pool.
+                      type: integer
+                  type: object
+                features:
+                  description: "In this section you can configure Keycloak features,\
+                    \ which should be enabled/disabled."
+                  properties:
+                    disabled:
+                      description: Disabled Keycloak features
+                      items:
+                        type: string
+                      type: array
+                    enabled:
+                      description: Enabled Keycloak features
+                      items:
+                        type: string
+                      type: array
+                  type: object
              type: object
            status:
              properties:
charts/kubezero-auth/dashboards-keycloak.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
+configmap: grafana-dashboards
+condition: '.Values.keycloak.metrics.enabled'
+gzip: true
+# folder:
+dashboards:
+  - name: keycloak
+    url: https://grafana.com/api/dashboards/10441/revisions/2/download
+    tags: ['Keycloak', 'Auth']

File diff suppressed because one or more lines are too long
@@ -1,8 +1,8 @@
-{{- if and .Values.keycloak.enabled .Values.keycloak.istio.enabled .Values.keycloak.istio.ipBlocks }}
+{{- if and .Values.keycloak.enabled .Values.keycloak.istio.enabled }}
 apiVersion: security.istio.io/v1beta1
 kind: AuthorizationPolicy
 metadata:
-  name: {{ .Release.Name }}-deny-not-in-ipblocks
+  name: {{ .Release.Name }}-deny-metrics-ipblocks
   namespace: istio-system
   labels:
     {{- include "kubezero-lib.labels" $ | nindent 4 }}
@@ -12,6 +12,15 @@ spec:
       app: istio-ingressgateway
   action: DENY
   rules:
+  - to:
+    - operation:
+        hosts: ["{{ .Values.keycloak.istio.url }}"]
+        paths: ["/auth/realms/master/metrics"]
+    when:
+    - key: connection.sni
+      values:
+      - '*'
+  {{- if .Values.keycloak.istio.ipBlocks }}
   - from:
     - source:
         notIpBlocks:
@@ -23,4 +32,5 @@ spec:
     - key: connection.sni
      values:
       - '*'
+  {{- end }}
 {{- end }}
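The reworked policy now always blocks the Keycloak metrics endpoint at the ingress gateway, independent of any ipBlocks. A minimal sketch of how one could verify it from outside the cluster, assuming the gateway is reachable and `KEYCLOAK_URL` is whatever `keycloak.istio.url` was set to (a placeholder here):

```sh
#!/bin/sh
# Expect an RBAC denial (403) on the metrics path...
KEYCLOAK_URL=keycloak.example.com
curl -s -o /dev/null -w 'metrics: %{http_code}\n' \
  "https://${KEYCLOAK_URL}/auth/realms/master/metrics"
# ...while the realm endpoint itself stays reachable (200).
curl -s -o /dev/null -w 'realm:   %{http_code}\n' \
  "https://${KEYCLOAK_URL}/auth/realms/master"
```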
@@ -4,13 +4,17 @@ kind: Keycloak
 metadata:
   name: {{ template "kubezero-lib.fullname" . }}
   namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
 spec:
-  instances: 1
-  disableDefaultIngress: true
+  instances: {{ .Values.keycloak.replicas }}

-  serverConfiguration:
+  additionalOptions:
+    # Needs int casting thx to https://github.com/kubernetes-sigs/yaml/issues/45
+    {{- if lt (int .Values.keycloak.replicas) 2 }}
     - name: cache
       value: local
+    {{- end }}
     {{- if .Values.postgresql.enabled }}
     - name: db
       value: postgres
@@ -30,15 +34,23 @@ spec:
     - name: hostname-strict-https
       value: "false"
     - name: proxy
-      value: passthrough
+      value: edge
     - name: http-enabled
       value: "true"
+    - name: log-console-output
+      value: json

-  #hostname: INSECURE-DISABLE
-  hostname: {{ default "keycloak" .Values.keycloak.istio.url }}
+  ingress:
+    enabled: false
+
+  http:
+    httpEnabled: true

   # We use Istio Ingress to terminate TLS
   # mTls down the road
-  tlsSecret: INSECURE-DISABLE
+  hostname:
+    hostname: {{ default "keycloak" .Values.keycloak.istio.url }}
+    strict: false
+    strictBackchannel: false
 {{- end }}
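With `proxy: edge` Keycloak trusts `X-Forwarded-*` headers and serves plain HTTP behind the TLS-terminating Istio gateway. A rough in-cluster check, under stated assumptions: the operator names the service `<cr-name>-service`, and both the `kubezero-auth-service` name and the `keycloak` namespace below are guesses to substitute for your deployment.

```sh
#!/bin/sh
# Port-forward to the Keycloak service and simulate the gateway's headers.
kubectl -n keycloak port-forward svc/kubezero-auth-service 8080:8080 &
PF_PID=$!
sleep 2
# Keycloak should answer on plain HTTP when the forwarded proto says https.
curl -s -H 'X-Forwarded-Proto: https' -H 'X-Forwarded-For: 10.0.0.1' \
  http://127.0.0.1:8080/auth/realms/master | head -c 200
kill "$PF_PID"
```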
@@ -4,20 +4,20 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   annotations:
-    app.quarkus.io/build-timestamp: 2022-07-29 - 11:21:21 +0000
+    app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000
   labels:
-    app.kubernetes.io/version: 19.0.1
     app.kubernetes.io/name: keycloak-operator
+    app.kubernetes.io/version: 20.0.2
   name: keycloak-operator
 ---
 apiVersion: v1
 kind: Service
 metadata:
   annotations:
-    app.quarkus.io/build-timestamp: 2022-07-29 - 11:21:21 +0000
+    app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000
   labels:
     app.kubernetes.io/name: keycloak-operator
-    app.kubernetes.io/version: 19.0.1
+    app.kubernetes.io/version: 20.0.2
   name: keycloak-operator
 spec:
   ports:
@@ -26,7 +26,7 @@ spec:
     targetPort: 8080
   selector:
     app.kubernetes.io/name: keycloak-operator
-    app.kubernetes.io/version: 19.0.1
+    app.kubernetes.io/version: 20.0.2
   type: ClusterIP
 ---
 apiVersion: rbac.authorization.k8s.io/v1
@@ -179,24 +179,24 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   annotations:
-    app.quarkus.io/build-timestamp: 2022-07-29 - 11:21:21 +0000
+    app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000
   labels:
-    app.kubernetes.io/version: 19.0.1
     app.kubernetes.io/name: keycloak-operator
+    app.kubernetes.io/version: 20.0.2
   name: keycloak-operator
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app.kubernetes.io/version: 19.0.1
       app.kubernetes.io/name: keycloak-operator
+      app.kubernetes.io/version: 20.0.2
   template:
     metadata:
       annotations:
-        app.quarkus.io/build-timestamp: 2022-07-29 - 11:21:21 +0000
+        app.quarkus.io/build-timestamp: 2022-12-13 - 14:29:14 +0000
       labels:
-        app.kubernetes.io/version: 19.0.1
         app.kubernetes.io/name: keycloak-operator
+        app.kubernetes.io/version: 20.0.2
     spec:
       containers:
       - env:
@@ -205,8 +205,8 @@ spec:
           fieldRef:
             fieldPath: metadata.namespace
        - name: OPERATOR_KEYCLOAK_IMAGE
-          value: quay.io/keycloak/keycloak:19.0.1
+          value: quay.io/keycloak/keycloak:20.0.2
-        image: quay.io/keycloak/keycloak-operator:19.0.1
+        image: quay.io/keycloak/keycloak-operator:20.0.2
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
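After applying the operator bump, a hedged post-upgrade check is to confirm both the operator image and the `OPERATOR_KEYCLOAK_IMAGE` env now report 20.0.2 (the `keycloak` namespace below is an assumption, adjust to where the chart is installed):

```sh
#!/bin/sh
# Operator container image should be quay.io/keycloak/keycloak-operator:20.0.2.
kubectl -n keycloak get deployment keycloak-operator \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
# The managed Keycloak image env should match quay.io/keycloak/keycloak:20.0.2.
kubectl -n keycloak get deployment keycloak-operator \
  -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="OPERATOR_KEYCLOAK_IMAGE")].value}{"\n"}'
```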
charts/kubezero-auth/templates/keycloak/pdb.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
+{{- if and .Values.keycloak.podDisruptionBudget (gt (int .Values.keycloak.replicas) 1) }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ template "kubezero-lib.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      app: keycloak
+      app.kubernetes.io/managed-by: keycloak-operator
+  {{- toYaml .Values.keycloak.podDisruptionBudget | nindent 2 }}
+{{- end }}
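The `gt (int .Values.keycloak.replicas) 1` guard means the PDB only renders for HA setups, so a single replica is never blocked from eviction. A minimal render test, assuming the chart is checked out at `charts/kubezero-auth`:

```sh
#!/bin/sh
# With the default replicas=1 the template should emit nothing.
helm template charts/kubezero-auth -s templates/keycloak/pdb.yaml \
  --set keycloak.replicas=1 2>/dev/null | grep -c PodDisruptionBudget || true
# With replicas=2 the PodDisruptionBudget should render.
helm template charts/kubezero-auth -s templates/keycloak/pdb.yaml \
  --set keycloak.replicas=2 | grep 'kind:'
```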
@@ -9,8 +9,11 @@ helm dep update
 # Operator
 VERSION=$(yq eval '.appVersion' Chart.yaml)

-wget -O crds/keycloak.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/${VERSION}/kubernetes/keycloaks.k8s.keycloak.org-v1.yml
-wget -O crds/keycloak-realmimports.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/${VERSION}/kubernetes/keycloakrealmimports.k8s.keycloak.org-v1.yml
-wget -O templates/keycloak/operator.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/${VERSION}/kubernetes/kubernetes.yml
+wget -O crds/keycloak.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/keycloaks.k8s.keycloak.org-v1.yml
+wget -O crds/keycloak-realmimports.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/keycloakrealmimports.k8s.keycloak.org-v1.yml
+wget -O templates/keycloak/operator.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/kubernetes.yml
 patch -i keycloak.patch -p0 --no-backup-if-mismatch
+
+# Fetch dashboards
+../kubezero-metrics/sync_grafana_dashboards.py dashboards-keycloak.yaml templates/keycloak/grafana-dashboards.yaml
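Quoting `"${VERSION}"` hardens the fetch against word splitting should `yq` ever return an empty or whitespace-containing value. A minimal sketch of the same pattern, with a hypothetical `/tmp` target just for illustration:

```sh
#!/bin/sh
# Fail fast instead of fetching a half-built URL.
set -eu
VERSION=$(yq eval '.appVersion' Chart.yaml)
echo "Fetching keycloak-k8s-resources for ${VERSION}"
wget -q -O /tmp/keycloaks.yml \
  "https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/${VERSION}/kubernetes/keycloaks.k8s.keycloak.org-v1.yml"
```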
@@ -1,6 +1,10 @@
 keycloak:
   enabled: false

+  replicas: 1
+  podDisruptionBudget:
+    minAvailable: 1
+
   istio:
     enabled: false
     gateway: istio-ingress/private-ingressgateway
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-cert-manager
 description: KubeZero Umbrella Chart for cert-manager
 type: application
-version: 0.9.2
+version: 0.9.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -13,9 +13,9 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: cert-manager
    version: 1.9.1
    repository: https://charts.jetstack.io
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
@@ -1,6 +1,6 @@
 # kubezero-cert-manager

-![Version: 0.9.2](https://img.shields.io/badge/Version-0.9.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.9.3](https://img.shields.io/badge/Version-0.9.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero Umbrella Chart for cert-manager

@@ -14,11 +14,11 @@ KubeZero Umbrella Chart for cert-manager

 ## Requirements

-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
 | https://charts.jetstack.io | cert-manager | 1.9.1 |

 ## AWS - OIDC IAM roles
@@ -35,6 +35,8 @@ If your resolvers need additional sercrets like CloudFlare API tokens etc. make
 | cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
 | cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| cert-manager.cainjector.tolerations[1].effect | string | `"NoSchedule"` | |
+| cert-manager.cainjector.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | cert-manager.enabled | bool | `true` | |
 | cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
 | cert-manager.global.leaderElection.namespace | string | `"cert-manager"` | |
@@ -45,9 +47,13 @@ If your resolvers need additional sercrets like CloudFlare API tokens etc. make
 | cert-manager.startupapicheck.enabled | bool | `false` | |
 | cert-manager.tolerations[0].effect | string | `"NoSchedule"` | |
 | cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| cert-manager.tolerations[1].effect | string | `"NoSchedule"` | |
+| cert-manager.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | cert-manager.webhook.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
 | cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| cert-manager.webhook.tolerations[1].effect | string | `"NoSchedule"` | |
+| cert-manager.webhook.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | clusterIssuer | object | `{}` | |
 | localCA.enabled | bool | `false` | |
 | localCA.selfsigning | bool | `true` | |
@@ -1,4 +1,4 @@
-configmap: cert-manager-grafana-dashboard
+configmap: grafana-dashboard
 gzip: true
 folder: KubeZero
 condition: 'index .Values "cert-manager" "prometheus" "servicemonitor" "enabled"'
@@ -18,7 +18,7 @@
         "subdir": "contrib/mixin"
       }
     },
-    "version": "b872757492ca70f350abd44eb95d81b95339f581",
+    "version": "f1842b6ecf67a8102766cc914eaa2a8c7ad97314",
     "sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
   },
   {
@@ -28,8 +28,8 @@
         "subdir": "grafonnet"
       }
     },
-    "version": "6db00c292d3a1c71661fc875f90e0ec7caa538c2",
-    "sum": "gF8foHByYcB25jcUOBqP6jxk0OPifQMjPvKY0HaCk6w="
+    "version": "30280196507e0fe6fa978a3e0eaca3a62844f817",
+    "sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc="
   },
   {
     "source": {
@@ -38,8 +38,8 @@
         "subdir": "grafana-builder"
       }
     },
-    "version": "5fb2525651cc6e5100e081b10ad9fbe7e3595231",
-    "sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc="
+    "version": "15484ab1cb78eb7588e6b79ac52fc04e63f552b4",
+    "sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
   },
   {
     "source": {
@@ -48,8 +48,8 @@
         "subdir": ""
       }
     },
-    "version": "b538a10c89508f8d12885680cca72a134d3127f5",
-    "sum": "GLt5T2k4RKg36Gfcaf9qlTfVumDitqotVD0ipz/bPJ4="
+    "version": "ab104c5c406b91078d676475c14ab18644f84f2d",
+    "sum": "tRpIInEClWUNe5IS6uIjucFN/KqDFgg19+yo78VrLfU="
   },
   {
     "source": {
@@ -58,7 +58,7 @@
         "subdir": "lib/promgrafonnet"
       }
     },
-    "version": "62ad10fe9ceb53c6b846871997abbfe8e0bd7cf5",
+    "version": "3c386687c1f8ceb6b79ff887c4a934e9cee1b90a",
     "sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
   },
   {
@@ -88,8 +88,8 @@
         "subdir": "jsonnet/kube-prometheus"
       }
     },
-    "version": "125fb56d7495e20f504e1537ae949e8261a2f812",
-    "sum": "QwH53kTe1jWCeXmQe7+U1PBs/a1p4MCTEW3B8IiIxeo="
+    "version": "e7eff18e7e70d7f1168105521451c4d7bd6a6d96",
+    "sum": "gcgf9y8wos4W8jgcJKuTDfORYDigCxx+q3QOYEijQFo="
   },
   {
     "source": {
@@ -2,7 +2,7 @@
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "cert-manager-grafana-dashboard" | trunc 63 | trimSuffix "-" }}
+  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboard" | trunc 63 | trimSuffix "-" }}
   namespace: {{ .Release.Namespace }}
   labels:
     grafana_dashboard: "1"
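After this rename the dashboard lands in a ConfigMap whose name ends in `grafana-dashboard` instead of the doubled `cert-manager-grafana-dashboard`. A hedged way to confirm the Grafana sidecar still finds it after upgrading (namespace is an assumption):

```sh
#!/bin/sh
# List all dashboard ConfigMaps the sidecar watches via the label selector.
kubectl -n cert-manager get configmaps -l grafana_dashboard=1
```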
@@ -1,4 +1,3 @@
-{{- if index .Values "cert-manager" "prometheus" "servicemonitor" "enabled" }}
 apiVersion: monitoring.coreos.com/v1
 kind: PrometheusRule
 metadata:
@@ -51,4 +50,4 @@ spec:
     for: 5m
     labels:
       severity: critical
-{{- end }}
@@ -49,6 +49,8 @@ cert-manager:
   tolerations:
     - key: node-role.kubernetes.io/master
       effect: NoSchedule
+    - key: node-role.kubernetes.io/control-plane
+      effect: NoSchedule
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""

@@ -60,6 +62,8 @@ cert-manager:
     tolerations:
       - key: node-role.kubernetes.io/master
         effect: NoSchedule
+      - key: node-role.kubernetes.io/control-plane
+        effect: NoSchedule
     nodeSelector:
       node-role.kubernetes.io/control-plane: ""

@@ -67,6 +71,8 @@ cert-manager:
     tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
+     - key: node-role.kubernetes.io/control-plane
+       effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-ci
 description: KubeZero umbrella chart for all things CI
 type: application
-version: 0.5.14
+version: 0.5.25
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -15,18 +15,18 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: gocd
    version: 1.40.8
    repository: https://gocd.github.io/helm-chart
    condition: gocd.enabled
  - name: gitea
-    version: 5.0.9
+    version: 6.0.5
    repository: https://dl.gitea.io/charts/
    condition: gitea.enabled
  - name: jenkins
-    version: 4.2.6
+    version: 4.2.20
    repository: https://charts.jenkins.io
    condition: jenkins.enabled
  - name: trivy
@@ -1,6 +1,6 @@
 # kubezero-ci

-![Version: 0.5.14](https://img.shields.io/badge/Version-0.5.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.5.24](https://img.shields.io/badge/Version-0.5.24-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero umbrella chart for all things CI

@@ -19,9 +19,9 @@ Kubernetes: `>= 1.20.0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.17 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
-| https://charts.jenkins.io | jenkins | 4.2.6 |
+| https://charts.jenkins.io | jenkins | 4.2.17 |
-| https://dl.gitea.io/charts/ | gitea | 5.0.9 |
+| https://dl.gitea.io/charts/ | gitea | 6.0.5 |
 | https://gocd.github.io/helm-chart | gocd | 1.40.8 |

 # Jenkins
@@ -52,9 +52,8 @@ Kubernetes: `>= 1.20.0`
 | gitea.gitea.config.database.DB_TYPE | string | `"sqlite3"` | |
 | gitea.gitea.demo | bool | `false` | |
 | gitea.gitea.metrics.enabled | bool | `false` | |
-| gitea.gitea.metrics.serviceMonitor.enabled | bool | `false` | |
+| gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
 | gitea.image.rootless | bool | `true` | |
-| gitea.image.tag | string | `"1.17.1"` | |
 | gitea.istio.enabled | bool | `false` | |
 | gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | gitea.istio.url | string | `"git.example.com"` | |
@@ -64,6 +63,9 @@ Kubernetes: `>= 1.20.0`
 | gitea.persistence.enabled | bool | `true` | |
 | gitea.persistence.size | string | `"4Gi"` | |
 | gitea.postgresql.enabled | bool | `false` | |
+| gitea.resources.limits.memory | string | `"2048Mi"` | |
+| gitea.resources.requests.cpu | string | `"150m"` | |
+| gitea.resources.requests.memory | string | `"320Mi"` | |
 | gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
 | gitea.securityContext.capabilities.add[0] | string | `"SYS_CHROOT"` | |
 | gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
@@ -76,43 +78,40 @@ Kubernetes: `>= 1.20.0`
 | jenkins.agent.annotations."container.apparmor.security.beta.kubernetes.io/jnlp" | string | `"unconfined"` | |
 | jenkins.agent.containerCap | int | `2` | |
 | jenkins.agent.customJenkinsLabels[0] | string | `"podman-aws-trivy"` | |
-| jenkins.agent.idleMinutes | int | `10` | |
+| jenkins.agent.idleMinutes | int | `15` | |
 | jenkins.agent.image | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
 | jenkins.agent.podName | string | `"podman-aws"` | |
 | jenkins.agent.podRetention | string | `"Default"` | |
-| jenkins.agent.resources.limits.cpu | string | `"4"` | |
-| jenkins.agent.resources.limits.memory | string | `"6144Mi"` | |
-| jenkins.agent.resources.requests.cpu | string | `"512m"` | |
-| jenkins.agent.resources.requests.memory | string | `"1024Mi"` | |
 | jenkins.agent.showRawYaml | bool | `false` | |
-| jenkins.agent.tag | string | `"v0.3.2"` | |
+| jenkins.agent.tag | string | `"v0.4.1"` | |
 | jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
-| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n limits:\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
+| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n requests:\n cpu: \"512m\"\n memory: \"1024Mi\"\n limits:\n cpu: \"4\"\n memory: \"6144Mi\"\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
 | jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
 | jenkins.controller.disableRememberMe | bool | `true` | |
 | jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | |
-| jenkins.controller.initContainerResources.limits.cpu | string | `"1000m"` | |
 | jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
 | jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
 | jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
-| jenkins.controller.installPlugins[0] | string | `"kubernetes:3706.vdfb_d599579f3"` | |
+| jenkins.controller.installPlugins[0] | string | `"kubernetes:3743.v1fa_4c724c3b_7"` | |
+| jenkins.controller.installPlugins[10] | string | `"build-discarder:139.v05696a_7fe240"` | |
+| jenkins.controller.installPlugins[11] | string | `"dark-theme:262.v0202a_4c8fb_6a"` | |
+| jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.206.v7ce2cf7b_0c8b"` | |
 | jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
-| jenkins.controller.installPlugins[2] | string | `"git:4.12.1"` | |
+| jenkins.controller.installPlugins[2] | string | `"git:4.14.3"` | |
-| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1512.vb_79d418d5fc8"` | |
+| jenkins.controller.installPlugins[3] | string | `"basic-branch-build-strategies:71.vc1421f89888e"` | |
-| jenkins.controller.installPlugins[4] | string | `"antisamy-markup-formatter:2.7"` | |
+| jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:144.vf3924feb_7e35"` | |
-| jenkins.controller.installPlugins[5] | string | `"prometheus:2.0.11"` | |
+| jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.28"` | |
-| jenkins.controller.installPlugins[6] | string | `"htmlpublisher:1.31"` | |
+| jenkins.controller.installPlugins[6] | string | `"configuration-as-code:1569.vb_72405b_80249"` | |
-| jenkins.controller.installPlugins[7] | string | `"build-discarder:139.v05696a_7fe240"` | |
+| jenkins.controller.installPlugins[7] | string | `"antisamy-markup-formatter:155.v795fb_8702324"` | |
-| jenkins.controller.installPlugins[8] | string | `"dark-theme:245.vb_a_2b_b_010ea_96"` | |
+| jenkins.controller.installPlugins[8] | string | `"prometheus:2.0.11"` | |
-| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.196.va_55f5e31e3c2"` | |
+| jenkins.controller.installPlugins[9] | string | `"htmlpublisher:1.31"` | |
 | jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
 | jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
 | jenkins.controller.prometheus.enabled | bool | `false` | |
-| jenkins.controller.resources.limits.cpu | string | `"2000m"` | |
 | jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
 | jenkins.controller.resources.requests.cpu | string | `"250m"` | |
 | jenkins.controller.resources.requests.memory | string | `"1280Mi"` | |
-| jenkins.controller.tag | string | `"2.370-alpine-jdk17"` | |
+| jenkins.controller.tag | string | `"alpine-jdk17"` | |
 | jenkins.controller.testEnabled | bool | `false` | |
 | jenkins.enabled | bool | `false` | |
 | jenkins.istio.agent.enabled | bool | `false` | |
@@ -129,7 +128,7 @@ Kubernetes: `>= 1.20.0`
 | jenkins.serviceAccountAgent.create | bool | `true` | |
 | jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
 | trivy.enabled | bool | `false` | |
+| trivy.image.tag | string | `"0.35.0"` | |
 | trivy.persistence.enabled | bool | `true` | |
 | trivy.persistence.size | string | `"1Gi"` | |
 | trivy.rbac.create | bool | `false` | |
-| trivy.rbac.pspEnabled | bool | `false` | |
charts/kubezero-ci/dashboard-gitea.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+configmap: grafana-dashboards
+gzip: true
+condition: '.Values.gitea.gitea.metrics.enabled'
+folder: KubeZero
+dashboards:
+  - name: Gitea
+    url: https://grafana.com/api/dashboards/13192/revisions/1/download
+    tags:
+      - CI
charts/kubezero-ci/templates/gitea/grafana-dashboard.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
+{{- if .Values.gitea.gitea.metrics.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards" | trunc 63 | trimSuffix "-" }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    grafana_dashboard: "1"
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+  annotations:
+    k8s-sidecar-target-directory: KubeZero
+binaryData:
+  Gitea.json.gz:
+    H4sIAAAAAAAC/+1cW2/bNhR+z68QhGLYgDSwnMRJC+whS5cuaG5I2vVhDQxKomU2FKmSVBw38H8fSd2oixNjSBqrY4G61jmSeK7fR4ks7zccxx2PEUlSwd23zj/y2HHu9afUEBBDKXXfXY0vLs9P//z415+frtzNQo2BD7HSXzAaQzGFKa+UIeQBQ4lAlKhTKoWYJ/qmIRCA05QFsNIlOI0QOQ6VPum4aaY/y80yhtUnLOTn9WbmEoPfUsRgh1PF+BEDE0BAdXMUdoqLILxvKm4h47l3o629re3ciM3u4RJAZLDagyXTzqFMsTHQw2N0hRQtDSZph7FrSG9rsDX4D75xAUR7sKuatO1ZmUJACJXnSq3KYTakixEXZUYrQ6Tm1asp4NMPcK7uRf2vMBBvh4O93XIseY6fIiyO1WieITWC1h0MeQ4kwMdKL1gKDfkUhR1SFFBySDFl6oYs8sGvg01n6HnyY3d30/F+M29dBOag8tf5xTnAkImaCVWO+dSngIVurlvof6838vQ0G++UEiQoc94jAYHDIZMx1/d1YYhEwy03IlDo9vO2vTfDTKSK8SOlWKBEKgZaqFNMUoz1EUbkRndaljpdDh2dBzACXAdGp3RRloEPtGQCMIcVgEhHTyCJxFSZM6jJYdfpj6TRnSCMzdxrgWy1EEGiqmq3VEQMhRe0qrss1/JwZCRkJo/3jeO7Ijj5sarEQZGj8tayYkJIriBDHT7oqG5XBmIYQRLWzQC3UfM6KQ1SxjIvmpoY3HVJEemQ8imddYiFLEzcIb8FOK28aLkqq0Jrze7QwhkKdVKrnKpKuqCIiFOqG0oLqszRpI4DZcJPysJrjZ5AWQpEgAi2wpyxyN9N+K70yhJVF6m697Aub2dNBj6EDOpun2BqwBvXaT6XHScLClYdkikTEMCuApfAGdy0RuECJgkMpcdthwRgETQIvI6NqtXvEpbTQAA5l+zIpUFEjGMYUzYf+3MB+f1X6v/+xY0UUHxxFwb4qNIkQkKHLgO3rsmK9IiyGIi2lsFJRucHbile5N+qWIiptGhKcdiIkUAxPJKtbIBNKb+EUV4UjQuupmgi2lcIjXTuZe66k7luzEtKiLs3OwIwGLYhnlMmGv2uu2FcwDQiIbpFYSoj1ipNWVtEZj9v2Fp7GJMTcwZwB+5Qo/z9NLjJcm76qVo7byEVig6iaZydt3zDvbKzOxprDu7gA6U2KQtB3pqJRq3kM8a6EUpBoz8Ah3VqLtGrdXoGXy2x4UxVbJs9sbPVFPN20iWDRl3AreUn8LY0ujY3aE7bfn4e3n8KHh5aHrY8/Lw8HCTpmEP5uBDysc7uIxwsTeoqrp+Jnw8vPjk16rLEbInZEvNPQsze6CmYedsys2Xm52VmmkAynoQPPxRXUKUwfZw58xScXWiOQCD0a0Svz5R+LmPpHL27spRuKf3/RumPs+5SLt1pcOnokZfNo1b5aLYcPkwSgZpuFKSia9Es5AmCODzvuE5dCXBQL848n1ycUXGm+MnQXNeyEsIJSLFo3lNnN0kQieog04VZ9euMZgQ+pzgV0N1snqEYom1xs1zrocnWXNjNa78eGuO8LGq1Yqj+LBqS641l2kUtQrSTE5fPKmrXZ5hWThUoqSX1a8oFmswLtZoE5GtCxdhMzfhAsYQEUkHbyPrYHGFlytXcOgaB4t02fT7rY+0DXPcwpZ2lsQ+ZQyfOQdPuklf0AuiPBoXRaqDg7VtUsKiw/qjAYEK5Wr9G/UOGyy7bXxYdvOGKc4YdCw8WHtYfHlL5yN07XPhUM/qFAWF/NUDYtXhg8WD98QBxnvZvonBct/rHIsLOI28VvMGSJwjPQsKTQMIcYkxnFhSe8c1CVma9e7FQN/tlYWFnVVgYWFiwsNAHWNDrDr1DhZO61S8LCvsrgsLQzhUsKPQCFCiLAEHfQS9nDOedxr8sRLTeOC6dOIwsRliM6ANGyL9x77DhY83oF8aE0YqYYBcpLST0AhImVAW4d6Bw1DD7ZWFhuOqrx6F9x2BxoRe4kKQ+RsENnPcOGi605R9My9dsXWJnyXPEngWHJwGHJGUJtouVzwcOso369+OFq5rRa7YmsQwS7K+ZLCT0AhJm0J9SetM7VPjctHvN1iWWAcMbCwwWGPrxO2gMAe/jb6Abdq/basQSZLAPERYYegEMAY1jSETvgOGwafe6LUksAYZtCwwWGPoADDHCkAtK+jdnOG1bvm4LE0vAYWj/m4RFh368aAAimPYPGj43zO7ChXIHaWmWKmGl3h5k17hcXh6DKozDbI9jLuYYFtWXnSlAVEXTPTx2y/sKGCdYpotE7Y2pq22Y9ZYgpX6SOS6LY/baKzYbcWWiM5lbuyxRS0asujh3ZFzkx8xy6Zo+8OLq+67x3TMPtgemxtj5ZGh89/L9pesOfad6bx3XZ3TGi32kyzzpLaYzWZrtAU7OvnnHs9333zPpLWBI7TjNHwpctSf43sbiX1mpsRuhXgAA
+{{- end }}
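Since the Gitea dashboard is shipped gzip-compressed inside `binaryData`, inspecting it means rendering the template and decoding the blob. A minimal sketch, assuming yq v4 and jq are installed and the chart sits at `charts/kubezero-ci`:

```sh
#!/bin/sh
# Render just the dashboard ConfigMap, pull out the base64 value,
# then decode and decompress it to inspect the dashboard JSON title.
helm template charts/kubezero-ci -s templates/gitea/grafana-dashboard.yaml \
  --set gitea.enabled=true --set gitea.gitea.metrics.enabled=true \
  | yq eval '.binaryData."Gitea.json.gz"' - | base64 -d | gunzip | jq '.title'
```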
@@ -3,4 +3,5 @@
 helm dep update

 # Create ZDT dashboard configmap
-../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
+../kubezero-metrics/sync_grafana_dashboards.py dashboard-jenkins.yaml templates/jenkins/grafana-dashboard.yaml
+../kubezero-metrics/sync_grafana_dashboards.py dashboard-gitea.yaml templates/gitea/grafana-dashboard.yaml
@ -17,7 +17,7 @@ gitea:
|
|||||||
enabled: false
|
enabled: false
|
||||||
|
|
||||||
image:
|
image:
|
||||||
tag: 1.17.1
|
#tag: 1.17.4
|
||||||
rootless: true
|
rootless: true
|
||||||
|
|
||||||
securityContext:
|
securityContext:
|
||||||
@@ -32,6 +32,13 @@ gitea:
     enabled: true
     size: 4Gi
 
+  resources:
+    requests:
+      cpu: "150m"
+      memory: "320Mi"
+    limits:
+      memory: "2048Mi"
+
   gitea:
     admin:
       existingSecret: gitea-admin-secret
@@ -42,7 +49,7 @@ gitea:
   metrics:
     enabled: false
     serviceMonitor:
-      enabled: false
+      enabled: true
 
   config:
     database:
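Only the ServiceMonitor toggle flips here while gitea.metrics.enabled stays false; to actually expose and scrape Gitea metrics one would presumably enable both switches (a sketch, not part of this commit):

```yaml
gitea:
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
```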
@@ -69,7 +76,7 @@ jenkins:
   enabled: false
 
   controller:
-    tag: 2.370-alpine-jdk17
+    tag: alpine-jdk17
     #tagLabel: alpine
     disableRememberMe: true
     prometheus:
@@ -84,14 +91,14 @@ jenkins:
       cpu: "250m"
       memory: "1280Mi"
     limits:
-      cpu: "2000m"
+      #cpu: "2000m"
       memory: "4096Mi"
   initContainerResources:
     requests:
       cpu: "50m"
       memory: "256Mi"
     limits:
-      cpu: "1000m"
+      #cpu: "1000m"
       memory: "1024Mi"
 
   JCasC:
@@ -114,16 +121,19 @@ jenkins:
           numToKeepStr: "10"
 
     installPlugins:
-      - kubernetes:3706.vdfb_d599579f3
+      - kubernetes:3802.vb_b_600831fcb_3
       - workflow-aggregator:581.v0c46fa_697ffd
-      - git:4.12.1
-      - configuration-as-code:1512.vb_79d418d5fc8
-      - antisamy-markup-formatter:2.7
-      - prometheus:2.0.11
+      - git:5.0.0
+      - basic-branch-build-strategies:71.vc1421f89888e
+      - pipeline-graph-view:144.vf3924feb_7e35
+      - pipeline-stage-view:2.28
+      - configuration-as-code:1569.vb_72405b_80249
+      - antisamy-markup-formatter:155.v795fb_8702324
+      - prometheus:2.1.0
       - htmlpublisher:1.31
      - build-discarder:139.v05696a_7fe240
-      - dark-theme:245.vb_a_2b_b_010ea_96
-      - kubernetes-credentials-provider:1.196.va_55f5e31e3c2
+      - dark-theme:262.v0202a_4c8fb_6a
+      - kubernetes-credentials-provider:1.208.v128ee9800c04
 
   serviceAccountAgent:
     create: true
@@ -132,24 +142,24 @@ jenkins:
   # Preconfigure agents to use zdt podman requires fuse/overlayfs
   agent:
     image: public.ecr.aws/zero-downtime/jenkins-podman
-    tag: v0.3.2
-    resources:
-      requests:
-        cpu: "512m"
-        memory: "1024Mi"
-      limits:
-        cpu: "4"
-        memory: "6144Mi"
+    tag: v0.4.1
     #alwaysPullImage: true
     podRetention: "Default"
     showRawYaml: false
     podName: "podman-aws"
     customJenkinsLabels:
       - podman-aws-trivy
-    idleMinutes: 10
+    idleMinutes: 15
     containerCap: 2
     annotations:
       container.apparmor.security.beta.kubernetes.io/jnlp: unconfined
+    resources:
+      requests:
+        cpu: ""
+        memory: ""
+      limits:
+        cpu: ""
+        memory: ""
     # envVars:
     #   - name: AWS_WEB_IDENTITY_TOKEN_FILE
     #     value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
@@ -168,7 +178,12 @@ jenkins:
       containers:
         - name: jnlp
           resources:
+            requests:
+              cpu: "512m"
+              memory: "1024Mi"
             limits:
+              cpu: "4"
+              memory: "6144Mi"
               github.com/fuse: 1
       volumeMounts:
         - name: aws-token
@@ -215,9 +230,10 @@ jenkins:
 
 trivy:
   enabled: false
+  image:
+    tag: 0.35.0
   persistence:
     enabled: true
     size: 1Gi
   rbac:
     create: false
-    pspEnabled: false
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-istio-gateway
 description: KubeZero Umbrella Chart for Istio gateways
 type: application
-version: 0.8.2
+version: 0.9.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -17,6 +17,6 @@ dependencies:
     version: ">= 0.1.5"
     repository: https://cdn.zero-downtime.net/charts/
   - name: gateway
-    version: 1.14.3
+    version: 1.16.1
     repository: https://istio-release.storage.googleapis.com/charts
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
@@ -1,6 +1,6 @@
 # kubezero-istio-gateway
 
-![Version: 0.8.2](https://img.shields.io/badge/Version-0.8.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.9.0](https://img.shields.io/badge/Version-0.9.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero Umbrella Chart for Istio gateways
 
@@ -16,12 +16,12 @@ Installs Istio Ingress Gateways, requires kubezero-istio to be installed !
 
 ## Requirements
 
-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
-| https://istio-release.storage.googleapis.com/charts | gateway | 1.14.3 |
+| https://istio-release.storage.googleapis.com/charts | gateway | 1.16.1 |
 
 ## Values
 
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.14.3
+appVersion: 1.16.1
 description: Helm chart for deploying Istio gateways
 icon: https://istio.io/latest/favicons/android-192x192.png
 keywords:
@@ -9,4 +9,4 @@ name: gateway
 sources:
 - http://github.com/istio/istio
 type: application
-version: 1.14.3
+version: 1.16.1
@@ -106,6 +106,10 @@ spec:
       tolerations:
         {{- toYaml . | nindent 8 }}
       {{- end }}
+      {{- with .Values.topologySpreadConstraints }}
+      topologySpreadConstraints:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.volumes }}
       volumes:
         {{- toYaml . | nindent 8 }}
@@ -185,6 +185,9 @@
     "tolerations": {
       "type": "array"
     },
+    "topologySpreadConstraints": {
+      "type": "array"
+    },
     "networkGateway": {
       "type": "string"
     },
@@ -83,6 +83,8 @@ nodeSelector: {}
 
 tolerations: []
 
+topologySpreadConstraints: []
+
 affinity: {}
 
 # If specified, the gateway will act as a network gateway for the given network.
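The three hunks above thread topologySpreadConstraints through the deployment template, the values schema and the defaults. A minimal override might look like the sketch below; the zone topology key is the standard well-known label, but the selector label is an assumption, not taken from this chart:

```yaml
topologySpreadConstraints:
- maxSkew: 1
  topologyKey: topology.kubernetes.io/zone
  whenUnsatisfiable: ScheduleAnyway
  labelSelector:
    matchLabels:
      app: istio-ingressgateway  # assumed selector label
```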
@@ -1,6 +1,6 @@
 diff -tubr charts/gateway.orig/templates/deployment.yaml charts/gateway/templates/deployment.yaml
---- charts/gateway.orig/templates/deployment.yaml 2022-04-21 17:33:30.042035869 +0200
-+++ charts/gateway/templates/deployment.yaml 2022-04-21 18:17:15.130605952 +0200
+--- charts/gateway.orig/templates/deployment.yaml 2022-12-09 14:58:33.000000000 +0000
++++ charts/gateway/templates/deployment.yaml 2022-12-13 11:43:02.196667885 +0000
 @@ -11,6 +11,9 @@
 {{- if not .Values.autoscaling.enabled }}
 replicas: {{ .Values.replicaCount }}
@@ -30,8 +30,8 @@ diff -tubr charts/gateway.orig/templates/deployment.yaml charts/gateway/template
 {{- with .Values.nodeSelector }}
 nodeSelector:
 {{- toYaml . | nindent 8 }}
-@@ -98,3 +106,7 @@
-tolerations:
+@@ -102,3 +110,7 @@
+topologySpreadConstraints:
 {{- toYaml . | nindent 8 }}
 {{- end }}
 + {{- with .Values.volumes }}
@@ -39,8 +39,8 @@ diff -tubr charts/gateway.orig/templates/deployment.yaml charts/gateway/template
 + {{- toYaml . | nindent 8 }}
 + {{- end }}
 diff -tubr charts/gateway.orig/templates/service.yaml charts/gateway/templates/service.yaml
---- charts/gateway.orig/templates/service.yaml 2022-04-21 17:33:30.042035869 +0200
-+++ charts/gateway/templates/service.yaml 2022-04-21 17:33:41.801806959 +0200
+--- charts/gateway.orig/templates/service.yaml 2022-12-09 14:58:33.000000000 +0000
++++ charts/gateway/templates/service.yaml 2022-12-12 22:52:27.629670669 +0000
 @@ -38,7 +38,14 @@
 port: 15017
 targetPort: 15017
@@ -55,12 +55,12 @@ diff -tubr charts/gateway.orig/templates/service.yaml charts/gateway/templates/s
 + {{- end }}
 + {{- end }}
 {{- end }}
-selector:
-{{- include "gateway.selectorLabels" . | nindent 4 }}
+{{- if .Values.service.externalIPs }}
+externalIPs: {{- range .Values.service.externalIPs }}
 diff -tubr charts/gateway.orig/values.schema.json charts/gateway/values.schema.json
---- charts/gateway.orig/values.schema.json 2022-04-21 17:33:30.042035869 +0200
-+++ charts/gateway/values.schema.json 2022-04-21 17:52:51.007536238 +0200
+--- charts/gateway.orig/values.schema.json 2022-12-09 14:58:33.000000000 +0000
++++ charts/gateway/values.schema.json 2022-12-12 22:52:27.629670669 +0000
-@@ -47,6 +47,12 @@
+@@ -51,6 +51,12 @@
 "labels": {
 "type": "object"
 },
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-istio
 description: KubeZero Umbrella Chart for Istio
 type: application
-version: 0.8.4
+version: 0.9.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -13,16 +13,16 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: base
-    version: 1.14.3
+    version: 1.16.1
     repository: https://istio-release.storage.googleapis.com/charts
   - name: istiod
-    version: 1.14.3
+    version: 1.16.1
     repository: https://istio-release.storage.googleapis.com/charts
   - name: kiali-server
-    version: 1.54
+    version: "1.60.0"
    repository: https://kiali.org/helm-charts
    condition: kiali-server.enabled
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
@@ -1,6 +1,6 @@
 # kubezero-istio
 
-![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.9.0](https://img.shields.io/badge/Version-0.9.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero Umbrella Chart for Istio
 
@@ -16,14 +16,14 @@ Installs the Istio control plane
 
 ## Requirements
 
-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
-| https://istio-release.storage.googleapis.com/charts | base | 1.14.3 |
-| https://istio-release.storage.googleapis.com/charts | istiod | 1.14.3 |
-| https://kiali.org/helm-charts | kiali-server | 1.54 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://istio-release.storage.googleapis.com/charts | base | 1.16.1 |
+| https://istio-release.storage.googleapis.com/charts | istiod | 1.16.1 |
+| https://kiali.org/helm-charts | kiali-server | 1.60.0 |
 
 ## Values
 
@@ -32,7 +32,7 @@ Kubernetes: `>= 1.20.0`
 | global.defaultPodDisruptionBudget.enabled | bool | `false` | |
 | global.logAsJson | bool | `true` | |
 | global.priorityClassName | string | `"system-cluster-critical"` | |
-| global.tag | string | `"1.14.4-distroless"` | |
+| global.variant | string | `"distroless"` | |
 | istiod.meshConfig.accessLogEncoding | string | `"JSON"` | |
 | istiod.meshConfig.accessLogFile | string | `"/dev/stdout"` | |
 | istiod.meshConfig.tcpKeepalive.interval | string | `"60s"` | |
@@ -44,6 +44,8 @@ Kubernetes: `>= 1.20.0`
 | istiod.pilot.resources.requests.memory | string | `"128Mi"` | |
 | istiod.pilot.tolerations[0].effect | string | `"NoSchedule"` | |
 | istiod.pilot.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| istiod.pilot.tolerations[1].effect | string | `"NoSchedule"` | |
+| istiod.pilot.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | istiod.telemetry.enabled | bool | `false` | |
 | kiali-server.auth.strategy | string | `"anonymous"` | |
 | kiali-server.deployment.ingress_enabled | bool | `false` | |
@@ -1,22 +1,22 @@
-# Revision 128 = 1.14.3
+# Revision 148 = 1.16
 configmap: grafana-dashboards
 gzip: true
 folder: Istio
 condition: '.Values.istiod.telemetry.enabled'
 dashboards:
 - name: istio-control-plane
-  url: https://grafana.com/api/dashboards/7645/revisions/128/download
+  url: https://grafana.com/api/dashboards/7645/revisions/148/download
   tags:
   - Istio
 - name: istio-mesh
-  url: https://grafana.com/api/dashboards/7639/revisions/128/download
+  url: https://grafana.com/api/dashboards/7639/revisions/148/download
   tags:
   - Istio
 - name: istio-service
-  url: https://grafana.com/api/dashboards/7636/revisions/128/download
+  url: https://grafana.com/api/dashboards/7636/revisions/148/download
   tags:
   - Istio
 - name: istio-workload
-  url: https://grafana.com/api/dashboards/7630/revisions/128/download
+  url: https://grafana.com/api/dashboards/7630/revisions/148/download
   tags:
   - Istio
File diff suppressed because one or more lines are too long
@@ -7,7 +7,7 @@ export KIALI_VERSION=$(yq eval '.dependencies[] | select(.name=="kiali-server")
 helm dep update
 
 # Get matching istioctl
-[ -x istioctl ] && [ "$(./istioctl version --remote=false)" == $ISTIO_VERSION ] || { curl -sL https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istioctl-${ISTIO_VERSION}-linux-amd64.tar.gz | tar xz; chmod +x istioctl; }
+# [ -x istioctl ] && [ "$(./istioctl version --remote=false)" == $ISTIO_VERSION ] || { curl -sL https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istioctl-${ISTIO_VERSION}-linux-amd64.tar.gz | tar xz; chmod +x istioctl; }
 
 # Fetch dashboards from Grafana.com and update ZDT CM
 ../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
@@ -1,6 +1,5 @@
 global:
-  # hub: docker.io/istio
-  tag: 1.14.4-distroless
+  variant: distroless
 
   logAsJson: true
 
@@ -19,6 +18,8 @@ istiod:
     tolerations:
     - effect: NoSchedule
       key: node-role.kubernetes.io/master
+    - effect: NoSchedule
+      key: node-role.kubernetes.io/control-plane
 
     resources:
       requests:
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-lib
 description: KubeZero helm library - common helm functions and blocks
 type: library
-version: 0.1.5
+version: 0.1.6
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -13,7 +13,7 @@ Common naming functions
 {{- if .subchart }}
 {{- $name = default .subchart .Values.nameOverride -}}
 {{- end -}}
-{{- if contains $name .Release.Name -}}
+{{- if or (contains $name .Release.Name) (contains $name (printf "%s-%s" "kubezero" .Release.Name)) -}}
 {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
 {{- else -}}
 {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
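The widened check lets a subchart whose name already carries the kubezero- prefix collapse to the bare release name. A worked example with assumed inputs, written as comments:

```yaml
# Assumed inputs: .Release.Name = "logging", $name = "kubezero-logging"
# old: contains "kubezero-logging" "logging"           -> false -> fullname "logging-kubezero-logging"
# new: contains "kubezero-logging" "kubezero-logging"  -> true  -> fullname "logging"
```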
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-logging
 description: KubeZero Umbrella Chart for complete EFK stack
 type: application
-version: 0.8.3
+version: 0.8.4
 appVersion: 1.6.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -17,7 +17,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: eck-operator
     version: 2.4.0
@@ -29,4 +29,4 @@ dependencies:
   - name: fluent-bit
     version: 0.20.6
     condition: fluent-bit.enabled
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
@@ -1,6 +1,6 @@
 # kubezero-logging
 
-![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
+![Version: 0.8.4](https://img.shields.io/badge/Version-0.8.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
 
 KubeZero Umbrella Chart for complete EFK stack
 
@@ -14,14 +14,14 @@ KubeZero Umbrella Chart for complete EFK stack
 
 ## Requirements
 
-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
 | | eck-operator | 2.4.0 |
 | | fluent-bit | 0.20.6 |
 | | fluentd | 0.3.9 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
 
 ## Changes from upstream
 ### ECK
@@ -62,6 +62,8 @@ Kubernetes: `>= 1.20.0`
 | eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | eck-operator.tolerations[0].effect | string | `"NoSchedule"` | |
 | eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| eck-operator.tolerations[1].effect | string | `"NoSchedule"` | |
+| eck-operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | elastic_password | string | `""` | |
 | es.nodeSets | list | `[]` | |
 | es.prometheus | bool | `false` | |
charts/kubezero-logging/dashboards-es.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+configmap: grafana-dashboards-es
+gzip: true
+folder: Logging
+condition: '.Values.es.prometheus'
+dashboards:
+- name: elasticsearch-logging
+  url: https://grafana.com/api/dashboards/266/revisions/4/download
+  tags:
+  - ElasticSearch
@@ -1,14 +1,10 @@
 configmap: grafana-dashboards
 gzip: true
 folder: Logging
-condition: '.Values.es.prometheus'
 dashboards:
 - name: fluent-logging
   url: https://grafana.com/api/dashboards/7752/revisions/4/download
   #url: https://grafana.com/api/dashboards/13042/revisions/2/download
   tags:
-  - Fluent
-- name: elasticsearch-logging
-  url: https://grafana.com/api/dashboards/266/revisions/4/download
-  tags:
-  - ECK
+  - fluentd
+  - fluent-bit
@@ -0,0 +1,15 @@
+{{- if .Values.es.prometheus }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboards-es" | trunc 63 | trimSuffix "-" }}
+  namespace: {{ .Release.Namespace }}
+  labels:
+    grafana_dashboard: "1"
+    {{- include "kubezero-lib.labels" . | nindent 4 }}
+  annotations:
+    k8s-sidecar-target-directory: Logging
+binaryData:
+  elasticsearch-logging.json.gz:
+    H4sIAAAAAAAC/+2dbXObSBKAv/tXTFG7VXGd4ghJfquqfNg4TtZbcTYbe3N1t5tSYRhJnBGwDDj2qny//XpmQLwNCDlYluT+EEWmYV66Z3qeaRo02yFEGw5t149Cpv2T/AF/EzITnyBxjSmFo9rbi+Gnz7+en17+fPr7hdZJxI5xRR0u9wNvSsMJjVgqtCgzA9sPbc/lp6SC8M4XhVpGaDAvCkyaynwnGtvumVVVqJR/jJv1KT1DnHAPn187sksB/SuyA6roVFL/ODBGhmukhduW8nCihPdFwQ0NWNy7wd5gT48b0VFX5xsuKKtcmT9RVpU9nKmovg6VSu1KZbplNaqq1Pe6e90H9I3Z7tihLDTCcpUXClm5l3NzGq7rwbkg5faU1WuOzUJu3a87ccM0atmhceXwGsIgouLY2KWhGE+9gwN5gOv20vOc0PbhsC4OTmyLnnhuGHgOr2FkOExeLrriRo4j/nJs91qMKNmsgI5ghE14i/tdqTwt8L4pxpzpOY7hM5otWwgm1B5PwkwdcphzfabFZIuSxRnmhF7aU+pFxWuT6rzgjWFejwMvcq2MPvKnfDGcqNSmVJxvgTgejK+MF/vdDtEPex0y2O+Q7t7x4a7WUZzW2wexDmonXX7a0XHFabyU/YH8B6XtapmTvuZalRnf6nErzlIMg1QWBF6g7PHIC6YG16bmei7Nlzg2ojGdj7z54alxm2hQ73YLfZvabiIsitjE+6ZqA59REz6iPMf6wH0rW3TWuRFcU2En3tPMSfe59s9HmQbtzHdNDPCj/CE3pMGN4ahGVn4GZDTh+zClL6U/0Ktk5QGV12jGQ9xw3ZHQIyG9DQvjhsRiXlVOcN9pVnhguOMGhffyhVcOSxgHb2FkfvJAc6w8GDSuRSE89yxRv+m5LjVDamml8y55ixSK9z0Wjuzb3FKaFbwD73Vh/y2K3+/+WDgHPJXyWnG89lKhqnPwXYtNN4L5KOYPtL2s1FB2TPv46ieF0Jtf2FDnDHwkN1HxYHANQ1QxV0e245xwpybsz/1OXwfvpB/Bx9Exdzz6Uck/jXiD1FOQ15ItTxbX457xuF8qKDvjK2epcFpQZjR1y6YKjWBMwwZWoLe+aBMMyheUDU0nYjCdh3ytjdgs/vP1n9oP8dc/tfvdskFSbxjCMjNkNLApK5+WuIp3hhkKVfRKpzh0TF3r3by8ciHgxAPbVMtggEoaVAwaaD1fwQ+63YZjZu42mXCFnV5BxXYoFg3tRGqGSJ0VToIJwYcZdUPlMpIgkYJ+Ut+SnXHgLH5UnNJsynlcAdrr6tn2/vPp6cdqD6d1taUc6ML6/n364cOv/6qpUG+5ws+nb2tq6zV1J+KKZF9hRoGw746ilTkOMxzbYCcJKs3y0/nKmC/MBYhhkw/UHYcTsVSUZFS56jdkH+7mysUuWv8H/fwiL6Zs2YdCd8fuT+wyhiuVWzRuxhWSRKlqKQfw06kf3tXI/0MDr0IMzq5KYrsVkjoKg91GleMXI4UtdOZ8fVAbkku+2ZY0fxO4KuFDaYHVfArjwg2NsRJ6fX5xYFh2xEvfL8vUDQVzWTSgYjUZOV7Bkckl4VfYtQVgmwa+SkyWGp9k5lZnxR5jKccxr63KI+WrU+xVHlad0h8V6qrb8CxgHpNWuY4YiAb5gyFsAlVDgq+dPrU+SFZSLGPL4kaZNcjr16S7S/6hfzddqNGgxB56dyF8VAy97ySQfpFAOs10plTZOqmrau400teb1vWlHmO9XfJyrbSmdAGNVHbSXGXNILewknBFvJO7tNIGk8suJvYoVAtjNC7A8DyMNiuurUDIVtXC6wVheasgF9Zhws9mNI0cI7RvqFa9YWrI4vkoqxDdGrc2K7f7KjKvpdsrqoCbMF5+uaaKG7w4sqC4KsaMogfOksQf5e0w54/c0a+VWrgzbpssvzXxrVwwv9wFLvTGbwxWiu1k2KtElhn60hSSih3xYmeQ9gOKCMLWO6K8TPZDKaruyI7KelVbieVDuhVB2wfGdMtQoozV9g95lOMYsKw2pltPbxjUXUVQt8CHGNXFqC5GdbcsquvC0GNDN5pe0eCxYrsNwbi3LnHdiqDuR66qp43lthFyRE5ATmiXE44RExATEBO2GRO460RUaIwKfD4TF3kBeQF5QRFXOEBgQGBAYNhmYPCpa8EsHIIHvUZoaAQNn6TKiFDZBnHDTqHHoDqfGqWUdHn0DIxixI90KMSfvW9nVlHER/NlrKXsElvKt9PykqSzkwP1wweLc+ozD520n1GPmISYVItJOmLSY2ISjIUL1aqO+NQ+Ps2XBJHYYckkDSVg7Q1WgViKZIoHEVbtBH/cdHyux1qy6nAl8yOpyp8+VX/1WJXpPpFK05CdMvq5yOukLXRa6nHEJmnwqoT2R8yDfyCvxNnzeVAJxHo+q1r+B8rlv999SEq9wrHJjHqFIE2oVwhbyadXlCsToRSCgKvjwrZofba94spGyfa1njrJtS9Z+vtT7SvgIZdvXx5Hq0q4bys9vNyU1vPDYe3zYPnzo2GsO/WiJ8Lur/8Hx/gXWPCeLrxQXA1nM96k+/tHeIBtLVJ7Tz79TiLGh3SzHN8p+0wZ4FG8pKknfZwGrJj2mAVczAJuMcU3nmPVybEV1m6YI1vamc9Xhm5VfmzBgT/bPF9kJQUr6chKyErISnlWmtLpMGLUGl7dhZQhLq0ZLp3TqRfcITFtCTGJSVbDS9LcWruggbSEtLQ8LfWQlpCWkJYSWtLJSwLENGJD3wgnQ+PGsB0+6Ou4qYhN5FW2CDFulrl8/agLdDKb8c6sD4DVW1NkR8znBiga2KysVbvi9p/yfmJPP+iQXrcLH4f8fmLvUHH3TL5gaFxzw7q7d7TUUvwYPekP+K3RXvwBXek9sCvHTU333Xz81mbXSMfbFU+MXLsupshN/qBwIuLxAjxehxvNd+D+p3ijuc3twH4L24HeALcDuB1Yu+1A/4neQwaej/JEK9u1bFP+T295Krv4MjS9qPGt5z9+SAD86y7GVVcVV33rAaTBkGckMR3hNl0jiORD68a2ovwrmhAivxO9Usu/YtrqGAxDrdvJVt0W2Gof0QrRakPQagWR1gVsFXMQqNtiD0As8oo8ErwRpLdV0dtZwmwOmNI17zD2t/nYVoNsHxRWRlxDXHuaUNgAeQ15DUNhVbjGqBGYk+FfEQ3uMBC29ih1IcyFwa/nEfyS1qYY+0KYWo/Y1wDvKyJMYfCrGU21H/pCWNtEWMOoF0a9ENQQ1FYZ9cKHZ5HTNuVxkMcPemUQyvLM2hds4cOza5DkJdCWvPhmhxMSUN+xTYPtYrzrOfwOYeN8sJZBq+bnCJGzkLMqE+3x7iKCFoJWVTyM05ZFHQqaXY66MGD1tAQmjIZp9s/mTiOm2SNZrRlZHSBZIVkhWVWQ1ZRCASx+KwlgFqu7H4h0tV50xY2HcPVs4OpczFUkqy1/GUY6vw3XIvJ+cNZzPsEvWCFYKnLYkCuRK9eOKw+e/tboyKaOxScxf8uwF9wNGbgqfNFwniX768GS77itCDcWkcYi3FjIkvjKYSRJjNGtCqX6yFLIUshSVTG6FKjojW3y1ZZh1v4modXcbAhWWx+kO01sjXE6pKv1oCvMLUO6wjdXKCJV4gFI0zAndCg/MVK1tnc9f+O2IsJMGKN6HjGqi6KZkaGQoZ6IoTCLDBkKGaoqQpUBKQxRbSZTYYwKY1TIV8hXT8RXh8hXyFfIV4v5amKHSFYbRVZgMYSq7Yeqn2186hF5al146gh5CnkKeWoxT01txvCdEptFVNxmFJnqGTzxKAyNVLXlTzye8Fnd/gOOel9/tk84tvKWfrzpiRC5KWn5T/kTlUA+Xggu63tf1Y/J+auiyflPScamg6+Ik1v9Xv3LcsMRIzE49yRchTc7kauQq5q9kyxhKwvhalPg6jx+LRmyFbIVstV2hOguy3MZ30O2FjSJQTqEyU15v+0KYBLA8b83U/7qseGEGv4wYoCOm/1IJ3lJeFcI78q23t79WdlB/HnLbXyuk9sacRFDcWsATxiJQ3hCeCpF4jhBjc0h37tR8aTY9vwqAMDUbDY2tzdR7v2J/N1LBKmtT497f4KpcQhSawFSPSQpJCkkqQYktXW3MZ8DUZUwAYEKb2IiTG3YTcxfvpwvunu5E7dQY+aETo0vNGCyFfpAHg7vZFmWEVyLwmBtGKeDUDt1DBba5gU1AnOizYsL6dTnM9Ydz+cVLMoszIze1OKASc4XPv5Lus9Q0Cy/Po1Vro/eigGjT/Pz+yYumwtUtNsEOgVS5eERFhFYxCz6k6Pgm3SinSULUUYI7iy0FRfFniNdvDIyz5fvK8rP/4LPYtSRPKP2xHMN7RddQ0ZJIKtyQQ+rrd+tro3Llq9NxZlq41eZv5WO7VfXtd92Xfqkul+Tlus6qK7roO26dKu6X1aurh0FDGni0dZ42HYIjKYO0acdsg//9EmHHMC/XA2cwzg8FaZyShos9Oamm3dvWV+1MudyIil6Gd9ili/JuBalbkV1Q4konPLjIoawwQgj0Hj8965az3ru6JjeFgA5YcDseeDghZbZb0kbtLy01Fh+TH1ybFrZm4wgYvRSFpRd1NfG5nkfl5r8IydPhb0L5yfmdgunL2trvqOL/IW7uHu5jcMh8IAh0Ft2CFzAhsoil7xXzQcC34VZckvVxtTnpSVvEugQXux22H4n4fh7CbK20F6MsCO5sYZJ9e3lfEGGvXF8TMtd5tuwfwzSi2OVDBO4y6KclgEyTc/wUg6esnCThQ9Y/KbZK7KSdCnXepnvydr6NekDn+BlxKyuJVvwQbbgbC29QfaPdB3WDq1se5O25NT3tyeCU1oUmvHGI9nX5PccQnYz37jIfYtFmRnYfryl0t6eMdiO3JHcleSa3hEZsmGEW5ZkZunO/f8BZCXhv9ATAQA=
+{{- end }}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -29,4 +29,5 @@ curl -L -s -o - https://github.com/fluent/helm-charts/releases/download/fluentd-
 patch -i fluentd.patch -p0 --no-backup-if-mismatch
 
 # Fetch dashboards from Grafana.com and update ZDT CM
-../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
+../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/fluent-bit/grafana-dashboards.yaml
+../kubezero-metrics/sync_grafana_dashboards.py dashboards-es.yaml templates/eck/grafana-dashboards.yaml
@@ -7,6 +7,8 @@ eck-operator:
   tolerations:
   - key: node-role.kubernetes.io/master
     effect: NoSchedule
+  - key: node-role.kubernetes.io/control-plane
+    effect: NoSchedule
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""
 
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-metrics
 description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
 type: application
-version: 0.8.5
+version: 0.8.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -15,19 +15,20 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: kubezero-lib
-    version: ">= 0.1.5"
+    version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
+  # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
   - name: kube-prometheus-stack
-    version: 40.0.0
+    version: 43.2.0
     # Switch back to upstream once all alerts are fixed eg. etcd gpcr
     # repository: https://prometheus-community.github.io/helm-charts
   - name: prometheus-adapter
-    version: 3.4.0
+    version: 3.5.0
     repository: https://prometheus-community.github.io/helm-charts
     condition: prometheus-adapter.enabled
   - name: prometheus-pushgateway
-    version: 1.18.2
+    version: 2.0.2
     # Switch back to upstream once namespaces are supported
-    # repository: https://prometheus-community.github.io/helm-charts
+    repository: https://prometheus-community.github.io/helm-charts
     condition: prometheus-pushgateway.enabled
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
@@ -1,6 +1,6 @@
 # kubezero-metrics
 
-![Version: 0.8.5](https://img.shields.io/badge/Version-0.8.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.8.9](https://img.shields.io/badge/Version-0.8.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
 
 KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
 
@@ -14,14 +14,14 @@ KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all
 
 ## Requirements
 
-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| | kube-prometheus-stack | 40.0.0 |
-| | prometheus-pushgateway | 1.18.2 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
-| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.4.0 |
+| | kube-prometheus-stack | 43.2.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.5.0 |
+| https://prometheus-community.github.io/helm-charts | prometheus-pushgateway | 2.0.2 |
 
 ## Values
 
|
|||||||
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].equal[0] | string | `"namespace"` | |
|
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].equal[0] | string | `"namespace"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].source_matchers[0] | string | `"alertname = InfoInhibitor"` | |
|
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].source_matchers[0] | string | `"alertname = InfoInhibitor"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].target_matchers[0] | string | `"severity = info"` | |
|
| kube-prometheus-stack.alertmanager.config.inhibit_rules[2].target_matchers[0] | string | `"severity = info"` | |
|
||||||
|
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].source_matchers[0] | string | `"alertname = ClusterAutoscalerNodeGroupsEnabled"` | |
|
||||||
|
| kube-prometheus-stack.alertmanager.config.inhibit_rules[3].target_matchers[0] | string | `"alertname =~ \"KubeCPUOvercommit|KubeMemoryOvercommit\""` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.group_by[0] | string | `"severity"` | |
|
| kube-prometheus-stack.alertmanager.config.route.group_by[0] | string | `"severity"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.group_by[1] | string | `"clusterName"` | |
|
| kube-prometheus-stack.alertmanager.config.route.group_by[1] | string | `"clusterName"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.group_interval | string | `"5m"` | |
|
| kube-prometheus-stack.alertmanager.config.route.group_interval | string | `"5m"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"30s"` | |
|
| kube-prometheus-stack.alertmanager.config.route.group_wait | string | `"10s"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.repeat_interval | string | `"6h"` | |
|
| kube-prometheus-stack.alertmanager.config.route.repeat_interval | string | `"4h"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.routes[0].matchers[0] | string | `"alertname = Watchdog"` | |
|
| kube-prometheus-stack.alertmanager.config.route.routes[0].matchers[0] | string | `"severity = none"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.routes[0].receiver | string | `"null"` | |
|
| kube-prometheus-stack.alertmanager.config.route.routes[0].receiver | string | `"null"` | |
|
||||||
| kube-prometheus-stack.alertmanager.config.route.routes[1].matchers[0] | string | `"alertname = InfoInhibitor"` | |
|
|
||||||
| kube-prometheus-stack.alertmanager.config.route.routes[1].receiver | string | `"null"` | |
|
|
||||||
| kube-prometheus-stack.alertmanager.enabled | bool | `false` | |
|
| kube-prometheus-stack.alertmanager.enabled | bool | `false` | |
|
||||||
| kube-prometheus-stack.coreDns.enabled | bool | `true` | |
|
| kube-prometheus-stack.coreDns.enabled | bool | `true` | |
|
||||||
| kube-prometheus-stack.defaultRules.create | bool | `false` | |
|
| kube-prometheus-stack.defaultRules.create | bool | `false` | |
|
||||||
@ -127,6 +127,8 @@ Kubernetes: `>= 1.20.0`
|
|||||||
| kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
| kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
| kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | |
|
| kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| kube-prometheus-stack.kube-state-metrics.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| kube-prometheus-stack.kube-state-metrics.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
|
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
|
||||||
| kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | |
|
| kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | |
|
||||||
| kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | |
|
| kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | |
|
||||||
@ -168,10 +170,11 @@ Kubernetes: `>= 1.20.0`
|
|||||||
| kube-prometheus-stack.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `false` | |
|
| kube-prometheus-stack.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `false` | |
|
||||||
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
|
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
|
||||||
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
|
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
|
||||||
| kube-prometheus-stack.prometheus.prometheusSpec.walCompression | bool | `true` | |
|
|
||||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | |
|
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
|
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
|
||||||
| kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | |
|
| kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | |
|
||||||
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
@ -180,6 +183,8 @@ Kubernetes: `>= 1.20.0`
|
|||||||
| kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"32Mi"` | |
|
| kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"32Mi"` | |
|
||||||
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
|
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| kube-prometheus-stack.prometheusOperator.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| kube-prometheus-stack.prometheusOperator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| prometheus-adapter.enabled | bool | `true` | |
|
| prometheus-adapter.enabled | bool | `true` | |
|
||||||
| prometheus-adapter.logLevel | int | `1` | |
|
| prometheus-adapter.logLevel | int | `1` | |
|
||||||
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||||
@ -200,6 +205,8 @@ Kubernetes: `>= 1.20.0`
|
|||||||
| prometheus-adapter.rules.resource.window | string | `"5m"` | |
|
| prometheus-adapter.rules.resource.window | string | `"5m"` | |
|
||||||
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
|
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||||
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||||
|
| prometheus-adapter.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||||
|
| prometheus-adapter.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||||
| prometheus-pushgateway.enabled | bool | `false` | |
|
| prometheus-pushgateway.enabled | bool | `false` | |
|
||||||
| prometheus-pushgateway.serviceMonitor.enabled | bool | `true` | |
|
| prometheus-pushgateway.serviceMonitor.enabled | bool | `true` | |
|
||||||
|
|
||||||
|
@@ -1,4 +1,5 @@
 annotations:
+  artifacthub.io/license: Apache-2.0
   artifacthub.io/links: |
     - name: Chart Source
       url: https://github.com/prometheus-community/helm-charts
@@ -6,20 +7,20 @@ annotations:
       url: https://github.com/prometheus-operator/kube-prometheus
   artifacthub.io/operator: "true"
 apiVersion: v2
-appVersion: 0.59.1
+appVersion: 0.61.1
 dependencies:
 - condition: kubeStateMetrics.enabled
   name: kube-state-metrics
   repository: https://prometheus-community.github.io/helm-charts
-  version: 4.18.*
+  version: 4.24.*
 - condition: nodeExporter.enabled
   name: prometheus-node-exporter
   repository: https://prometheus-community.github.io/helm-charts
-  version: 4.2.*
+  version: 4.8.*
 - condition: grafana.enabled
   name: grafana
   repository: https://grafana.github.io/helm-charts
-  version: 6.38.*
+  version: 6.48.*
 description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
   and Prometheus rules combined with documentation and scripts to provide easy to
   operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
@@ -34,8 +35,6 @@ kubeVersion: '>=1.16.0-0'
 maintainers:
 - email: andrew@quadcorps.co.uk
   name: andrewgkew
-- email: cedric@desaintmartin.fr
-  name: desaintmartin
 - email: gianrubio@gmail.com
   name: gianrubio
 - email: github.gkarthiks@gmail.com
@@ -46,9 +45,11 @@ maintainers:
   name: scottrigby
 - email: miroslav.hadzhiev@gmail.com
   name: Xtigyro
+- email: quentin.bisson@gmail.com
+  name: QuentinBisson
 name: kube-prometheus-stack
 sources:
 - https://github.com/prometheus-community/helm-charts
 - https://github.com/prometheus-operator/kube-prometheus
 type: application
-version: 40.0.0
+version: 43.2.0
@ -80,6 +80,67 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
|
|||||||
|
|
||||||
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
|
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
|
||||||
|
|
||||||
|
### From 42.x to 43.x
|
||||||
|
|
||||||
|
This version upgrades Prometheus-Operator to v0.61.1, Prometheus to v2.40.5 and Thanos to v0.29.0.
|
||||||
|
|
||||||
|
Run these commands to update the CRDs before applying the upgrade.
|
||||||
|
|
||||||
|
```console
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### From 41.x to 42.x
|
||||||
|
|
||||||
|
This includes the overridability of container registry for all containers at the global level using `global.imageRegistry` or per container image. The defaults have not changed but if you were using a custom image, you will have to override the registry of said custom container image before you upgrade.
|
||||||
|
|
||||||
|
For instance, the prometheus-config-reloader used to be configured as follow:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
image:
|
||||||
|
repository: quay.io/prometheus-operator/prometheus-config-reloader
|
||||||
|
tag: v0.60.1
|
||||||
|
sha: ""
|
||||||
|
```
|
||||||
|
|
||||||
|
But it now moved to:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
image:
|
||||||
|
registry: quay.io
|
||||||
|
repository: prometheus-operator/prometheus-config-reloader
|
||||||
|
tag: v0.60.1
|
||||||
|
sha: ""
|
||||||
|
```
|
||||||
|
|
||||||
|
### From 40.x to 41.x
|
||||||
|
|
||||||
|
This version upgrades Prometheus-Operator to v0.60.1, Prometheus to v2.39.1 and Thanos to v0.28.1.
|
||||||
|
This version also upgrades the Helm charts of kube-state-metrics to 4.20.2, prometheus-node-exporter to 4.3.0 and Grafana to 6.40.4.
|
||||||
|
|
||||||
|
Run these commands to update the CRDs before applying the upgrade.
|
||||||
|
|
||||||
|
```console
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
|
||||||
|
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
This version splits kubeScheduler recording and altering rules in separate config values.
|
||||||
|
Instead of `defaultRules.rules.kubeScheduler` the 2 new variables `defaultRules.rules.kubeSchedulerAlerting` and `defaultRules.rules.kubeSchedulerRecording` are used.
|
||||||
|
|
||||||
### From 39.x to 40.x
|
### From 39.x to 40.x
|
||||||
|
|
||||||
This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0.
|
This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0.

@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 9.1.4
+appVersion: 9.3.1
 description: The leading tool for querying and visualizing time series and metrics.
 home: https://grafana.net
 icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
@ -19,4 +19,4 @@ name: grafana
 sources:
 - https://github.com/grafana/grafana
 type: application
-version: 6.38.0
+version: 6.48.0

@ -104,6 +104,7 @@ This version requires Helm >= 3.1.0.
 | `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
 | `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
 | `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
+| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` |
 | `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` |
 | `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` |
 | `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` |
@ -134,6 +135,7 @@ This version requires Helm >= 3.1.0.
 | `dashboards` | Dashboards to import | `{}` |
 | `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` |
 | `grafana.ini` | Grafana's primary configuration | `{}` |
+| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` |
 | `ldap.enabled` | Enable LDAP authentication | `false` |
 | `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` |
 | `ldap.config` | Grafana's LDAP configuration | `""` |
@ -150,6 +152,15 @@ This version requires Helm >= 3.1.0.
 | `sidecar.resources` | Sidecar resources | `{}` |
 | `sidecar.securityContext` | Sidecar securityContext | `{}` |
 | `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` |
+| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` |
+| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` |
+| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` |
+| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
+| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
+| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` |
+| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
+| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
 | `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
 | `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
 | `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
@ -166,21 +177,30 @@ This version requires Helm >= 3.1.0.
 | `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
 | `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
 | `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
-| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
 | `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` |
+| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` |
+| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
 | `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
 | `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` |
 | `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
 | `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
 | `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` |
-| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
 | `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
 | `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` |
 | `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
+| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` |
 | `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
 | `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
-| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` |
+| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
 | `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
+| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` |
+| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
+| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` |
 | `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
 | `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
 | `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
@ -190,6 +210,7 @@ This version requires Helm >= 3.1.0.
 | `serviceAccount.autoMount` | Automount the service account token in the pod| `true` |
 | `serviceAccount.annotations` | ServiceAccount annotations | |
 | `serviceAccount.create` | Create service account | `true` |
+| `serviceAccount.labels` | ServiceAccount labels | `{}` |
 | `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` |
 | `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` |
 | `rbac.create` | Create and use RBAC resources | `true` |
@ -319,6 +340,14 @@ dashboards:
       gnetId: 2
       revision: 2
       datasource: Prometheus
+    loki-dashboard-quick-search:
+      gnetId: 12019
+      revision: 2
+      datasource:
+      - name: DS_PROMETHEUS
+        value: Prometheus
+      - name: DS_LOKI
+        value: Loki
     local-dashboard:
       url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
 ```
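
The new `sidecar.alerts.*` values added above follow the same pattern as the existing dashboards and datasources sidecars. A minimal values sketch enabling the alerts sidecar with its documented defaults (label `grafana_alert`, searching all namespaces via `ALL`):

```yaml
sidecar:
  alerts:
    enabled: true
    label: grafana_alert
    searchNamespace: ALL
```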