Compare commits
346 Commits
Author | SHA1 | Date |
---|---|---|
Stefan Reimer | 40760d4a8e | |
Stefan Reimer | a488b14f97 | |
Stefan Reimer | 7cd1cd0c5e | |
Stefan Reimer | 48c816b32c | |
Renovate Bot | e0a24a0af9 | |
Renovate Bot | d0ed102d57 | |
Stefan Reimer | f8e7a85d9c | |
Stefan Reimer | 8bd713c1c7 | |
Stefan Reimer | 73d457d1b9 | |
Stefan Reimer | 46ccd445e0 | |
Stefan Reimer | 3c8a2d7dbd | |
Stefan Reimer | 229f5bc759 | |
Stefan Reimer | 0060ec1ed1 | |
Stefan Reimer | f6b54cde36 | |
Stefan Reimer | b9ee65d128 | |
Stefan Reimer | 76cc875990 | |
Stefan Reimer | 4a54fde888 | |
Renovate Bot | 2957b898d9 | |
Renovate Bot | 42d5000fe0 | |
Stefan Reimer | e7a66a584b | |
Renovate Bot | 8994289608 | |
Renovate Bot | c93b4c8b52 | |
Renovate Bot | 8d27fc22a0 | |
Stefan Reimer | 7eba80b54d | |
Renovate Bot | d66cdb42b8 | |
Stefan Reimer | 9cfeaec3a8 | |
Renovate Bot | 7bac355303 | |
Renovate Bot | dedfd1f7a3 | |
Stefan Reimer | 193967f600 | |
Stefan Reimer | 5be0f7087e | |
Stefan Reimer | b9e52bc2d9 | |
Stefan Reimer | 2cdb30b178 | |
Renovate Bot | 828c467d37 | |
Renovate Bot | dbd1ade98c | |
Stefan Reimer | 730020b329 | |
Stefan Reimer | 1caa01b28b | |
Stefan Reimer | c91d570857 | |
Stefan Reimer | da0c33b02b | |
Stefan Reimer | 5c6fd9bd2c | |
Stefan Reimer | 995d159d3e | |
Renovate Bot | c9dc123eff | |
Stefan Reimer | 5237b002b4 | |
Stefan Reimer | 61d373af7a | |
Stefan Reimer | 012c26d3d6 | |
Stefan Reimer | 945642a551 | |
Stefan Reimer | 9d835e4385 | |
Stefan Reimer | c057f35547 | |
Stefan Reimer | b29774d6d5 | |
Renovate Bot | e748303864 | |
Renovate Bot | 3f8a2c929c | |
Stefan Reimer | 7a80650d9c | |
Stefan Reimer | 75fc295066 | |
Stefan Reimer | 705f36f9aa | |
Renovate Bot | aa597a4970 | |
Renovate Bot | 0e4ed20972 | |
Renovate Bot | 773f968d90 | |
Renovate Bot | c54c9d78c4 | |
Stefan Reimer | 8b7b1ec8fa | |
Stefan Reimer | e2770079eb | |
Renovate Bot | b2d8a11854 | |
Stefan Reimer | 1bdbb7c538 | |
Stefan Reimer | 1350500f7f | |
Stefan Reimer | 1cb0ff2c0d | |
Stefan Reimer | 734f19010f | |
Stefan Reimer | 3013c39061 | |
Stefan Reimer | ca14178e94 | |
Stefan Reimer | 4b4431919a | |
Stefan Reimer | 32e71b4129 | |
Stefan Reimer | 6b7746d3df | |
Stefan Reimer | 52de70a4a8 | |
Renovate Bot | f8605e4b07 | |
Renovate Bot | e8204779a5 | |
Renovate Bot | 9a56c99ee5 | |
Stefan Reimer | 5116e52bc9 | |
Stefan Reimer | 26d59f63da | |
Stefan Reimer | 8c2ef9cf2c | |
Stefan Reimer | 9fed97db49 | |
Stefan Reimer | 588e50f56e | |
Stefan Reimer | 908055bd36 | |
Renovate Bot | a05e6286cc | |
Renovate Bot | 7b153ac7cc | |
Renovate Bot | 3e1d8e9c3e | |
Stefan Reimer | 78639b623a | |
Stefan Reimer | 4e9c147b7e | |
Stefan Reimer | 64d76c283a | |
Renovate Bot | 71f909e49e | |
Stefan Reimer | ed4a47dcec | |
Stefan Reimer | 3ab37e7a7b | |
Stefan Reimer | 798c3cba57 | |
Renovate Bot | 3b536f7c44 | |
Renovate Bot | 69e132c857 | |
Stefan Reimer | 53f0bbffb6 | |
Stefan Reimer | b0a6326a09 | |
Stefan Reimer | 358042d38b | |
Stefan Reimer | 22b774c939 | |
Renovate Bot | 71061475c8 | |
Stefan Reimer | 3ea16b311b | |
Stefan Reimer | 46e115e4f5 | |
Stefan Reimer | e55f986de8 | |
Stefan Reimer | 9ed2dbca96 | |
Renovate Bot | fcd2192cb4 | |
Renovate Bot | 8aa50e4129 | |
Renovate Bot | d9146abf72 | |
Renovate Bot | 7d354402d6 | |
Renovate Bot | 91a0034b26 | |
Renovate Bot | 48e381cb0f | |
Renovate Bot | b98dc98e81 | |
Stefan Reimer | 16fab2e0a0 | |
Stefan Reimer | 3b2f83c124 | |
Stefan Reimer | e36b096a46 | |
Stefan Reimer | 7628debe0c | |
Stefan Reimer | 72c585b8ef | |
Stefan Reimer | d8a73bbb73 | |
Stefan Reimer | 21e5417331 | |
Renovate Bot | 2dc58765e7 | |
Renovate Bot | cfda9b6a92 | |
Renovate Bot | 48b1d08cc6 | |
Stefan Reimer | 18b75af746 | |
Stefan Reimer | d4c67997ae | |
Renovate Bot | 4628d1e1e7 | |
Renovate Bot | 1a0bd7f312 | |
Stefan Reimer | 81c2f24156 | |
Stefan Reimer | 18a4f3e517 | |
Renovate Bot | 8fde71babb | |
Stefan Reimer | ccf8a0788d | |
Stefan Reimer | c96b4a4ab9 | |
Stefan Reimer | 46a4435fad | |
Stefan Reimer | 81f599dbb1 | |
Renovate Bot | 2b0ab96344 | |
Renovate Bot | 27009c7926 | |
Stefan Reimer | 444888ad12 | |
Stefan Reimer | b34c7bd392 | |
Renovate Bot | c66cf3bde2 | |
Stefan Reimer | 7b3f1fe40e | |
Stefan Reimer | 61454581c4 | |
Stefan Reimer | c2a495a6a6 | |
Renovate Bot | 0051451e19 | |
Renovate Bot | c73a3b7007 | |
Renovate Bot | 14030824b1 | |
Stefan Reimer | 5224947818 | |
Renovate Bot | 2850ce02d8 | |
Stefan Reimer | a04b26b851 | |
Stefan Reimer | cf3d5726e2 | |
Stefan Reimer | a56a88f622 | |
Renovate Bot | 8e3331e257 | |
Stefan Reimer | f0cb8e6cc7 | |
Stefan Reimer | a39542e387 | |
Renovate Bot | 8b54524c58 | |
Renovate Bot | fdcf82065f | |
Stefan Reimer | 683b7623f4 | |
Stefan Reimer | ee7161651e | |
Stefan Reimer | 011fbc3062 | |
Stefan Reimer | ccaebf4dad | |
Stefan Reimer | 3fb4567ad6 | |
Stefan Reimer | b2cf56367d | |
Stefan Reimer | 8a7ff1f8a1 | |
Stefan Reimer | 3dfd8bd4e4 | |
Stefan Reimer | 674c3cbc7d | |
Stefan Reimer | c1df995447 | |
Stefan Reimer | 0005b7fdac | |
Stefan Reimer | 96bf297d78 | |
Stefan Reimer | 20ab3bc8c7 | |
Stefan Reimer | a569a6f6b0 | |
Stefan Reimer | 1c076eab61 | |
Stefan Reimer | 39db495adc | |
Renovate Bot | cb45553d64 | |
Stefan Reimer | 248d2e6ffa | |
Stefan Reimer | 42df583323 | |
Stefan Reimer | a65b515f8c | |
Stefan Reimer | f6336e5df5 | |
Stefan Reimer | ac0760d21a | |
Stefan Reimer | ea2d531719 | |
Renovate Bot | d2ae34792d | |
Stefan Reimer | 1db90d4e28 | |
Stefan Reimer | d25a76b526 | |
Stefan Reimer | 915d6c4afb | |
Stefan Reimer | fccc78fd30 | |
Stefan Reimer | 9466cc983e | |
Stefan Reimer | 53bc8b4d88 | |
Renovate Bot | fae3dd10ea | |
Renovate Bot | 64c6056e50 | |
Renovate Bot | f8138fba5f | |
Renovate Bot | 4d93bf817a | |
Stefan Reimer | 3b32f29f90 | |
Stefan Reimer | b08ee586e6 | |
Stefan Reimer | 60c454b534 | |
Stefan Reimer | be1a727fe8 | |
Stefan Reimer | cb0b167437 | |
Renovate Bot | 1ab604df95 | |
Stefan Reimer | 528c425d38 | |
Stefan Reimer | 907d46e6db | |
Stefan Reimer | d8b4c720c8 | |
Stefan Reimer | f4f51c082d | |
Renovate Bot | ead3e680a5 | |
Stefan Reimer | b5a90a6227 | |
Stefan Reimer | ffcd94679b | |
Stefan Reimer | 5ce1b42483 | |
Renovate Bot | f33a01a188 | |
Stefan Reimer | f2e14f1d5d | |
Stefan Reimer | 54d382625a | |
Renovate Bot | f936d17c4e | |
Renovate Bot | 6d60bedae3 | |
Stefan Reimer | ae2a6fa146 | |
Renovate Bot | 025df84873 | |
Stefan Reimer | 950be88780 | |
Stefan Reimer | c0abdf2db0 | |
Stefan Reimer | 9ecf804a90 | |
Renovate Bot | 9cf6922727 | |
Stefan Reimer | 267505d840 | |
Renovate Bot | c22ea88f64 | |
Stefan Reimer | aa903d9e92 | |
Renovate Bot | 6364d38dc7 | |
Stefan Reimer | cdb2848413 | |
Stefan Reimer | 5b32381120 | |
Stefan Reimer | 72bca2e77e | |
Stefan Reimer | 55dd52d5ab | |
Renovate Bot | 20875794cd | |
Stefan Reimer | 617d8fbac4 | |
Renovate Bot | 4291231a8a | |
Stefan Reimer | 5cffc80159 | |
Renovate Bot | 50c9236e92 | |
Stefan Reimer | c139218e1b | |
Stefan Reimer | a6e37ab0c9 | |
Stefan Reimer | 2789fe5be1 | |
Stefan Reimer | 1546c3976a | |
Stefan Reimer | 7eb86a88fd | |
Stefan Reimer | ec70307115 | |
Stefan Reimer | f18d255640 | |
Stefan Reimer | b65293ac3e | |
Stefan Reimer | ab7873dbbf | |
Stefan Reimer | c0cb525f63 | |
Stefan Reimer | 8f89c3ce14 | |
Stefan Reimer | 0966f77d51 | |
Stefan Reimer | 7fd6fe638c | |
Renovate Bot | 741068d7c4 | |
Renovate Bot | 1746a0579b | |
Stefan Reimer | 49b052e8d4 | |
Stefan Reimer | 6e08d8a474 | |
Stefan Reimer | 5845ca08b7 | |
Stefan Reimer | 3fb9fc21cc | |
Renovate Bot | 10be7a8754 | |
Stefan Reimer | d709d0e9e4 | |
Renovate Bot | 6aff67dae0 | |
Stefan Reimer | bc5e6295d3 | |
Stefan Reimer | 156f41fec6 | |
Stefan Reimer | 6a72988f52 | |
Stefan Reimer | 5a7e926202 | |
Stefan Reimer | a66dfe7302 | |
Stefan Reimer | b918c2d145 | |
Stefan Reimer | e0cea3b90e | |
Stefan Reimer | 62ed59426f | |
Stefan Reimer | daa5e72343 | |
Renovate Bot | 8e4b0ac7c9 | |
Stefan Reimer | 17ad966742 | |
Stefan Reimer | ebcd56ff06 | |
Stefan Reimer | 49a88ba8fa | |
Stefan Reimer | 88ee121198 | |
Stefan Reimer | e03e2b7538 | |
Stefan Reimer | 9f0b976b26 | |
Stefan Reimer | df9b85c1cf | |
Stefan Reimer | a4fabccc5a | |
Renovate Bot | 84b7c28db4 | |
Stefan Reimer | 883295aae2 | |
Stefan Reimer | 14e00f09f4 | |
Stefan Reimer | 004c53d8a5 | |
Stefan Reimer | 1de82faf28 | |
Renovate Bot | 967eaeb38d | |
Stefan Reimer | 1b414be803 | |
Renovate Bot | 9be7819091 | |
Renovate Bot | 238a4becfa | |
Renovate Bot | 9ac9f51997 | |
Renovate Bot | c2e70af850 | |
Renovate Bot | 97c5e871be | |
Stefan Reimer | 2abaad1904 | |
Stefan Reimer | bd54440210 | |
Stefan Reimer | 4827fec336 | |
Stefan Reimer | febf0e4efb | |
Stefan Reimer | 60aa548d2a | |
Stefan Reimer | 391bbfe6d5 | |
Stefan Reimer | 57690b1f1e | |
Stefan Reimer | 1e8244ff8d | |
Stefan Reimer | e0f9af604c | |
Stefan Reimer | 9b663e4f22 | |
Stefan Reimer | 413d5be90d | |
Stefan Reimer | b50a66ab79 | |
Stefan Reimer | 11889208c7 | |
Renovate Bot | 1477f77a6d | |
Stefan Reimer | 0cb16de099 | |
Stefan Reimer | a6fd7a7d04 | |
Stefan Reimer | 8f30aec867 | |
Stefan Reimer | 317cab8522 | |
Stefan Reimer | 1e927c95fc | |
Stefan Reimer | 322f4341d9 | |
Stefan Reimer | c979b7ad71 | |
Stefan Reimer | d5510984e4 | |
Stefan Reimer | 870b851864 | |
Stefan Reimer | 5bcf945213 | |
Stefan Reimer | 5036298e3b | |
Stefan Reimer | 8489d77a03 | |
Stefan Reimer | 9121234d40 | |
Stefan Reimer | 36de253216 | |
Stefan Reimer | d76cd279bf | |
Stefan Reimer | fdb3ad0ef2 | |
Stefan Reimer | 35356b3412 | |
Stefan Reimer | 1d97c3e338 | |
Stefan Reimer | 7cf5be2a75 | |
Stefan Reimer | 5008420349 | |
Stefan Reimer | 1455c8b800 | |
Stefan Reimer | 5aaf6c7537 | |
Stefan Reimer | 10b3332633 | |
Stefan Reimer | dd650e775b | |
Stefan Reimer | 2e1eadcdf6 | |
Stefan Reimer | 6145802c38 | |
Stefan Reimer | 1633187ac5 | |
Stefan Reimer | 4ef373436f | |
Stefan Reimer | be099ffcd3 | |
Stefan Reimer | 7acc22bcc5 | |
Stefan Reimer | 4e86623ffd | |
Stefan Reimer | a748448c3e | |
Stefan Reimer | 855bd9aece | |
Stefan Reimer | 17b1e20cb6 | |
Stefan Reimer | d3e18df5f1 | |
Stefan Reimer | 14b0e7cdeb | |
Stefan Reimer | f1ad018695 | |
Stefan Reimer | 016054b97f | |
Stefan Reimer | d44d612494 | |
Stefan Reimer | ff07bf3af1 | |
Stefan Reimer | 2a017e38f9 | |
Stefan Reimer | 7c09355bbd | |
Stefan Reimer | 84345e4e53 | |
Stefan Reimer | 3ba73ca850 | |
Stefan Reimer | fc2866814d | |
Stefan Reimer | c1a8df235a | |
Stefan Reimer | af5db75461 | |
Stefan Reimer | f979c02fd7 | |
Stefan Reimer | bbe16f7bc0 | |
Stefan Reimer | 75051fa7fc | |
Stefan Reimer | 8c2f3c4c73 | |
Stefan Reimer | 8790101d32 | |
Stefan Reimer | 99c1d54790 | |
Stefan Reimer | 74a1515d3c | |
Stefan Reimer | 46bb9382fd | |
Stefan Reimer | d41e82ff08 | |
Stefan Reimer | 2b0597125f | |
Stefan Reimer | 141ee23642 | |
Stefan Reimer | 7e76c78e53 |
.ci/ecr_public_lifecycle.py

@@ -0,0 +1,63 @@
#!/usr/bin/env python3

import argparse
import boto3

parser = argparse.ArgumentParser(
    description='Implement basic public ECR lifecycle policy')
parser.add_argument('--repo', dest='repositoryName', action='store', required=True,
                    help='Name of the public ECR repository')
parser.add_argument('--keep', dest='keep', action='store', default=10, type=int,
                    help='number of tagged images to keep, default 10')
parser.add_argument('--dev', dest='delete_dev', action='store_true',
                    help='also delete in-development images only having tags like v0.1.1-commitNr-githash')

args = parser.parse_args()

client = boto3.client('ecr-public', region_name='us-east-1')

images = client.describe_images(repositoryName=args.repositoryName)[
    "imageDetails"]

untagged = []
kept = 0

# actual Image
# imageManifestMediaType: 'application/vnd.oci.image.manifest.v1+json'
# image Index
# imageManifestMediaType: 'application/vnd.oci.image.index.v1+json'

# Sort by date uploaded
for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
    # Remove all untagged
    # if registry uses image index all actual images will be untagged anyways
    if 'imageTags' not in image:
        untagged.append({"imageDigest": image['imageDigest']})
        # print("Delete untagged image {}".format(image["imageDigest"]))
        continue

    # check for dev tags
    if args.delete_dev:
        _delete = True
        for tag in image["imageTags"]:
            # Look for at least one tag NOT being a SemVer dev tag
            if "-" not in tag:
                _delete = False
        if _delete:
            print("Deleting development image {}".format(image["imageTags"]))
            untagged.append({"imageDigest": image['imageDigest']})
            continue

    if kept < args.keep:
        kept = kept + 1
        print("Keeping tagged image {}".format(image["imageTags"]))
        continue
    else:
        print("Deleting tagged image {}".format(image["imageTags"]))
        untagged.append({"imageDigest": image['imageDigest']})

# batch_delete_image rejects an empty imageIds list, so only call it when needed
if untagged:
    deleted_images = client.batch_delete_image(
        repositoryName=args.repositoryName, imageIds=untagged)

    if deleted_images["imageIds"]:
        print("Deleted images: {}".format(deleted_images["imageIds"]))
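A minimal usage sketch for the lifecycle script above; the repository name `kubezero-admin` is the one set in the project Makefile further below, and AWS credentials with `ecr-public` permissions plus an installed `boto3` are assumed:

```
pip install boto3
# keep the 10 most recent tagged images, also prune dev tags like v0.1.1-42-gabcdef
.ci/ecr_public_lifecycle.py --repo kubezero-admin --keep 10 --dev
```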
.ci/podman.mk

@@ -1,14 +1,26 @@
# Parse version from latest git semver tag
BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_TAG=$(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
TAG ?= $(shell echo $(GIT_TAG) | awk -F '-' '{ print $$1 "-" $$2 }' | sed -e 's/-$$//')
ARCH := amd64
ALL_ARCHS := amd64 arm64
GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)

# EXTRA_TAGS supposed to be set at the caller, eg. $(shell echo $(TAG) | awk -F '.' '{ print $$1 "." $$2 }')
TAG ::= $(GIT_TAG)
# append branch name to tag if NOT main nor master
ifeq (,$(filter main master, $(GIT_BRANCH)))
# If branch is substring of tag, omit branch name
ifeq ($(findstring $(GIT_BRANCH), $(GIT_TAG)),)
# only append branch name if not equal tag
ifneq ($(GIT_TAG), $(GIT_BRANCH))
# Sanitize GIT_BRANCH to allowed Docker tag character set
TAG = $(GIT_TAG)-$(shell echo $$GIT_BRANCH | sed -e 's/[^a-zA-Z0-9]/-/g')
endif
endif
endif

ARCH ::= amd64
ALL_ARCHS ::= amd64 arm64
_ARCH = $(or $(filter $(ARCH),$(ALL_ARCHS)),$(error $$ARCH [$(ARCH)] must be exactly one of "$(ALL_ARCHS)"))

ifneq ($(TRIVY_REMOTE),)
TRIVY_OPTS := --server $(TRIVY_REMOTE)
TRIVY_OPTS ::= --server $(TRIVY_REMOTE)
endif

.SILENT: ; # no need for @
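A hypothetical walk-through of the tag derivation above (all values illustrative): on a branch `feat/foo` whose latest semver tag is `v1.2.3`, `git describe` might yield `v1.2.3-4-gdeadbee`; since the branch is neither `main` nor `master`, is not a substring of the tag, and does not equal it, the sanitized branch name is appended:

```
git describe --tags --match 'v*.*.*'         # -> v1.2.3-4-gdeadbee
git rev-parse --abbrev-ref HEAD \
  | sed -e 's/[^a-zA-Z0-9]/-/g'              # feat/foo -> feat-foo
# resulting TAG -> v1.2.3-4-gdeadbee-feat-foo
```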
@@ -21,53 +33,52 @@ endif
help: ## Show Help
	grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

build: ## Build the app
	buildah build --rm --layers -t $(IMAGE):$(TAG)-$(ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(ARCH) --platform linux/$(ARCH) .
prepare:: ## custom step on the build agent before building

test: rm-test-image ## Execute Dockerfile.test
	test -f Dockerfile.test && \
	{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(ARCH) . && \
	podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(ARCH)-test; } || \
	echo "No Dockerfile.test found, skipping test"
fmt:: ## auto format source

lint:: ## Lint source

build: ## Build the app
	buildah build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .

test:: ## test built artifacts

scan: ## Scan image using trivy
	echo "Scanning $(IMAGE):$(TAG)-$(ARCH) using Trivy $(TRIVY_REMOTE)"
	trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(ARCH)
	echo "Scanning $(IMAGE):$(TAG)-$(_ARCH) using Trivy $(TRIVY_REMOTE)"
	trivy image $(TRIVY_OPTS) --quiet --no-progress localhost/$(IMAGE):$(TAG)-$(_ARCH)

# first tag and push all actual images
# create new manifest for each tag and add all available TAG-ARCH before pushing
push: ecr-login ## push images to registry
	for t in $(TAG) latest $(EXTRA_TAGS); do \
		buildah tag $(IMAGE):$(TAG)-$(ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(ARCH); \
		echo "Tagging image with $(REGISTRY)/$(IMAGE):$${t}-$(ARCH)"; \
		buildah tag $(IMAGE):$(TAG)-$(_ARCH) $(REGISTRY)/$(IMAGE):$${t}-$(_ARCH); \
		buildah manifest rm $(IMAGE):$$t || true; \
		buildah manifest create $(IMAGE):$$t; \
		for a in $(ALL_ARCHS); do \
			buildah manifest add $(IMAGE):$$t $(REGISTRY)/$(IMAGE):$(TAG)-$$a; \
		done; \
		echo "Pushing manifest $(IMAGE):$$t"; \
		buildah manifest push --all $(IMAGE):$$t docker://$(REGISTRY)/$(IMAGE):$$t; \
	done

ecr-login: ## log into AWS ECR public
	aws ecr-public get-login-password --region $(REGION) | podman login --username AWS --password-stdin $(REGISTRY)

clean: rm-test-image rm-image ## delete local built container and test images
rm-remote-untagged: ## delete all remote untagged and in-dev images, keep 10 tagged
	echo "Removing all untagged and in-dev images from $(IMAGE) in $(REGION)"
	.ci/ecr_public_lifecycle.py --repo $(IMAGE) --dev

rm-remote-untagged: ## delete all remote untagged images
	echo "Removing all untagged images from $(IMAGE) in $(REGION)"
	IMAGE_IDS=$$(for image in $$(aws ecr-public describe-images --repository-name $(IMAGE) --region $(REGION) --output json | jq -r '.imageDetails[] | select(.imageTags | not ).imageDigest'); do echo -n "imageDigest=$$image "; done) ; \
	[ -n "$$IMAGE_IDS" ] && aws ecr-public batch-delete-image --repository-name $(IMAGE) --region $(REGION) --image-ids $$IMAGE_IDS || echo "No image to remove"
clean:: ## clean up source folder

rm-image:
	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(ARCH) > /dev/null
	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH))" || echo "Error: Removing image failed"

# Ensure we run the tests by removing any previous runs
rm-test-image:
	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH)-test)" || podman image rm -f $(IMAGE):$(TAG)-$(ARCH)-test > /dev/null
	test -z "$$(docker image ls -q $(IMAGE):$(TAG)-$(ARCH)-test)" || echo "Error: Removing test image failed"
	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH) > /dev/null
	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || echo "Error: Removing image failed"

## some useful tasks during development
ci-pull-upstream: ## pull latest shared .ci subtree
	git stash && git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash && git stash pop
	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"

create-repo: ## create new AWS ECR public repository
	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
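Taken together, a plausible local flow with these targets (requires buildah/podman, trivy and AWS credentials; `REGISTRY`, `IMAGE` and `REGION` are expected from the including project Makefile):

```
make build                # buildah build for the validated $(_ARCH)
make test                 # runs Dockerfile.test if present
make scan                 # trivy scan, optionally against $TRIVY_REMOTE
make push                 # ecr-login, tag, then create and push multi-arch manifests
make rm-remote-untagged   # registry retention via .ci/ecr_public_lifecycle.py
```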
@@ -2,24 +2,33 @@
def call(Map config=[:]) {
  pipeline {
    options {
      disableConcurrentBuilds()
    }
    agent {
      node {
        label 'podman-aws-trivy'
      }
    }

    stages {
      stage('Prepare') {
        // get tags
        steps {
          sh 'git fetch -q --tags ${GIT_URL} +refs/heads/${BRANCH_NAME}:refs/remotes/origin/${BRANCH_NAME}'
          sh 'mkdir -p reports'

          // we set pull tags as project adv. options
          // pull tags
          //withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
          //  sh 'git fetch -q --tags ${GIT_URL}'
          //}
          // Optional project specific preparations
          sh 'make prepare'
        }
      }

      // Build using rootless podman
      stage('Build') {
        steps {
          sh 'make build'
          sh 'make build GIT_BRANCH=$GIT_BRANCH'
        }
      }

@@ -31,13 +40,13 @@ def call(Map config=[:]) {
      // Scan via trivy
      stage('Scan') {
        environment {
          TRIVY_FORMAT = "template"
          TRIVY_OUTPUT = "reports/trivy.html"
        }
        steps {
          sh 'mkdir -p reports'
          sh 'make scan'
          // we always scan and create the full json report
          sh 'TRIVY_FORMAT=json TRIVY_OUTPUT="reports/trivy.json" make scan'

          // render custom full html report
          sh 'trivy convert -f template -t @/home/jenkins/html.tpl -o reports/trivy.html reports/trivy.json'

          publishHTML target: [
            allowMissing: true,
            alwaysLinkToLastBuild: true,

@@ -47,25 +56,33 @@ def call(Map config=[:]) {
            reportName: 'TrivyScan',
            reportTitles: 'TrivyScan'
          ]
          sh 'echo "Trivy report at: $BUILD_URL/TrivyScan"'

          // Scan again and fail on CRITICAL vulns, if not overridden
          // fail build if issues found above trivy threshold
          script {
            if (config.trivyFail == 'NONE') {
              echo 'trivyFail == NONE, review Trivy report manually. Proceeding ...'
            } else {
              sh "TRIVY_EXIT_CODE=1 TRIVY_SEVERITY=${config.trivyFail} make scan"
              if ( config.trivyFail ) {
                sh "TRIVY_SEVERITY=${config.trivyFail} trivy convert --report summary --exit-code 1 reports/trivy.json"
              }
            }
          }
        }
      }

      // Push to ECR
      // Push to container registry if not PR
      // incl. basic registry retention removing any untagged images
      stage('Push') {
        when { not { changeRequest() } }
        steps {
          sh 'make ecr-login push'
          sh 'make push'
          sh 'make rm-remote-untagged'
        }
      }

      // generic clean
      stage('cleanup') {
        steps {
          sh 'make clean'
        }
      }
    }
  }
}
Dockerfile

@@ -1,9 +1,9 @@
ARG ALPINE_VERSION=3.16
ARG ALPINE_VERSION=3.19

FROM alpine:${ALPINE_VERSION}
FROM docker.io/alpine:${ALPINE_VERSION}

ARG ALPINE_VERSION
ARG KUBE_VERSION=1.24
ARG KUBE_VERSION=1.28.9

RUN cd /etc/apk/keys && \
    wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \

@@ -18,19 +18,19 @@ RUN cd /etc/apk/keys && \
    bash \
    python3 \
    py3-yaml \
    restic \
    helm \
    cri-tools@kubezero \
    kubeadm@kubezero~=${KUBE_VERSION} \
    kubectl@kubezero~=${KUBE_VERSION} \
    etcdhelper@kubezero \
    etcd-ctl@edge-testing \
    restic@edge-community \
    helm@edge-community
    etcd-ctl@edge-testing

RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
    mkdir -p /var/lib/kubezero

ADD admin/kubezero.sh admin/libhelm.sh admin/migrate_argo_values.py /usr/bin
ADD admin/libhelm.sh admin/v${KUBE_VERSION}/* /var/lib/kubezero
ADD admin/libhelm.sh /var/lib/kubezero

ADD charts/kubeadm /charts/kubeadm
ADD charts/kubezero /charts/kubezero
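A hedged example of building this image locally with the pinned versions above; the local tag `kubezero-admin:local` is illustrative only, CI derives the real tags via .ci/podman.mk:

```
buildah build --rm --layers \
  --build-arg ALPINE_VERSION=3.19 \
  --build-arg KUBE_VERSION=1.28.9 \
  -t kubezero-admin:local .
```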
Makefile

@@ -2,8 +2,11 @@ REGISTRY := public.ecr.aws/zero-downtime
IMAGE := kubezero-admin
REGION := us-east-1

# Use KubeZero chart version rather than git tag for admin image
GIT_TAG = v$(shell yq .version < charts/kubezero/Chart.yaml)

# Also tag as Kubernetes major version
EXTRA_TAGS = $(shell echo $(TAG) | awk -F '.' '{ print $$1 "." $$2 }')
EXTRA_TAGS = $(shell echo $(GIT_TAG) | awk -F '.' '{ print $$1 "." $$2 }')

include .ci/podman.mk
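The EXTRA_TAGS expression simply truncates the chart-derived semver tag to its Kubernetes minor version, e.g.:

```
# assuming charts/kubezero/Chart.yaml carries version 1.28.9
echo v1.28.9 | awk -F '.' '{ print $1 "." $2 }'   # -> v1.28
```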
README.md

@@ -4,12 +4,12 @@ KubeZero is a Kubernetes distribution providing an integrated container platform
# Design philosophy

- Cloud provider agnostic, bare-metal/self-hosted
- Focus on security and simplicity over feature creep
- No vendor lock in, most components are optional and could be easily exchanged
- Organic Open Source / open and permissive licenses over closed-source solutions
- No vendor lock in, most components are optional and could be easily changed as needed
- No premium services / subscriptions required
- Staying up to date and contributing back to upstream projects, like alpine-cloud-images and others
- Cloud provider agnostic, bare-metal/self-hosted
- Organic Open Source / open and permissive licenses over closed-source solutions
- Corgi approved :dog:

@@ -18,8 +18,8 @@ KubeZero is a Kubernetes distribution providing an integrated container platform
# Version / Support Matrix
KubeZero releases track the same *minor* version of Kubernetes.
Any 1.24.X-Y release of Kubezero supports any Kubernetes cluster 1.24.X.
KubeZero releases track the same *minor* version of Kubernetes.
Any 1.26.X-Y release of Kubezero supports any Kubernetes cluster 1.26.X.

KubeZero is distributed as a collection of versioned Helm charts, allowing custom upgrade schedules and module versions as needed.

@@ -28,15 +28,15 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custom
gantt
    title KubeZero Support Timeline
    dateFormat  YYYY-MM-DD
    section 1.23
    beta     :123b, 2022-08-01, 2022-09-01
    release  :after 123b, 2023-02-01
    section 1.24
    beta     :124b, 2022-11-14, 2022-12-31
    release  :after 124b, 2023-06-01
    section 1.25
    beta     :125b, 2023-03-01, 2023-03-31
    release  :after 125b, 2023-08-01
    section 1.27
    beta     :127b, 2023-09-01, 2023-09-30
    release  :after 127b, 2024-04-30
    section 1.28
    beta     :128b, 2024-03-01, 2024-04-30
    release  :after 128b, 2024-08-31
    section 1.29
    beta     :129b, 2024-07-01, 2024-08-30
    release  :after 129b, 2024-11-30
```

[Upstream release policy](https://kubernetes.io/releases/)

@@ -44,14 +44,20 @@ gantt
# Components

## OS
- all nodes are based on Alpine V3.16
- 2 GB encrypted root filesystem
- no 3rd party dependencies at boot ( other than container registries )
- all compute nodes are running on Alpine V3.19
- 1 or 2 GB encrypted root file system
- no external dependencies at boot time, apart from container registries
- minimal attack surface
- extremely small memory footprint / overhead
- cri-o container runtime incl. AppArmor support

## Container runtime
- cri-o rather than Docker for improved security and performance
## GitOps
- cli / cmd line install
- optional full ArgoCD support and integration

## Featured workloads
- rootless CI/CD build platform to build containers as part of a CI pipeline, using podman / fuse device plugin support
- containerized AI models via integrated out of the box support for Nvidia GPU workers as well as AWS Neuron

## Control plane
- all Kubernetes components compiled against Alpine OS using `buildmode=pie`

@@ -59,11 +65,6 @@ gantt
- access to control plane from within the VPC only by default ( VPN access required for Admin tasks )
- controller nodes are used for various platform admin controllers / operators to reduce costs and noise on worker nodes

## GitOps
- cli / cmd line install
- optional full ArgoCD support and integration
- fuse device plugin support to build containers as part of a CI pipeline leveraging rootless podman build agents

## AWS integrations
- IAM roles for service accounts allowing each pod to assume individual IAM roles
- access to meta-data services is blocked for all workload containers on all nodes

@@ -73,10 +74,8 @@ gantt
- support for [Inf1 instances](https://aws.amazon.com/ec2/instance-types/inf1/) part of [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/).

## Network
- Cilium using Geneve encapsulation, incl. increased MTU allowing flexible / more containers per worker node compared to eg. AWS VPC CNI
- Multus support for multiple network interfaces per pod, eg. additional AWS CNI
- Calico using VxLAN incl. increased MTU
allows flexible / more containers per worker node compared to eg. AWS VPC CNI
- isolates container traffic from VPC by using VxLAN overlay
- no restrictions on IP space / sizing from the underlying VPC architecture

## Storage

@@ -86,16 +85,16 @@ allows flexible / more containers per worker node compared to eg. AWS VPC CNI
- CSI Snapshot controller and Gemini snapshot groups and retention

## Ingress
- AWS Network Loadbalancer and Istio Ingress controllers
- AWS Network Loadbalancer and Istio Ingress controllers
- no additional costs per exposed service
- real client source IP available to workloads via HTTP header and access logs
- ACME SSL Certificate handling via cert-manager incl. renewal etc.
- support for TCP services
- optional rate limiting support
- optional rate limiting support
- optional full service mesh

## Metrics
- Prometheus support for all components
- Prometheus support for all components, incl. out of cluster EC2 instances (node_exporter)
- automated service discovery allowing instant access to common workload metrics
- pre-configured Grafana dashboards and alerts
- Alertmanager events via SNSAlertHub to Slack, Google, Matrix, etc.
@@ -0,0 +1,84 @@
#!/bin/bash
#set -eEx
#set -o pipefail
set -x

ARTIFACTS=($(echo $1 | tr "," "\n"))
ACTION=${2:-apply}

#VERSION="latest"
KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"

WORKDIR=$(mktemp -p /tmp -d kubezero.XXX)
[ -z "$DEBUG" ] && trap 'rm -rf $WORKDIR' ERR EXIT

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh
CHARTS="$(dirname $SCRIPT_DIR)/charts"

### Various hooks for modules

################
# cert-manager #
################
function cert-manager-post() {
  # If any error occurs, wait for initial webhook deployment and try again
  # see: https://cert-manager.io/docs/concepts/webhook/#webhook-connection-problems-shortly-after-cert-manager-installation

  if [ $rc -ne 0 ]; then
    wait_for "kubectl get deployment -n $namespace cert-manager-webhook"
    kubectl rollout status deployment -n $namespace cert-manager-webhook
    wait_for 'kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0"'
    apply
  fi

  wait_for "kubectl get ClusterIssuer -n $namespace kubezero-local-ca-issuer"
  kubectl wait --timeout=180s --for=condition=Ready -n $namespace ClusterIssuer/kubezero-local-ca-issuer
}


###########
# ArgoCD #
###########
function argocd-pre() {
  for f in $CLUSTER/secrets/argocd-*.yaml; do
    kubectl apply -f $f
  done
}


###########
# Metrics #
###########
# Cleanup patch jobs from previous runs, ArgoCD does this automatically
function metrics-pre() {
  kubectl delete jobs --field-selector status.successful=1 -n monitoring
}


### Main
get_kubezero_values

# Always use embedded kubezero chart
helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $KUBE_VERSION --version ~$KUBE_VERSION --devel --output-dir $WORKDIR

# Resolve all enabled artifacts
if [ ${ARTIFACTS[0]} == "all" ]; then
  ARTIFACTS=($(ls $WORKDIR/kubezero/templates | sed -e 's/.yaml//g'))
fi

if [ $ACTION == "apply" -o $ACTION == "crds" ]; then
  for t in ${ARTIFACTS[@]}; do
    _helm $ACTION $t || true
  done

# Delete in reverse order, continue even if errors
elif [ $ACTION == "delete" ]; then
  set +e
  for (( idx=${#ARTIFACTS[@]}-1 ; idx>=0 ; idx-- )) ; do
    _helm delete ${ARTIFACTS[idx]} || true
  done
fi
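A usage sketch for this script; its path is not shown in this diff, so `admin/apply.sh` is assumed here. The first argument is a comma-separated artifact list or `all`, the second an action defaulting to `apply`:

```
admin/apply.sh "network,addons" apply   # render embedded chart, apply two modules
admin/apply.sh all delete               # delete everything in reverse order
```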
admin/kubezero.sh

@@ -13,7 +13,7 @@ export WORKDIR=/tmp/kubezero
export HOSTFS=/host
export CHARTS=/charts
export KUBE_VERSION=$(kubeadm version -o json | jq -r .clientVersion.gitVersion)
export KUBE_VERSION_MINOR="v1.$(kubectl version -o json | jq .clientVersion.minor -r)"
export KUBE_VERSION_MINOR=$(echo $KUBE_VERSION | sed -e 's/\.[0-9]*$//')

export KUBECONFIG="${HOSTFS}/root/.kube/config"

@@ -117,8 +117,28 @@ post_kubeadm()
kubeadm_upgrade() {
  # pre upgrade hook
  [ -f /var/lib/kubezero/pre-upgrade.sh ] && . /var/lib/kubezero/pre-upgrade.sh

  # get current values, argo app over cm
  get_kubezero_values

  # tumble new config through migrate.py
  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml

  # Update kubezero-values CM
  kubectl get cm -n kube-system kubezero-values -o=yaml | \
    yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
    kubectl replace -f -

  # update argo app
  kubectl get application kubezero -n argocd -o yaml | \
    kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
    yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
    kubectl apply -f -

  # finally remove annotation to allow argo to sync again
  kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'

  # Local node upgrade
  render_kubeadm

  pre_kubeadm

@@ -161,7 +181,7 @@ control_plane_node()
  else
    # restore latest backup
    retry 10 60 30 restic restore latest --no-lock -t / #Review: Use latest no matter what for now: --tag $KUBE_VERSION_MINOR
    retry 10 60 30 restic restore latest --no-lock -t / # --tag $KUBE_VERSION_MINOR

    # Make last etcd snapshot available
    cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes

@@ -214,30 +234,16 @@ control_plane_node()
    sleep 3
  done

  # if we are NOT member already, flush etcd to be able to join
  # see if we are a former member and remove our former self if so
  MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
  [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints

  # Failsafe / etcd on ephemeral: we were a member but our dataset is missing
  # -> remove former self so we can re-join
  if [ -n "$MY_ID" -a ! -d ${HOSTFS}/var/lib/etcd/member ]; then
    # Remove former self first
    [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
    MY_ID=""
  fi
  # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
  rm -rf ${HOSTFS}/var/lib/etcd/member

  if [ -z "$MY_ID" ]; then
    # flush etcd data directory from restore
    rm -rf ${HOSTFS}/var/lib/etcd/member

    # Announce new etcd member and capture ETCD_INITIAL_CLUSTER, retry needed in case another node joining causes temp quorum loss
    ETCD_ENVS=$(retry 12 5 5 etcdctl member add $ETCD_NODENAME --peer-urls="https://${ETCD_NODENAME}:2380" --endpoints=$etcd_endpoints)
    export $(echo "$ETCD_ENVS" | grep ETCD_INITIAL_CLUSTER= | sed -e 's/"//g')
  else
    # build initial_cluster string from running cluster
    _cluster=$(etcdctl member list --endpoints=$etcd_endpoints -w json | jq -r '.members[] | "\(.name)=\(.peerURLs[]),"')
    export ETCD_INITIAL_CLUSTER=$(echo ${_cluster%%,} | sed -e 's/ //g')
  fi
  # Announce new etcd member and capture ETCD_INITIAL_CLUSTER, retry needed in case another node joining causes temp quorum loss
  ETCD_ENVS=$(retry 12 5 5 etcdctl member add $ETCD_NODENAME --peer-urls="https://${ETCD_NODENAME}:2380" --endpoints=$etcd_endpoints)
  export $(echo "$ETCD_ENVS" | grep ETCD_INITIAL_CLUSTER= | sed -e 's/"//g')

  # Patch kubeadm-values.yaml and re-render to get etcd manifest patched
  yq eval -i '.etcd.state = "existing"

@@ -358,7 +364,6 @@ backup()
  cp -r ${HOSTFS}/etc/kubernetes/admin.conf ${WORKDIR}

  # Backup via restic
  restic snapshots || restic init
  restic backup ${WORKDIR} -H $CLUSTERNAME --tag $CLUSTER_VERSION

  echo "Backup complete."
admin/libhelm.sh

@@ -1,7 +1,10 @@
#!/bin/bash

# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1"
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget"

#VERSION="latest"
VERSION="v1.28"

# Waits for max 300s and retries
function wait_for() {

@@ -166,6 +169,9 @@ function _helm() {
  render
  kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?

  # Try again without server-side, review with 1.26, required for cert-manager during 1.25
  [ $rc -ne 0 ] && kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?

  # Optional post hook
  declare -F ${module}-post && ${module}-post

@@ -179,3 +185,125 @@ function _helm() {
  return 0
}

function all_nodes_upgrade() {
  CMD="$1"

  echo "Deploy all node upgrade daemonSet(busybox)"
  cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kubezero-all-nodes-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  selector:
    matchLabels:
      name: kubezero-all-nodes-upgrade
  template:
    metadata:
      labels:
        name: kubezero-all-nodes-upgrade
    spec:
      hostNetwork: true
      hostIPC: true
      hostPID: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      initContainers:
      - name: node-upgrade
        image: busybox
        command: ["/bin/sh"]
        args: ["-x", "-c", "$CMD" ]
        volumeMounts:
        - name: host
          mountPath: /host
        - name: hostproc
          mountPath: /hostproc
        securityContext:
          privileged: true
          capabilities:
            add: ["SYS_ADMIN"]
      containers:
      - name: node-upgrade-wait
        image: busybox
        command: ["sleep", "3600"]
      volumes:
      - name: host
        hostPath:
          path: /
          type: Directory
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
EOF

  kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
  kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}


function control_plane_upgrade() {
  TASKS="$1"

  echo "Deploy cluster admin task: $TASKS"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubezero-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  hostNetwork: true
  hostIPC: true
  hostPID: true
  containers:
  - name: kubezero-admin
    image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
    imagePullPolicy: Always
    command: ["kubezero.sh"]
    args: [$TASKS]
    env:
    - name: DEBUG
      value: "$DEBUG"
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    volumeMounts:
    - name: host
      mountPath: /host
    - name: workdir
      mountPath: /tmp
    securityContext:
      capabilities:
        add: ["SYS_CHROOT"]
  volumes:
  - name: host
    hostPath:
      path: /
      type: Directory
  - name: workdir
    emptyDir: {}
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
  restartPolicy: Never
EOF

  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
  while true; do
    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
    sleep 3
  done
  kubectl delete pod kubezero-upgrade -n kube-system
}
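These helpers are meant to be sourced rather than executed; a minimal sketch of driving them (the task name maps to a function inside kubezero.sh, and the echoed command is purely illustrative):

```
. admin/libhelm.sh

control_plane_upgrade kubeadm_upgrade          # one admin task in a privileged pod
all_nodes_upgrade "echo upgraded \$HOSTNAME"   # run a shell snippet on every node
```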
admin/migrate_argo_values.py

@@ -8,26 +8,13 @@ import yaml
def migrate(values):
    """Actual changes here"""

    # ClusterBackup is enabled on AWS anyways, same with cluster-autoscaler
    if "aws" in values["global"]:
        deleteKey(values["addons"], "clusterBackup")
        deleteKey(values["addons"], "cluster-autoscaler")

    # Remove calico and multus
    deleteKey(values["network"], "calico")
    deleteKey(values["network"], "multus")

    # ArgoCD helm changes
    if "argocd" in values:
        if "server" in values["argocd"]:
            if not "configs" in values["argocd"]:
                values["argocd"]["configs"] = {}
            if not "cm" in values["argocd"]["configs"]:
                values["argocd"]["configs"]["cm"] = {}
            values["argocd"]["configs"]["cm"]["url"] = values["argocd"]["server"]["config"]["url"]
            deleteKey(values["argocd"], "server")
    # argoCD moves to argo module
    try:
        if values["argocd"]["enabled"]:
            values["argo"] = { "enabled": True, "argo-cd": values["argocd"] }
            values.pop("argocd")
    except KeyError:
        pass

    return values
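A hypothetical before/after of the argocd-to-argo move performed by migrate() (keys and values illustrative):

```
migrate_argo_values.py < kubezero-values.yaml > new-kubezero-values.yaml
# input:              # output:
# argocd:             # argo:
#   enabled: true     #   enabled: true
#   istio:            #   argo-cd:
#     enabled: true   #     enabled: true
#                     #     istio:
#                     #       enabled: true
```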
@@ -1,178 +1,57 @@
#!/bin/bash -e
#!/bin/bash
set -eE
set -o pipefail

#VERSION="latest"
VERSION="v1.24"
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh

[ -n "$DEBUG" ] && set -x


all_nodes_upgrade() {
  CMD="$1"

  echo "Deploy all node upgrade daemonSet(busybox)"
  cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kubezero-all-nodes-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  selector:
    matchLabels:
      name: kubezero-all-nodes-upgrade
  template:
    metadata:
      labels:
        name: kubezero-all-nodes-upgrade
    spec:
      hostNetwork: true
      hostIPC: true
      hostPID: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      initContainers:
      - name: node-upgrade
        image: busybox
        command: ["/bin/sh"]
        args: ["-x", "-c", "$CMD" ]
        volumeMounts:
        - name: host
          mountPath: /host
        - name: hostproc
          mountPath: /hostproc
        securityContext:
          privileged: true
          capabilities:
            add: ["SYS_ADMIN"]
      containers:
      - name: node-upgrade-wait
        image: busybox
        command: ["sleep", "3600"]
      volumes:
      - name: host
        hostPath:
          path: /
          type: Directory
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
EOF

  kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
  kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}


control_plane_upgrade() {
  TASKS="$1"

  echo "Deploy cluster admin task: $TASKS"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubezero-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  hostNetwork: true
  hostIPC: true
  hostPID: true
  containers:
  - name: kubezero-admin
    image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
    imagePullPolicy: Always
    command: ["kubezero.sh"]
    args: [$TASKS]
    env:
    - name: DEBUG
      value: "$DEBUG"
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    volumeMounts:
    - name: host
      mountPath: /host
    - name: workdir
      mountPath: /tmp
    securityContext:
      capabilities:
        add: ["SYS_CHROOT"]
  volumes:
  - name: host
    hostPath:
      path: /
      type: Directory
  - name: workdir
    emptyDir: {}
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/master
    operator: Exists
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
  restartPolicy: Never
EOF

  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
  while true; do
    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
    sleep 3
  done
  kubectl delete pod kubezero-upgrade -n kube-system
}

. "$SCRIPT_DIR"/libhelm.sh

echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning

argo_used && disable_argo

# all_nodes_upgrade ""
#all_nodes_upgrade ""

control_plane_upgrade kubeadm_upgrade

echo "Adjust kubezero values as needed:"
#echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system

control_plane_upgrade "apply_network, apply_addons, apply_storage"
### v1.28
# - remove old argocd app, all resources will be taken over by argo.argo-cd
argo_used && rc=$? || rc=$?
if [ $rc -eq 0 ]; then
  kubectl patch app argocd -n argocd \
    --type json \
    --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]' && \
  kubectl delete app argocd -n argocd || true

  kubectl rollout restart daemonset/cilium -n kube-system
  kubectl rollout restart daemonset/kube-multus-ds -n kube-system
  # remove legacy argocd app resources, but NOT kubezero-git-sync nor the appproject
  kubectl api-resources --verbs=list --namespaced -o name | grep -ve 'app.*argoproj' | xargs -n 1 kubectl delete --ignore-not-found -l argocd.argoproj.io/instance=argocd -n argocd
fi

# upgrade modules
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"

echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning

echo "Applying remaining KubeZero modules..."

# delete argocd deployments as various immutable things changed, also redis restart fails otherwise
kubectl delete deployment argocd-redis -n argocd || true
kubectl delete deployment argocd-repo-server -n argocd || true
kubectl delete statefulset argocd-application-controller -n argocd || true
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"

# Delete prometheus-push gateway due to label changes
kubectl delete deploy -l app=prometheus-pushgateway -n monitoring || true

control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"
# Trigger backup of upgraded cluster state
kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$VERSION -n kube-system
while true; do
  kubectl wait --for=condition=complete job/kubezero-backup-$VERSION -n kube-system 2>/dev/null && kubectl delete job kubezero-backup-$VERSION -n kube-system && break
  sleep 1
done

# Final step is to commit the new argocd kubezero app
kubectl get app kubezero -n argocd -o yaml | yq 'del(.status) | del(.metadata) | del(.operation) | .metadata.name="kubezero" | .metadata.namespace="argocd"' | yq 'sort_keys(..) | .spec.source.helm.values |= (from_yaml | to_yaml)' > $ARGO_APP
@@ -1,21 +0,0 @@
#!/bin/bash

# get current values, argo app over cm
get_kubezero_values

# tumble new config through migrate.py
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml

# Update kubezero-values CM
kubectl get cm -n kube-system kubezero-values -o=yaml | \
  yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
  kubectl replace -f -

# update argo app
kubectl get application kubezero -n argocd -o yaml | \
  kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
  yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
  kubectl apply -f -

# finally remove annotation to allow argo to sync again
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
@@ -1,18 +1,19 @@
apiVersion: v2
name: clamav
description: Chart for deploying a ClamavD on kubernetes as statefulSet
description: Chart for deploying a ClamAVd on Kubernetes as statefulSet
type: application
version: 0.1.1
appVersion: 0.104.0
version: "0.3.1"
appVersion: "1.2.1"
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  - clamav
maintainers:
  - name: Quarky9
  - name: Stefan Reimer
    email: stefan@zero-downtime.net
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.4"
    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
kubeVersion: ">= 1.18.0"
kubeVersion: ">= 1.26.0"
@@ -1,8 +1,8 @@
# clamav

![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.104.0](https://img.shields.io/badge/AppVersion-0.104.0-informational?style=flat-square)
![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.2.1](https://img.shields.io/badge/AppVersion-1.2.1-informational?style=flat-square)

Chart for deploying a ClamavD on kubernetes as statefulSet
Chart for deploying a ClamAVd on Kubernetes as statefulSet

**Homepage:** <https://kubezero.com>

@@ -10,32 +10,31 @@ Chart for deploying a ClamAVd on Kubernetes as statefulSet
| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 |  |  |
| Stefan Reimer | <stefan@zero-downtime.net> |  |

## Requirements

Kubernetes: `>= 1.18.0`
Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| clamav.freshclam.mirrors | string | `"database.clamav.net"` | A list of clamav mirrors to be used by the clamav service |
| clamav.image | string | `"clamav/clamav"` | The clamav docker image |
| clamav.limits.connectionQueueLength | int | `100` | Maximum length the queue of pending connections may grow to |
| clamav.limits.fileSize | int | `20` | The largest file size scannable by clamav, in MB |
| clamav.limits.maxThreads | int | `4` | Maximum number of threads running at the same time. |
| clamav.limits.scanSize | int | `100` | The largest scan size permitted in clamav, in MB |
| clamav.limits.sendBufTimeout | int | `500` |  |
| clamav.replicaCount | int | `1` |  |
| clamav.resources | object | `{"requests":{"cpu":"300m","memory":"1300M"}}` | The resource requests and limits for the clamav service |
| clamav.version | string | `"unstable"` | The clamav docker image version - defaults to .Chart.appVersion |
| freshclam.mirrors | string | `"database.clamav.net"` | A list of clamav mirrors to be used by the clamav service |
| fullnameOverride | string | `""` | override the full name of the clamav chart |
| image | object | `{"repository":"clamav/clamav","type":"base"}` | The clamav docker image |
| limits.connectionQueueLength | int | `100` | Maximum length the queue of pending connections may grow to |
| limits.fileSize | int | `25` | The largest file size scannable by clamav, in MB |
| limits.maxThreads | int | `4` | Maximum number of threads running at the same time. |
| limits.scanSize | int | `100` | The largest scan size permitted in clamav, in MB |
| limits.sendBufTimeout | int | `500` |  |
| nameOverride | string | `""` | override the name of the clamav chart |
| replicaCount | int | `1` |  |
| resources | object | `{"requests":{"cpu":"300m","memory":"2000M"}}` | The resource requests and limits for the clamav service |
| service.port | int | `3310` | The port to be used by the clamav service |

----------------------------------------------
@@ -1,7 +0,0 @@
#!/bin/bash

release=clamav
namespace=clamav

helm template . --namespace $namespace --name-template $release > clamav.yaml
kubectl apply --namespace $namespace -f clamav.yaml
@@ -10,7 +10,7 @@ data:
 LogTime yes
 LogClean yes
 LogSyslog no
-LogVerbose no
+LogVerbose yes
 LogFileMaxSize 0
 LogFile /dev/stdout
 DatabaseDirectory /var/lib/clamav

@@ -19,28 +19,28 @@ data:
 User clamav
 ExitOnOOM yes
 Foreground yes
-MaxScanSize {{.Values.clamav.limits.scanSize}}M
-MaxFileSize {{.Values.clamav.limits.fileSize}}M
+MaxScanSize {{.Values.limits.scanSize}}M
+MaxFileSize {{.Values.limits.fileSize}}M

 # Close the connection when the data size limit is exceeded.
 # The value should match your MTA's limit for a maximum attachment size.
 # Default: 25M
-StreamMaxLength {{.Values.clamav.limits.scanSize}}M
+StreamMaxLength {{.Values.limits.scanSize}}M

 # Maximum length the queue of pending connections may grow to.
 # Default: 200
-MaxConnectionQueueLength {{.Values.clamav.limits.connectionQueueLength}}
+MaxConnectionQueueLength {{.Values.limits.connectionQueueLength}}

 # Maximum number of threads running at the same time.
 # Default: 10
-MaxThreads {{.Values.clamav.limits.maxThreads}}
+MaxThreads {{.Values.limits.maxThreads}}

 # This option specifies how long to wait (in milliseconds) if the send buffer
 # is full.
 # Keep this value low to prevent clamd hanging.
 #
 # Default: 500
-SendBufTimeout {{.Values.clamav.limits.sendBufTimeout}}
+SendBufTimeout {{.Values.limits.sendBufTimeout}}

 freshclam.conf: |
 LogTime yes
@@ -49,4 +49,4 @@ data:
 Checks 24
 LogSyslog no
 DatabaseOwner root
-DatabaseMirror {{ .Values.clamav.freshclam.mirrors }}
+DatabaseMirror {{ .Values.freshclam.mirrors }}
@@ -6,7 +6,7 @@ metadata:
   labels:
     {{- include "kubezero-lib.labels" . | nindent 4 }}
 spec:
-  replicas: {{ .Values.clamav.replicaCount }}
+  replicas: {{ .Values.replicaCount }}
   selector:
     matchLabels:
       {{- include "kubezero-lib.selectorLabels" . | nindent 6 }}
@@ -20,7 +20,7 @@ spec:
     spec:
       containers:
       - name: clamav
-        image: "{{ .Values.clamav.image }}:{{ default .Chart.AppVersion .Values.clamav.version }}_base"
+        image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}_{{ .Values.image.type }}"
        ports:
        - containerPort: 3310
          name: clamav
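For illustration, with the defaults above (`repository: clamav/clamav`, `type: base`, `image.tag` unset) the new image line renders along these lines — the app version used here is a made-up placeholder, since the chart's actual `appVersion` is not shown in this diff:

```yaml
# Hypothetical render, assuming .Chart.AppVersion were "1.2.0" and image.tag unset:
image: "clamav/clamav:1.2.0_base"
```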
@@ -41,7 +41,7 @@ spec:
          successThreshold: 1
          timeoutSeconds: 3
        resources:
-         {{- toYaml .Values.clamav.resources | nindent 10 }}
+         {{- toYaml .Values.resources | nindent 10 }}
        volumeMounts:
        - mountPath: /var/lib/clamav
          name: signatures
@@ -53,15 +53,15 @@ spec:
      - name: config-volume
        configMap:
          name: {{ include "kubezero-lib.fullname" . }}
-     {{- with .Values.clamav.nodeSelector }}
+     {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
-     {{- with .Values.clamav.affinity }}
+     {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
-     {{- with .Values.clamav.tolerations }}
+     {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -70,7 +70,7 @@ spec:
        name: signatures
      spec:
        accessModes: [ "ReadWriteOnce" ]
-       {{- with .Values.clamav.storageClassName }}
+       {{- with .Values.storageClassName }}
        storageClassName: {{ . }}
        {{- end }}
        resources:
@@ -0,0 +1,9 @@
#!/bin/bash
set -ex

. ../../scripts/lib-update.sh

update_helm


update_docs
@@ -1,46 +1,41 @@
 # Default values for clamav.
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

+# nameOverride -- override the name of the clamav chart
+nameOverride: ""
+
+# fullnameOverride -- override the full name of the clamav chart
+fullnameOverride: ""
+
+# image -- The clamav docker image
+image:
+  repository: clamav/clamav
+  # version: "latest"
+  type: base
+
+replicaCount: 1
+
+freshclam:
+  # freshclam.mirrors -- A list of clamav mirrors to be used by the clamav service
+  mirrors: database.clamav.net
+limits:
+  # limits.fileSize -- The largest file size scannable by clamav, in MB
+  fileSize: 25
+  # limits.scanSize -- The largest scan size permitted in clamav, in MB
+  scanSize: 100
+  # limits.connectionQueueLength -- Maximum length the queue of pending connections may grow to
+  connectionQueueLength: 100
+  # limits.maxThreads -- Maximum number of threads running at the same time.
+  maxThreads: 4
+  # sendBufTimeout -- This option specifies how long to wait (in milliseconds) if the send buffer is full, keep low to avoid clamd hanging
+  sendBufTimeout: 500
+
+service:
+  # service.port -- The port to be used by the clamav service
+  port: 3310
-clamav:
-  # clamav.image -- The clamav docker image
-  image: clamav/clamav
-  # clamav.version -- The clamav docker image version - defaults to .Chart.appVersion
-  version: "unstable"
-
-  replicaCount: 1
-
-  freshclam:
-    # clamav.freshclam.mirrors -- A list of clamav mirrors to be used by the clamav service
-    mirrors: database.clamav.net
-  limits:
-    # clamav.limits.fileSize -- The largest file size scannable by clamav, in MB
-    fileSize: 20
-    # clamav.limits.scanSize -- The largest scan size permitted in clamav, in MB
-    scanSize: 100
-    # clamav.limits.connectionQueueLength -- Maximum length the queue of pending connections may grow to
-    connectionQueueLength: 100
-    # clamav.limits.maxThreads -- Maximum number of threads running at the same time.
-    maxThreads: 4
-    # clamav.sendBufTimeout -- This option specifies how long to wait (in milliseconds) if the send buffer is full, keep low to avoid clamd hanging
-    sendBufTimeout: 500
-
-
-  resources:
-    # clamav.resources -- The resource requests and limits for the clamav service
-    requests:
-      cpu: 300m
-      memory: 1300M
-    #limits:
-    #  cpu: 1500m
-    #  memory: 2000M
+resources:
+  # resources -- The resource requests and limits for the clamav service
+  requests:
+    cpu: 300m
+    memory: 2000M
+  #limits:
+  #  cpu: 2
+  #  memory: 4000M
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.24.7
+version: 1.28.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -11,4 +11,4 @@ keywords:
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
-kubeVersion: ">= 1.24.0"
+kubeVersion: ">= 1.26.0"
@@ -1,6 +1,6 @@
 # kubeadm

-![Version: 1.24.7](https://img.shields.io/badge/Version-1.24.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 1.25.8](https://img.shields.io/badge/Version-1.25.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

 KubeZero Kubeadm cluster config

@@ -14,7 +14,7 @@ KubeZero Kubeadm cluster config

 ## Requirements

-Kubernetes: `>= 1.24.0`
+Kubernetes: `>= 1.25.0`

 ## Values

@@ -0,0 +1,159 @@
#!/bin/sh

function createMasterAuditPolicy() {
  path="templates/apiserver/audit-policy.yaml"

  known_apis='
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "node.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "scheduling.k8s.io"
      - group: "storage.k8s.io"'

  cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:cloud-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  - level: None
    users: ["cluster-autoscaler"]
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["configmaps", "endpoints"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
      - system:cloud-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*
      - /readyz

  # Don't log events requests because of performance impact.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]

  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"

  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"

  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps", "serviceaccounts/token"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources: ${known_apis}
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
EOF
}

createMasterAuditPolicy
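Note that `${known_apis}` is expanded by the shell when the heredoc is written, so the generated policy repeats the full group list in both the Request and RequestResponse rules. An excerpt of the rendered output would look roughly like this:

```yaml
# Rendered excerpt (sketch):
- level: Request
  verbs: ["get", "list", "watch"]
  resources:
    - group: "" # core
    - group: "admissionregistration.k8s.io"
    # ... remaining groups from known_apis ...
  omitStages:
    - "RequestReceived"
```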
@@ -2,14 +2,15 @@ apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
+featureGates:
+  EtcdLearnerMode: true # becomes beta in 1.29
 # NonGracefulFailover: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16
 etcd:
   local:
-    imageTag: 3.5.4-0
+    # imageTag: 3.5.12-0
     extraArgs:
       ### DNS discovery
       #discovery-srv: {{ .Values.domain }}
@@ -59,8 +60,11 @@ apiServer:
     audit-policy-file: /etc/kubernetes/apiserver/audit-policy.yaml
     audit-log-maxage: "7"
     audit-log-maxsize: "100"
-    audit-log-maxbackup: "3"
+    audit-log-maxbackup: "1"
+    audit-log-compress: "true"
+    {{- if .Values.api.falco.enabled }}
+    audit-webhook-config-file: /etc/kubernetes/apiserver/audit-webhook.yaml
+    {{- end }}
     tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
     admission-control-config-file: /etc/kubernetes/apiserver/admission-configuration.yaml
     api-audiences: {{ .Values.api.apiAudiences }}
@@ -70,12 +74,13 @@ apiServer:
     {{- end }}
     {{- if .Values.api.awsIamAuth.enabled }}
     authentication-token-webhook-config-file: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
+    authentication-token-webhook-cache-ttl: 3600s
     {{- end }}
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
     enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit,ExtendedResourceToleration
-    # {{- if .Values.global.highAvailable }}
-    # goaway-chance: ".001"
-    # {{- end }}
+    {{- if .Values.global.highAvailable }}
+    goaway-chance: ".001"
+    {{- end }}
     logging-format: json
     {{- with .Values.api.extraArgs }}
     {{- toYaml . | nindent 4 }}
@@ -2,6 +2,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# kube-proxy doesn't really support setting dynamic bind-address via config, replaced by cilium long-term anyways
metricsBindAddress: "0.0.0.0:10249"
# calico < 3.22.1 breaks starting with 1.23, see https://github.com/projectcalico/calico/issues/5011
# we go Cilium anyways
mode: "iptables"
logging:
  format: json
@@ -6,6 +6,7 @@ cgroupDriver: cgroupfs
 logging:
   format: json
 hairpinMode: hairpin-veth
+containerRuntimeEndpoint: "unix:///var/run/crio/crio.sock"
 {{- if .Values.systemd }}
 resolvConf: /run/systemd/resolve/resolv.conf
 {{- end }}

@@ -17,8 +18,8 @@ protectKernelDefaults: {{ .Values.protectKernelDefaults }}
 tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256]
 featureGates:
   {{- include "kubeadm.featuregates" ( dict "return" "map" ) | nindent 2 }}
-# Minimal unit is 50m per pod
-podsPerCore: 20
+# Minimal unit is 40m per pod
+podsPerCore: 25
 # cpuCFSQuotaPeriod: 10ms
 # Basic OS incl. crio
 systemReserved:

@@ -32,4 +33,5 @@ kubeReserved:
 #evictionHard:
 #  memory.available: "484Mi"
 imageGCLowThresholdPercent: 70
-# kernelMemcgNotification: true
+serializeImagePulls: false
+maxParallelImagePulls: 4
@@ -1,6 +1,11 @@
 {{- /* Feature gates for all control plane components */ -}}
 {{- /* Issues: MemoryQoS */ -}}
+{{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
+{{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
+{{- /* v1.29: remove/beta SidecarContainers */ -}}
+{{- /* v1.30: remove/beta KubeProxyDrainingTerminatingNodes */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "CronJobTimeZone" "NodeOutOfServiceVolumeDetach" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "SidecarContainers" "KubeProxyDrainingTerminatingNodes" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
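For illustration, with the updated gate list this helper would render roughly as follows — a sketch, since only the `csv` branch is visible in this hunk and the `map` branch output is inferred from how the kubelet config consumes it above:

```yaml
# return=csv (before the caller's trimSuffix ","):
#   CustomCPUCFSQuotaPeriod=true,SidecarContainers=true,KubeProxyDrainingTerminatingNodes=true,
# return=map (assumed shape, consumed by the kubelet featureGates block):
CustomCPUCFSQuotaPeriod: true
SidecarContainers: true
KubeProxyDrainingTerminatingNodes: true
```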
@@ -0,0 +1,7 @@
# Don't Log anything, but audit policy enabled
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
  name: kubezero-auditpolicy
rules:
  - level: None
@@ -1,7 +1,164 @@
# Don't Log anything, but audit policy enabled
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
  name: kubezero-auditpolicy
rules:
  - level: None
  # The following requests were manually identified as high-volume and low-risk,
  # so drop them.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
      - group: "" # core
        resources: ["endpoints", "services", "services/status"]
  - level: None
    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
    # TODO(#46983): Change this to the ingress controller service account.
    users: ["system:unsecured"]
    namespaces: ["kube-system"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["configmaps"]
  - level: None
    users: ["kubelet"] # legacy kubelet identity
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    userGroups: ["system:nodes"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["nodes", "nodes/status"]
  - level: None
    users:
      - system:kube-controller-manager
      - system:cloud-controller-manager
      - system:kube-scheduler
      - system:serviceaccount:kube-system:endpoint-controller
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["endpoints"]
  - level: None
    users: ["system:apiserver"]
    verbs: ["get"]
    resources:
      - group: "" # core
        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
  - level: None
    users: ["cluster-autoscaler"]
    verbs: ["get", "update"]
    namespaces: ["kube-system"]
    resources:
      - group: "" # core
        resources: ["configmaps", "endpoints"]
  # Don't log HPA fetching metrics.
  - level: None
    users:
      - system:kube-controller-manager
      - system:cloud-controller-manager
    verbs: ["get", "list"]
    resources:
      - group: "metrics.k8s.io"

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - /healthz*
      - /version
      - /swagger*

  # Don't log events requests because of performance impact.
  - level: None
    resources:
      - group: "" # core
        resources: ["events"]

  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"
  - level: Request
    userGroups: ["system:nodes"]
    verbs: ["update","patch"]
    resources:
      - group: "" # core
        resources: ["nodes/status", "pods/status"]
    omitStages:
      - "RequestReceived"

  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    users: ["system:serviceaccount:kube-system:namespace-controller"]
    verbs: ["deletecollection"]
    omitStages:
      - "RequestReceived"

  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    resources:
      - group: "" # core
        resources: ["secrets", "configmaps", "serviceaccounts/token"]
      - group: authentication.k8s.io
        resources: ["tokenreviews"]
    omitStages:
      - "RequestReceived"
  # Get responses can be large; skip them.
  - level: Request
    verbs: ["get", "list", "watch"]
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "node.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "scheduling.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for known APIs
  - level: RequestResponse
    resources:
      - group: "" # core
      - group: "admissionregistration.k8s.io"
      - group: "apiextensions.k8s.io"
      - group: "apiregistration.k8s.io"
      - group: "apps"
      - group: "authentication.k8s.io"
      - group: "authorization.k8s.io"
      - group: "autoscaling"
      - group: "batch"
      - group: "certificates.k8s.io"
      - group: "extensions"
      - group: "metrics.k8s.io"
      - group: "networking.k8s.io"
      - group: "node.k8s.io"
      - group: "policy"
      - group: "rbac.authorization.k8s.io"
      - group: "scheduling.k8s.io"
      - group: "storage.k8s.io"
    omitStages:
      - "RequestReceived"
  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - "RequestReceived"
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Config
clusters:
  - name: falco
    cluster:
      server: http://falco-k8saudit-webhook:9765/k8s-audit
contexts:
  - context:
      cluster: falco
      user: ""
    name: default-context
current-context: default-context
preferences: {}
users: []
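For context, this kubeconfig is the file the `audit-webhook-config-file` apiserver arg added above points at. A minimal values sketch to switch the wiring on, using the `api.falco.enabled` key introduced in the kubeadm values hunk elsewhere in this diff:

```yaml
api:
  falco:
    enabled: true  # routes apiserver audit events to falco-k8saudit-webhook:9765
```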
@@ -1,17 +0,0 @@
apiVersion: kubelet.config.k8s.io/v1beta1
kind: CredentialProviderConfig
providers:
  - name: amazon-ecr-credential-helper
    matchImages:
      - "*.dkr.ecr.*.amazonaws.com"
      - "*.dkr.ecr.*.amazonaws.cn"
      - "*.dkr.ecr-fips.*.amazonaws.com"
      - "*.dkr.ecr.us-iso-east-1.c2s.ic.gov"
      - "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov"
    defaultCacheDuration: "12h"
    apiVersion: credentialprovider.kubelet.k8s.io/v1alpha1
    args:
      - get
    #env:
    #  - name: AWS_PROFILE
    #    value: example_profile
@@ -12,13 +12,3 @@ spec:
       memory: 128Mi
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""
-  affinity:
-    podAntiAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-      - labelSelector:
-          matchExpressions:
-          - key: k8s-app
-            operator: In
-            values:
-            - kube-dns
-        topologyKey: "kubernetes.io/hostname"
@@ -3,6 +3,6 @@ spec:
   - name: etcd
     resources:
       requests:
-        cpu: 200m
-        memory: 192Mi
+        cpu: 50m
+        memory: 256Mi
         #ephemeral-storage: 1Gi
@@ -1,7 +1,8 @@
 spec:
+  dnsPolicy: ClusterFirstWithHostNet
   containers:
   - name: kube-apiserver
     resources:
       requests:
-        cpu: 200m
-        memory: 1Gi
+        cpu: 250m
+        memory: 1268Mi
@@ -3,5 +3,5 @@ spec:
   - name: kube-controller-manager
     resources:
       requests:
-        cpu: 100m
-        memory: 128Mi
+        cpu: 50m
+        memory: 192Mi
@@ -3,5 +3,5 @@ spec:
   - name: kube-scheduler
     resources:
       requests:
-        cpu: 100m
-        memory: 64Mi
+        cpu: 50m
+        memory: 96Mi
@@ -1,6 +1,6 @@
 {{- if .Values.api.awsIamAuth.enabled }}
-kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
 metadata:
   name: aws-iam-authenticator
 rules:
@@ -51,8 +51,8 @@ metadata:
   namespace: kube-system

 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   name: aws-iam-authenticator
   namespace: kube-system
@@ -85,8 +85,6 @@ metadata:
   name: aws-iam-authenticator
   labels:
     k8s-app: aws-iam-authenticator
-  annotations:
-    seccomp.security.alpha.kubernetes.io/pod: runtime/default
 spec:
   selector:
     matchLabels:
@@ -98,6 +96,10 @@ spec:
       labels:
         k8s-app: aws-iam-authenticator
     spec:
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+
       priorityClassName: system-cluster-critical

       # use service account with access to
@@ -110,14 +112,12 @@ spec:
       nodeSelector:
         node-role.kubernetes.io/control-plane: ""
       tolerations:
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
       - effect: NoSchedule
         key: node-role.kubernetes.io/control-plane

       containers:
       - name: aws-iam-authenticator
-        image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.11
+        image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.14
         args:
         - server
         - --backend-mode=CRD,MountedFile
@@ -133,7 +133,7 @@ spec:

         resources:
           requests:
-            memory: 20Mi
+            memory: 32Mi
             cpu: 10m
           limits:
             memory: 64Mi
@@ -25,6 +25,9 @@ api:
   workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
   kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"

+  falco:
+    enabled: false
+
 etcd:
   nodeName: etcd
   state: new
@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.7.1
-appVersion: v1.24
+version: 0.8.7
+appVersion: v1.28
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -11,27 +11,37 @@ keywords:
   - fuse-device-plugin
   - neuron-device-plugin
   - nvidia-device-plugin
-  - aws-node-termination-handler
   - cluster-autoscaler
+  - sealed-secrets
+  - external-dns
+  - aws-node-termination-handler
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
 dependencies:
-  - name: aws-node-termination-handler
-    version: 0.20.0
-    # repository: https://aws.github.io/eks-charts
-    condition: aws-node-termination-handler.enabled
   - name: external-dns
-    version: 1.11.0
+    version: 1.14.4
     repository: https://kubernetes-sigs.github.io/external-dns/
     condition: external-dns.enabled
   - name: cluster-autoscaler
-    version: 9.21.0
+    version: 9.36.0
     repository: https://kubernetes.github.io/autoscaler
     condition: cluster-autoscaler.enabled
   - name: nvidia-device-plugin
-    version: 0.12.3
+    version: 0.15.0
    # https://github.com/NVIDIA/k8s-device-plugin
    repository: https://nvidia.github.io/k8s-device-plugin
    condition: nvidia-device-plugin.enabled
-kubeVersion: ">= 1.24.0"
+  - name: sealed-secrets
+    version: 2.15.3
+    repository: https://bitnami-labs.github.io/sealed-secrets
+    condition: sealed-secrets.enabled
+  - name: aws-node-termination-handler
+    version: 0.23.0
+    repository: "oci://public.ecr.aws/aws-ec2/helm"
+    condition: aws-node-termination-handler.enabled
+  - name: aws-eks-asg-rolling-update-handler
+    version: 1.5.0
+    repository: https://twin.github.io/helm-charts
+    condition: aws-eks-asg-rolling-update-handler.enabled
+kubeVersion: ">= 1.26.0"
@@ -1,6 +1,6 @@
 # kubezero-addons

-![Version: 0.7.1](https://img.shields.io/badge/Version-0.7.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.24](https://img.shields.io/badge/AppVersion-v1.24-informational?style=flat-square)
+![Version: 0.8.7](https://img.shields.io/badge/Version-0.8.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.28](https://img.shields.io/badge/AppVersion-v1.28-informational?style=flat-square)

 KubeZero umbrella chart for various optional cluster addons

@@ -14,14 +14,16 @@ KubeZero umbrella chart for various optional cluster addons

 ## Requirements

-Kubernetes: `>= 1.24.0`
+Kubernetes: `>= 1.26.0`

 | Repository | Name | Version |
 |------------|------|---------|
-| | aws-node-termination-handler | 0.20.0 |
-| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
-| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
-| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.12.3 |
+| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.15.3 |
+| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.14.4 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.36.0 |
+| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.15.0 |
+| https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.23.0 |

 # MetalLB

@@ -39,6 +41,38 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)

 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
+| aws-eks-asg-rolling-update-handler.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | |
+| aws-eks-asg-rolling-update-handler.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | |
+| aws-eks-asg-rolling-update-handler.enabled | bool | `false` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[0].name | string | `"CLUSTER_NAME"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[0].value | string | `""` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[1].name | string | `"AWS_REGION"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[1].value | string | `"us-west-2"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[2].name | string | `"EXECUTION_INTERVAL"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[2].value | string | `"60"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[3].name | string | `"METRICS"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[3].value | string | `"true"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[4].name | string | `"EAGER_CORDONING"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[4].value | string | `"true"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[5].name | string | `"SLOW_MODE"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[5].value | string | `"true"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[6].name | string | `"AWS_ROLE_ARN"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[6].value | string | `""` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[7].name | string | `"AWS_WEB_IDENTITY_TOKEN_FILE"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[7].value | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/token"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[8].name | string | `"AWS_STS_REGIONAL_ENDPOINTS"` | |
+| aws-eks-asg-rolling-update-handler.environmentVars[8].value | string | `"regional"` | |
+| aws-eks-asg-rolling-update-handler.image.repository | string | `"twinproduction/aws-eks-asg-rolling-update-handler"` | |
+| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.8.3"` | |
+| aws-eks-asg-rolling-update-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
+| aws-eks-asg-rolling-update-handler.resources.limits.memory | string | `"128Mi"` | |
+| aws-eks-asg-rolling-update-handler.resources.requests.cpu | string | `"10m"` | |
+| aws-eks-asg-rolling-update-handler.resources.requests.memory | string | `"32Mi"` | |
+| aws-eks-asg-rolling-update-handler.securityContext.runAsNonRoot | bool | `true` | |
+| aws-eks-asg-rolling-update-handler.securityContext.runAsUser | int | `1001` | |
+| aws-eks-asg-rolling-update-handler.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| aws-eks-asg-rolling-update-handler.tolerations[0].effect | string | `"NoSchedule"` | |
+| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | aws-node-termination-handler.deleteLocalData | bool | `true` | |
 | aws-node-termination-handler.emitKubernetesEvents | bool | `true` | |
 | aws-node-termination-handler.enableProbesServer | bool | `true` | |
@@ -55,7 +89,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.ignoreDaemonSets | bool | `true` | |
 | aws-node-termination-handler.jsonLogging | bool | `true` | |
 | aws-node-termination-handler.logFormatVersion | int | `2` | |
-| aws-node-termination-handler.managedTag | string | `"aws-node-termination-handler/managed"` | "aws-node-termination-handler/${ClusterName}" |
+| aws-node-termination-handler.managedTag | string | `"zdt:kubezero:nth:${ClusterName}"` | "zdt:kubezero:nth:${ClusterName}" |
 | aws-node-termination-handler.metadataTries | int | `0` | |
 | aws-node-termination-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | aws-node-termination-handler.podMonitor.create | bool | `false` | |
@@ -63,18 +97,20 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.rbac.pspEnabled | bool | `false` | |
 | aws-node-termination-handler.taintNode | bool | `true` | |
 | aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
-| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
-| aws-node-termination-handler.tolerations[1].effect | string | `"NoSchedule"` | |
-| aws-node-termination-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | aws-node-termination-handler.useProviderId | bool | `true` | |
 | awsNeuron.enabled | bool | `false` | |
 | awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
-| awsNeuron.image.tag | string | `"1.9.3.0"` | |
+| awsNeuron.image.tag | string | `"2.19.16.0"` | |
 | cluster-autoscaler.autoDiscovery.clusterName | string | `""` | |
 | cluster-autoscaler.awsRegion | string | `"us-west-2"` | |
 | cluster-autoscaler.enabled | bool | `false` | |
 | cluster-autoscaler.extraArgs.balance-similar-node-groups | bool | `true` | |
 | cluster-autoscaler.extraArgs.ignore-taint | string | `"node.cilium.io/agent-not-ready"` | |
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
 | cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
 | cluster-autoscaler.image.tag | string | `"v1.28.2"` | |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` | |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` | |
@@ -82,9 +118,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.serviceMonitor.enabled | bool | `false` | |
 | cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
 | cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
-| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
-| cluster-autoscaler.tolerations[1].effect | string | `"NoSchedule"` | |
-| cluster-autoscaler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | clusterBackup.enabled | bool | `false` | |
 | clusterBackup.extraEnv | list | `[]` | |
 | clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
@@ -96,9 +130,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | external-dns.provider | string | `"inmemory"` | |
 | external-dns.sources[0] | string | `"service"` | |
 | external-dns.tolerations[0].effect | string | `"NoSchedule"` | |
-| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
-| external-dns.tolerations[1].effect | string | `"NoSchedule"` | |
-| external-dns.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | external-dns.triggerLoopOnEvent | bool | `true` | |
 | forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" |
 | forseti.aws.region | string | `""` | |
@@ -106,9 +138,15 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | forseti.image.name | string | `"public.ecr.aws/zero-downtime/forseti"` | |
 | forseti.image.tag | string | `"v0.1.2"` | |
 | fuseDevicePlugin.enabled | bool | `false` | |
+| fuseDevicePlugin.image.name | string | `"public.ecr.aws/zero-downtime/fuse-device-plugin"` | |
+| fuseDevicePlugin.image.tag | string | `"v1.2.0"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/instance-type"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"g5.xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[10] | string | `"g4dn.4xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[11] | string | `"g4dn.8xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[12] | string | `"g4dn.12xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[13] | string | `"g4dn.16xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[1] | string | `"g5.2xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[2] | string | `"g5.4xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[3] | string | `"g5.8xlarge"` | |
@@ -116,6 +154,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[5] | string | `"g5.16xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[6] | string | `"g5.24xlarge"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[7] | string | `"g5.48xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[8] | string | `"g4dn.xlarge"` | |
+| nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[9] | string | `"g4dn.2xlarge"` | |
 | nvidia-device-plugin.enabled | bool | `false` | |
 | nvidia-device-plugin.tolerations[0].effect | string | `"NoSchedule"` | |
 | nvidia-device-plugin.tolerations[0].key | string | `"nvidia.com/gpu"` | |
@@ -123,3 +163,13 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | nvidia-device-plugin.tolerations[1].effect | string | `"NoSchedule"` | |
 | nvidia-device-plugin.tolerations[1].key | string | `"kubezero-workergroup"` | |
 | nvidia-device-plugin.tolerations[1].operator | string | `"Exists"` | |
+| sealed-secrets.enabled | bool | `false` | |
+| sealed-secrets.fullnameOverride | string | `"sealed-secrets-controller"` | |
+| sealed-secrets.keyrenewperiod | string | `"0"` | |
+| sealed-secrets.metrics.serviceMonitor.enabled | bool | `false` | |
+| sealed-secrets.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
+| sealed-secrets.resources.limits.memory | string | `"128Mi"` | |
+| sealed-secrets.resources.requests.cpu | string | `"10m"` | |
+| sealed-secrets.resources.requests.memory | string | `"24Mi"` | |
+| sealed-secrets.tolerations[0].effect | string | `"NoSchedule"` | |
+| sealed-secrets.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
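Putting the new table entries to use, a minimal kubezero-addons values sketch that enables the two addons introduced here might look like this (the cluster name is a placeholder):

```yaml
sealed-secrets:
  enabled: true
aws-eks-asg-rolling-update-handler:
  enabled: true
  environmentVars:
    - name: CLUSTER_NAME
      value: my-cluster   # placeholder, set to your cluster's name
```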
@@ -0,0 +1,30 @@
diff -tuNr charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml
--- charts/aws-eks-asg-rolling-update-handler.orig/templates/deployment.yaml	2023-04-12 15:49:08.744242462 +0000
+++ charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml	2023-04-12 15:55:44.399489809 +0000
@@ -34,6 +34,26 @@
           resources:
             {{- toYaml . | nindent 12 }}
           {{- end }}
+          volumeMounts:
+            - name: aws-token
+              mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
+              readOnly: true
+      volumes:
+        - name: aws-token
+          projected:
+            sources:
+              - serviceAccountToken:
+                  path: token
+                  expirationSeconds: 86400
+                  audience: "sts.amazonaws.com"
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.imagePullSecrets }}
       imagePullSecrets:
         {{- toYaml . | nindent 8 }}
@@ -0,0 +1,8 @@
apiVersion: v2
description: Handles rolling upgrades for AWS ASGs for EKS by replacing outdated nodes
  by new nodes.
home: https://github.com/TwiN/aws-eks-asg-rolling-update-handler
maintainers:
  - name: TwiN
name: aws-eks-asg-rolling-update-handler
version: 1.5.0
@@ -0,0 +1,16 @@
# aws-eks-asg-rolling-update-handler

## Configuration
The following table lists the configurable parameters of the aws-eks-asg-rolling-update-handler chart and their default values.
| Parameters | Description | Required | Default |
|:-----------|:------------|:---------|:------------|
| environmentVars | Environment variables for the aws-eks-asg-rolling-update-handler container, available variables are listed [here](https://github.com/TwiN/aws-eks-asg-rolling-update-handler/blob/master/README.md#usage) | yes |`[{"name":"CLUSTER_NAME","value":"cluster-name"}]`|
| replicaCount | Number of aws-eks-asg-rolling-update-handler replicas | yes |`1` |
| image.repository | Image repository | yes | `twinproduction/aws-eks-asg-rolling-update-handler` |
| image.tag | Image tag | yes | `v1.4.3` |
| image.pullPolicy | Image pull policy | yes | `IfNotPresent` |
| resources | CPU/memory resource requests/limits | no | `{}` |
| podAnnotations | Annotations to add to the aws-eks-asg-rolling-update-handler pod configuration | no | `{}` |
| podLabels | Labels to add to the aws-eks-asg-rolling-update-handler pod configuration | no | `{}` |
| securityContext | Pod security context | no | `{}` |
| containerSecurityContext | Container security context | no | `{}` |
@@ -0,0 +1,31 @@
{{/*
Create a default app name.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.name" -}}
{{- .Chart.Name -}}
{{- end -}}

{{/*
Create a default namespace.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.namespace" -}}
{{- .Release.Namespace -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "aws-eks-asg-rolling-update-handler.labels" -}}
app.kubernetes.io/name: {{ include "aws-eks-asg-rolling-update-handler.name" . }}
{{- end -}}

{{/*
Create the name of the service account to use.
*/}}
{{- define "aws-eks-asg-rolling-update-handler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aws-eks-asg-rolling-update-handler.name" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
@@ -0,0 +1,15 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
roleRef:
  kind: ClusterRole
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
    namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
@@ -0,0 +1,41 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
rules:
  - apiGroups:
      - "*"
    resources:
      - "*"
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "*"
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
      - update
      - patch
  - apiGroups:
      - "*"
    resources:
      - pods/eviction
    verbs:
      - get
      - list
      - create
  - apiGroups:
      - "*"
    resources:
      - pods
    verbs:
      - get
      - list
@@ -0,0 +1,71 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
  namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 6 }}
  template:
    metadata:
      labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      annotations:
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- if .Values.securityContext }}
      securityContext:
        {{ toYaml .Values.securityContext | nindent 8 | trim }}
      {{- end }}
      automountServiceAccountToken: true
      serviceAccountName: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
      restartPolicy: Always
      dnsPolicy: Default
      containers:
        - name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
          image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- if .Values.containerSecurityContext }}
          securityContext:
            {{ toYaml .Values.containerSecurityContext | nindent 12 | trim }}
          {{- end }}
          env:
            {{- toYaml .Values.environmentVars | nindent 12 }}
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: aws-token
              mountPath: "/var/run/secrets/sts.amazonaws.com/serviceaccount/"
              readOnly: true
      volumes:
        - name: aws-token
          projected:
            sources:
              - serviceAccountToken:
                  path: token
                  expirationSeconds: 86400
                  audience: "sts.amazonaws.com"
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
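The projected `aws-token` volume above pairs with the `AWS_ROLE_ARN` / `AWS_WEB_IDENTITY_TOKEN_FILE` environment variables from the kubezero-addons values table earlier in this diff; a sketch of the matching settings (the role ARN is a placeholder):

```yaml
environmentVars:
  - name: AWS_ROLE_ARN
    value: "arn:aws:iam::000000000000:role/rolling-update-handler"  # placeholder
  - name: AWS_WEB_IDENTITY_TOKEN_FILE
    value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
  - name: AWS_STS_REGIONAL_ENDPOINTS
    value: "regional"
```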
@@ -0,0 +1,13 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
  namespace: {{ template "aws-eks-asg-rolling-update-handler.namespace" . }}
  labels:
{{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{ end }}
@@ -0,0 +1,51 @@
replicaCount: 1

image:
  repository: twinproduction/aws-eks-asg-rolling-update-handler
  tag: v1.7.0
  pullPolicy: IfNotPresent

#imagePullSecrets:
#- imagePullSecret

environmentVars:
  - name: CLUSTER_NAME
    value: "cluster-name" # REPLACE THIS WITH THE NAME OF YOUR EKS CLUSTER
  #- name: AUTO_SCALING_GROUP_NAMES
  #  value: "asg-1,asg-2,asg-3" # REPLACE THESE VALUES FOR THE NAMES OF THE ASGs, if CLUSTER_NAME is provided, this is ignored
  #- name: IGNORE_DAEMON_SETS
  #  value: "true"
  #- name: DELETE_LOCAL_DATA
  #  value: "true"
  #- name: AWS_REGION
  #  value: us-west-2
  #- name: ENVIRONMENT
  #  value: ""

resources: {}
#  limits:
#    cpu: 0.3
#    memory: 100Mi
#  requests:
#    cpu: 0.1
#    memory: 50Mi
podAnnotations: {}
#  prometheus.io/port: "8080"
#  prometheus.io/scrape: "true"
podLabels: {}
serviceAccount:
  create: true
  #name: aws-eks-asg-rolling-update-handler
  annotations: {}

securityContext: {}
#  runAsNonRoot: true
#  runAsUser: 1001
#  seccompProfile:
#    type: RuntimeDefault

containerSecurityContext: {}
#  allowPrivilegeEscalation: false
#  capabilities:
#    drop:
#      - ALL
@@ -1,7 +1,7 @@
 apiVersion: v2
-appVersion: 1.18.0
+appVersion: 1.21.0
 description: A Helm chart for the AWS Node Termination Handler.
-home: https://github.com/aws/eks-charts
+home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
 keywords:
   - aws

@@ -20,6 +20,5 @@ maintainers:
 name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
-  - https://github.com/aws/eks-charts/
 type: application
-version: 0.20.0
+version: 0.23.0
@@ -8,22 +8,24 @@ AWS Node Termination Handler Helm chart for Kubernetes. For more information on

 ## Installing the Chart

-Before you can install the chart you will need to add the `aws` repo to [Helm](https://helm.sh/).
+Before you can install the chart you will need to authenticate your Helm client.

 ```shell
-helm repo add eks https://aws.github.io/eks-charts/
+aws ecr-public get-login-password \
+     --region us-east-1 | helm registry login \
+     --username AWS \
+     --password-stdin public.ecr.aws
 ```

-After you've installed the repo you can install the chart, the following command will install the chart with the release name `aws-node-termination-handler` and the default configuration to the `kube-system` namespace.
+Once the helm registry login succeeds, use the following command to install the chart with the release name `aws-node-termination-handler` and the default configuration to the `kube-system` namespace. In the below command, add the CHART_VERSION that you want to install.

 ```shell
-helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler
+helm upgrade --install --namespace kube-system aws-node-termination-handler oci://public.ecr.aws/aws-ec2/helm/aws-node-termination-handler --version $CHART_VERSION
 ```

 To install the chart on an EKS cluster where the AWS Node Termination Handler is already installed, you can run the following command.

 ```shell
-helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler --recreate-pods --force
+helm upgrade --install --namespace kube-system aws-node-termination-handler oci://public.ecr.aws/aws-ec2/helm/aws-node-termination-handler --version $CHART_VERSION --recreate-pods --force
 ```

 If you receive an error similar to the one below simply rerun the above command.
@@ -33,7 +35,7 @@ If you receive an error similar to the one below simply rerun the above command
 To uninstall the `aws-node-termination-handler` chart installation from the `kube-system` namespace run the following command.

 ```shell
-helm delete --namespace kube-system aws-node-termination-handler
+helm uninstall --namespace kube-system aws-node-termination-handler
 ```

 ## Configuration
@@ -56,7 +58,7 @@ The configuration in this table applies to all AWS Node Termination Handler mode
 | `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `nil` |
 | `serviceAccount.annotations` | Annotations to add to the service account. | `{}` |
 | `rbac.create` | If `true`, create the RBAC resources. | `true` |
-| `rbac.pspEnabled` | If `true`, create a pod security policy resource. | `true` |
+| `rbac.pspEnabled` | If `true`, create a pod security policy resource. Note: `PodSecurityPolicy`s will not be created when Kubernetes version is 1.25 or later. | `true` |
 | `customLabels` | Labels to add to all resource metadata. | `{}` |
 | `podLabels` | Labels to add to the pod. | `{}` |
 | `podAnnotations` | Annotations to add to the pod. | `{}` |
@@ -117,13 +119,13 @@ The configuration in this table applies to AWS Node Termination Handler in queue
| `checkASGTagBeforeDraining` | [DEPRECATED](Use `checkTagBeforeDraining` instead) If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. If `false`, disables calls to the ASG API. | `true` |
| `managedAsgTag` | [DEPRECATED](Use `managedTag` instead) The node tag to check if `checkASGTagBeforeDraining` is `true`. |
| `useProviderId` | If `true`, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname. | `false` |

| `topologySpreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for pod scheduling. Useful with a highly available deployment to reduce the risk of running multiple replicas on the same Node | `[]` |

### IMDS Mode Configuration

The configuration in this table applies to AWS Node Termination Handler in IMDS mode.

| Parameter | Description | Default |
| --------- | ----------- | ------- |
| `targetNodeOs` | Space separated list of node OS's to target (e.g. `"linux"`, `"windows"`, `"linux windows"`). Windows support is **EXPERIMENTAL**. | `"linux"` |
| `linuxPodLabels` | Labels to add to each Linux pod. | `{}` |
| `windowsPodLabels` | Labels to add to each Windows pod. | `{}` |
@@ -138,7 +140,7 @@ The configuration in this table applies to AWS Node Termination Handler in IMDS
| `podMonitor.sampleLimit` | Number of scraped samples accepted. | `5000` |
| `useHostNetwork` | If `true`, enables `hostNetwork` for the Linux DaemonSet. NOTE: setting this to `false` may cause issues accessing IMDSv2 if your account is not configured with an IP hop count of 2; see [Metrics Endpoint Considerations](#metrics-endpoint-considerations) | `true` |
| `dnsPolicy` | If specified, this overrides `linuxDnsPolicy` and `windowsDnsPolicy` with a single policy. | `""` |
| `dnsConfig` | If specified, this sets the dnsConfig: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config | `{}` |
| `linuxDnsPolicy` | DNS policy for the Linux DaemonSet. | `""` |
| `windowsDnsPolicy` | DNS policy for the Windows DaemonSet. | `""` |
| `daemonsetNodeSelector` | Expressions to select a node by its labels for DaemonSet pod assignment. For backwards compatibility the `nodeSelector` value has priority over this but shouldn't be used. | `{}` |
@@ -152,10 +154,11 @@ The configuration in this table applies to AWS Node Termination Handler in IMDS
| `windowsTolerations` | Override `daemonsetTolerations` for the Windows DaemonSet. | `[]` |
| `enableProbesServer` | If `true`, start an http server exposing a `/healthz` endpoint for probes. | `false` |
| `metadataTries` | The number of times to try requesting metadata. | `3` |
| `enableSpotInterruptionDraining` | If `true`, drain nodes when the spot interruption termination notice is received. | `true` |
| `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. This is **EXPERIMENTAL**. | `false` |
| `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. | `false` |
| `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. | `false` |
| `enableSpotInterruptionDraining` | If `true`, drain nodes when the spot interruption termination notice is received. Only used in IMDS mode. | `true` |
| `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. Only used in IMDS mode. | `true` |
| `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. Only used in IMDS mode. | `false` |
| `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. Only used in IMDS mode. | `false` |
| `deleteSqsMsgIfNodeNotFound` | If `true`, delete the SQS Message from the SQS Queue if the targeted node is not found. Only used in Queue Processor mode. | `false` |

### Testing Configuration

@@ -171,6 +174,6 @@ The configuration in this table applies to AWS Node Termination Handler testing

## Metrics Endpoint Considerations

AWS Node Termination HAndler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.
AWS Node Termination Handler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.

You can switch NTH in IMDS mode to run w/ `useHostNetwork: false`, but you will need to make sure that IMDSv1 is enabled or the IMDSv2 IP hop count will need to be incremented to 2 (see the [IMDSv2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html)).
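
(A minimal sketch of the firewall note above, not from the chart itself: on nodes where only local scraping should reach the metrics port, a node-level iptables rule along these lines would block outside access. The port assumes the default `prometheusServerPort: 9092`.)

```shell
# Illustrative only: drop traffic to the NTH metrics port unless it arrives on loopback.
# Assumes the default prometheusServerPort of 9092; adjust to your configuration.
iptables -A INPUT -p tcp --dport 9092 ! -i lo -j DROP
```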
@@ -146,14 +146,6 @@ spec:
- name: WEBHOOK_TEMPLATE
  value: {{ .Values.webhookTemplate | quote }}
{{- end }}
- name: ENABLE_SPOT_INTERRUPTION_DRAINING
  value: {{ .Values.enableSpotInterruptionDraining | quote }}
- name: ENABLE_SCHEDULED_EVENT_DRAINING
  value: {{ .Values.enableScheduledEventDraining | quote }}
- name: ENABLE_REBALANCE_MONITORING
  value: {{ .Values.enableRebalanceMonitoring | quote }}
- name: ENABLE_REBALANCE_DRAINING
  value: {{ .Values.enableRebalanceDraining | quote }}
- name: ENABLE_SQS_TERMINATION_DRAINING
  value: "true"
{{- with .Values.awsRegion }}
@@ -172,6 +164,8 @@ spec:
{{- end }}
- name: QUEUE_URL
  value: {{ .Values.queueURL | quote }}
- name: DELETE_SQS_MSG_IF_NODE_NOT_FOUND
  value: {{ .Values.deleteSqsMsgIfNodeNotFound | quote }}
- name: WORKERS
  value: {{ .Values.workers | quote }}
{{- with .Values.extraEnv }}
@@ -226,4 +220,8 @@ spec:
tolerations:
  {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
  {{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
@@ -1,4 +1,4 @@
{{- if .Values.rbac.pspEnabled }}
{{- if and (.Values.rbac.pspEnabled) (semverCompare "<1.25-0" .Capabilities.KubeVersion.GitVersion) }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
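
(A quick way to sanity-check this version gate — a sketch, not part of the changeset — is to render the chart against two `--kube-version` values and confirm the `PodSecurityPolicy` only appears below 1.25. The release name `nth` and the `charts/` path from the update script are used for illustration.)

```shell
# Should print a PodSecurityPolicy manifest (cluster < 1.25):
helm template nth charts/aws-node-termination-handler --set rbac.pspEnabled=true --kube-version 1.24.0 | grep PodSecurityPolicy
# Should print nothing (cluster >= 1.25):
helm template nth charts/aws-node-termination-handler --set rbac.pspEnabled=true --kube-version 1.25.0 | grep PodSecurityPolicy
```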
@@ -23,7 +23,7 @@ serviceAccount:
rbac:
  # Specifies whether RBAC resources should be created
  create: true
  # Specifies if PodSecurityPolicy resources should be created
  # Specifies if PodSecurityPolicy resources should be created. PodSecurityPolicy will not be created when Kubernetes version is 1.25 or later.
  pspEnabled: true

customLabels: {}
@@ -52,6 +52,8 @@ affinity: {}

tolerations: []

topologySpreadConstraints: []

# Extra environment variables
extraEnv: []
@@ -259,24 +261,27 @@ daemonsetTolerations:
linuxTolerations: []
windowsTolerations: []

# If the probes server is running for the Daemonset
# If the probes server is running.
enableProbesServer: false

# Total number of times to try making the metadata request before failing.
metadataTries: 3

# enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received
# enableSpotInterruptionDraining If false, do not drain nodes when the spot interruption termination notice is received. Only used in IMDS mode.
enableSpotInterruptionDraining: true

# enableScheduledEventDraining [EXPERIMENTAL] If true, drain nodes before the maintenance window starts for an EC2 instance scheduled event
enableScheduledEventDraining: false
# enableScheduledEventDraining If false, do not drain nodes before the maintenance window starts for an EC2 instance scheduled event. Only used in IMDS mode.
enableScheduledEventDraining: true

# enableRebalanceMonitoring If true, cordon nodes when the rebalance recommendation notice is received
# enableRebalanceMonitoring If true, cordon nodes when the rebalance recommendation notice is received. Only used in IMDS mode.
enableRebalanceMonitoring: false

# enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received
# enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received. Only used in IMDS mode.
enableRebalanceDraining: false

# deleteSqsMsgIfNodeNotFound If true, delete the SQS Message from the SQS Queue if the targeted node(s) are not found. Only used in Queue Processor mode.
deleteSqsMsgIfNodeNotFound: false

# ---------------------------------------------------------------------------------------------------------------------
# Testing
# ---------------------------------------------------------------------------------------------------------------------
@@ -24,21 +24,22 @@ spec:
volumeMounts:
- name: host
  mountPath: /host
  #readOnly: true
- name: workdir
  mountPath: /tmp
env:
- name: DEBUG
  value: ""
- name: RESTIC_REPOSITORY
  valueFrom:
    secretKeyRef:
      name: kubezero-backup-restic
      key: repository
- name: RESTIC_PASSWORD
  valueFrom:
    secretKeyRef:
      name: kubezero-backup-restic
      key: password
{{- with .Values.clusterBackup.extraEnv }}
{{- toYaml . | nindent 12 }}
{{- end }}
@@ -54,9 +55,8 @@ spec:
emptyDir: {}
nodeSelector:
  node-role.kubernetes.io/control-plane: ""
priorityClassName: system-cluster-critical
tolerations:
- key: node-role.kubernetes.io/master
  effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
  effect: NoSchedule
restartPolicy: Never
@@ -17,6 +17,8 @@ spec:
spec:
  serviceAccount: neuron-device-plugin
  tolerations:
  - key: CriticalAddonsOnly
    operator: Exists
  - key: aws.amazon.com/neuron
    operator: Exists
    effect: NoSchedule
@@ -40,6 +42,14 @@ spec:
- inf1.2xlarge
- inf1.6xlarge
- inf1.24xlarge
- inf2.xlarge
- inf2.4xlarge
- inf2.8xlarge
- inf2.24xlarge
- inf2.48xlarge
- trn1.2xlarge
- trn1.32xlarge
- trn1n.32xlarge
containers:
- image: "{{ .Values.awsNeuron.image.name }}:{{ .Values.awsNeuron.image.tag }}"
  imagePullPolicy: IfNotPresent
@@ -13,9 +13,15 @@ spec:
labels:
  name: fuse-device-plugin
spec:
  hostNetwork: true
  priorityClassName: system-node-critical
  tolerations:
  - key: CriticalAddonsOnly
    operator: Exists
  - key: kubezero-workergroup
    effect: NoSchedule
    operator: Exists
  containers:
  - image: public.ecr.aws/zero-downtime/fuse-device-plugin:v1.1.0
  - image: "{{ .Values.fuseDevicePlugin.image.name }}:{{ .Values.fuseDevicePlugin.image.tag }}"
    # imagePullPolicy: Always
    name: fuse-device-plugin
    securityContext:
@@ -69,10 +69,9 @@ spec:
nodeSelector:
  node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
  effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
  effect: NoSchedule
priorityClassName: system-cluster-critical
volumes:
- name: aws-token
  projected:
@@ -1,14 +1,12 @@
#!/bin/bash
set -ex

helm repo update
. ../../scripts/lib-update.sh

NTH_VERSION=$(yq eval '.dependencies[] | select(.name=="aws-node-termination-handler") | .version' Chart.yaml)
login_ecr_public
update_helm

rm -rf charts/aws-node-termination-handler
helm pull eks/aws-node-termination-handler --untar --untardir charts --version $NTH_VERSION
patch_chart aws-node-termination-handler
patch_chart aws-eks-asg-rolling-update-handler

# diff -tuNr charts/aws-node-termination-handler.orig charts/aws-node-termination-handler > nth.patch
patch -p0 -i nth.patch --no-backup-if-mismatch

helm dep update
update_docs
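
(A hedged note on the commented `diff`/`patch` pair above, assuming the conventional workflow where a pristine `.orig` copy of the vendored chart is kept beside the patched tree; both commands appear verbatim in the script.)

```shell
# Regenerate the patch after hand-editing the vendored chart:
diff -tuNr charts/aws-node-termination-handler.orig charts/aws-node-termination-handler > nth.patch
# Re-apply it after the next upstream refresh (e.g. helm pull):
patch -p0 -i nth.patch --no-backup-if-mismatch
```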
@@ -24,13 +24,91 @@ forseti:
  # -- "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti"
  iamRoleArn: ""

sealed-secrets:
  enabled: false

  # ensure kubeseal default values match
  fullnameOverride: sealed-secrets-controller

  # Disable auto keyrotation for now
  keyrenewperiod: "0"

  resources:
    requests:
      cpu: 10m
      memory: 24Mi
    limits:
      memory: 128Mi

  metrics:
    serviceMonitor:
      enabled: false

  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule

aws-eks-asg-rolling-update-handler:
  enabled: false
  image:
    repository: twinproduction/aws-eks-asg-rolling-update-handler
    tag: v1.8.3

  environmentVars:
  - name: CLUSTER_NAME
    value: ""
  - name: AWS_REGION
    value: us-west-2
  - name: EXECUTION_INTERVAL
    value: "60"
  - name: METRICS
    value: "true"
  - name: EAGER_CORDONING
    value: "true"
  # Only disable if all services have PDBs across AZs
  - name: SLOW_MODE
    value: "true"
  - name: AWS_ROLE_ARN
    value: ""
  - name: AWS_WEB_IDENTITY_TOKEN_FILE
    value: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
  - name: AWS_STS_REGIONAL_ENDPOINTS
    value: "regional"

  securityContext:
    runAsNonRoot: true
    runAsUser: 1001
    seccompProfile:
      type: RuntimeDefault

  containerSecurityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
      - ALL

  resources:
    requests:
      cpu: 10m
      memory: 32Mi
    limits:
      memory: 128Mi

  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule

aws-node-termination-handler:
  enabled: false

  fullnameOverride: "aws-node-termination-handler"

  # -- "aws-node-termination-handler/${ClusterName}"
  managedTag: "aws-node-termination-handler/managed"
  # -- "zdt:kubezero:nth:${ClusterName}"
  managedTag: "zdt:kubezero:nth:${ClusterName}"

  useProviderId: true
  enableSqsTerminationDraining: true
@@ -63,8 +141,6 @@ aws-node-termination-handler:
  logFormatVersion: 2

  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule
  nodeSelector:
@@ -75,13 +151,16 @@ aws-node-termination-handler:

fuseDevicePlugin:
  enabled: false
  image:
    name: public.ecr.aws/zero-downtime/fuse-device-plugin
    tag: v1.2.0

awsNeuron:
  enabled: false

  image:
    name: public.ecr.aws/neuron/neuron-device-plugin
    tag: 1.9.3.0
    tag: 2.19.16.0

nvidia-device-plugin:
  enabled: false
@@ -109,10 +188,20 @@ nvidia-device-plugin:
  - g5.16xlarge
  - g5.24xlarge
  - g5.48xlarge
  - g4dn.xlarge
  - g4dn.2xlarge
  - g4dn.4xlarge
  - g4dn.8xlarge
  - g4dn.12xlarge
  - g4dn.16xlarge

cluster-autoscaler:
  enabled: false

  image:
    repository: registry.k8s.io/autoscaling/cluster-autoscaler
    tag: v1.28.2

  autoDiscovery:
    clusterName: ""
  awsRegion: "us-west-2"
@@ -131,6 +220,8 @@ cluster-autoscaler:
  extraArgs:
    scan-interval: 30s
    skip-nodes-with-local-storage: false
    balance-similar-node-groups: true
    ignore-taint: "node.cilium.io/agent-not-ready"

  #securityContext:
  #  runAsNonRoot: true

@@ -138,15 +229,13 @@ cluster-autoscaler:
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule

  # On AWS enable Projected Service Accounts to assume IAM role
  #extraEnv:
  #  AWS_ROLE_ARN: <IamArn>
  #  AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
  #  AWS_STS_REGIONAL_ENDPOINTS: "regional"

  #extraVolumes:
@@ -170,8 +259,6 @@ external-dns:
  triggerLoopOnEvent: true

  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule
  nodeSelector:
@@ -1,29 +1,36 @@
apiVersion: v2
description: KubeZero ArgoCD - config, branding, image-updater (optional)
name: kubezero-argocd
version: 0.11.1
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.2.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- argocd
- argocd-image-updater
- argo-events
- argo-workflow
maintainers:
- name: Stefan Reimer
  email: stefan@zero-downtime.net
# Url: https://github.com/argoproj/argo-helm/tree/main/charts
dependencies:
- name: kubezero-lib
  version: ">= 0.1.5"
  version: ">= 0.1.6"
  repository: https://cdn.zero-downtime.net/charts/
- name: argo-events
  version: 2.4.4
  repository: https://argoproj.github.io/argo-helm
  condition: argo-events.enabled
- name: argo-cd
  version: 5.16.1
  version: 6.9.2
  repository: https://argoproj.github.io/argo-helm
  condition: argo-cd.enabled
- name: argocd-apps
  version: 0.0.4
  version: 2.0.0
  repository: https://argoproj.github.io/argo-helm
  condition: argo-cd.enabled
- name: argocd-image-updater
  version: 0.8.1
  version: 0.10.0
  repository: https://argoproj.github.io/argo-helm
  condition: argocd-image-updater.enabled
kubeVersion: ">= 1.24.0"
kubeVersion: ">= 1.26.0"
@@ -0,0 +1,92 @@
# kubezero-argo

![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square)

KubeZero Argo - Events, Workflow, CD

**Homepage:** <https://kubezero.com>

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |

## Requirements

Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 6.7.10 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.4 |
| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.0 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.9.6 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.applicationSet.enabled | bool | `false` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
|
||||
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.27 - Release notes"` | |
|
||||
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
|
||||
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
|
||||
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.27"` | |
|
||||
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
|
||||
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
|
||||
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
|
||||
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
|
||||
| argo-cd.configs.params."server.insecure" | bool | `true` | |
|
||||
| argo-cd.configs.secret.createSecret | bool | `false` | |
|
||||
| argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.resources.limits.memory | string | `"2048Mi"` | |
| argo-cd.controller.resources.requests.cpu | string | `"100m"` | |
| argo-cd.controller.resources.requests.memory | string | `"512Mi"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.istio.enabled | bool | `false` | |
| argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argo-events.configs.jetstream.settings.maxFileStore | int | `-1` | Maximum size of the file storage (e.g. 20G) |
| argo-events.configs.jetstream.settings.maxMemoryStore | int | `-1` | Maximum size of the memory storage (e.g. 1G) |
| argo-events.configs.jetstream.streamConfig.duplicates | string | `"300s"` | Not documented at the moment |
| argo-events.configs.jetstream.streamConfig.maxAge | string | `"72h"` | Maximum age of existing messages, i.e. “72h”, “4h35m” |
| argo-events.configs.jetstream.streamConfig.maxBytes | string | `"1GB"` | |
| argo-events.configs.jetstream.streamConfig.maxMsgs | int | `1000000` | Maximum number of messages before expiring oldest message |
| argo-events.configs.jetstream.streamConfig.replicas | int | `1` | Number of replicas, defaults to 3 and requires minimal 3 |
| argo-events.configs.jetstream.versions[0].configReloaderImage | string | `"natsio/nats-server-config-reloader:0.14.1"` | |
| argo-events.configs.jetstream.versions[0].metricsExporterImage | string | `"natsio/prometheus-nats-exporter:0.14.0"` | |
| argo-events.configs.jetstream.versions[0].natsImage | string | `"nats:2.10.11-scratch"` | |
| argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
| argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
| argo-events.enabled | bool | `false` | |
| argocd-apps.applications | object | `{}` | |
| argocd-apps.enabled | bool | `false` | |
| argocd-apps.projects | object | `{}` | |
| argocd-image-updater.authScripts.enabled | bool | `true` | |
| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.config.argocd.plaintext | bool | `true` | |
| argocd-image-updater.enabled | bool | `false` | |
| argocd-image-updater.fullnameOverride | string | `"argocd-image-updater"` | |
| argocd-image-updater.metrics.enabled | bool | `false` | |
| argocd-image-updater.metrics.serviceMonitor.enabled | bool | `true` | |
| argocd-image-updater.sshConfig.config | string | `"Host *\n  PubkeyAcceptedAlgorithms +ssh-rsa\n  HostkeyAlgorithms +ssh-rsa\n"` | |

## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json
@@ -18,3 +18,4 @@
## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json
@@ -1,5 +1,5 @@
{{- if .Values.istio.enabled }}
{{- if .Values.istio.ipBlocks }}
{{- if index .Values "argo-cd" "istio" "enabled" }}
{{- if index .Values "argo-cd" "istio" "ipBlocks" }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:

@@ -16,7 +16,7 @@ spec:
- from:
  - source:
      notIpBlocks:
      {{- toYaml .Values.istio.ipBlocks | nindent 8 }}
      {{- toYaml (index .Values "argo-cd" "istio" "ipBlocks") | nindent 8 }}
  to:
  - operation:
      hosts: [{{ index .Values "argo-cd" "configs" "cm" "url" | quote }}]
@@ -1,4 +1,4 @@
{{- if .Values.istio.enabled }}
{{- if index .Values "argo-cd" "istio" "enabled" }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:

@@ -8,9 +8,9 @@ metadata:
  {{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
  gateways:
  - {{ .Values.istio.gateway }}
  - {{ index .Values "argo-cd" "istio" "gateway" }}
  hosts:
  - {{ index .Values "argo-cd" "configs" "cm" "url" }}
  - {{ get (urlParse (index .Values "argo-cd" "configs" "cm" "url")) "host" }}
  http:
  - name: grpc
    match:

@@ -19,13 +19,13 @@ spec:
    prefix: argocd-client
    route:
    - destination:
        host: argocd-server
        host: argo-argocd-server
        port:
          number: 443
  - name: http
    route:
    - destination:
        host: argocd-server
        host: argo-argocd-server
        port:
          number: 80
{{- end }}
@@ -0,0 +1,10 @@
#!/bin/bash

. ../../scripts/lib-update.sh

update_helm

# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/argo-cd/grafana-dashboards.yaml

update_docs
@@ -1,16 +1,43 @@
# Support for Istio Ingress for ArgoCD
istio:
  # istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
argo-events:
  enabled: false
  # istio.gateway -- Name of the Istio gateway to add the VirtualService to
  gateway: istio-ingress/ingressgateway
  ipBlocks: []
  configs:
    jetstream:
      # Default JetStream settings, could be overridden by EventBus JetStream spec
      # Ref: https://docs.nats.io/running-a-nats-service/configuration#jetstream
      settings:
        # -- Maximum size of the memory storage (e.g. 1G)
        maxMemoryStore: -1
        # -- Maximum size of the file storage (e.g. 20G)
        maxFileStore: -1
      streamConfig:
        # -- Maximum number of messages before expiring oldest message
        maxMsgs: 1000000
        # -- Maximum age of existing messages, i.e. “72h”, “4h35m”
        maxAge: 72h
        # Total size of messages before expiring oldest message, 0 means unlimited.
        maxBytes: 1GB
        # -- Number of replicas, defaults to 3 and requires minimal 3
        replicas: 1
        # -- Not documented at the moment
        duplicates: 300s
      # Supported versions of JetStream eventbus
      # see: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml
      # do NOT use -alpine tag as the entrypoint differs
      versions:
      - version: 2.10.11
        natsImage: nats:2.10.11-scratch
        metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0
        configReloaderImage: natsio/nats-server-config-reloader:0.14.1
        startCommand: /nats-server
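
(A hedged aside, not part of this chart: the JetStream defaults above are consumed by an argo-events EventBus. A minimal sketch of such an EventBus, assuming argo-events runs in an `argo-events` namespace, might look like this.)

```shell
# Sketch only: create a JetStream EventBus using the version pinned above (2.10.11).
kubectl apply -f - <<'EOF'
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
  namespace: argo-events
spec:
  jetstream:
    version: "2.10.11"
    replicas: 3    # the comment above notes a minimum of 3 for real replication
EOF
```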


argocd-apps:
  projects: []
  applications: []
  enabled: false
  projects: {}
  applications: {}

argo-cd:
  enabled: false
  #configs:
  #  secret:
  #    `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
@@ -26,18 +53,19 @@ argo-cd:
  configs:
    styles: |
      .sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
      .sidebar__logo__text-logo { height: 0em; }
      .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }

    cm:
      ui.bannercontent: "KubeZero v1.24 - Release notes"
      ui.bannerurl: "https://kubezero.com/releases/v1.24"
      ui.bannercontent: "KubeZero v1.28 - Release notes"
      ui.bannerurl: "https://kubezero.com/releases/v1.28"
      ui.bannerpermanent: "true"
      ui.bannerposition: "bottom"

      # argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
      url: argocd.example.com
      # argo-cd.server.config.url -- ArgoCD URL being exposed via Istio
      url: https://argocd.example.com

      timeout.reconciliation: 300
      timeout.reconciliation: 300s

      resource.customizations: |
        cert-manager.io/Certificate:
@@ -67,17 +95,8 @@ argo-cd:
secret:
createSecret: false

knownHosts:
data:
ssh_known_hosts: |
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
ssh:
extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="

params:
controller.status.processors: "10"
@@ -86,18 +105,19 @@ argo-cd:
      server.insecure: true
      server.enable.gzip: true

  controller:
    metrics:
      enabled: false
      serviceMonitor:
        enabled: true

    resources:
      # limits:
      limits:
      #   cpu: 500m
      #   memory: 2048Mi
        memory: 2048Mi
      requests:
        cpu: 100m
        memory: 256Mi
        memory: 512Mi

  repoServer:
    metrics:

@@ -127,6 +147,14 @@ argo-cd:
  notifications:
    enabled: false

  # Support for Istio Ingress for ArgoCD
  istio:
    # istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
    enabled: false
    # istio.gateway -- Name of the Istio gateway to add the VirtualService to
    gateway: istio-ingress/ingressgateway
    ipBlocks: []

argocd-image-updater:
  enabled: false
@@ -1,74 +0,0 @@
# kubezero-argocd

![Version: 0.11.1](https://img.shields.io/badge/Version-0.11.1-informational?style=flat-square)

KubeZero ArgoCD - config, branding, image-updater (optional)

**Homepage:** <https://kubezero.com>

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |

## Requirements

Kubernetes: `>= 1.24.0`

| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 5.16.1 |
| https://argoproj.github.io/argo-helm | argocd-apps | 0.0.4 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.8.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.applicationSet.enabled | bool | `false` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | int | `300` | |
|
||||
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.24 - Release notes"` | |
|
||||
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
|
||||
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
|
||||
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.24"` | |
|
||||
| argo-cd.configs.cm.url | string | `"argocd.example.com"` | |
|
||||
| argo-cd.configs.knownHosts.data.ssh_known_hosts | string | `"bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==\ngithub.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf\ngitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngit.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==\n"` | |
| argo-cd.configs.metrics.enabled | bool | `false` | |
| argo-cd.configs.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.resources.requests.cpu | string | `"100m"` | |
| argo-cd.configs.resources.requests.memory | string | `"256Mi"` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argocd-apps.applications | list | `[]` | |
| argocd-apps.projects | list | `[]` | |
| argocd-image-updater.authScripts.enabled | bool | `true` | |
| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.config.argocd.plaintext | bool | `true` | |
| argocd-image-updater.enabled | bool | `false` | |
| argocd-image-updater.fullnameOverride | string | `"argocd-image-updater"` | |
| argocd-image-updater.metrics.enabled | bool | `false` | |
| argocd-image-updater.metrics.serviceMonitor.enabled | bool | `true` | |
| argocd-image-updater.sshConfig.config | string | `"Host *\n  PubkeyAcceptedAlgorithms +ssh-rsa\n  HostkeyAlgorithms +ssh-rsa\n"` | |
| istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| istio.gateway | string | `"istio-ingress/ingressgateway"` | Name of the Istio gateway to add the VirtualService to |
| istio.ipBlocks | list | `[]` | |

## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json
@@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-auth
description: KubeZero umbrella chart for all things Authentication and Identity management
type: application
version: 0.3.3
appVersion: 20.0.0
version: 0.4.6
appVersion: 22.0.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@@ -14,10 +14,10 @@ maintainers:
  email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
  version: ">= 0.1.5"
  version: ">= 0.1.6"
  repository: https://cdn.zero-downtime.net/charts/
- name: postgresql
  version: 11.8.1
  repository: https://charts.bitnami.com/bitnami
  condition: postgresql.enabled
kubeVersion: ">= 1.20.0"
- name: keycloak
  version: 18.7.1
  repository: "oci://registry-1.docker.io/bitnamicharts"
  condition: keycloak.enabled
kubeVersion: ">= 1.26.0"
@@ -1,6 +1,6 @@
# kubezero-auth

![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 20.0.0](https://img.shields.io/badge/AppVersion-20.0.0-informational?style=flat-square)
![Version: 0.4.5](https://img.shields.io/badge/Version-0.4.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 22.0.5](https://img.shields.io/badge/AppVersion-22.0.5-informational?style=flat-square)

KubeZero umbrella chart for all things Authentication and Identity management
@@ -14,40 +14,50 @@ KubeZero umbrella chart for all things Authentication and Identity management

## Requirements

Kubernetes: `>= 1.20.0`
Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.bitnami.com/bitnami | postgresql | 11.8.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| oci://registry-1.docker.io/bitnamicharts | keycloak | 18.3.2 |

# Keycloak

## Operator

https://www.keycloak.org/operator/installation
https://github.com/keycloak/keycloak/tree/main/operator
https://github.com/aerogear/keycloak-metrics-spi
https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keycloak/templates

## Resources

- Codecentric Helm chart: `https://github.com/codecentric/helm-charts/tree/master/charts/keycloak`
- custom image: `https://www.keycloak.org/server/containers`
- https://github.com/bitnami/charts/tree/main/bitnami/keycloak

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| keycloak.auth.adminUser | string | `"admin"` | |
| keycloak.auth.existingSecret | string | `"kubezero-auth"` | |
| keycloak.auth.passwordSecretKey | string | `"admin-password"` | |
| keycloak.enabled | bool | `false` | |
| keycloak.istio.enabled | bool | `false` | |
| keycloak.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| keycloak.istio.url | string | `""` | |
| keycloak.istio.admin.enabled | bool | `false` | |
| keycloak.istio.admin.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| keycloak.istio.admin.url | string | `""` | |
| keycloak.istio.auth.enabled | bool | `false` | |
| keycloak.istio.auth.gateway | string | `"istio-ingress/ingressgateway"` | |
| keycloak.istio.auth.url | string | `""` | |
| keycloak.metrics.enabled | bool | `false` | |
| keycloak.podDisruptionBudget.minAvailable | int | `1` | |
| keycloak.replicas | int | `1` | |
| postgresql.auth.database | string | `"keycloak"` | |
| postgresql.auth.existingSecret | string | `"kubezero-auth-postgresql"` | |
| postgresql.auth.username | string | `"keycloak"` | |
| postgresql.enabled | bool | `false` | |
| postgresql.primary.persistence.size | string | `"1Gi"` | |
| postgresql.readReplicas.replicaCount | int | `0` | |
| keycloak.metrics.serviceMonitor.enabled | bool | `true` | |
| keycloak.pdb.create | bool | `false` | |
| keycloak.pdb.minAvailable | int | `1` | |
| keycloak.postgresql.auth.database | string | `"keycloak"` | |
| keycloak.postgresql.auth.existingSecret | string | `"kubezero-auth"` | |
| keycloak.postgresql.auth.username | string | `"keycloak"` | |
| keycloak.postgresql.primary.persistence.size | string | `"1Gi"` | |
| keycloak.postgresql.readReplicas.replicaCount | int | `0` | |
| keycloak.production | bool | `true` | |
| keycloak.proxy | string | `"edge"` | |
| keycloak.replicaCount | int | `1` | |
| keycloak.resources.requests.cpu | string | `"100m"` | |
| keycloak.resources.requests.memory | string | `"512Mi"` | |
@@ -17,13 +17,12 @@

## Operator

https://www.keycloak.org/operator/installation
https://github.com/keycloak/keycloak/tree/main/operator
https://github.com/aerogear/keycloak-metrics-spi
https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keycloak/templates

## Resources

- Codecentric Helm chart: `https://github.com/codecentric/helm-charts/tree/master/charts/keycloak`
- custom image: `https://www.keycloak.org/server/containers`
- https://github.com/bitnami/charts/tree/main/bitnami/keycloak

{{ template "chart.valuesSection" . }}
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,8 +1,9 @@
configmap: grafana-dashboards-keycloak
configmap: grafana-dashboards
condition: '.Values.keycloak.metrics.enabled'
gzip: true
# folder:
dashboards:
- name: keycloak
  url: https://grafana.com/api/dashboards/10441/revisions/2/download
  # url: https://grafana.com/api/dashboards/10441/revisions/2/download
  url: https://grafana.com/api/dashboards/17878/revisions/1/download
  tags: ['Keycloak', 'Auth']
@@ -0,0 +1,14 @@
# Abstract

## IdP

### AWS

Get the client descriptor for your realm and client via:
`wget https://<auth-endpoint>/realms/<realm>/protocol/saml/descriptor`
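
For example (hypothetical endpoint and realm names, following the pattern above):
```
# Hypothetical values for illustration only.
wget https://auth.example.com/realms/kubezero/protocol/saml/descriptor
```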

# Resources

## AWS
- https://aws.amazon.com/blogs/business-intelligence/federate-amazon-quicksight-access-with-open-source-identity-provider-keycloak/
- https://docs.aws.amazon.com/singlesignon/latest/userguide/troubleshooting.html#issue8
@@ -0,0 +1,49 @@
# Upgrade Postgres major version

## backup

- shell into the running postgres-auth pod
```
export PGPASSWORD="<postgres_password from secret>"
cd /bitnami/postgresql
pg_dumpall > backup
```

- store the backup off-site
```
kubectl cp keycloak/kubezero-auth-postgresql-0:/bitnami/postgresql/backup postgres-backup
```

## upgrade

- upgrade the auth chart
- set replicas of the keycloak statefulSet to 0 (see the sketch below)
- set replicas of the postgres-auth statefulSet to 0
- delete the postgres-auth PVC and pod to flush the old DB
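
A minimal sketch of the scale-down steps above, assuming the `keycloak` namespace used by the `kubectl cp` command and the `kubezero-auth-postgresql` statefulSet implied by the pod name; the keycloak statefulSet name is left as a placeholder, so verify both names first.
```
# Placeholder names; confirm with: kubectl -n keycloak get statefulsets
kubectl -n keycloak scale statefulset <keycloak-statefulset> --replicas=0
kubectl -n keycloak scale statefulset kubezero-auth-postgresql --replicas=0
```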

## restore

- restore replicas of the postgres-auth statefulSet
- copy the backup to the new PVC
```
kubectl cp postgres-backup keycloak/kubezero-auth-postgresql-0:/bitnami/postgresql/backup
```

- log into psql as admin (shell on the running pod)
```
psql -U postgres
```

- drop database `keycloak` in case the keycloak instances connected early
```
DROP database keycloak
```

- actual restore
```
psql -U postgres -d postgres -f backup
```

- reset replicas of the keycloak statefulSet or force an ArgoCD sync

success.
@@ -1,12 +0,0 @@
--- templates/keycloak/operator.yaml.orig	2022-05-11 12:46:15.860204871 +0200
+++ templates/keycloak/operator.yaml	2022-05-11 12:46:02.840068240 +0200
@@ -1,3 +1,4 @@
+{{- if .Values.keycloak.enabled }}
---
apiVersion: v1
kind: ServiceAccount
@@ -233,3 +234,4 @@
successThreshold: 1
timeoutSeconds: 10
serviceAccountName: keycloak-operator
+{{- end }}
File diff suppressed because one or more lines are too long
@@ -1,8 +1,8 @@
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.enabled }}
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.admin.enabled }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: {{ .Release.Name }}-deny-metrics-ipblocks
  name: {{ .Release.Name }}-keycloak-admin-deny-not-in-ipblocks
  namespace: istio-system
  labels:
    {{- include "kubezero-lib.labels" $ | nindent 4 }}

@@ -12,22 +12,23 @@ spec:
      app: istio-ingressgateway
  action: DENY
  rules:
  # block access to metrics via Ingress
  - to:
    - operation:
        hosts: ["{{ .Values.keycloak.istio.url }}"]
        paths: ["/auth/realms/master/metrics"]
        hosts: ["{{ .Values.keycloak.istio.admin.url }}"]
        paths: ["/metrics", "/realms/*/metrics"]
    when:
    - key: connection.sni
      values:
      - '*'
{{- if .Values.keycloak.istio.ipBlocks }}
{{- if .Values.keycloak.istio.admin.ipBlocks }}
  - from:
    - source:
        notIpBlocks:
        {{- toYaml .Values.keycloak.istio.ipBlocks | nindent 8 }}
        {{- toYaml .Values.keycloak.istio.admin.ipBlocks | nindent 8 }}
    to:
    - operation:
        hosts: ["{{ .Values.keycloak.istio.url }}"]
        hosts: ["{{ .Values.keycloak.istio.admin.url }}"]
    when:
    - key: connection.sni
      values:
@ -1,18 +1,44 @@
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.enabled .Values.keycloak.istio.url }}
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.admin.enabled .Values.keycloak.istio.admin.url }}
# Admin endpoint / all URLs allowed
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: {{ template "kubezero-lib.fullname" $ }}
  name: {{ template "kubezero-lib.fullname" $ }}-admin
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "kubezero-lib.labels" $ | nindent 4 }}
spec:
  gateways:
  - {{ .Values.keycloak.istio.gateway }}
  - {{ .Values.keycloak.istio.admin.gateway }}
  hosts:
  - {{ .Values.keycloak.istio.url }}
  - {{ .Values.keycloak.istio.admin.url }}
  http:
  - route:
    - destination:
        host: {{ template "kubezero-lib.fullname" $ }}-service
        host: {{ template "kubezero-lib.fullname" $ }}-keycloak
{{- end }}

---

{{- if and .Values.keycloak.enabled .Values.keycloak.istio.auth.enabled .Values.keycloak.istio.auth.url }}
# auth endpoint - only expose minimal URLs
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: {{ template "kubezero-lib.fullname" $ }}-auth
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "kubezero-lib.labels" $ | nindent 4 }}
spec:
  gateways:
  - {{ .Values.keycloak.istio.auth.gateway }}
  hosts:
  - {{ .Values.keycloak.istio.auth.url }}
  http:
  - match:
    - uri:
        regex: ^/(js/|realms/|resources/|robots.txt).*
    route:
    - destination:
        host: {{ template "kubezero-lib.fullname" $ }}-keycloak
{{- end }}
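
One way to sanity-check the resulting split from outside the cluster: the auth VirtualService should serve the realm endpoints while leaving everything else, notably the admin console, unrouted. The hostname below is a placeholder for whatever `keycloak.istio.auth.url` is set to.

```
# Realm discovery should be reachable on the public auth endpoint
curl -s -o /dev/null -w '%{http_code}\n' \
  https://auth.example.com/realms/master/.well-known/openid-configuration

# The admin console should not be routed here (expect a 404 from the gateway)
curl -s -o /dev/null -w '%{http_code}\n' https://auth.example.com/admin/
```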
@ -1,56 +0,0 @@
{{- if .Values.keycloak.enabled }}
apiVersion: k8s.keycloak.org/v2alpha1
kind: Keycloak
metadata:
  name: {{ template "kubezero-lib.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
  instances: {{ .Values.keycloak.replicas }}

  additionalOptions:
  # Needs int casting thx to https://github.com/kubernetes-sigs/yaml/issues/45
  {{- if lt (int .Values.keycloak.replicas) 2 }}
  - name: cache
    value: local
  {{- end }}
  {{- if .Values.postgresql.enabled }}
  - name: db
    value: postgres
  - name: db-url-host
    value: {{ template "kubezero-lib.fullname" . }}-postgresql
  - name: db-username
    value: keycloak
  - name: db-password
    secret:
      name: {{ template "kubezero-lib.fullname" . }}-postgresql
      key: password
  {{- else }}
  # Fallback to local file within the pod - dev ONLY !!
  - name: db
    value: dev-file
  {{- end }}
  - name: hostname-strict-https
    value: "false"
  - name: proxy
    value: edge
  - name: http-enabled
    value: "true"
  - name: log-console-output
    value: json

  ingress:
    enabled: false

  http:
    httpEnabled: true

  # We use Istio Ingress to terminate TLS
  # mTls down the road
  hostname:
    hostname: {{ default "keycloak" .Values.keycloak.istio.url }}
    strict: false
    strictBackchannel: false
{{- end }}

@ -1,237 +0,0 @@
{{- if .Values.keycloak.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations:
    app.quarkus.io/build-timestamp: 2022-11-01 - 13:22:44 +0000
  labels:
    app.kubernetes.io/name: keycloak-operator
    app.kubernetes.io/version: 20.0.0
  name: keycloak-operator
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    app.quarkus.io/build-timestamp: 2022-11-01 - 13:22:44 +0000
  labels:
    app.kubernetes.io/name: keycloak-operator
    app.kubernetes.io/version: 20.0.0
  name: keycloak-operator
spec:
  ports:
  - name: http
    port: 80
    targetPort: 8080
  selector:
    app.kubernetes.io/name: keycloak-operator
    app.kubernetes.io/version: 20.0.0
  type: ClusterIP
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: keycloak-operator-role
rules:
- apiGroups:
  - apps
  - extensions
  resources:
  - statefulsets
  verbs:
  - get
  - list
  - watch
  - create
  - delete
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - secrets
  - services
  verbs:
  - get
  - list
  - watch
  - create
  - delete
  - patch
  - update
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - get
  - list
  - watch
  - create
  - delete
  - patch
  - update
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
  - create
  - delete
  - patch
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: keycloak-operator
  name: keycloak-operator-role-binding
roleRef:
  kind: Role
  apiGroup: rbac.authorization.k8s.io
  name: keycloak-operator-role
subjects:
- kind: ServiceAccount
  name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: keycloak-operator-view
roleRef:
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
  name: view
subjects:
- kind: ServiceAccount
  name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: keycloakcontroller-role-binding
roleRef:
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
  name: keycloakcontroller-cluster-role
subjects:
- kind: ServiceAccount
  name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: keycloakrealmimportcontroller-role-binding
roleRef:
  kind: ClusterRole
  apiGroup: rbac.authorization.k8s.io
  name: keycloakrealmimportcontroller-cluster-role
subjects:
- kind: ServiceAccount
  name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: keycloakcontroller-cluster-role
rules:
- apiGroups:
  - k8s.keycloak.org
  resources:
  - keycloaks
  - keycloaks/status
  - keycloaks/finalizers
  verbs:
  - get
  - list
  - watch
  - create
  - delete
  - patch
  - update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: keycloakrealmimportcontroller-cluster-role
rules:
- apiGroups:
  - k8s.keycloak.org
  resources:
  - keycloakrealmimports
  - keycloakrealmimports/status
  - keycloakrealmimports/finalizers
  verbs:
  - get
  - list
  - watch
  - create
  - delete
  - patch
  - update
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    app.quarkus.io/build-timestamp: 2022-11-01 - 13:22:44 +0000
  labels:
    app.kubernetes.io/name: keycloak-operator
    app.kubernetes.io/version: 20.0.0
  name: keycloak-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: keycloak-operator
      app.kubernetes.io/version: 20.0.0
  template:
    metadata:
      annotations:
        app.quarkus.io/build-timestamp: 2022-11-01 - 13:22:44 +0000
      labels:
        app.kubernetes.io/name: keycloak-operator
        app.kubernetes.io/version: 20.0.0
    spec:
      containers:
      - env:
        - name: KUBERNETES_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: OPERATOR_KEYCLOAK_IMAGE
          value: quay.io/keycloak/keycloak:20.0.0
        image: quay.io/keycloak/keycloak-operator:20.0.0
        imagePullPolicy: Always
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /q/health/live
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 0
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 10
        name: keycloak-operator
        ports:
        - containerPort: 8080
          name: http
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /q/health/ready
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 0
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 10
      serviceAccountName: keycloak-operator
{{- end }}

@ -1,15 +0,0 @@
{{- if and .Values.keycloak.podDisruptionBudget (gt (int .Values.keycloak.replicas) 1) }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ template "kubezero-lib.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
  selector:
    matchLabels:
      app: keycloak
      app.kubernetes.io/managed-by: keycloak-operator
  {{- toYaml .Values.keycloak.podDisruptionBudget | nindent 2 }}
{{- end }}

@ -1,19 +1,12 @@
#!/bin/bash

# https://www.keycloak.org/operator/installation

set -ex

helm dep update
. ../../scripts/lib-update.sh

# Operator
VERSION=$(yq eval '.appVersion' Chart.yaml)

wget -O crds/keycloak.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/keycloaks.k8s.keycloak.org-v1.yml
wget -O crds/keycloak-realmimports.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/keycloakrealmimports.k8s.keycloak.org-v1.yml

wget -O templates/keycloak/operator.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/kubernetes.yml
patch -i keycloak.patch -p0 --no-backup-if-mismatch
login_ecr_public
update_helm

# Fetch dashboards
../kubezero-metrics/sync_grafana_dashboards.py dashboards-keycloak.yaml templates/keycloak/grafana-dashboards.yaml

update_docs

@ -1,29 +1,48 @@
keycloak:
  enabled: false

  replicas: 1
  podDisruptionBudget:
    minAvailable: 1
  proxy: edge
  production: true

  istio:
    enabled: false
    gateway: istio-ingress/private-ingressgateway
    url: ""
  auth:
    adminUser: admin
    existingSecret: kubezero-auth
    passwordSecretKey: admin-password

  replicaCount: 1

  pdb:
    create: false
    minAvailable: 1

  metrics:
    enabled: false
    serviceMonitor:
      enabled: true
  resources:
    requests:
      cpu: 100m
      memory: 512Mi

postgresql:
  enabled: false
  postgresql:
    auth:
      existingSecret: kubezero-auth
      username: keycloak
      database: keycloak

  auth:
    existingSecret: kubezero-auth-postgresql
    username: keycloak
    database: keycloak
  primary:
    persistence:
      size: 1Gi

  primary:
    persistence:
      size: 1Gi
  readReplicas:
    replicaCount: 0

  readReplicas:
    replicaCount: 0
istio:
  admin:
    enabled: false
    gateway: istio-ingress/private-ingressgateway
    url: ""
  auth:
    enabled: false
    gateway: istio-ingress/ingressgateway
    url: ""

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.9.3
version: 0.9.7
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

@ -13,9 +13,9 @@ maintainers:
    email: stefan@zero-downtime.net
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.5"
    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: cert-manager
    version: 1.9.1
    version: v1.14.4
    repository: https://charts.jetstack.io
kubeVersion: ">= 1.24.0"
kubeVersion: ">= 1.26.0"

@ -1,6 +1,6 @@
# kubezero-cert-manager

![Version: 0.9.3](https://img.shields.io/badge/Version-0.9.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.9.7](https://img.shields.io/badge/Version-0.9.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

KubeZero Umbrella Chart for cert-manager

@ -14,12 +14,12 @@ KubeZero Umbrella Chart for cert-manager

## Requirements

Kubernetes: `>= 1.24.0`
Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.jetstack.io | cert-manager | 1.9.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jetstack.io | cert-manager | v1.14.4 |

## AWS - OIDC IAM roles

@ -32,13 +32,16 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| cert-manager.cainjector.extraArgs[0] | string | `"--logging-format=json"` | |
| cert-manager.cainjector.extraArgs[1] | string | `"--leader-elect=false"` | |
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.cainjector.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.enableCertificateOwnerRef | bool | `true` | |
| cert-manager.enabled | bool | `true` | |
| cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.extraArgs[0] | string | `"--logging-format=json"` | |
| cert-manager.extraArgs[1] | string | `"--leader-elect=false"` | |
| cert-manager.extraArgs[2] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.global.leaderElection.namespace | string | `"cert-manager"` | |
| cert-manager.ingressShim.defaultIssuerKind | string | `"ClusterIssuer"` | |
| cert-manager.ingressShim.defaultIssuerName | string | `"letsencrypt-dns-prod"` | |

@ -46,14 +49,11 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make
| cert-manager.prometheus.servicemonitor.enabled | bool | `false` | |
| cert-manager.startupapicheck.enabled | bool | `false` | |
| cert-manager.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.webhook.extraArgs[0] | string | `"--logging-format=json"` | |
| cert-manager.webhook.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.webhook.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| clusterIssuer | object | `{}` | |
| localCA.enabled | bool | `false` | |
| localCA.selfsigning | bool | `true` | |

@ -1,3 +1,4 @@
rules:
  - name: prometheus-rules
    condition: 'index .Values "cert-manager" "prometheus" "servicemonitor" "enabled"'
    url: file://rules/cert-manager-mixin-prometheusRule

@ -1,4 +1,4 @@
configmap: cert-manager-grafana-dashboard
configmap: grafana-dashboard
gzip: true
folder: KubeZero
condition: 'index .Values "cert-manager" "prometheus" "servicemonitor" "enabled"'

@ -1,6 +1,15 @@
{
  "version": 1,
  "dependencies": [
    {
      "source": {
        "git": {
          "remote": "https://github.com/imusmanmalik/cert-manager-mixin.git",
          "subdir": ""
        }
      },
      "version": "main"
    },
    {
      "source": {
        "git": {

@ -8,16 +17,7 @@
          "subdir": "jsonnet/kube-prometheus"
        }
      },
      "version": "release-0.10"
    },
    {
      "source": {
        "git": {
          "remote": "https://gitlab.com/uneeq-oss/cert-manager-mixin.git",
          "subdir": ""
        }
      },
      "version": "master"
      "version": "main"
    }
  ],
  "legacyImports": true

@ -8,8 +8,8 @@
        "subdir": "grafana"
      }
    },
    "version": "199e363523104ff8b3a12483a4e3eca86372b078",
    "sum": "/jDHzVAjHB4AOLkJHw1GyATX5ogZ1iMdcJXZAgaG3+g="
    "version": "5698c8940b6dadca3f42107b7839557bc041761f",
    "sum": "l6fPvh3tW6fWot308w71QY/amrYsFPeitvz1IgJxqQA="
  },
  {
    "source": {

@ -18,8 +18,18 @@
        "subdir": "contrib/mixin"
      }
    },
    "version": "b872757492ca70f350abd44eb95d81b95339f581",
    "sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
    "version": "5a53a708d8ab9ef936ac5b8062ffc66c77a2c18f",
    "sum": "xuUBd2vqF7asyVDe5CE08uPT/RxAdy8O75EjFJoMXXU="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/grafana/grafana.git",
        "subdir": "grafana-mixin"
      }
    },
    "version": "1120f9e255760a3c104b57871fcb91801e934382",
    "sum": "MkjR7zCgq6MUZgjDzop574tFKoTX2OBr7DTwm1K+Ofs="
  },
  {
    "source": {

@ -28,8 +38,48 @@
        "subdir": "grafonnet"
      }
    },
    "version": "6db00c292d3a1c71661fc875f90e0ec7caa538c2",
    "sum": "gF8foHByYcB25jcUOBqP6jxk0OPifQMjPvKY0HaCk6w="
    "version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
    "sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/grafana/grafonnet-lib.git",
        "subdir": "grafonnet-7.0"
      }
    },
    "version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
    "sum": "gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/grafana/grafonnet.git",
        "subdir": "gen/grafonnet-latest"
      }
    },
    "version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
    "sum": "GxEO83uxgsDclLp/fmlUJZDbSGpeUZY6Ap3G2cgdL1g="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/grafana/grafonnet.git",
        "subdir": "gen/grafonnet-v10.0.0"
      }
    },
    "version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
    "sum": "W7sLuAvMSJPkC7Oo31t45Nz/cUdJV7jzNSJTd3F1daM="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/grafana/grafonnet.git",
        "subdir": "gen/grafonnet-v10.4.0"
      }
    },
    "version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
    "sum": "ZSmDT7i/qU9P8ggmuPuJT+jonq1ZEsBRCXycW/H5L/A="
  },
  {
    "source": {

@ -38,8 +88,38 @@
        "subdir": "grafana-builder"
      }
    },
    "version": "5fb2525651cc6e5100e081b10ad9fbe7e3595231",
    "sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc="
    "version": "7561fd330312538d22b00e0c7caecb4ba66321ea",
    "sum": "+z5VY+bPBNqXcmNAV8xbJcbsRA+pro1R3IM7aIY8OlU="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/imusmanmalik/cert-manager-mixin.git",
        "subdir": ""
      }
    },
    "version": "72a094ff162bbd93921803994241d73900592c9a",
    "sum": "h+YvBTXL5A02165i3yt3SxSAbFftChtXYJ0nYFnOAqo="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/jsonnet-libs/docsonnet.git",
        "subdir": "doc-util"
      }
    },
    "version": "6ac6c69685b8c29c54515448eaca583da2d88150",
    "sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/jsonnet-libs/xtd.git",
        "subdir": ""
      }
    },
    "version": "fc2e57a8839902ed4ba6cab5a99d642500f7102b",
    "sum": "43waffw1QzvpY4rKcWoo3L7Vpee+DCYexwLDd5cPG0M="
  },
  {
    "source": {

@ -48,18 +128,8 @@
        "subdir": ""
      }
    },
    "version": "b538a10c89508f8d12885680cca72a134d3127f5",
    "sum": "GLt5T2k4RKg36Gfcaf9qlTfVumDitqotVD0ipz/bPJ4="
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git",
        "subdir": "lib/promgrafonnet"
      }
    },
    "version": "62ad10fe9ceb53c6b846871997abbfe8e0bd7cf5",
    "sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
    "version": "a1c276d7a46c4b06fa5d8b4a64441939d398efe5",
    "sum": "b/mEai1MvVnZ22YvZlXEO4jWDZledrtJg8eOS1ZUj0M="
  },
  {
    "source": {

@ -68,8 +138,8 @@
        "subdir": "jsonnet/kube-state-metrics"
      }
    },
    "version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
    "sum": "U1wzIpTAtOvC1yj43Y8PfvT0JfvnAcMfNH12Wi+ab0Y="
    "version": "9ba1c3702142918e09e8eb5ca530e15198624259",
    "sum": "msMZyUvcebzRILLzNlTIiSOwa1XgQKtP7jbZTkiqwM0="
  },
  {
    "source": {

@ -78,8 +148,8 @@
        "subdir": "jsonnet/kube-state-metrics-mixin"
      }
    },
    "version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
    "sum": "u8gaydJoxEjzizQ8jY8xSjYgWooPmxw+wIWdDxifMAk="
    "version": "9ba1c3702142918e09e8eb5ca530e15198624259",
    "sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
  },
  {
    "source": {

@ -88,8 +158,8 @@
        "subdir": "jsonnet/kube-prometheus"
      }
    },
    "version": "125fb56d7495e20f504e1537ae949e8261a2f812",
    "sum": "QwH53kTe1jWCeXmQe7+U1PBs/a1p4MCTEW3B8IiIxeo="
    "version": "76f2e1ef95be0df752037baa040781c5219e1fb3",
    "sum": "IgpAgyyBZ7VT2vr9kSYQP/lkZUNQnbqpGh2sYCtUKs0="
  },
  {
    "source": {

@ -98,8 +168,8 @@
        "subdir": "jsonnet/mixin"
      }
    },
    "version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
    "sum": "qZ4WgiweaE6eeKtFK60QUjLO8sf2L9Q8fgafWvDcyfY=",
    "version": "71d9433ba612f4b826ffa38520b23a7985b50db3",
    "sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
    "name": "prometheus-operator-mixin"
  },
  {

@ -109,8 +179,8 @@
        "subdir": "jsonnet/prometheus-operator"
      }
    },
    "version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
    "sum": "yjdwZ+5UXL42EavJleAJmd8Ou6MSDfExvlKAxFCxXVE="
    "version": "71d9433ba612f4b826ffa38520b23a7985b50db3",
    "sum": "S4LFa0h1AzANixqGMowtwVswVP+y6f+fXloxpO7hMes="
  },
  {
    "source": {

@ -119,8 +189,8 @@
        "subdir": "doc/alertmanager-mixin"
      }
    },
    "version": "16fa045db47d68a09a102c7b80b8899c1f57c153",
    "sum": "pep+dHzfIjh2SU5pEkwilMCAT/NoL6YYflV4x8cr7vU=",
    "version": "14cbe6301c732658d6fe877ec55ad5b738abcf06",
    "sum": "IpF46ZXsm+0wJJAPtAre8+yxTNZA57mBqGpBP/r7/kw=",
    "name": "alertmanager"
  },
  {

@ -130,8 +200,8 @@
        "subdir": "docs/node-mixin"
      }
    },
    "version": "a2321e7b940ddcff26873612bccdf7cd4c42b6b6",
    "sum": "MlWDAKGZ+JArozRKdKEvewHeWn8j2DNBzesJfLVd0dk="
    "version": "3accd4cf8286e69d70516abdced6bf186274322a",
    "sum": "vWhHvFqV7+fxrQddTeGVKi1e4EzB3VWtNyD8TjSmevY="
  },
  {
    "source": {

@ -140,10 +210,21 @@
        "subdir": "documentation/prometheus-mixin"
      }
    },
    "version": "41f1a8125e664985dd30674e5bdf6b683eff5d32",
    "sum": "ZjQoYhvgKwJNkg+h+m9lW3SYjnjv5Yx5btEipLhru88=",
    "version": "773170f372e0a57949854b74231ee3e09185f728",
    "sum": "u/Fpz2MPkezy71/q+c7mF0vc3hE9fWt2W/YbvF0LP/8=",
    "name": "prometheus"
  },
  {
    "source": {
      "git": {
        "remote": "https://github.com/pyrra-dev/pyrra.git",
        "subdir": "config/crd/bases"
      }
    },
    "version": "551856d42dff02ec38c5b0ea6a2d99c4cb127e82",
    "sum": "bY/Pcrrbynguq8/HaI88cQ3B2hLv/xc+76QILY7IL+g=",
    "name": "pyrra"
  },
  {
    "source": {
      "git": {

@ -151,19 +232,9 @@
        "subdir": "mixin"
      }
    },
    "version": "fb97c9a5ef51849ccb7960abbeb9581ad7f511b9",
    "sum": "X+060DnePPeN/87fgj0SrfxVitywTk8hZA9V4nHxl1g=",
    "version": "93c79b61825ec00889188e35a58635eee247bc36",
    "sum": "HhSSbGGCNHCMy1ee5jElYDm0yS9Vesa7QB2/SHKdjsY=",
    "name": "thanos-mixin"
  },
  {
    "source": {
      "git": {
        "remote": "https://gitlab.com/uneeq-oss/cert-manager-mixin.git",
        "subdir": ""
      }
    },
    "version": "eae22f642aaa5d422e4766f6811df2158fc05539",
    "sum": "DOg3fzS0OWrjjRPVsKgxID/rk9AC3ESQ4gDELc2RNgM="
  }
  ],
  "legacyImports": false

@ -2,7 +2,7 @@ local addMixin = (import 'kube-prometheus/lib/mixin.libsonnet');

local certManagerMixin = addMixin({
  name: 'cert-manager',
  mixin: (import 'gitlab.com/uneeq-oss/cert-manager-mixin/mixin.libsonnet')
  mixin: (import 'github.com/imusmanmalik/cert-manager-mixin/mixin.libsonnet')
});

{ 'cert-manager-mixin-prometheusRule': certManagerMixin.prometheusRules }

@ -17,8 +17,8 @@
"alert": "CertManagerAbsent",
"annotations": {
  "description": "New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.",
  "runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent",
  "summary": "Cert Manager has dissapeared from Prometheus service discovery."
  "runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerabsent",
  "summary": "Cert Manager has disappeared from Prometheus service discovery."
},
"expr": "absent(up{job=\"cert-manager\"})",
"for": "10m",

@ -36,7 +36,7 @@
"annotations": {
  "dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
  "description": "The domain that this cert covers will be unavailable after {{ $value | humanizeDuration }}. Clients using endpoints that this cert protects will start to fail in {{ $value | humanizeDuration }}.",
  "runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon",
  "runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertexpirysoon",
  "summary": "The cert `{{ $labels.name }}` is {{ $value | humanizeDuration }} from expiry, it should have renewed over a week ago."
},
"expr": "avg by (exported_namespace, namespace, name) (\n  certmanager_certificate_expiration_timestamp_seconds - time()\n) < (21 * 24 * 3600) # 21 days in seconds\n",

@ -50,7 +50,7 @@
"annotations": {
  "dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
  "description": "This certificate has not been ready to serve traffic for at least 10m. If the cert is being renewed or there is another valid cert, the ingress controller _may_ be able to serve that instead.",
  "runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready",
  "runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertnotready",
  "summary": "The cert `{{ $labels.name }}` is not ready to serve traffic."
},
"expr": "max by (name, exported_namespace, namespace, condition) (\n  certmanager_certificate_ready_status{condition!=\"True\"} == 1\n)\n",

@ -64,7 +64,7 @@
"annotations": {
  "dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
  "description": "Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week.",
  "runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits",
  "runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerhittingratelimits",
  "summary": "Cert manager hitting LetsEncrypt rate limits."
},
"expr": "sum by (host) (\n  rate(certmanager_http_acme_client_request_count{status=\"429\"}[5m])\n) > 0\n",

@ -2,7 +2,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "cert-manager-grafana-dashboard" | trunc 63 | trimSuffix "-" }}
  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "grafana-dashboard" | trunc 63 | trimSuffix "-" }}
  namespace: {{ .Release.Namespace }}
  labels:
    grafana_dashboard: "1"

@ -13,8 +13,8 @@ spec:
  - alert: CertManagerAbsent
    annotations:
      description: New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.
      runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent
      summary: Cert Manager has dissapeared from Prometheus service discovery.
      runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerabsent
      summary: Cert Manager has disappeared from Prometheus service discovery.
    expr: absent(up{job="cert-manager"})
    for: 10m
    labels:

@ -25,7 +25,7 @@ spec:
    annotations:
      dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
      description: The domain that this cert covers will be unavailable after {{`{{`}} $value | humanizeDuration {{`}}`}}. Clients using endpoints that this cert protects will start to fail in {{`{{`}} $value | humanizeDuration {{`}}`}}.
      runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon
      runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertexpirysoon
      summary: The cert `{{`{{`}} $labels.name {{`}}`}}` is {{`{{`}} $value | humanizeDuration {{`}}`}} from expiry, it should have renewed over a week ago.
    expr: "avg by (exported_namespace, namespace, name) (\n  certmanager_certificate_expiration_timestamp_seconds - time()\n) < (21 * 24 * 3600) # 21 days in seconds\n"
    for: 1h

@ -35,7 +35,7 @@ spec:
    annotations:
      dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
      description: This certificate has not been ready to serve traffic for at least 10m. If the cert is being renewed or there is another valid cert, the ingress controller _may_ be able to serve that instead.
      runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready
      runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertnotready
      summary: The cert `{{`{{`}} $labels.name {{`}}`}}` is not ready to serve traffic.
    expr: "max by (name, exported_namespace, namespace, condition) (\n  certmanager_certificate_ready_status{condition!=\"True\"} == 1\n)\n"
    for: 10m

@ -45,10 +45,11 @@ spec:
    annotations:
      dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
      description: Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week.
      runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits
      runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerhittingratelimits
      summary: Cert manager hitting LetsEncrypt rate limits.
    expr: "sum by (host) (\n  rate(certmanager_http_acme_client_request_count{status=\"429\"}[5m])\n) > 0\n"
    for: 5m
    labels:
      severity: critical

{{- end }}

@ -1,24 +1,21 @@
#!/bin/bash
set -ex

helm dep update
. ../../scripts/lib-update.sh

update_helm

update_jsonnet

# Install cert-manager mixin
jb install github.com/imusmanmalik/cert-manager-mixin@main

# Install rules
rm -rf rules && mkdir -p rules
jsonnet -J vendor -m rules rules.jsonnet
../kubezero-metrics/sync_prometheus_rules.py cert-manager-rules.yaml templates

# Fetch dashboards from Grafana.com and update ZDT CM
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml

# Get kube-mixin for alerts
which jsonnet > /dev/null || { echo "Required jsonnet not found!"; exit 1;}
which jb > /dev/null || { echo "Required jb ( json-bundler ) not found!"; exit 1;}

[ -r jsonnetfile.json ] || jb init
if [ -r jsonnetfile.lock.json ]; then
  jb update
else
  jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.10
  jb install gitlab.com/uneeq-oss/cert-manager-mixin@master
fi

rm -rf rules && mkdir -p rules
jsonnet -J vendor -m rules rules.jsonnet

../kubezero-metrics/sync_prometheus_rules.py cert-manager-rules.yaml templates
update_docs

@ -23,6 +23,18 @@ cert-manager:
    leaderElection:
      namespace: "cert-manager"

  # remove secrets if the cert is deleted
  enableCertificateOwnerRef: true

  extraArgs:
  - "--logging-format=json"
  - "--leader-elect=false"
  - "--dns01-recursive-nameservers-only"
  # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
  # - --enable-certificate-owner-ref=true

  #enableCertificateOwnerRef: true

  # On AWS enable Projected Service Accounts to assume IAM role
  #extraEnv:
  #- name: AWS_ROLE_ARN

@ -47,8 +59,6 @@ cert-manager:
  #   readOnly: true

  tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    effect: NoSchedule
  nodeSelector:

@ -60,26 +70,22 @@ cert-manager:

  webhook:
    tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""
    extraArgs:
    - "--logging-format=json"

  cainjector:
    tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""

  extraArgs:
  - "--dns01-recursive-nameservers-only"
  # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
  # - --enable-certificate-owner-ref=true
    extraArgs:
    - "--logging-format=json"
    - "--leader-elect=false"

  prometheus:
    servicemonitor:

@ -2,36 +2,35 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.5.20
version: 0.8.11
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  - jenkins
  - goCD
  - gitea
  - renovate
maintainers:
  - name: Stefan Reimer
    email: stefan@zero-downtime.net
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.5"
    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: gocd
    version: 1.40.8
    repository: https://gocd.github.io/helm-chart
    condition: gocd.enabled
  - name: gitea
    version: 6.0.3
    version: 10.1.4
    repository: https://dl.gitea.io/charts/
    condition: gitea.enabled
  - name: jenkins
    version: 4.2.13
    version: 5.1.18
    repository: https://charts.jenkins.io
    condition: jenkins.enabled
  - name: trivy
    version: 0.4.17
    version: 0.7.0
    repository: https://aquasecurity.github.io/helm-charts/
    condition: trivy.enabled

kubeVersion: ">= 1.20.0"
  - name: renovate
    version: 37.368.2
    repository: https://docs.renovatebot.com/helm-charts
    condition: renovate.enabled
kubeVersion: ">= 1.25.0"

Some files were not shown because too many files have changed in this diff.