Compare commits
313 Commits
Author | SHA1 | Date |
---|---|---|
Stefan Reimer | 95ed3a6969 | |
Stefan Reimer | 40760d4a8e | |
Stefan Reimer | a488b14f97 | |
Stefan Reimer | 7cd1cd0c5e | |
Stefan Reimer | 48c816b32c | |
Renovate Bot | e0a24a0af9 | |
Renovate Bot | d0ed102d57 | |
Renovate Bot | bbeeb0db3d | |
Stefan Reimer | f8e7a85d9c | |
Stefan Reimer | 8bd713c1c7 | |
Stefan Reimer | 73d457d1b9 | |
Stefan Reimer | 46ccd445e0 | |
Stefan Reimer | 3c8a2d7dbd | |
Stefan Reimer | 229f5bc759 | |
Stefan Reimer | 0060ec1ed1 | |
Stefan Reimer | f6b54cde36 | |
Stefan Reimer | b9ee65d128 | |
Stefan Reimer | 76cc875990 | |
Stefan Reimer | 4a54fde888 | |
Renovate Bot | 2957b898d9 | |
Renovate Bot | 42d5000fe0 | |
Stefan Reimer | e7a66a584b | |
Renovate Bot | 8994289608 | |
Renovate Bot | c93b4c8b52 | |
Renovate Bot | 8d27fc22a0 | |
Stefan Reimer | 7eba80b54d | |
Renovate Bot | d66cdb42b8 | |
Stefan Reimer | 9cfeaec3a8 | |
Renovate Bot | 7bac355303 | |
Renovate Bot | dedfd1f7a3 | |
Stefan Reimer | 193967f600 | |
Stefan Reimer | 5be0f7087e | |
Stefan Reimer | b9e52bc2d9 | |
Stefan Reimer | 2cdb30b178 | |
Renovate Bot | 828c467d37 | |
Renovate Bot | dbd1ade98c | |
Stefan Reimer | 730020b329 | |
Stefan Reimer | 1caa01b28b | |
Stefan Reimer | c91d570857 | |
Stefan Reimer | da0c33b02b | |
Stefan Reimer | 5c6fd9bd2c | |
Stefan Reimer | 995d159d3e | |
Renovate Bot | c9dc123eff | |
Stefan Reimer | 5237b002b4 | |
Stefan Reimer | 61d373af7a | |
Stefan Reimer | 012c26d3d6 | |
Stefan Reimer | 945642a551 | |
Stefan Reimer | 9d835e4385 | |
Stefan Reimer | c057f35547 | |
Stefan Reimer | b29774d6d5 | |
Renovate Bot | e748303864 | |
Renovate Bot | 3f8a2c929c | |
Stefan Reimer | 7a80650d9c | |
Stefan Reimer | 75fc295066 | |
Stefan Reimer | 705f36f9aa | |
Renovate Bot | aa597a4970 | |
Renovate Bot | 0e4ed20972 | |
Renovate Bot | 773f968d90 | |
Renovate Bot | c54c9d78c4 | |
Stefan Reimer | 8b7b1ec8fa | |
Stefan Reimer | e2770079eb | |
Renovate Bot | b2d8a11854 | |
Stefan Reimer | 1bdbb7c538 | |
Stefan Reimer | 1350500f7f | |
Stefan Reimer | 1cb0ff2c0d | |
Stefan Reimer | 734f19010f | |
Stefan Reimer | 3013c39061 | |
Stefan Reimer | ca14178e94 | |
Stefan Reimer | 4b4431919a | |
Stefan Reimer | 32e71b4129 | |
Stefan Reimer | 6b7746d3df | |
Stefan Reimer | 52de70a4a8 | |
Renovate Bot | f8605e4b07 | |
Renovate Bot | e8204779a5 | |
Renovate Bot | 9a56c99ee5 | |
Stefan Reimer | 5116e52bc9 | |
Stefan Reimer | 26d59f63da | |
Stefan Reimer | 8c2ef9cf2c | |
Stefan Reimer | 9fed97db49 | |
Stefan Reimer | 588e50f56e | |
Stefan Reimer | 908055bd36 | |
Renovate Bot | a05e6286cc | |
Renovate Bot | 7b153ac7cc | |
Renovate Bot | 3e1d8e9c3e | |
Stefan Reimer | 78639b623a | |
Stefan Reimer | 4e9c147b7e | |
Stefan Reimer | 64d76c283a | |
Renovate Bot | 71f909e49e | |
Stefan Reimer | ed4a47dcec | |
Stefan Reimer | 3ab37e7a7b | |
Stefan Reimer | 798c3cba57 | |
Renovate Bot | 3b536f7c44 | |
Renovate Bot | 69e132c857 | |
Stefan Reimer | 53f0bbffb6 | |
Stefan Reimer | b0a6326a09 | |
Stefan Reimer | 358042d38b | |
Stefan Reimer | 22b774c939 | |
Renovate Bot | 71061475c8 | |
Stefan Reimer | 3ea16b311b | |
Stefan Reimer | 46e115e4f5 | |
Stefan Reimer | e55f986de8 | |
Stefan Reimer | 9ed2dbca96 | |
Renovate Bot | fcd2192cb4 | |
Renovate Bot | 8aa50e4129 | |
Renovate Bot | d9146abf72 | |
Renovate Bot | 7d354402d6 | |
Renovate Bot | 91a0034b26 | |
Renovate Bot | 48e381cb0f | |
Renovate Bot | b98dc98e81 | |
Stefan Reimer | 16fab2e0a0 | |
Stefan Reimer | 3b2f83c124 | |
Stefan Reimer | e36b096a46 | |
Stefan Reimer | 7628debe0c | |
Stefan Reimer | 72c585b8ef | |
Stefan Reimer | d8a73bbb73 | |
Stefan Reimer | 21e5417331 | |
Renovate Bot | 2dc58765e7 | |
Renovate Bot | cfda9b6a92 | |
Renovate Bot | 48b1d08cc6 | |
Stefan Reimer | 18b75af746 | |
Stefan Reimer | d4c67997ae | |
Renovate Bot | 4628d1e1e7 | |
Renovate Bot | 1a0bd7f312 | |
Stefan Reimer | 81c2f24156 | |
Stefan Reimer | 18a4f3e517 | |
Renovate Bot | 8fde71babb | |
Stefan Reimer | ccf8a0788d | |
Stefan Reimer | c96b4a4ab9 | |
Stefan Reimer | 46a4435fad | |
Stefan Reimer | 81f599dbb1 | |
Renovate Bot | 2b0ab96344 | |
Renovate Bot | 27009c7926 | |
Stefan Reimer | 444888ad12 | |
Stefan Reimer | b34c7bd392 | |
Renovate Bot | c66cf3bde2 | |
Stefan Reimer | 7b3f1fe40e | |
Stefan Reimer | 61454581c4 | |
Stefan Reimer | c2a495a6a6 | |
Renovate Bot | 0051451e19 | |
Renovate Bot | c73a3b7007 | |
Renovate Bot | 14030824b1 | |
Stefan Reimer | 5224947818 | |
Renovate Bot | 2850ce02d8 | |
Stefan Reimer | a04b26b851 | |
Stefan Reimer | cf3d5726e2 | |
Stefan Reimer | a56a88f622 | |
Renovate Bot | 8e3331e257 | |
Stefan Reimer | f0cb8e6cc7 | |
Stefan Reimer | a39542e387 | |
Renovate Bot | 8b54524c58 | |
Renovate Bot | fdcf82065f | |
Stefan Reimer | 683b7623f4 | |
Stefan Reimer | ee7161651e | |
Stefan Reimer | 011fbc3062 | |
Stefan Reimer | ccaebf4dad | |
Stefan Reimer | 3fb4567ad6 | |
Stefan Reimer | b2cf56367d | |
Stefan Reimer | 8a7ff1f8a1 | |
Stefan Reimer | 3dfd8bd4e4 | |
Stefan Reimer | 674c3cbc7d | |
Stefan Reimer | c1df995447 | |
Stefan Reimer | 0005b7fdac | |
Stefan Reimer | 96bf297d78 | |
Stefan Reimer | 20ab3bc8c7 | |
Stefan Reimer | a569a6f6b0 | |
Stefan Reimer | 1c076eab61 | |
Stefan Reimer | 39db495adc | |
Renovate Bot | cb45553d64 | |
Stefan Reimer | 248d2e6ffa | |
Stefan Reimer | 42df583323 | |
Stefan Reimer | a65b515f8c | |
Stefan Reimer | f6336e5df5 | |
Stefan Reimer | ac0760d21a | |
Stefan Reimer | ea2d531719 | |
Renovate Bot | d2ae34792d | |
Stefan Reimer | 1db90d4e28 | |
Stefan Reimer | d25a76b526 | |
Stefan Reimer | 915d6c4afb | |
Stefan Reimer | fccc78fd30 | |
Stefan Reimer | 9466cc983e | |
Stefan Reimer | 53bc8b4d88 | |
Renovate Bot | fae3dd10ea | |
Renovate Bot | 64c6056e50 | |
Renovate Bot | f8138fba5f | |
Renovate Bot | 4d93bf817a | |
Stefan Reimer | 3b32f29f90 | |
Stefan Reimer | b08ee586e6 | |
Stefan Reimer | 60c454b534 | |
Stefan Reimer | be1a727fe8 | |
Stefan Reimer | cb0b167437 | |
Renovate Bot | 1ab604df95 | |
Stefan Reimer | 528c425d38 | |
Stefan Reimer | 907d46e6db | |
Stefan Reimer | d8b4c720c8 | |
Stefan Reimer | f4f51c082d | |
Renovate Bot | ead3e680a5 | |
Stefan Reimer | b5a90a6227 | |
Stefan Reimer | ffcd94679b | |
Stefan Reimer | 5ce1b42483 | |
Renovate Bot | f33a01a188 | |
Stefan Reimer | f2e14f1d5d | |
Stefan Reimer | 54d382625a | |
Renovate Bot | f936d17c4e | |
Renovate Bot | 6d60bedae3 | |
Stefan Reimer | ae2a6fa146 | |
Renovate Bot | 025df84873 | |
Stefan Reimer | 950be88780 | |
Stefan Reimer | c0abdf2db0 | |
Stefan Reimer | 9ecf804a90 | |
Renovate Bot | 9cf6922727 | |
Stefan Reimer | 267505d840 | |
Renovate Bot | c22ea88f64 | |
Stefan Reimer | aa903d9e92 | |
Renovate Bot | 6364d38dc7 | |
Stefan Reimer | cdb2848413 | |
Stefan Reimer | 5b32381120 | |
Stefan Reimer | 72bca2e77e | |
Stefan Reimer | 55dd52d5ab | |
Renovate Bot | 20875794cd | |
Stefan Reimer | 617d8fbac4 | |
Renovate Bot | 4291231a8a | |
Stefan Reimer | 5cffc80159 | |
Renovate Bot | 50c9236e92 | |
Stefan Reimer | c139218e1b | |
Stefan Reimer | a6e37ab0c9 | |
Stefan Reimer | 2789fe5be1 | |
Stefan Reimer | 1546c3976a | |
Stefan Reimer | 7eb86a88fd | |
Stefan Reimer | ec70307115 | |
Stefan Reimer | f18d255640 | |
Stefan Reimer | b65293ac3e | |
Stefan Reimer | ab7873dbbf | |
Stefan Reimer | c0cb525f63 | |
Stefan Reimer | 8f89c3ce14 | |
Stefan Reimer | 0966f77d51 | |
Stefan Reimer | 7fd6fe638c | |
Renovate Bot | 741068d7c4 | |
Renovate Bot | 1746a0579b | |
Stefan Reimer | 49b052e8d4 | |
Stefan Reimer | 6e08d8a474 | |
Stefan Reimer | 5845ca08b7 | |
Stefan Reimer | 3fb9fc21cc | |
Renovate Bot | 10be7a8754 | |
Stefan Reimer | d709d0e9e4 | |
Renovate Bot | 6aff67dae0 | |
Stefan Reimer | bc5e6295d3 | |
Stefan Reimer | 156f41fec6 | |
Stefan Reimer | 6a72988f52 | |
Stefan Reimer | 5a7e926202 | |
Stefan Reimer | a66dfe7302 | |
Stefan Reimer | b918c2d145 | |
Stefan Reimer | e0cea3b90e | |
Stefan Reimer | 62ed59426f | |
Stefan Reimer | daa5e72343 | |
Renovate Bot | 8e4b0ac7c9 | |
Stefan Reimer | 17ad966742 | |
Stefan Reimer | ebcd56ff06 | |
Stefan Reimer | 49a88ba8fa | |
Stefan Reimer | 88ee121198 | |
Stefan Reimer | e03e2b7538 | |
Stefan Reimer | 9f0b976b26 | |
Stefan Reimer | df9b85c1cf | |
Stefan Reimer | a4fabccc5a | |
Renovate Bot | 84b7c28db4 | |
Stefan Reimer | 883295aae2 | |
Stefan Reimer | 14e00f09f4 | |
Stefan Reimer | 004c53d8a5 | |
Stefan Reimer | 1de82faf28 | |
Renovate Bot | 967eaeb38d | |
Stefan Reimer | 1b414be803 | |
Renovate Bot | 9be7819091 | |
Renovate Bot | 238a4becfa | |
Renovate Bot | 9ac9f51997 | |
Renovate Bot | c2e70af850 | |
Renovate Bot | 97c5e871be | |
Stefan Reimer | 2abaad1904 | |
Stefan Reimer | bd54440210 | |
Stefan Reimer | 4827fec336 | |
Stefan Reimer | febf0e4efb | |
Stefan Reimer | 60aa548d2a | |
Stefan Reimer | 391bbfe6d5 | |
Stefan Reimer | 57690b1f1e | |
Stefan Reimer | 1e8244ff8d | |
Stefan Reimer | e0f9af604c | |
Stefan Reimer | 9b663e4f22 | |
Stefan Reimer | 413d5be90d | |
Stefan Reimer | b50a66ab79 | |
Stefan Reimer | 11889208c7 | |
Renovate Bot | 1477f77a6d | |
Stefan Reimer | 0cb16de099 | |
Stefan Reimer | a6fd7a7d04 | |
Stefan Reimer | 8f30aec867 | |
Stefan Reimer | 317cab8522 | |
Stefan Reimer | 1e927c95fc | |
Stefan Reimer | 322f4341d9 | |
Stefan Reimer | c979b7ad71 | |
Stefan Reimer | d5510984e4 | |
Stefan Reimer | 870b851864 | |
Stefan Reimer | 5bcf945213 | |
Stefan Reimer | 5036298e3b | |
Stefan Reimer | 8489d77a03 | |
Stefan Reimer | 9121234d40 | |
Stefan Reimer | 36de253216 | |
Stefan Reimer | d76cd279bf | |
Stefan Reimer | fdb3ad0ef2 | |
Stefan Reimer | 35356b3412 | |
Stefan Reimer | 1d97c3e338 | |
Stefan Reimer | 7cf5be2a75 | |
Stefan Reimer | 5008420349 | |
Stefan Reimer | 1455c8b800 | |
Stefan Reimer | 5aaf6c7537 | |
Stefan Reimer | 10b3332633 | |
Stefan Reimer | dd650e775b |
.ci/ecr_public_lifecycle.py

@@ -0,0 +1,63 @@
#!/usr/bin/env python3

import argparse
import boto3

parser = argparse.ArgumentParser(
    description='Implement basic public ECR lifecycle policy')
parser.add_argument('--repo', dest='repositoryName', action='store', required=True,
                    help='Name of the public ECR repository')
parser.add_argument('--keep', dest='keep', action='store', default=10, type=int,
                    help='number of tagged images to keep, default 10')
parser.add_argument('--dev', dest='delete_dev', action='store_true',
                    help='also delete in-development images only having tags like v0.1.1-commitNr-githash')

args = parser.parse_args()

client = boto3.client('ecr-public', region_name='us-east-1')

images = client.describe_images(repositoryName=args.repositoryName)[
    "imageDetails"]

untagged = []
kept = 0

# actual image:
# imageManifestMediaType: 'application/vnd.oci.image.manifest.v1+json'
# image index:
# imageManifestMediaType: 'application/vnd.oci.image.index.v1+json'

# Sort by date uploaded, newest first
for image in sorted(images, key=lambda d: d['imagePushedAt'], reverse=True):
    # Remove all untagged images;
    # if the registry uses image indexes, all actual images will be untagged anyway
    if 'imageTags' not in image:
        untagged.append({"imageDigest": image['imageDigest']})
        # print("Delete untagged image {}".format(image["imageDigest"]))
        continue

    # check for dev tags
    if args.delete_dev:
        _delete = True
        for tag in image["imageTags"]:
            # Look for at least one tag NOT being a SemVer dev tag
            if "-" not in tag:
                _delete = False
        if _delete:
            print("Deleting development image {}".format(image["imageTags"]))
            untagged.append({"imageDigest": image['imageDigest']})
            continue

    if kept < args.keep:
        kept = kept + 1
        print("Keeping tagged image {}".format(image["imageTags"]))
        continue
    else:
        print("Deleting tagged image {}".format(image["imageTags"]))
        untagged.append({"imageDigest": image['imageDigest']})

deleted_images = client.batch_delete_image(
    repositoryName=args.repositoryName, imageIds=untagged)

if deleted_images["imageIds"]:
    print("Deleted images: {}".format(deleted_images["imageIds"]))
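For reference, the new lifecycle script can be run directly against a repository. The repository name below is illustrative; the flags come from the argparse definition above:

```bash
# Keep the 10 newest tagged images; additionally delete all untagged
# images and in-development tags of the form v0.1.1-commitNr-githash
./.ci/ecr_public_lifecycle.py --repo kubezero-admin --keep 10 --dev
```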
.ci/podman.mk

@@ -1,21 +1,26 @@
# Parse version from latest git semver tag
GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_TAG := $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)

TAG ::= $(GIT_TAG)
# append branch name to tag if NOT main nor master
TAG := $(GIT_TAG)
ifeq (,$(filter main master, $(GIT_BRANCH)))
ifneq ($(GIT_TAG), $(GIT_BRANCH))
TAG = $(GIT_TAG)-$(GIT_BRANCH)
# If branch is substring of tag, omit branch name
ifeq ($(findstring $(GIT_BRANCH), $(GIT_TAG)),)
# only append branch name if not equal tag
ifneq ($(GIT_TAG), $(GIT_BRANCH))
# Sanitize GIT_BRANCH to allowed Docker tag character set
TAG = $(GIT_TAG)-$(shell echo $$GIT_BRANCH | sed -e 's/[^a-zA-Z0-9]/-/g')
endif
endif
endif

ARCH := amd64
ALL_ARCHS := amd64 arm64
ARCH ::= amd64
ALL_ARCHS ::= amd64 arm64
_ARCH = $(or $(filter $(ARCH),$(ALL_ARCHS)),$(error $$ARCH [$(ARCH)] must be exactly one of "$(ALL_ARCHS)"))

ifneq ($(TRIVY_REMOTE),)
TRIVY_OPTS := --server $(TRIVY_REMOTE)
TRIVY_OPTS ::= --server $(TRIVY_REMOTE)
endif

.SILENT: ; # no need for @

@@ -28,18 +33,20 @@ endif
help: ## Show Help
	grep -E '^[a-zA-Z_-]+:.*?## .*$$' .ci/podman.mk | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

prepare:: ## custom step on the build agent before building

fmt:: ## auto format source

lint:: ## Lint source

build: ## Build the app
	buildah build --rm --layers -t $(IMAGE):$(TAG)-$(_ARCH) --build-arg TAG=$(TAG) --build-arg ARCH=$(_ARCH) --platform linux/$(_ARCH) .

test: rm-test-image ## Execute Dockerfile.test
	test -f Dockerfile.test && \
	{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(_ARCH) . && \
	podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(_ARCH)-test; } || \
	echo "No Dockerfile.test found, skipping test"

test:: ## test built artifacts

scan: ## Scan image using trivy
	echo "Scanning $(IMAGE):$(TAG)-$(_ARCH) using Trivy $(TRIVY_REMOTE)"
	trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(_ARCH)
	trivy image $(TRIVY_OPTS) --quiet --no-progress localhost/$(IMAGE):$(TAG)-$(_ARCH)

# first tag and push all actual images
# create new manifest for each tag and add all available TAG-ARCH before pushing

@@ -59,24 +66,19 @@ push: ecr-login ## push images to registry
ecr-login: ## log into AWS ECR public
	aws ecr-public get-login-password --region $(REGION) | podman login --username AWS --password-stdin $(REGISTRY)

clean: rm-test-image rm-image ## delete local built container and test images

rm-remote-untagged: ## delete all remote untagged and in-dev images, keep 10 tagged
	echo "Removing all untagged and in-dev images from $(IMAGE) in $(REGION)"
	.ci/ecr_public_lifecycle.py --repo $(IMAGE) --dev

rm-remote-untagged: ## delete all remote untagged images
	echo "Removing all untagged images from $(IMAGE) in $(REGION)"
	IMAGE_IDS=$$(for image in $$(aws ecr-public describe-images --repository-name $(IMAGE) --region $(REGION) --output json | jq -r '.imageDetails[] | select(.imageTags | not ).imageDigest'); do echo -n "imageDigest=$$image "; done) ; \
	[ -n "$$IMAGE_IDS" ] && aws ecr-public batch-delete-image --repository-name $(IMAGE) --region $(REGION) --image-ids $$IMAGE_IDS || echo "No image to remove"

clean:: ## clean up source folder

rm-image:
	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH) > /dev/null
	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH))" || echo "Error: Removing image failed"

# Ensure we run the tests by removing any previous runs
rm-test-image:
	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || podman image rm -f $(IMAGE):$(TAG)-$(_ARCH)-test > /dev/null
	test -z "$$(podman image ls -q $(IMAGE):$(TAG)-$(_ARCH)-test)" || echo "Error: Removing test image failed"

## some useful tasks during development
ci-pull-upstream: ## pull latest shared .ci subtree
	git stash && git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash && git stash pop
	git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"

create-repo: ## create new AWS ECR public repository
	aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
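The branch-sanitizing step in the TAG logic above can be sanity-checked outside of make; this is simply the same sed expression applied to a sample branch name (the branch name is illustrative):

```bash
# "feature/foo_bar" -> "feature-foo-bar", keeping the resulting image tag
# (e.g. v1.2.3-feature-foo-bar) within the allowed Docker tag charset
GIT_BRANCH="feature/foo_bar"
echo "$GIT_BRANCH" | sed -e 's/[^a-zA-Z0-9]/-/g'
```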
@@ -2,6 +2,9 @@
def call(Map config=[:]) {
  pipeline {
    options {
      disableConcurrentBuilds()
    }
    agent {
      node {
        label 'podman-aws-trivy'

@@ -10,18 +13,22 @@ def call(Map config=[:]) {
    stages {
      stage('Prepare') {
        steps {
          sh 'mkdir -p reports'

          // we set pull tags as project adv. options
          // pull tags
          withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
            sh 'git fetch -q --tags ${GIT_URL}'
          }
          sh 'make prepare || true'
          //withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
          //  sh 'git fetch -q --tags ${GIT_URL}'
          //}
          // Optional project specific preparations
          sh 'make prepare'
        }
      }

      // Build using rootless podman
      stage('Build') {
        steps {
          sh 'make build'
          sh 'make build GIT_BRANCH=$GIT_BRANCH'
        }
      }

@@ -33,12 +40,13 @@ def call(Map config=[:]) {
      // Scan via trivy
      stage('Scan') {
        environment {
          TRIVY_FORMAT = "template"
          TRIVY_OUTPUT = "reports/trivy.html"
        }
        steps {
          sh 'mkdir -p reports && make scan'
          // we always scan and create the full json report
          sh 'TRIVY_FORMAT=json TRIVY_OUTPUT="reports/trivy.json" make scan'

          // render custom full html report
          sh 'trivy convert -f template -t @/home/jenkins/html.tpl -o reports/trivy.html reports/trivy.json'

          publishHTML target: [
            allowMissing: true,
            alwaysLinkToLastBuild: true,

@@ -48,26 +56,33 @@ def call(Map config=[:]) {
            reportName: 'TrivyScan',
            reportTitles: 'TrivyScan'
          ]
          sh 'echo "Trivy report at: $BUILD_URL/TrivyScan"'

          // Scan again and fail on CRITICAL vulns, if not overridden
          // fail build if issues found above trivy threshold
          script {
            if (config.trivyFail == 'NONE') {
              echo 'trivyFail == NONE, review Trivy report manually. Proceeding ...'
            } else {
              sh "TRIVY_EXIT_CODE=1 TRIVY_SEVERITY=${config.trivyFail} make scan"
            if ( config.trivyFail ) {
              sh "TRIVY_SEVERITY=${config.trivyFail} trivy convert --report summary --exit-code 1 reports/trivy.json"
            }
          }
        }
      }

      // Push to container registry, skip if PR
      // Push to container registry if not PR
      // incl. basic registry retention removing any untagged images
      stage('Push') {
        when { not { changeRequest() } }
        steps {
          sh 'make push'
          sh 'make rm-remote-untagged'
        }
      }

      // generic clean
      stage('cleanup') {
        steps {
          sh 'make clean'
        }
      }
    }
  }
}
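The reworked Scan stage produces a single JSON report and gates on it with `trivy convert` instead of scanning the image twice. The equivalent standalone invocation, with an illustrative severity value, would be:

```bash
# Exit non-zero if reports/trivy.json contains findings at or above
# the configured severity, without re-scanning the image
TRIVY_SEVERITY=CRITICAL trivy convert --report summary --exit-code 1 reports/trivy.json
```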
@@ -1,9 +1,9 @@
ARG ALPINE_VERSION=3.17
ARG ALPINE_VERSION=3.19

FROM docker.io/alpine:${ALPINE_VERSION}

ARG ALPINE_VERSION
ARG KUBE_VERSION=1.25
ARG KUBE_VERSION=1.28.9

RUN cd /etc/apk/keys && \
    wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \

@@ -30,7 +30,7 @@ RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
    mkdir -p /var/lib/kubezero

ADD admin/kubezero.sh admin/libhelm.sh admin/migrate_argo_values.py /usr/bin
ADD admin/libhelm.sh admin/pre-upgrade.sh /var/lib/kubezero
ADD admin/libhelm.sh /var/lib/kubezero

ADD charts/kubeadm /charts/kubeadm
ADD charts/kubezero /charts/kubezero
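A local rebuild of the admin image with the bumped versions would look roughly like this; the tag is illustrative, and the build args match the ARGs declared in the Dockerfile above:

```bash
buildah build --build-arg ALPINE_VERSION=3.19 --build-arg KUBE_VERSION=1.28.9 \
  -t kubezero-admin:local .
```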
Makefile

@@ -2,9 +2,11 @@ REGISTRY := public.ecr.aws/zero-downtime
IMAGE := kubezero-admin
REGION := us-east-1

# Use KubeZero chart version rather than git tag for admin image
GIT_TAG = v$(shell yq .version < charts/kubezero/Chart.yaml)

# Also tag as Kubernetes major version
MY_TAG = $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
EXTRA_TAGS = $(shell echo $(MY_TAG) | awk -F '.' '{ print $$1 "." $$2 }')
EXTRA_TAGS = $(shell echo $(GIT_TAG) | awk -F '.' '{ print $$1 "." $$2 }')

include .ci/podman.mk
README.md

@@ -4,12 +4,12 @@ KubeZero is a Kubernetes distribution providing an integrated container platform

# Design philosophy

- Cloud provider agnostic, bare-metal/self-hosted
- Focus on security and simplicity over feature creep
- No vendor lock in, most components are optional and could be easily exchanged
- Organic Open Source / open and permissive licenses over closed-source solutions
- No vendor lock in, most components are optional and could be easily changed as needed
- No premium services / subscriptions required
- Staying up to date and contributing back to upstream projects, like alpine-cloud-images and others
- Cloud provider agnostic, bare-metal/self-hosted
- Organic Open Source / open and permissive licenses over closed-source solutions
- Corgi approved :dog:

@@ -18,8 +18,8 @@ KubeZero is a Kubernetes distribution providing an integrated container platform

# Version / Support Matrix
KubeZero releases track the same *minor* version of Kubernetes.
Any 1.24.X-Y release of Kubezero supports any Kubernetes cluster 1.24.X.
Any 1.26.X-Y release of Kubezero supports any Kubernetes cluster 1.26.X.

KubeZero is distributed as a collection of versioned Helm charts, allowing custom upgrade schedules and module versions as needed.

@@ -28,15 +28,15 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custo
gantt
    title KubeZero Support Timeline
    dateFormat YYYY-MM-DD
    section 1.23
        beta :123b, 2022-08-01, 2022-09-01
        release :after 123b, 2023-02-01
    section 1.24
        beta :124b, 2022-11-14, 2022-12-31
        release :after 124b, 2023-06-01
    section 1.25
        beta :125b, 2023-03-01, 2023-03-31
        release :after 125b, 2023-08-01
    section 1.27
        beta :127b, 2023-09-01, 2023-09-30
        release :after 127b, 2024-04-30
    section 1.28
        beta :128b, 2024-03-01, 2024-04-30
        release :after 128b, 2024-08-31
    section 1.29
        beta :129b, 2024-07-01, 2024-08-30
        release :after 129b, 2024-11-30
```

[Upstream release policy](https://kubernetes.io/releases/)

@@ -44,14 +44,20 @@ gantt
# Components

## OS
- all nodes are based on Alpine V3.16
- 2 GB encrypted root filesystem
- no 3rd party dependencies at boot ( other than container registries )
- all compute nodes are running on Alpine V3.19
- 1 or 2 GB encrypted root file system
- no external dependencies at boot time, apart from container registries
- minimal attack surface
- extremely small memory footprint / overhead
- cri-o container runtime incl. AppArmor support

## Container runtime
- cri-o rather than Docker for improved security and performance

## GitOps
- cli / cmd line install
- optional full ArgoCD support and integration

## Featured workloads
- rootless CI/CD build platform to build containers as part of a CI pipeline, using podman / fuse device plugin support
- containerized AI models via integrated out of the box support for Nvidia GPU workers as well as AWS Neuron

## Control plane
- all Kubernetes components compiled against Alpine OS using `buildmode=pie`

@@ -59,11 +65,6 @@ gantt
- access to control plane from within the VPC only by default ( VPN access required for Admin tasks )
- controller nodes are used for various platform admin controllers / operators to reduce costs and noise on worker nodes

## GitOps
- cli / cmd line install
- optional full ArgoCD support and integration
- fuse device plugin support to build containers as part of a CI pipeline leveraging rootless podman build agents

## AWS integrations
- IAM roles for service accounts allowing each pod to assume individual IAM roles
- access to meta-data services is blocked for all workload containers on all nodes

@@ -73,10 +74,8 @@ gantt
- support for [Inf1 instances](https://aws.amazon.com/ec2/instance-types/inf1/) as part of [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)

## Network
- Cilium using Geneve encapsulation, incl. increased MTU allowing flexible / more containers per worker node compared to eg. AWS VPC CNI
- Multus support for multiple network interfaces per pod, eg. additional AWS CNI
- Calico using VxLAN incl. increased MTU
  allows flexible / more containers per worker node compared to eg. AWS VPC CNI
- isolates container traffic from VPC by using VxLAN overlay
- no restrictions on IP space / sizing from the underlying VPC architecture

## Storage

@@ -86,16 +85,16 @@ allows flexible / more containers per worker node compared to eg. AWS VPC CNI
- CSI Snapshot controller and Gemini snapshot groups and retention

## Ingress
- AWS Network Loadbalancer and Istio Ingress controllers
- no additional costs per exposed service
- real client source IP available to workloads via HTTP header and access logs
- ACME SSL Certificate handling via cert-manager incl. renewal etc.
- support for TCP services
- optional rate limiting support
- optional full service mesh

## Metrics
- Prometheus support for all components
- Prometheus support for all components, incl. out of cluster EC2 instances (node_exporter)
- automated service discovery allowing instant access to common workload metrics
- pre-configured Grafana dashboards and alerts
- Alertmanager events via SNSAlertHub to Slack, Google, Matrix, etc.
@@ -0,0 +1,84 @@
#!/bin/bash
#set -eEx
#set -o pipefail
set -x

ARTIFACTS=($(echo $1 | tr "," "\n"))
ACTION=${2:-apply}

#VERSION="latest"
KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"

WORKDIR=$(mktemp -p /tmp -d kubezero.XXX)
[ -z "$DEBUG" ] && trap 'rm -rf $WORKDIR' ERR EXIT

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh
CHARTS="$(dirname $SCRIPT_DIR)/charts"

### Various hooks for modules

################
# cert-manager #
################
function cert-manager-post() {
  # If any error occurs, wait for initial webhook deployment and try again
  # see: https://cert-manager.io/docs/concepts/webhook/#webhook-connection-problems-shortly-after-cert-manager-installation

  if [ $rc -ne 0 ]; then
    wait_for "kubectl get deployment -n $namespace cert-manager-webhook"
    kubectl rollout status deployment -n $namespace cert-manager-webhook
    wait_for 'kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0"'
    apply
  fi

  wait_for "kubectl get ClusterIssuer -n $namespace kubezero-local-ca-issuer"
  kubectl wait --timeout=180s --for=condition=Ready -n $namespace ClusterIssuer/kubezero-local-ca-issuer
}


###########
# ArgoCD #
###########
function argocd-pre() {
  for f in $CLUSTER/secrets/argocd-*.yaml; do
    kubectl apply -f $f
  done
}


###########
# Metrics #
###########
# Cleanup patch jobs from previous runs, ArgoCD does this automatically
function metrics-pre() {
  kubectl delete jobs --field-selector status.successful=1 -n monitoring
}


### Main
get_kubezero_values

# Always use embedded kubezero chart
helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $KUBE_VERSION --version ~$KUBE_VERSION --devel --output-dir $WORKDIR

# Resolve all enabled artifacts
if [ ${ARTIFACTS[0]} == "all" ]; then
  ARTIFACTS=($(ls $WORKDIR/kubezero/templates | sed -e 's/.yaml//g'))
fi

if [ $ACTION == "apply" -o $ACTION == "crds" ]; then
  for t in ${ARTIFACTS[@]}; do
    _helm $ACTION $t || true
  done

# Delete in reverse order, continue even if errors
elif [ $ACTION == "delete" ]; then
  set +e
  for (( idx=${#ARTIFACTS[@]}-1 ; idx>=0 ; idx-- )) ; do
    _helm delete ${ARTIFACTS[idx]} || true
  done
fi
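Usage follows the ARTIFACTS/ACTION parsing at the top of the script; the path below is an assumption, as this hunk does not show the file name:

```bash
# Render and apply two modules (ACTION defaults to "apply")
./admin/apply.sh cert-manager,argo

# Tear everything down, in reverse order
./admin/apply.sh all delete
```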
admin/kubezero.sh

@@ -13,7 +13,7 @@ export WORKDIR=/tmp/kubezero
export HOSTFS=/host
export CHARTS=/charts
export KUBE_VERSION=$(kubeadm version -o json | jq -r .clientVersion.gitVersion)
export KUBE_VERSION_MINOR="v1.$(kubectl version -o json | jq .clientVersion.minor -r)"
export KUBE_VERSION_MINOR=$(echo $KUBE_VERSION | sed -e 's/\.[0-9]*$//')

export KUBECONFIG="${HOSTFS}/root/.kube/config"

@@ -117,8 +117,28 @@ post_kubeadm()
kubeadm_upgrade() {
  # pre upgrade hook
  [ -f /var/lib/kubezero/pre-upgrade.sh ] && . /var/lib/kubezero/pre-upgrade.sh

  # get current values, argo app over cm
  get_kubezero_values

  # tumble new config through migrate.py
  migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml

  # Update kubezero-values CM
  kubectl get cm -n kube-system kubezero-values -o=yaml | \
    yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
    kubectl replace -f -

  # update argo app
  kubectl get application kubezero -n argocd -o yaml | \
    kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
    yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
    kubectl apply -f -

  # finally remove annotation to allow argo to sync again
  kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'

  # Local node upgrade
  render_kubeadm

  pre_kubeadm

@@ -161,7 +181,7 @@ control_plane_node()
  else
    # restore latest backup
    retry 10 60 30 restic restore latest --no-lock -t / #Review: Use latest no matter what for now: --tag $KUBE_VERSION_MINOR
    retry 10 60 30 restic restore latest --no-lock -t / # --tag $KUBE_VERSION_MINOR

    # Make last etcd snapshot available
    cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes

@@ -214,11 +234,11 @@ control_plane_node()
    sleep 3
  done

  # see if we are a former member
  # see if we are a former member and remove our former self if so
  MY_ID=$(etcdctl member list --endpoints=$etcd_endpoints | grep $ETCD_NODENAME | awk '{print $1}' | sed -e 's/,$//')
  [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints

  # flush etcd data directory as joining with previous store seems flaky, especially during etcd version upgrades
  # flush etcd data directory as joining with previous storage seems flaky, especially during etcd version upgrades
  rm -rf ${HOSTFS}/var/lib/etcd/member

  # Announce new etcd member and capture ETCD_INITIAL_CLUSTER, retry needed in case another node joining causes temp quorum loss
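The new KUBE_VERSION_MINOR derivation simply strips the patch level from the kubeadm-reported version; for example:

```bash
# v1.28.9 -> v1.28
KUBE_VERSION="v1.28.9"
echo $KUBE_VERSION | sed -e 's/\.[0-9]*$//'
```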
admin/libhelm.sh

@@ -1,7 +1,10 @@
#!/bin/bash

# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1"
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget"

#VERSION="latest"
VERSION="v1.28"

# Waits for max 300s and retries
function wait_for() {

@@ -166,6 +169,9 @@ function _helm()
  render
  kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?

  # Try again without server-side, review with 1.26, required for cert-manager during 1.25
  [ $rc -ne 0 ] && kubectl $action -f $WORKDIR/helm.yaml && rc=$? || rc=$?

  # Optional post hook
  declare -F ${module}-post && ${module}-post

@@ -179,3 +185,125 @@ function _helm()

  return 0
}

function all_nodes_upgrade() {
  CMD="$1"

  echo "Deploy all node upgrade daemonSet(busybox)"
  cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kubezero-all-nodes-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  selector:
    matchLabels:
      name: kubezero-all-nodes-upgrade
  template:
    metadata:
      labels:
        name: kubezero-all-nodes-upgrade
    spec:
      hostNetwork: true
      hostIPC: true
      hostPID: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      initContainers:
      - name: node-upgrade
        image: busybox
        command: ["/bin/sh"]
        args: ["-x", "-c", "$CMD" ]
        volumeMounts:
        - name: host
          mountPath: /host
        - name: hostproc
          mountPath: /hostproc
        securityContext:
          privileged: true
          capabilities:
            add: ["SYS_ADMIN"]
      containers:
      - name: node-upgrade-wait
        image: busybox
        command: ["sleep", "3600"]
      volumes:
      - name: host
        hostPath:
          path: /
          type: Directory
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
EOF

  kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
  kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}


function control_plane_upgrade() {
  TASKS="$1"

  echo "Deploy cluster admin task: $TASKS"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubezero-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  hostNetwork: true
  hostIPC: true
  hostPID: true
  containers:
  - name: kubezero-admin
    image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
    imagePullPolicy: Always
    command: ["kubezero.sh"]
    args: [$TASKS]
    env:
    - name: DEBUG
      value: "$DEBUG"
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    volumeMounts:
    - name: host
      mountPath: /host
    - name: workdir
      mountPath: /tmp
    securityContext:
      capabilities:
        add: ["SYS_CHROOT"]
  volumes:
  - name: host
    hostPath:
      path: /
      type: Directory
  - name: workdir
    emptyDir: {}
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
  restartPolicy: Never
EOF

  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
  while true; do
    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
    sleep 3
  done
  kubectl delete pod kubezero-upgrade -n kube-system
}
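Both helpers are called from the upgrade scripts later in this compare; the control-plane call is taken verbatim from there, while the per-node command is purely illustrative:

```bash
# Run the kubeadm upgrade task inside the kubezero-admin pod
control_plane_upgrade kubeadm_upgrade

# Execute a one-off command on every node via the busybox DaemonSet
all_nodes_upgrade "touch /host/tmp/node-upgraded"
```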
admin/migrate_argo_values.py

@@ -8,15 +8,11 @@ import yaml
def migrate(values):
    """Actual changes here"""

    # Move additional prometheus labels to better config tree
    # argoCD moves to argo module
    try:
        labels = {}
        for c in values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"]["additionalAlertRelabelConfigs"]:
            labels[c["target_label"]] = c["replacement"]

        values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"]["externalLabels"] = labels
        deleteKey(values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"], "additionalAlertRelabelConfigs")

        if values["argocd"]["enabled"]:
            values["argo"] = { "enabled": True, "argo-cd": values["argocd"] }
            values.pop("argocd")
    except KeyError:
        pass
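The migration runs as a plain stdin-to-stdout filter, exactly as wired into kubeadm_upgrade above:

```bash
# stdin: current kubezero values, stdout: migrated values
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml
```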
admin/pre-upgrade.sh

@@ -1,21 +0,0 @@
#!/bin/bash

# get current values, argo app over cm
get_kubezero_values

# tumble new config through migrate.py
migrate_argo_values.py < "$WORKDIR"/kubezero-values.yaml > "$WORKDIR"/new-kubezero-values.yaml

# Update kubezero-values CM
kubectl get cm -n kube-system kubezero-values -o=yaml | \
  yq e '.data."values.yaml" |= load_str("/tmp/kubezero/new-kubezero-values.yaml")' | \
  kubectl replace -f -

# update argo app
kubectl get application kubezero -n argocd -o yaml | \
  kubezero_chart_version=$(yq .version /charts/kubezero/Chart.yaml) \
  yq '.spec.source.helm.values |= load_str("/tmp/kubezero/new-kubezero-values.yaml") | .spec.source.targetRevision = strenv(kubezero_chart_version)' | \
  kubectl apply -f -

# finally remove annotation to allow argo to sync again
kubectl patch app kubezero -n argocd --type json -p='[{"op": "remove", "path": "/metadata/annotations"}]'
@@ -1,144 +1,14 @@
#!/bin/bash -e
#!/bin/bash
set -eE
set -o pipefail

#VERSION="latest"
VERSION="v1.25"
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}

SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh

[ -n "$DEBUG" ] && set -x

all_nodes_upgrade() {
  CMD="$1"

  echo "Deploy all node upgrade daemonSet(busybox)"
  cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kubezero-all-nodes-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  selector:
    matchLabels:
      name: kubezero-all-nodes-upgrade
  template:
    metadata:
      labels:
        name: kubezero-all-nodes-upgrade
    spec:
      hostNetwork: true
      hostIPC: true
      hostPID: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      initContainers:
      - name: node-upgrade
        image: busybox
        command: ["/bin/sh"]
        args: ["-x", "-c", "$CMD" ]
        volumeMounts:
        - name: host
          mountPath: /host
        - name: hostproc
          mountPath: /hostproc
        securityContext:
          privileged: true
          capabilities:
            add: ["SYS_ADMIN"]
      containers:
      - name: node-upgrade-wait
        image: busybox
        command: ["sleep", "3600"]
      volumes:
      - name: host
        hostPath:
          path: /
          type: Directory
      - name: hostproc
        hostPath:
          path: /proc
          type: Directory
EOF

  kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
  kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}


control_plane_upgrade() {
  TASKS="$1"

  echo "Deploy cluster admin task: $TASKS"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: kubezero-upgrade
  namespace: kube-system
  labels:
    app: kubezero-upgrade
spec:
  hostNetwork: true
  hostIPC: true
  hostPID: true
  containers:
  - name: kubezero-admin
    image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
    imagePullPolicy: Always
    command: ["kubezero.sh"]
    args: [$TASKS]
    env:
    - name: DEBUG
      value: "$DEBUG"
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
    volumeMounts:
    - name: host
      mountPath: /host
    - name: workdir
      mountPath: /tmp
    securityContext:
      capabilities:
        add: ["SYS_CHROOT"]
  volumes:
  - name: host
    hostPath:
      path: /
      type: Directory
  - name: workdir
    emptyDir: {}
  nodeSelector:
    node-role.kubernetes.io/control-plane: ""
  tolerations:
  - key: node-role.kubernetes.io/master
    operator: Exists
    effect: NoSchedule
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
  restartPolicy: Never
EOF

  kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
  while true; do
    kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
    sleep 3
  done
  kubectl delete pod kubezero-upgrade -n kube-system
}

. "$SCRIPT_DIR"/libhelm.sh

echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning

@@ -147,29 +17,34 @@ argo_used && disable_argo

#all_nodes_upgrade ""

# Cleanup
# Remove calico CRDs
kubectl delete -f https://git.zero-downtime.net/ZeroDownTime/kubezero/raw/tag/v1.23.11/charts/kubezero-network/charts/calico/crds/crds.yaml 2>/dev/null || true
kubectl delete servicemonitor calico-node -n kube-system 2>/dev/null || true

# delete old kubelet configs
for cm in $(kubectl get cm -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete cm $cm -n kube-system; done
for rb in $(kubectl get rolebindings -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete rolebindings $rb -n kube-system; done

control_plane_upgrade kubeadm_upgrade

echo "Adjust kubezero values as needed:"
#echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system

control_plane_upgrade "apply_network, apply_addons, apply_storage"

### v1.28
# - remove old argocd app, all resources will be taken over by argo.argo-cd
argo_used && rc=$? || rc=$?
if [ $rc -eq 0 ]; then
  kubectl patch app argocd -n argocd \
    --type json \
    --patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]' && \
    kubectl delete app argocd -n argocd || true

  # remove legacy argocd app resources, but NOT kubezero-git-sync nor the appproject
  kubectl api-resources --verbs=list --namespaced -o name | grep -ve 'app.*argoproj' | xargs -n 1 kubectl delete --ignore-not-found -l argocd.argoproj.io/instance=argocd -n argocd
fi

# upgrade modules
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"

echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning

echo "Applying remaining KubeZero modules..."

control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_argocd"
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"

# Trigger backup of upgraded cluster state
kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$VERSION -n kube-system
@@ -1,18 +1,19 @@
apiVersion: v2
name: clamav
description: Chart for deploying a ClamavD on kubernetes as statfulSet
description: Chart for deploying a ClamAVd on Kubernetes as statefulSet
type: application
version: 0.1.1
appVersion: 0.104.0
version: "0.3.1"
appVersion: "1.2.1"
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  - clamav
maintainers:
  - name: Quarky9
  - name: Stefan Reimer
    email: stefan@zero-downtime.net
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.4"
    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
kubeVersion: ">= 1.18.0"
kubeVersion: ">= 1.26.0"
@@ -1,8 +1,8 @@
# clamav

![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.104.0](https://img.shields.io/badge/AppVersion-0.104.0-informational?style=flat-square)
![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.2.1](https://img.shields.io/badge/AppVersion-1.2.1-informational?style=flat-square)

Chart for deploying a ClamavD on kubernetes as statfulSet
Chart for deploying a ClamAVd on Kubernetes as statefulSet

**Homepage:** <https://kubezero.com>

@@ -10,32 +10,31 @@ Chart for deploying a ClamAVd on Kubernetes as statefulSet

| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 | | |
| Stefan Reimer | <stefan@zero-downtime.net> | |

## Requirements

Kubernetes: `>= 1.18.0`
Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| clamav.freshclam.mirrors | string | `"database.clamav.net"` | A list of clamav mirrors to be used by the clamav service |
| clamav.image | string | `"clamav/clamav"` | The clamav docker image |
| clamav.limits.connectionQueueLength | int | `100` | Maximum length the queue of pending connections may grow to |
| clamav.limits.fileSize | int | `20` | The largest file size scanable by clamav, in MB |
| clamav.limits.maxThreads | int | `4` | Maximum number of threads running at the same time. |
| clamav.limits.scanSize | int | `100` | The largest scan size permitted in clamav, in MB |
| clamav.limits.sendBufTimeout | int | `500` | |
| clamav.replicaCount | int | `1` | |
| clamav.resources | object | `{"requests":{"cpu":"300m","memory":"1300M"}}` | The resource requests and limits for the clamav service |
| clamav.version | string | `"unstable"` | The clamav docker image version - defaults to .Chart.appVersion |
| freshclam.mirrors | string | `"database.clamav.net"` | A list of clamav mirrors to be used by the clamav service |
| fullnameOverride | string | `""` | override the full name of the clamav chart |
| image | object | `{"repository":"clamav/clamav","type":"base"}` | The clamav docker image |
| limits.connectionQueueLength | int | `100` | Maximum length the queue of pending connections may grow to |
| limits.fileSize | int | `25` | The largest file size scanable by clamav, in MB |
| limits.maxThreads | int | `4` | Maximum number of threads running at the same time. |
| limits.scanSize | int | `100` | The largest scan size permitted in clamav, in MB |
| limits.sendBufTimeout | int | `500` | |
| nameOverride | string | `""` | override the name of the clamav chart |
| replicaCount | int | `1` | |
| resources | object | `{"requests":{"cpu":"300m","memory":"2000M"}}` | The resource requests and limits for the clamav service |
| service.port | int | `3310` | The port to be used by the clamav service |

----------------------------------------------
@@ -1,7 +0,0 @@
#!/bin/bash

release=clamav
namespace=clamav

helm template . --namespace $namespace --name-template $release > clamav.yaml
kubectl apply --namespace $namespace -f clamav.yaml
@@ -10,7 +10,7 @@ data:
    LogTime yes
    LogClean yes
    LogSyslog no
    LogVerbose no
    LogVerbose yes
    LogFileMaxSize 0
    LogFile /dev/stdout
    DatabaseDirectory /var/lib/clamav

@@ -19,28 +19,28 @@ data:
    User clamav
    ExitOnOOM yes
    Foreground yes
    MaxScanSize {{.Values.clamav.limits.scanSize}}M
    MaxFileSize {{.Values.clamav.limits.fileSize}}M
    MaxScanSize {{.Values.limits.scanSize}}M
    MaxFileSize {{.Values.limits.fileSize}}M

    # Close the connection when the data size limit is exceeded.
    # The value should match your MTA's limit for a maximum attachment size.
    # Default: 25M
    StreamMaxLength {{.Values.clamav.limits.scanSize}}M
    StreamMaxLength {{.Values.limits.scanSize}}M

    # Maximum length the queue of pending connections may grow to.
    # Default: 200
    MaxConnectionQueueLength {{.Values.clamav.limits.connectionQueueLength}}
    MaxConnectionQueueLength {{.Values.limits.connectionQueueLength}}

    # Maximum number of threads running at the same time.
    # Default: 10
    MaxThreads {{.Values.clamav.limits.maxThreads}}
    MaxThreads {{.Values.limits.maxThreads}}

    # This option specifies how long to wait (in milliseconds) if the send buffer
    # is full.
    # Keep this value low to prevent clamd hanging.
    #
    # Default: 500
    SendBufTimeout {{.Values.clamav.limits.sendBufTimeout}}
    SendBufTimeout {{.Values.limits.sendBufTimeout}}

  freshclam.conf: |
    LogTime yes

@@ -49,4 +49,4 @@ data:
    Checks 24
    LogSyslog no
    DatabaseOwner root
    DatabaseMirror {{ .Values.clamav.freshclam.mirrors }}
    DatabaseMirror {{ .Values.freshclam.mirrors }}
@@ -6,7 +6,7 @@ metadata:
  labels:
    {{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.clamav.replicaCount }}
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "kubezero-lib.selectorLabels" . | nindent 6 }}

@@ -20,7 +20,7 @@ spec:
    spec:
      containers:
      - name: clamav
        image: "{{ .Values.clamav.image }}:{{ default .Chart.AppVersion .Values.clamav.version }}_base"
        image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}_{{ .Values.image.type }}"
        ports:
        - containerPort: 3310
          name: clamav

@@ -41,7 +41,7 @@ spec:
          successThreshold: 1
          timeoutSeconds: 3
        resources:
          {{- toYaml .Values.clamav.resources | nindent 10 }}
          {{- toYaml .Values.resources | nindent 10 }}
        volumeMounts:
        - mountPath: /var/lib/clamav
          name: signatures

@@ -53,15 +53,15 @@ spec:
      - name: config-volume
        configMap:
          name: {{ include "kubezero-lib.fullname" . }}
      {{- with .Values.clamav.nodeSelector }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.clamav.affinity }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.clamav.tolerations }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

@@ -70,7 +70,7 @@ spec:
      name: signatures
    spec:
      accessModes: [ "ReadWriteOnce" ]
      {{- with .Values.clamav.storageClassName }}
      {{- with .Values.storageClassName }}
      storageClassName: {{ . }}
      {{- end }}
      resources:
@@ -0,0 +1,9 @@
#!/bin/bash
set -ex

. ../../scripts/lib-update.sh

update_helm

update_docs
@@ -1,46 +1,41 @@
# Default values for clamav.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# nameOverride -- override the name of the clamav chart
nameOverride: ""

# fullnameOverride -- override the full name of the clamav chart
fullnameOverride: ""

# image -- The clamav docker image
image:
  repository: clamav/clamav
  # version: "latest"
  type: base

replicaCount: 1

freshclam:
  # freshclam.mirrors -- A list of clamav mirrors to be used by the clamav service
  mirrors: database.clamav.net

limits:
  # limits.fileSize -- The largest file size scanable by clamav, in MB
  fileSize: 25
  # limits.scanSize -- The largest scan size permitted in clamav, in MB
  scanSize: 100
  # limits.connectionQueueLength -- Maximum length the queue of pending connections may grow to
  connectionQueueLength: 100
  # limits.maxThreads -- Maximum number of threads running at the same time.
  maxThreads: 4
  # sendBufTimeout -- This option specifies how long to wait (in milliseconds) if the send buffer is full, keep low to avoid clamd hanging
  sendBufTimeout: 500

service:
  # service.port -- The port to be used by the clamav service
  port: 3310

clamav:
  # clamav.image -- The clamav docker image
  image: clamav/clamav
  # clamav.version -- The clamav docker image version - defaults to .Chart.appVersion
  version: "unstable"

  replicaCount: 1

  freshclam:
    # clamav.freshclam.mirrors -- A list of clamav mirrors to be used by the clamav service
    mirrors: database.clamav.net
  limits:
    # clamav.limits.fileSize -- The largest file size scanable by clamav, in MB
    fileSize: 20
    # clamav.limits.scanSize -- The largest scan size permitted in clamav, in MB
    scanSize: 100
    # clamav.limits.connectionQueueLength -- Maximum length the queue of pending connections may grow to
    connectionQueueLength: 100
    # clamav.limits.maxThreads -- Maximum number of threads running at the same time.
    maxThreads: 4
    # clamav.sendBufTimeout -- This option specifies how long to wait (in milliseconds) if the send buffer is full, keep low to avoid clamd hanging
    sendBufTimeout: 500

  resources:
    # clamav.resources -- The resource requests and limits for the clamav service
    requests:
      cpu: 300m
      memory: 1300M
    #limits:
    #  cpu: 1500m
    #  memory: 2000M

resources:
  # resources -- The resource requests and limits for the clamav service
  requests:
    cpu: 300m
    memory: 2000M
  #limits:
  #  cpu: 2
  #  memory: 4000M
|
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.25.8
+version: 1.28.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -11,4 +11,4 @@ keywords:
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
-kubeVersion: ">= 1.25.0"
+kubeVersion: ">= 1.26.0"
@@ -0,0 +1,159 @@
+#!/bin/sh
+
+function createMasterAuditPolicy() {
+  path="templates/apiserver/audit-policy.yaml"
+
+  known_apis='
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "node.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "scheduling.k8s.io"
+      - group: "storage.k8s.io"'
+
+  cat <<EOF >"${path}"
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+  # The following requests were manually identified as high-volume and low-risk,
+  # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
+  - level: None
+    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
+    # TODO(#46983): Change this to the ingress controller service account.
+    users: ["system:unsecured"]
+    namespaces: ["kube-system"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps"]
+  - level: None
+    users: ["kubelet"] # legacy kubelet identity
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    userGroups: ["system:nodes"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+      - system:kube-scheduler
+      - system:serviceaccount:kube-system:endpoint-controller
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints"]
+  - level: None
+    users: ["system:apiserver"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+  - level: None
+    users: ["cluster-autoscaler"]
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps", "endpoints"]
+  # Don't log HPA fetching metrics.
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+    verbs: ["get", "list"]
+    resources:
+      - group: "metrics.k8s.io"
+
+  # Don't log these read-only URLs.
+  - level: None
+    nonResourceURLs:
+      - /healthz*
+      - /version
+      - /swagger*
+      - /readyz
+
+  # Don't log events requests because of performance impact.
+  - level: None
+    resources:
+      - group: "" # core
+        resources: ["events"]
+
+  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+  - level: Request
+    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+  - level: Request
+    userGroups: ["system:nodes"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+
+  # deletecollection calls can be large, don't log responses for expected namespace deletions
+  - level: Request
+    users: ["system:serviceaccount:kube-system:namespace-controller"]
+    verbs: ["deletecollection"]
+    omitStages:
+      - "RequestReceived"
+
+  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
+  # so only log at the Metadata level.
+  - level: Metadata
+    resources:
+      - group: "" # core
+        resources: ["secrets", "configmaps", "serviceaccounts/token"]
+      - group: authentication.k8s.io
+        resources: ["tokenreviews"]
+    omitStages:
+      - "RequestReceived"
+  # Get responses can be large; skip them.
+  - level: Request
+    verbs: ["get", "list", "watch"]
+    resources: ${known_apis}
+    omitStages:
+      - "RequestReceived"
+  # Default level for known APIs
+  - level: RequestResponse
+    resources: ${known_apis}
+    omitStages:
+      - "RequestReceived"
+  # Default level for all other requests.
+  - level: Metadata
+    omitStages:
+      - "RequestReceived"
+EOF
+}
+
+createMasterAuditPolicy
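The script above only defines and then calls `createMasterAuditPolicy`, so running it from the chart directory (re)generates the policy file in place; a minimal usage sketch, with the script's file name assumed since this diff does not show it:

```shell
# hypothetical file name; the new script's path is not visible in this diff
sh create_audit_policy.sh
ls templates/apiserver/audit-policy.yaml   # the path hardcoded in the script
```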
@@ -2,14 +2,15 @@ apiVersion: kubeadm.k8s.io/v1beta3
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
+featureGates:
+  EtcdLearnerMode: true # becomes beta in 1.29
 #  NonGracefulFailover: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16
 etcd:
   local:
-    imageTag: 3.5.4-0
+    # imageTag: 3.5.12-0
     extraArgs:
       ### DNS discovery
       #discovery-srv: {{ .Values.domain }}
@@ -59,8 +60,11 @@ apiServer:
     audit-policy-file: /etc/kubernetes/apiserver/audit-policy.yaml
     audit-log-maxage: "7"
     audit-log-maxsize: "100"
-    audit-log-maxbackup: "3"
+    audit-log-maxbackup: "1"
     audit-log-compress: "true"
+    {{- if .Values.api.falco.enabled }}
+    audit-webhook-config-file: /etc/kubernetes/apiserver/audit-webhook.yaml
+    {{- end }}
     tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
     admission-control-config-file: /etc/kubernetes/apiserver/admission-configuration.yaml
     api-audiences: {{ .Values.api.apiAudiences }}
@@ -70,12 +74,13 @@ apiServer:
     {{- end }}
     {{- if .Values.api.awsIamAuth.enabled }}
     authentication-token-webhook-config-file: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
     authentication-token-webhook-cache-ttl: 3600s
     {{- end }}
     feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
     enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit,ExtendedResourceToleration
-#    {{- if .Values.global.highAvailable }}
-#    goaway-chance: ".001"
-#    {{- end }}
+    {{- if .Values.global.highAvailable }}
+    goaway-chance: ".001"
+    {{- end }}
+    logging-format: json
     {{- with .Values.api.extraArgs }}
     {{- toYaml . | nindent 4 }}
@@ -2,6 +2,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
 kind: KubeProxyConfiguration
+# kube-proxy doesnt really support setting dynamic bind-address via config, replaced by cilium long-term anyways
 metricsBindAddress: "0.0.0.0:10249"
-# calico < 3.22.1 breaks starting with 1.23, see https://github.com/projectcalico/calico/issues/5011
-# we go Cilium anyways
 mode: "iptables"
 logging:
   format: json
@@ -6,6 +6,7 @@ cgroupDriver: cgroupfs
 logging:
   format: json
 hairpinMode: hairpin-veth
+containerRuntimeEndpoint: "unix:///var/run/crio/crio.sock"
 {{- if .Values.systemd }}
 resolvConf: /run/systemd/resolve/resolv.conf
 {{- end }}
@@ -32,4 +33,5 @@ kubeReserved:
 #evictionHard:
 #  memory.available: "484Mi"
 imageGCLowThresholdPercent: 70
-# kernelMemcgNotification: true
+serializeImagePulls: false
+maxParallelImagePulls: 4
@@ -1,6 +1,11 @@
 {{- /* Feature gates for all control plane components */ -}}
 {{- /* Issues: MemoryQoS */ -}}
+{{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
+{{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
+{{- /* v1.29: remove/beta SidecarContainers */ -}}
+{{- /* v1.30: remove/beta KubeProxyDrainingTerminatingNodes */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "NodeOutOfServiceVolumeDetach" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "SidecarContainers" "KubeProxyDrainingTerminatingNodes" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
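For reference, when the apiserver template shown earlier renders this helper with `( dict "return" "csv" ) | trimSuffix "," | quote`, the updated gate list should produce roughly the following line (a sketch derived from the `$gates` list in this diff, not verified output):

```yaml
feature-gates: "CustomCPUCFSQuotaPeriod=true,SidecarContainers=true,KubeProxyDrainingTerminatingNodes=true"
```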
@@ -0,0 +1,7 @@
+# Don't Log anything, but audit policy enabled
+apiVersion: audit.k8s.io/v1
+kind: Policy
+metadata:
+  name: kubezero-auditpolicy
+rules:
+- level: None
@@ -1,7 +1,164 @@
-# Don't Log anything, but audit policy enabled
 apiVersion: audit.k8s.io/v1
 kind: Policy
 metadata:
   name: kubezero-auditpolicy
 rules:
-- level: None
+  # The following requests were manually identified as high-volume and low-risk,
+  # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
+  - level: None
+    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
+    # TODO(#46983): Change this to the ingress controller service account.
+    users: ["system:unsecured"]
+    namespaces: ["kube-system"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps"]
+  - level: None
+    users: ["kubelet"] # legacy kubelet identity
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    userGroups: ["system:nodes"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+      - system:kube-scheduler
+      - system:serviceaccount:kube-system:endpoint-controller
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints"]
+  - level: None
+    users: ["system:apiserver"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+  - level: None
+    users: ["cluster-autoscaler"]
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps", "endpoints"]
+  # Don't log HPA fetching metrics.
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+    verbs: ["get", "list"]
+    resources:
+      - group: "metrics.k8s.io"
+
+  # Don't log these read-only URLs.
+  - level: None
+    nonResourceURLs:
+      - /healthz*
+      - /version
+      - /swagger*
+
+  # Don't log events requests because of performance impact.
+  - level: None
+    resources:
+      - group: "" # core
+        resources: ["events"]
+
+  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+  - level: Request
+    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+  - level: Request
+    userGroups: ["system:nodes"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+
+  # deletecollection calls can be large, don't log responses for expected namespace deletions
+  - level: Request
+    users: ["system:serviceaccount:kube-system:namespace-controller"]
+    verbs: ["deletecollection"]
+    omitStages:
+      - "RequestReceived"
+
+  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
+  # so only log at the Metadata level.
+  - level: Metadata
+    resources:
+      - group: "" # core
+        resources: ["secrets", "configmaps", "serviceaccounts/token"]
+      - group: authentication.k8s.io
+        resources: ["tokenreviews"]
+    omitStages:
+      - "RequestReceived"
+  # Get responses can be large; skip them.
+  - level: Request
+    verbs: ["get", "list", "watch"]
+    resources:
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "node.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "scheduling.k8s.io"
+      - group: "storage.k8s.io"
+    omitStages:
+      - "RequestReceived"
+  # Default level for known APIs
+  - level: RequestResponse
+    resources:
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "node.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "scheduling.k8s.io"
+      - group: "storage.k8s.io"
+    omitStages:
+      - "RequestReceived"
+  # Default level for all other requests.
+  - level: Metadata
+    omitStages:
+      - "RequestReceived"
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Config
+clusters:
+  - name: falco
+    cluster:
+      server: http://falco-k8saudit-webhook:9765/k8s-audit
+contexts:
+  - context:
+      cluster: falco
+      user: ""
+    name: default-context
+current-context: default-context
+preferences: {}
+users: []
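This kubeconfig only takes effect when the apiserver flag added earlier (`audit-webhook-config-file`, guarded by `.Values.api.falco.enabled`) is rendered; since the kubeadm values further down in this diff default it to `enabled: false`, enabling it is a one-line override:

```yaml
api:
  falco:
    enabled: true
```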
@@ -1,17 +0,0 @@
-apiVersion: kubelet.config.k8s.io/v1beta1
-kind: CredentialProviderConfig
-providers:
-  - name: amazon-ecr-credential-helper
-    matchImages:
-      - "*.dkr.ecr.*.amazonaws.com"
-      - "*.dkr.ecr.*.amazonaws.cn"
-      - "*.dkr.ecr-fips.*.amazonaws.com"
-      - "*.dkr.ecr.us-iso-east-1.c2s.ic.gov"
-      - "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov"
-    defaultCacheDuration: "12h"
-    apiVersion: credentialprovider.kubelet.k8s.io/v1alpha1
-    args:
-      - get
-    #env:
-    #  - name: AWS_PROFILE
-    #    value: example_profile
@@ -3,6 +3,6 @@ spec:
     - name: etcd
       resources:
         requests:
-          cpu: 200m
-          memory: 192Mi
+          cpu: 50m
+          memory: 256Mi
           #ephemeral-storage: 1Gi
@@ -1,7 +1,8 @@
 spec:
+  dnsPolicy: ClusterFirstWithHostNet
   containers:
     - name: kube-apiserver
       resources:
         requests:
-          cpu: 200m
-          memory: 1Gi
+          cpu: 250m
+          memory: 1268Mi
@@ -3,5 +3,5 @@ spec:
     - name: kube-controller-manager
       resources:
         requests:
-          cpu: 100m
-          memory: 128Mi
+          cpu: 50m
+          memory: 192Mi
@@ -3,5 +3,5 @@ spec:
     - name: kube-scheduler
       resources:
         requests:
-          cpu: 100m
-          memory: 64Mi
+          cpu: 50m
+          memory: 96Mi
@@ -1,6 +1,6 @@
 {{- if .Values.api.awsIamAuth.enabled }}
-kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
 metadata:
   name: aws-iam-authenticator
 rules:
@@ -51,8 +51,8 @@ metadata:
   namespace: kube-system
 
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
   name: aws-iam-authenticator
   namespace: kube-system
@@ -85,8 +85,6 @@ metadata:
   name: aws-iam-authenticator
   labels:
     k8s-app: aws-iam-authenticator
-  annotations:
-    seccomp.security.alpha.kubernetes.io/pod: runtime/default
 spec:
   selector:
     matchLabels:
@@ -98,6 +96,10 @@ spec:
       labels:
         k8s-app: aws-iam-authenticator
     spec:
+      securityContext:
+        seccompProfile:
+          type: RuntimeDefault
+
       priorityClassName: system-cluster-critical
 
       # use service account with access to
@@ -110,14 +112,12 @@ spec:
       nodeSelector:
         node-role.kubernetes.io/control-plane: ""
       tolerations:
-        - effect: NoSchedule
-          key: node-role.kubernetes.io/master
         - effect: NoSchedule
           key: node-role.kubernetes.io/control-plane
 
       containers:
         - name: aws-iam-authenticator
-          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.11
+          image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.14
           args:
             - server
             - --backend-mode=CRD,MountedFile
@@ -25,6 +25,9 @@ api:
     workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
     kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
 
+  falco:
+    enabled: false
+
 etcd:
   nodeName: etcd
   state: new
@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.7.5
-appVersion: v1.25
+version: 0.8.7
+appVersion: v1.28
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -20,28 +20,28 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: external-dns
-    version: 1.12.2
+    version: 1.14.4
     repository: https://kubernetes-sigs.github.io/external-dns/
     condition: external-dns.enabled
   - name: cluster-autoscaler
-    version: 9.28.0
+    version: 9.36.0
     repository: https://kubernetes.github.io/autoscaler
     condition: cluster-autoscaler.enabled
   - name: nvidia-device-plugin
-    version: 0.14.0
+    version: 0.15.0
    # https://github.com/NVIDIA/k8s-device-plugin
     repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
   - name: sealed-secrets
-    version: 2.8.1
+    version: 2.15.3
     repository: https://bitnami-labs.github.io/sealed-secrets
     condition: sealed-secrets.enabled
   - name: aws-node-termination-handler
-    version: 0.21.0
-    # repository: https://aws.github.io/eks-charts
+    version: 0.23.0
+    repository: "oci://public.ecr.aws/aws-ec2/helm"
     condition: aws-node-termination-handler.enabled
   - name: aws-eks-asg-rolling-update-handler
-    version: 1.3.0
-    # repository: https://twin.github.io/helm-charts
+    version: 1.5.0
+    repository: https://twin.github.io/helm-charts
     condition: aws-eks-asg-rolling-update-handler.enabled
-kubeVersion: ">= 1.25.0"
+kubeVersion: ">= 1.26.0"
@@ -1,6 +1,6 @@
 # kubezero-addons
 
-![Version: 0.7.5](https://img.shields.io/badge/Version-0.7.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.25](https://img.shields.io/badge/AppVersion-v1.25-informational?style=flat-square)
+![Version: 0.8.7](https://img.shields.io/badge/Version-0.8.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.28](https://img.shields.io/badge/AppVersion-v1.28-informational?style=flat-square)
 
 KubeZero umbrella chart for various optional cluster addons
 
@@ -14,16 +14,16 @@ KubeZero umbrella chart for various optional cluster addons
 
 ## Requirements
 
-Kubernetes: `>= 1.25.0`
+Kubernetes: `>= 1.26.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-|  | aws-eks-asg-rolling-update-handler | 1.3.0 |
-|  | aws-node-termination-handler | 0.21.0 |
-| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.8.1 |
-| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.12.2 |
-| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.28.0 |
-| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.14.0 |
+| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.15.3 |
+| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.14.4 |
+| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.36.0 |
+| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.15.0 |
+| https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
+| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.23.0 |
 
 # MetalLB
 
@@ -41,6 +41,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
+| aws-eks-asg-rolling-update-handler.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | |
+| aws-eks-asg-rolling-update-handler.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | |
 | aws-eks-asg-rolling-update-handler.enabled | bool | `false` | |
 | aws-eks-asg-rolling-update-handler.environmentVars[0].name | string | `"CLUSTER_NAME"` | |
 | aws-eks-asg-rolling-update-handler.environmentVars[0].value | string | `""` | |
|
@ -60,15 +62,17 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
|
|||
| aws-eks-asg-rolling-update-handler.environmentVars[7].value | string | `"/var/run/secrets/sts.amazonaws.com/serviceaccount/token"` | |
|
||||
| aws-eks-asg-rolling-update-handler.environmentVars[8].name | string | `"AWS_STS_REGIONAL_ENDPOINTS"` | |
|
||||
| aws-eks-asg-rolling-update-handler.environmentVars[8].value | string | `"regional"` | |
|
||||
| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.7.0"` | |
|
||||
| aws-eks-asg-rolling-update-handler.image.repository | string | `"twinproduction/aws-eks-asg-rolling-update-handler"` | |
|
||||
| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.8.3"` | |
|
||||
| aws-eks-asg-rolling-update-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
|
||||
| aws-eks-asg-rolling-update-handler.resources.limits.memory | string | `"128Mi"` | |
|
||||
| aws-eks-asg-rolling-update-handler.resources.requests.cpu | string | `"10m"` | |
|
||||
| aws-eks-asg-rolling-update-handler.resources.requests.memory | string | `"32Mi"` | |
|
||||
| aws-eks-asg-rolling-update-handler.securityContext.runAsNonRoot | bool | `true` | |
|
||||
| aws-eks-asg-rolling-update-handler.securityContext.runAsUser | int | `1001` | |
|
||||
| aws-eks-asg-rolling-update-handler.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
|
||||
| aws-eks-asg-rolling-update-handler.tolerations[0].effect | string | `"NoSchedule"` | |
|
||||
| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
|
||||
| aws-eks-asg-rolling-update-handler.tolerations[1].effect | string | `"NoSchedule"` | |
|
||||
| aws-eks-asg-rolling-update-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
|
||||
| aws-node-termination-handler.deleteLocalData | bool | `true` | |
|
||||
| aws-node-termination-handler.emitKubernetesEvents | bool | `true` | |
|
||||
| aws-node-termination-handler.enableProbesServer | bool | `true` | |
|
||||
|
@@ -85,7 +89,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.ignoreDaemonSets | bool | `true` | |
 | aws-node-termination-handler.jsonLogging | bool | `true` | |
 | aws-node-termination-handler.logFormatVersion | int | `2` | |
-| aws-node-termination-handler.managedTag | string | `"aws-node-termination-handler/managed"` | "aws-node-termination-handler/${ClusterName}" |
+| aws-node-termination-handler.managedTag | string | `"zdt:kubezero:nth:${ClusterName}"` | "zdt:kubezero:nth:${ClusterName}" |
 | aws-node-termination-handler.metadataTries | int | `0` | |
 | aws-node-termination-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | aws-node-termination-handler.podMonitor.create | bool | `false` | |
@@ -93,13 +97,11 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.rbac.pspEnabled | bool | `false` | |
 | aws-node-termination-handler.taintNode | bool | `true` | |
 | aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
-| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
-| aws-node-termination-handler.tolerations[1].effect | string | `"NoSchedule"` | |
-| aws-node-termination-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | aws-node-termination-handler.useProviderId | bool | `true` | |
 | awsNeuron.enabled | bool | `false` | |
 | awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
-| awsNeuron.image.tag | string | `"1.9.3.0"` | |
+| awsNeuron.image.tag | string | `"2.19.16.0"` | |
 | cluster-autoscaler.autoDiscovery.clusterName | string | `""` | |
 | cluster-autoscaler.awsRegion | string | `"us-west-2"` | |
 | cluster-autoscaler.enabled | bool | `false` | |
@@ -107,7 +109,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.extraArgs.ignore-taint | string | `"node.cilium.io/agent-not-ready"` | |
 | cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
 | cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
-| cluster-autoscaler.image.tag | string | `"v1.25.1"` | |
+| cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
+| cluster-autoscaler.image.tag | string | `"v1.28.2"` | |
 | cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
 | cluster-autoscaler.podDisruptionBudget | bool | `false` | |
 | cluster-autoscaler.prometheusRule.enabled | bool | `false` | |
@@ -115,9 +118,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | cluster-autoscaler.serviceMonitor.enabled | bool | `false` | |
 | cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
 | cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
-| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
-| cluster-autoscaler.tolerations[1].effect | string | `"NoSchedule"` | |
-| cluster-autoscaler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | clusterBackup.enabled | bool | `false` | |
 | clusterBackup.extraEnv | list | `[]` | |
 | clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
@@ -129,9 +130,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | external-dns.provider | string | `"inmemory"` | |
 | external-dns.sources[0] | string | `"service"` | |
 | external-dns.tolerations[0].effect | string | `"NoSchedule"` | |
-| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
-| external-dns.tolerations[1].effect | string | `"NoSchedule"` | |
-| external-dns.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
 | external-dns.triggerLoopOnEvent | bool | `true` | |
 | forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" |
 | forseti.aws.region | string | `""` | |
@@ -139,6 +138,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | forseti.image.name | string | `"public.ecr.aws/zero-downtime/forseti"` | |
 | forseti.image.tag | string | `"v0.1.2"` | |
 | fuseDevicePlugin.enabled | bool | `false` | |
+| fuseDevicePlugin.image.name | string | `"public.ecr.aws/zero-downtime/fuse-device-plugin"` | |
+| fuseDevicePlugin.image.tag | string | `"v1.2.0"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/instance-type"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | |
 | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"g5.xlarge"` | |
@@ -171,6 +172,4 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | sealed-secrets.resources.requests.cpu | string | `"10m"` | |
 | sealed-secrets.resources.requests.memory | string | `"24Mi"` | |
 | sealed-secrets.tolerations[0].effect | string | `"NoSchedule"` | |
-| sealed-secrets.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
-| sealed-secrets.tolerations[1].effect | string | `"NoSchedule"` | |
-| sealed-secrets.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
+| sealed-secrets.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
@@ -5,4 +5,4 @@ home: https://github.com/TwiN/aws-eks-asg-rolling-update-handler
 maintainers:
   - name: TwiN
 name: aws-eks-asg-rolling-update-handler
-version: 1.3.0
+version: 1.5.0
@@ -11,3 +11,6 @@ The following table lists the configurable parameters of the aws-eks-asg-rolling
 | image.pullPolicy | Image pull policy | yes | `IfNotPresent` |
 | resources | CPU/memory resource requests/limits | no | `{}` |
 | podAnnotations | Annotations to add to the aws-eks-asg-rolling-update-handler pod configuration | no | `{}` |
+| podLabels | Labels to add to the aws-eks-asg-rolling-update-handler pod configuration | no | `{}` |
+| securityContext | Pod security context | no | `{}` |
+| containerSecurityContext | Container security context | no | `{}` |
@@ -15,11 +15,18 @@ spec:
     metadata:
       labels:
 {{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 8 }}
-      {{- with .Values.podAnnotations }}
+      {{- with .Values.podLabels }}
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       annotations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
+        {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
     spec:
+      {{- if .Values.securityContext }}
+      securityContext:
+        {{ toYaml .Values.securityContext | nindent 8 | trim }}
+      {{- end }}
       automountServiceAccountToken: true
       serviceAccountName: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }}
      restartPolicy: Always
@@ -28,6 +35,10 @@ spec:
         - name: {{ template "aws-eks-asg-rolling-update-handler.name" . }}
           image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
           imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- if .Values.containerSecurityContext }}
+          securityContext:
+            {{ toYaml .Values.containerSecurityContext | nindent 12 | trim }}
+          {{- end }}
           env:
 {{- toYaml .Values.environmentVars | nindent 12 }}
           {{- with .Values.resources }}
@@ -32,8 +32,20 @@ resources: {}
 podAnnotations: {}
 # prometheus.io/port: "8080"
 # prometheus.io/scrape: "true"
 
+podLabels: {}
 serviceAccount:
   create: true
   #name: aws-eks-asg-rolling-update-handler
   annotations: {}
 
+securityContext: {}
+# runAsNonRoot: true
+# runAsUser: 1001
+# seccompProfile:
+#   type: RuntimeDefault
+
+containerSecurityContext: {}
+# allowPrivilegeEscalation: false
+# capabilities:
+#   drop:
+#     - ALL
@@ -1,7 +1,7 @@
 apiVersion: v2
-appVersion: 1.19.0
+appVersion: 1.21.0
 description: A Helm chart for the AWS Node Termination Handler.
-home: https://github.com/aws/eks-charts
+home: https://github.com/aws/aws-node-termination-handler/
 icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
 keywords:
   - aws
@@ -20,6 +20,5 @@ maintainers:
 name: aws-node-termination-handler
 sources:
   - https://github.com/aws/aws-node-termination-handler/
-  - https://github.com/aws/eks-charts/
 type: application
-version: 0.21.0
+version: 0.23.0
@@ -8,22 +8,24 @@ AWS Node Termination Handler Helm chart for Kubernetes. For more information on
 
 ## Installing the Chart
 
-Before you can install the chart you will need to add the `aws` repo to [Helm](https://helm.sh/).
+Before you can install the chart you will need to authenticate your Helm client.
 
 ```shell
-helm repo add eks https://aws.github.io/eks-charts/
+aws ecr-public get-login-password \
+     --region us-east-1 | helm registry login \
+     --username AWS \
+     --password-stdin public.ecr.aws
 ```
 
-After you've installed the repo you can install the chart, the following command will install the chart with the release name `aws-node-termination-handler` and the default configuration to the `kube-system` namespace.
+Once the helm registry login succeeds, use the following command to install the chart with the release name `aws-node-termination-handler` and the default configuration to the `kube-system` namespace. In the below command, add the CHART_VERSION that you want to install.
 
 ```shell
-helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler
+helm upgrade --install --namespace kube-system aws-node-termination-handler oci://public.ecr.aws/aws-ec2/helm/aws-node-termination-handler --version $CHART_VERSION
 ```
 
 To install the chart on an EKS cluster where the AWS Node Termination Handler is already installed, you can run the following command.
 
 ```shell
-helm upgrade --install --namespace kube-system aws-node-termination-handler eks/aws-node-termination-handler --recreate-pods --force
+helm upgrade --install --namespace kube-system aws-node-termination-handler oci://public.ecr.aws/aws-ec2/helm/aws-node-termination-handler --version $CHART_VERSION --recreate-pods --force
 ```
 
 If you receive an error similar to the one below simply rerun the above command.
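As a usage sketch, `$CHART_VERSION` can be pinned to the chart version bumped elsewhere in this diff before running the install:

```shell
CHART_VERSION=0.23.0
helm upgrade --install --namespace kube-system aws-node-termination-handler \
  oci://public.ecr.aws/aws-ec2/helm/aws-node-termination-handler --version $CHART_VERSION
```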
@@ -33,7 +35,7 @@ If you receive an error similar to the one below simply rerun the above command.
 To uninstall the `aws-node-termination-handler` chart installation from the `kube-system` namespace run the following command.
 
 ```shell
-helm delete --namespace kube-system aws-node-termination-handler
+helm uninstall --namespace kube-system aws-node-termination-handler
 ```
 
 ## Configuration
@@ -117,7 +119,7 @@ The configuration in this table applies to AWS Node Termination Handler in queue
 | `checkASGTagBeforeDraining` | [DEPRECATED](Use `checkTagBeforeDraining` instead) If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. If `false`, disables calls ASG API. | `true` |
 | `managedAsgTag` | [DEPRECATED](Use `managedTag` instead) The node tag to check if `checkASGTagBeforeDraining` is `true`.
 | `useProviderId` | If `true`, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname. | `false` |
-
+| `topologySpreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for pod scheduling. Useful with a highly available deployment to reduce the risk of running multiple replicas on the same Node | `[]` |
 ### IMDS Mode Configuration
 
 The configuration in this table applies to AWS Node Termination Handler in IMDS mode.
@@ -156,6 +158,7 @@ The configuration in this table applies to AWS Node Termination Handler in IMDS
 | `enableScheduledEventDraining` | If `true`, drain nodes before the maintenance window starts for an EC2 instance scheduled event. Only used in IMDS mode. | `true` |
 | `enableRebalanceMonitoring` | If `true`, cordon nodes when the rebalance recommendation notice is received. If you'd like to drain the node in addition to cordoning, then also set `enableRebalanceDraining`. Only used in IMDS mode. | `false` |
 | `enableRebalanceDraining` | If `true`, drain nodes when the rebalance recommendation notice is received. Only used in IMDS mode. | `false` |
+| `deleteSqsMsgIfNodeNotFound` | If `true`, delete the SQS Message from the SQS Queue if the targeted node is not found. Only used in Queue Processor mode. | `false` |
 
 ### Testing Configuration
 
@@ -171,6 +174,6 @@ The configuration in this table applies to AWS Node Termination Handler testing
 
 ## Metrics Endpoint Considerations
 
-AWS Node Termination HAndler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.
+AWS Node Termination Handler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.
 
 You can switch NTH in IMDS mode to run w/ `useHostNetwork: false`, but you will need to make sure that IMDSv1 is enabled or IMDSv2 IP hop count will need to be incremented to 2 (see the [IMDSv2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html).
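A minimal values sketch of the non-hostNetwork alternative described above, using only the keys this README names (`useHostNetwork`, `enablePrometheusServer`, `prometheusServerPort`):

```yaml
# requires IMDSv1, or an IMDSv2 hop limit of 2 on the nodes
useHostNetwork: false
enablePrometheusServer: true
prometheusServerPort: 9092
```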
@@ -164,6 +164,8 @@ spec:
           {{- end }}
           - name: QUEUE_URL
             value: {{ .Values.queueURL | quote }}
+          - name: DELETE_SQS_MSG_IF_NODE_NOT_FOUND
+            value: {{ .Values.deleteSqsMsgIfNodeNotFound | quote }}
           - name: WORKERS
             value: {{ .Values.workers | quote }}
       {{- with .Values.extraEnv }}
@@ -218,4 +220,8 @@ spec:
       tolerations:
         {{- toYaml . | nindent 8 }}
       {{- end }}
+      {{- with .Values.topologySpreadConstraints }}
+      topologySpreadConstraints:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
 {{- end }}
@@ -52,6 +52,8 @@ affinity: {}
 
 tolerations: []
 
+topologySpreadConstraints: []
+
 # Extra environment variables
 extraEnv: []
 
@@ -277,6 +279,9 @@ enableRebalanceMonitoring: false
 # enableRebalanceDraining If true, drain nodes when the rebalance recommendation notice is received. Only used in IMDS mode.
 enableRebalanceDraining: false
 
+# deleteSqsMsgIfNodeNotFound If true, delete the SQS Message from the SQS Queue if the targeted node(s) are not found. Only used in Queue Processor mode.
+deleteSqsMsgIfNodeNotFound: false
+
 # ---------------------------------------------------------------------------------------------------------------------
 # Testing
 # ---------------------------------------------------------------------------------------------------------------------
@@ -24,21 +24,22 @@ spec:
           volumeMounts:
           - name: host
             mountPath: /host
             #readOnly: true
          - name: workdir
            mountPath: /tmp
-          env:
+          env:
          - name: DEBUG
            value: ""
          - name: RESTIC_REPOSITORY
-            valueFrom:
-              secretKeyRef:
+            valueFrom:
+              secretKeyRef:
                name: kubezero-backup-restic
                key: repository
          - name: RESTIC_PASSWORD
-            valueFrom:
-              secretKeyRef:
+            valueFrom:
+              secretKeyRef:
                name: kubezero-backup-restic
-                key: password
+                key: password
          {{- with .Values.clusterBackup.extraEnv }}
          {{- toYaml . | nindent 12 }}
          {{- end }}
@@ -54,9 +55,8 @@ spec:
           emptyDir: {}
       nodeSelector:
         node-role.kubernetes.io/control-plane: ""
+      priorityClassName: system-cluster-critical
       tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
         - key: node-role.kubernetes.io/control-plane
           effect: NoSchedule
       restartPolicy: Never
@@ -17,6 +17,8 @@ spec:
     spec:
       serviceAccount: neuron-device-plugin
       tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
        - key: aws.amazon.com/neuron
          operator: Exists
          effect: NoSchedule
@@ -40,6 +42,14 @@ spec:
                 - inf1.2xlarge
                 - inf1.6xlarge
                 - inf1.24xlarge
+                - inf2.xlarge
+                - inf2.4xlarge
+                - inf2.8xlarge
+                - inf2.24xlarge
+                - inf2.48xlarge
+                - trn1.2xlarge
+                - trn1.32xlarge
+                - trn1n.32xlarge
       containers:
         - image: "{{ .Values.awsNeuron.image.name }}:{{ .Values.awsNeuron.image.tag }}"
           imagePullPolicy: IfNotPresent
@@ -13,9 +13,15 @@ spec:
       labels:
         name: fuse-device-plugin
     spec:
       hostNetwork: true
+      priorityClassName: system-node-critical
+      tolerations:
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - key: kubezero-workergroup
+          effect: NoSchedule
+          operator: Exists
       containers:
-        - image: public.ecr.aws/zero-downtime/fuse-device-plugin:v1.1.0
+        - image: "{{ .Values.fuseDevicePlugin.image.name }}:{{ .Values.fuseDevicePlugin.image.tag }}"
          # imagePullPolicy: Always
          name: fuse-device-plugin
          securityContext:
@@ -69,10 +69,9 @@ spec:
       nodeSelector:
         node-role.kubernetes.io/control-plane: ""
       tolerations:
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
         - key: node-role.kubernetes.io/control-plane
           effect: NoSchedule
+      priorityClassName: system-cluster-critical
       volumes:
       - name: aws-token
         projected:
@@ -1,19 +1,12 @@
 #!/bin/bash
 set -ex
 
-helm repo update
+. ../../scripts/lib-update.sh
 
-NTH_VERSION=$(yq eval '.dependencies[] | select(.name=="aws-node-termination-handler") | .version' Chart.yaml)
-RUH_VERSION=$(yq eval '.dependencies[] | select(.name=="aws-eks-asg-rolling-update-handler") | .version' Chart.yaml)
+login_ecr_public
+update_helm
 
-rm -rf charts/aws-node-termination-handler
-helm pull eks/aws-node-termination-handler --untar --untardir charts --version $NTH_VERSION
+patch_chart aws-node-termination-handler
+patch_chart aws-eks-asg-rolling-update-handler
 
-# diff -tuNr charts/aws-node-termination-handler.orig charts/aws-node-termination-handler > nth.patch
-patch -p0 -i nth.patch --no-backup-if-mismatch
-
-rm -rf charts/aws-eks-asg-rolling-update-handler
-helm pull twin/aws-eks-asg-rolling-update-handler --untar --untardir charts --version $RUH_VERSION
-patch -p0 -i ruh.patch --no-backup-if-mismatch
-
-helm dep update
+update_docs
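The helpers now come from `lib-update.sh`; a sketch of what `patch_chart` presumably does, inferred from the commands this rewrite deletes (an assumption, not the actual library code):

```shell
# assumed semantics, reconstructed from the removed lines above
patch_chart() {
  local name="$1" version
  version=$(yq eval ".dependencies[] | select(.name==\"$name\") | .version" Chart.yaml)
  rm -rf "charts/$name"
  helm pull "$name" --untar --untardir charts --version "$version"  # repo/alias handling omitted
  patch -p0 -i "$name.patch" --no-backup-if-mismatch
}
```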
@@ -47,15 +47,14 @@ sealed-secrets:
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""
   tolerations:
-    - key: node-role.kubernetes.io/master
-      effect: NoSchedule
     - key: node-role.kubernetes.io/control-plane
       effect: NoSchedule
 
 aws-eks-asg-rolling-update-handler:
   enabled: false
   image:
-    tag: v1.7.0
+    repository: twinproduction/aws-eks-asg-rolling-update-handler
+    tag: v1.8.3
 
   environmentVars:
     - name: CLUSTER_NAME
@@ -78,6 +77,18 @@ aws-eks-asg-rolling-update-handler:
     - name: AWS_STS_REGIONAL_ENDPOINTS
       value: "regional"
 
+  securityContext:
+    runAsNonRoot: true
+    runAsUser: 1001
+    seccompProfile:
+      type: RuntimeDefault
+
+  containerSecurityContext:
+    allowPrivilegeEscalation: false
+    capabilities:
+      drop:
+        - ALL
+
   resources:
     requests:
       cpu: 10m
@@ -88,8 +99,6 @@ aws-eks-asg-rolling-update-handler:
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""
   tolerations:
-    - key: node-role.kubernetes.io/master
-      effect: NoSchedule
     - key: node-role.kubernetes.io/control-plane
       effect: NoSchedule
 
@@ -98,8 +107,8 @@ aws-node-termination-handler:
 
   fullnameOverride: "aws-node-termination-handler"
 
-  # -- "aws-node-termination-handler/${ClusterName}"
-  managedTag: "aws-node-termination-handler/managed"
+  # -- "zdt:kubezero:nth:${ClusterName}"
+  managedTag: "zdt:kubezero:nth:${ClusterName}"
 
   useProviderId: true
   enableSqsTerminationDraining: true
@@ -132,8 +141,6 @@ aws-node-termination-handler:
   logFormatVersion: 2
 
   tolerations:
-    - key: node-role.kubernetes.io/master
-      effect: NoSchedule
     - key: node-role.kubernetes.io/control-plane
       effect: NoSchedule
   nodeSelector:
@@ -144,13 +151,16 @@ aws-node-termination-handler:
 
 fuseDevicePlugin:
   enabled: false
+  image:
+    name: public.ecr.aws/zero-downtime/fuse-device-plugin
+    tag: v1.2.0
 
 awsNeuron:
   enabled: false
 
   image:
     name: public.ecr.aws/neuron/neuron-device-plugin
-    tag: 1.9.3.0
+    tag: 2.19.16.0
 
 nvidia-device-plugin:
   enabled: false
@@ -189,7 +199,8 @@ cluster-autoscaler:
   enabled: false
 
   image:
-    tag: v1.25.1
+    repository: registry.k8s.io/autoscaling/cluster-autoscaler
+    tag: v1.28.2
 
   autoDiscovery:
     clusterName: ""
@@ -218,15 +229,13 @@ cluster-autoscaler:
   nodeSelector:
     node-role.kubernetes.io/control-plane: ""
   tolerations:
-    - key: node-role.kubernetes.io/master
-      effect: NoSchedule
     - key: node-role.kubernetes.io/control-plane
       effect: NoSchedule
 
   # On AWS enable Projected Service Accounts to assume IAM role
   #extraEnv:
   #  AWS_ROLE_ARN: <IamArn>
-  #  AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
+  #  AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
   #  AWS_STS_REGIONAL_ENDPOINTS: "regional"
 
   #extraVolumes:
@@ -250,8 +259,6 @@ external-dns:
   triggerLoopOnEvent: true
 
   tolerations:
-    - key: node-role.kubernetes.io/master
-      effect: NoSchedule
     - key: node-role.kubernetes.io/control-plane
       effect: NoSchedule
   nodeSelector:
@@ -1,13 +1,14 @@
 apiVersion: v2
-description: KubeZero ArgoCD - config, branding, image-updater (optional)
-name: kubezero-argocd
-version: 0.12.0
+description: KubeZero Argo - Events, Workflow, CD
+name: kubezero-argo
+version: 0.2.2
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
   - kubezero
   - argocd
-  - argocd-image-updater
+  - argo-events
+  - argo-workflow
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net
@@ -16,14 +17,20 @@ dependencies:
   - name: kubezero-lib
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
+  - name: argo-events
+    version: 2.4.4
+    repository: https://argoproj.github.io/argo-helm
+    condition: argo-events.enabled
   - name: argo-cd
-    version: 5.28.2
+    version: 6.9.2
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-apps
-    version: 0.0.9
+    version: 2.0.0
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-image-updater
-    version: 0.8.5
+    version: 0.10.0
     repository: https://argoproj.github.io/argo-helm
     condition: argocd-image-updater.enabled
-kubeVersion: ">= 1.25.0"
+kubeVersion: ">= 1.26.0"
@@ -1,8 +1,8 @@
-# kubezero-argocd
+# kubezero-argo
 
-![Version: 0.12.0](https://img.shields.io/badge/Version-0.12.0-informational?style=flat-square)
+![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square)
 
-KubeZero ArgoCD - config, branding, image-updater (optional)
+KubeZero Argo - Events, Workflow, CD
 
 **Homepage:** <https://kubezero.com>
 
@@ -14,13 +14,14 @@ KubeZero ArgoCD - config, branding, image-updater (optional)
 
 ## Requirements
 
-Kubernetes: `>= 1.25.0`
+Kubernetes: `>= 1.26.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://argoproj.github.io/argo-helm | argo-cd | 5.28.2 |
-| https://argoproj.github.io/argo-helm | argocd-apps | 0.0.9 |
-| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.8.5 |
+| https://argoproj.github.io/argo-helm | argo-cd | 6.7.10 |
+| https://argoproj.github.io/argo-helm | argo-events | 2.4.4 |
+| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.0 |
+| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.9.6 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
 
 ## Values
 
@ -29,33 +30,52 @@ Kubernetes: `>= 1.25.0`
|
|||
|-----|------|---------|-------------|
|
||||
| argo-cd.applicationSet.enabled | bool | `false` | |
|
||||
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
|
||||
| argo-cd.configs.cm."timeout.reconciliation" | int | `300` | |
|
||||
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.25 - Release notes"` | |
|
||||
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
|
||||
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.27 - Release notes"` | |
|
||||
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
|
||||
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
|
||||
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.25"` | |
|
||||
| argo-cd.configs.cm.url | string | `"argocd.example.com"` | |
|
||||
| argo-cd.configs.knownHosts.data.ssh_known_hosts | string | `"bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==\ngithub.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf\ngitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngit.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==\n"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.27"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.resources.limits.memory | string | `"2048Mi"` | |
| argo-cd.controller.resources.requests.cpu | string | `"100m"` | |
| argo-cd.controller.resources.requests.memory | string | `"256Mi"` | |
| argo-cd.controller.resources.requests.memory | string | `"512Mi"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.istio.enabled | bool | `false` | |
| argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argocd-apps.applications | list | `[]` | |
| argocd-apps.projects | list | `[]` | |
| argo-events.configs.jetstream.settings.maxFileStore | int | `-1` | Maximum size of the file storage (e.g. 20G) |
| argo-events.configs.jetstream.settings.maxMemoryStore | int | `-1` | Maximum size of the memory storage (e.g. 1G) |
| argo-events.configs.jetstream.streamConfig.duplicates | string | `"300s"` | Not documented at the moment |
| argo-events.configs.jetstream.streamConfig.maxAge | string | `"72h"` | Maximum age of existing messages, i.e. “72h”, “4h35m” |
| argo-events.configs.jetstream.streamConfig.maxBytes | string | `"1GB"` | |
| argo-events.configs.jetstream.streamConfig.maxMsgs | int | `1000000` | Maximum number of messages before expiring oldest message |
| argo-events.configs.jetstream.streamConfig.replicas | int | `1` | Number of replicas, defaults to 3 and requires minimal 3 |
| argo-events.configs.jetstream.versions[0].configReloaderImage | string | `"natsio/nats-server-config-reloader:0.14.1"` | |
| argo-events.configs.jetstream.versions[0].metricsExporterImage | string | `"natsio/prometheus-nats-exporter:0.14.0"` | |
| argo-events.configs.jetstream.versions[0].natsImage | string | `"nats:2.10.11-scratch"` | |
| argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
| argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
| argo-events.enabled | bool | `false` | |
| argocd-apps.applications | object | `{}` | |
| argocd-apps.enabled | bool | `false` | |
| argocd-apps.projects | object | `{}` | |
| argocd-image-updater.authScripts.enabled | bool | `true` | |
| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |
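
The two `authScripts` entries above wrap `aws ecr get-authorization-token` so argocd-image-updater can refresh ECR credentials. A minimal smoke test outside the cluster, assuming an AWS CLI v2 profile with `ecr:GetAuthorizationToken` permission (the region is a placeholder):

```bash
#!/bin/sh
# Run the same commands the helper scripts above execute (illustrative only)
export AWS_REGION=eu-central-1   # placeholder region

# Private ECR: the decoded token has the form "AWS:<password>"
aws ecr --region "$AWS_REGION" get-authorization-token \
  --output text --query 'authorizationData[].authorizationToken' \
  | base64 -d | cut -d: -f1    # expect: AWS

# Public ECR tokens are always issued from us-east-1
aws ecr-public --region us-east-1 get-authorization-token \
  --output text --query 'authorizationData.authorizationToken' \
  | base64 -d | cut -d: -f1    # expect: AWS
```
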
@@ -65,10 +85,8 @@ Kubernetes: `>= 1.25.0`
| argocd-image-updater.metrics.enabled | bool | `false` | |
| argocd-image-updater.metrics.serviceMonitor.enabled | bool | `true` | |
| argocd-image-updater.sshConfig.config | string | `"Host *\n  PubkeyAcceptedAlgorithms +ssh-rsa\n  HostkeyAlgorithms +ssh-rsa\n"` | |
| istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| istio.gateway | string | `"istio-ingress/ingressgateway"` | Name of the Istio gateway to add the VirtualService to |
| istio.ipBlocks | list | `[]` | |

## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

@@ -18,3 +18,4 @@
## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

@@ -1,5 +1,5 @@
{{- if .Values.istio.enabled }}
{{- if .Values.istio.ipBlocks }}
{{- if index .Values "argo-cd" "istio" "enabled" }}
{{- if index .Values "argo-cd" "istio" "ipBlocks" }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
@@ -16,7 +16,7 @@ spec:
- from:
- source:
notIpBlocks:
{{- toYaml .Values.istio.ipBlocks | nindent 8 }}
{{- toYaml (index .Values "argo-cd" "istio" "ipBlocks") | nindent 8 }}
to:
- operation:
hosts: [{{ index .Values "argo-cd" "configs" "cm" "url" | quote }}]
@@ -1,4 +1,4 @@
{{- if .Values.istio.enabled }}
{{- if index .Values "argo-cd" "istio" "enabled" }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
@@ -8,9 +8,9 @@ metadata:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
gateways:
- {{ .Values.istio.gateway }}
- {{ index .Values "argo-cd" "istio" "gateway" }}
hosts:
- {{ index .Values "argo-cd" "configs" "cm" "url" }}
- {{ get (urlParse (index .Values "argo-cd" "configs" "cm" "url")) "host" }}
http:
- name: grpc
match:
@@ -19,13 +19,13 @@ spec:
prefix: argocd-client
route:
- destination:
host: argocd-server
host: argo-argocd-server
port:
number: 443
- name: http
route:
- destination:
host: argocd-server
host: argo-argocd-server
port:
number: 80
{{- end }}
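
A quick way to confirm the `urlParse` change renders a bare hostname instead of the full URL — a sketch only, run from the chart directory and assuming dependencies have been built with `helm dep build`:

```bash
# Render the chart with istio enabled and a scheme-qualified url;
# the VirtualService hosts entry should show just "argocd.example.com"
helm template . \
  --set argo-cd.istio.enabled=true \
  --set argo-cd.configs.cm.url=https://argocd.example.com \
  | grep -A2 'hosts:'
```
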
@@ -1,6 +1,10 @@
#!/bin/bash

helm dep update
. ../../scripts/lib-update.sh

update_helm

# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/argo-cd/grafana-dashboards.yaml

update_docs
@@ -1,16 +1,43 @@
# Support for Istio Ingress for ArgoCD
istio:
# istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
argo-events:
enabled: false
# istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: istio-ingress/ingressgateway
ipBlocks: []
configs:
jetstream:
# Default JetStream settings, could be overridden by EventBus JetStream spec
# Ref: https://docs.nats.io/running-a-nats-service/configuration#jetstream
settings:
# -- Maximum size of the memory storage (e.g. 1G)
maxMemoryStore: -1
# -- Maximum size of the file storage (e.g. 20G)
maxFileStore: -1
streamConfig:
# -- Maximum number of messages before expiring oldest message
maxMsgs: 1000000
# -- Maximum age of existing messages, i.e. “72h”, “4h35m”
maxAge: 72h
# Total size of messages before expiring oldest message, 0 means unlimited.
maxBytes: 1GB
# -- Number of replicas, defaults to 3 and requires minimal 3
replicas: 1
# -- Not documented at the moment
duplicates: 300s
# Supported versions of JetStream eventbus
# see: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml
# do NOT use -alpine tag as the entrypoint differs
versions:
- version: 2.10.11
natsImage: nats:2.10.11-scratch
metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0
configReloaderImage: natsio/nats-server-config-reloader:0.14.1
startCommand: /nats-server


argocd-apps:
projects: []
applications: []
enabled: false
projects: {}
applications: {}

argo-cd:
enabled: false
#configs:
# secret:
# `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
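
The commented `htpasswd` hint above is the usual way to pre-seed the admin password when `configs.secret.createSecret` is false. A sketch, assuming `htpasswd` from apache2-utils is installed and that the resulting hash is stored as `admin.password` in the `argocd-secret`:

```bash
# Produce a bcrypt hash ArgoCD accepts; the sed rewrites the $2y prefix to $2a
ARGO_PWD='example-password'   # placeholder - never commit a real password
htpasswd -nbBC 10 "" "$ARGO_PWD" | tr -d ':\n' | sed 's/$2y/$2a/'
```
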
@@ -26,18 +53,19 @@ argo-cd:
configs:
styles: |
.sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
.sidebar__logo__text-logo { height: 0em; }
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }

cm:
ui.bannercontent: "KubeZero v1.25 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.25"
ui.bannercontent: "KubeZero v1.28 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.28"
ui.bannerpermanent: "true"
ui.bannerposition: "bottom"

# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
url: argocd.example.com
# argo-cd.server.config.url -- ArgoCD URL being exposed via Istio
url: https://argocd.example.com

timeout.reconciliation: 300
timeout.reconciliation: 300s

resource.customizations: |
cert-manager.io/Certificate:
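
The truncated `resource.customizations` block above carries the Lua health check shown in full in the values table: it maps a Certificate's `Ready` condition to Healthy or Degraded and falls back to Progressing. The same condition can be read directly with kubectl — the name and namespace here are placeholders:

```bash
# What the Lua health check evaluates, as a one-off query (sketch)
kubectl -n example-ns get certificate example-cert \
  -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
# "True" -> Healthy, "False" -> Degraded, anything else -> Progressing
```
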
@@ -67,17 +95,8 @@ argo-cd:
secret:
createSecret: false

knownHosts:
data:
ssh_known_hosts: |
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw==
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
ssh:
extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="

params:
controller.status.processors: "10"
@@ -93,12 +112,12 @@ argo-cd:
enabled: true

resources:
# limits:
limits:
# cpu: 500m
# memory: 2048Mi
memory: 2048Mi
requests:
cpu: 100m
memory: 256Mi
memory: 512Mi

repoServer:
metrics:
@@ -128,6 +147,14 @@ argo-cd:
notifications:
enabled: false

# Support for Istio Ingress for ArgoCD
istio:
# istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
enabled: false
# istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: istio-ingress/ingressgateway
ipBlocks: []

argocd-image-updater:
enabled: false
@@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-auth
description: KubeZero umbrella chart for all things Authentication and Identity management
type: application
version: 0.3.5
appVersion: 21.1.1
version: 0.4.6
appVersion: 22.0.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -16,8 +16,8 @@ dependencies:
- name: kubezero-lib
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: postgresql
version: 11.8.1
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
kubeVersion: ">= 1.25.0"
- name: keycloak
version: 18.7.1
repository: "oci://registry-1.docker.io/bitnamicharts"
condition: keycloak.enabled
kubeVersion: ">= 1.26.0"
@@ -1,6 +1,6 @@
# kubezero-auth

![Version: 0.3.5](https://img.shields.io/badge/Version-0.3.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 21.1.1](https://img.shields.io/badge/AppVersion-21.1.1-informational?style=flat-square)
![Version: 0.4.5](https://img.shields.io/badge/Version-0.4.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 22.0.5](https://img.shields.io/badge/AppVersion-22.0.5-informational?style=flat-square)

KubeZero umbrella chart for all things Authentication and Identity management

@@ -14,40 +14,50 @@ KubeZero umbrella chart for all things Authentication and Identity management

## Requirements

Kubernetes: `>= 1.25.0`
Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.bitnami.com/bitnami | postgresql | 11.8.1 |
| oci://registry-1.docker.io/bitnamicharts | keycloak | 18.3.2 |

# Keycloak

## Operator

https://www.keycloak.org/operator/installation
https://github.com/keycloak/keycloak/tree/main/operator
https://github.com/aerogear/keycloak-metrics-spi
https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keycloak/templates

## Resources

- Codecentric Helm chart: `https://github.com/codecentric/helm-charts/tree/master/charts/keycloak`
- custom image: `https://www.keycloak.org/server/containers`
- https://github.com/bitnami/charts/tree/main/bitnami/keycloak

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| keycloak.auth.adminUser | string | `"admin"` | |
| keycloak.auth.existingSecret | string | `"kubezero-auth"` | |
| keycloak.auth.passwordSecretKey | string | `"admin-password"` | |
| keycloak.enabled | bool | `false` | |
| keycloak.istio.enabled | bool | `false` | |
| keycloak.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| keycloak.istio.url | string | `""` | |
| keycloak.istio.admin.enabled | bool | `false` | |
| keycloak.istio.admin.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| keycloak.istio.admin.url | string | `""` | |
| keycloak.istio.auth.enabled | bool | `false` | |
| keycloak.istio.auth.gateway | string | `"istio-ingress/ingressgateway"` | |
| keycloak.istio.auth.url | string | `""` | |
| keycloak.metrics.enabled | bool | `false` | |
| keycloak.podDisruptionBudget.minAvailable | int | `1` | |
| keycloak.replicas | int | `1` | |
| postgresql.auth.database | string | `"keycloak"` | |
| postgresql.auth.existingSecret | string | `"kubezero-auth-postgresql"` | |
| postgresql.auth.username | string | `"keycloak"` | |
| postgresql.enabled | bool | `false` | |
| postgresql.primary.persistence.size | string | `"1Gi"` | |
| postgresql.readReplicas.replicaCount | int | `0` | |
| keycloak.metrics.serviceMonitor.enabled | bool | `true` | |
| keycloak.pdb.create | bool | `false` | |
| keycloak.pdb.minAvailable | int | `1` | |
| keycloak.postgresql.auth.database | string | `"keycloak"` | |
| keycloak.postgresql.auth.existingSecret | string | `"kubezero-auth"` | |
| keycloak.postgresql.auth.username | string | `"keycloak"` | |
| keycloak.postgresql.primary.persistence.size | string | `"1Gi"` | |
| keycloak.postgresql.readReplicas.replicaCount | int | `0` | |
| keycloak.production | bool | `true` | |
| keycloak.proxy | string | `"edge"` | |
| keycloak.replicaCount | int | `1` | |
| keycloak.resources.requests.cpu | string | `"100m"` | |
| keycloak.resources.requests.memory | string | `"512Mi"` | |

@@ -17,13 +17,12 @@

## Operator

https://www.keycloak.org/operator/installation
https://github.com/keycloak/keycloak/tree/main/operator
https://github.com/aerogear/keycloak-metrics-spi
https://github.com/keycloak/keycloak-benchmark/tree/main/provision/minikube/keycloak/templates

## Resources

- Codecentric Helm chart: `https://github.com/codecentric/helm-charts/tree/master/charts/keycloak`
- custom image: `https://www.keycloak.org/server/containers`
- https://github.com/bitnami/charts/tree/main/bitnami/keycloak

{{ template "chart.valuesSection" . }}

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -4,5 +4,6 @@ gzip: true
# folder:
dashboards:
- name: keycloak
url: https://grafana.com/api/dashboards/10441/revisions/2/download
# url: https://grafana.com/api/dashboards/10441/revisions/2/download
url: https://grafana.com/api/dashboards/17878/revisions/1/download
tags: ['Keycloak', 'Auth']

@@ -0,0 +1,14 @@
# Abstract

## IdP

### AWS

Get client descriptor for your realm and client via:
`wget https://<auth-endpoint>/realms/<realm>/protocol/saml/descriptor`
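
For example, with a hypothetical endpoint and realm:

```bash
# Placeholders only - substitute your own Keycloak endpoint and realm
wget -qO- https://auth.example.com/realms/myrealm/protocol/saml/descriptor
```
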
# Resources

## AWS
- https://aws.amazon.com/blogs/business-intelligence/federate-amazon-quicksight-access-with-open-source-identity-provider-keycloak/
- https://docs.aws.amazon.com/singlesignon/latest/userguide/troubleshooting.html#issue8
@@ -0,0 +1,49 @@
# Upgrade Postgres major version

## backup

- shell into running postgres-auth pod
```
export PGPASSWORD="<postgres_password from secret>"
cd /bitnami/postgresql
pg_dumpall > backup
```

- store backup off-site
```
kubectl cp keycloak/kubezero-auth-postgresql-0:/bitnami/postgresql/backup postgres-backup
```

## upgrade

- upgrade auth chart
- set replica of the keycloak statefulSet to 0
- set replica of the postgres-auth statefulSet to 0
- delete postgres-auth PVC and POD to flush old DB (see the sketch below)
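
A sketch of those scale-down steps with kubectl; the statefulset and PVC names follow the defaults visible in the backup commands above, but may differ in your install:

```bash
kubectl -n keycloak scale statefulset kubezero-auth-keycloak --replicas=0
kubectl -n keycloak scale statefulset kubezero-auth-postgresql --replicas=0
# flush the old database files
kubectl -n keycloak delete pvc data-kubezero-auth-postgresql-0
kubectl -n keycloak delete pod kubezero-auth-postgresql-0 --ignore-not-found
```
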
## restore

- restore replica of postgres-auth statefulSet
- copy backup to new PVC
```
kubectl cp postgres-backup keycloak/kubezero-auth-postgresql-0:/bitnami/postgresql/backup
```

- log into psql as admin ( shell on running pod )
```
psql -U postgres
```

- drop database `keycloak` in case the keycloak instances connected early
```
DROP DATABASE keycloak;
```

- actual restore
```
psql -U postgres -d postgres -f backup
```

- reset replica of keycloak statefulSet or force ArgoCD sync

success.

@@ -1,12 +0,0 @@
--- templates/keycloak/operator.yaml.orig	2022-05-11 12:46:15.860204871 +0200
+++ templates/keycloak/operator.yaml	2022-05-11 12:46:02.840068240 +0200
@@ -1,3 +1,4 @@
+{{- if .Values.keycloak.enabled }}
---
apiVersion: v1
kind: ServiceAccount
@@ -233,3 +234,4 @@
successThreshold: 1
timeoutSeconds: 10
serviceAccountName: keycloak-operator
+{{- end }}
File diff suppressed because one or more lines are too long
@@ -1,8 +1,8 @@
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.enabled }}
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.admin.enabled }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
name: {{ .Release.Name }}-deny-metrics-ipblocks
name: {{ .Release.Name }}-keycloak-admin-deny-not-in-ipblocks
namespace: istio-system
labels:
{{- include "kubezero-lib.labels" $ | nindent 4 }}
@@ -12,22 +12,23 @@ spec:
app: istio-ingressgateway
action: DENY
rules:
# block access to metrics via Ingress
- to:
- operation:
hosts: ["{{ .Values.keycloak.istio.url }}"]
paths: ["/auth/realms/master/metrics"]
hosts: ["{{ .Values.keycloak.istio.admin.url }}"]
paths: ["/metrics", "/realms/*/metrics"]
when:
- key: connection.sni
values:
- '*'
{{- if .Values.keycloak.istio.ipBlocks }}
{{- if .Values.keycloak.istio.admin.ipBlocks }}
- from:
- source:
notIpBlocks:
{{- toYaml .Values.keycloak.istio.ipBlocks | nindent 8 }}
{{- toYaml .Values.keycloak.istio.admin.ipBlocks | nindent 8 }}
to:
- operation:
hosts: ["{{ .Values.keycloak.istio.url }}"]
hosts: ["{{ .Values.keycloak.istio.admin.url }}"]
when:
- key: connection.sni
values:
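
Once the tightened policy above is applied, the metrics paths should be denied at the ingress. A quick probe — the hostname is a placeholder for your `istio.admin.url`:

```bash
# Expect 403 from the DENY AuthorizationPolicy on both paths
curl -s -o /dev/null -w '%{http_code}\n' https://admin.auth.example.com/metrics
curl -s -o /dev/null -w '%{http_code}\n' https://admin.auth.example.com/realms/master/metrics
```
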
@@ -1,18 +1,44 @@
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.enabled .Values.keycloak.istio.url }}
{{- if and .Values.keycloak.enabled .Values.keycloak.istio.admin.enabled .Values.keycloak.istio.admin.url }}
# Admin endpoint / all URLs allowed
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: {{ template "kubezero-lib.fullname" $ }}
name: {{ template "kubezero-lib.fullname" $ }}-admin
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" $ | nindent 4 }}
spec:
gateways:
- {{ .Values.keycloak.istio.gateway }}
- {{ .Values.keycloak.istio.admin.gateway }}
hosts:
- {{ .Values.keycloak.istio.url }}
- {{ .Values.keycloak.istio.admin.url }}
http:
- route:
- destination:
host: {{ template "kubezero-lib.fullname" $ }}-service
host: {{ template "kubezero-lib.fullname" $ }}-keycloak
{{- end }}

---

{{- if and .Values.keycloak.enabled .Values.keycloak.istio.auth.enabled .Values.keycloak.istio.auth.url }}
# auth endpoint - only expose minimal URLs
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: {{ template "kubezero-lib.fullname" $ }}-auth
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" $ | nindent 4 }}
spec:
gateways:
- {{ .Values.keycloak.istio.auth.gateway }}
hosts:
- {{ .Values.keycloak.istio.auth.url }}
http:
- match:
- uri:
regex: ^/(js/|realms/|resources/|robots.txt).*
route:
- destination:
host: {{ template "kubezero-lib.fullname" $ }}-keycloak
{{- end }}

@@ -1,56 +0,0 @@
{{- if .Values.keycloak.enabled }}
apiVersion: k8s.keycloak.org/v2alpha1
kind: Keycloak
metadata:
name: {{ template "kubezero-lib.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
instances: {{ .Values.keycloak.replicas }}

additionalOptions:
# Needs int casting thx to https://github.com/kubernetes-sigs/yaml/issues/45
{{- if lt (int .Values.keycloak.replicas) 2 }}
- name: cache
value: local
{{- end }}
{{- if .Values.postgresql.enabled }}
- name: db
value: postgres
- name: db-url-host
value: {{ template "kubezero-lib.fullname" . }}-postgresql
- name: db-username
value: keycloak
- name: db-password
secret:
name: {{ template "kubezero-lib.fullname" . }}-postgresql
key: password
{{- else }}
# Fallback to local file within the pod - dev ONLY !!
- name: db
value: dev-file
{{- end }}
- name: hostname-strict-https
value: "false"
- name: proxy
value: edge
- name: http-enabled
value: "true"
- name: log-console-output
value: json


ingress:
enabled: false

http:
httpEnabled: true

# We use Istio Ingress to terminate TLS
# mTls down the road
hostname:
hostname: {{ default "keycloak" .Values.keycloak.istio.url }}
strict: false
strictBackchannel: false
{{- end }}
@@ -1,237 +0,0 @@
{{- if .Values.keycloak.enabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels:
app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 21.1.1
name: keycloak-operator
---
apiVersion: v1
kind: Service
metadata:
annotations:
app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels:
app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 21.1.1
name: keycloak-operator
spec:
ports:
- name: http
port: 80
targetPort: 8080
selector:
app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 21.1.1
type: ClusterIP
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: keycloak-operator-role
rules:
- apiGroups:
- apps
- extensions
resources:
- statefulsets
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- ""
resources:
- secrets
- services
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: keycloak-operator
name: keycloak-operator-role-binding
roleRef:
kind: Role
apiGroup: rbac.authorization.k8s.io
name: keycloak-operator-role
subjects:
- kind: ServiceAccount
name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: keycloak-operator-view
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: view
subjects:
- kind: ServiceAccount
name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: keycloakcontroller-role-binding
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: keycloakcontroller-cluster-role
subjects:
- kind: ServiceAccount
name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: keycloakrealmimportcontroller-role-binding
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: keycloakrealmimportcontroller-cluster-role
subjects:
- kind: ServiceAccount
name: keycloak-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: keycloakcontroller-cluster-role
rules:
- apiGroups:
- k8s.keycloak.org
resources:
- keycloaks
- keycloaks/status
- keycloaks/finalizers
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: keycloakrealmimportcontroller-cluster-role
rules:
- apiGroups:
- k8s.keycloak.org
resources:
- keycloakrealmimports
- keycloakrealmimports/status
- keycloakrealmimports/finalizers
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels:
app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 21.1.1
name: keycloak-operator
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 21.1.1
template:
metadata:
annotations:
app.quarkus.io/build-timestamp: 2023-04-26 - 10:32:03 +0000
labels:
app.kubernetes.io/name: keycloak-operator
app.kubernetes.io/version: 21.1.1
spec:
containers:
- env:
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OPERATOR_KEYCLOAK_IMAGE
value: quay.io/keycloak/keycloak:21.1.1
image: quay.io/keycloak/keycloak-operator:21.1.1
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
httpGet:
path: /q/health/live
port: 8080
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 10
name: keycloak-operator
ports:
- containerPort: 8080
name: http
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /q/health/ready
port: 8080
scheme: HTTP
initialDelaySeconds: 0
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 10
serviceAccountName: keycloak-operator
{{- end }}
@@ -1,15 +0,0 @@
{{- if and .Values.keycloak.podDisruptionBudget (gt (int .Values.keycloak.replicas) 1) }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ template "kubezero-lib.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
app: keycloak
app.kubernetes.io/managed-by: keycloak-operator
{{- toYaml .Values.keycloak.podDisruptionBudget | nindent 2 }}
{{- end }}
@@ -1,19 +1,12 @@
#!/bin/bash

# https://www.keycloak.org/operator/installation

set -ex

helm dep update
. ../../scripts/lib-update.sh

# Operator
VERSION=$(yq eval '.appVersion' Chart.yaml)

wget -O crds/keycloak.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/keycloaks.k8s.keycloak.org-v1.yml
wget -O crds/keycloak-realmimports.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/keycloakrealmimports.k8s.keycloak.org-v1.yml

wget -O templates/keycloak/operator.yaml https://raw.githubusercontent.com/keycloak/keycloak-k8s-resources/"${VERSION}"/kubernetes/kubernetes.yml
patch -i keycloak.patch -p0 --no-backup-if-mismatch
login_ecr_public
update_helm

# Fetch dashboards
../kubezero-metrics/sync_grafana_dashboards.py dashboards-keycloak.yaml templates/keycloak/grafana-dashboards.yaml

update_docs
@@ -1,29 +1,48 @@
keycloak:
enabled: false

replicas: 1
podDisruptionBudget:
minAvailable: 1
proxy: edge
production: true

istio:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: ""
auth:
adminUser: admin
existingSecret: kubezero-auth
passwordSecretKey: admin-password

replicaCount: 1

pdb:
create: false
minAvailable: 1

metrics:
enabled: false
serviceMonitor:
enabled: true
resources:
requests:
cpu: 100m
memory: 512Mi

postgresql:
enabled: false
postgresql:
auth:
existingSecret: kubezero-auth
username: keycloak
database: keycloak

auth:
existingSecret: kubezero-auth-postgresql
username: keycloak
database: keycloak
primary:
persistence:
size: 1Gi

primary:
persistence:
size: 1Gi
readReplicas:
replicaCount: 0

readReplicas:
replicaCount: 0
istio:
admin:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: ""
auth:
enabled: false
gateway: istio-ingress/ingressgateway
url: ""
@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.9.4
version: 0.9.7
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -16,6 +16,6 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: cert-manager
version: 1.11.1
version: v1.14.4
repository: https://charts.jetstack.io
kubeVersion: ">= 1.25.0"
kubeVersion: ">= 1.26.0"
@@ -1,6 +1,6 @@
# kubezero-cert-manager

![Version: 0.9.4](https://img.shields.io/badge/Version-0.9.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.9.7](https://img.shields.io/badge/Version-0.9.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

KubeZero Umbrella Chart for cert-manager

@@ -14,12 +14,12 @@ KubeZero Umbrella Chart for cert-manager

## Requirements

Kubernetes: `>= 1.25.0`
Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jetstack.io | cert-manager | 1.11.1 |
| https://charts.jetstack.io | cert-manager | v1.14.4 |

## AWS - OIDC IAM roles

@@ -32,13 +32,16 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| cert-manager.cainjector.extraArgs[0] | string | `"--logging-format=json"` | |
| cert-manager.cainjector.extraArgs[1] | string | `"--leader-elect=false"` | |
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.cainjector.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.enableCertificateOwnerRef | bool | `true` | |
| cert-manager.enabled | bool | `true` | |
| cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.extraArgs[0] | string | `"--logging-format=json"` | |
| cert-manager.extraArgs[1] | string | `"--leader-elect=false"` | |
| cert-manager.extraArgs[2] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.global.leaderElection.namespace | string | `"cert-manager"` | |
| cert-manager.ingressShim.defaultIssuerKind | string | `"ClusterIssuer"` | |
| cert-manager.ingressShim.defaultIssuerName | string | `"letsencrypt-dns-prod"` | |
@@ -46,14 +49,11 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make
| cert-manager.prometheus.servicemonitor.enabled | bool | `false` | |
| cert-manager.startupapicheck.enabled | bool | `false` | |
| cert-manager.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.webhook.extraArgs[0] | string | `"--logging-format=json"` | |
| cert-manager.webhook.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.webhook.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| clusterIssuer | object | `{}` | |
| localCA.enabled | bool | `false` | |
| localCA.selfsigning | bool | `true` | |

@@ -1,3 +1,4 @@
rules:
- name: prometheus-rules
condition: 'index .Values "cert-manager" "prometheus" "servicemonitor" "enabled"'
url: file://rules/cert-manager-mixin-prometheusRule

@@ -1,6 +1,15 @@
{
"version": 1,
"dependencies": [
{
"source": {
"git": {
"remote": "https://github.com/imusmanmalik/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "main"
},
{
"source": {
"git": {
@@ -8,16 +17,7 @@
"subdir": "jsonnet/kube-prometheus"
}
},
"version": "release-0.10"
},
{
"source": {
"git": {
"remote": "https://gitlab.com/uneeq-oss/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "master"
"version": "main"
}
],
"legacyImports": true

@@ -8,8 +8,8 @@
"subdir": "grafana"
}
},
"version": "199e363523104ff8b3a12483a4e3eca86372b078",
"sum": "/jDHzVAjHB4AOLkJHw1GyATX5ogZ1iMdcJXZAgaG3+g="
"version": "5698c8940b6dadca3f42107b7839557bc041761f",
"sum": "l6fPvh3tW6fWot308w71QY/amrYsFPeitvz1IgJxqQA="
},
{
"source": {
@@ -18,8 +18,18 @@
"subdir": "contrib/mixin"
}
},
"version": "9d2cda4e44a26f064d8578e258bbba2fc3cd5b73",
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
"version": "5a53a708d8ab9ef936ac5b8062ffc66c77a2c18f",
"sum": "xuUBd2vqF7asyVDe5CE08uPT/RxAdy8O75EjFJoMXXU="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafana.git",
"subdir": "grafana-mixin"
}
},
"version": "1120f9e255760a3c104b57871fcb91801e934382",
"sum": "MkjR7zCgq6MUZgjDzop574tFKoTX2OBr7DTwm1K+Ofs="
},
{
"source": {
@@ -28,9 +38,49 @@
"subdir": "grafonnet"
}
},
"version": "f0b70307b8e5f12236b277883d998af129a8211f",
"version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
"sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet-lib.git",
"subdir": "grafonnet-7.0"
}
},
"version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
"sum": "gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-latest"
}
},
"version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
"sum": "GxEO83uxgsDclLp/fmlUJZDbSGpeUZY6Ap3G2cgdL1g="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-v10.0.0"
}
},
"version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
"sum": "W7sLuAvMSJPkC7Oo31t45Nz/cUdJV7jzNSJTd3F1daM="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-v10.4.0"
}
},
"version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
"sum": "ZSmDT7i/qU9P8ggmuPuJT+jonq1ZEsBRCXycW/H5L/A="
},
{
"source": {
"git": {
@@ -38,8 +88,38 @@
"subdir": "grafana-builder"
}
},
"version": "e0b90a4435817ad642d8d049e7dd975264cb960e",
"sum": "tDR6yT2GVfw0wTU12iZH+m01HrbIr6g/xN+/8nzNkU0="
"version": "7561fd330312538d22b00e0c7caecb4ba66321ea",
"sum": "+z5VY+bPBNqXcmNAV8xbJcbsRA+pro1R3IM7aIY8OlU="
},
{
"source": {
"git": {
"remote": "https://github.com/imusmanmalik/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "72a094ff162bbd93921803994241d73900592c9a",
"sum": "h+YvBTXL5A02165i3yt3SxSAbFftChtXYJ0nYFnOAqo="
},
{
"source": {
"git": {
"remote": "https://github.com/jsonnet-libs/docsonnet.git",
"subdir": "doc-util"
}
},
"version": "6ac6c69685b8c29c54515448eaca583da2d88150",
"sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U="
},
{
"source": {
"git": {
"remote": "https://github.com/jsonnet-libs/xtd.git",
"subdir": ""
}
},
"version": "fc2e57a8839902ed4ba6cab5a99d642500f7102b",
"sum": "43waffw1QzvpY4rKcWoo3L7Vpee+DCYexwLDd5cPG0M="
},
{
"source": {
@@ -48,18 +128,8 @@
"subdir": ""
}
},
"version": "ab104c5c406b91078d676475c14ab18644f84f2d",
"sum": "tRpIInEClWUNe5IS6uIjucFN/KqDFgg19+yo78VrLfU="
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git",
"subdir": "lib/promgrafonnet"
}
},
"version": "eed459199703c969afc318ea55b9361ae48180a7",
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
"version": "a1c276d7a46c4b06fa5d8b4a64441939d398efe5",
"sum": "b/mEai1MvVnZ22YvZlXEO4jWDZledrtJg8eOS1ZUj0M="
},
{
"source": {
@@ -68,8 +138,8 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
"sum": "U1wzIpTAtOvC1yj43Y8PfvT0JfvnAcMfNH12Wi+ab0Y="
"version": "9ba1c3702142918e09e8eb5ca530e15198624259",
"sum": "msMZyUvcebzRILLzNlTIiSOwa1XgQKtP7jbZTkiqwM0="
},
{
"source": {
@@ -78,8 +148,8 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
"sum": "u8gaydJoxEjzizQ8jY8xSjYgWooPmxw+wIWdDxifMAk="
"version": "9ba1c3702142918e09e8eb5ca530e15198624259",
"sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
},
{
"source": {
@@ -88,8 +158,8 @@
"subdir": "jsonnet/kube-prometheus"
}
},
"version": "e7eff18e7e70d7f1168105521451c4d7bd6a6d96",
"sum": "gcgf9y8wos4W8jgcJKuTDfORYDigCxx+q3QOYEijQFo="
"version": "76f2e1ef95be0df752037baa040781c5219e1fb3",
"sum": "IgpAgyyBZ7VT2vr9kSYQP/lkZUNQnbqpGh2sYCtUKs0="
},
{
"source": {
@@ -98,8 +168,8 @@
"subdir": "jsonnet/mixin"
}
},
"version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
"sum": "qZ4WgiweaE6eeKtFK60QUjLO8sf2L9Q8fgafWvDcyfY=",
"version": "71d9433ba612f4b826ffa38520b23a7985b50db3",
"sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
"name": "prometheus-operator-mixin"
},
{
@@ -109,8 +179,8 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
"sum": "yjdwZ+5UXL42EavJleAJmd8Ou6MSDfExvlKAxFCxXVE="
"version": "71d9433ba612f4b826ffa38520b23a7985b50db3",
"sum": "S4LFa0h1AzANixqGMowtwVswVP+y6f+fXloxpO7hMes="
},
{
"source": {
@@ -119,8 +189,8 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "16fa045db47d68a09a102c7b80b8899c1f57c153",
"sum": "pep+dHzfIjh2SU5pEkwilMCAT/NoL6YYflV4x8cr7vU=",
"version": "14cbe6301c732658d6fe877ec55ad5b738abcf06",
"sum": "IpF46ZXsm+0wJJAPtAre8+yxTNZA57mBqGpBP/r7/kw=",
"name": "alertmanager"
},
{
@@ -130,8 +200,8 @@
"subdir": "docs/node-mixin"
}
},
"version": "a2321e7b940ddcff26873612bccdf7cd4c42b6b6",
"sum": "MlWDAKGZ+JArozRKdKEvewHeWn8j2DNBzesJfLVd0dk="
"version": "3accd4cf8286e69d70516abdced6bf186274322a",
"sum": "vWhHvFqV7+fxrQddTeGVKi1e4EzB3VWtNyD8TjSmevY="
},
{
"source": {
@@ -140,10 +210,21 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "41f1a8125e664985dd30674e5bdf6b683eff5d32",
"sum": "ZjQoYhvgKwJNkg+h+m9lW3SYjnjv5Yx5btEipLhru88=",
"version": "773170f372e0a57949854b74231ee3e09185f728",
"sum": "u/Fpz2MPkezy71/q+c7mF0vc3hE9fWt2W/YbvF0LP/8=",
"name": "prometheus"
},
{
"source": {
"git": {
"remote": "https://github.com/pyrra-dev/pyrra.git",
"subdir": "config/crd/bases"
}
},
"version": "551856d42dff02ec38c5b0ea6a2d99c4cb127e82",
"sum": "bY/Pcrrbynguq8/HaI88cQ3B2hLv/xc+76QILY7IL+g=",
"name": "pyrra"
},
{
"source": {
"git": {
@@ -151,19 +232,9 @@
"subdir": "mixin"
}
},
"version": "fb97c9a5ef51849ccb7960abbeb9581ad7f511b9",
"sum": "X+060DnePPeN/87fgj0SrfxVitywTk8hZA9V4nHxl1g=",
"version": "93c79b61825ec00889188e35a58635eee247bc36",
"sum": "HhSSbGGCNHCMy1ee5jElYDm0yS9Vesa7QB2/SHKdjsY=",
"name": "thanos-mixin"
},
{
"source": {
"git": {
"remote": "https://gitlab.com/uneeq-oss/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "eae22f642aaa5d422e4766f6811df2158fc05539",
"sum": "DOg3fzS0OWrjjRPVsKgxID/rk9AC3ESQ4gDELc2RNgM="
}
],
"legacyImports": false

@@ -2,7 +2,7 @@ local addMixin = (import 'kube-prometheus/lib/mixin.libsonnet');

local certManagerMixin = addMixin({
name: 'cert-manager',
mixin: (import 'gitlab.com/uneeq-oss/cert-manager-mixin/mixin.libsonnet')
mixin: (import 'github.com/imusmanmalik/cert-manager-mixin/mixin.libsonnet')
});

{ 'cert-manager-mixin-prometheusRule': certManagerMixin.prometheusRules }

@@ -17,8 +17,8 @@
"alert": "CertManagerAbsent",
"annotations": {
"description": "New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.",
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent",
"summary": "Cert Manager has dissapeared from Prometheus service discovery."
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerabsent",
"summary": "Cert Manager has disappeared from Prometheus service discovery."
},
"expr": "absent(up{job=\"cert-manager\"})",
"for": "10m",
@@ -36,7 +36,7 @@
"annotations": {
"dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
"description": "The domain that this cert covers will be unavailable after {{ $value | humanizeDuration }}. Clients using endpoints that this cert protects will start to fail in {{ $value | humanizeDuration }}.",
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon",
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertexpirysoon",
"summary": "The cert `{{ $labels.name }}` is {{ $value | humanizeDuration }} from expiry, it should have renewed over a week ago."
},
"expr": "avg by (exported_namespace, namespace, name) (\n certmanager_certificate_expiration_timestamp_seconds - time()\n) < (21 * 24 * 3600) # 21 days in seconds\n",
@@ -50,7 +50,7 @@
"annotations": {
"dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
"description": "This certificate has not been ready to serve traffic for at least 10m. If the cert is being renewed or there is another valid cert, the ingress controller _may_ be able to serve that instead.",
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready",
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertnotready",
"summary": "The cert `{{ $labels.name }}` is not ready to serve traffic."
},
"expr": "max by (name, exported_namespace, namespace, condition) (\n certmanager_certificate_ready_status{condition!=\"True\"} == 1\n)\n",
@@ -64,7 +64,7 @@
"annotations": {
"dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
"description": "Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week.",
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits",
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerhittingratelimits",
"summary": "Cert manager hitting LetsEncrypt rate limits."
},
"expr": "sum by (host) (\n rate(certmanager_http_acme_client_request_count{status=\"429\"}[5m])\n) > 0\n",

@ -13,8 +13,8 @@ spec:
|
|||
- alert: CertManagerAbsent
|
||||
annotations:
|
||||
description: New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.
|
||||
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent
|
||||
summary: Cert Manager has dissapeared from Prometheus service discovery.
|
||||
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerabsent
|
||||
summary: Cert Manager has disappeared from Prometheus service discovery.
|
||||
expr: absent(up{job="cert-manager"})
|
||||
for: 10m
|
||||
labels:
|
||||
|
@ -25,7 +25,7 @@ spec:
|
|||
annotations:
|
||||
dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
|
||||
description: The domain that this cert covers will be unavailable after {{`{{`}} $value | humanizeDuration {{`}}`}}. Clients using endpoints that this cert protects will start to fail in {{`{{`}} $value | humanizeDuration {{`}}`}}.
|
||||
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon
|
||||
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertexpirysoon
|
||||
summary: The cert `{{`{{`}} $labels.name {{`}}`}}` is {{`{{`}} $value | humanizeDuration {{`}}`}} from expiry, it should have renewed over a week ago.
|
||||
expr: "avg by (exported_namespace, namespace, name) (\n certmanager_certificate_expiration_timestamp_seconds - time()\n) < (21 * 24 * 3600) # 21 days in seconds\n"
|
||||
for: 1h
|
||||
|
@ -35,7 +35,7 @@ spec:
|
|||
annotations:
|
||||
dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
|
||||
description: This certificate has not been ready to serve traffic for at least 10m. If the cert is being renewed or there is another valid cert, the ingress controller _may_ be able to serve that instead.
|
||||
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready
|
||||
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertnotready
|
||||
summary: The cert `{{`{{`}} $labels.name {{`}}`}}` is not ready to serve traffic.
|
||||
expr: "max by (name, exported_namespace, namespace, condition) (\n certmanager_certificate_ready_status{condition!=\"True\"} == 1\n)\n"
|
||||
for: 10m
|
||||
|
@ -45,7 +45,7 @@ spec:
|
|||
annotations:
|
||||
dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
|
||||
description: Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week.
|
||||
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits
|
||||
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerhittingratelimits
|
||||
summary: Cert manager hitting LetsEncrypt rate limits.
|
||||
expr: "sum by (host) (\n rate(certmanager_http_acme_client_request_count{status=\"429\"}[5m])\n) > 0\n"
|
||||
for: 5m
|
||||
|
|
|
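As a quick sanity check, the 21-day expression above can be run as an ad-hoc query before relying on the alert; a minimal sketch, assuming Prometheus is reachable on localhost:9090 via a port-forward (the service name and namespace are illustrative, not from this repo):

```bash
# List certificates the CertManagerCertExpirySoon expression would currently flag.
# Assumes something like: kubectl port-forward -n monitoring svc/prometheus-operated 9090
curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=avg by (exported_namespace, namespace, name) (certmanager_certificate_expiration_timestamp_seconds - time()) < (21 * 24 * 3600)' |
  jq -r '.data.result[] | "\(.metric.name): \(.value[1])s until expiry"'
```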
@@ -1,24 +1,21 @@
#!/bin/bash
set -ex

helm dep update
. ../../scripts/lib-update.sh

update_helm

update_jsonnet

# Install cert-manager mixin
jb install github.com/imusmanmalik/cert-manager-mixin@main

# Install rules
rm -rf rules && mkdir -p rules
jsonnet -J vendor -m rules rules.jsonnet
../kubezero-metrics/sync_prometheus_rules.py cert-manager-rules.yaml templates

# Fetch dashboards from Grafana.com and update ZDT CM
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml

# Get kube-mixin for alerts
which jsonnet > /dev/null || { echo "Required jsonnet not found!"; exit 1;}
which jb > /dev/null || { echo "Required jb ( json-bundler ) not found!"; exit 1;}

[ -r jsonnetfile.json ] || jb init
if [ -r jsonnetfile.lock.json ]; then
  jb update
else
  jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.10
  jb install gitlab.com/uneeq-oss/cert-manager-mixin@master
fi

rm -rf rules && mkdir -p rules
jsonnet -J vendor -m rules rules.jsonnet

../kubezero-metrics/sync_prometheus_rules.py cert-manager-rules.yaml templates
update_docs
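When switching mixin sources like this, the rendered rules can be regenerated and reviewed locally before committing; a minimal sketch reusing the same tooling the script calls (jb and jsonnet on the PATH, run from the chart directory):

```bash
# Re-render the Prometheus rules from the vendored mixin and inspect the delta.
jb install github.com/imusmanmalik/cert-manager-mixin@main
rm -rf rules && mkdir -p rules
jsonnet -J vendor -m rules rules.jsonnet
../kubezero-metrics/sync_prometheus_rules.py cert-manager-rules.yaml templates
git diff -- templates/
```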
@@ -23,6 +23,16 @@ cert-manager:
  leaderElection:
    namespace: "cert-manager"

  # remove secrets if the cert is deleted
  enableCertificateOwnerRef: true

  extraArgs:
    - "--logging-format=json"
    - "--leader-elect=false"
    - "--dns01-recursive-nameservers-only"
    # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
    # - --enable-certificate-owner-ref=true

  #enableCertificateOwnerRef: true

  # On AWS enable Projected Service Accounts to assume IAM role

@@ -49,8 +59,6 @@ cert-manager:
  #     readOnly: true

  tolerations:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule
  nodeSelector:

@@ -62,26 +70,22 @@ cert-manager:

  webhook:
    tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""
    extraArgs:
      - "--logging-format=json"

  cainjector:
    tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""

  extraArgs:
    - "--dns01-recursive-nameservers-only"
    # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
    # - --enable-certificate-owner-ref=true
  extraArgs:
    - "--logging-format=json"
    - "--leader-elect=false"

  prometheus:
    servicemonitor:
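For context on the new `enableCertificateOwnerRef: true` default: cert-manager then sets an ownerReference from each Certificate onto the Secret it issues, so deleting the Certificate garbage-collects the Secret. A minimal illustration of an affected resource (all names below are hypothetical, not from this chart):

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: git-example-com            # hypothetical
  namespace: istio-ingress
spec:
  secretName: git-example-com-tls  # with ownerRef enabled, this Secret is owned by
                                   # the Certificate and removed together with it
  issuerRef:
    name: letsencrypt-dns-prod     # hypothetical ClusterIssuer
    kind: ClusterIssuer
  dnsNames:
    - git.example.com
```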
@@ -2,14 +2,14 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.6.2
version: 0.8.11
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  - jenkins
  - goCD
  - gitea
  - renovate
maintainers:
  - name: Stefan Reimer
    email: stefan@zero-downtime.net

@@ -17,20 +17,20 @@ dependencies:
  - name: kubezero-lib
    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: gocd
    version: 1.40.8
    repository: https://gocd.github.io/helm-chart
    condition: gocd.enabled
  - name: gitea
    version: 8.2.0
    version: 10.1.4
    repository: https://dl.gitea.io/charts/
    condition: gitea.enabled
  - name: jenkins
    version: 4.3.20
    version: 5.1.18
    repository: https://charts.jenkins.io
    condition: jenkins.enabled
  - name: trivy
    version: 0.7.0
    repository: https://aquasecurity.github.io/helm-charts/
    condition: trivy.enabled
kubeVersion: ">= 1.24.0"
  - name: renovate
    version: 37.368.2
    repository: https://docs.renovatebot.com/helm-charts
    condition: renovate.enabled
kubeVersion: ">= 1.25.0"
@@ -1,6 +1,6 @@
# kubezero-ci

![Version: 0.6.2](https://img.shields.io/badge/Version-0.6.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.11](https://img.shields.io/badge/Version-0.8.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

KubeZero umbrella chart for all things CI

@@ -14,15 +14,15 @@ KubeZero umbrella chart for all things CI

## Requirements

Kubernetes: `>= 1.24.0`
Kubernetes: `>= 1.25.0`

| Repository | Name | Version |
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.7.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 4.3.20 |
| https://dl.gitea.io/charts/ | gitea | 8.2.0 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 |
| https://charts.jenkins.io | jenkins | 5.1.18 |
| https://dl.gitea.io/charts/ | gitea | 10.1.4 |
| https://docs.renovatebot.com/helm-charts | renovate | 37.368.2 |

# Jenkins
- default build retention 10 builds, 32 days

@@ -34,8 +34,10 @@ Kubernetes: `>= 1.24.0`

# Gitea

## OpenSSH 8.8 RSA disabled
- https://github.com/go-gitea/gitea/issues/17798

# Verdaccio

## Authentication sealed-secret
```htpasswd -n -b -B -C 4 <username> <password> | kubeseal --raw --namespace verdaccio --name verdaccio-htpasswd```

## Resources
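The `kubeseal --raw` pipeline above emits a single encrypted string; it is then placed into a SealedSecret roughly as follows (a minimal sketch, with the key name and the encrypted payload as placeholders):

```yaml
apiVersion: bitnami.com/v1alpha1
kind: SealedSecret
metadata:
  name: verdaccio-htpasswd
  namespace: verdaccio
spec:
  encryptedData:
    htpasswd: AgB3...   # paste the kubeseal --raw output here (placeholder)
  template:
    metadata:
      name: verdaccio-htpasswd
      namespace: verdaccio
```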
@@ -46,76 +48,92 @@

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| gitea.checkDeprecation | bool | `false` | |
| gitea.enabled | bool | `false` | |
| gitea.extraVolumeMounts[0].mountPath | string | `"/data/gitea/public/assets/css"` | |
| gitea.extraVolumeMounts[0].name | string | `"gitea-themes"` | |
| gitea.extraVolumeMounts[0].readOnly | bool | `true` | |
| gitea.extraVolumes[0].configMap.name | string | `"gitea-kubezero-ci-themes"` | |
| gitea.extraVolumes[0].name | string | `"gitea-themes"` | |
| gitea.gitea.admin.existingSecret | string | `"gitea-admin-secret"` | |
| gitea.gitea.config.cache.ADAPTER | string | `"memory"` | |
| gitea.gitea.config.database.DB_TYPE | string | `"sqlite3"` | |
| gitea.gitea.config.log.LEVEL | string | `"warn"` | |
| gitea.gitea.config.queue.TYPE | string | `"level"` | |
| gitea.gitea.config.session.PROVIDER | string | `"memory"` | |
| gitea.gitea.config.ui.DEFAULT_THEME | string | `"github-dark"` | |
| gitea.gitea.config.ui.THEMES | string | `"gitea,github-dark"` | |
| gitea.gitea.demo | bool | `false` | |
| gitea.gitea.metrics.enabled | bool | `false` | |
| gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
| gitea.image.rootless | bool | `true` | |
| gitea.image.tag | string | `"1.21.11"` | |
| gitea.istio.enabled | bool | `false` | |
| gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| gitea.istio.url | string | `"git.example.com"` | |
| gitea.mariadb.enabled | bool | `false` | |
| gitea.memcached.enabled | bool | `false` | |
| gitea.mysql.enabled | bool | `false` | |
| gitea.persistence.enabled | bool | `true` | |
| gitea.persistence.claimName | string | `"data-gitea-0"` | |
| gitea.persistence.size | string | `"4Gi"` | |
| gitea.postgresql-ha.enabled | bool | `false` | |
| gitea.postgresql.enabled | bool | `false` | |
| gitea.redis-cluster.enabled | bool | `false` | |
| gitea.repliaCount | int | `1` | |
| gitea.resources.limits.memory | string | `"2048Mi"` | |
| gitea.resources.requests.cpu | string | `"150m"` | |
| gitea.resources.requests.memory | string | `"320Mi"` | |
| gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
| gitea.securityContext.capabilities.add[0] | string | `"SYS_CHROOT"` | |
| gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
| gocd.enabled | bool | `false` | |
| gocd.istio.enabled | bool | `false` | |
| gocd.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| gocd.istio.url | string | `""` | |
| gocd.server.ingress.enabled | bool | `false` | |
| gocd.server.service.type | string | `"ClusterIP"` | |
| gitea.strategy.type | string | `"Recreate"` | |
| gitea.test.enabled | bool | `false` | |
| jenkins.agent.annotations."container.apparmor.security.beta.kubernetes.io/jnlp" | string | `"unconfined"` | |
| jenkins.agent.containerCap | int | `2` | |
| jenkins.agent.customJenkinsLabels[0] | string | `"podman-aws-trivy"` | |
| jenkins.agent.idleMinutes | int | `15` | |
| jenkins.agent.image | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.idleMinutes | int | `30` | |
| jenkins.agent.image.repository | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.image.tag | string | `"v0.5.0"` | |
| jenkins.agent.podName | string | `"podman-aws"` | |
| jenkins.agent.podRetention | string | `"Default"` | |
| jenkins.agent.resources.limits.cpu | string | `""` | |
| jenkins.agent.resources.limits.memory | string | `""` | |
| jenkins.agent.resources.requests.cpu | string | `""` | |
| jenkins.agent.resources.requests.memory | string | `""` | |
| jenkins.agent.serviceAccount | string | `"jenkins-podman-aws"` | |
| jenkins.agent.showRawYaml | bool | `false` | |
| jenkins.agent.tag | string | `"v0.4.1"` | |
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n requests:\n cpu: \"512m\"\n memory: \"1024Mi\"\n limits:\n cpu: \"4\"\n memory: \"6144Mi\"\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n containers:\n - name: jnlp\n resources:\n requests:\n cpu: \"512m\"\n memory: \"1024Mi\"\n limits:\n cpu: \"4\"\n memory: \"6144Mi\"\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nappearance:\n themeManager:\n disableUserThemes: true\n theme: \"dark\"\nunclassified:\n openTelemetry:\n configurationProperties: |-\n otel.exporter.otlp.protocol=grpc\n otel.instrumentation.jenkins.web.enabled=false\n ignoredSteps: \"dir,echo,isUnix,pwd,properties\"\n #endpoint: \"telemetry-jaeger-collector.telemetry:4317\"\n exportOtelConfigurationAsEnvironmentVariables: false\n #observabilityBackends:\n # - jaeger:\n # jaegerBaseUrl: \"https://jaeger.example.com\"\n # name: \"KubeZero Jaeger\"\n serviceName: \"Jenkins\"\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
| jenkins.controller.containerEnv[0].name | string | `"OTEL_LOGS_EXPORTER"` | |
| jenkins.controller.containerEnv[0].value | string | `"none"` | |
| jenkins.controller.containerEnv[1].name | string | `"OTEL_METRICS_EXPORTER"` | |
| jenkins.controller.containerEnv[1].value | string | `"none"` | |
| jenkins.controller.disableRememberMe | bool | `true` | |
| jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | |
| jenkins.controller.image.tag | string | `"alpine-jdk17"` | |
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3910.ve59cec5e33ea_"` | |
| jenkins.controller.installPlugins[10] | string | `"build-discarder:139.v05696a_7fe240"` | |
| jenkins.controller.installPlugins[11] | string | `"dark-theme:315.va_22e7d692ea_a"` | |
| jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.211.vc236a_f5a_2f3c"` | |
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
| jenkins.controller.installPlugins[2] | string | `"git:5.0.0"` | |
| jenkins.controller.installPlugins[3] | string | `"basic-branch-build-strategies:71.vc1421f89888e"` | |
| jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:183.v9e27732d970f"` | |
| jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.32"` | |
| jenkins.controller.installPlugins[6] | string | `"configuration-as-code:1625.v27444588cc3d"` | |
| jenkins.controller.installPlugins[7] | string | `"antisamy-markup-formatter:159.v25b_c67cd35fb_"` | |
| jenkins.controller.installPlugins[8] | string | `"prometheus:2.2.1"` | |
| jenkins.controller.installPlugins[9] | string | `"htmlpublisher:1.31"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes"` | |
| jenkins.controller.installPlugins[10] | string | `"htmlpublisher"` | |
| jenkins.controller.installPlugins[11] | string | `"build-discarder"` | |
| jenkins.controller.installPlugins[12] | string | `"dark-theme"` | |
| jenkins.controller.installPlugins[13] | string | `"matrix-auth"` | |
| jenkins.controller.installPlugins[14] | string | `"reverse-proxy-auth-plugin"` | |
| jenkins.controller.installPlugins[15] | string | `"opentelemetry"` | |
| jenkins.controller.installPlugins[1] | string | `"kubernetes-credentials-provider"` | |
| jenkins.controller.installPlugins[2] | string | `"workflow-aggregator"` | |
| jenkins.controller.installPlugins[3] | string | `"git"` | |
| jenkins.controller.installPlugins[4] | string | `"basic-branch-build-strategies"` | |
| jenkins.controller.installPlugins[5] | string | `"pipeline-graph-view"` | |
| jenkins.controller.installPlugins[6] | string | `"pipeline-stage-view"` | |
| jenkins.controller.installPlugins[7] | string | `"configuration-as-code"` | |
| jenkins.controller.installPlugins[8] | string | `"antisamy-markup-formatter"` | |
| jenkins.controller.installPlugins[9] | string | `"prometheus"` | |
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=300 --sessionEviction=10800"` | |
| jenkins.controller.prometheus.enabled | bool | `false` | |
| jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
| jenkins.controller.resources.requests.cpu | string | `"250m"` | |
| jenkins.controller.resources.requests.memory | string | `"1280Mi"` | |
| jenkins.controller.tag | string | `"alpine-jdk17"` | |
| jenkins.controller.testEnabled | bool | `false` | |
| jenkins.enabled | bool | `false` | |
| jenkins.istio.agent.enabled | bool | `false` | |

@@ -131,8 +149,15 @@ Kubernetes: `>= 1.24.0`
| jenkins.rbac.readSecrets | bool | `true` | |
| jenkins.serviceAccountAgent.create | bool | `true` | |
| jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
| renovate.cronjob.concurrencyPolicy | string | `"Forbid"` | |
| renovate.cronjob.jobBackoffLimit | int | `3` | |
| renovate.cronjob.schedule | string | `"0 3 * * *"` | |
| renovate.cronjob.successfulJobsHistoryLimit | int | `1` | |
| renovate.enabled | bool | `false` | |
| renovate.env.LOG_FORMAT | string | `"json"` | |
| renovate.securityContext.fsGroup | int | `1000` | |
| trivy.enabled | bool | `false` | |
| trivy.image.tag | string | `"0.39.1"` | |
| trivy.image.tag | string | `"0.50.1"` | |
| trivy.persistence.enabled | bool | `true` | |
| trivy.persistence.size | string | `"1Gi"` | |
| trivy.rbac.create | bool | `false` | |
@@ -23,8 +23,10 @@

# Gitea

## OpenSSH 8.8 RSA disabled
- https://github.com/go-gitea/gitea/issues/17798

# Verdaccio

## Authentication sealed-secret
```htpasswd -n -b -B -C 4 <username> <password> | kubeseal --raw --namespace verdaccio --name verdaccio-htpasswd```

## Resources
File diff suppressed because it is too large
@@ -12,6 +12,15 @@ spec:
  hosts:
  - {{ .Values.gitea.istio.url }}
  http:
{{- if .Values.gitea.istio.authProvider }}
  # https://github.com/go-gitea/gitea/issues/13606
  - match:
    - uri:
        regex: ^/user/login.*
    redirect:
      uri: /user/oauth2/{{ .Values.gitea.istio.authProvider }}
      redirectCode: 302
{{- end }}
  - route:
    - destination:
        host: gitea-http
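For readers less familiar with Helm templating: with illustrative values `gitea.istio.url: git.example.com` and `gitea.istio.authProvider: github`, the hunk above renders roughly to this VirtualService fragment, which short-circuits Gitea's local login form to the OAuth2 provider:

```yaml
http:
  - match:
      - uri:
          regex: ^/user/login.*
    redirect:
      uri: /user/oauth2/github
      redirectCode: 302
  - route:
      - destination:
          host: gitea-http
```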
@@ -0,0 +1,8 @@
{{- if .Values.gitea.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" (include "kubezero-lib.fullname" $) "themes" | trunc 63 | trimSuffix "-" }}
data:
{{ (.Files.Glob "files/gitea/themes/*").AsConfig | nindent 2 }}
{{- end }}
@@ -1,7 +1,13 @@
#!/bin/bash
set -ex

helm dep update
. ../../scripts/lib-update.sh

#login_ecr_public
update_helm

# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboard-jenkins.yaml templates/jenkins/grafana-dashboard.yaml
../kubezero-metrics/sync_grafana_dashboards.py dashboard-gitea.yaml templates/gitea/grafana-dashboard.yaml

update_docs
@@ -1,25 +1,21 @@
gocd:
  enabled: false

  server:
    service:
      type: "ClusterIP"
    ingress:
      enabled: false

  istio:
    enabled: false
    gateway: istio-ingress/private-ingressgateway
    url: "" # gocd.example.com


gitea:
  enabled: false

  image:
    #tag: 1.17.4
    tag: 1.21.11
    rootless: true

  repliaCount: 1

  # We use RWO persistence
  strategy:
    type: "Recreate"

  # Since V9 they default to RWX and deployment, we default to old existing RWO from statefulset
  persistence:
    claimName: data-gitea-0
    size: 4Gi

  securityContext:
    allowPrivilegeEscalation: false
    capabilities:

@@ -28,10 +24,6 @@ gitea:
      add:
        - SYS_CHROOT

  persistence:
    enabled: true
    size: 4Gi

  resources:
    requests:
      cpu: "150m"
@@ -39,6 +31,20 @@ gitea:
    limits:
      memory: "2048Mi"

  extraVolumes:
    - name: gitea-themes
      configMap:
        name: gitea-kubezero-ci-themes

  extraVolumeMounts:
    - name: gitea-themes
      readOnly: true
      mountPath: "/data/gitea/public/assets/css"

  checkDeprecation: false
  test:
    enabled: false

  gitea:
    admin:
      existingSecret: gitea-admin-secret

@@ -56,15 +62,22 @@ gitea:
      DB_TYPE: sqlite3
      cache:
        ADAPTER: memory
      session:
        PROVIDER: memory
      queue:
        TYPE: level
      ui:
        THEMES: "gitea,github-dark"
        DEFAULT_THEME: "github-dark"
      log:
        LEVEL: warn

  memcached:
  redis-cluster:
    enabled: false
  postgresql-ha:
    enabled: false
  postgresql:
    enabled: false
  mysql:
    enabled: false
  mariadb:
    enabled: false

  istio:
    enabled: false

@@ -76,15 +89,23 @@ jenkins:
  enabled: false

  controller:
    tag: alpine-jdk17
    #tagLabel: alpine
    image:
      tag: alpine-jdk17
      #tagLabel: alpine
    disableRememberMe: true
    prometheus:
      enabled: false
    testEnabled: false
    enableRawHtmlMarkupFormatter: true
    javaOpts: "-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""
    jenkinsOpts: "--sessionTimeout=180 --sessionEviction=3600"
    jenkinsOpts: "--sessionTimeout=300 --sessionEviction=10800"

    # Until we setup the logging and metrics pipelines in OTEL
    containerEnv:
      - name: OTEL_LOGS_EXPORTER
        value: "none"
      - name: OTEL_METRICS_EXPORTER
        value: "none"

    resources:
      requests:

@@ -108,7 +129,23 @@ jenkins:
          noUsageStatistics: true
          disabledAdministrativeMonitors:
            - "jenkins.security.ResourceDomainRecommendation"
        appearance:
          themeManager:
            disableUserThemes: true
            theme: "dark"
        unclassified:
          openTelemetry:
            configurationProperties: |-
              otel.exporter.otlp.protocol=grpc
              otel.instrumentation.jenkins.web.enabled=false
            ignoredSteps: "dir,echo,isUnix,pwd,properties"
            #endpoint: "telemetry-jaeger-collector.telemetry:4317"
            exportOtelConfigurationAsEnvironmentVariables: false
            #observabilityBackends:
            # - jaeger:
            #     jaegerBaseUrl: "https://jaeger.example.com"
            #     name: "KubeZero Jaeger"
            serviceName: "Jenkins"
          buildDiscarders:
            configuredBuildDiscarders:
              - "jobBuildDiscarder"
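To actually ship traces, the commented-out backend above would be filled in. A minimal sketch of the relevant zdt-settings fragment, assuming a Jaeger collector reachable at the placeholder host already present in the comments:

```yaml
unclassified:
  openTelemetry:
    endpoint: "telemetry-jaeger-collector.telemetry:4317"   # gRPC OTLP endpoint (placeholder from the comment above)
    observabilityBackends:
      - jaeger:
          jaegerBaseUrl: "https://jaeger.example.com"       # UI base URL used for trace links (placeholder)
          name: "KubeZero Jaeger"
    serviceName: "Jenkins"
```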
@@ -121,19 +158,22 @@ jenkins:
            numToKeepStr: "10"

    installPlugins:
      - kubernetes:3910.ve59cec5e33ea_
      - workflow-aggregator:581.v0c46fa_697ffd
      - git:5.0.0
      - basic-branch-build-strategies:71.vc1421f89888e
      - pipeline-graph-view:183.v9e27732d970f
      - pipeline-stage-view:2.32
      - configuration-as-code:1625.v27444588cc3d
      - antisamy-markup-formatter:159.v25b_c67cd35fb_
      - prometheus:2.2.1
      - htmlpublisher:1.31
      - build-discarder:139.v05696a_7fe240
      - dark-theme:315.va_22e7d692ea_a
      - kubernetes-credentials-provider:1.211.vc236a_f5a_2f3c
      - kubernetes
      - kubernetes-credentials-provider
      - workflow-aggregator
      - git
      - basic-branch-build-strategies
      - pipeline-graph-view
      - pipeline-stage-view
      - configuration-as-code
      - antisamy-markup-formatter
      - prometheus
      - htmlpublisher
      - build-discarder
      - dark-theme
      - matrix-auth
      - reverse-proxy-auth-plugin
      - opentelemetry

    serviceAccountAgent:
      create: true
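Dropping the version suffixes delegates version resolution to the chart's plugin installation tooling, which picks releases compatible with the controller image; a specific release can still be pinned by reintroducing the `plugin-id:version` form. A hedged illustration (the pinned version shown is hypothetical):

```yaml
installPlugins:
  - kubernetes        # resolved automatically against the controller version
  - git:5.2.1         # hypothetical pin using the plugin-id:version syntax
```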
@@ -141,18 +181,20 @@ jenkins:

  # Preconfigure agents to use zdt podman; requires fuse/overlayfs
  agent:
    image: public.ecr.aws/zero-downtime/jenkins-podman
    tag: v0.4.1
    image:
      repository: public.ecr.aws/zero-downtime/jenkins-podman
      tag: v0.5.0
    #alwaysPullImage: true
    podRetention: "Default"
    showRawYaml: false
    podName: "podman-aws"
    customJenkinsLabels:
      - podman-aws-trivy
    idleMinutes: 15
    containerCap: 2
    serviceAccount: jenkins-podman-aws
    annotations:
      container.apparmor.security.beta.kubernetes.io/jnlp: unconfined
    customJenkinsLabels:
      - podman-aws-trivy
    idleMinutes: 30
    containerCap: 2
    resources:
      requests:
        cpu: ""

@@ -174,7 +216,6 @@ jenkins:
      spec:
        securityContext:
          fsGroup: 1000
        serviceAccountName: jenkins-podman-aws
        containers:
          - name: jnlp
            resources:

@@ -231,9 +272,22 @@ jenkins:

trivy:
  enabled: false
  image:
    tag: 0.39.1
    tag: 0.50.1
  persistence:
    enabled: true
    size: 1Gi
  rbac:
    create: false

renovate:
  enabled: false

  env:
    LOG_FORMAT: json
  cronjob:
    concurrencyPolicy: Forbid
    jobBackoffLimit: 3
    schedule: "0 3 * * *"
    successfulJobsHistoryLimit: 1
  securityContext:
    fsGroup: 1000
@@ -0,0 +1,23 @@
apiVersion: v2
name: kubezero-falco
description: Falco Container Security and Audit components
type: application
version: 0.1.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  - falco
maintainers:
  - name: Stefan Reimer
    email: stefan@zero-downtime.net
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.6"
    repository: https://cdn.zero-downtime.net/charts/
  - name: falco
    version: 4.2.5
    repository: https://falcosecurity.github.io/charts
    condition: k8saudit.enabled
    alias: k8saudit
kubeVersion: ">= 1.26.0"
@@ -0,0 +1,64 @@
# kubezero-falco

![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)

Falco Container Security and Audit components

**Homepage:** <https://kubezero.com>

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |

## Requirements

Kubernetes: `>= 1.26.0`

| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://falcosecurity.github.io/charts | k8saudit(falco) | 4.2.5 |

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| k8saudit.collectors | object | `{"enabled":false}` | Disable the collectors, no syscall events to enrich with metadata. |
| k8saudit.controller | object | `{"deployment":{"replicas":1},"kind":"deployment"}` | Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurable. |
| k8saudit.controller.deployment.replicas | int | `1` | Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing. For more info check the section on Plugins in the README.md file. |
| k8saudit.driver | object | `{"enabled":false}` | Disable the drivers since we want to deploy only the k8saudit plugin. |
| k8saudit.enabled | bool | `false` | |
| k8saudit.falco.buffered_outputs | bool | `true` | |
| k8saudit.falco.json_output | bool | `true` | |
| k8saudit.falco.load_plugins[0] | string | `"k8saudit"` | |
| k8saudit.falco.load_plugins[1] | string | `"json"` | |
| k8saudit.falco.log_syslog | bool | `false` | |
| k8saudit.falco.plugins[0].init_config.maxEventSize | int | `1048576` | |
| k8saudit.falco.plugins[0].library_path | string | `"libk8saudit.so"` | |
| k8saudit.falco.plugins[0].name | string | `"k8saudit"` | |
| k8saudit.falco.plugins[0].open_params | string | `"http://:9765/k8s-audit"` | |
| k8saudit.falco.plugins[1].init_config | string | `""` | |
| k8saudit.falco.plugins[1].library_path | string | `"libjson.so"` | |
| k8saudit.falco.plugins[1].name | string | `"json"` | |
| k8saudit.falco.rules_file[0] | string | `"/etc/falco/rules.d"` | |
| k8saudit.falco.syslog_output.enabled | bool | `false` | |
| k8saudit.falcoctl.artifact.follow.enabled | bool | `false` | |
| k8saudit.falcoctl.artifact.install.enabled | bool | `false` | |
| k8saudit.fullnameOverride | string | `"falco-k8saudit"` | |
| k8saudit.mounts.volumeMounts[0].mountPath | string | `"/etc/falco/rules.d"` | |
| k8saudit.mounts.volumeMounts[0].name | string | `"rules-volume"` | |
| k8saudit.mounts.volumes[0].configMap.name | string | `"falco-k8saudit-rules"` | |
| k8saudit.mounts.volumes[0].name | string | `"rules-volume"` | |
| k8saudit.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| k8saudit.resources.limits.cpu | string | `"1000m"` | |
| k8saudit.resources.limits.memory | string | `"512Mi"` | |
| k8saudit.resources.requests.cpu | string | `"100m"` | |
| k8saudit.resources.requests.memory | string | `"256Mi"` | |
| k8saudit.services[0].name | string | `"webhook"` | |
| k8saudit.services[0].ports[0].port | int | `9765` | |
| k8saudit.services[0].ports[0].protocol | string | `"TCP"` | |

----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
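For context on how audit events reach the plugin: `open_params: "http://:9765/k8s-audit"` makes Falco listen on port 9765, and the kube-apiserver is pointed at it through an audit webhook config file. A minimal sketch of such a file, assuming the chart's `webhook` service resolves as `falco-k8saudit-webhook` in the kube-system namespace (the namespace and file path are assumptions, not taken from this chart):

```yaml
# audit-webhook.yaml - kubeconfig-format file passed to the apiserver
# via --audit-webhook-config-file (endpoint/namespace are assumptions)
apiVersion: v1
kind: Config
clusters:
  - name: falco
    cluster:
      server: http://falco-k8saudit-webhook.kube-system.svc:9765/k8s-audit
contexts:
  - name: default
    context:
      cluster: falco
      user: ""
current-context: default
```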
@@ -0,0 +1,804 @@
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2023 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

- required_engine_version: 15

- required_plugin_versions:
  - name: k8saudit
    version: 0.7.0
    alternatives:
      - name: k8saudit-eks
        version: 0.4.0
      - name: k8saudit-gke
        version: 0.1.0
  - name: json
    version: 0.7.0

# Like always_true/always_false, but works with k8s audit events
- macro: k8s_audit_always_true
  condition: (jevt.rawtime exists)

- macro: k8s_audit_never_true
  condition: (jevt.rawtime=0)

# Generally only consider audit events once the response has completed
- list: k8s_audit_stages
  items: ["ResponseComplete"]

# Generally exclude users starting with "system:"
- macro: non_system_user
  condition: (not ka.user.name startswith "system:")

# This macro selects the set of Audit Events used by the below rules.
- macro: kevt
  condition: (jevt.value[/stage] in (k8s_audit_stages))

- macro: kevt_started
  condition: (jevt.value[/stage]=ResponseStarted)

# If you wish to restrict activity to a specific set of users, override/append to this list.
# users created by kops are included
- list: vertical_pod_autoscaler_users
  items: ["vpa-recommender", "vpa-updater"]

- list: allowed_k8s_users
  items: [
    "minikube", "minikube-user", "kubelet", "kops", "admin", "kube", "kube-proxy", "kube-apiserver-healthcheck",
    "kubernetes-admin",
    vertical_pod_autoscaler_users,
    cluster-autoscaler,
    "system:addon-manager",
    "cloud-controller-manager",
    "system:kube-controller-manager"
  ]

- list: eks_allowed_k8s_users
  items: [
    "eks:node-manager",
    "eks:certificate-controller",
    "eks:fargate-scheduler",
    "eks:k8s-metrics",
    "eks:authenticator",
    "eks:cluster-event-watcher",
    "eks:nodewatcher",
    "eks:pod-identity-mutating-webhook",
    "eks:cloud-controller-manager",
    "eks:vpc-resource-controller",
    "eks:addon-manager",
  ]

- list: k8s_audit_sensitive_mount_images
  items: [
    falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
    docker.io/sysdig/sysdig, sysdig/sysdig,
    gcr.io/google_containers/hyperkube,
    gcr.io/google_containers/kube-proxy, docker.io/calico/node,
    docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/consul,
    docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout,
    docker.io/netdata/netdata, docker.io/google/cadvisor, docker.io/prom/node-exporter,
    amazon/amazon-ecs-agent, prom/node-exporter, amazon/cloudwatch-agent
  ]

- list: k8s_audit_privileged_images
  items: [
    falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
    docker.io/calico/node, calico/node,
    docker.io/cloudnativelabs/kube-router,
    docker.io/docker/ucp-agent,
    docker.io/mesosphere/mesos-slave,
    docker.io/rook/toolbox,
    docker.io/sysdig/sysdig,
    gcr.io/google_containers/kube-proxy,
    gcr.io/google-containers/startup-script,
    gcr.io/projectcalico-org/node,
    gke.gcr.io/kube-proxy,
    gke.gcr.io/gke-metadata-server,
    gke.gcr.io/netd-amd64,
    gke.gcr.io/watcher-daemonset,
    gcr.io/google-containers/prometheus-to-sd,
    registry.k8s.io/ip-masq-agent-amd64,
    registry.k8s.io/kube-proxy,
    registry.k8s.io/prometheus-to-sd,
    quay.io/calico/node,
    sysdig/sysdig,
    registry.k8s.io/dns/k8s-dns-node-cache,
    mcr.microsoft.com/oss/kubernetes/kube-proxy
  ]

- rule: Disallowed K8s User
  desc: Detect any k8s operation by users outside of an allowed set of users.
  condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) and not ka.user.name in (eks_allowed_k8s_users)
  output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# In a local/user rules file, you could override this macro to
# explicitly enumerate the container images that you want to run in
# your environment. In this main falco rules file, there isn't any way
# to know all the containers that can run, so any container is
# allowed, by using the always_true macro. In the overridden macro, the condition
# would look something like (ka.req.pod.containers.image.repository in (my-repo/my-image))
- macro: allowed_k8s_containers
  condition: (k8s_audit_always_true)
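Following the comment above, a local override that restricts pods to an explicit set of repositories could look like this; a hedged sketch, with all repository names illustrative:

```yaml
# local/user rules file - hypothetical override of the default allow-all macro
- macro: allowed_k8s_containers
  condition: (ka.req.pod.containers.image.repository in (my-repo/my-image, public.ecr.aws/zero-downtime/jenkins-podman))
```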
- macro: response_successful
  condition: (ka.response.code startswith 2)

- macro: kget
  condition: ka.verb=get

- macro: kcreate
  condition: ka.verb=create

- macro: kmodify
  condition: (ka.verb in (create,update,patch))

- macro: kdelete
  condition: ka.verb=delete

- macro: pod
  condition: ka.target.resource=pods and not ka.target.subresource exists

- macro: pod_subresource
  condition: ka.target.resource=pods and ka.target.subresource exists

- macro: deployment
  condition: ka.target.resource=deployments

- macro: service
  condition: ka.target.resource=services

- macro: configmap
  condition: ka.target.resource=configmaps

- macro: namespace
  condition: ka.target.resource=namespaces

- macro: serviceaccount
  condition: ka.target.resource=serviceaccounts

- macro: clusterrole
  condition: ka.target.resource=clusterroles

- macro: clusterrolebinding
  condition: ka.target.resource=clusterrolebindings

- macro: role
  condition: ka.target.resource=roles

- macro: secret
  condition: ka.target.resource=secrets

- macro: health_endpoint
  condition: ka.uri=/healthz or ka.uri startswith /healthz?

- macro: live_endpoint
  condition: ka.uri=/livez or ka.uri startswith /livez?

- macro: ready_endpoint
  condition: ka.uri=/readyz or ka.uri startswith /readyz?

- rule: Create Disallowed Pod
  desc: >
    Detect an attempt to start a pod with a container image outside of a list of allowed images.
  condition: kevt and pod and kcreate and not allowed_k8s_containers
  output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- rule: Create Privileged Pod
  desc: >
    Detect an attempt to start a pod with a privileged container
  condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_privileged_images)
  output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- macro: sensitive_vol_mount
  condition: >
    (ka.req.pod.volumes.hostpath intersects (/proc, /var/run/docker.sock, /, /etc, /root, /var/run/crio/crio.sock, /run/containerd/containerd.sock, /home/admin, /var/lib/kubelet, /var/lib/kubelet/pki, /etc/kubernetes, /etc/kubernetes/manifests))

- rule: Create Sensitive Mount Pod
  desc: >
    Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc).
    Exceptions are made for known trusted images.
  condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (k8s_audit_sensitive_mount_images)
  output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace resource=%ka.target.resource images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# These container images are allowed to run with hostnetwork=true
# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
- list: k8s_audit_hostnetwork_images
  items: [
    gcr.io/google-containers/prometheus-to-sd,
    gcr.io/projectcalico-org/typha,
    gcr.io/projectcalico-org/node,
    gke.gcr.io/gke-metadata-server,
    gke.gcr.io/kube-proxy,
    gke.gcr.io/netd-amd64,
    registry.k8s.io/ip-masq-agent-amd64,
    registry.k8s.io/prometheus-to-sd
  ]

# Corresponds to K8s CIS Benchmark 1.7.4
- rule: Create HostNetwork Pod
  desc: Detect an attempt to start a pod using the host network.
  condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostnetwork_images)
  output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- list: k8s_audit_hostpid_images
  items: []

- rule: Create HostPid Pod
  desc: Detect an attempt to start a pod using the host pid namespace.
  condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostpid_images)
  output: Pod started using host pid namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- list: k8s_audit_hostipc_images
  items: []

- rule: Create HostIPC Pod
  desc: Detect an attempt to start a pod using the host ipc namespace.
  condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostipc_images)
  output: Pod started using host ipc namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- macro: user_known_node_port_service
  condition: (k8s_audit_never_true)

- rule: Create NodePort Service
  desc: >
    Detect an attempt to start a service with a NodePort service type
  condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service
  output: NodePort Service Created (user=%ka.user.name service=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ports=%ka.req.service.ports)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- macro: contains_private_credentials
  condition: >
    (ka.req.configmap.obj contains "aws_access_key_id" or
     ka.req.configmap.obj contains "aws-access-key-id" or
     ka.req.configmap.obj contains "aws_s3_access_key_id" or
     ka.req.configmap.obj contains "aws-s3-access-key-id" or
     ka.req.configmap.obj contains "password" or
     ka.req.configmap.obj contains "passphrase")

- rule: Create/Modify Configmap With Private Credentials
  desc: >
    Detect creating/modifying a configmap containing a private credential (aws key, password, etc.)
  condition: kevt and configmap and kmodify and contains_private_credentials
  output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb resource=%ka.target.resource configmap=%ka.req.configmap.name config=%ka.req.configmap.obj)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# Corresponds to K8s CIS Benchmark, 1.1.1.
- rule: Anonymous Request Allowed
  desc: >
    Detect any request made by the anonymous user that was allowed
  condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint and not live_endpoint and not ready_endpoint
  output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason))
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# Roughly corresponds to K8s CIS Benchmark, 1.1.12. In this case,
# notifies an attempt to exec/attach to a privileged container.

# Ideally, we'd add a more stringent rule that detects attaches/execs
# to a privileged pod, but that requires the engine for k8s audit
# events to be stateful, so it could know if a container named in an
# attach request was created privileged or not. For now, we have a
# less severe rule that detects attaches/execs to any pod.
#
# For the same reason, you can't use things like image names/prefixes,
# as the event that creates the pod (which has the images) is a
# separate event than the actual exec/attach to the pod.

- macro: user_known_exec_pod_activities
  condition: (k8s_audit_never_true)

- rule: Attach/Exec Pod
  desc: >
    Detect any attempt to attach/exec to a pod
  condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities
  output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command])
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]
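Known-good exec activity (for example, a CI service account debugging its own namespace) can be whitelisted by overriding the macro above in a local rules file; a hedged sketch, with all names hypothetical:

```yaml
# local/user rules file - hypothetical exception for the Attach/Exec Pod rule
- macro: user_known_exec_pod_activities
  condition: (ka.user.name="system:serviceaccount:jenkins:jenkins-podman-aws" and ka.target.namespace=jenkins)
```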
- macro: user_known_portforward_activities
|
||||
condition: (k8s_audit_never_true)
|
||||
|
||||
- rule: port-forward
|
||||
desc: >
|
||||
Detect any attempt to portforward
|
||||
condition: ka.target.subresource in (portforward) and not user_known_portforward_activities
|
||||
output: Portforward to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource )
|
||||
priority: NOTICE
|
||||
source: k8s_audit
|
||||
tags: [k8s]
|
||||
|
||||
- macro: user_known_pod_debug_activities
|
||||
condition: (k8s_audit_never_true)
|
||||
|
||||
# Only works when feature gate EphemeralContainers is enabled
|
||||
- rule: EphemeralContainers Created
|
||||
desc: >
|
||||
Detect any ephemeral container created
|
||||
condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities
|
||||
output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image])
|
||||
priority: NOTICE
|
||||
source: k8s_audit
|
||||
tags: [k8s]
|
||||
|
||||
# In a local/user rules fie, you can append to this list to add additional allowed namespaces
|
||||
- list: allowed_namespaces
|
||||
items: [kube-system, kube-public, default]
|
||||
|
||||
- rule: Create Disallowed Namespace
|
||||
desc: Detect any attempt to create a namespace outside of a set of known namespaces
|
||||
condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces)
|
||||
output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name resource=%ka.target.resource)
|
||||
priority: WARNING
|
||||
source: k8s_audit
|
||||
tags: [k8s]
|
||||
|
||||
# Only defined for backwards compatibility. Use the more specific
|
||||
# user_allowed_kube_namespace_image_list instead.
|
||||
- list: user_trusted_image_list
|
||||
items: []
|
||||
|
||||
- list: user_allowed_kube_namespace_image_list
|
||||
items: [user_trusted_image_list]
|
||||
|
||||
# Only defined for backwards compatibility. Use the more specific
|
||||
# allowed_kube_namespace_image_list instead.
|
||||
- list: k8s_image_list
|
||||
items: []
|
||||
|
||||
# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
|
||||
- list: allowed_kube_namespace_image_list
|
||||
items: [
|
||||
gcr.io/google-containers/prometheus-to-sd,
|
||||
gcr.io/projectcalico-org/node,
|
||||
gke.gcr.io/addon-resizer,
|
||||
gke.gcr.io/heapster,
|
||||
gke.gcr.io/gke-metadata-server,
|
||||
registry.k8s.io/ip-masq-agent-amd64,
|
||||
registry.k8s.io/kube-apiserver,
|
||||
gke.gcr.io/kube-proxy,
|
||||
gke.gcr.io/netd-amd64,
|
||||
gke.gcr.io/watcher-daemonset,
|
||||
registry.k8s.io/addon-resizer,
|
||||
registry.k8s.io/prometheus-to-sd,
|
||||
registry.k8s.io/k8s-dns-dnsmasq-nanny-amd64,
|
||||
registry.k8s.io/k8s-dns-kube-dns-amd64,
|
||||
registry.k8s.io/k8s-dns-sidecar-amd64,
|
||||
registry.k8s.io/metrics-server-amd64,
|
||||
kope/kube-apiserver-healthcheck,
|
||||
k8s_image_list
|
||||
]
|
||||
|
||||
- macro: allowed_kube_namespace_pods
|
||||
condition: (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or
|
||||
ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list))
|
||||
|
||||
# Detect any new pod created in the kube-system namespace
|
||||
- rule: Pod Created in Kube Namespace
|
||||
desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces
|
||||
condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods
|
||||
output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
|
||||
priority: WARNING
|
||||
source: k8s_audit
|
||||
tags: [k8s]
|
||||
|
||||
- list: user_known_sa_list
|
||||
items: []
|
||||
|
||||
- list: known_sa_list
|
||||
items: [
|
||||
coredns,
|
||||
coredns-autoscaler,
|
||||
cronjob-controller,
|
||||
daemon-set-controller,
|
||||
deployment-controller,
|
||||
disruption-controller,
|
||||
endpoint-controller,
|
||||
endpointslice-controller,
|
||||
endpointslicemirroring-controller,
|
||||
generic-garbage-collector,
|
||||
horizontal-pod-autoscaler,
|
||||
job-controller,
|
||||
namespace-controller,
|
||||
node-controller,
|
||||
persistent-volume-binder,
|
||||
pod-garbage-collector,
|
||||
pv-protection-controller,
|
||||
pvc-protection-controller,
|
||||
replicaset-controller,
|
||||
resourcequota-controller,
|
||||
root-ca-cert-publisher,
|
||||
service-account-controller,
|
||||
statefulset-controller
|
||||
]
|
||||
|
||||
- macro: trusted_sa
|
||||
condition: (ka.target.name in (known_sa_list, user_known_sa_list))
|
||||
|
||||
# Detect creating a service account in the kube-system/kube-public namespace
|
||||
- rule: Service Account Created in Kube Namespace
|
||||
desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces
|
||||
condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa
|
||||
output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace)
|
||||
priority: WARNING
|
||||
source: k8s_audit
|
||||
tags: [k8s]
|
||||
|
||||
# Detect any modify/delete to any ClusterRole starting with
|
||||
# "system:". "system:coredns" is excluded as changes are expected in
|
||||
# normal operation.
|
||||
- rule: System ClusterRole Modified/Deleted
|
||||
desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system
|
||||
condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and
|
||||
not ka.target.name in (system:coredns, system:managed-certificate-controller)
|
||||
output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.verb)
|
||||
priority: WARNING
|
||||
source: k8s_audit
|
||||
tags: [k8s]
|
||||
|
||||
# Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
|
||||
# (expand this to any built-in cluster role that does "sensitive" things)
|
||||
- rule: Attach to cluster-admin Role
|
||||
desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
|
||||
condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin
|
||||
output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects)
|
||||
priority: WARNING
|
||||
source: k8s_audit
|
||||
tags: [k8s]
|
||||
|
||||
- rule: ClusterRole With Wildcard Created
|
||||
desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs
|
||||
condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*"))
|
||||
output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
|
||||
priority: WARNING
|
||||
source: k8s_audit
|
||||
tags: [k8s]

- macro: writable_verbs
  condition: >
    (ka.req.role.rules.verbs intersects (create, update, patch, delete, deletecollection))

- rule: ClusterRole With Write Privileges Created
  desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions
  condition: kevt and (role or clusterrole) and kcreate and writable_verbs
  output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]

- rule: ClusterRole With Pod Exec Created
  desc: Detect any attempt to create a Role/ClusterRole that can exec to pods
  condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec")
  output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
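
# # How to test: apply a Role granting pods/exec (name and namespace
# # below are examples):
# apiVersion: rbac.authorization.k8s.io/v1
# kind: Role
# metadata:
#   name: exec-test
#   namespace: default
# rules:
# - apiGroups: [""]
#   resources: ["pods/exec"]
#   verbs: ["create"]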

# The rules below this point are less discriminating and generally
# represent a stream of activity for a cluster. If you wish to disable
# these events, modify the following macro.
- macro: consider_activity_events
  condition: (k8s_audit_always_true)

- macro: kactivity
  condition: (kevt and consider_activity_events)
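
# # Example: to disable the activity rules below, override the macro in
# # a local rules file (e.g. falco_rules.local.yaml):
# - macro: consider_activity_events
#   condition: (k8s_audit_never_true)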

- rule: K8s Deployment Created
  desc: Detect any attempt to create a deployment
  condition: (kactivity and kcreate and deployment and response_successful)
  output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Deployment Deleted
  desc: Detect any attempt to delete a deployment
  condition: (kactivity and kdelete and deployment and response_successful)
  output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Service Created
  desc: Detect any attempt to create a service
  condition: (kactivity and kcreate and service and response_successful)
  output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Service Deleted
  desc: Detect any attempt to delete a service
  condition: (kactivity and kdelete and service and response_successful)
  output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s ConfigMap Created
  desc: Detect any attempt to create a configmap
  condition: (kactivity and kcreate and configmap and response_successful)
  output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s ConfigMap Deleted
  desc: Detect any attempt to delete a configmap
  condition: (kactivity and kdelete and configmap and response_successful)
  output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Namespace Created
  desc: Detect any attempt to create a namespace
  condition: (kactivity and kcreate and namespace and response_successful)
  output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Namespace Deleted
  desc: Detect any attempt to delete a namespace
  condition: (kactivity and non_system_user and kdelete and namespace and response_successful)
  output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Serviceaccount Created
  desc: Detect any attempt to create a service account
  condition: (kactivity and kcreate and serviceaccount and response_successful)
  output: K8s Serviceaccount Created (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Serviceaccount Deleted
  desc: Detect any attempt to delete a service account
  condition: (kactivity and kdelete and serviceaccount and response_successful)
  output: K8s Serviceaccount Deleted (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrole Created
  desc: Detect any attempt to create a cluster role/role
  condition: (kactivity and kcreate and (clusterrole or role) and response_successful)
  output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrole Deleted
  desc: Detect any attempt to delete a cluster role/role
  condition: (kactivity and kdelete and (clusterrole or role) and response_successful)
  output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrolebinding Created
  desc: Detect any attempt to create a clusterrolebinding
  condition: (kactivity and kcreate and clusterrolebinding and response_successful)
  output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name resource=%ka.target.resource subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrolebinding Deleted
  desc: Detect any attempt to delete a clusterrolebinding
  condition: (kactivity and kdelete and clusterrolebinding and response_successful)
  output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Created
  desc: Detect any attempt to create a secret. Service account tokens are excluded.
  condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
  output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Deleted
  desc: Detect any attempt to delete a secret. Service account tokens are excluded.
  condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
  output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Get Successfully
  desc: >
    Detect any attempt to get a secret. Service account tokens are excluded.
  condition: >
    secret and kget
    and kactivity
    and response_successful
  output: K8s Secret Get Successfully (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: ERROR
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Get Unsuccessfully Tried
  desc: >
    Detect an unsuccessful attempt to get a secret. Service account tokens are excluded.
  condition: >
    secret and kget
    and kactivity
    and not response_successful
  output: K8s Secret Get Unsuccessfully Tried (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
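
# # How to test (the secret name below is an example):
# kubectl get secret rule-test-secret -n default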

# This rule generally matches all events, and as a result is disabled
# by default. If you wish to enable these events, modify the
# following macro.
# condition: (jevt.rawtime exists)
- macro: consider_all_events
  condition: (k8s_audit_never_true)

- macro: kall
  condition: (kevt and consider_all_events)

- rule: All K8s Audit Events
  desc: Match all K8s Audit Events
  condition: kall
  output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj)
  priority: DEBUG
  source: k8s_audit
  tags: [k8s]
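
# # Example: to enable the rule above, override the macro in a local
# # rules file (e.g. falco_rules.local.yaml), using the condition
# # suggested in the comment above:
# - macro: consider_all_events
#   condition: (jevt.rawtime exists)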

# This macro disables the following rule; change it to k8s_audit_never_true to enable it
- macro: allowed_full_admin_users
  condition: (k8s_audit_always_true)

# This list includes some of the default user names for an administrator in several K8s installations
- list: full_admin_k8s_users
  items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"]

# This rule detects an operation triggered by a user name that is
# included in the list of those that are default administrators upon
# cluster creation. This may signify an overly broad permission setting.
# As we can't check the role of the user on a general ka.* event, this
# may or may not be an administrator. Customize the full_admin_k8s_users
# list to your needs, and activate at your discretion.

# # How to test:
# # Execute any kubectl command connected using the default cluster user, as:
# kubectl create namespace rule-test

- rule: Full K8s Administrative Access
  desc: Detect any k8s operation by a user name that may be an administrator with full access.
  condition: >
    kevt
    and non_system_user
    and ka.user.name in (full_admin_k8s_users)
    and not allowed_full_admin_users
  output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
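
# # Example: to enable the rule above, override the macro in a local
# # rules file, as the comment above suggests:
# - macro: allowed_full_admin_users
#   condition: (k8s_audit_never_true)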

- macro: ingress
  condition: ka.target.resource=ingresses

- macro: ingress_tls
  condition: (jevt.value[/requestObject/spec/tls] exists)

# # How to test:
# # Create an ingress.yaml file with content:
# apiVersion: networking.k8s.io/v1beta1
# kind: Ingress
# metadata:
#   name: test-ingress
#   annotations:
#     nginx.ingress.kubernetes.io/rewrite-target: /
# spec:
#   rules:
#   - http:
#       paths:
#       - path: /testpath
#         backend:
#           serviceName: test
#           servicePort: 80
# # Execute: kubectl apply -f ingress.yaml
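
# # Note: networking.k8s.io/v1beta1 was removed in Kubernetes 1.22; on
# # newer clusters an equivalent networking.k8s.io/v1 manifest is:
# apiVersion: networking.k8s.io/v1
# kind: Ingress
# metadata:
#   name: test-ingress
#   annotations:
#     nginx.ingress.kubernetes.io/rewrite-target: /
# spec:
#   rules:
#   - http:
#       paths:
#       - path: /testpath
#         pathType: Prefix
#         backend:
#           service:
#             name: test
#             port:
#               number: 80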

- rule: Ingress Object without TLS Certificate Created
  desc: Detect any attempt to create an ingress without TLS certification.
  condition: >
    (kactivity and kcreate and ingress and response_successful and not ingress_tls)
  output: >
    K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name
    namespace=%ka.target.namespace resource=%ka.target.resource)
  source: k8s_audit
  priority: WARNING
  tags: [k8s, network]

- macro: node
  condition: ka.target.resource=nodes

- macro: allow_all_k8s_nodes
  condition: (k8s_audit_always_true)

- list: allowed_k8s_nodes
  items: []

# # How to test:
# # Create a Falco-monitored cluster with kops
# # Increase the minimum number of nodes with:
# kops edit ig nodes
# kops update cluster --yes
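
# # Example: to enforce the allow-list in the rules below, override both
# # the macro and the list in a local rules file (the node name below is
# # an example):
# - macro: allow_all_k8s_nodes
#   condition: (k8s_audit_never_true)
# - list: allowed_k8s_nodes
#   items: ["ip-10-0-0-1.ec2.internal"]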

- rule: Untrusted Node Successfully Joined the Cluster
  desc: >
    Detect a node outside the list of allowed nodes successfully joining the cluster.
  condition: >
    kevt and node
    and kcreate
    and response_successful
    and not allow_all_k8s_nodes
    and not ka.target.name in (allowed_k8s_nodes)
  output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name resource=%ka.target.resource)
  priority: ERROR
  source: k8s_audit
  tags: [k8s]

- rule: Untrusted Node Unsuccessfully Tried to Join the Cluster
  desc: >
    Detect an unsuccessful attempt by a node not in the list of allowed nodes to join the cluster.
  condition: >
    kevt and node
    and kcreate
    and not response_successful
    and not allow_all_k8s_nodes
    and not ka.target.name in (allowed_k8s_nodes)
  output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason resource=%ka.target.resource)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]