Compare commits


1 commit

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Renovate Bot | c4ab772c0d | chore(deps): update helm release manticoresearch to v6 | 2023-11-22 18:39:38 +00:00 |
412 changed files with 58435 additions and 64410 deletions

View File

@@ -1,26 +1,25 @@
# Parse version from latest git semver tag
GIT_TAG ?= $(shell git describe --tags --match v*.*.* 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null | sed -e 's/[^a-zA-Z0-9]/-/g')
TAG ::= $(GIT_TAG)
TAG := $(GIT_TAG)
# append branch name to tag if NOT main nor master
ifeq (,$(filter main master, $(GIT_BRANCH)))
# If branch is substring of tag, omit branch name
ifeq ($(findstring $(GIT_BRANCH), $(GIT_TAG)),)
# only append branch name if not equal tag
ifneq ($(GIT_TAG), $(GIT_BRANCH))
# Sanitize GIT_BRANCH to allowed Docker tag character set
TAG = $(GIT_TAG)-$(shell echo $$GIT_BRANCH | sed -e 's/[^a-zA-Z0-9]/-/g')
TAG = $(GIT_TAG)-$(GIT_BRANCH)
endif
endif
endif
ARCH ::= amd64
ALL_ARCHS ::= amd64 arm64
ARCH := amd64
ALL_ARCHS := amd64 arm64
_ARCH = $(or $(filter $(ARCH),$(ALL_ARCHS)),$(error $$ARCH [$(ARCH)] must be exactly one of "$(ALL_ARCHS)"))
ifneq ($(TRIVY_REMOTE),)
TRIVY_OPTS ::= --server $(TRIVY_REMOTE)
TRIVY_OPTS := --server $(TRIVY_REMOTE)
endif
.SILENT: ; # no need for @
@@ -46,7 +45,7 @@ test:: ## test built artifacts
scan: ## Scan image using trivy
echo "Scanning $(IMAGE):$(TAG)-$(_ARCH) using Trivy $(TRIVY_REMOTE)"
trivy image $(TRIVY_OPTS) --quiet --no-progress localhost/$(IMAGE):$(TAG)-$(_ARCH)
trivy image $(TRIVY_OPTS) localhost/$(IMAGE):$(TAG)-$(_ARCH)
# first tag and push all actual images
# create new manifest for each tag and add all available TAG-ARCH before pushing
@@ -78,7 +77,7 @@ rm-image:
## some useful tasks during development
ci-pull-upstream: ## pull latest shared .ci subtree
git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash -m "Merge latest ci-tools-lib"
git stash && git subtree pull --prefix .ci ssh://git@git.zero-downtime.net/ZeroDownTime/ci-tools-lib.git master --squash && git stash pop
create-repo: ## create new AWS ECR public repository
aws ecr-public create-repository --repository-name $(IMAGE) --region $(REGION)
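
A quick sketch of how the tag logic and `scan` target above behave (tag, branch, and server address are illustrative; `IMAGE` is expected to be defined by the including project):

```sh
# On a branch 'feat/foo' with HEAD at tag 'v1.2.3', TAG resolves to
# 'v1.2.3-feat-foo' ('/' sanitized to '-' in either variant of the logic above).
git checkout -b feat/foo
make scan                                  # scans localhost/$IMAGE:v1.2.3-feat-foo-amd64
TRIVY_REMOTE=http://trivy:4954 make scan   # same scan, offloaded to a Trivy server
```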

View File

@@ -2,9 +2,6 @@
def call(Map config=[:]) {
pipeline {
options {
disableConcurrentBuilds()
}
agent {
node {
label 'podman-aws-trivy'
@@ -13,8 +10,6 @@ def call(Map config=[:]) {
stages {
stage('Prepare') {
steps {
sh 'mkdir -p reports'
// we set pull tags as project adv. options
// pull tags
//withCredentials([gitUsernamePassword(credentialsId: 'gitea-jenkins-user')]) {
@@ -40,13 +35,12 @@ def call(Map config=[:]) {
// Scan via trivy
stage('Scan') {
environment {
TRIVY_FORMAT = "template"
TRIVY_OUTPUT = "reports/trivy.html"
}
steps {
// we always scan and create the full json report
sh 'TRIVY_FORMAT=json TRIVY_OUTPUT="reports/trivy.json" make scan'
// render custom full html report
sh 'trivy convert -f template -t @/home/jenkins/html.tpl -o reports/trivy.html reports/trivy.json'
sh 'mkdir -p reports && make scan'
publishHTML target: [
allowMissing: true,
alwaysLinkToLastBuild: true,
@@ -56,12 +50,13 @@ def call(Map config=[:]) {
reportName: 'TrivyScan',
reportTitles: 'TrivyScan'
]
sh 'echo "Trivy report at: $BUILD_URL/TrivyScan"'
// fail build if issues found above trivy threshold
// Scan again and fail on CRITICAL vulns, if not overridden
script {
if ( config.trivyFail ) {
sh "TRIVY_SEVERITY=${config.trivyFail} trivy convert --report summary --exit-code 1 reports/trivy.json"
if (config.trivyFail == 'NONE') {
echo 'trivyFail == NONE, review Trivy report manually. Proceeding ...'
} else {
sh "TRIVY_EXIT_CODE=1 TRIVY_SEVERITY=${config.trivyFail} make scan"
}
}
}
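
Outside Jenkins, the two-pass flow of this stage maps onto the shared Makefile's `scan` target; a sketch combining the report and gating steps seen in both variants above (the severity threshold is illustrative, in the pipeline it comes from `config.trivyFail`):

```sh
mkdir -p reports
# pass 1: always produce the full JSON report
TRIVY_FORMAT=json TRIVY_OUTPUT="reports/trivy.json" make scan
# pass 2: re-scan and fail (exit code 1) on findings at or above the threshold
TRIVY_EXIT_CODE=1 TRIVY_SEVERITY=CRITICAL make scan
```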

View File

@@ -1,9 +1,9 @@
ARG ALPINE_VERSION=3.19
ARG ALPINE_VERSION=3.18
FROM docker.io/alpine:${ALPINE_VERSION}
ARG ALPINE_VERSION
ARG KUBE_VERSION=1.28.9
ARG KUBE_VERSION=1.27
RUN cd /etc/apk/keys && \
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \

View File

@@ -18,7 +18,7 @@ KubeZero is a Kubernetes distribution providing an integrated container platform
# Version / Support Matrix
KubeZero releases track the same *minor* version of Kubernetes.
Any 1.26.X-Y release of KubeZero supports any Kubernetes cluster 1.26.X.
KubeZero is distributed as a collection of versioned Helm charts, allowing custom upgrade schedules and module versions as needed.
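
Since each module is a plain Helm chart, available module versions can be inspected directly from the chart repository, for example (a sketch using the repo URL referenced throughout):

```sh
helm repo add kubezero https://cdn.zero-downtime.net/charts/
helm repo update
helm search repo kubezero/ --versions   # list all published module chart versions
```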
@@ -28,15 +28,15 @@ KubeZero is distributed as a collection of versioned Helm charts, allowing custo
gantt
title KubeZero Support Timeline
dateFormat YYYY-MM-DD
section 1.25
beta :125b, 2023-03-01, 2023-03-31
release :after 125b, 2023-08-01
section 1.26
beta :126b, 2023-06-01, 2023-06-30
release :after 126b, 2023-11-01
section 1.27
beta :127b, 2023-09-01, 2023-09-30
release :after 127b, 2024-04-30
section 1.28
beta :128b, 2024-03-01, 2024-04-30
release :after 128b, 2024-08-31
section 1.29
beta :129b, 2024-07-01, 2024-08-30
release :after 129b, 2024-11-30
release :after 127b, 2024-02-01
```
[Upstream release policy](https://kubernetes.io/releases/)
@@ -44,8 +44,8 @@ gantt
# Components
## OS
- all compute nodes are running on Alpine V3.19
- 1 or 2 GB encrypted root file system
- all compute nodes are running on Alpine V3.18
- 2 GB encrypted root file system
- no external dependencies at boot time, apart from container registries
- minimal attack surface
- extremely small memory footprint / overhead
@@ -57,7 +57,7 @@ gantt
## Featured workloads
- rootless CI/CD build platform to build containers as part of a CI pipeline, using podman / fuse device plugin support
- containerized AI models via integrated out-of-the-box support for Nvidia GPU workers as well as AWS Neuron
## Control plane
- all Kubernetes components compiled against Alpine OS using `buildmode=pie`
@@ -85,12 +85,12 @@ gantt
- CSI Snapshot controller and Gemini snapshot groups and retention
## Ingress
- AWS Network Load Balancer and Istio Ingress controllers
- no additional costs per exposed service
- real client source IP available to workloads via HTTP header and access logs
- ACME SSL Certificate handling via cert-manager incl. renewal etc.
- support for TCP services
- optional rate limiting support
- optional full service mesh
## Metrics
@@ -104,4 +104,4 @@ gantt
- flexible ElasticSearch setup, leveraging the ECK operator, for easy maintenance & minimal admin knowledge required, incl. automated backups to S3
- Kibana allowing easy search and dashboards for all logs, incl. pre-configured index templates and index management
- [fluentd-concenter](https://git.zero-downtime.net/ZeroDownTime/container-park/src/branch/master/fluentd-concenter) service providing queuing during high load as well as additional parsing options
- lightweight fluent-bit agents on each node requiring minimal resources, forwarding logs securely via TLS to fluentd-concenter

View File

@@ -3,9 +3,6 @@
# Simulate well-known CRDs being available
API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget"
#VERSION="latest"
VERSION="v1.28"
# Waits for max 300s and retries
function wait_for() {
local TRIES=0
@@ -185,125 +182,3 @@ function _helm() {
return 0
}
function all_nodes_upgrade() {
CMD="$1"
echo "Deploy all node upgrade daemonSet(busybox)"
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kubezero-all-nodes-upgrade
namespace: kube-system
labels:
app: kubezero-upgrade
spec:
selector:
matchLabels:
name: kubezero-all-nodes-upgrade
template:
metadata:
labels:
name: kubezero-all-nodes-upgrade
spec:
hostNetwork: true
hostIPC: true
hostPID: true
tolerations:
- operator: Exists
effect: NoSchedule
initContainers:
- name: node-upgrade
image: busybox
command: ["/bin/sh"]
args: ["-x", "-c", "$CMD" ]
volumeMounts:
- name: host
mountPath: /host
- name: hostproc
mountPath: /hostproc
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
containers:
- name: node-upgrade-wait
image: busybox
command: ["sleep", "3600"]
volumes:
- name: host
hostPath:
path: /
type: Directory
- name: hostproc
hostPath:
path: /proc
type: Directory
EOF
kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}
function control_plane_upgrade() {
TASKS="$1"
echo "Deploy cluster admin task: $TASKS"
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: kubezero-upgrade
namespace: kube-system
labels:
app: kubezero-upgrade
spec:
hostNetwork: true
hostIPC: true
hostPID: true
containers:
- name: kubezero-admin
image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
imagePullPolicy: Always
command: ["kubezero.sh"]
args: [$TASKS]
env:
- name: DEBUG
value: "$DEBUG"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: host
mountPath: /host
- name: workdir
mountPath: /tmp
securityContext:
capabilities:
add: ["SYS_CHROOT"]
volumes:
- name: host
hostPath:
path: /
type: Directory
- name: workdir
emptyDir: {}
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
restartPolicy: Never
EOF
kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
while true; do
kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
sleep 3
done
kubectl delete pod kubezero-upgrade -n kube-system
}

View File

@@ -8,11 +8,20 @@ import yaml
def migrate(values):
"""Actual changes here"""
# argoCD moves to argo module
# Cleanup
values.pop("Domain", None)
values.pop("clusterName", None)
if "addons" in values:
if not values["addons"]:
values.pop("addons")
# migrate eck operator to new operator module
try:
if values["argocd"]["enabled"]:
values["argo"] = { "enabled": True, "argo-cd": values["argocd"] }
values.pop("argocd")
if values["logging"]["eck-operator"]["enabled"]:
if "operators" not in values:
values["operators"] = { "enabled": True }
values["operators"]["eck-operator"] = { "enabled": True }
values["logging"].pop("eck-operator", None)
except KeyError:
pass

View File

@@ -2,13 +2,139 @@
set -eE
set -o pipefail
#VERSION="latest"
VERSION="v1.27"
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh
[ -n "$DEBUG" ] && set -x
. "$SCRIPT_DIR"/libhelm.sh
all_nodes_upgrade() {
CMD="$1"
echo "Deploy all node upgrade daemonSet(busybox)"
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kubezero-all-nodes-upgrade
namespace: kube-system
labels:
app: kubezero-upgrade
spec:
selector:
matchLabels:
name: kubezero-all-nodes-upgrade
template:
metadata:
labels:
name: kubezero-all-nodes-upgrade
spec:
hostNetwork: true
hostIPC: true
hostPID: true
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
initContainers:
- name: node-upgrade
image: busybox
command: ["/bin/sh"]
args: ["-x", "-c", "$CMD" ]
volumeMounts:
- name: host
mountPath: /host
- name: hostproc
mountPath: /hostproc
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
containers:
- name: node-upgrade-wait
image: busybox
command: ["sleep", "3600"]
volumes:
- name: host
hostPath:
path: /
type: Directory
- name: hostproc
hostPath:
path: /proc
type: Directory
EOF
kubectl rollout status daemonset -n kube-system kubezero-all-nodes-upgrade --timeout 300s
kubectl delete ds kubezero-all-nodes-upgrade -n kube-system
}
control_plane_upgrade() {
TASKS="$1"
echo "Deploy cluster admin task: $TASKS"
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: kubezero-upgrade
namespace: kube-system
labels:
app: kubezero-upgrade
spec:
hostNetwork: true
hostIPC: true
hostPID: true
containers:
- name: kubezero-admin
image: public.ecr.aws/zero-downtime/kubezero-admin:${VERSION}
imagePullPolicy: Always
command: ["kubezero.sh"]
args: [$TASKS]
env:
- name: DEBUG
value: "$DEBUG"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: host
mountPath: /host
- name: workdir
mountPath: /tmp
securityContext:
capabilities:
add: ["SYS_CHROOT"]
volumes:
- name: host
hostPath:
path: /
type: Directory
- name: workdir
emptyDir: {}
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
restartPolicy: Never
EOF
kubectl wait pod kubezero-upgrade -n kube-system --timeout 120s --for=condition=initialized 2>/dev/null
while true; do
kubectl logs kubezero-upgrade -n kube-system -f 2>/dev/null && break
sleep 3
done
kubectl delete pod kubezero-upgrade -n kube-system
}
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
@@ -17,34 +143,38 @@ argo_used && disable_argo
#all_nodes_upgrade ""
control_plane_upgrade kubeadm_upgrade
#echo "Adjust kubezero values as needed:"
echo "Adjust kubezero values as needed:"
# shellcheck disable=SC2015
#argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-values -n kube-system
### v1.28
# - remove old argocd app, all resources will be taken over by argo.argo-cd
argo_used && rc=$? || rc=$?
if [ $rc -eq 0 ]; then
kubectl patch app argocd -n argocd \
--type json \
--patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]' && \
kubectl delete app argocd -n argocd || true
# We need to restore the network ready file as cilium decided to rename it
control_plane_upgrade apply_network
# remove legacy argocd app resources, but NOT kubezero-git-sync nor the appproject
kubectl api-resources --verbs=list --namespaced -o name | grep -ve 'app.*argoproj' | xargs -n 1 kubectl delete --ignore-not-found -l argocd.argoproj.io/instance=argocd -n argocd
fi
echo "Wait for all CNI agents to be running ..."
kubectl rollout status ds/cilium -n kube-system --timeout=60s
# upgrade modules
control_plane_upgrade "apply_network, apply_addons, apply_storage, apply_operators"
all_nodes_upgrade "cd /host/etc/cni/net.d && ln -s 05-cilium.conflist 05-cilium.conf || true"
# now the rest
control_plane_upgrade "apply_addons, apply_storage, apply_operators"
# Remove legacy eck-operator as part of logging if running
kubectl delete statefulset elastic-operator -n logging || true
echo "Checking that all pods in kube-system are running ..."
waitSystemPodsRunning
echo "Applying remaining KubeZero modules..."
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argo"
### Cleanup of some deprecated Istio CRDs
for crd in clusterrbacconfigs.rbac.istio.io rbacconfigs.rbac.istio.io servicerolebindings.rbac.istio.io serviceroles.rbac.istio.io; do
kubectl delete crds $crd || true
done
control_plane_upgrade "apply_cert-manager, apply_istio, apply_istio-ingress, apply_istio-private-ingress, apply_logging, apply_metrics, apply_telemetry, apply_argocd"
# Trigger backup of upgraded cluster state
kubectl create job --from=cronjob/kubezero-backup kubezero-backup-$VERSION -n kube-system
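
A sketch of invoking this upgrade script (the filename is hypothetical; the optional first argument overrides the default ArgoCD app dump location, and `DEBUG` turns on `set -x`):

```sh
DEBUG=1 ./upgrade_cluster.sh /tmp/new-kubezero-argoapp.yaml
```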

View File

@@ -1,19 +1,18 @@
apiVersion: v2
name: clamav
description: Chart for deploying a ClamAVd on Kubernetes as statefulSet
description: Chart for deploying a ClamavD on kubernetes as statefulSet
type: application
version: "0.3.1"
appVersion: "1.2.1"
version: "0.2.0"
appVersion: "1.1.0"
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- clamav
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
kubeVersion: ">= 1.26.0"
kubeVersion: ">= 1.25.0"

View File

@ -1,8 +1,8 @@
# clamav
![Version: 0.3.1](https://img.shields.io/badge/Version-0.3.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.2.1](https://img.shields.io/badge/AppVersion-1.2.1-informational?style=flat-square)
![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.104.0](https://img.shields.io/badge/AppVersion-0.104.0-informational?style=flat-square)
Chart for deploying a ClamAVd on Kubernetes as statefulSet
Chart for deploying a ClamavD on kubernetes as statefulSet
**Homepage:** <https://kubezero.com>
@@ -10,31 +10,32 @@ Chart for deploying a ClamAVd on Kubernetes as statefulSet
| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |
| Quarky9 | | |
## Requirements
Kubernetes: `>= 1.26.0`
Kubernetes: `>= 1.18.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| freshclam.mirrors | string | `"database.clamav.net"` | A list of clamav mirrors to be used by the clamav service |
| clamav.freshclam.mirrors | string | `"database.clamav.net"` | A list of clamav mirrors to be used by the clamav service |
| clamav.image | string | `"clamav/clamav"` | The clamav docker image |
| clamav.limits.connectionQueueLength | int | `100` | Maximum length the queue of pending connections may grow to |
| clamav.limits.fileSize | int | `20` | The largest file size scannable by clamav, in MB |
| clamav.limits.maxThreads | int | `4` | Maximum number of threads running at the same time. |
| clamav.limits.scanSize | int | `100` | The largest scan size permitted in clamav, in MB |
| clamav.limits.sendBufTimeout | int | `500` | |
| clamav.replicaCount | int | `1` | |
| clamav.resources | object | `{"requests":{"cpu":"300m","memory":"1300M"}}` | The resource requests and limits for the clamav service |
| clamav.version | string | `"unstable"` | The clamav docker image version - defaults to .Chart.appVersion |
| fullnameOverride | string | `""` | override the full name of the clamav chart |
| image | object | `{"repository":"clamav/clamav","type":"base"}` | The clamav docker image |
| limits.connectionQueueLength | int | `100` | Maximum length the queue of pending connections may grow to |
| limits.fileSize | int | `25` | The largest file size scannable by clamav, in MB |
| limits.maxThreads | int | `4` | Maximum number of threads running at the same time. |
| limits.scanSize | int | `100` | The largest scan size permitted in clamav, in MB |
| limits.sendBufTimeout | int | `500` | |
| nameOverride | string | `""` | override the name of the clamav chart |
| replicaCount | int | `1` | |
| resources | object | `{"requests":{"cpu":"300m","memory":"2000M"}}` | The resource requests and limits for the clamav service |
| service.port | int | `3310` | The port to be used by the clamav service |
----------------------------------------------

charts/clamav/deploy.sh (new executable file, +7 lines)
View File

@@ -0,0 +1,7 @@
#!/bin/bash
release=clamav
namespace=clamav
helm template . --namespace $namespace --name-template $release > clamav.yaml
kubectl apply --namespace $namespace -f clamav.yaml

View File

@@ -10,7 +10,7 @@ data:
LogTime yes
LogClean yes
LogSyslog no
LogVerbose yes
LogVerbose no
LogFileMaxSize 0
LogFile /dev/stdout
DatabaseDirectory /var/lib/clamav
@@ -19,28 +19,28 @@ data:
User clamav
ExitOnOOM yes
Foreground yes
MaxScanSize {{.Values.limits.scanSize}}M
MaxFileSize {{.Values.limits.fileSize}}M
MaxScanSize {{.Values.clamav.limits.scanSize}}M
MaxFileSize {{.Values.clamav.limits.fileSize}}M
# Close the connection when the data size limit is exceeded.
# The value should match your MTA's limit for a maximum attachment size.
# Default: 25M
StreamMaxLength {{.Values.limits.scanSize}}M
StreamMaxLength {{.Values.clamav.limits.scanSize}}M
# Maximum length the queue of pending connections may grow to.
# Default: 200
MaxConnectionQueueLength {{.Values.limits.connectionQueueLength}}
MaxConnectionQueueLength {{.Values.clamav.limits.connectionQueueLength}}
# Maximum number of threads running at the same time.
# Default: 10
MaxThreads {{.Values.limits.maxThreads}}
MaxThreads {{.Values.clamav.limits.maxThreads}}
# This option specifies how long to wait (in milliseconds) if the send buffer
# is full.
# Keep this value low to prevent clamd hanging.
#
# Default: 500
SendBufTimeout {{.Values.limits.sendBufTimeout}}
SendBufTimeout {{.Values.clamav.limits.sendBufTimeout}}
freshclam.conf: |
LogTime yes
@@ -49,4 +49,4 @@ data:
Checks 24
LogSyslog no
DatabaseOwner root
DatabaseMirror {{ .Values.freshclam.mirrors }}
DatabaseMirror {{ .Values.clamav.freshclam.mirrors }}
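
Since all clamd limits are templated from values, rendering the chart is a quick way to verify an override; a sketch (chart path and release name are illustrative):

```sh
# raise the file-size limit and confirm clamd.conf picks it up
helm template clamav charts/clamav --set clamav.limits.fileSize=50 \
  | grep -E 'MaxFileSize|StreamMaxLength'
# expected: MaxFileSize 50M / StreamMaxLength 100M
```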

View File

@@ -6,7 +6,7 @@ metadata:
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
replicas: {{ .Values.clamav.replicaCount }}
selector:
matchLabels:
{{- include "kubezero-lib.selectorLabels" . | nindent 6 }}
@@ -20,7 +20,7 @@ spec:
spec:
containers:
- name: clamav
image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}_{{ .Values.image.type }}"
image: "{{ .Values.clamav.image }}:{{ default .Chart.AppVersion .Values.clamav.version }}_base"
ports:
- containerPort: 3310
name: clamav
@@ -41,7 +41,7 @@ spec:
successThreshold: 1
timeoutSeconds: 3
resources:
{{- toYaml .Values.resources | nindent 10 }}
{{- toYaml .Values.clamav.resources | nindent 10 }}
volumeMounts:
- mountPath: /var/lib/clamav
name: signatures
@@ -53,15 +53,15 @@ spec:
- name: config-volume
configMap:
name: {{ include "kubezero-lib.fullname" . }}
{{- with .Values.nodeSelector }}
{{- with .Values.clamav.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
{{- with .Values.clamav.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
{{- with .Values.clamav.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -70,7 +70,7 @@ spec:
name: signatures
spec:
accessModes: [ "ReadWriteOnce" ]
{{- with .Values.storageClassName }}
{{- with .Values.clamav.storageClassName }}
storageClassName: {{ . }}
{{- end }}
resources:

View File

@@ -1,9 +0,0 @@
#!/bin/bash
set -ex
. ../../scripts/lib-update.sh
update_helm
update_docs

View File

@@ -1,41 +1,46 @@
# Default values for clamav.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# nameOverride -- override the name of the clamav chart
nameOverride: ""
# fullnameOverride -- override the full name of the clamav chart
fullnameOverride: ""
# image -- The clamav docker image
image:
repository: clamav/clamav
# version: "latest"
type: base
replicaCount: 1
freshclam:
# freshclam.mirrors -- A list of clamav mirrors to be used by the clamav service
mirrors: database.clamav.net
limits:
# limits.fileSize -- The largest file size scannable by clamav, in MB
fileSize: 25
# limits.scanSize -- The largest scan size permitted in clamav, in MB
scanSize: 100
# limits.connectionQueueLength -- Maximum length the queue of pending connections may grow to
connectionQueueLength: 100
# limits.maxThreads -- Maximum number of threads running at the same time.
maxThreads: 4
# sendBufTimeout -- This option specifies how long to wait (in milliseconds) if the send buffer is full, keep low to avoid clamd hanging
sendBufTimeout: 500
service:
# service.port -- The port to be used by the clamav service
port: 3310
resources:
# resources -- The resource requests and limits for the clamav service
requests:
cpu: 300m
memory: 2000M
#limits:
# cpu: 2
# memory: 4000M
clamav:
# clamav.image -- The clamav docker image
image: clamav/clamav
# clamav.version -- The clamav docker image version - defaults to .Chart.appVersion
# version: "unstable"
replicaCount: 1
freshclam:
# clamav.freshclam.mirrors -- A list of clamav mirrors to be used by the clamav service
mirrors: database.clamav.net
limits:
# clamav.limits.fileSize -- The largest file size scannable by clamav, in MB
fileSize: 20
# clamav.limits.scanSize -- The largest scan size permitted in clamav, in MB
scanSize: 100
# clamav.limits.connectionQueueLength -- Maximum length the queue of pending connections may grow to
connectionQueueLength: 100
# clamav.limits.maxThreads -- Maximum number of threads running at the same time.
maxThreads: 4
# clamav.sendBufTimeout -- This option specifies how long to wait (in milliseconds) if the send buffer is full, keep low to avoid clamd hanging
sendBufTimeout: 500
resources:
# clamav.resources -- The resource requests and limits for the clamav service
requests:
cpu: 300m
memory: 2000M
#limits:
# cpu: 2
# memory: 4000M

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubeadm
description: KubeZero Kubeadm cluster config
type: application
version: 1.28.9
version: 1.27.8
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -2,15 +2,14 @@ apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: {{ .Chart.Version }}
clusterName: {{ .Values.global.clusterName }}
featureGates:
EtcdLearnerMode: true # becomes beta in 1.29
#featureGates:
# NonGracefulFailover: true
controlPlaneEndpoint: {{ .Values.api.endpoint }}
networking:
podSubnet: 10.244.0.0/16
etcd:
local:
# imageTag: 3.5.12-0
# imageTag: 3.5.5-0
extraArgs:
### DNS discovery
#discovery-srv: {{ .Values.domain }}
@@ -74,7 +73,6 @@ apiServer:
{{- end }}
{{- if .Values.api.awsIamAuth.enabled }}
authentication-token-webhook-config-file: /etc/kubernetes/apiserver/aws-iam-authenticator.yaml
authentication-token-webhook-cache-ttl: 3600s
{{- end }}
feature-gates: {{ include "kubeadm.featuregates" ( dict "return" "csv" ) | trimSuffix "," | quote }}
enable-admission-plugins: DenyServiceExternalIPs,NodeRestriction,EventRateLimit,ExtendedResourceToleration

View File

@@ -2,6 +2,6 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# kube-proxy doesn't really support setting dynamic bind-address via config, replaced by cilium long-term anyways
metricsBindAddress: "0.0.0.0:10249"
# calico < 3.22.1 breaks starting with 1.23, see https://github.com/projectcalico/calico/issues/5011
# we go Cilium anyways
mode: "iptables"
logging:
format: json

View File

@@ -6,7 +6,6 @@ cgroupDriver: cgroupfs
logging:
format: json
hairpinMode: hairpin-veth
containerRuntimeEndpoint: "unix:///var/run/crio/crio.sock"
{{- if .Values.systemd }}
resolvConf: /run/systemd/resolve/resolv.conf
{{- end }}
@@ -33,5 +32,6 @@ kubeReserved:
#evictionHard:
# memory.available: "484Mi"
imageGCLowThresholdPercent: 70
# kernelMemcgNotification: true
serializeImagePulls: false
maxParallelImagePulls: 4
maxParallelImagePulls: 2

View File

@@ -1,11 +1,6 @@
{{- /* Feature gates for all control plane components */ -}}
{{- /* Issues: MemoryQoS */ -}}
{{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
{{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
{{- /* v1.29: remove/beta SidecarContainers */ -}}
{{- /* v1.30: remove/beta KubeProxyDrainingTerminatingNodes */ -}}
{{- define "kubeadm.featuregates" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "SidecarContainers" "KubeProxyDrainingTerminatingNodes" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "MemoryQoS" }}
{{- if eq .return "csv" }}
{{- range $key := $gates }}
{{- $key }}=true,
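
Rendered through the ClusterConfiguration template above (`return: csv` plus `trimSuffix ","`), the second gate list shown collapses into a single apiserver argument; a sketch (chart path and values are illustrative, other required values omitted):

```sh
helm template charts/kubeadm --set global.clusterName=test --set api.endpoint=k8s:6443 \
  | grep 'feature-gates:'
# feature-gates: "CustomCPUCFSQuotaPeriod=true,MemoryQoS=true"
```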

View File

@@ -3,6 +3,6 @@ spec:
- name: etcd
resources:
requests:
cpu: 50m
memory: 256Mi
cpu: 200m
memory: 192Mi
#ephemeral-storage: 1Gi

View File

@@ -4,5 +4,5 @@ spec:
- name: kube-apiserver
resources:
requests:
cpu: 250m
memory: 1268Mi
cpu: 200m
memory: 1Gi

View File

@@ -3,5 +3,5 @@ spec:
- name: kube-controller-manager
resources:
requests:
cpu: 50m
memory: 192Mi
cpu: 100m
memory: 128Mi

View File

@@ -3,5 +3,5 @@ spec:
- name: kube-scheduler
resources:
requests:
cpu: 50m
memory: 96Mi
cpu: 100m
memory: 64Mi

View File

@@ -117,7 +117,7 @@ spec:
containers:
- name: aws-iam-authenticator
image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.14
image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.11
args:
- server
- --backend-mode=CRD,MountedFile

View File

@@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.8.7
appVersion: v1.28
version: 0.8.4
appVersion: v1.27
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -20,24 +20,24 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: external-dns
version: 1.14.4
version: 1.13.1
repository: https://kubernetes-sigs.github.io/external-dns/
condition: external-dns.enabled
- name: cluster-autoscaler
version: 9.36.0
version: 9.29.5
repository: https://kubernetes.github.io/autoscaler
condition: cluster-autoscaler.enabled
- name: nvidia-device-plugin
version: 0.15.0
version: 0.14.2
# https://github.com/NVIDIA/k8s-device-plugin
repository: https://nvidia.github.io/k8s-device-plugin
condition: nvidia-device-plugin.enabled
- name: sealed-secrets
version: 2.15.3
version: 2.13.2
repository: https://bitnami-labs.github.io/sealed-secrets
condition: sealed-secrets.enabled
- name: aws-node-termination-handler
version: 0.23.0
version: 0.22.0
repository: "oci://public.ecr.aws/aws-ec2/helm"
condition: aws-node-termination-handler.enabled
- name: aws-eks-asg-rolling-update-handler

View File

@@ -1,6 +1,6 @@
# kubezero-addons
![Version: 0.8.7](https://img.shields.io/badge/Version-0.8.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.28](https://img.shields.io/badge/AppVersion-v1.28-informational?style=flat-square)
![Version: 0.8.4](https://img.shields.io/badge/Version-0.8.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.27](https://img.shields.io/badge/AppVersion-v1.27-informational?style=flat-square)
KubeZero umbrella chart for various optional cluster addons
@@ -18,12 +18,12 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.15.3 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.14.4 |
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.36.0 |
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.15.0 |
| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.13.2 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.13.1 |
| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.29.5 |
| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.14.2 |
| https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 |
| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.23.0 |
| oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.22.0 |
# MetalLB
@ -63,7 +63,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| aws-eks-asg-rolling-update-handler.environmentVars[8].name | string | `"AWS_STS_REGIONAL_ENDPOINTS"` | |
| aws-eks-asg-rolling-update-handler.environmentVars[8].value | string | `"regional"` | |
| aws-eks-asg-rolling-update-handler.image.repository | string | `"twinproduction/aws-eks-asg-rolling-update-handler"` | |
| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.8.3"` | |
| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.8.2"` | |
| aws-eks-asg-rolling-update-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| aws-eks-asg-rolling-update-handler.resources.limits.memory | string | `"128Mi"` | |
| aws-eks-asg-rolling-update-handler.resources.requests.cpu | string | `"10m"` | |
@@ -73,6 +73,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| aws-eks-asg-rolling-update-handler.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
| aws-eks-asg-rolling-update-handler.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-node-termination-handler.checkASGTagBeforeDraining | bool | `false` | |
| aws-node-termination-handler.deleteLocalData | bool | `true` | |
| aws-node-termination-handler.emitKubernetesEvents | bool | `true` | |
| aws-node-termination-handler.enableProbesServer | bool | `true` | |
@@ -101,7 +102,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| aws-node-termination-handler.useProviderId | bool | `true` | |
| awsNeuron.enabled | bool | `false` | |
| awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
| awsNeuron.image.tag | string | `"2.19.16.0"` | |
| awsNeuron.image.tag | string | `"2.12.5.0"` | |
| cluster-autoscaler.autoDiscovery.clusterName | string | `""` | |
| cluster-autoscaler.awsRegion | string | `"us-west-2"` | |
| cluster-autoscaler.enabled | bool | `false` | |
@@ -110,7 +111,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| cluster-autoscaler.extraArgs.scan-interval | string | `"30s"` | |
| cluster-autoscaler.extraArgs.skip-nodes-with-local-storage | bool | `false` | |
| cluster-autoscaler.image.repository | string | `"registry.k8s.io/autoscaling/cluster-autoscaler"` | |
| cluster-autoscaler.image.tag | string | `"v1.28.2"` | |
| cluster-autoscaler.image.tag | string | `"v1.27.3"` | |
| cluster-autoscaler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cluster-autoscaler.podDisruptionBudget | bool | `false` | |
| cluster-autoscaler.prometheusRule.enabled | bool | `false` | |

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.21.0
appVersion: 1.20.0
description: A Helm chart for the AWS Node Termination Handler.
home: https://github.com/aws/aws-node-termination-handler/
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
@@ -21,4 +21,4 @@ name: aws-node-termination-handler
sources:
- https://github.com/aws/aws-node-termination-handler/
type: application
version: 0.23.0
version: 0.22.0

View File

@@ -119,7 +119,7 @@ The configuration in this table applies to AWS Node Termination Handler in queue
| `checkASGTagBeforeDraining` | [DEPRECATED](Use `checkTagBeforeDraining` instead) If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. If `false`, disables calls to the ASG API. | `true` |
| `managedAsgTag` | [DEPRECATED](Use `managedTag` instead) The node tag to check if `checkASGTagBeforeDraining` is `true`.
| `useProviderId` | If `true`, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname. | `false` |
| `topologySpreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for pod scheduling. Useful with a highly available deployment to reduce the risk of running multiple replicas on the same Node | `[]` |
### IMDS Mode Configuration
The configuration in this table applies to AWS Node Termination Handler in IMDS mode.
@@ -174,6 +174,6 @@ The configuration in this table applies to AWS Node Termination Handler testing
## Metrics Endpoint Considerations
AWS Node Termination Handler in IMDS mode runs as a DaemonSet with `useHostNetwork: true` by default. If the Prometheus server is enabled with `enablePrometheusServer: true` nothing else will be able to bind to the configured port (by default `prometheusServerPort: 9092`) in the root network namespace. Therefore, it will need to have a firewall/security group configured on the nodes to block access to the `/metrics` endpoint.
You can switch NTH in IMDS mode to run w/ `useHostNetwork: false`, but you will need to make sure that IMDSv1 is enabled or the IMDSv2 IP hop count will need to be incremented to 2 (see the [IMDSv2 documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html)).
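
For the second option, the hop limit can be raised per instance; a sketch (the instance ID variable is illustrative):

```sh
# allow IMDSv2 answers to travel one extra network hop (pod -> node -> IMDS)
aws ec2 modify-instance-metadata-options --instance-id "$INSTANCE_ID" \
  --http-put-response-hop-limit 2 --http-tokens required
```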

View File

@@ -220,8 +220,4 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -52,8 +52,6 @@ affinity: {}
tolerations: []
topologySpreadConstraints: []
# Extra environment variables
extraEnv: []

View File

@@ -24,22 +24,21 @@ spec:
volumeMounts:
- name: host
mountPath: /host
#readOnly: true
- name: workdir
mountPath: /tmp
env:
- name: DEBUG
value: ""
- name: RESTIC_REPOSITORY
valueFrom:
secretKeyRef:
name: kubezero-backup-restic
key: repository
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: kubezero-backup-restic
key: password
{{- with .Values.clusterBackup.extraEnv }}
{{- toYaml . | nindent 12 }}
{{- end }}

View File

@@ -54,7 +54,7 @@ aws-eks-asg-rolling-update-handler:
enabled: false
image:
repository: twinproduction/aws-eks-asg-rolling-update-handler
tag: v1.8.3
tag: v1.8.2
environmentVars:
- name: CLUSTER_NAME
@@ -107,6 +107,7 @@ aws-node-termination-handler:
fullnameOverride: "aws-node-termination-handler"
checkASGTagBeforeDraining: false
# -- "zdt:kubezero:nth:${ClusterName}"
managedTag: "zdt:kubezero:nth:${ClusterName}"
@@ -160,7 +161,7 @@ awsNeuron:
image:
name: public.ecr.aws/neuron/neuron-device-plugin
tag: 2.19.16.0
tag: 2.12.5.0
nvidia-device-plugin:
enabled: false
@@ -200,7 +201,7 @@ cluster-autoscaler:
image:
repository: registry.k8s.io/autoscaling/cluster-autoscaler
tag: v1.28.2
tag: v1.27.3
autoDiscovery:
clusterName: ""
@@ -235,7 +236,7 @@ cluster-autoscaler:
# On AWS enable Projected Service Accounts to assume IAM role
#extraEnv:
# AWS_ROLE_ARN: <IamArn>
# AWS_WEB_IDENTITY_TOKEN_FILE: "/var/run/secrets/sts.amazonaws.com/serviceaccount/token"
# AWS_STS_REGIONAL_ENDPOINTS: "regional"
#extraVolumes:

View File

@@ -1,92 +0,0 @@
# kubezero-argo
![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square)
KubeZero Argo - Events, Workflow, CD
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |
## Requirements
Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 6.7.10 |
| https://argoproj.github.io/argo-helm | argo-events | 2.4.4 |
| https://argoproj.github.io/argo-helm | argocd-apps | 2.0.0 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.9.6 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.applicationSet.enabled | bool | `false` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | string | `"300s"` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.27 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.27"` | |
| argo-cd.configs.cm.url | string | `"https://argocd.example.com"` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
| argo-cd.configs.ssh.extraHosts | string | `"git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar__logo__text-logo { height: 0em; }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.resources.limits.memory | string | `"2048Mi"` | |
| argo-cd.controller.resources.requests.cpu | string | `"100m"` | |
| argo-cd.controller.resources.requests.memory | string | `"512Mi"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.enabled | bool | `false` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.istio.enabled | bool | `false` | |
| argo-cd.istio.gateway | string | `"istio-ingress/ingressgateway"` | |
| argo-cd.istio.ipBlocks | list | `[]` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argo-events.configs.jetstream.settings.maxFileStore | int | `-1` | Maximum size of the file storage (e.g. 20G) |
| argo-events.configs.jetstream.settings.maxMemoryStore | int | `-1` | Maximum size of the memory storage (e.g. 1G) |
| argo-events.configs.jetstream.streamConfig.duplicates | string | `"300s"` | Not documented at the moment |
| argo-events.configs.jetstream.streamConfig.maxAge | string | `"72h"` | Maximum age of existing messages, i.e. “72h”, “4h35m” |
| argo-events.configs.jetstream.streamConfig.maxBytes | string | `"1GB"` | |
| argo-events.configs.jetstream.streamConfig.maxMsgs | int | `1000000` | Maximum number of messages before expiring oldest message |
| argo-events.configs.jetstream.streamConfig.replicas | int | `1` | Number of replicas, defaults to 3 and requires minimal 3 |
| argo-events.configs.jetstream.versions[0].configReloaderImage | string | `"natsio/nats-server-config-reloader:0.14.1"` | |
| argo-events.configs.jetstream.versions[0].metricsExporterImage | string | `"natsio/prometheus-nats-exporter:0.14.0"` | |
| argo-events.configs.jetstream.versions[0].natsImage | string | `"nats:2.10.11-scratch"` | |
| argo-events.configs.jetstream.versions[0].startCommand | string | `"/nats-server"` | |
| argo-events.configs.jetstream.versions[0].version | string | `"2.10.11"` | |
| argo-events.enabled | bool | `false` | |
| argocd-apps.applications | object | `{}` | |
| argocd-apps.enabled | bool | `false` | |
| argocd-apps.projects | object | `{}` | |
| argocd-image-updater.authScripts.enabled | bool | `true` | |
| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.config.argocd.plaintext | bool | `true` | |
| argocd-image-updater.enabled | bool | `false` | |
| argocd-image-updater.fullnameOverride | string | `"argocd-image-updater"` | |
| argocd-image-updater.metrics.enabled | bool | `false` | |
| argocd-image-updater.metrics.serviceMonitor.enabled | bool | `true` | |
| argocd-image-updater.sshConfig.config | string | `"Host *\n PubkeyAcceptedAlgorithms +ssh-rsa\n HostkeyAlgorithms +ssh-rsa\n"` | |
## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

View File

@@ -1,186 +0,0 @@
argo-events:
enabled: false
configs:
jetstream:
# Default JetStream settings, could be overridden by EventBus JetStream spec
# Ref: https://docs.nats.io/running-a-nats-service/configuration#jetstream
settings:
# -- Maximum size of the memory storage (e.g. 1G)
maxMemoryStore: -1
# -- Maximum size of the file storage (e.g. 20G)
maxFileStore: -1
streamConfig:
# -- Maximum number of messages before expiring oldest message
maxMsgs: 1000000
# -- Maximum age of existing messages, i.e. “72h”, “4h35m”
maxAge: 72h
# Total size of messages before expiring oldest message, 0 means unlimited.
maxBytes: 1GB
# -- Number of replicas, defaults to 3 and requires minimal 3
replicas: 1
# -- Not documented at the moment
duplicates: 300s
# Supported versions of JetStream eventbus
# see: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml
# do NOT use -alpine tag as the entrypoint differs
versions:
- version: 2.10.11
natsImage: nats:2.10.11-scratch
metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0
configReloaderImage: natsio/nats-server-config-reloader:0.14.1
startCommand: /nats-server
argocd-apps:
enabled: false
projects: {}
applications: {}
argo-cd:
enabled: false
#configs:
# secret:
# `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
global:
logging:
format: json
# image:
# tag: v2.1.6
configs:
styles: |
.sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
.sidebar__logo__text-logo { height: 0em; }
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
cm:
ui.bannercontent: "KubeZero v1.28 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.28"
ui.bannerpermanent: "true"
ui.bannerposition: "bottom"
# argo-cd.server.config.url -- ArgoCD URL being exposed via Istio
url: https://argocd.example.com
timeout.reconciliation: 300s
resource.customizations: |
cert-manager.io/Certificate:
# Lua script for customizing the health status assessment
health.lua: |
hs = {}
if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
if condition.type == "Ready" and condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
end
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for certificate"
return hs
secret:
createSecret: false
ssh:
extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="
params:
controller.status.processors: "10"
controller.operation.processors: "5"
server.insecure: true
server.enable.gzip: true
controller:
metrics:
enabled: false
serviceMonitor:
enabled: true
resources:
limits:
# cpu: 500m
memory: 2048Mi
requests:
cpu: 100m
memory: 512Mi
repoServer:
metrics:
enabled: false
serviceMonitor:
enabled: true
server:
# Rename former https port to grpc, works with istio + insecure
service:
servicePortHttpsName: grpc
metrics:
enabled: false
serviceMonitor:
enabled: true
# redis:
# We might want to try to keep redis close to the controller
# affinity:
dex:
enabled: false
applicationSet:
enabled: false
notifications:
enabled: false
# Support for Istio Ingress for ArgoCD
istio:
# istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
enabled: false
# istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: istio-ingress/ingressgateway
ipBlocks: []
argocd-image-updater:
enabled: false
# Unify all ArgoCD pieces under the same argocd namespace
fullnameOverride: argocd-image-updater
config:
argocd:
plaintext: true
metrics:
enabled: false
serviceMonitor:
enabled: true
authScripts:
enabled: true
scripts:
ecr-login.sh: |
#!/bin/sh
aws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d
ecr-public-login.sh: |
#!/bin/sh
aws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d
sshConfig:
config: |
Host *
PubkeyAcceptedAlgorithms +ssh-rsa
HostkeyAlgorithms +ssh-rsa

View File

@@ -1,14 +1,13 @@
apiVersion: v2
description: KubeZero Argo - Events, Workflow, CD
name: kubezero-argo
version: 0.2.2
description: KubeZero ArgoCD - config, branding, image-updater (optional)
name: kubezero-argocd
version: 0.13.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- argocd
- argo-events
- argo-workflow
- argocd-image-updater
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@@ -17,20 +16,14 @@ dependencies:
- name: kubezero-lib
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: argo-events
version: 2.4.4
repository: https://argoproj.github.io/argo-helm
condition: argo-events.enabled
- name: argo-cd
version: 6.9.2
version: 5.43.5
repository: https://argoproj.github.io/argo-helm
condition: argo-cd.enabled
- name: argocd-apps
version: 2.0.0
version: 1.4.1
repository: https://argoproj.github.io/argo-helm
condition: argo-cd.enabled
- name: argocd-image-updater
version: 0.10.0
version: 0.9.1
repository: https://argoproj.github.io/argo-helm
condition: argocd-image-updater.enabled
kubeVersion: ">= 1.26.0"

View File

@@ -0,0 +1,74 @@
# kubezero-argocd
![Version: 0.12.0](https://img.shields.io/badge/Version-0.12.0-informational?style=flat-square)
KubeZero ArgoCD - config, branding, image-updater (optional)
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |
## Requirements
Kubernetes: `>= 1.25.0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 5.28.2 |
| https://argoproj.github.io/argo-helm | argocd-apps | 0.0.9 |
| https://argoproj.github.io/argo-helm | argocd-image-updater | 0.8.5 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| argo-cd.applicationSet.enabled | bool | `false` | |
| argo-cd.configs.cm."resource.customizations" | string | `"cert-manager.io/Certificate:\n # Lua script for customizing the health status assessment\n health.lua: |\n hs = {}\n if obj.status ~= nil then\n if obj.status.conditions ~= nil then\n for i, condition in ipairs(obj.status.conditions) do\n if condition.type == \"Ready\" and condition.status == \"False\" then\n hs.status = \"Degraded\"\n hs.message = condition.message\n return hs\n end\n if condition.type == \"Ready\" and condition.status == \"True\" then\n hs.status = \"Healthy\"\n hs.message = condition.message\n return hs\n end\n end\n end\n end\n hs.status = \"Progressing\"\n hs.message = \"Waiting for certificate\"\n return hs\n"` | |
| argo-cd.configs.cm."timeout.reconciliation" | int | `300` | |
| argo-cd.configs.cm."ui.bannercontent" | string | `"KubeZero v1.25 - Release notes"` | |
| argo-cd.configs.cm."ui.bannerpermanent" | string | `"true"` | |
| argo-cd.configs.cm."ui.bannerposition" | string | `"bottom"` | |
| argo-cd.configs.cm."ui.bannerurl" | string | `"https://kubezero.com/releases/v1.25"` | |
| argo-cd.configs.cm.url | string | `"argocd.example.com"` | |
| argo-cd.configs.knownHosts.data.ssh_known_hosts | string | `"bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE=\nbitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO\nbitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M=\ngithub.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=\ngithub.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl\ngithub.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf\ngitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngit.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==\n"` | |
| argo-cd.configs.params."controller.operation.processors" | string | `"5"` | |
| argo-cd.configs.params."controller.status.processors" | string | `"10"` | |
| argo-cd.configs.params."server.enable.gzip" | bool | `true` | |
| argo-cd.configs.params."server.insecure" | bool | `true` | |
| argo-cd.configs.secret.createSecret | bool | `false` | |
| argo-cd.configs.styles | string | `".sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }\n.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }\n"` | |
| argo-cd.controller.metrics.enabled | bool | `false` | |
| argo-cd.controller.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.controller.resources.requests.cpu | string | `"100m"` | |
| argo-cd.controller.resources.requests.memory | string | `"256Mi"` | |
| argo-cd.dex.enabled | bool | `false` | |
| argo-cd.global.logging.format | string | `"json"` | |
| argo-cd.notifications.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.enabled | bool | `false` | |
| argo-cd.repoServer.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.metrics.enabled | bool | `false` | |
| argo-cd.server.metrics.serviceMonitor.enabled | bool | `true` | |
| argo-cd.server.service.servicePortHttpsName | string | `"grpc"` | |
| argocd-apps.applications | list | `[]` | |
| argocd-apps.projects | list | `[]` | |
| argocd-image-updater.authScripts.enabled | bool | `true` | |
| argocd-image-updater.authScripts.scripts."ecr-login.sh" | string | `"#!/bin/sh\naws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.authScripts.scripts."ecr-public-login.sh" | string | `"#!/bin/sh\naws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d\n"` | |
| argocd-image-updater.config.argocd.plaintext | bool | `true` | |
| argocd-image-updater.enabled | bool | `false` | |
| argocd-image-updater.fullnameOverride | string | `"argocd-image-updater"` | |
| argocd-image-updater.metrics.enabled | bool | `false` | |
| argocd-image-updater.metrics.serviceMonitor.enabled | bool | `true` | |
| argocd-image-updater.sshConfig.config | string | `"Host *\n PubkeyAcceptedAlgorithms +ssh-rsa\n HostkeyAlgorithms +ssh-rsa\n"` | |
| istio.enabled | bool | `false` | Deploy Istio VirtualService to expose ArgoCD |
| istio.gateway | string | `"istio-ingress/ingressgateway"` | Name of the Istio gateway to add the VirtualService to |
| istio.ipBlocks | list | `[]` | |
## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

View File

@ -18,4 +18,3 @@
## Resources
- https://argoproj.github.io/argo-cd/operator-manual/metrics/
- https://raw.githubusercontent.com/argoproj/argo-cd/master/examples/dashboard.json

View File

@ -1,5 +1,5 @@
{{- if index .Values "argo-cd" "istio" "enabled" }}
{{- if index .Values "argo-cd" "istio" "ipBlocks" }}
{{- if .Values.istio.enabled }}
{{- if .Values.istio.ipBlocks }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
@ -16,7 +16,7 @@ spec:
- from:
- source:
notIpBlocks:
{{- toYaml (index .Values "argo-cd" "istio" "ipBlocks") | nindent 8 }}
{{- toYaml .Values.istio.ipBlocks | nindent 8 }}
to:
- operation:
hosts: [{{ index .Values "argo-cd" "configs" "cm" "url" | quote }}]

View File

@ -1,4 +1,4 @@
{{- if index .Values "argo-cd" "istio" "enabled" }}
{{- if .Values.istio.enabled }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
@ -8,9 +8,9 @@ metadata:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
gateways:
- {{ index .Values "argo-cd" "istio" "gateway" }}
- {{ .Values.istio.gateway }}
hosts:
- {{ get (urlParse (index .Values "argo-cd" "configs" "cm" "url")) "host" }}
- {{ index .Values "argo-cd" "configs" "cm" "url" }}
http:
- name: grpc
match:
@ -19,13 +19,13 @@ spec:
prefix: argocd-client
route:
- destination:
host: argo-argocd-server
host: argocd-server
port:
number: 443
- name: http
route:
- destination:
host: argo-argocd-server
host: argocd-server
port:
number: 80
{{- end }}

View File

@ -5,6 +5,4 @@
update_helm
# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/argo-cd/grafana-dashboards.yaml
update_docs
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml

View File

@ -0,0 +1,162 @@
# Support for Istio Ingress for ArgoCD
istio:
# istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
enabled: false
# istio.gateway -- Name of the Istio gateway to add the VirtualService to
gateway: istio-ingress/ingressgateway
ipBlocks: []
argocd-apps:
projects: []
applications: []
argo-cd:
#configs:
# secret:
# `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
# argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
# argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"
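# A minimal sketch for generating both values above, assuming `htpasswd`
# (apache2-utils) and GNU date are available locally:
#   ARGO_PWD='new-password'
#   htpasswd -nbBC 10 "" "$ARGO_PWD" | tr -d ':\n' | sed 's/$2y/$2a/'  # -> argocdServerAdminPassword
#   date +%FT%T%Z                                                      # -> argocdServerAdminPasswordMtime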
global:
logging:
format: json
# image:
# tag: v2.1.6
configs:
styles: |
.sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
.sidebar__logo__text-logo { height: 0em; }
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
cm:
ui.bannercontent: "KubeZero v1.26 - Release notes"
ui.bannerurl: "https://kubezero.com/releases/v1.26"
ui.bannerpermanent: "true"
ui.bannerposition: "bottom"
# argo-cd.server.config.url -- ArgoCD hostname to be exposed via Istio
url: argocd.example.com
timeout.reconciliation: 300
resource.customizations: |
cert-manager.io/Certificate:
# Lua script for customizing the health status assessment
health.lua: |
hs = {}
if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
if condition.type == "Ready" and condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
end
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for certificate"
return hs
secret:
createSecret: false
knownHosts:
data:
ssh_known_hosts: |
bitbucket.org ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPIQmuzMBuKdWeF4+a2sjSSpBK0iqitSQ+5BM9KhpexuGt20JpTVM7u5BDZngncgrqDMbWdxMWWOGtZ9UgbqgZE=
bitbucket.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIazEu89wgQZ4bqs3d63QSMzYVa0MuJ2e2gKTKqu+UUO
bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDQeJzhupRu0u0cdegZIa8e86EG2qOCsIsD1Xw0xSeiPDlCr7kq97NLmMbpKTX6Esc30NuoqEEHCuc7yWtwp8dI76EEEB1VqY9QJq6vk+aySyboD5QF61I/1WeTwu+deCbgKMGbUijeXhtfbxSxm6JwGrXrhBdofTsbKRUsrN1WoNgUa8uqN1Vx6WAJw1JHPhglEGGHea6QICwJOAr/6mrui/oB7pkaWKHj3z7d1IC4KWLtY47elvjbaTlkN04Kc/5LFEirorGYVbt15kAUlqGM65pk6ZBxtaO3+30LVlORZkxOh+LKL/BvbZ/iRNhItLqNyieoQj/uh/7Iv4uyH/cV/0b4WDSd3DptigWq84lJubb9t/DnZlrJazxyDCulTmKdOR7vs9gMTo+uoIrPSb8ScTtvw65+odKAlBj59dhnVp9zd7QUojOpXlL62Aw56U4oO+FALuevvMjiWeavKhJqlR7i5n9srYcrNV7ttmDw7kf/97P5zauIhxcjX+xHv4M=
github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=
gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf
gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9
git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ==
params:
controller.status.processors: "10"
controller.operation.processors: "5"
server.insecure: true
server.enable.gzip: true
controller:
metrics:
enabled: false
serviceMonitor:
enabled: true
resources:
# limits:
# cpu: 500m
# memory: 2048Mi
requests:
cpu: 100m
memory: 256Mi
repoServer:
metrics:
enabled: false
serviceMonitor:
enabled: true
server:
# Rename the former https port to grpc; works with Istio + insecure mode
service:
servicePortHttpsName: grpc
metrics:
enabled: false
serviceMonitor:
enabled: true
# redis:
# We might want to try to keep redis close to the controller
# affinity:
dex:
enabled: false
applicationSet:
enabled: false
notifications:
enabled: false
argocd-image-updater:
enabled: false
# Unify all ArgoCD pieces under the same argocd namespace
fullnameOverride: argocd-image-updater
config:
argocd:
plaintext: true
metrics:
enabled: false
serviceMonitor:
enabled: true
authScripts:
enabled: true
scripts:
ecr-login.sh: |
#!/bin/sh
aws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d
ecr-public-login.sh: |
#!/bin/sh
aws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d
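# A hypothetical registries entry (merged into the config: block above) wiring
# up the scripts; the account ID, region and the /scripts mount path are
# assumptions for illustration, not chart defaults:
#config:
#  registries:
#    - name: ECR
#      api_url: https://123456789012.dkr.ecr.eu-central-1.amazonaws.com
#      prefix: 123456789012.dkr.ecr.eu-central-1.amazonaws.com
#      credentials: ext:/scripts/ecr-login.sh
#      credsexpire: 10h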
sshConfig:
config: |
Host *
PubkeyAcceptedAlgorithms +ssh-rsa
HostkeyAlgorithms +ssh-rsa

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-auth
description: KubeZero umbrella chart for all things Authentication and Identity management
type: application
version: 0.4.6
version: 0.4.3
appVersion: 22.0.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -17,7 +17,7 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: keycloak
version: 18.7.1
version: 17.3.0
repository: "oci://registry-1.docker.io/bitnamicharts"
condition: keycloak.enabled
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-auth
![Version: 0.4.5](https://img.shields.io/badge/Version-0.4.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 22.0.5](https://img.shields.io/badge/AppVersion-22.0.5-informational?style=flat-square)
![Version: 0.4.3](https://img.shields.io/badge/Version-0.4.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 22.0.5](https://img.shields.io/badge/AppVersion-22.0.5-informational?style=flat-square)
KubeZero umbrella chart for all things Authentication and Identity management
@ -19,7 +19,7 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| oci://registry-1.docker.io/bitnamicharts | keycloak | 18.3.2 |
| oci://registry-1.docker.io/bitnamicharts | keycloak | 17.3.0 |
# Keycloak

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.9.7
version: 0.9.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,6 +16,6 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: cert-manager
version: v1.14.4
version: v1.12.3
repository: https://charts.jetstack.io
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-cert-manager
![Version: 0.9.7](https://img.shields.io/badge/Version-0.9.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.9.5](https://img.shields.io/badge/Version-0.9.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for cert-manager
@ -19,7 +19,7 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jetstack.io | cert-manager | v1.14.4 |
| https://charts.jetstack.io | cert-manager | v1.12.3 |
## AWS - OIDC IAM roles
@ -37,7 +37,6 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.enableCertificateOwnerRef | bool | `true` | |
| cert-manager.enabled | bool | `true` | |
| cert-manager.extraArgs[0] | string | `"--logging-format=json"` | |
| cert-manager.extraArgs[1] | string | `"--leader-elect=false"` | |

View File

@ -1,15 +1,6 @@
{
"version": 1,
"dependencies": [
{
"source": {
"git": {
"remote": "https://github.com/imusmanmalik/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "main"
},
{
"source": {
"git": {
@ -18,6 +9,15 @@
}
},
"version": "main"
},
{
"source": {
"git": {
"remote": "https://gitlab.com/uneeq-oss/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "master"
}
],
"legacyImports": true

View File

@ -18,8 +18,8 @@
"subdir": "contrib/mixin"
}
},
"version": "5a53a708d8ab9ef936ac5b8062ffc66c77a2c18f",
"sum": "xuUBd2vqF7asyVDe5CE08uPT/RxAdy8O75EjFJoMXXU="
"version": "e2e17c75fe1006ea44b6ad793fa7b23f5e3546f4",
"sum": "GdePvMDfLQcVhwzk/Ephi/jC27ywGObLB5t0eC0lXd4="
},
{
"source": {
@ -51,36 +51,6 @@
"version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
"sum": "gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-latest"
}
},
"version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
"sum": "GxEO83uxgsDclLp/fmlUJZDbSGpeUZY6Ap3G2cgdL1g="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-v10.0.0"
}
},
"version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
"sum": "W7sLuAvMSJPkC7Oo31t45Nz/cUdJV7jzNSJTd3F1daM="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-v10.4.0"
}
},
"version": "6ac1593ca787638da223380ff4a3fd0f96e953e1",
"sum": "ZSmDT7i/qU9P8ggmuPuJT+jonq1ZEsBRCXycW/H5L/A="
},
{
"source": {
"git": {
@ -88,38 +58,8 @@
"subdir": "grafana-builder"
}
},
"version": "7561fd330312538d22b00e0c7caecb4ba66321ea",
"sum": "+z5VY+bPBNqXcmNAV8xbJcbsRA+pro1R3IM7aIY8OlU="
},
{
"source": {
"git": {
"remote": "https://github.com/imusmanmalik/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "72a094ff162bbd93921803994241d73900592c9a",
"sum": "h+YvBTXL5A02165i3yt3SxSAbFftChtXYJ0nYFnOAqo="
},
{
"source": {
"git": {
"remote": "https://github.com/jsonnet-libs/docsonnet.git",
"subdir": "doc-util"
}
},
"version": "6ac6c69685b8c29c54515448eaca583da2d88150",
"sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U="
},
{
"source": {
"git": {
"remote": "https://github.com/jsonnet-libs/xtd.git",
"subdir": ""
}
},
"version": "fc2e57a8839902ed4ba6cab5a99d642500f7102b",
"sum": "43waffw1QzvpY4rKcWoo3L7Vpee+DCYexwLDd5cPG0M="
"version": "62aec8403a5c38d5dc97ba596703753289b1c33b",
"sum": "xEFMv4+ObwP5L1Wu0XK5agWci4AJzNApys6iKAQxLlQ="
},
{
"source": {
@ -128,8 +68,8 @@
"subdir": ""
}
},
"version": "a1c276d7a46c4b06fa5d8b4a64441939d398efe5",
"sum": "b/mEai1MvVnZ22YvZlXEO4jWDZledrtJg8eOS1ZUj0M="
"version": "46fc905d5b2981642043088ac7902ea50db2903e",
"sum": "8FAie1MXww5Ip9F8hQWkU9Fio1Af+hO4weQuuexioIQ="
},
{
"source": {
@ -138,8 +78,8 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "9ba1c3702142918e09e8eb5ca530e15198624259",
"sum": "msMZyUvcebzRILLzNlTIiSOwa1XgQKtP7jbZTkiqwM0="
"version": "570970378edf10655dd81e662658359eb10d9329",
"sum": "+dOzAK+fwsFf97uZpjcjTcEJEC1H8hh/j8f5uIQK/5g="
},
{
"source": {
@ -148,7 +88,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "9ba1c3702142918e09e8eb5ca530e15198624259",
"version": "570970378edf10655dd81e662658359eb10d9329",
"sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
},
{
@ -158,8 +98,8 @@
"subdir": "jsonnet/kube-prometheus"
}
},
"version": "76f2e1ef95be0df752037baa040781c5219e1fb3",
"sum": "IgpAgyyBZ7VT2vr9kSYQP/lkZUNQnbqpGh2sYCtUKs0="
"version": "4b5b94347dd71b3649fef612ab3b8cf237ac48b9",
"sum": "8AeC579AWxP6VzLTxQ/ccIrwOY0G782ZceLlWmOL5/o="
},
{
"source": {
@ -168,8 +108,8 @@
"subdir": "jsonnet/mixin"
}
},
"version": "71d9433ba612f4b826ffa38520b23a7985b50db3",
"sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
"version": "8b947d4ff1329440a46903c16f05717b24170061",
"sum": "n3flMIzlADeyygb0uipZ4KPp2uNSjdtkrwgHjTC7Ca4=",
"name": "prometheus-operator-mixin"
},
{
@ -179,8 +119,8 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "71d9433ba612f4b826ffa38520b23a7985b50db3",
"sum": "S4LFa0h1AzANixqGMowtwVswVP+y6f+fXloxpO7hMes="
"version": "8b947d4ff1329440a46903c16f05717b24170061",
"sum": "LLGbS2uangsA5enNpZKxwdCAPZnO1Bj+W+o8Esk0QLw="
},
{
"source": {
@ -189,8 +129,8 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "14cbe6301c732658d6fe877ec55ad5b738abcf06",
"sum": "IpF46ZXsm+0wJJAPtAre8+yxTNZA57mBqGpBP/r7/kw=",
"version": "6fe1a24df07eed6f6818abd500708040beee7d7b",
"sum": "1d7ZKYArJKacAWXLUz0bRC1uOkozee/PPw97/W5zGhc=",
"name": "alertmanager"
},
{
@ -200,8 +140,8 @@
"subdir": "docs/node-mixin"
}
},
"version": "3accd4cf8286e69d70516abdced6bf186274322a",
"sum": "vWhHvFqV7+fxrQddTeGVKi1e4EzB3VWtNyD8TjSmevY="
"version": "f2b274350a07bfd8afcad1a62ef561f8a303fcc2",
"sum": "By6n6U10hYDogUsyhsaKZehbhzxBZZobJloiKyKadgM="
},
{
"source": {
@ -210,8 +150,8 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "773170f372e0a57949854b74231ee3e09185f728",
"sum": "u/Fpz2MPkezy71/q+c7mF0vc3hE9fWt2W/YbvF0LP/8=",
"version": "4d8e380269da5912265274469ff873142bbbabc3",
"sum": "8OngT76gVXOUROOOeP9yTe6E/dn+2D2J34Dn690QCG0=",
"name": "prometheus"
},
{
@ -221,9 +161,8 @@
"subdir": "config/crd/bases"
}
},
"version": "551856d42dff02ec38c5b0ea6a2d99c4cb127e82",
"sum": "bY/Pcrrbynguq8/HaI88cQ3B2hLv/xc+76QILY7IL+g=",
"name": "pyrra"
"version": "2b8c6d372d90942c3b53a9b225a82441be8c5b7b",
"sum": "L3lljFFoFB+nhXnyo8Yl1hKqe60nhHXY0IZCO3H2iVk="
},
{
"source": {
@ -232,9 +171,19 @@
"subdir": "mixin"
}
},
"version": "93c79b61825ec00889188e35a58635eee247bc36",
"sum": "HhSSbGGCNHCMy1ee5jElYDm0yS9Vesa7QB2/SHKdjsY=",
"version": "8fcd30ffcedf9e2728518dc2970d070d4c301302",
"sum": "WhheqsiX0maUXByZFsb9xhCEsGXK2955bPmPPf1x+Cs=",
"name": "thanos-mixin"
},
{
"source": {
"git": {
"remote": "https://gitlab.com/uneeq-oss/cert-manager-mixin.git",
"subdir": ""
}
},
"version": "eae22f642aaa5d422e4766f6811df2158fc05539",
"sum": "DOg3fzS0OWrjjRPVsKgxID/rk9AC3ESQ4gDELc2RNgM="
}
],
"legacyImports": false

View File

@ -2,7 +2,7 @@ local addMixin = (import 'kube-prometheus/lib/mixin.libsonnet');
local certManagerMixin = addMixin({
name: 'cert-manager',
mixin: (import 'github.com/imusmanmalik/cert-manager-mixin/mixin.libsonnet')
mixin: (import 'gitlab.com/uneeq-oss/cert-manager-mixin/mixin.libsonnet')
});
{ 'cert-manager-mixin-prometheusRule': certManagerMixin.prometheusRules }

View File

@ -17,8 +17,8 @@
"alert": "CertManagerAbsent",
"annotations": {
"description": "New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.",
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerabsent",
"summary": "Cert Manager has disappeared from Prometheus service discovery."
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent",
"summary": "Cert Manager has dissapeared from Prometheus service discovery."
},
"expr": "absent(up{job=\"cert-manager\"})",
"for": "10m",
@ -36,7 +36,7 @@
"annotations": {
"dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
"description": "The domain that this cert covers will be unavailable after {{ $value | humanizeDuration }}. Clients using endpoints that this cert protects will start to fail in {{ $value | humanizeDuration }}.",
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertexpirysoon",
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon",
"summary": "The cert `{{ $labels.name }}` is {{ $value | humanizeDuration }} from expiry, it should have renewed over a week ago."
},
"expr": "avg by (exported_namespace, namespace, name) (\n certmanager_certificate_expiration_timestamp_seconds - time()\n) < (21 * 24 * 3600) # 21 days in seconds\n",
@ -50,7 +50,7 @@
"annotations": {
"dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
"description": "This certificate has not been ready to serve traffic for at least 10m. If the cert is being renewed or there is another valid cert, the ingress controller _may_ be able to serve that instead.",
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertnotready",
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready",
"summary": "The cert `{{ $labels.name }}` is not ready to serve traffic."
},
"expr": "max by (name, exported_namespace, namespace, condition) (\n certmanager_certificate_ready_status{condition!=\"True\"} == 1\n)\n",
@ -64,7 +64,7 @@
"annotations": {
"dashboard_url": "https://grafana.example.com/d/TvuRo2iMk/cert-manager",
"description": "Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week.",
"runbook_url": "https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerhittingratelimits",
"runbook_url": "https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits",
"summary": "Cert manager hitting LetsEncrypt rate limits."
},
"expr": "sum by (host) (\n rate(certmanager_http_acme_client_request_count{status=\"429\"}[5m])\n) > 0\n",

View File

@ -13,8 +13,8 @@ spec:
- alert: CertManagerAbsent
annotations:
description: New certificates will not be able to be minted, and existing ones can't be renewed until cert-manager is back.
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerabsent
summary: Cert Manager has disappeared from Prometheus service discovery.
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerabsent
summary: Cert Manager has disappeared from Prometheus service discovery.
expr: absent(up{job="cert-manager"})
for: 10m
labels:
@ -25,7 +25,7 @@ spec:
annotations:
dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
description: The domain that this cert covers will be unavailable after {{`{{`}} $value | humanizeDuration {{`}}`}}. Clients using endpoints that this cert protects will start to fail in {{`{{`}} $value | humanizeDuration {{`}}`}}.
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertexpirysoon
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertexpirysoon
summary: The cert `{{`{{`}} $labels.name {{`}}`}}` is {{`{{`}} $value | humanizeDuration {{`}}`}} from expiry, it should have renewed over a week ago.
expr: "avg by (exported_namespace, namespace, name) (\n certmanager_certificate_expiration_timestamp_seconds - time()\n) < (21 * 24 * 3600) # 21 days in seconds\n"
for: 1h
@ -35,7 +35,7 @@ spec:
annotations:
dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
description: This certificate has not been ready to serve traffic for at least 10m. If the cert is being renewed or there is another valid cert, the ingress controller _may_ be able to serve that instead.
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagercertnotready
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagercertnotready
summary: The cert `{{`{{`}} $labels.name {{`}}`}}` is not ready to serve traffic.
expr: "max by (name, exported_namespace, namespace, condition) (\n certmanager_certificate_ready_status{condition!=\"True\"} == 1\n)\n"
for: 10m
@ -45,7 +45,7 @@ spec:
annotations:
dashboard_url: https://grafana.example.com/d/TvuRo2iMk/cert-manager
description: Depending on the rate limit, cert-manager may be unable to generate certificates for up to a week.
runbook_url: https://github.com/imusmanmalik/cert-manager-mixin/blob/main/RUNBOOK.md#certmanagerhittingratelimits
runbook_url: https://gitlab.com/uneeq-oss/cert-manager-mixin/-/blob/master/RUNBOOK.md#certmanagerhittingratelimits
summary: Cert manager hitting LetsEncrypt rate limits.
expr: "sum by (host) (\n rate(certmanager_http_acme_client_request_count{status=\"429\"}[5m])\n) > 0\n"
for: 5m

View File

@ -8,7 +8,7 @@ update_helm
update_jsonnet
# Install cert-manager mixin
jb install github.com/imusmanmalik/cert-manager-mixin@main
jb install gitlab.com/uneeq-oss/cert-manager-mixin@master
# Install rules
rm -rf rules && mkdir -p rules
@ -17,5 +17,3 @@ jsonnet -J vendor -m rules rules.jsonnet
# Fetch dashboards from Grafana.com and update ZDT CM
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
update_docs

View File

@ -23,9 +23,6 @@ cert-manager:
leaderElection:
namespace: "cert-manager"
# remove secrets if the cert is deleted
enableCertificateOwnerRef: true
extraArgs:
- "--logging-format=json"
- "--leader-elect=false"

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.8.11
version: 0.8.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -18,11 +18,11 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: gitea
version: 10.1.4
version: 9.6.0
repository: https://dl.gitea.io/charts/
condition: gitea.enabled
- name: jenkins
version: 5.1.18
version: 4.8.3
repository: https://charts.jenkins.io
condition: jenkins.enabled
- name: trivy
@ -30,7 +30,7 @@ dependencies:
repository: https://aquasecurity.github.io/helm-charts/
condition: trivy.enabled
- name: renovate
version: 37.368.2
version: 37.64.3
repository: https://docs.renovatebot.com/helm-charts
condition: renovate.enabled
kubeVersion: ">= 1.25.0"

View File

@ -1,6 +1,6 @@
# kubezero-ci
![Version: 0.8.11](https://img.shields.io/badge/Version-0.8.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.3](https://img.shields.io/badge/Version-0.8.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI
@ -20,9 +20,9 @@ Kubernetes: `>= 1.25.0`
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.7.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 5.1.18 |
| https://dl.gitea.io/charts/ | gitea | 10.1.4 |
| https://docs.renovatebot.com/helm-charts | renovate | 37.368.2 |
| https://charts.jenkins.io | jenkins | 4.8.3 |
| https://dl.gitea.io/charts/ | gitea | 9.6.0 |
| https://docs.renovatebot.com/helm-charts | renovate | 36.109.4 |
# Jenkins
- default build retention 10 builds, 32 days
@ -48,30 +48,21 @@ Kubernetes: `>= 1.25.0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| gitea.checkDeprecation | bool | `false` | |
| gitea.enabled | bool | `false` | |
| gitea.extraVolumeMounts[0].mountPath | string | `"/data/gitea/public/assets/css"` | |
| gitea.extraVolumeMounts[0].name | string | `"gitea-themes"` | |
| gitea.extraVolumeMounts[0].readOnly | bool | `true` | |
| gitea.extraVolumes[0].configMap.name | string | `"gitea-kubezero-ci-themes"` | |
| gitea.extraVolumes[0].name | string | `"gitea-themes"` | |
| gitea.gitea.admin.existingSecret | string | `"gitea-admin-secret"` | |
| gitea.gitea.config.cache.ADAPTER | string | `"memory"` | |
| gitea.gitea.config.database.DB_TYPE | string | `"sqlite3"` | |
| gitea.gitea.config.log.LEVEL | string | `"warn"` | |
| gitea.gitea.config.queue.TYPE | string | `"level"` | |
| gitea.gitea.config.session.PROVIDER | string | `"memory"` | |
| gitea.gitea.config.ui.DEFAULT_THEME | string | `"github-dark"` | |
| gitea.gitea.config.ui.THEMES | string | `"gitea,github-dark"` | |
| gitea.gitea.demo | bool | `false` | |
| gitea.gitea.metrics.enabled | bool | `false` | |
| gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
| gitea.image.rootless | bool | `true` | |
| gitea.image.tag | string | `"1.21.11"` | |
| gitea.istio.enabled | bool | `false` | |
| gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| gitea.istio.url | string | `"git.example.com"` | |
| gitea.persistence.claimName | string | `"data-gitea-0"` | |
| gitea.persistence.create | bool | `false` | |
| gitea.persistence.enabled | bool | `true` | |
| gitea.persistence.mount | bool | `true` | |
| gitea.persistence.size | string | `"4Gi"` | |
| gitea.postgresql-ha.enabled | bool | `false` | |
| gitea.postgresql.enabled | bool | `false` | |
@ -84,31 +75,23 @@ Kubernetes: `>= 1.25.0`
| gitea.securityContext.capabilities.add[0] | string | `"SYS_CHROOT"` | |
| gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
| gitea.strategy.type | string | `"Recreate"` | |
| gitea.test.enabled | bool | `false` | |
| jenkins.agent.annotations."container.apparmor.security.beta.kubernetes.io/jnlp" | string | `"unconfined"` | |
| jenkins.agent.containerCap | int | `2` | |
| jenkins.agent.customJenkinsLabels[0] | string | `"podman-aws-trivy"` | |
| jenkins.agent.idleMinutes | int | `30` | |
| jenkins.agent.image.repository | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.image.tag | string | `"v0.5.0"` | |
| jenkins.agent.image | string | `"public.ecr.aws/zero-downtime/jenkins-podman"` | |
| jenkins.agent.podName | string | `"podman-aws"` | |
| jenkins.agent.podRetention | string | `"Default"` | |
| jenkins.agent.resources.limits.cpu | string | `""` | |
| jenkins.agent.resources.limits.memory | string | `""` | |
| jenkins.agent.resources.requests.cpu | string | `""` | |
| jenkins.agent.resources.requests.memory | string | `""` | |
| jenkins.agent.serviceAccount | string | `"jenkins-podman-aws"` | |
| jenkins.agent.showRawYaml | bool | `false` | |
| jenkins.agent.tag | string | `"v0.4.5"` | |
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n containers:\n - name: jnlp\n resources:\n requests:\n cpu: \"512m\"\n memory: \"1024Mi\"\n limits:\n cpu: \"4\"\n memory: \"6144Mi\"\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nappearance:\n themeManager:\n disableUserThemes: true\n theme: \"dark\"\nunclassified:\n openTelemetry:\n configurationProperties: |-\n otel.exporter.otlp.protocol=grpc\n otel.instrumentation.jenkins.web.enabled=false\n ignoredSteps: \"dir,echo,isUnix,pwd,properties\"\n #endpoint: \"telemetry-jaeger-collector.telemetry:4317\"\n exportOtelConfigurationAsEnvironmentVariables: false\n #observabilityBackends:\n # - jaeger:\n # jaegerBaseUrl: \"https://jaeger.example.com\"\n # name: \"KubeZero Jaeger\"\n serviceName: \"Jenkins\"\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
| jenkins.controller.containerEnv[0].name | string | `"OTEL_LOGS_EXPORTER"` | |
| jenkins.controller.containerEnv[0].value | string | `"none"` | |
| jenkins.controller.containerEnv[1].name | string | `"OTEL_METRICS_EXPORTER"` | |
| jenkins.controller.containerEnv[1].value | string | `"none"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n requests:\n cpu: \"512m\"\n memory: \"1024Mi\"\n limits:\n cpu: \"4\"\n memory: \"6144Mi\"\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nappearance:\n themeManager:\n disableUserThemes: true\n theme: \"dark\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
| jenkins.controller.disableRememberMe | bool | `true` | |
| jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | |
| jenkins.controller.image.tag | string | `"alpine-jdk17"` | |
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
@ -117,8 +100,6 @@ Kubernetes: `>= 1.25.0`
| jenkins.controller.installPlugins[11] | string | `"build-discarder"` | |
| jenkins.controller.installPlugins[12] | string | `"dark-theme"` | |
| jenkins.controller.installPlugins[13] | string | `"matrix-auth"` | |
| jenkins.controller.installPlugins[14] | string | `"reverse-proxy-auth-plugin"` | |
| jenkins.controller.installPlugins[15] | string | `"opentelemetry"` | |
| jenkins.controller.installPlugins[1] | string | `"kubernetes-credentials-provider"` | |
| jenkins.controller.installPlugins[2] | string | `"workflow-aggregator"` | |
| jenkins.controller.installPlugins[3] | string | `"git"` | |
@ -134,6 +115,7 @@ Kubernetes: `>= 1.25.0`
| jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
| jenkins.controller.resources.requests.cpu | string | `"250m"` | |
| jenkins.controller.resources.requests.memory | string | `"1280Mi"` | |
| jenkins.controller.tag | string | `"alpine-jdk17"` | |
| jenkins.controller.testEnabled | bool | `false` | |
| jenkins.enabled | bool | `false` | |
| jenkins.istio.agent.enabled | bool | `false` | |
@ -157,7 +139,7 @@ Kubernetes: `>= 1.25.0`
| renovate.env.LOG_FORMAT | string | `"json"` | |
| renovate.securityContext.fsGroup | int | `1000` | |
| trivy.enabled | bool | `false` | |
| trivy.image.tag | string | `"0.50.1"` | |
| trivy.image.tag | string | `"0.45.1"` | |
| trivy.persistence.enabled | bool | `true` | |
| trivy.persistence.size | string | `"1Gi"` | |
| trivy.rbac.create | bool | `false` | |

View File

@ -1,10 +1,6 @@
#!/bin/bash
set -ex
. ../../scripts/lib-update.sh
#login_ecr_public
update_helm
helm dep update
# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboard-jenkins.yaml templates/jenkins/grafana-dashboard.yaml

View File

@ -1,9 +1,9 @@
gitea:
enabled: false
image:
tag: 1.21.11
rootless: true
#image:
#tag: 1.17.4
#rootless: true
replicaCount: 1
@ -13,7 +13,10 @@ gitea:
# Since v9 the chart defaults to RWX and a Deployment; we keep the old existing RWO claim from the StatefulSet
persistence:
claimName: data-gitea-0
enabled: true
mount: true
create: false
#claimName: <set per install>
size: 4Gi
securityContext:
@ -39,7 +42,7 @@ gitea:
extraVolumeMounts:
- name: gitea-themes
readOnly: true
mountPath: "/data/gitea/public/assets/css"
mountPath: "/data/gitea/public/assets/css"
checkDeprecation: false
test:
@ -69,8 +72,6 @@ gitea:
ui:
THEMES: "gitea,github-dark"
DEFAULT_THEME: "github-dark"
log:
LEVEL: warn
redis-cluster:
enabled: false
@ -89,9 +90,8 @@ jenkins:
enabled: false
controller:
image:
tag: alpine-jdk17
#tagLabel: alpine
tag: alpine-jdk17
#tagLabel: alpine
disableRememberMe: true
prometheus:
enabled: false
@ -100,13 +100,6 @@ jenkins:
javaOpts: "-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""
jenkinsOpts: "--sessionTimeout=300 --sessionEviction=10800"
# Until we set up the logging and metrics pipelines in OTEL
containerEnv:
- name: OTEL_LOGS_EXPORTER
value: "none"
- name: OTEL_METRICS_EXPORTER
value: "none"
resources:
requests:
cpu: "250m"
@ -134,18 +127,6 @@ jenkins:
disableUserThemes: true
theme: "dark"
unclassified:
openTelemetry:
configurationProperties: |-
otel.exporter.otlp.protocol=grpc
otel.instrumentation.jenkins.web.enabled=false
ignoredSteps: "dir,echo,isUnix,pwd,properties"
#endpoint: "telemetry-jaeger-collector.telemetry:4317"
exportOtelConfigurationAsEnvironmentVariables: false
#observabilityBackends:
# - jaeger:
# jaegerBaseUrl: "https://jaeger.example.com"
# name: "KubeZero Jaeger"
serviceName: "Jenkins"
buildDiscarders:
configuredBuildDiscarders:
- "jobBuildDiscarder"
@ -172,8 +153,6 @@ jenkins:
- build-discarder
- dark-theme
- matrix-auth
- reverse-proxy-auth-plugin
- opentelemetry
serviceAccountAgent:
create: true
@ -181,16 +160,12 @@ jenkins:
# Preconfigure agents to use the zdt podman image; requires fuse/overlayfs
agent:
image:
repository: public.ecr.aws/zero-downtime/jenkins-podman
tag: v0.5.0
image: public.ecr.aws/zero-downtime/jenkins-podman
tag: v0.4.5
#alwaysPullImage: true
podRetention: "Default"
showRawYaml: false
podName: "podman-aws"
serviceAccount: jenkins-podman-aws
annotations:
container.apparmor.security.beta.kubernetes.io/jnlp: unconfined
customJenkinsLabels:
- podman-aws-trivy
idleMinutes: 30
@ -216,6 +191,7 @@ jenkins:
spec:
securityContext:
fsGroup: 1000
serviceAccountName: jenkins-podman-aws
containers:
- name: jnlp
resources:
@ -272,7 +248,7 @@ jenkins:
trivy:
enabled: false
image:
tag: 0.50.1
tag: 0.45.1
persistence:
enabled: true
size: 1Gi

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-falco
description: Falco Container Security and Audit components
type: application
version: 0.1.2
version: 0.1.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,7 +16,7 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: falco
version: 4.2.5
version: 3.8.4
repository: https://falcosecurity.github.io/charts
condition: k8saudit.enabled
alias: k8saudit

View File

@ -1,64 +0,0 @@
# kubezero-falco
![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
Falco Container Security and Audit components
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Stefan Reimer | <stefan@zero-downtime.net> | |
## Requirements
Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://falcosecurity.github.io/charts | k8saudit(falco) | 4.2.5 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| k8saudit.collectors | object | `{"enabled":false}` | Disable the collectors, no syscall events to enrich with metadata. |
| k8saudit.controller | object | `{"deployment":{"replicas":1},"kind":"deployment"}` | Deploy Falco as a deployment. One instance of Falco is enough, but the number of replicas is configurable. |
| k8saudit.controller.deployment.replicas | int | `1` | Number of replicas when installing Falco using a deployment. Change it only if you really know what you are doing. For more info, check the section on Plugins in the README.md file. |
| k8saudit.driver | object | `{"enabled":false}` | Disable the drivers since we want to deploy only the k8saudit plugin. |
| k8saudit.enabled | bool | `false` | |
| k8saudit.falco.buffered_outputs | bool | `true` | |
| k8saudit.falco.json_output | bool | `true` | |
| k8saudit.falco.load_plugins[0] | string | `"k8saudit"` | |
| k8saudit.falco.load_plugins[1] | string | `"json"` | |
| k8saudit.falco.log_syslog | bool | `false` | |
| k8saudit.falco.plugins[0].init_config.maxEventSize | int | `1048576` | |
| k8saudit.falco.plugins[0].library_path | string | `"libk8saudit.so"` | |
| k8saudit.falco.plugins[0].name | string | `"k8saudit"` | |
| k8saudit.falco.plugins[0].open_params | string | `"http://:9765/k8s-audit"` | |
| k8saudit.falco.plugins[1].init_config | string | `""` | |
| k8saudit.falco.plugins[1].library_path | string | `"libjson.so"` | |
| k8saudit.falco.plugins[1].name | string | `"json"` | |
| k8saudit.falco.rules_file[0] | string | `"/etc/falco/rules.d"` | |
| k8saudit.falco.syslog_output.enabled | bool | `false` | |
| k8saudit.falcoctl.artifact.follow.enabled | bool | `false` | |
| k8saudit.falcoctl.artifact.install.enabled | bool | `false` | |
| k8saudit.fullnameOverride | string | `"falco-k8saudit"` | |
| k8saudit.mounts.volumeMounts[0].mountPath | string | `"/etc/falco/rules.d"` | |
| k8saudit.mounts.volumeMounts[0].name | string | `"rules-volume"` | |
| k8saudit.mounts.volumes[0].configMap.name | string | `"falco-k8saudit-rules"` | |
| k8saudit.mounts.volumes[0].name | string | `"rules-volume"` | |
| k8saudit.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| k8saudit.resources.limits.cpu | string | `"1000m"` | |
| k8saudit.resources.limits.memory | string | `"512Mi"` | |
| k8saudit.resources.requests.cpu | string | `"100m"` | |
| k8saudit.resources.requests.memory | string | `"256Mi"` | |
| k8saudit.services[0].name | string | `"webhook"` | |
| k8saudit.services[0].ports[0].port | int | `9765` | |
| k8saudit.services[0].ports[0].protocol | string | `"TCP"` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)

View File

@ -20,12 +20,10 @@
- required_plugin_versions:
- name: k8saudit
version: 0.7.0
version: 0.6.0
alternatives:
- name: k8saudit-eks
version: 0.4.0
- name: k8saudit-gke
version: 0.1.0
version: 0.2.0
- name: json
version: 0.7.0
@ -81,45 +79,7 @@
"eks:vpc-resource-controller",
"eks:addon-manager",
]
- list: k8s_audit_sensitive_mount_images
items: [
falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
docker.io/sysdig/sysdig, sysdig/sysdig,
gcr.io/google_containers/hyperkube,
gcr.io/google_containers/kube-proxy, docker.io/calico/node,
docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/consul,
docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout,
docker.io/netdata/netdata, docker.io/google/cadvisor, docker.io/prom/node-exporter,
amazon/amazon-ecs-agent, prom/node-exporter, amazon/cloudwatch-agent
]
- list: k8s_audit_privileged_images
items: [
falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
docker.io/calico/node, calico/node,
docker.io/cloudnativelabs/kube-router,
docker.io/docker/ucp-agent,
docker.io/mesosphere/mesos-slave,
docker.io/rook/toolbox,
docker.io/sysdig/sysdig,
gcr.io/google_containers/kube-proxy,
gcr.io/google-containers/startup-script,
gcr.io/projectcalico-org/node,
gke.gcr.io/kube-proxy,
gke.gcr.io/gke-metadata-server,
gke.gcr.io/netd-amd64,
gke.gcr.io/watcher-daemonset,
gcr.io/google-containers/prometheus-to-sd,
registry.k8s.io/ip-masq-agent-amd64,
registry.k8s.io/kube-proxy,
registry.k8s.io/prometheus-to-sd,
quay.io/calico/node,
sysdig/sysdig,
registry.k8s.io/dns/k8s-dns-node-cache,
mcr.microsoft.com/oss/kubernetes/kube-proxy
]
-
- rule: Disallowed K8s User
desc: Detect any k8s operation by users outside of an allowed set of users.
condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) and not ka.user.name in (eks_allowed_k8s_users)
@ -206,7 +166,7 @@
- rule: Create Privileged Pod
desc: >
Detect an attempt to start a pod with a privileged container
condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_privileged_images)
condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (falco_privileged_images)
output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
@ -220,7 +180,7 @@
desc: >
Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc).
Exceptions are made for known trusted images.
condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (k8s_audit_sensitive_mount_images)
condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images)
output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace resource=%ka.target.resource images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])
priority: WARNING
source: k8s_audit
@ -228,7 +188,7 @@
# These container images are allowed to run with hostnetwork=true
# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
- list: k8s_audit_hostnetwork_images
- list: falco_hostnetwork_images
items: [
gcr.io/google-containers/prometheus-to-sd,
gcr.io/projectcalico-org/typha,
@ -236,6 +196,8 @@
gke.gcr.io/gke-metadata-server,
gke.gcr.io/kube-proxy,
gke.gcr.io/netd-amd64,
k8s.gcr.io/ip-masq-agent-amd64,
k8s.gcr.io/prometheus-to-sd,
registry.k8s.io/ip-masq-agent-amd64,
registry.k8s.io/prometheus-to-sd
]
@ -243,29 +205,29 @@
# Corresponds to K8s CIS Benchmark 1.7.4
- rule: Create HostNetwork Pod
desc: Detect an attempt to start a pod using the host network.
condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostnetwork_images)
condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images)
output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- list: k8s_audit_hostpid_images
- list: falco_hostpid_images
items: []
- rule: Create HostPid Pod
desc: Detect an attempt to start a pod using the host pid namespace.
condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostpid_images)
condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostpid_images)
output: Pod started using host pid namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
tags: [k8s]
- list: k8s_audit_hostipc_images
- list: falco_hostipc_images
items: []
- rule: Create HostIPC Pod
desc: Detect an attempt to start a pod using the host ipc namespace.
condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostipc_images)
condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostipc_images)
output: Pod started using host ipc namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
priority: WARNING
source: k8s_audit
@ -336,18 +298,6 @@
source: k8s_audit
tags: [k8s]
- macro: user_known_portforward_activities
condition: (k8s_audit_never_true)
- rule: port-forward
desc: >
Detect any attempt to port-forward
condition: ka.target.subresource in (portforward) and not user_known_portforward_activities
output: Portforward to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource )
priority: NOTICE
source: k8s_audit
tags: [k8s]
- macro: user_known_pod_debug_activities
condition: (k8s_audit_never_true)
@ -394,11 +344,19 @@
gke.gcr.io/addon-resizer,
gke.gcr.io/heapster,
gke.gcr.io/gke-metadata-server,
k8s.gcr.io/ip-masq-agent-amd64,
k8s.gcr.io/kube-apiserver,
registry.k8s.io/ip-masq-agent-amd64,
registry.k8s.io/kube-apiserver,
gke.gcr.io/kube-proxy,
gke.gcr.io/netd-amd64,
gke.gcr.io/watcher-daemonset,
k8s.gcr.io/addon-resizer,
k8s.gcr.io/prometheus-to-sd,
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64,
k8s.gcr.io/k8s-dns-kube-dns-amd64,
k8s.gcr.io/k8s-dns-sidecar-amd64,
k8s.gcr.io/metrics-server-amd64,
registry.k8s.io/addon-resizer,
registry.k8s.io/prometheus-to-sd,
registry.k8s.io/k8s-dns-dnsmasq-nanny-amd64,

View File

@ -15,9 +15,9 @@ k8saudit:
resources:
requests:
cpu: 100m
memory: 64Mi
memory: 256Mi
limits:
cpu: 1
cpu: 1000m
memory: 512Mi
nodeSelector:
@ -43,16 +43,10 @@ k8saudit:
falcoctl:
artifact:
install:
enabled: false
follow:
enabled: false
# Since 0.37 the plugins are no longer part of the image,
# but we provide our rules statically via our ConfigMap
config:
artifact:
allowedTypes:
- plugin
install:
refs: [k8saudit:0.7.0,json:0.7.2]
services:
- name: webhook

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio-gateway
description: KubeZero Umbrella Chart for Istio gateways
type: application
version: 0.21.2
version: 0.19.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -17,6 +17,6 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: gateway
version: 1.21.2
version: 1.19.4
repository: https://istio-release.storage.googleapis.com/charts
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-istio-gateway
![Version: 0.21.2](https://img.shields.io/badge/Version-0.21.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.19.4](https://img.shields.io/badge/Version-0.19.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio gateways
@ -21,7 +21,7 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://istio-release.storage.googleapis.com/charts | gateway | 1.21.2 |
| https://istio-release.storage.googleapis.com/charts | gateway | 1.19.4 |
## Values
@ -41,8 +41,6 @@ Kubernetes: `>= 1.26.0`
| gateway.service.externalTrafficPolicy | string | `"Local"` | |
| gateway.service.type | string | `"NodePort"` | |
| gateway.terminationGracePeriodSeconds | int | `120` | |
| hardening.rejectUnderscoresHeaders | bool | `true` | |
| hardening.unescapeSlashes | bool | `true` | |
| proxyProtocol | bool | `true` | |
| telemetry.enabled | bool | `false` | |

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.21.2
appVersion: 1.19.4
description: Helm chart for deploying Istio gateways
icon: https://istio.io/latest/favicons/android-192x192.png
keywords:
@ -9,4 +9,4 @@ name: gateway
sources:
- https://github.com/istio/istio
type: application
version: 1.21.2
version: 1.19.4

View File

@ -35,28 +35,6 @@ To view supported configuration options and documentation, run:
helm show values istio/gateway
```
### Profiles
Istio Helm charts have a concept of a `profile`, which is a bundled collection of value presets.
These can be set with `--set profile=<profile>`.
For example, the `demo` profile offers a preset configuration to try out Istio in a test environment, with additional features enabled and lowered resource requirements.
For consistency, the same profiles are used across each chart, even if they do not impact a given chart.
Explicitly set values have highest priority, then profile settings, then chart defaults.
As an implementation detail of profiles, the default values for the chart are all nested under `defaults`.
When configuring the chart, you should not include this.
That is, `--set some.field=true` should be passed, not `--set defaults.some.field=true`.
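For example, installing this gateway chart with the `demo` profile (a sketch; it assumes the `istio` Helm repository is already added):

```console
helm install istio-ingressgateway istio/gateway --set profile=demo
```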
### OpenShift
When deploying the gateway in an OpenShift cluster, use the `openshift` profile to override the default values, for example:
```console
helm install istio-ingressgateway istio/gateway --set profile=openshift
```
### `image: auto` Information
The image used by the chart, `auto`, may be unintuitive.

View File

@ -1,25 +0,0 @@
# The ambient profile enables ambient mode. The Istiod, CNI, and ztunnel charts must be deployed
meshConfig:
defaultConfig:
proxyMetadata:
ISTIO_META_ENABLE_HBONE: "true"
variant: distroless
pilot:
variant: distroless
env:
# Setup more secure default that is off in 'default' only for backwards compatibility
VERIFY_CERTIFICATE_AT_CLIENT: "true"
ENABLE_AUTO_SNI: "true"
PILOT_ENABLE_HBONE: "true"
CA_TRUSTED_NODE_ACCOUNTS: "istio-system/ztunnel,kube-system/ztunnel"
PILOT_ENABLE_AMBIENT_CONTROLLERS: "true"
cni:
logLevel: info
privileged: true
ambient:
enabled: true
# Default excludes istio-system; it's actually fine to redirect there since we opt out istiod, ztunnel, and istio-cni
excludeNamespaces:
- kube-system

View File

@ -1,6 +0,0 @@
pilot:
env:
ENABLE_EXTERNAL_NAME_ALIAS: "false"
PERSIST_OLDEST_FIRST_HEURISTIC_FOR_VIRTUAL_SERVICE_HOST_MATCHING: "true"
VERIFY_CERTIFICATE_AT_CLIENT: "false"
ENABLE_AUTO_SNI: "false"

View File

@ -1,69 +0,0 @@
# The demo profile enables a variety of things to try out Istio in non-production environments.
# * Lower resource utilization.
# * Some additional features are enabled by default; especially ones used in some tasks in istio.io.
# * More ports enabled on the ingress, which is used in some tasks.
meshConfig:
accessLogFile: /dev/stdout
extensionProviders:
- name: otel
envoyOtelAls:
service: opentelemetry-collector.istio-system.svc.cluster.local
port: 4317
- name: skywalking
skywalking:
service: tracing.istio-system.svc.cluster.local
port: 11800
- name: otel-tracing
opentelemetry:
port: 4317
service: opentelemetry-collector.otel-collector.svc.cluster.local
global:
proxy:
resources:
requests:
cpu: 10m
memory: 40Mi
pilot:
autoscaleEnabled: false
traceSampling: 100
resources:
requests:
cpu: 10m
memory: 100Mi
gateways:
istio-egressgateway:
autoscaleEnabled: false
resources:
requests:
cpu: 10m
memory: 40Mi
istio-ingressgateway:
autoscaleEnabled: false
ports:
## You can add custom gateway ports in user values overrides, but the list must still include these default ports, since Helm replaces arrays rather than merging them.
# Note that AWS ELB will by default perform health checks on the first port
# on this list. Setting this to the health check port will ensure that health
# checks always work. https://github.com/istio/istio/issues/12503
- port: 15021
targetPort: 15021
name: status-port
- port: 80
targetPort: 8080
name: http2
- port: 443
targetPort: 8443
name: https
- port: 31400
targetPort: 31400
name: tcp
# This is the port where SNI routing happens
- port: 15443
targetPort: 15443
name: tls
resources:
requests:
cpu: 10m
memory: 40Mi

View File

@ -1,18 +0,0 @@
# The OpenShift profile provides a basic set of settings to run Istio on OpenShift
# CNI must be installed.
cni:
cniBinDir: /var/lib/cni/bin
cniConfDir: /etc/cni/multus/net.d
chained: false
cniConfFileName: "istio-cni.conf"
excludeNamespaces:
- istio-system
- kube-system
logLevel: info
privileged: true
provider: "multus"
global:
platform: openshift
istio_cni:
enabled: true
chained: false

View File

@ -1,9 +0,0 @@
# The preview profile contains features that are experimental.
# This is intended to explore new features coming to Istio.
# Stability, security, and performance are not guaranteed - use at your own risk.
meshConfig:
defaultConfig:
proxyMetadata:
# Enable Istio agent to handle DNS requests for known hosts
# Unknown hosts will automatically be resolved using the upstream DNS servers in resolv.conf
ISTIO_META_DNS_CAPTURE: "true"

View File

@ -46,10 +46,6 @@ spec:
- name: net.ipv4.ip_unprivileged_port_start
value: "0"
{{- end }}
{{- with .Values.volumes }}
volumes:
{{ toYaml . | nindent 8 }}
{{- end }}
containers:
- name: istio-proxy
# "auto" will be populated at runtime by the mutating webhook. See https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#customizing-injection
@ -98,9 +94,9 @@ spec:
name: http-envoy-prom
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
{{- if .Values.volumeMounts }}
volumeMounts:
{{ toYaml . | nindent 12 }}
{{- toYaml .Values.volumeMounts | nindent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
@ -122,3 +118,7 @@ spec:
{{- with .Values.priorityClassName }}
priorityClassName: {{ . }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -28,15 +28,4 @@ spec:
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
type: Utilization
{{- end }}
{{- if .Values.autoscaling.autoscaleBehavior }}
behavior: {{ toYaml .Values.autoscaling.autoscaleBehavior | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -15,19 +15,12 @@ spec:
{{- with .Values.service.loadBalancerIP }}
loadBalancerIP: "{{ . }}"
{{- end }}
{{- if eq .Values.service.type "LoadBalancer" }}
{{- if hasKey .Values.service "allocateLoadBalancerNodePorts" }}
allocateLoadBalancerNodePorts: {{ .Values.service.allocateLoadBalancerNodePorts }}
{{- end }}
{{- with .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: "{{ . }}"
{{- end }}
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
{{- with .Values.service.ipFamilies }}
ipFamilies:
{{- range .Values.service.ipFamilies }}
- {{ . }}
{{- end }}
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:

View File

@ -1,34 +0,0 @@
{{/*
Complex logic ahead...
We have three sets of values, in order of precedence (last wins):
1. The builtin values.yaml defaults
2. The profile the user selects
3. User input (-f or --set)
Unfortunately, Helm provides us (1) and (3) together (as .Values), making it hard to insert (2).
However, we can work around this by placing all of (1) under a specific key (.Values.defaults).
We can then merge the profile onto the defaults, then the user settings onto that.
Finally, we can set all of that under .Values so the chart behaves without awareness.
*/}}
{{- $defaults := $.Values.defaults }}
{{- $_ := unset $.Values "defaults" }}
{{- $profile := dict }}
{{- with .Values.profile }}
{{- with $.Files.Get (printf "files/profile-%s.yaml" .)}}
{{- $profile = (. | fromYaml) }}
{{- else }}
{{ fail (cat "unknown profile" $.Values.profile) }}
{{- end }}
{{- end }}
{{- with .Values.compatibilityVersion }}
{{- with $.Files.Get (printf "files/profile-compatibility-version-%s.yaml" .) }}
{{- $ignore := mustMergeOverwrite $profile (. | fromYaml) }}
{{- else }}
{{ fail (cat "unknown compatibility version" $.Values.compatibilityVersion) }}
{{- end }}
{{- end }}
{{- if $profile }}
{{- $a := mustMergeOverwrite $defaults $profile }}
{{- end }}
{{- $b := set $ "Values" (mustMergeOverwrite $defaults $.Values) }}
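In practice the resulting merge order can be verified with `helm template` (a sketch with hypothetical keys; the actual keys depend on the chart and the selected profile):
```console
# defaults < profile < user input, last one wins:
helm template my-gw istio/gateway --set profile=demo --set replicaCount=3
# replicaCount comes from --set, profile presets fill in whatever the user
# did not touch, and the defaults: block covers everything else
```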

View File

@ -2,300 +2,240 @@
"$schema": "http://json-schema.org/schema#",
"type": "object",
"additionalProperties": false,
"$defs": {
"values": {
"properties": {
"global": {
"type": "object"
},
"affinity": {
"type": "object"
},
"securityContext": {
"type": ["object", "null"]
},
"containerSecurityContext": {
"type": ["object", "null"]
},
"kind":{
"type": "string",
"enum": ["Deployment", "DaemonSet"]
},
"annotations": {
"additionalProperties": {
"type": [
"string",
"integer"
]
},
"type": "object"
},
"autoscaling": {
"type": "object",
"properties": {
"global": {
"type": "object"
},
"affinity": {
"type": "object"
},
"securityContext": {
"type": [
"object",
"null"
]
},
"containerSecurityContext": {
"type": [
"object",
"null"
]
},
"kind": {
"type": "string",
"enum": [
"Deployment",
"DaemonSet"
]
},
"annotations": {
"additionalProperties": {
"type": [
"string",
"integer"
]
},
"type": "object"
},
"autoscaling": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"maxReplicas": {
"type": "integer"
},
"minReplicas": {
"type": "integer"
},
"targetCPUUtilizationPercentage": {
"type": "integer"
}
}
},
"env": {
"type": "object"
},
"labels": {
"type": "object"
},
"name": {
"type": "string"
},
"nodeSelector": {
"type": "object"
},
"podAnnotations": {
"type": "object",
"properties": {
"inject.istio.io/templates": {
"type": "string"
},
"prometheus.io/path": {
"type": "string"
},
"prometheus.io/port": {
"type": "string"
},
"prometheus.io/scrape": {
"type": "string"
}
}
},
"replicaCount": {
"type": [
"integer",
"null"
]
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
},
"requests": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
}
}
},
"revision": {
"type": "string"
},
"compatibilityVersion": {
"type": "string"
},
"runAsRoot": {
"enabled": {
"type": "boolean"
},
"unprivilegedPort": {
"type": [
"string",
"boolean"
],
"enum": [
true,
false,
"auto"
]
"maxReplicas": {
"type": "integer"
},
"service": {
"type": "object",
"properties": {
"annotations": {
"type": "object"
},
"externalTrafficPolicy": {
"type": "string"
},
"loadBalancerIP": {
"type": "string"
},
"loadBalancerSourceRanges": {
"type": "array"
},
"ipFamilies": {
"items": {
"type": "string",
"enum": [
"IPv4",
"IPv6"
]
}
},
"ipFamilyPolicy": {
"type": "string",
"enum": [
"",
"SingleStack",
"PreferDualStack",
"RequireDualStack"
]
},
"ports": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"port": {
"type": "integer"
},
"protocol": {
"type": "string"
},
"targetPort": {
"type": "integer"
}
}
}
},
"type": {
"type": "string"
}
}
"minReplicas": {
"type": "integer"
},
"serviceAccount": {
"type": "object",
"properties": {
"annotations": {
"type": "object"
},
"name": {
"type": "string"
},
"create": {
"type": "boolean"
}
}
},
"rbac": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
}
}
},
"tolerations": {
"type": "array"
},
"topologySpreadConstraints": {
"type": "array"
},
"networkGateway": {
"targetCPUUtilizationPercentage": {
"type": "integer"
}
}
},
"env": {
"type": "object"
},
"labels": {
"type": "object"
},
"volumes": {
"type": "array"
},
"volumeMounts": {
"type": "array"
},
"name": {
"type": "string"
},
"nodeSelector": {
"type": "object"
},
"podAnnotations": {
"type": "object",
"properties": {
"inject.istio.io/templates": {
"type": "string"
},
"imagePullPolicy": {
"type": "string",
"enum": [
"",
"Always",
"IfNotPresent",
"Never"
]
"prometheus.io/path": {
"type": "string"
},
"imagePullSecrets": {
"prometheus.io/port": {
"type": "string"
},
"prometheus.io/scrape": {
"type": "string"
}
}
},
"replicaCount": {
"type": [ "integer", "null" ]
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
},
"requests": {
"type": "object",
"properties": {
"cpu": {
"type": "string"
},
"memory": {
"type": "string"
}
}
}
}
},
"revision": {
"type": "string"
},
"runAsRoot": {
"type": "boolean"
},
"unprivilegedPort": {
"type": ["string", "boolean"],
"enum": [true, false, "auto"]
},
"service": {
"type": "object",
"properties": {
"annotations": {
"type": "object"
},
"externalTrafficPolicy": {
"type": "string"
},
"loadBalancerIP": {
"type": "string"
},
"loadBalancerSourceRanges": {
"type": "array"
},
"ipFamilies" : {
"items": {
"type": "string",
"enum": ["IPv4", "IPv6"]
}
},
"ipFamilyPolicy" : {
"type": "string",
"enum": ["", "SingleStack", "PreferDualStack", "RequireDualStack"]
},
"ports": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"port": {
"type": "integer"
},
"protocol": {
"type": "string"
},
"targetPort": {
"type": "integer"
}
}
}
},
"podDisruptionBudget": {
"type": "object",
"properties": {
"minAvailable": {
"type": [
"integer",
"string"
]
},
"maxUnavailable": {
"type": [
"integer",
"string"
]
},
"unhealthyPodEvictionPolicy": {
"type": "string",
"enum": [
"",
"IfHealthyBudget",
"AlwaysAllow"
]
}
}
},
"terminationGracePeriodSeconds": {
"type": "number"
},
"volumes": {
"type": "array",
"items": {
"type": "object"
}
},
"volumeMounts": {
"type": "array",
"items": {
"type": "object"
}
},
"priorityClassName": {
"type": {
"type": "string"
}
}
},
"serviceAccount": {
"type": "object",
"properties": {
"annotations": {
"type": "object"
},
"name": {
"type": "string"
},
"create": {
"type": "boolean"
}
}
},
"rbac": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
}
}
},
"tolerations": {
"type": "array"
},
"topologySpreadConstraints": {
"type": "array"
},
"networkGateway": {
"type": "string"
},
"imagePullPolicy": {
"type": "string",
"enum": ["", "Always", "IfNotPresent", "Never"]
},
"imagePullSecrets": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
}
}
}
},
"podDisruptionBudget": {
"type": "object",
"properties": {
"minAvailable": {
"type": ["integer", "string"]
},
"maxUnavailable": {
"type": ["integer", "string"]
},
"unhealthyPodEvictionPolicy": {
"type": "string",
"enum": ["", "IfHealthyBudget", "AlwaysAllow"]
}
}
},
"terminationGracePeriodSeconds": {
"type": "number"
},
"priorityClassName": {
"type": "string"
}
},
"defaults": {
"$ref": "#/$defs/values"
},
"$ref": "#/$defs/values"
}
}

View File

@ -1,152 +1,139 @@
defaults:
# Name allows overriding the release name. Generally this should not be set
name: ""
# revision declares which revision this gateway is a part of
revision: ""
# Name allows overriding the release name. Generally this should not be set
name: ""
# revision declares which revision this gateway is a part of
revision: ""
# Controls the spec.replicas setting for the Gateway deployment if set.
# Otherwise defaults to Kubernetes Deployment default (1).
replicaCount:
# Controls the spec.replicas setting for the Gateway deployment if set.
# Otherwise defaults to Kubernetes Deployment default (1).
replicaCount:
kind: Deployment
kind: Deployment
rbac:
# If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
# when using http://gateway-api.org/.
enabled: true
rbac:
# If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
# when using http://gateway-api.org/.
enabled: true
serviceAccount:
# If set, a service account will be created. Otherwise, the default is used
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set, the release name is used
name: ""
podAnnotations:
prometheus.io/port: "15020"
prometheus.io/scrape: "true"
prometheus.io/path: "/stats/prometheus"
inject.istio.io/templates: "gateway"
sidecar.istio.io/inject: "true"
# Define the security context for the pod.
# If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
# On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: ~
containerSecurityContext: ~
service:
# Type of service. Set to "None" to disable the service entirely
type: LoadBalancer
ports:
- name: status-port
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
annotations: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
externalIPs: []
ipFamilyPolicy: ""
ipFamilies: []
## Whether to automatically allocate NodePorts (only for LoadBalancers).
# allocateLoadBalancerNodePorts: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: {}
autoscaleBehavior: {}
# Pod environment variables
env: {}
# Labels to apply to all resources
labels: {}
# Annotations to apply to all resources
serviceAccount:
# If set, a service account will be created. Otherwise, the default is used
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set, the release name is used
name: ""
nodeSelector: {}
podAnnotations:
prometheus.io/port: "15020"
prometheus.io/scrape: "true"
prometheus.io/path: "/stats/prometheus"
inject.istio.io/templates: "gateway"
sidecar.istio.io/inject: "true"
tolerations: []
# Define the security context for the pod.
# If unset, this will be automatically set to the minimum privileges required to bind to port 80 and 443.
# On Kubernetes 1.22+, this only requires the `net.ipv4.ip_unprivileged_port_start` sysctl.
securityContext: ~
containerSecurityContext: ~
topologySpreadConstraints: []
service:
# Type of service. Set to "None" to disable the service entirely
type: LoadBalancer
ports:
- name: status-port
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
annotations: {}
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
externalIPs: []
ipFamilyPolicy: ""
ipFamilies: []
affinity: {}
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
# If specified, the gateway will act as a network gateway for the given network.
networkGateway: ""
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
# Specify image pull policy if default behavior isn't desired.
# Default behavior: "latest" images are pulled Always, otherwise IfNotPresent
imagePullPolicy: ""
# Pod environment variables
env: {}
imagePullSecrets: []
# Labels to apply to all resources
labels: {}
# This value is used to configure a Kubernetes PodDisruptionBudget for the gateway.
#
# By default, the `podDisruptionBudget` is disabled (set to `{}`),
# which means that no PodDisruptionBudget resource will be created.
#
# To enable the PodDisruptionBudget, configure it by specifying the
# `minAvailable` or `maxUnavailable`. For example, to set the
# minimum number of available replicas to 1, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
#
# Or, to allow a maximum of 1 unavailable replica, you can set:
#
# podDisruptionBudget:
# maxUnavailable: 1
#
# You can also specify the `unhealthyPodEvictionPolicy` field, and the valid values are `IfHealthyBudget` and `AlwaysAllow`.
# For example, to set the `unhealthyPodEvictionPolicy` to `AlwaysAllow`, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
# unhealthyPodEvictionPolicy: AlwaysAllow
#
# To disable the PodDisruptionBudget, you can leave it as an empty object `{}`:
#
# podDisruptionBudget: {}
#
podDisruptionBudget: {}
# Annotations to apply to all resources
annotations: {}
terminationGracePeriodSeconds: 30
nodeSelector: {}
# A list of `Volumes` added into the Gateway Pods. See
# https://kubernetes.io/docs/concepts/storage/volumes/.
volumes: []
tolerations: []
# A list of `VolumeMounts` added into the Gateway Pods. See
# https://kubernetes.io/docs/concepts/storage/volumes/.
volumeMounts: []
topologySpreadConstraints: []
# Configure this to a higher priority class in order to make sure your Istio gateway pods
# will not be killed because of low priority class.
# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# for more detail.
priorityClassName: ""
affinity: {}
# If specified, the gateway will act as a network gateway for the given network.
networkGateway: ""
# Specify image pull policy if default behavior isn't desired.
# Default behavior: "latest" images are pulled Always, otherwise IfNotPresent
imagePullPolicy: ""
imagePullSecrets: []
# This value is used to configure a Kubernetes PodDisruptionBudget for the gateway.
#
# By default, the `podDisruptionBudget` is disabled (set to `{}`),
# which means that no PodDisruptionBudget resource will be created.
#
# To enable the PodDisruptionBudget, configure it by specifying the
# `minAvailable` or `maxUnavailable`. For example, to set the
# minimum number of available replicas to 1, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
#
# Or, to allow a maximum of 1 unavailable replica, you can set:
#
# podDisruptionBudget:
# maxUnavailable: 1
#
# You can also specify the `unhealthyPodEvictionPolicy` field, and the valid values are `IfHealthyBudget` and `AlwaysAllow`.
# For example, to set the `unhealthyPodEvictionPolicy` to `AlwaysAllow`, you can update this value as follows:
#
# podDisruptionBudget:
# minAvailable: 1
# unhealthyPodEvictionPolicy: AlwaysAllow
#
# To disable the PodDisruptionBudget, you can leave it as an empty object `{}`:
#
# podDisruptionBudget: {}
#
podDisruptionBudget: {}
terminationGracePeriodSeconds: 30
# Configure this to a higher priority class in order to make sure your Istio gateway pods
# will not be killed because of low priority class.
# Refer to https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# for more detail.
priorityClassName: ""

View File

@ -11,6 +11,25 @@ diff -tubr charts/gateway.orig/templates/deployment.yaml charts/gateway/template
selector:
matchLabels:
{{- include "gateway.selectorLabels" . | nindent 6 }}
@@ -86,6 +90,10 @@
name: http-envoy-prom
resources:
{{- toYaml .Values.resources | nindent 12 }}
+ {{- if .Values.volumeMounts }}
+ volumeMounts:
+ {{- toYaml .Values.volumeMounts | nindent 12 }}
+ {{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -102,3 +110,7 @@
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
+ {{- with .Values.volumes }}
+ volumes:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff -tubr charts/gateway.orig/templates/service.yaml charts/gateway/templates/service.yaml
--- charts/gateway.orig/templates/service.yaml 2022-12-09 14:58:33.000000000 +0000
+++ charts/gateway/templates/service.yaml 2022-12-12 22:52:27.629670669 +0000
@ -30,3 +49,19 @@ diff -tubr charts/gateway.orig/templates/service.yaml charts/gateway/templates/s
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs: {{- range .Values.service.externalIPs }}
diff -tubr charts/gateway.orig/values.schema.json charts/gateway/values.schema.json
--- charts/gateway.orig/values.schema.json 2022-12-09 14:58:33.000000000 +0000
+++ charts/gateway/values.schema.json 2022-12-12 22:52:27.629670669 +0000
@@ -51,6 +51,12 @@
"labels": {
"type": "object"
},
+ "volumes": {
+ "type": "array"
+ },
+ "volumeMounts": {
+ "type": "array"
+ },
"name": {
"type": "string"
},

View File

@ -32,14 +32,10 @@ spec:
use_remote_address: true
normalize_path: true
merge_slashes: true
{{- if .Values.hardening.unescapeSlashes }}
path_with_escaped_slashes_action: UNESCAPE_AND_REDIRECT
{{- end }}
common_http_protocol_options:
idle_timeout: 3600s # 1 hour
{{- if .Values.hardening.rejectUnderscoresHeaders }}
headers_with_underscores_action: REJECT_REQUEST
{{- end }}
http2_protocol_options:
max_concurrent_streams: 100
initial_stream_window_size: 65536 # 64 KiB

View File

@ -15,11 +15,11 @@ spec:
operation: MERGE
value:
listener_filters:
- name: envoy.filters.listener.tls_inspector
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector
{{- if .Values.proxyProtocol }}
- name: envoy.filters.listener.proxy_protocol
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol
{{- end }}
- name: envoy.filters.listener.tls_inspector
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector

View File

@ -39,7 +39,3 @@ telemetry:
enabled: false
proxyProtocol: true
hardening:
rejectUnderscoresHeaders: true
unescapeSlashes: true

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio
description: KubeZero Umbrella Chart for Istio
type: application
version: 0.21.2
version: 0.19.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,13 +16,13 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: base
version: 1.21.2
version: 1.19.4
repository: https://istio-release.storage.googleapis.com/charts
- name: istiod
version: 1.21.2
version: 1.19.4
repository: https://istio-release.storage.googleapis.com/charts
- name: kiali-server
version: "1.83.0"
version: "1.76.0"
repository: https://kiali.org/helm-charts
condition: kiali-server.enabled
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-istio
![Version: 0.21.2](https://img.shields.io/badge/Version-0.21.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.19.4](https://img.shields.io/badge/Version-0.19.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio
@ -21,9 +21,9 @@ Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://istio-release.storage.googleapis.com/charts | base | 1.21.2 |
| https://istio-release.storage.googleapis.com/charts | istiod | 1.21.2 |
| https://kiali.org/helm-charts | kiali-server | 1.83.0 |
| https://istio-release.storage.googleapis.com/charts | base | 1.19.4 |
| https://istio-release.storage.googleapis.com/charts | istiod | 1.19.4 |
| https://kiali.org/helm-charts | kiali-server | 1.76.0 |
## Values

View File

@ -5,18 +5,18 @@ folder: Istio
condition: '.Values.istiod.telemetry.enabled'
dashboards:
- name: istio-control-plane
url: https://grafana.com/api/dashboards/7645/revisions/201/download
url: https://grafana.com/api/dashboards/7645/revisions/174/download
tags:
- Istio
- name: istio-mesh
url: https://grafana.com/api/dashboards/7639/revisions/201/download
url: https://grafana.com/api/dashboards/7639/revisions/174/download
tags:
- Istio
- name: istio-service
url: https://grafana.com/api/dashboards/7636/revisions/201/download
url: https://grafana.com/api/dashboards/7636/revisions/174/download
tags:
- Istio
- name: istio-workload
url: https://grafana.com/api/dashboards/7630/revisions/201/download
url: https://grafana.com/api/dashboards/7630/revisions/174/download
tags:
- Istio

File diff suppressed because one or more lines are too long

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.8.12
version: 0.8.8
appVersion: 1.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -20,11 +20,11 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: fluentd
version: 0.5.2
version: 0.4.3
repository: https://fluent.github.io/helm-charts
condition: fluentd.enabled
- name: fluent-bit
version: 0.46.2
version: 0.37.1
repository: https://fluent.github.io/helm-charts
condition: fluent-bit.enabled
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-logging
![Version: 0.8.12](https://img.shields.io/badge/Version-0.8.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
![Version: 0.8.6](https://img.shields.io/badge/Version-0.8.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@ -14,13 +14,14 @@ KubeZero Umbrella Chart for complete EFK stack
## Requirements
Kubernetes: `>= 1.26.0`
Kubernetes: `>= 1.24.0`
| Repository | Name | Version |
|------------|------|---------|
| | eck-operator | 2.4.0 |
| | fluent-bit | 0.24.0 |
| | fluentd | 0.3.9 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://fluent.github.io/helm-charts | fluent-bit | 0.46.2 |
| https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
## Changes from upstream
### ECK
@ -56,6 +57,11 @@ Kubernetes: `>= 1.26.0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| eck-operator.enabled | bool | `false` | |
| eck-operator.installCRDs | bool | `false` | |
| eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| eck-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| elastic_password | string | `""` | |
| es.nodeSets | list | `[]` | |
| es.prometheus | bool | `false` | |
@ -82,33 +88,33 @@ Kubernetes: `>= 1.26.0`
| fluent-bit.daemonSetVolumes[1].hostPath.path | string | `"/var/lib/containers/logs"` | |
| fluent-bit.daemonSetVolumes[1].name | string | `"newlog"` | |
| fluent-bit.enabled | bool | `false` | |
| fluent-bit.image.tag | string | `"2.0.10"` | |
| fluent-bit.luaScripts."kubezero.lua" | string | `"function nest_k8s_ns(tag, timestamp, record)\n if not record['kubernetes']['namespace_name'] then\n return 0, 0, 0\n end\n new_record = {}\n for key, val in pairs(record) do\n if key == 'kube' then\n new_record[key] = {}\n new_record[key][record['kubernetes']['namespace_name']] = record[key]\n else\n new_record[key] = record[key]\n end\n end\n return 1, timestamp, new_record\nend\n"` | |
| fluent-bit.resources.limits.memory | string | `"128Mi"` | |
| fluent-bit.resources.limits.memory | string | `"64Mi"` | |
| fluent-bit.resources.requests.cpu | string | `"20m"` | |
| fluent-bit.resources.requests.memory | string | `"48Mi"` | |
| fluent-bit.resources.requests.memory | string | `"32Mi"` | |
| fluent-bit.serviceMonitor.enabled | bool | `false` | |
| fluent-bit.serviceMonitor.selector.release | string | `"metrics"` | |
| fluent-bit.testFramework.enabled | bool | `false` | |
| fluent-bit.tolerations[0].effect | string | `"NoSchedule"` | |
| fluent-bit.tolerations[0].operator | string | `"Exists"` | |
| fluentd.configMapConfigs[0] | string | `"fluentd-prometheus-conf"` | |
| fluentd.dashboards.enabled | bool | `false` | |
| fluentd.enabled | bool | `false` | |
| fluentd.env[0].name | string | `"OUTPUT_PASSWORD"` | |
| fluentd.env[0].valueFrom.secretKeyRef.key | string | `"elastic"` | |
| fluentd.env[0].valueFrom.secretKeyRef.name | string | `"logging-es-elastic-user"` | |
| fluentd.env[0].name | string | `"FLUENTD_CONF"` | |
| fluentd.env[0].value | string | `"../../etc/fluent/fluent.conf"` | |
| fluentd.env[1].name | string | `"OUTPUT_PASSWORD"` | |
| fluentd.env[1].valueFrom.secretKeyRef.key | string | `"elastic"` | |
| fluentd.env[1].valueFrom.secretKeyRef.name | string | `"logging-es-elastic-user"` | |
| fluentd.fileConfigs."00_system.conf" | string | `"<system>\n root_dir /fluentd/log\n log_level info\n ignore_repeated_log_interval 60s\n ignore_same_log_interval 60s\n workers 1\n</system>"` | |
| fluentd.fileConfigs."01_sources.conf" | string | `"<source>\n @type http\n @label @KUBERNETES\n port 9880\n bind 0.0.0.0\n keepalive_timeout 30\n</source>\n\n<source>\n @type forward\n @label @KUBERNETES\n port 24224\n bind 0.0.0.0\n # skip_invalid_event true\n send_keepalive_packet true\n <security>\n self_hostname \"#{ENV['HOSTNAME']}\"\n shared_key {{ .Values.shared_key }}\n </security>\n</source>"` | |
| fluentd.fileConfigs."02_filters.conf" | string | `"<label @KUBERNETES>\n # prevent log feedback loops eg. ES has issues etc.\n # discard logs from our own pods\n <match kube.logging.fluentd>\n @type relabel\n @label @FLUENT_LOG\n </match>\n\n # Exclude current fluent-bit multiline noise\n <filter kube.logging.fluent-bit>\n @type grep\n <exclude>\n key log\n pattern /could not append content to multiline context/\n </exclude>\n </filter>\n\n # Generate Hash ID to break endless loop for already ingested events during retries\n <filter **>\n @type elasticsearch_genid\n use_entire_record true\n </filter>\n\n # Route through DISPATCH for Prometheus metrics\n <match **>\n @type relabel\n @label @DISPATCH\n </match>\n</label>"` | |
| fluentd.fileConfigs."04_outputs.conf" | string | `"<label @OUTPUT>\n <match **>\n @id out_es\n @type elasticsearch\n # @log_level debug\n include_tag_key true\n\n id_key _hash\n remove_keys _hash\n write_operation create\n\n # KubeZero pipeline incl. GeoIP etc.\n pipeline fluentd\n\n hosts \"{{ .Values.output.host }}\"\n port 9200\n scheme http\n user elastic\n password \"#{ENV['OUTPUT_PASSWORD']}\"\n\n log_es_400_reason\n logstash_format true\n reconnect_on_error true\n reload_on_failure true\n request_timeout 300s\n slow_flush_log_threshold 55.0\n\n #with_transporter_log true\n\n verify_es_version_at_startup false\n default_elasticsearch_version 7\n suppress_type_name true\n\n # Retry failed bulk requests\n # https://github.com/uken/fluent-plugin-elasticsearch#unrecoverable-error-types\n unrecoverable_error_types [\"out_of_memory_error\"]\n bulk_message_request_threshold 1048576\n\n <buffer>\n @type file\n\n flush_mode interval\n flush_thread_count 2\n flush_interval 10s\n\n chunk_limit_size 2MB\n total_limit_size 1GB\n\n flush_at_shutdown true\n retry_type exponential_backoff\n retry_timeout 6h\n overflow_action drop_oldest_chunk\n disable_chunk_backup true\n </buffer>\n </match>\n</label>"` | |
| fluentd.image.repository | string | `"public.ecr.aws/zero-downtime/fluentd-concenter"` | |
| fluentd.image.tag | string | `"v1.16.3"` | |
| fluentd.image.tag | string | `"v1.16.0"` | |
| fluentd.istio.enabled | bool | `false` | |
| fluentd.kind | string | `"Deployment"` | |
| fluentd.metrics.serviceMonitor.additionalLabels.release | string | `"metrics"` | |
| fluentd.metrics.serviceMonitor.enabled | bool | `false` | |
| fluentd.mountDockerContainersDirectory | bool | `false` | |
| fluentd.mountVarLogDirectory | bool | `false` | |
| fluentd.output.host | string | `"logging-es-http"` | |
| fluentd.podSecurityPolicy.enabled | bool | `false` | |
| fluentd.replicaCount | int | `1` | |
@ -122,6 +128,16 @@ Kubernetes: `>= 1.26.0`
| fluentd.service.ports[1].name | string | `"http-fluentd"` | |
| fluentd.service.ports[1].protocol | string | `"TCP"` | |
| fluentd.shared_key | string | `"cloudbender"` | |
| fluentd.volumeMounts[0].mountPath | string | `"/etc/fluent"` | |
| fluentd.volumeMounts[0].name | string | `"etcfluentd-main"` | |
| fluentd.volumeMounts[1].mountPath | string | `"/etc/fluent/config.d/"` | |
| fluentd.volumeMounts[1].name | string | `"etcfluentd-config"` | |
| fluentd.volumes[0].configMap.defaultMode | int | `511` | |
| fluentd.volumes[0].configMap.name | string | `"fluentd-main"` | |
| fluentd.volumes[0].name | string | `"etcfluentd-main"` | |
| fluentd.volumes[1].configMap.defaultMode | int | `511` | |
| fluentd.volumes[1].configMap.name | string | `"fluentd-config"` | |
| fluentd.volumes[1].name | string | `"etcfluentd-config"` | |
| kibana.count | int | `1` | |
| kibana.istio.enabled | bool | `false` | |
| kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |

View File

@ -1,9 +1,9 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Updated _Fluent Bit_ OCI image to [v3.0.2](https://github.com/fluent/fluent-bit/releases/tag/v3.0.2)."
- kind: added
description: "Added events permission to ClusteRole"
apiVersion: v1
appVersion: 3.0.2
appVersion: 2.1.8
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
@ -24,4 +24,4 @@ maintainers:
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.46.2
version: 0.37.1

View File

@ -1,6 +1,3 @@
testFramework:
enabled: true
logLevel: debug
dashboards:

View File

@ -5,7 +5,7 @@
"builtIn": 1,
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"enable": true,
"hide": true,
@ -28,7 +28,7 @@
"collapsed": false,
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"gridPos": {
"h": 1,
@ -42,7 +42,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"refId": "A"
}
@ -144,7 +144,7 @@
"collapsed": false,
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"gridPos": {
"h": 1,
@ -158,7 +158,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"refId": "A"
}
@ -1171,7 +1171,7 @@
"collapsed": false,
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"gridPos": {
"h": 1,
@ -1185,7 +1185,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"refId": "A"
}
@ -1321,7 +1321,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"fieldConfig": {
"defaults": {
@ -1420,7 +1420,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"editorMode": "code",
"expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace=\"$namespace\",pod=~\"$pod\",container=\"fluent-bit\"}) by (pod)",
@ -1432,7 +1432,7 @@
{
"datasource": {
"type": "prometheus",
"uid": "$DS_PROMETHEUS"
"uid": "prometheus"
},
"editorMode": "code",
"expr": "avg(kube_pod_container_resource_requests{job=\"kube-state-metrics\",namespace=\"$namespace\",pod=~\"$pod\",container=\"fluent-bit\",resource=\"cpu\"})",

View File

@ -14,9 +14,7 @@ metadata:
{{- include "fluent-bit.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- range $key, $value := . }}
{{ printf "%s: %s" $key ((tpl $value $) | quote) }}
{{- end }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }}

View File

@ -17,11 +17,6 @@ spec:
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
{{- if (eq .Values.kind "DaemonSet") }}
{{- with .Values.service.internalTrafficPolicy }}
internalTrafficPolicy: {{ . }}
{{- end }}
{{- end }}
{{- if (eq .Values.service.type "LoadBalancer")}}
{{- with .Values.service.loadBalancerClass}}
loadBalancerClass: {{ . }}

View File

@ -13,7 +13,7 @@ spec:
jobLabel: app.kubernetes.io/instance
endpoints:
- port: http
path: {{ default "/api/v2/metrics/prometheus" .Values.serviceMonitor.path }}
path: /api/v1/metrics/prometheus
{{- with .Values.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}

View File

@ -5,19 +5,16 @@ metadata:
name: "{{ include "fluent-bit.fullname" . }}-test-connection"
namespace: {{ default .Release.Namespace .Values.testFramework.namespace }}
labels:
helm.sh/chart: {{ include "fluent-bit.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- include "fluent-bit.labels" . | nindent 4 }}
annotations:
helm.sh/hook: test
helm.sh/hook-delete-policy: hook-succeeded
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: {{ include "fluent-bit.image" .Values.testFramework.image | quote }}
imagePullPolicy: {{ .Values.testFramework.image.pullPolicy }}
command: ["sh"]
args: ["-c", "wget -O- {{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}"]
command: ['wget']
args: ['{{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}']
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}

View File

@ -12,7 +12,7 @@ image:
# Set to "-" to not use the default value
tag:
digest:
pullPolicy: IfNotPresent
pullPolicy: Always
testFramework:
enabled: true
@ -91,7 +91,6 @@ securityContext: {}
service:
type: ClusterIP
port: 2020
internalTrafficPolicy:
loadBalancerClass:
loadBalancerSourceRanges: []
labels: {}
@ -129,7 +128,7 @@ serviceMonitor:
# scheme: ""
# tlsConfig: {}
## Bear in mind if you want to collect metrics from a different port
## Beare in mind if youn want to collec metrics from a different port
## you will need to configure the new ports on the extraPorts property.
additionalEndpoints: []
# - port: metrics
@ -419,7 +418,7 @@ config:
Time_Key time
Time_Format %Y-%m-%dT%H:%M:%S.%L
# This allows adding more files with arbitrary filenames to /fluent-bit/etc/conf by providing key/value pairs.
# This allows adding more files with arbitary filenames to /fluent-bit/etc/conf by providing key/value pairs.
# The key becomes the filename, the value becomes the file content.
extraFiles: {}
# upstream.conf: |

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: v1.16.2
appVersion: v1.15.2
description: A Helm chart for Kubernetes
home: https://www.fluentd.org/
icon: https://www.fluentd.org/images/miscellany/fluentd-logo_2x.png
@ -12,4 +12,4 @@ name: fluentd
sources:
- https://github.com/fluent/fluentd/
- https://github.com/fluent/fluentd-kubernetes-daemonset
version: 0.5.2
version: 0.4.3

View File

@ -90,15 +90,3 @@ Name of the configMap used for additional configuration files; allows users to o
{{ printf "%s-%s" "fluentd-config" ( include "fluentd.shortReleaseName" . ) }}
{{- end -}}
{{- end -}}
{{/*
HPA ApiVersion according k8s version
Check legacy first so helm template / kustomize will default to latest version
*/}}
{{- define "fluentd.hpa.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "autoscaling/v2beta2") (semverCompare "<1.23-0" .Capabilities.KubeVersion.GitVersion) -}}
autoscaling/v2beta2
{{- else -}}
autoscaling/v2
{{- end -}}
{{- end -}}
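For reference, the API group the removed helper was probing can be checked directly against a cluster (a sketch; the output varies with the Kubernetes version):
```console
kubectl api-versions | grep ^autoscaling
# autoscaling/v1
# autoscaling/v2   <- present on >=1.23, where the helper emitted autoscaling/v2
```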

View File

@ -1,5 +1,5 @@
{{- define "fluentd.pod" -}}
{{- $defaultTag := printf "%s-debian-%s-1.0" (.Chart.AppVersion) (.Values.variant) -}}
{{- $defaultTag := printf "%s-debian-elasticsearch7-1.0" (.Chart.AppVersion) -}}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}

View File

@ -1,5 +1,5 @@
{{- if and ( eq .Values.kind "Deployment" ) .Values.autoscaling.enabled }}
apiVersion: {{ include "fluentd.hpa.apiVersion" . }}
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "fluentd.fullname" . }}

View File

@ -1,44 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "fluentd.fullname" . -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "fluentd.fullname" . }}
labels:
{{- include "fluentd.labels" . | nindent 4 }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
{{- with .secretName }}
secretName: {{ . }}
{{- end }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ $fullName }}
port:
number: {{ .port }}
{{ if .host -}}
host: {{ .host | quote }}
{{- end -}}
{{- end -}}
{{- end -}}

Some files were not shown because too many files have changed in this diff