Merge pull request 'merge WIP v1.26 to start use Renovate' (#91) from v1.26 into master

Reviewed-on: ZeroDownTime/kubezero#91
Stefan Reimer 2023-08-16 10:40:16 +00:00
commit a7ef468227
50 changed files with 590 additions and 240 deletions

View File

@ -37,7 +37,7 @@ build: ## Build the app
test: rm-test-image ## Execute Dockerfile.test
test -f Dockerfile.test && \
{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(_ARCH) . && \
{ buildah build --rm --layers -t $(REGISTRY)/$(IMAGE):$(TAG)-$(_ARCH)-test --from=$(REGISTRY)/$(IMAGE):$(TAG) -f Dockerfile.test --platform linux/$(_ARCH) . && \
podman run --rm --env-host -t $(REGISTRY)/$(IMAGE):$(TAG)-$(_ARCH)-test; } || \
echo "No Dockerfile.test found, skipping test"

View File

@ -1,9 +1,9 @@
ARG ALPINE_VERSION=3.17
ARG ALPINE_VERSION=3.18
FROM docker.io/alpine:${ALPINE_VERSION}
ARG ALPINE_VERSION
ARG KUBE_VERSION=1.25
ARG KUBE_VERSION=1.26
RUN cd /etc/apk/keys && \
wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \

admin/dev_apply.sh (new executable file, 22 lines)
View File

@ -0,0 +1,22 @@
#!/bin/bash
#set -eEx
#set -o pipefail
set -x
#VERSION="latest"
KUBE_VERSION="v1.26.6"
WORKDIR=$(mktemp -p /tmp -d kubezero.XXX)
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
# shellcheck disable=SC1091
. "$SCRIPT_DIR"/libhelm.sh
CHARTS="$(dirname $SCRIPT_DIR)/charts"
get_kubezero_values
# Always use embedded kubezero chart
helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $KUBE_VERSION --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
# CRDs first
_helm crds $1
_helm apply $1
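
A hedged usage sketch for this new helper: get_kubezero_values and _helm come from libhelm.sh, which is not part of this diff, so the meaning of the positional argument is an assumption here.

# render the embedded kubezero chart for v1.26 and apply a single module, e.g. the network module
./admin/dev_apply.sh network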

View File

@ -8,11 +8,6 @@ import yaml
def migrate(values):
"""Actual changes here"""
# Remove various keys as they have been merged into the metrics template
deleteKey(values["metrics"]['kube-prometheus-stack']["alertmanager"]["alertmanagerSpec"], "podMetadata")
deleteKey(values["metrics"]['kube-prometheus-stack']["alertmanager"], "config")
deleteKey(values["metrics"]['kube-prometheus-stack']["prometheus"]["prometheusSpec"], "externalLabels")
return values

View File

@ -1,7 +1,9 @@
#!/bin/bash -e
#!/bin/bash
set -eE
set -o pipefail
#VERSION="latest"
VERSION="v1.25"
VERSION="v1.26"
ARGO_APP=${1:-/tmp/new-kubezero-argoapp.yaml}
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
@ -35,9 +37,6 @@ spec:
hostIPC: true
hostPID: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
@ -122,9 +121,6 @@ spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
@ -147,14 +143,6 @@ argo_used && disable_argo
#all_nodes_upgrade ""
# Cleanup
# Remove calico CRDs
kubectl delete -f https://git.zero-downtime.net/ZeroDownTime/kubezero/raw/tag/v1.23.11/charts/kubezero-network/charts/calico/crds/crds.yaml 2>/dev/null || true
kubectl delete servicemonitor calico-node -n kube-system 2>/dev/null || true
# delete old kubelet configs
for cm in $(kubectl get cm -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete cm $cm -n kube-system; done
for rb in $(kubectl get rolebindings -n kube-system --no-headers | awk '{if ($1 ~ "kubelet-config-1*") print $1}'); do kubectl delete rolebindings $rb -n kube-system; done
control_plane_upgrade kubeadm_upgrade

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: clamav
description: Chart for deploying a ClamAV daemon on Kubernetes as a StatefulSet
type: application
version: 0.1.1
appVersion: 0.104.0
version: "0.2.0"
appVersion: "1.1.0"
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -13,6 +13,6 @@ maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.4"
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
kubeVersion: ">= 1.18.0"
kubeVersion: ">= 1.25.0"

View File

@ -16,7 +16,7 @@ clamav:
# clamav.image -- The clamav docker image
image: clamav/clamav
# clamav.version -- The clamav docker image version - defaults to .Chart.appVersion
version: "unstable"
# version: "unstable"
replicaCount: 1
@ -40,7 +40,7 @@ clamav:
# clamav.resources -- The resource requests and limits for the clamav service
requests:
cpu: 300m
memory: 1300M
memory: 2000M
#limits:
# cpu: 1500m
# memory: 2000M
# cpu: 2
# memory: 4000M

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubeadm
description: KubeZero Kubeadm cluster config
type: application
version: 1.25.8
version: 1.26.7
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -11,4 +11,4 @@ keywords:
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
kubeVersion: ">= 1.25.0"
kubeVersion: ">= 1.26.0"

View File

@ -0,0 +1,159 @@
#!/bin/sh
function createMasterAuditPolicy() {
path="templates/apiserver/audit-policy.yaml"
known_apis='
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "node.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "storage.k8s.io"'
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
# Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:cloud-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
- level: None
users: ["cluster-autoscaler"]
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["configmaps", "endpoints"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
- system:cloud-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
- /readyz
# Don't log events requests because of performance impact.
- level: None
resources:
- group: "" # core
resources: ["events"]
# node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
- level: Request
users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
- level: Request
userGroups: ["system:nodes"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
users: ["system:serviceaccount:kube-system:namespace-controller"]
verbs: ["deletecollection"]
omitStages:
- "RequestReceived"
# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps", "serviceaccounts/token"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources: ${known_apis}
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"
EOF
}
createMasterAuditPolicy
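
The generator writes the policy relative to its working directory, so it presumably runs from inside the kubeadm chart; a hedged sketch, with the script's file name assumed since it is not visible in this extract.

cd charts/kubeadm && sh create_audit_policy.sh   # regenerates templates/apiserver/audit-policy.yaml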

View File

@ -9,7 +9,7 @@ networking:
podSubnet: 10.244.0.0/16
etcd:
local:
imageTag: 3.5.4-0
# imageTag: 3.5.5-0
extraArgs:
### DNS discovery
#discovery-srv: {{ .Values.domain }}
@ -59,8 +59,11 @@ apiServer:
audit-policy-file: /etc/kubernetes/apiserver/audit-policy.yaml
audit-log-maxage: "7"
audit-log-maxsize: "100"
audit-log-maxbackup: "3"
audit-log-maxbackup: "1"
audit-log-compress: "true"
{{- if .Values.api.falco.enabled }}
audit-webhook-config-file: /etc/kubernetes/apiserver/audit-webhook.yaml
{{- end }}
tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
admission-control-config-file: /etc/kubernetes/apiserver/admission-configuration.yaml
api-audiences: {{ .Values.api.apiAudiences }}

View File

@ -1,6 +1,6 @@
{{- /* Feature gates for all control plane components */ -}}
{{- define "kubeadm.featuregates" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" "NodeOutOfServiceVolumeDetach" }}
{{- $gates := list "CustomCPUCFSQuotaPeriod" }}
{{- if eq .return "csv" }}
{{- range $key := $gates }}
{{- $key }}=true,

View File

@ -0,0 +1,7 @@
# Don't Log anything, but audit policy enabled
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
name: kubezero-auditpolicy
rules:
- level: None

View File

@ -1,7 +1,164 @@
# Don't Log anything, but audit policy enabled
apiVersion: audit.k8s.io/v1
kind: Policy
metadata:
name: kubezero-auditpolicy
rules:
- level: None
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services", "services/status"]
- level: None
# Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes", "nodes/status"]
- level: None
users:
- system:kube-controller-manager
- system:cloud-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
- level: None
users: ["cluster-autoscaler"]
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["configmaps", "endpoints"]
# Don't log HPA fetching metrics.
- level: None
users:
- system:kube-controller-manager
- system:cloud-controller-manager
verbs: ["get", "list"]
resources:
- group: "metrics.k8s.io"
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests because of performance impact.
- level: None
resources:
- group: "" # core
resources: ["events"]
# node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
- level: Request
users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
- level: Request
userGroups: ["system:nodes"]
verbs: ["update","patch"]
resources:
- group: "" # core
resources: ["nodes/status", "pods/status"]
omitStages:
- "RequestReceived"
# deletecollection calls can be large, don't log responses for expected namespace deletions
- level: Request
users: ["system:serviceaccount:kube-system:namespace-controller"]
verbs: ["deletecollection"]
omitStages:
- "RequestReceived"
# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps", "serviceaccounts/token"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
omitStages:
- "RequestReceived"
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "node.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for known APIs
- level: RequestResponse
resources:
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apiextensions.k8s.io"
- group: "apiregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "metrics.k8s.io"
- group: "networking.k8s.io"
- group: "node.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "scheduling.k8s.io"
- group: "storage.k8s.io"
omitStages:
- "RequestReceived"
# Default level for all other requests.
- level: Metadata
omitStages:
- "RequestReceived"

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: Config
clusters:
- name: falco
cluster:
server: http://falco-control-plane-k8saudit-webhook:9765/k8s-audit
contexts:
- context:
cluster: falco
user: ""
name: default-context
current-context: default-context
preferences: {}
users: []

View File

@ -1,4 +1,5 @@
spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: kube-apiserver
resources:

View File

@ -110,14 +110,12 @@ spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
containers:
- name: aws-iam-authenticator
image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.11
image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.10
args:
- server
- --backend-mode=CRD,MountedFile

View File

@ -25,6 +25,9 @@ api:
workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
falco:
enabled: false
etcd:
nodeName: etcd
state: new
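
The api.falco.enabled flag added in this hunk only wires the audit webhook into the api-server (see the extraArgs hunk above); the receiving Falco deployment is enabled separately via the addons chart. A hedged sketch for checking the rendered output locally; the checkout-relative chart path and the output directory are assumptions.

helm template charts/kubeadm --kube-version 1.26.7 --set api.falco.enabled=true --output-dir /tmp/kubeadm-rendered
grep -r audit-webhook /tmp/kubeadm-rendered   # the kubeconfig and the --audit-webhook-config-file flag should show up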

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.7.5
appVersion: v1.25
version: 0.8.0
appVersion: v1.26
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -15,6 +15,7 @@ keywords:
- sealed-secrets
- external-dns
- aws-node-termination-handler
- falco
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@ -44,4 +45,9 @@ dependencies:
version: 1.3.0
# repository: https://twin.github.io/helm-charts
condition: aws-eks-asg-rolling-update-handler.enabled
kubeVersion: ">= 1.25.0"
- name: falco
version: 3.3.0
repository: https://falcosecurity.github.io/charts
condition: falco-control-plane.enabled
alias: falco-control-plane
kubeVersion: ">= 1.26.0"

View File

@ -66,9 +66,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| aws-eks-asg-rolling-update-handler.resources.requests.cpu | string | `"10m"` | |
| aws-eks-asg-rolling-update-handler.resources.requests.memory | string | `"32Mi"` | |
| aws-eks-asg-rolling-update-handler.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-eks-asg-rolling-update-handler.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-eks-asg-rolling-update-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-node-termination-handler.deleteLocalData | bool | `true` | |
| aws-node-termination-handler.emitKubernetesEvents | bool | `true` | |
| aws-node-termination-handler.enableProbesServer | bool | `true` | |
@ -93,9 +91,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| aws-node-termination-handler.rbac.pspEnabled | bool | `false` | |
| aws-node-termination-handler.taintNode | bool | `true` | |
| aws-node-termination-handler.tolerations[0].effect | string | `"NoSchedule"` | |
| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| aws-node-termination-handler.tolerations[1].effect | string | `"NoSchedule"` | |
| aws-node-termination-handler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-node-termination-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| aws-node-termination-handler.useProviderId | bool | `true` | |
| awsNeuron.enabled | bool | `false` | |
| awsNeuron.image.name | string | `"public.ecr.aws/neuron/neuron-device-plugin"` | |
@ -115,9 +111,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| cluster-autoscaler.serviceMonitor.enabled | bool | `false` | |
| cluster-autoscaler.serviceMonitor.interval | string | `"30s"` | |
| cluster-autoscaler.tolerations[0].effect | string | `"NoSchedule"` | |
| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cluster-autoscaler.tolerations[1].effect | string | `"NoSchedule"` | |
| cluster-autoscaler.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cluster-autoscaler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| clusterBackup.enabled | bool | `false` | |
| clusterBackup.extraEnv | list | `[]` | |
| clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
@ -129,9 +123,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| external-dns.provider | string | `"inmemory"` | |
| external-dns.sources[0] | string | `"service"` | |
| external-dns.tolerations[0].effect | string | `"NoSchedule"` | |
| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| external-dns.tolerations[1].effect | string | `"NoSchedule"` | |
| external-dns.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| external-dns.triggerLoopOnEvent | bool | `true` | |
| forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" |
| forseti.aws.region | string | `""` | |
@ -171,6 +163,4 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| sealed-secrets.resources.requests.cpu | string | `"10m"` | |
| sealed-secrets.resources.requests.memory | string | `"24Mi"` | |
| sealed-secrets.tolerations[0].effect | string | `"NoSchedule"` | |
| sealed-secrets.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| sealed-secrets.tolerations[1].effect | string | `"NoSchedule"` | |
| sealed-secrets.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| sealed-secrets.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |

View File

@ -55,8 +55,6 @@ spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
restartPolicy: Never

View File

@ -69,8 +69,6 @@ spec:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
volumes:

View File

@ -47,8 +47,6 @@ sealed-secrets:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
@ -88,8 +86,6 @@ aws-eks-asg-rolling-update-handler:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
@ -98,8 +94,9 @@ aws-node-termination-handler:
fullnameOverride: "aws-node-termination-handler"
# -- "aws-node-termination-handler/${ClusterName}"
managedTag: "aws-node-termination-handler/managed"
checkASGTagBeforeDraining: false
# -- "zdt:kubezero:nth:${ClusterName}"
managedTag: "zdt:kubezero:nth:${ClusterName}"
useProviderId: true
enableSqsTerminationDraining: true
@ -132,8 +129,6 @@ aws-node-termination-handler:
logFormatVersion: 2
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -218,8 +213,6 @@ cluster-autoscaler:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
@ -250,8 +243,6 @@ external-dns:
triggerLoopOnEvent: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -263,3 +254,72 @@ external-dns:
#- istio-gateway
provider: inmemory
falco-control-plane:
enabled: false
fullnameOverride: falco-control-plane
# -- Disable the drivers since we want to deploy only the k8saudit plugin.
driver:
enabled: false
# -- Disable the collectors, no syscall events to enrich with metadata.
collectors:
enabled: false
nodeSelector:
node-role.kubernetes.io/control-plane: ""
# -- Deploy Falco as a deployment. One instance of Falco is enough; the number of replicas is configurable anyway.
controller:
kind: deployment
deployment:
# -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing.
# For more info check the section on Plugins in the README.md file.
replicas: 1
falcoctl:
artifact:
install:
# -- Enable the init container. We do not recommend installing (or following) plugins for security reasons since they are executable objects.
enabled: true
follow:
# -- Enable the sidecar container. We do not support it yet for plugins; it is used only for rules feeds such as k8saudit-rules.
enabled: true
config:
artifact:
install:
# -- Do not resolve the dependencies for artifacts. By default this is true, but for our use case we disable it.
resolveDeps: false
# -- List of artifacts to be installed by the falcoctl init container.
# Only rulesfiles; we do not recommend plugins for security reasons since they are executable objects.
refs: [k8saudit-rules:0.6]
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
# Only rulesfiles; we do not recommend plugins for security reasons since they are executable objects.
refs: [k8saudit-rules:0.6]
services:
- name: k8saudit-webhook
ports:
- port: 9765 # See plugin open_params
protocol: TCP
falco:
rules_file:
- /etc/falco/k8s_audit_rules.yaml
- /etc/falco/rules.d
plugins:
- name: k8saudit
library_path: libk8saudit.so
init_config:
maxEventBytes: 1048576
# sslCertificate: /etc/falco/falco.pem
open_params: "http://:9765/k8s-audit"
- name: json
library_path: libjson.so
init_config: ""
# Plugins that Falco will load. Note: the same plugins are installed by the falcoctl-artifact-install init container.
load_plugins: [k8saudit, json]
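
A hedged smoke test once the addon is enabled: the Service name matches the kubeconfig added for the api-server earlier in this commit, while the namespace and the Deployment name (derived from fullnameOverride) are assumptions.

kubectl -n kube-system get svc falco-control-plane-k8saudit-webhook
kubectl -n kube-system logs deploy/falco-control-plane --tail=20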

View File

@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero ArgoCD - config, branding, image-updater (optional)
name: kubezero-argocd
version: 0.12.0
version: 0.13.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -17,13 +17,13 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: argo-cd
version: 5.28.2
version: 5.37.1
repository: https://argoproj.github.io/argo-helm
- name: argocd-apps
version: 0.0.9
version: 1.2.0
repository: https://argoproj.github.io/argo-helm
- name: argocd-image-updater
version: 0.8.5
version: 0.9.1
repository: https://argoproj.github.io/argo-helm
condition: argocd-image-updater.enabled
kubeVersion: ">= 1.25.0"
kubeVersion: ">= 1.26.0"

View File

@ -26,6 +26,7 @@ argo-cd:
configs:
styles: |
.sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
.sidebar__logo__text-logo { height: 0em; }
.sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
cm:

View File

@ -34,9 +34,7 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make
|-----|------|---------|-------------|
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.cainjector.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.enabled | bool | `true` | |
| cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.global.leaderElection.namespace | string | `"cert-manager"` | |
@ -46,14 +44,10 @@ If your resolvers need additional sercrets like CloudFlare API tokens etc. make
| cert-manager.prometheus.servicemonitor.enabled | bool | `false` | |
| cert-manager.startupapicheck.enabled | bool | `false` | |
| cert-manager.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.webhook.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cert-manager.webhook.tolerations[1].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| clusterIssuer | object | `{}` | |
| localCA.enabled | bool | `false` | |
| localCA.selfsigning | bool | `true` | |

View File

@ -49,8 +49,6 @@ cert-manager:
# readOnly: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -62,8 +60,6 @@ cert-manager:
webhook:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -71,8 +67,6 @@ cert-manager:
cainjector:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:

View File

@ -2,14 +2,14 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
version: 0.6.3
version: 0.7.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- jenkins
- goCD
- gitea
- renovate
maintainers:
- name: Stefan Reimer
email: stefan@zero-downtime.net
@ -17,20 +17,20 @@ dependencies:
- name: kubezero-lib
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: gocd
version: 1.40.8
repository: https://gocd.github.io/helm-chart
condition: gocd.enabled
- name: gitea
version: 8.3.0
version: 9.1.0
repository: https://dl.gitea.io/charts/
condition: gitea.enabled
- name: jenkins
version: 4.3.24
version: 4.5.0
repository: https://charts.jenkins.io
condition: jenkins.enabled
- name: trivy
version: 0.7.0
repository: https://aquasecurity.github.io/helm-charts/
condition: trivy.enabled
kubeVersion: ">= 1.24.0"
- name: renovate
version: 36.31.0
repository: https://docs.renovatebot.com/helm-charts
condition: renovate.enabled
kubeVersion: ">= 1.25.0"

View File

@ -21,7 +21,7 @@ Kubernetes: `>= 1.24.0`
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.7.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://charts.jenkins.io | jenkins | 4.3.24 |
| https://dl.gitea.io/charts/ | gitea | 8.2.0 |
| https://dl.gitea.io/charts/ | gitea | 8.3.0 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 |
# Jenkins
@ -101,9 +101,8 @@ Kubernetes: `>= 1.24.0`
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3937.vd7b_82db_e347b_"` | |
| jenkins.controller.installPlugins[10] | string | `"build-discarder:139.v05696a_7fe240"` | |
| jenkins.controller.installPlugins[11] | string | `"dark-theme:315.va_22e7d692ea_a"` | |
| jenkins.controller.installPlugins[12] | string | `"kubernetes-credentials-provider:1.211.vc236a_f5a_2f3c"` | |
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
| jenkins.controller.installPlugins[2] | string | `"git:5.0.2"` | |
| jenkins.controller.installPlugins[2] | string | `"git:5.1.0"` | |
| jenkins.controller.installPlugins[3] | string | `"basic-branch-build-strategies:71.vc1421f89888e"` | |
| jenkins.controller.installPlugins[4] | string | `"pipeline-graph-view:183.v9e27732d970f"` | |
| jenkins.controller.installPlugins[5] | string | `"pipeline-stage-view:2.32"` | |
@ -112,7 +111,7 @@ Kubernetes: `>= 1.24.0`
| jenkins.controller.installPlugins[8] | string | `"prometheus:2.2.3"` | |
| jenkins.controller.installPlugins[9] | string | `"htmlpublisher:1.31"` | |
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=300 --sessionEviction=10800"` | |
| jenkins.controller.prometheus.enabled | bool | `false` | |
| jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
| jenkins.controller.resources.requests.cpu | string | `"250m"` | |

View File

@ -1,18 +0,0 @@
{{- if and .Values.gocd.enabled .Values.gocd.istio.enabled }}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: {{ include "kubezero-lib.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
gateways:
- {{ .Values.gocd.istio.gateway }}
hosts:
- {{ .Values.gocd.istio.url }}
http:
- route:
- destination:
host: gocd-server
{{- end }}

View File

@ -1,24 +1,23 @@
gocd:
enabled: false
server:
service:
type: "ClusterIP"
ingress:
enabled: false
istio:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: "" # gocd.example.com
gitea:
enabled: false
image:
#image:
#tag: 1.17.4
rootless: true
#rootless: true
repliaCount: 1
# We use RWO persistence
strategy:
type: "Recreate"
# Since v9 the chart defaults to RWX and a Deployment; we keep the old existing RWO volume from the StatefulSet
persistence:
enabled: true
mount: true
create: false
#claimName: <set per install>
size: 4Gi
securityContext:
allowPrivilegeEscalation: false
@ -28,10 +27,6 @@ gitea:
add:
- SYS_CHROOT
persistence:
enabled: true
size: 4Gi
resources:
requests:
cpu: "150m"
@ -56,15 +51,17 @@ gitea:
DB_TYPE: sqlite3
cache:
ADAPTER: memory
session:
PROVIDER: memory
queue:
TYPE: level
memcached:
redis-cluster:
enabled: false
postgresql-ha:
enabled: false
postgresql:
enabled: false
mysql:
enabled: false
mariadb:
enabled: false
istio:
enabled: false
@ -121,19 +118,19 @@ jenkins:
numToKeepStr: "10"
installPlugins:
- kubernetes:3937.vd7b_82db_e347b_
- kubernetes:3985.vd26d77b_2a_48a_
- kubernetes-credentials-provider:1.225.v14f9e6b_28f53
- workflow-aggregator:581.v0c46fa_697ffd
- git:5.1.0
- basic-branch-build-strategies:71.vc1421f89888e
- git:5.2.0
- basic-branch-build-strategies:81.v05e333931c7d
- pipeline-graph-view:183.v9e27732d970f
- pipeline-stage-view:2.32
- configuration-as-code:1647.ve39ca_b_829b_42
- antisamy-markup-formatter:159.v25b_c67cd35fb_
- pipeline-stage-view:2.33
- configuration-as-code:1670.v564dc8b_982d0
- antisamy-markup-formatter:162.v0e6ec0fcfcf6
- prometheus:2.2.3
- htmlpublisher:1.31
- htmlpublisher:1.32
- build-discarder:139.v05696a_7fe240
- dark-theme:315.va_22e7d692ea_a
- kubernetes-credentials-provider:1.211.vc236a_f5a_2f3c
- dark-theme:336.v02165cd8c2ee
serviceAccountAgent:
create: true
@ -142,7 +139,7 @@ jenkins:
# Preconfigure agents to use zdt podman requires fuse/overlayfs
agent:
image: public.ecr.aws/zero-downtime/jenkins-podman
tag: v0.4.2
tag: v0.4.3
#alwaysPullImage: true
podRetention: "Default"
showRawYaml: false
@ -237,3 +234,16 @@ trivy:
size: 1Gi
rbac:
create: false
renovate:
enabled: false
env:
LOG_FORMAT: json
cronjob:
concurrencyPolicy: Forbid
jobBackoffLimit: 3
schedule: "0 3 * * *"
successfulJobsHistoryLimit: 1
securityContext:
fsGroup: 1000
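
A hedged sketch of a per-cluster override that actually runs the new Renovate cron job. The RENOVATE_* variables are standard Renovate self-hosted environment configuration and are not part of this commit; a RENOVATE_TOKEN secret would be needed on top.

cat <<'EOF' > renovate-values.yaml
renovate:
  enabled: true
  env:
    LOG_FORMAT: json
    RENOVATE_AUTODISCOVER: "true"
    RENOVATE_PLATFORM: gitea
    RENOVATE_ENDPOINT: https://git.example.com/api/v1
EOF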

View File

@ -43,9 +43,7 @@ Kubernetes: `>= 1.25.0`
| istiod.pilot.resources.requests.cpu | string | `"100m"` | |
| istiod.pilot.resources.requests.memory | string | `"128Mi"` | |
| istiod.pilot.tolerations[0].effect | string | `"NoSchedule"` | |
| istiod.pilot.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| istiod.pilot.tolerations[1].effect | string | `"NoSchedule"` | |
| istiod.pilot.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| istiod.pilot.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| istiod.telemetry.enabled | bool | `false` | |
| kiali-server.auth.strategy | string | `"anonymous"` | |
| kiali-server.deployment.ingress_enabled | bool | `false` | |

View File

@ -16,10 +16,8 @@ istiod:
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
resources:
requests:

View File

@ -61,9 +61,7 @@ Kubernetes: `>= 1.24.0`
| eck-operator.installCRDs | bool | `false` | |
| eck-operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| eck-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| eck-operator.tolerations[1].effect | string | `"NoSchedule"` | |
| eck-operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| eck-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| elastic_password | string | `""` | |
| es.nodeSets | list | `[]` | |
| es.prometheus | bool | `false` | |

View File

@ -5,8 +5,6 @@ eck-operator:
enabled: false
installCRDs: false
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:

View File

@ -126,9 +126,7 @@ Kubernetes: `>= 1.25.0`
| kube-prometheus-stack.grafana.testFramework.enabled | bool | `false` | |
| kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[1].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | |
@ -172,9 +170,7 @@ Kubernetes: `>= 1.25.0`
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.logFormat | string | `"json"` | |
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
@ -182,9 +178,7 @@ Kubernetes: `>= 1.25.0`
| kube-prometheus-stack.prometheusOperator.resources.requests.cpu | string | `"20m"` | |
| kube-prometheus-stack.prometheusOperator.resources.requests.memory | string | `"32Mi"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[1].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| prometheus-adapter.enabled | bool | `true` | |
| prometheus-adapter.logLevel | int | `1` | |
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
@ -204,9 +198,7 @@ Kubernetes: `>= 1.25.0`
| prometheus-adapter.rules.resource.memory.resources.overrides.pod.resource | string | `"pod"` | |
| prometheus-adapter.rules.resource.window | string | `"5m"` | |
| prometheus-adapter.tolerations[0].effect | string | `"NoSchedule"` | |
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| prometheus-adapter.tolerations[1].effect | string | `"NoSchedule"` | |
| prometheus-adapter.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| prometheus-adapter.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| prometheus-pushgateway.enabled | bool | `false` | |
| prometheus-pushgateway.serviceMonitor.enabled | bool | `true` | |

View File

@ -50,8 +50,6 @@ kube-prometheus-stack:
# Run on controller nodes
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -66,8 +64,6 @@ kube-prometheus-stack:
admissionWebhooks:
patch:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -210,8 +206,6 @@ kube-prometheus-stack:
# Assign state metrics to control plane
kube-state-metrics:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -327,8 +321,6 @@ prometheus-adapter:
prometheus:
url: http://metrics-kube-prometheus-st-prometheus
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-mq
description: KubeZero umbrella chart for MQ systems like NATS, RabbitMQ
type: application
version: 0.3.5
version: 0.3.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -22,11 +22,11 @@ dependencies:
#repository: https://nats-io.github.io/k8s/helm/charts/
condition: nats.enabled
- name: rabbitmq
version: 11.3.2
version: 12.0.3
repository: https://charts.bitnami.com/bitnami
condition: rabbitmq.enabled
- name: rabbitmq-cluster-operator
version: 3.1.4
repository: https://charts.bitnami.com/bitnami
condition: rabbitmq-cluster-operator.enabled
kubeVersion: ">= 1.20.0"
kubeVersion: ">= 1.25.0"

View File

@ -1,6 +1,8 @@
#!/bin/bash
set -ex
helm dep update
## NATS
NATS_VERSION=0.8.4

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-network
description: KubeZero umbrella chart for all things network
type: application
version: 0.4.3
version: 0.4.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -19,11 +19,11 @@ dependencies:
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts/
- name: cilium
version: 1.13.1
version: 1.13.5
repository: https://helm.cilium.io/
condition: cilium.enabled
- name: metallb
version: 0.13.9
repository: https://metallb.github.io/metallb
condition: metallb.enabled
kubeVersion: ">= 1.25.0"
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero-network
![Version: 0.4.3](https://img.shields.io/badge/Version-0.4.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.4.4](https://img.shields.io/badge/Version-0.4.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things network
@ -19,7 +19,7 @@ Kubernetes: `>= 1.25.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
| https://helm.cilium.io/ | cilium | 1.13.1 |
| https://helm.cilium.io/ | cilium | 1.13.4 |
| https://metallb.github.io/metallb | metallb | 0.13.9 |
## Values
@ -33,7 +33,6 @@ Kubernetes: `>= 1.25.0`
| cilium.cni.binPath | string | `"/usr/libexec/cni"` | |
| cilium.cni.exclusive | bool | `false` | |
| cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` | |
| cilium.containerRuntime.integration | string | `"crio"` | |
| cilium.enabled | bool | `false` | |
| cilium.hubble.enabled | bool | `false` | |
| cilium.hubble.relay.enabled | bool | `false` | |
@ -42,6 +41,7 @@ Kubernetes: `>= 1.25.0`
| cilium.hubble.tls.auto.certManagerIssuerRef.name | string | `"kubezero-local-ca-issuer"` | |
| cilium.hubble.tls.auto.method | string | `"cert-manager"` | |
| cilium.hubble.ui.enabled | bool | `false` | |
| cilium.image.useDigest | bool | `false` | |
| cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` | |
| cilium.l7Proxy | bool | `false` | |
| cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
@ -49,22 +49,17 @@ Kubernetes: `>= 1.25.0`
| cilium.operator.prometheus.serviceMonitor.enabled | bool | `false` | |
| cilium.operator.replicas | int | `1` | |
| cilium.operator.tolerations[0].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| cilium.operator.tolerations[1].effect | string | `"NoSchedule"` | |
| cilium.operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| cilium.prometheus.enabled | bool | `false` | |
| cilium.prometheus.port | int | `9091` | |
| cilium.prometheus.serviceMonitor.enabled | bool | `false` | |
| cilium.resources.limits.memory | string | `"1024Mi"` | |
| cilium.resources.requests.cpu | string | `"10m"` | |
| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.securityContext.privileged | bool | `true` | |
| cilium.tunnel | string | `"geneve"` | |
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| metallb.controller.tolerations[1].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| metallb.enabled | bool | `false` | |
| metallb.ipAddressPools | list | `[]` | |
| multus.clusterNetwork | string | `"cilium"` | |

View File

@ -3,8 +3,6 @@ metallb:
controller:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:
@ -27,12 +25,9 @@ multus:
cilium:
enabled: false
containerRuntime:
integration: crio
# remove with 1.26
securityContext:
privileged: true
# breaks preloaded images otherwise
image:
useDigest: false
resources:
requests:
@ -85,8 +80,6 @@ cilium:
operator:
replicas: 1
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:

View File

@ -44,9 +44,7 @@ Kubernetes: `>= 1.24.0`
| pxc-operator.resources.requests.cpu | string | `"50m"` | |
| pxc-operator.resources.requests.memory | string | `"32Mi"` | |
| pxc-operator.tolerations[0].effect | string | `"NoSchedule"` | |
| pxc-operator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| pxc-operator.tolerations[1].effect | string | `"NoSchedule"` | |
| pxc-operator.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` | |
| pxc-operator.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |
| pxc-operator.watchAllNamespaces | bool | `true` | |
# Changes

View File

@ -6,8 +6,6 @@ pxc-operator:
# running on the control-plane
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
nodeSelector:

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
version: 1.25.8-2
version: 1.26.7
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -15,4 +15,4 @@ dependencies:
- name: kubezero-lib
version: ">= 0.1.6"
repository: https://cdn.zero-downtime.net/charts
kubeVersion: ">= 1.25.0"
kubeVersion: ">= 1.26.0"

View File

@ -1,6 +1,6 @@
# kubezero
![Version: 1.25.8-2](https://img.shields.io/badge/Version-1.25.8--2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 1.26.6](https://img.shields.io/badge/Version-1.26.6-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero - Root App of Apps chart
@ -14,7 +14,7 @@ KubeZero - Root App of Apps chart
## Requirements
Kubernetes: `>= 1.25.0`
Kubernetes: `>= 1.26.0`
| Repository | Name | Version |
|------------|------|---------|
@ -66,12 +66,13 @@ Kubernetes: `>= 1.25.0`
| metrics.enabled | bool | `false` | |
| metrics.istio.grafana | object | `{}` | |
| metrics.istio.prometheus | object | `{}` | |
| metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigs | list | `[]` | |
| metrics.namespace | string | `"monitoring"` | |
| metrics.targetRevision | string | `"0.9.2"` | |
| network.cilium.cluster | object | `{}` | |
| network.enabled | bool | `true` | |
| network.retain | bool | `true` | |
| network.targetRevision | string | `"0.4.3"` | |
| network.targetRevision | string | `"0.4.4"` | |
| storage.aws-ebs-csi-driver.enabled | bool | `false` | |
| storage.aws-efs-csi-driver.enabled | bool | `false` | |
| storage.enabled | bool | `false` | |

View File

@ -131,6 +131,11 @@ sealed-secrets:
{{- end }}
{{- end }}
{{- with index .Values "addons" "falco-control-plane" }}
falco-control-plane:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- if .Values.global.aws }}
# AWS only
aws-node-termination-handler:
@ -145,7 +150,7 @@ aws-node-termination-handler:
{{- end }}
queueURL: "https://sqs.{{ .Values.global.aws.region }}.amazonaws.com/{{ .Values.global.aws.accountId }}/{{ .Values.global.clusterName }}_Nth"
managedTag: "aws-node-termination-handler/{{ .Values.global.clusterName }}"
managedTag: "zdt:kubezero:nth:{{ .Values.global.clusterName }}"
extraEnv:
- name: AWS_ROLE_ARN
value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.awsNth"

View File

@ -50,7 +50,7 @@ prometheus:
region: {{ .global.aws.region }}
filters:
- name: 'tag-key'
values: ['zdt:prometheus.crio']
values: ['zdt:prometheus:crio']
{{- with .metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigsEC2Filters }}
{{- toYaml . | nindent 14 }}
{{- end }}

View File

@ -10,7 +10,7 @@ global:
addons:
enabled: true
targetRevision: 0.7.5
targetRevision: 0.8.0
external-dns:
enabled: false
forseti:
@ -25,11 +25,13 @@ addons:
enabled: false
aws-eks-asg-rolling-update-handler:
enabled: false
falco-control-plane:
enabled: false
network:
enabled: true
retain: true
targetRevision: 0.4.3
targetRevision: 0.4.5
cilium:
cluster: {}
@ -94,7 +96,7 @@ logging:
argocd:
enabled: false
namespace: argocd
targetRevision: 0.12.1
targetRevision: 0.13.0
argocd-image-updater:
enabled: false
istio:

View File

@ -27,6 +27,7 @@ Something along the lines of https://github.com/onfido/k8s-cleanup which doesnt
## Resources
- https://docs.google.com/spreadsheets/d/1WPHt0gsb7adVzY3eviMK2W8LejV0I5m_Zpc8tMzl_2w/edit#gid=0
- https://github.com/ishantanu/awesome-kubectl-plugins
- https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/configure-helper.sh
## Update Api-server config
Add the following extraArgs to the ClusterConfiguration configMap in the kube-system namespace:
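
A hedged companion command: the ClusterConfiguration is stored in the kubeadm-config ConfigMap; the specific extraArgs this sentence refers to sit outside this hunk, so they are not repeated here.

kubectl -n kube-system edit configmap kubeadm-config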

View File

@ -1,7 +1,7 @@
#!/bin/bash
set -ex
REPO_URL_S3="s3://zero-downtime-web/cdn/charts"
REPO_URL_S3="s3://zero-downtime-web-cdn/charts"
REPO_URL="https://cdn.zero-downtime.net/charts"
CHARTS=${1:-'.*'}
@ -55,6 +55,6 @@ function publish_chart() {
publish_chart
CF_DIST=E1YFUJXMCXT2RN
CF_DIST=E11OFTOA3L8IVY
aws cloudfront create-invalidation --distribution $CF_DIST --paths "/charts/*"