WIP: 1.26.7
parent 5c198676c9
commit ede7f022ee

admin/dev_apply.sh (new executable file, 22 lines)
@@ -0,0 +1,22 @@
+#!/bin/bash
+#set -eEx
+#set -o pipefail
+set -x
+
+#VERSION="latest"
+KUBE_VERSION="v1.26.6"
+WORKDIR=$(mktemp -p /tmp -d kubezero.XXX)
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+# shellcheck disable=SC1091
+. "$SCRIPT_DIR"/libhelm.sh
+CHARTS="$(dirname $SCRIPT_DIR)/charts"
+
+get_kubezero_values
+
+# Always use embedded kubezero chart
+helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $KUBE_VERSION --version ~$KUBE_VERSION --devel --output-dir $WORKDIR
+
+# CRDs first
+_helm crds $1
+_helm apply $1
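
A minimal usage sketch for the new helper, assuming libhelm.sh provides get_kubezero_values, _helm crds and _helm apply, and that $1 names a KubeZero module (both inferred from the calls above, not stated in this diff):

    # Render the embedded kubezero chart for the newest ~v1.26.6 release
    # and apply CRDs first, then the resources, for one module:
    ./admin/dev_apply.sh network

The --version ~$KUBE_VERSION constraint resolves to the latest 1.26.x chart, and --devel also admits pre-release builds, which is what makes this a dev-apply tool.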

charts/kubeadm/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubeadm
 description: KubeZero Kubeadm cluster config
 type: application
-version: 1.26.6
+version: 1.26.7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

charts/kubeadm/create_audit_policy.sh (new executable file, 159 lines)
@@ -0,0 +1,159 @@
+#!/bin/sh
+
+function createMasterAuditPolicy() {
+  path="templates/apiserver/audit-policy.yaml"
+
+  known_apis='
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "node.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "scheduling.k8s.io"
+      - group: "storage.k8s.io"'
+
+  cat <<EOF >"${path}"
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+  # The following requests were manually identified as high-volume and low-risk,
+  # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
+  - level: None
+    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
+    # TODO(#46983): Change this to the ingress controller service account.
+    users: ["system:unsecured"]
+    namespaces: ["kube-system"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps"]
+  - level: None
+    users: ["kubelet"] # legacy kubelet identity
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    userGroups: ["system:nodes"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+      - system:kube-scheduler
+      - system:serviceaccount:kube-system:endpoint-controller
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints"]
+  - level: None
+    users: ["system:apiserver"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+  - level: None
+    users: ["cluster-autoscaler"]
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps", "endpoints"]
+  # Don't log HPA fetching metrics.
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+    verbs: ["get", "list"]
+    resources:
+      - group: "metrics.k8s.io"
+
+  # Don't log these read-only URLs.
+  - level: None
+    nonResourceURLs:
+      - /healthz*
+      - /version
+      - /swagger*
+      - /readyz
+
+  # Don't log events requests because of performance impact.
+  - level: None
+    resources:
+      - group: "" # core
+        resources: ["events"]
+
+  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+  - level: Request
+    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+  - level: Request
+    userGroups: ["system:nodes"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+
+  # deletecollection calls can be large, don't log responses for expected namespace deletions
+  - level: Request
+    users: ["system:serviceaccount:kube-system:namespace-controller"]
+    verbs: ["deletecollection"]
+    omitStages:
+      - "RequestReceived"
+
+  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
+  # so only log at the Metadata level.
+  - level: Metadata
+    resources:
+      - group: "" # core
+        resources: ["secrets", "configmaps", "serviceaccounts/token"]
+      - group: authentication.k8s.io
+        resources: ["tokenreviews"]
+    omitStages:
+      - "RequestReceived"
+  # Get responses can be large; skip them.
+  - level: Request
+    verbs: ["get", "list", "watch"]
+    resources: ${known_apis}
+    omitStages:
+      - "RequestReceived"
+  # Default level for known APIs
+  - level: RequestResponse
+    resources: ${known_apis}
+    omitStages:
+      - "RequestReceived"
+  # Default level for all other requests.
+  - level: Metadata
+    omitStages:
+      - "RequestReceived"
+EOF
+}
+
+createMasterAuditPolicy
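
The function only expands ${known_apis} into the two catch-all rules and writes the result to templates/apiserver/audit-policy.yaml. A quick generate-and-validate sketch (the python check is illustrative, not part of the repo):

    cd charts/kubeadm
    ./create_audit_policy.sh
    python3 -c 'import yaml; yaml.safe_load(open("templates/apiserver/audit-policy.yaml"))' && echo valid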

@@ -9,7 +9,7 @@ networking:
   podSubnet: 10.244.0.0/16
 etcd:
   local:
-    imageTag: 3.5.4-0
+    # imageTag: 3.5.5-0
     extraArgs:
       ### DNS discovery
       #discovery-srv: {{ .Values.domain }}

@@ -59,8 +59,11 @@ apiServer:
     audit-policy-file: /etc/kubernetes/apiserver/audit-policy.yaml
     audit-log-maxage: "7"
     audit-log-maxsize: "100"
-    audit-log-maxbackup: "3"
+    audit-log-maxbackup: "1"
     audit-log-compress: "true"
+    {{- if .Values.api.falco.enabled }}
+    audit-webhook-config-file: /etc/kubernetes/apiserver/audit-webhook.yaml
+    {{- end }}
     tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
     admission-control-config-file: /etc/kubernetes/apiserver/admission-configuration.yaml
     api-audiences: {{ .Values.api.apiAudiences }}
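
With api.falco.enabled set, the rendered apiserver extraArgs gain the audit-webhook-config-file entry next to the file-based audit log. A quick render check (a sketch; the chart may need additional values to template cleanly):

    helm template charts/kubeadm --set api.falco.enabled=true \
      | grep -E 'audit-(policy|log|webhook)'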

charts/kubeadm/templates/apiserver/audit-policy-off.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
+# Don't Log anything, but audit policy enabled
+apiVersion: audit.k8s.io/v1
+kind: Policy
+metadata:
+  name: kubezero-auditpolicy
+rules:
+- level: None

charts/kubeadm/templates/apiserver/audit-policy.yaml
@@ -1,7 +1,164 @@
-# Don't Log anything, but audit policy enabled
 apiVersion: audit.k8s.io/v1
 kind: Policy
 metadata:
   name: kubezero-auditpolicy
 rules:
-- level: None
+  # The following requests were manually identified as high-volume and low-risk,
+  # so drop them.
+  - level: None
+    users: ["system:kube-proxy"]
+    verbs: ["watch"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints", "services", "services/status"]
+  - level: None
+    # Ingress controller reads 'configmaps/ingress-uid' through the unsecured port.
+    # TODO(#46983): Change this to the ingress controller service account.
+    users: ["system:unsecured"]
+    namespaces: ["kube-system"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps"]
+  - level: None
+    users: ["kubelet"] # legacy kubelet identity
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    userGroups: ["system:nodes"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["nodes", "nodes/status"]
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+      - system:kube-scheduler
+      - system:serviceaccount:kube-system:endpoint-controller
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["endpoints"]
+  - level: None
+    users: ["system:apiserver"]
+    verbs: ["get"]
+    resources:
+      - group: "" # core
+        resources: ["namespaces", "namespaces/status", "namespaces/finalize"]
+  - level: None
+    users: ["cluster-autoscaler"]
+    verbs: ["get", "update"]
+    namespaces: ["kube-system"]
+    resources:
+      - group: "" # core
+        resources: ["configmaps", "endpoints"]
+  # Don't log HPA fetching metrics.
+  - level: None
+    users:
+      - system:kube-controller-manager
+      - system:cloud-controller-manager
+    verbs: ["get", "list"]
+    resources:
+      - group: "metrics.k8s.io"
+
+  # Don't log these read-only URLs.
+  - level: None
+    nonResourceURLs:
+      - /healthz*
+      - /version
+      - /swagger*
+
+  # Don't log events requests because of performance impact.
+  - level: None
+    resources:
+      - group: "" # core
+        resources: ["events"]
+
+  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
+  - level: Request
+    users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+  - level: Request
+    userGroups: ["system:nodes"]
+    verbs: ["update","patch"]
+    resources:
+      - group: "" # core
+        resources: ["nodes/status", "pods/status"]
+    omitStages:
+      - "RequestReceived"
+
+  # deletecollection calls can be large, don't log responses for expected namespace deletions
+  - level: Request
+    users: ["system:serviceaccount:kube-system:namespace-controller"]
+    verbs: ["deletecollection"]
+    omitStages:
+      - "RequestReceived"
+
+  # Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,
+  # so only log at the Metadata level.
+  - level: Metadata
+    resources:
+      - group: "" # core
+        resources: ["secrets", "configmaps", "serviceaccounts/token"]
+      - group: authentication.k8s.io
+        resources: ["tokenreviews"]
+    omitStages:
+      - "RequestReceived"
+  # Get responses can be large; skip them.
+  - level: Request
+    verbs: ["get", "list", "watch"]
+    resources:
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "node.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "scheduling.k8s.io"
+      - group: "storage.k8s.io"
+    omitStages:
+      - "RequestReceived"
+  # Default level for known APIs
+  - level: RequestResponse
+    resources:
+      - group: "" # core
+      - group: "admissionregistration.k8s.io"
+      - group: "apiextensions.k8s.io"
+      - group: "apiregistration.k8s.io"
+      - group: "apps"
+      - group: "authentication.k8s.io"
+      - group: "authorization.k8s.io"
+      - group: "autoscaling"
+      - group: "batch"
+      - group: "certificates.k8s.io"
+      - group: "extensions"
+      - group: "metrics.k8s.io"
+      - group: "networking.k8s.io"
+      - group: "node.k8s.io"
+      - group: "policy"
+      - group: "rbac.authorization.k8s.io"
+      - group: "scheduling.k8s.io"
+      - group: "storage.k8s.io"
+    omitStages:
+      - "RequestReceived"
+  # Default level for all other requests.
+  - level: Metadata
+    omitStages:
+      - "RequestReceived"
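
Audit rules are matched in order and the first match wins, which is why the drop rules (level: None) come first and the catch-all Metadata rule comes last. One way to spot-check that secrets are only logged at Metadata once the policy is active (the log path is an assumption; this diff configures audit-log-maxage/maxsize/maxbackup but does not show audit-log-path):

    # expect zero secret events that captured a request body
    sudo grep '"resource":"secrets"' /var/log/kubernetes/audit.log | grep -c requestObject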

charts/kubeadm/templates/apiserver/audit-webhook.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Config
+clusters:
+  - name: falco
+    cluster:
+      server: http://falco-control-plane-k8saudit-webhook:9765/k8s-audit
+contexts:
+  - context:
+      cluster: falco
+      user: ""
+    name: default-context
+current-context: default-context
+preferences: {}
+users: []
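
The webhook config is an ordinary kubeconfig pointing the apiserver at the falco k8saudit service. A hand-rolled smoke test against the same endpoint, run from the namespace the service lives in (the JSON body is a trimmed illustration, not a real apiserver payload):

    kubectl run curl --rm -it --restart=Never --image=curlimages/curl -- \
      curl -s -X POST http://falco-control-plane-k8saudit-webhook:9765/k8s-audit \
      -H 'Content-Type: application/json' \
      -d '{"apiVersion":"audit.k8s.io/v1","kind":"EventList","items":[]}'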

@@ -1,4 +1,5 @@
 spec:
+  dnsPolicy: ClusterFirstWithHostNet
   containers:
     - name: kube-apiserver
       resources:

@@ -115,7 +115,7 @@ spec:

   containers:
     - name: aws-iam-authenticator
-      image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.5.11
+      image: public.ecr.aws/zero-downtime/aws-iam-authenticator:v0.6.10
       args:
         - server
         - --backend-mode=CRD,MountedFile

@@ -25,6 +25,9 @@ api:
   workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
   kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"

+  falco:
+    enabled: false
+
 etcd:
   nodeName: etcd
   state: new

charts/kubezero-addons/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
 name: kubezero-addons
 description: KubeZero umbrella chart for various optional cluster addons
 type: application
-version: 0.7.5
-appVersion: v1.25
+version: 0.8.0
+appVersion: v1.26
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -15,6 +15,7 @@ keywords:
   - sealed-secrets
   - external-dns
   - aws-node-termination-handler
+  - falco
 maintainers:
   - name: Stefan Reimer
     email: stefan@zero-downtime.net

@@ -44,4 +45,9 @@ dependencies:
     version: 1.3.0
     # repository: https://twin.github.io/helm-charts
     condition: aws-eks-asg-rolling-update-handler.enabled
-kubeVersion: ">= 1.25.0"
+  - name: falco
+    version: 3.3.0
+    repository: https://falcosecurity.github.io/charts
+    condition: falco-control-plane.enabled
+    alias: falco-control-plane
+kubeVersion: ">= 1.26.0"
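
Since the falco chart is aliased to falco-control-plane, that key both gates the dependency and carries its values. A minimal enable-and-render sketch (assumes the falcosecurity repo is reachable for the dependency build):

    helm dependency update charts/kubezero-addons
    helm template charts/kubezero-addons \
      --set falco-control-plane.enabled=true | grep -m1 k8saudit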

@@ -94,8 +94,9 @@ aws-node-termination-handler:

   fullnameOverride: "aws-node-termination-handler"

-  # -- "aws-node-termination-handler/${ClusterName}"
-  managedTag: "aws-node-termination-handler/managed"
+  checkASGTagBeforeDraining: false
+  # -- "zdt:kubezero:nth:${ClusterName}"
+  managedTag: "zdt:kubezero:nth:${ClusterName}"

   useProviderId: true
   enableSqsTerminationDraining: true
@@ -253,3 +254,72 @@ external-dns:
     #- istio-gateway

   provider: inmemory
+
+falco-control-plane:
+  enabled: false
+
+  fullnameOverride: falco-control-plane
+
+  # -- Disable the drivers since we want to deploy only the k8saudit plugin.
+  driver:
+    enabled: false
+
+  # -- Disable the collectors, no syscall events to enrich with metadata.
+  collectors:
+    enabled: false
+
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+
+  # -- Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurable.
+  controller:
+    kind: deployment
+    deployment:
+      # -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing.
+      # For more info check the section on Plugins in the README.md file.
+      replicas: 1
+
+  falcoctl:
+    artifact:
+      install:
+        # -- Enable the init container. We do not recommend installing (or following) plugins for security reasons since they are executable objects.
+        enabled: true
+      follow:
+        # -- Enable the sidecar container. We do not support it yet for plugins. It is used only for rules feeds such as the k8saudit-rules rules.
+        enabled: true
+    config:
+      artifact:
+        install:
+          # -- Do not resolve the dependencies for artifacts. By default this is true, but for our use case we disable it.
+          resolveDeps: false
+          # -- List of artifacts to be installed by the falcoctl init container.
+          # Only rulesfiles; we do not recommend plugins for security reasons since they are executable objects.
+          refs: [k8saudit-rules:0.6]
+        follow:
+          # -- List of artifacts to be followed by the falcoctl sidecar container.
+          # Only rulesfiles; we do not recommend plugins for security reasons since they are executable objects.
+          refs: [k8saudit-rules:0.6]
+
+  services:
+    - name: k8saudit-webhook
+      ports:
+        - port: 9765 # See plugin open_params
+          protocol: TCP
+
+  falco:
+    rules_file:
+      - /etc/falco/k8s_audit_rules.yaml
+      - /etc/falco/rules.d
+    plugins:
+      - name: k8saudit
+        library_path: libk8saudit.so
+        init_config:
+          maxEventBytes: 1048576
+          # sslCertificate: /etc/falco/falco.pem
+        open_params: "http://:9765/k8s-audit"
+      - name: json
+        library_path: libjson.so
+        init_config: ""
+    # Plugins that Falco will load. Note: the same plugins are installed by the falcoctl-artifact-install init container.
+    load_plugins: [k8saudit, json]
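
With the drivers and collectors disabled, this falco instance consumes only the audit stream delivered to the k8saudit-webhook service. A way to see it working end to end (namespace omitted as an assumption; use wherever kubezero-addons deploys falco-control-plane):

    kubectl logs deploy/falco-control-plane -f &
    # any audited action should now surface as k8saudit rule output, e.g.:
    kubectl create configmap falco-audit-test --from-literal=k=v
    kubectl delete configmap falco-audit-test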

charts/kubezero-argocd/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero ArgoCD - config, branding, image-updater (optional)
 name: kubezero-argocd
-version: 0.12.0
+version: 0.13.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -17,13 +17,13 @@ dependencies:
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: argo-cd
-    version: 5.28.2
+    version: 5.37.1
     repository: https://argoproj.github.io/argo-helm
   - name: argocd-apps
-    version: 0.0.9
+    version: 1.2.0
     repository: https://argoproj.github.io/argo-helm
   - name: argocd-image-updater
-    version: 0.8.5
+    version: 0.9.1
     repository: https://argoproj.github.io/argo-helm
     condition: argocd-image-updater.enabled
-kubeVersion: ">= 1.25.0"
+kubeVersion: ">= 1.26.0"

@@ -26,6 +26,7 @@ argo-cd:
   configs:
     styles: |
       .sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
+      .sidebar__logo__text-logo { height: 0em; }
      .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }

   cm:

@@ -30,7 +30,7 @@ dependencies:
     repository: https://aquasecurity.github.io/helm-charts/
     condition: trivy.enabled
   - name: renovate
-    version: 36.29.0
+    version: 36.31.0
     repository: https://docs.renovatebot.com/helm-charts
     condition: renovate.enabled
 kubeVersion: ">= 1.25.0"

@@ -118,17 +118,17 @@ jenkins:
           numToKeepStr: "10"

   installPlugins:
-    - kubernetes:3971.v94b_4c914ca_75
+    - kubernetes:3985.vd26d77b_2a_48a_
     - kubernetes-credentials-provider:1.225.v14f9e6b_28f53
     - workflow-aggregator:581.v0c46fa_697ffd
     - git:5.2.0
     - basic-branch-build-strategies:81.v05e333931c7d
     - pipeline-graph-view:183.v9e27732d970f
     - pipeline-stage-view:2.33
-    - configuration-as-code:1647.ve39ca_b_829b_42
-    - antisamy-markup-formatter:159.v25b_c67cd35fb_
+    - configuration-as-code:1670.v564dc8b_982d0
+    - antisamy-markup-formatter:162.v0e6ec0fcfcf6
     - prometheus:2.2.3
-    - htmlpublisher:1.31
+    - htmlpublisher:1.32
     - build-discarder:139.v05696a_7fe240
     - dark-theme:336.v02165cd8c2ee

@@ -139,7 +139,7 @@ jenkins:
   # Preconfigure agents to use zdt podman, requires fuse/overlayfs
   agent:
     image: public.ecr.aws/zero-downtime/jenkins-podman
-    tag: v0.4.2
+    tag: v0.4.3
     #alwaysPullImage: true
     podRetention: "Default"
     showRawYaml: false

@@ -237,3 +237,13 @@ trivy:

 renovate:
   enabled: false
+
+  env:
+    LOG_FORMAT: json
+  cronjob:
+    concurrencyPolicy: Forbid
+    jobBackoffLimit: 3
+    schedule: "0 3 * * *"
+    successfulJobsHistoryLimit: 1
+  securityContext:
+    fsGroup: 1000

charts/kubezero-network/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-network
 description: KubeZero umbrella chart for all things network
 type: application
-version: 0.4.4
+version: 0.4.5
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -19,11 +19,11 @@ dependencies:
     version: ">= 0.1.6"
     repository: https://cdn.zero-downtime.net/charts/
   - name: cilium
-    version: 1.13.4
+    version: 1.13.5
     repository: https://helm.cilium.io/
     condition: cilium.enabled
   - name: metallb
     version: 0.13.9
     repository: https://metallb.github.io/metallb
     condition: metallb.enabled
-kubeVersion: ">= 1.25.0"
+kubeVersion: ">= 1.26.0"

charts/kubezero/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.26.6
+version: 1.26.7
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:

@@ -131,6 +131,11 @@ sealed-secrets:
 {{- end }}
 {{- end }}

+{{- with index .Values "addons" "falco-control-plane" }}
+falco-control-plane:
+  {{- toYaml . | nindent 2 }}
+{{- end }}
+
 {{- if .Values.global.aws }}
 # AWS only
 aws-node-termination-handler:

@@ -145,7 +150,7 @@ aws-node-termination-handler:
   {{- end }}

   queueURL: "https://sqs.{{ .Values.global.aws.region }}.amazonaws.com/{{ .Values.global.aws.accountId }}/{{ .Values.global.clusterName }}_Nth"
-  managedTag: "aws-node-termination-handler/{{ .Values.global.clusterName }}"
+  managedTag: "zdt:kubezero:nth:{{ .Values.global.clusterName }}"
   extraEnv:
     - name: AWS_ROLE_ARN
       value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.awsNth"
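
Both the chart default above and this template now use the zdt:kubezero:nth:<clusterName> tag scheme, and checkASGTagBeforeDraining is disabled, so NTH matches instances on the instance tag alone. A tagging sketch (instance ID, cluster name and tag value are placeholders):

    aws ec2 create-tags --resources i-0123456789abcdef0 \
      --tags Key=zdt:kubezero:nth:mycluster,Value=managed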

@@ -50,7 +50,7 @@ prometheus:
           region: {{ .global.aws.region }}
           filters:
             - name: 'tag-key'
-              values: ['zdt:prometheus.crio']
+              values: ['zdt:prometheus:crio']
         {{- with .metrics.kubezero.prometheus.prometheusSpec.additionalScrapeConfigsEC2Filters }}
         {{- toYaml . | nindent 14 }}
         {{- end }}
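
The EC2 service-discovery filter now uses the colon-separated tag key zdt:prometheus:crio, so only instances carrying that tag key get scraped. A quick check of which instances match (region/profile flags omitted):

    aws ec2 describe-instances \
      --filters Name=tag-key,Values=zdt:prometheus:crio \
      --query 'Reservations[].Instances[].InstanceId'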

charts/kubezero/values.yaml
@@ -10,7 +10,7 @@ global:

 addons:
   enabled: true
-  targetRevision: 0.7.5
+  targetRevision: 0.8.0
   external-dns:
     enabled: false
   forseti:

@@ -25,11 +25,13 @@ addons:
     enabled: false
   aws-eks-asg-rolling-update-handler:
     enabled: false
+  falco-control-plane:
+    enabled: false

 network:
   enabled: true
   retain: true
-  targetRevision: 0.4.4
+  targetRevision: 0.4.5
   cilium:
     cluster: {}

@@ -94,7 +96,7 @@ logging:
 argocd:
   enabled: false
   namespace: argocd
-  targetRevision: 0.12.1
+  targetRevision: 0.13.0
   argocd-image-updater:
     enabled: false
 istio:

@@ -27,6 +27,7 @@ Something along the lines of https://github.com/onfido/k8s-cleanup which doesnt
 ## Resources
 - https://docs.google.com/spreadsheets/d/1WPHt0gsb7adVzY3eviMK2W8LejV0I5m_Zpc8tMzl_2w/edit#gid=0
 - https://github.com/ishantanu/awesome-kubectl-plugins
+- https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/gci/configure-helper.sh

 ## Update Api-server config
 Add the following extraArgs to the ClusterConfiguration configMap in the kube-system namespace:
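
A sketch of that edit; kubeadm keeps the ClusterConfiguration in the kubeadm-config configmap, and the exact re-apply step depends on the KubeZero upgrade flow:

    kubectl -n kube-system edit configmap kubeadm-config
    # add the desired keys under apiServer.extraArgs, then let the next
    # control-plane upgrade pick up the changed configuration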

@@ -1,7 +1,7 @@
 #!/bin/bash
 set -ex

-REPO_URL_S3="s3://zero-downtime-web/cdn/charts"
+REPO_URL_S3="s3://zero-downtime-web-cdn/charts"
 REPO_URL="https://cdn.zero-downtime.net/charts"

 CHARTS=${1:-'.*'}

@@ -55,6 +55,6 @@ function publish_chart() {

 publish_chart

-CF_DIST=E1YFUJXMCXT2RN
+CF_DIST=E11OFTOA3L8IVY
 aws cloudfront create-invalidation --distribution $CF_DIST --paths "/charts/*"