feat: first alpha release of v1.23.10

Stefan Reimer 2022-08-24 17:13:39 +02:00
parent 25f9ae6106
commit dcc584f737
195 changed files with 6313 additions and 3508 deletions

View File

@@ -13,6 +13,7 @@ RUN cd /etc/apk/keys && \
apk --no-cache add \
jq \
yq \
diffutils \
cri-tools@kubezero \
kubeadm@kubezero~=${KUBE_VERSION} \
kubectl@kubezero~=${KUBE_VERSION} \

View File

@@ -9,7 +9,8 @@ fi
export WORKDIR=/tmp/kubezero
export HOSTFS=/host
export CHARTS=/charts
export VERSION=$(kubeadm version --output json | jq -r .clientVersion.gitVersion)
export KUBE_VERSION=$(kubeadm version -o json | jq -r .clientVersion.gitVersion)
export KUBE_VERSION_MINOR="v1.$(kubectl version -o json | jq .clientVersion.minor -r)"
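# e.g. with kubeadm v1.23.10 this yields KUBE_VERSION="v1.23.10" and
# KUBE_VERSION_MINOR="v1.23"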
export KUBECONFIG="${HOSTFS}/root/.kube/config"
@@ -63,13 +64,11 @@ render_kubeadm() {
parse_kubezero() {
[ -f ${HOSTFS}/etc/kubernetes/kubezero.yaml ] || { echo "Missing /etc/kubernetes/kubezero.yaml!"; return 1; }
export KUBE_VERSION=$(kubeadm version -o yaml | yq eval .clientVersion.gitVersion -)
export CLUSTERNAME=$(yq eval '.clusterName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
export ETCD_NODENAME=$(yq eval '.etcd.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
export NODENAME=$(yq eval '.nodeName' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
export PROVIDER_ID=$(yq eval '.providerID' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
export AWS_IAM_AUTH=$(yq eval '.api.awsIamAuth.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
export AWS_NTH=$(yq eval '.addons.aws-node-termination-handler.enabled' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
# From here on, bail out on errors; this still allows debug_shell even in error cases
set -e
@@ -114,13 +113,16 @@ post_kubeadm() {
}
# First parse kubezero.yaml
parse_kubezero
if [ "$1" == 'upgrade' ]; then
cluster_upgrade() {
### PRE 1.23 specific
#####################
# Migrate addons and network values into CM from kubezero.yaml
kubectl get cm -n kube-system kubezero-values || \
kubectl create configmap -n kube-system kubezero-values \
--from-literal addons="$(yq e '.addons | del .clusterBackup.repository | del .clusterBackup.password' ${HOSTFS}/etc/kubernetes/kubezero.yaml)" \
--from-literal network="$(yq e .network ${HOSTFS}/etc/kubernetes/kubezero.yaml)"
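# Illustrative only, with hypothetical values - the resulting CM data
# roughly looks like:
#   addons: |
#     clusterBackup:
#       enabled: true
#   network: |
#     multus:
#       enabled: true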
#####################
render_kubeadm
@@ -142,16 +144,6 @@ if [ "$1" == 'upgrade' ]; then
######################
# network
yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
# addons
yq eval '.addons // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
helm template $CHARTS/kubezero-addons --namespace kube-system --include-crds --name-template addons \
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
# Cleanup after kubeadm on the host
rm -rf ${HOSTFS}/etc/kubernetes/tmp
@@ -163,23 +155,28 @@ if [ "$1" == 'upgrade' ]; then
# Removed:
# - update oidc do we need that ?
}
elif [[ "$1" == 'node-upgrade' ]]; then
node_upgrade() {
echo "Starting node upgrade ..."
echo "All done."
}
elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
control_plane_node() {
CMD=$1
render_kubeadm
# Ensure clean slate if bootstrap, restore PKI otherwise
if [[ "$1" =~ "^(bootstrap)$" ]]; then
if [[ "$CMD" =~ "^(bootstrap)$" ]]; then
rm -rf ${HOSTFS}/var/lib/etcd/member
else
# restore latest backup
retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
retry 10 60 30 restic restore latest --no-lock -t / --tag $KUBE_VERSION_MINOR
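# snapshots are presumably tagged with the minor version (eg. "v1.23") at
# backup time, so the restore picks the latest one from a matching release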
# Make last etcd snapshot available
cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
@@ -191,7 +188,7 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
# Only restore etcd data during "restore" and none exists already
if [[ "$1" =~ "^(restore)$" ]]; then
if [[ "$CMD" =~ "^(restore)$" ]]; then
if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
--name $ETCD_NODENAME \
@@ -218,7 +215,7 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
_kubeadm init phase preflight
_kubeadm init phase kubeconfig all
if [[ "$1" =~ "^(join)$" ]]; then
if [[ "$CMD" =~ "^(join)$" ]]; then
# Delete any former self in case forseti did not delete it yet
kubectl delete node ${NODENAME} --wait=true || true
# Wait for all pods to be deleted, otherwise we end up with stale pods, eg. kube-proxy, and all goes to ....
@@ -277,8 +274,7 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
retry 0 5 30 kubectl cluster-info --request-timeout 3 >/dev/null
# Update providerID as underlying VM changed during restore
if [[ "$1" =~ "^(restore)$" ]]; then
PROVIDER_ID=$(yq eval '.providerID' ${HOSTFS}/etc/kubernetes/kubezero.yaml)
if [[ "$CMD" =~ "^(restore)$" ]]; then
if [ -n "$PROVIDER_ID" ]; then
etcdhelper \
-cacert ${HOSTFS}/etc/kubernetes/pki/etcd/ca.crt \
@@ -289,7 +285,7 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
fi
fi
if [[ ! "$1" =~ "^(join)$" ]]; then
if [[ "$CMD" =~ "^(bootstrap|restore)$" ]]; then
_kubeadm init phase upload-config all
_kubeadm init phase upload-certs --skip-certificate-key-print
@@ -300,7 +296,7 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
_kubeadm init phase mark-control-plane
_kubeadm init phase kubelet-finalize all
if [[ ! "$1" =~ "^(join)$" ]]; then
if [[ "$CMD" =~ "^(bootstrap|restore)$" ]]; then
_kubeadm init phase addon all
fi
@@ -315,34 +311,42 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
yq eval -M ".clusters[0].cluster.certificate-authority-data = \"$(cat ${HOSTFS}/etc/kubernetes/pki/ca.crt | base64 -w0)\"" ${WORKDIR}/kubeadm/templates/admin-aws-iam.yaml > ${HOSTFS}/etc/kubernetes/admin-aws-iam.yaml
fi
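# admin-aws-iam.yaml should now be a self-contained kubeconfig for IAM
# authenticated admins; base64 -w0 keeps the embedded CA on a single line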
# install / update network and addons
if [[ "$1" =~ "^(bootstrap|join)$" ]]; then
# network
yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
# Ensure multus is first
helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
--set multus.enabled=true --kube-version $KUBE_VERSION | kubectl apply -f - $LOG
helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
# addons
yq eval '.addons // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
helm template $CHARTS/kubezero-addons --namespace kube-system --include-crds --name-template addons \
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
fi
post_kubeadm
echo "${1} cluster $CLUSTERNAME successfull."
}
apply_module() {
MODULE=$1
# fetch the module's values from the kubezero-values ConfigMap
kubectl get configmap -n kube-system kubezero-values -o custom-columns=NAME:".data.$MODULE" --no-headers=true > _values.yaml
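# custom-columns with --no-headers prints only the raw .data.<module> value,
# ie. the module's values yaml as stored in the CM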
helm template $CHARTS/kubezero-$MODULE --namespace kube-system --name-template $MODULE --skip-crds --set installCRDs=false -f _values.yaml --kube-version $KUBE_VERSION > helm-no-crds.yaml
helm template $CHARTS/kubezero-$MODULE --namespace kube-system --name-template $MODULE --include-crds --set installCRDs=true -f _values.yaml --kube-version $KUBE_VERSION > helm-crds.yaml
diff -e helm-no-crds.yaml helm-crds.yaml | head -n-1 | tail -n+2 > crds.yaml
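# diff -e emits an ed script: for a pure insertion that is one address line
# (eg. "123a"), the inserted text, then a lone "." - tail -n+2 and head -n-1
# strip those, leaving just the CRD manifests. This assumes both renders
# differ only by one contiguous block of CRDs.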
# Only apply if there are actually any CRDs
if [ -s crds.yaml ]; then
kubectl apply -f crds.yaml --server-side $LOG
fi
helm template $CHARTS/kubezero-$MODULE --namespace kube-system --include-crds --name-template $MODULE \
-f _values.yaml --kube-version $KUBE_VERSION | kubectl apply --namespace kube-system -f - $LOG
echo "Applied KubeZero module: $MODULE"
}
# backup etcd + /etc/kubernetes/pki
elif [ "$1" == 'backup' ]; then
backup() {
# Display all ENVs; careful, this exposes the password!
[ -n "$DEBUG" ] && env
restic snapshots || restic init || exit 1
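# ie. reuse the restic repository if reachable, initialise it on first use,
# and abort the backup if neither works (eg. bad credentials)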
CV=$(kubectl version --short=true -o json | jq .serverVersion.minor -r)
CV=$(kubectl version -o json | jq .serverVersion.minor -r)
let PCV=$CV-1
CLUSTER_VERSION="v1.$CV"
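# e.g. a v1.23 API server yields CV=23, CLUSTER_VERSION="v1.23" and PCV=22,
# the previous minor release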
@@ -368,16 +372,32 @@ elif [ "$1" == 'backup' ]; then
# Defrag etcd backend
etcdctl --endpoints=https://${ETCD_NODENAME}:2379 defrag
}
elif [ "$1" == 'debug_shell' ]; then
debug_shell() {
echo "Entering debug shell"
printf "For manual etcdctl commands use:\n # export ETCDCTL_ENDPOINTS=$ETCD_NODENAME:2379\n"
/bin/sh
}
else
echo "Unknown command!"
exit 1
fi
# First parse kubezero.yaml
parse_kubezero
# Execute tasks
for t in "$@"; do
case "$t" in
cluster_upgrade) cluster_upgrade;;
node_upgrade) node_upgrade;;
bootstrap) control_plane_node bootstrap;;
join) control_plane_node join;;
restore) control_plane_node restore;;
apply_network) apply_module network;;
apply_addons) apply_module addons;;
backup) backup;;
debug_shell) debug_shell;;
*) echo "Unknown command: '$t'";;
esac
done
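# Minimal usage sketch (assuming kubezero.sh is the admin container's
# entrypoint); tasks run in the order given:
#   kubezero.sh cluster_upgrade apply_network apply_addons
#   kubezero.sh debug_shell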

View File

@@ -1,14 +1,15 @@
#!/bin/bash -e
VERSION="v1.22"
VERSION="v1.23"
[ -n "$DEBUG" ] && set -x
# unset any AWS_DEFAULT_PROFILE as it will break aws-iam-auth
unset AWS_DEFAULT_PROFILE
echo "Deploying node upgrade daemonSet..."
cat <<EOF | kubectl apply -f -
controller_nodes_upgrade() {
echo "Deploying node upgrade daemonSet..."
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
@@ -48,9 +49,9 @@ spec:
type: Directory
EOF
#kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
kubectl delete ds kubezero-upgrade-${VERSION//.} -n kube-system
#kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
kubectl delete ds kubezero-upgrade-${VERSION//.} -n kube-system
}
echo "Deploying cluster upgrade job ..."
@@ -72,7 +73,9 @@ spec:
imagePullPolicy: Always
command: ["kubezero.sh"]
args:
- upgrade
- cluster_upgrade
- apply_network
- apply_addons
env:
- name: DEBUG
value: "$DEBUG"
@@ -109,15 +112,3 @@ while true; do
sleep 3
done
kubectl delete pod kubezero-upgrade-${VERSION//.} -n kube-system
# Fix backup cron for proper version
kubectl set image cronjob/kubezero-backup kubezero-admin=public.ecr.aws/zero-downtime/kubezero-admin:${VERSION} -n kube-system
# Now lets rolling restart bunch of ds to make sure they picked up the changes
for ds in calico-node kube-multus-ds kube-proxy ebs-csi-node; do
kubectl rollout restart daemonset/$ds -n kube-system
kubectl rollout status daemonset/$ds -n kube-system
done
# Force replace the ECK CRDs
kubectl get crd elasticsearches.elasticsearch.k8s.elastic.co && kubectl replace -f https://download.elastic.co/downloads/eck/2.1.0/crds.yaml

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubeadm
description: KubeZero Kubeadm cluster config
type: application
version: 1.23.8
version: 1.23.10
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -1,6 +1,6 @@
# kubeadm
![Version: 1.22.8](https://img.shields.io/badge/Version-1.22.8-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 1.23.10](https://img.shields.io/badge/Version-1.23.10-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Kubeadm cluster config
@@ -20,12 +20,6 @@ Kubernetes: `>= 1.20.0`
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| addons.aws-node-termination-handler.enabled | bool | `false` | |
| addons.aws-node-termination-handler.queueURL | string | `""` | arn:aws:sqs:${REGION}:${AWS_ACCOUNT_ID}:${CLUSTERNAME}_Nth |
| addons.clusterBackup.enabled | bool | `false` | |
| addons.clusterBackup.passwordFile | string | `""` | /etc/cloudbender/clusterBackup.passphrase |
| addons.clusterBackup.repository | string | `""` | s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup |
| addons.external-dns.enabled | bool | `false` | |
| api.apiAudiences | string | `"istio-ca"` | |
| api.awsIamAuth.enabled | bool | `false` | |
| api.awsIamAuth.kubeAdminRole | string | `"arn:aws:iam::000000000000:role/KubernetesNode"` | |
@@ -43,10 +37,6 @@ Kubernetes: `>= 1.20.0`
| etcd.state | string | `"new"` | |
| highAvailable | bool | `false` | |
| listenAddress | string | `"0.0.0.0"` | Needs to be set to primary node IP |
| network.calico.enabled | bool | `false` | |
| network.cilium.enabled | bool | `false` | |
| network.multus.enabled | bool | `false` | |
| network.multus.tag | string | `"v3.8"` | |
| nodeName | string | `"kubezero-node"` | set to $HOSTNAME |
| protectKernelDefaults | bool | `false` | |
| systemd | bool | `false` | Set to false for openrc, eg. on Gentoo or Alpine |

View File

@@ -1,4 +1,3 @@
{{- if index .Values "addons" "external-dns" "enabled" }}
apiVersion: v1
kind: Service
metadata:
@@ -13,4 +12,3 @@ spec:
selector:
component: kube-apiserver
tier: control-plane
{{- end }}

View File

@@ -22,31 +22,6 @@ api:
workerNodeRole: "arn:aws:iam::000000000000:role/KubernetesNode"
kubeAdminRole: "arn:aws:iam::000000000000:role/KubernetesNode"
addons:
aws-node-termination-handler:
enabled: false
# -- arn:aws:sqs:${REGION}:${AWS_ACCOUNT_ID}:${CLUSTERNAME}_Nth
queueURL: ""
clusterBackup:
enabled: false
# -- s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup
repository: ""
# -- /etc/cloudbender/clusterBackup.passphrase
passwordFile: ""
external-dns:
enabled: false
network:
multus:
enabled: false
tag: "v3.8"
cilium:
enabled: false
calico:
enabled: false
highAvailable: false
etcd:

View File

@@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
version: 0.5.5
appVersion: v1.22.8
version: 0.6.0
appVersion: v1.23.10
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -20,7 +20,7 @@ dependencies:
# repository: https://aws.github.io/eks-charts
condition: aws-node-termination-handler.enabled
- name: external-dns
version: 1.7.1
version: 1.11.0
repository: https://kubernetes-sigs.github.io/external-dns/
condition: external-dns.enabled
kubeVersion: ">= 1.20.0"

View File

@@ -1,6 +1,6 @@
# kubezero-addons
![Version: 0.5.5](https://img.shields.io/badge/Version-0.5.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.22.8](https://img.shields.io/badge/AppVersion-v1.22.8-informational?style=flat-square)
![Version: 0.6.0](https://img.shields.io/badge/Version-0.6.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.23.10](https://img.shields.io/badge/AppVersion-v1.23.10-informational?style=flat-square)
KubeZero umbrella chart for various optional cluster addons
@@ -19,7 +19,7 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| | aws-node-termination-handler | 0.18.5 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.7.1 |
| https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
# MetalLB
@@ -63,8 +63,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
| clusterBackup.enabled | bool | `false` | |
| clusterBackup.extraEnv | list | `[]` | |
| clusterBackup.image.name | string | `"public.ecr.aws/zero-downtime/kubezero-admin"` | |
| clusterBackup.password | string | `""` | |
| clusterBackup.repository | string | `""` | |
| clusterBackup.password | string | `""` | /etc/cloudbender/clusterBackup.passphrase |
| clusterBackup.repository | string | `""` | s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup |
| external-dns.enabled | bool | `false` | |
| external-dns.env[0] | object | `{"name":"AWS_ROLE_ARN","value":""}` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.externalDNS" |
| external-dns.env[1].name | string | `"AWS_WEB_IDENTITY_TOKEN_FILE"` | |

View File

@@ -27,7 +27,7 @@ spec:
mountPath: /tmp
env:
- name: DEBUG
value: "1"
value: ""
- name: RESTIC_REPOSITORY
valueFrom:
secretKeyRef:

View File

@@ -1,4 +1,4 @@
{{- if and .Values.clusterBackup.enabled .Values.clusterBackup.repository }}
{{- if and .Values.clusterBackup.enabled .Values.clusterBackup.repository .Values.clusterBackup.password }}
apiVersion: v1
kind: Secret
metadata:

View File

@@ -5,8 +5,11 @@ clusterBackup:
name: public.ecr.aws/zero-downtime/kubezero-admin
# tag: v1.22.8
# -- s3:https://s3.amazonaws.com/${CFN[ConfigBucket]}/k8s/${CLUSTERNAME}/clusterBackup
repository: ""
# -- /etc/cloudbender/clusterBackup.passphrase
password: ""
extraEnv: []
forseti:

View File

@@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
name: kubezero-argocd
version: 0.10.1
version: 0.10.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -13,9 +13,9 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.4"
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: argo-cd
version: 4.5.4
version: 4.10.9
repository: https://argoproj.github.io/argo-helm
kubeVersion: ">= 1.20.0"

View File

@@ -1,6 +1,6 @@
# kubezero-argocd
![Version: 0.10.1](https://img.shields.io/badge/Version-0.10.1-informational?style=flat-square)
![Version: 0.10.2](https://img.shields.io/badge/Version-0.10.2-informational?style=flat-square)
KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| https://argoproj.github.io/argo-helm | argo-cd | 4.5.4 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
| https://argoproj.github.io/argo-helm | argo-cd | 4.10.9 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
## Values

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
version: 0.9.1
version: 0.9.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -13,9 +13,9 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.4"
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: cert-manager
version: 1.8.0
version: 1.9.1
repository: https://charts.jetstack.io
kubeVersion: ">= 1.20.0"

View File

@@ -1,6 +1,6 @@
# kubezero-cert-manager
![Version: 0.9.1](https://img.shields.io/badge/Version-0.9.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.9.2](https://img.shields.io/badge/Version-0.9.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for cert-manager
@@ -18,8 +18,8 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
| https://charts.jetstack.io | cert-manager | 1.8.0 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.jetstack.io | cert-manager | 1.9.1 |
## AWS - OIDC IAM roles

View File

@@ -1,6 +1,6 @@
# kubezero-ci
![Version: 0.4.54](https://img.shields.io/badge/Version-0.4.54-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.5.12](https://img.shields.io/badge/Version-0.5.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero umbrella chart for all things CI
@@ -18,10 +18,10 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.15 |
| https://aquasecurity.github.io/helm-charts/ | trivy | 0.4.16 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://charts.jenkins.io | jenkins | 4.1.10 |
| https://dl.gitea.io/charts/ | gitea | 5.0.5 |
| https://charts.jenkins.io | jenkins | 4.1.16 |
| https://dl.gitea.io/charts/ | gitea | 5.0.9 |
| https://gocd.github.io/helm-chart | gocd | 1.40.8 |
# Jenkins
@@ -54,7 +54,7 @@ Kubernetes: `>= 1.20.0`
| gitea.gitea.metrics.enabled | bool | `false` | |
| gitea.gitea.metrics.serviceMonitor.enabled | bool | `false` | |
| gitea.image.rootless | bool | `true` | |
| gitea.image.tag | string | `"1.16.8"` | |
| gitea.image.tag | string | `"1.17.1"` | |
| gitea.istio.enabled | bool | `false` | |
| gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
| gitea.istio.url | string | `"git.example.com"` | |
@@ -85,9 +85,9 @@ Kubernetes: `>= 1.20.0`
| jenkins.agent.resources.requests.cpu | string | `"512m"` | |
| jenkins.agent.resources.requests.memory | string | `"1024Mi"` | |
| jenkins.agent.showRawYaml | bool | `false` | |
| jenkins.agent.tag | string | `"v0.2.5-1"` | |
| jenkins.agent.tag | string | `"v0.3.2"` | |
| jenkins.agent.yamlMergeStrategy | string | `"merge"` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n limits:\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\""` | |
| jenkins.agent.yamlTemplate | string | `"apiVersion: v1\nkind: Pod\nspec:\n securityContext:\n fsGroup: 1000\n serviceAccountName: jenkins-podman-aws\n containers:\n - name: jnlp\n resources:\n limits:\n github.com/fuse: 1\n volumeMounts:\n - name: aws-token\n mountPath: \"/var/run/secrets/sts.amazonaws.com/serviceaccount/\"\n readOnly: true\n - name: host-registries-conf\n mountPath: \"/home/jenkins/.config/containers/registries.conf\"\n readOnly: true\n volumes:\n - name: aws-token\n projected:\n sources:\n - serviceAccountToken:\n path: token\n expirationSeconds: 86400\n audience: \"sts.amazonaws.com\"\n - name: host-registries-conf\n hostPath:\n path: /etc/containers/registries.conf\n type: File"` | |
| jenkins.controller.JCasC.configScripts.zdt-settings | string | `"jenkins:\n noUsageStatistics: true\n disabledAdministrativeMonitors:\n - \"jenkins.security.ResourceDomainRecommendation\"\nunclassified:\n buildDiscarders:\n configuredBuildDiscarders:\n - \"jobBuildDiscarder\"\n - defaultBuildDiscarder:\n discarder:\n logRotator:\n artifactDaysToKeepStr: \"32\"\n artifactNumToKeepStr: \"10\"\n daysToKeepStr: \"100\"\n numToKeepStr: \"10\"\n"` | |
| jenkins.controller.disableRememberMe | bool | `true` | |
| jenkins.controller.enableRawHtmlMarkupFormatter | bool | `true` | |
@@ -95,15 +95,16 @@ Kubernetes: `>= 1.20.0`
| jenkins.controller.initContainerResources.limits.memory | string | `"1024Mi"` | |
| jenkins.controller.initContainerResources.requests.cpu | string | `"50m"` | |
| jenkins.controller.initContainerResources.requests.memory | string | `"256Mi"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3651.v908e7db_10d06"` | |
| jenkins.controller.installPlugins[0] | string | `"kubernetes:3670.v6ca_059233222"` | |
| jenkins.controller.installPlugins[1] | string | `"workflow-aggregator:581.v0c46fa_697ffd"` | |
| jenkins.controller.installPlugins[2] | string | `"git:4.11.3"` | |
| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1429.v09b_044a_c93de"` | |
| jenkins.controller.installPlugins[2] | string | `"git:4.11.4"` | |
| jenkins.controller.installPlugins[3] | string | `"configuration-as-code:1512.vb_79d418d5fc8"` | |
| jenkins.controller.installPlugins[4] | string | `"antisamy-markup-formatter:2.7"` | |
| jenkins.controller.installPlugins[5] | string | `"prometheus:2.0.11"` | |
| jenkins.controller.installPlugins[6] | string | `"htmlpublisher:1.30"` | |
| jenkins.controller.installPlugins[7] | string | `"build-discarder:60.v1747b0eb632a"` | |
| jenkins.controller.installPlugins[8] | string | `"dark-theme:156.v6cf16af6f9ef"` | |
| jenkins.controller.installPlugins[8] | string | `"dark-theme:185.v276b_5a_8966a_e"` | |
| jenkins.controller.installPlugins[9] | string | `"kubernetes-credentials-provider:1.196.va_55f5e31e3c2"` | |
| jenkins.controller.javaOpts | string | `"-XX:+UseContainerSupport -XX:+UseStringDeduplication -Dhudson.model.DirectoryBrowserSupport.CSP=\"sandbox allow-popups; default-src 'none'; img-src 'self' cdn.zero-downtime.net; style-src 'unsafe-inline';\""` | |
| jenkins.controller.jenkinsOpts | string | `"--sessionTimeout=180 --sessionEviction=3600"` | |
| jenkins.controller.prometheus.enabled | bool | `false` | |
@@ -111,7 +112,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.controller.resources.limits.memory | string | `"4096Mi"` | |
| jenkins.controller.resources.requests.cpu | string | `"250m"` | |
| jenkins.controller.resources.requests.memory | string | `"1280Mi"` | |
| jenkins.controller.tag | string | `"2.356-alpine-jdk17"` | |
| jenkins.controller.tag | string | `"2.362-alpine-jdk17"` | |
| jenkins.controller.testEnabled | bool | `false` | |
| jenkins.enabled | bool | `false` | |
| jenkins.istio.agent.enabled | bool | `false` | |
@@ -124,6 +125,7 @@ Kubernetes: `>= 1.20.0`
| jenkins.istio.webhook.gateway | string | `"istio-ingress/ingressgateway"` | |
| jenkins.istio.webhook.url | string | `"jenkins-webhook.example.com"` | |
| jenkins.persistence.size | string | `"4Gi"` | |
| jenkins.rbac.readSecrets | bool | `true` | |
| jenkins.serviceAccountAgent.create | bool | `true` | |
| jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
| trivy.enabled | bool | `false` | |

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio-gateway
description: KubeZero Umbrella Chart for Istio gateways
type: application
version: 0.8.1
version: 0.8.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -14,9 +14,9 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.4"
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: gateway
version: 1.13.5
version: 1.14.3
repository: https://istio-release.storage.googleapis.com/charts
kubeVersion: ">= 1.20.0"

View File

@@ -1,6 +1,6 @@
# kubezero-istio-gateway
![Version: 0.8.1](https://img.shields.io/badge/Version-0.8.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.2](https://img.shields.io/badge/Version-0.8.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio gateways
@@ -20,8 +20,8 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
| https://istio-release.storage.googleapis.com/charts | gateway | 1.13.5 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://istio-release.storage.googleapis.com/charts | gateway | 1.14.3 |
## Values

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 1.13.5
appVersion: 1.14.3
description: Helm chart for deploying Istio gateways
icon: https://istio.io/latest/favicons/android-192x192.png
keywords:
@@ -9,4 +9,4 @@ name: gateway
sources:
- http://github.com/istio/istio
type: application
version: 1.13.5
version: 1.14.3

View File

@@ -1,5 +1,5 @@
apiVersion: apps/v1
kind: Deployment
kind: {{ .Values.kind | default "Deployment" }}
metadata:
name: {{ include "gateway.name" . }}
namespace: {{ .Release.Namespace }}

View File

@@ -1,5 +1,9 @@
{{- if .Values.autoscaling.enabled }}
{{- if and (.Values.autoscaling.enabled) (eq .Values.kind "Deployment") }}
{{- if (semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion)}}
apiVersion: autoscaling/v2
{{- else }}
apiVersion: autoscaling/v2beta2
{{- end }}
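{{- /* autoscaling/v2 went GA with Kubernetes 1.23; older API servers only
serve v2beta2, hence the capability check above */}}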
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "gateway.name" . }}
@@ -11,7 +15,7 @@ metadata:
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
kind: {{ .Values.kind | default "Deployment" }}
name: {{ include "gateway.name" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
@@ -24,5 +28,4 @@ spec:
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
type: Utilization
{{- end }}
{{- end }}

View File

@@ -46,6 +46,11 @@ spec:
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs: {{- range .Values.service.externalIPs }}
- {{.}}
{{- end }}
{{- end }}
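{{- /* e.g. service.externalIPs: [10.0.0.10] renders one entry per address;
the empty default keeps the field out of the Service */}}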
selector:
{{- include "gateway.selectorLabels" . | nindent 4 }}

View File

@@ -15,6 +15,10 @@
"containerSecurityContext": {
"type": ["object", "null"]
},
"kind":{
"type": "string",
"enum": ["Deployment", "DaemonSet"]
},
"annotations": {
"additionalProperties": {
"type": [

View File

@@ -5,6 +5,8 @@ revision: ""
replicaCount: 1
kind: Deployment
rbac:
# If enabled, roles will be created to enable accessing certificates from Gateways. This is not needed
# when using http://gateway-api.org/.
@@ -52,6 +54,7 @@ service:
loadBalancerIP: ""
loadBalancerSourceRanges: []
externalTrafficPolicy: ""
externalIPs: []
resources:
requests:

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-istio
description: KubeZero Umbrella Chart for Istio
type: application
version: 0.8.1
version: 0.8.2
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -13,16 +13,16 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.4"
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: base
version: 1.13.5
version: 1.14.3
repository: https://istio-release.storage.googleapis.com/charts
- name: istiod
version: 1.13.5
version: 1.14.3
repository: https://istio-release.storage.googleapis.com/charts
- name: kiali-server
version: 1.38.1
# repository: https://github.com/kiali/helm-charts/tree/master/docs
version: 1.54
repository: https://kiali.org/helm-charts
condition: kiali-server.enabled
kubeVersion: ">= 1.20.0"

View File

@@ -1,6 +1,6 @@
# kubezero-istio
![Version: 0.8.1](https://img.shields.io/badge/Version-0.8.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.2](https://img.shields.io/badge/Version-0.8.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Istio
@@ -20,10 +20,10 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| | kiali-server | 1.38.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
| https://istio-release.storage.googleapis.com/charts | base | 1.13.5 |
| https://istio-release.storage.googleapis.com/charts | istiod | 1.13.5 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://istio-release.storage.googleapis.com/charts | base | 1.14.3 |
| https://istio-release.storage.googleapis.com/charts | istiod | 1.14.3 |
| https://kiali.org/helm-charts | kiali-server | 1.54 |
## Values
@@ -32,7 +32,7 @@ Kubernetes: `>= 1.20.0`
| global.defaultPodDisruptionBudget.enabled | bool | `false` | |
| global.logAsJson | bool | `true` | |
| global.priorityClassName | string | `"system-cluster-critical"` | |
| global.tag | string | `"1.13.5-distroless"` | |
| global.tag | string | `"1.14.3-distroless"` | |
| istiod.meshConfig.accessLogEncoding | string | `"JSON"` | |
| istiod.meshConfig.accessLogFile | string | `"/dev/stdout"` | |
| istiod.meshConfig.tcpKeepalive.interval | string | `"60s"` | |

View File

@@ -1,20 +0,0 @@
apiVersion: v2
appVersion: v1.38.1
description: Kiali is an open source project for service mesh observability, refer
to https://www.kiali.io for details.
home: https://github.com/kiali/kiali
icon: https://raw.githubusercontent.com/kiali/kiali.io/master/themes/kiali/static/img/kiali_logo_masthead.png
keywords:
- istio
- kiali
maintainers:
- email: kiali-users@googlegroups.com
name: Kiali
url: https://kiali.io
name: kiali-server
sources:
- https://github.com/kiali/kiali
- https://github.com/kiali/kiali-ui
- https://github.com/kiali/kiali-operator
- https://github.com/kiali/helm-charts
version: 1.38.1

View File

@@ -1,5 +0,0 @@
Welcome to Kiali! For more details on Kiali, see: https://kiali.io
The Kiali Server [{{ .Chart.AppVersion }}] has been installed in namespace [{{ .Release.Namespace }}]. It will be ready soon.
(Helm: Chart=[{{ .Chart.Name }}], Release=[{{ .Release.Name }}], Version=[{{ .Chart.Version }}])

View File

@@ -1,143 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create a default fully qualified instance name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
To simulate the way the operator works, use deployment.instance_name rather than the old fullnameOverride.
For backwards compatibility, if fullnameOverride is not kiali but deployment.instance_name is kiali,
use fullnameOverride, otherwise use deployment.instance_name.
*/}}
{{- define "kiali-server.fullname" -}}
{{- if (and (eq .Values.deployment.instance_name "kiali") (ne .Values.fullnameOverride "kiali")) }}
{{- .Values.fullnameOverride | trunc 63 }}
{{- else }}
{{- .Values.deployment.instance_name | trunc 63 }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kiali-server.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Identifies the log_level with the old verbose_mode and the new log_level considered.
*/}}
{{- define "kiali-server.logLevel" -}}
{{- if .Values.deployment.verbose_mode -}}
{{- .Values.deployment.verbose_mode -}}
{{- else -}}
{{- .Values.deployment.logger.log_level -}}
{{- end -}}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kiali-server.labels" -}}
helm.sh/chart: {{ include "kiali-server.chart" . }}
app: kiali
{{ include "kiali-server.selectorLabels" . }}
version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }}
app.kubernetes.io/version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: "kiali"
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kiali-server.selectorLabels" -}}
app.kubernetes.io/name: kiali
app.kubernetes.io/instance: {{ include "kiali-server.fullname" . }}
{{- end }}
{{/*
Determine the default login token signing key.
*/}}
{{- define "kiali-server.login_token.signing_key" -}}
{{- if .Values.login_token.signing_key }}
{{- .Values.login_token.signing_key }}
{{- else }}
{{- randAlphaNum 16 }}
{{- end }}
{{- end }}
{{/*
Determine the default web root.
*/}}
{{- define "kiali-server.server.web_root" -}}
{{- if .Values.server.web_root }}
{{- .Values.server.web_root | trimSuffix "/" }}
{{- else }}
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
{{- "/" }}
{{- else }}
{{- "/kiali" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Determine the default identity cert file. There is no default if on k8s; only on OpenShift.
*/}}
{{- define "kiali-server.identity.cert_file" -}}
{{- if hasKey .Values.identity "cert_file" }}
{{- .Values.identity.cert_file }}
{{- else }}
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
{{- "/kiali-cert/tls.crt" }}
{{- else }}
{{- "" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Determine the default identity private key file. There is no default if on k8s; only on OpenShift.
*/}}
{{- define "kiali-server.identity.private_key_file" -}}
{{- if hasKey .Values.identity "private_key_file" }}
{{- .Values.identity.private_key_file }}
{{- else }}
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
{{- "/kiali-cert/tls.key" }}
{{- else }}
{{- "" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Determine the istio namespace - default is where Kiali is installed.
*/}}
{{- define "kiali-server.istio_namespace" -}}
{{- if .Values.istio_namespace }}
{{- .Values.istio_namespace }}
{{- else }}
{{- .Release.Namespace }}
{{- end }}
{{- end }}
{{/*
Determine the auth strategy to use - default is "token" on Kubernetes and "openshift" on OpenShift.
*/}}
{{- define "kiali-server.auth.strategy" -}}
{{- if .Values.auth.strategy }}
{{- if (and (eq .Values.auth.strategy "openshift") (not .Values.kiali_route_url)) }}
{{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or use a different auth strategy via the --set auth.strategy=... option." }}
{{- end }}
{{- .Values.auth.strategy }}
{{- else }}
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
{{- if not .Values.kiali_route_url }}
{{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or explicitly indicate another auth strategy you want via the --set auth.strategy=... option." }}
{{- end }}
{{- "openshift" }}
{{- else }}
{{- "token" }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,13 +0,0 @@
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kiali-server.fullname" . }}-cabundle
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
annotations:
service.beta.openshift.io/inject-cabundle: "true"
...
{{- end }}

View File

@@ -1,25 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
data:
config.yaml: |
{{- /* Most of .Values is simply the ConfigMap - strip out the keys that are not part of the ConfigMap */}}
{{- $cm := omit .Values "nameOverride" "fullnameOverride" "kiali_route_url" }}
{{- /* The helm chart defines namespace for us, but pass it to the ConfigMap in case the server needs it */}}
{{- $_ := set $cm.deployment "namespace" .Release.Namespace }}
{{- /* Some values of the ConfigMap are generated, but might not be identical, from .Values */}}
{{- $_ := set $cm "istio_namespace" (include "kiali-server.istio_namespace" .) }}
{{- $_ := set $cm.auth "strategy" (include "kiali-server.auth.strategy" .) }}
{{- $_ := set $cm.auth.openshift "client_id_prefix" (include "kiali-server.fullname" .) }}
{{- $_ := set $cm.deployment "instance_name" (include "kiali-server.fullname" .) }}
{{- $_ := set $cm.identity "cert_file" (include "kiali-server.identity.cert_file" .) }}
{{- $_ := set $cm.identity "private_key_file" (include "kiali-server.identity.private_key_file" .) }}
{{- $_ := set $cm.login_token "signing_key" (include "kiali-server.login_token.signing_key" .) }}
{{- $_ := set $cm.server "web_root" (include "kiali-server.server.web_root" .) }}
{{- toYaml $cm | nindent 4 }}
...

View File

@@ -1,165 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.deployment.replicas }}
selector:
matchLabels:
{{- include "kiali-server.selectorLabels" . | nindent 6 }}
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
name: {{ include "kiali-server.fullname" . }}
labels:
{{- include "kiali-server.labels" . | nindent 8 }}
{{- if .Values.deployment.pod_labels }}
{{- toYaml .Values.deployment.pod_labels | nindent 8 }}
{{- end }}
annotations:
{{- if .Values.server.metrics_enabled }}
prometheus.io/scrape: "true"
prometheus.io/port: {{ .Values.server.metrics_port | quote }}
{{- else }}
prometheus.io/scrape: "false"
prometheus.io/port: ""
{{- end }}
kiali.io/dashboards: go,kiali
{{- if .Values.deployment.pod_annotations }}
{{- toYaml .Values.deployment.pod_annotations | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "kiali-server.fullname" . }}
{{- if .Values.deployment.priority_class_name }}
priorityClassName: {{ .Values.deployment.priority_class_name | quote }}
{{- end }}
{{- if .Values.deployment.image_pull_secrets }}
imagePullSecrets:
{{- range .Values.deployment.image_pull_secrets }}
- name: {{ . }}
{{- end }}
{{- end }}
containers:
- image: "{{ .Values.deployment.image_name }}:{{ .Values.deployment.image_version }}"
imagePullPolicy: {{ .Values.deployment.image_pull_policy | default "Always" }}
name: {{ include "kiali-server.fullname" . }}
command:
- "/opt/kiali/kiali"
- "-config"
- "/kiali-configuration/config.yaml"
securityContext:
allowPrivilegeEscalation: false
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
ports:
- name: api-port
containerPort: {{ .Values.server.port | default 20001 }}
{{- if .Values.server.metrics_enabled }}
- name: http-metrics
containerPort: {{ .Values.server.metrics_port | default 9090 }}
{{- end }}
readinessProbe:
httpGet:
path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz
port: api-port
{{- if (include "kiali-server.identity.cert_file" .) }}
scheme: HTTPS
{{- else }}
scheme: HTTP
{{- end }}
initialDelaySeconds: 5
periodSeconds: 30
livenessProbe:
httpGet:
path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz
port: api-port
{{- if (include "kiali-server.identity.cert_file" .) }}
scheme: HTTPS
{{- else }}
scheme: HTTP
{{- end }}
initialDelaySeconds: 5
periodSeconds: 30
env:
- name: ACTIVE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LOG_LEVEL
value: "{{ include "kiali-server.logLevel" . }}"
- name: LOG_FORMAT
value: "{{ .Values.deployment.logger.log_format }}"
- name: LOG_TIME_FIELD_FORMAT
value: "{{ .Values.deployment.logger.time_field_format }}"
- name: LOG_SAMPLER_RATE
value: "{{ .Values.deployment.logger.sampler_rate }}"
volumeMounts:
- name: {{ include "kiali-server.fullname" . }}-configuration
mountPath: "/kiali-configuration"
- name: {{ include "kiali-server.fullname" . }}-cert
mountPath: "/kiali-cert"
- name: {{ include "kiali-server.fullname" . }}-secret
mountPath: "/kiali-secret"
- name: {{ include "kiali-server.fullname" . }}-cabundle
mountPath: "/kiali-cabundle"
{{- if .Values.deployment.resources }}
resources:
{{- toYaml .Values.deployment.resources | nindent 10 }}
{{- end }}
volumes:
- name: {{ include "kiali-server.fullname" . }}-configuration
configMap:
name: {{ include "kiali-server.fullname" . }}
- name: {{ include "kiali-server.fullname" . }}-cert
secret:
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
secretName: {{ include "kiali-server.fullname" . }}-cert-secret
{{- else }}
secretName: istio.{{ include "kiali-server.fullname" . }}-service-account
{{- end }}
{{- if not (include "kiali-server.identity.cert_file" .) }}
optional: true
{{- end }}
- name: {{ include "kiali-server.fullname" . }}-secret
secret:
secretName: {{ .Values.deployment.secret_name }}
optional: true
- name: {{ include "kiali-server.fullname" . }}-cabundle
configMap:
name: {{ include "kiali-server.fullname" . }}-cabundle
{{- if not (.Capabilities.APIVersions.Has "route.openshift.io/v1") }}
optional: true
{{- end }}
{{- if or (.Values.deployment.affinity.node) (or (.Values.deployment.affinity.pod) (.Values.deployment.affinity.pod_anti)) }}
affinity:
{{- if .Values.deployment.affinity.node }}
nodeAffinity:
{{- toYaml .Values.deployment.affinity.node | nindent 10 }}
{{- end }}
{{- if .Values.deployment.affinity.pod }}
podAffinity:
{{- toYaml .Values.deployment.affinity.pod | nindent 10 }}
{{- end }}
{{- if .Values.deployment.affinity.pod_anti }}
podAntiAffinity:
{{- toYaml .Values.deployment.affinity.pod_anti | nindent 10 }}
{{- end }}
{{- end }}
{{- if .Values.deployment.tolerations }}
tolerations:
{{- toYaml .Values.deployment.tolerations | nindent 8 }}
{{- end }}
{{- if .Values.deployment.node_selector }}
nodeSelector:
{{- toYaml .Values.deployment.node_selector | nindent 8 }}
{{- end }}
...

View File

@@ -1,17 +0,0 @@
{{- if .Values.deployment.hpa.spec }}
---
apiVersion: {{ .Values.deployment.hpa.api_version }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "kiali-server.fullname" . }}
{{- toYaml .Values.deployment.hpa.spec | nindent 2 }}
...
{{- end }}

View File

@@ -1,56 +0,0 @@
{{- if not (.Capabilities.APIVersions.Has "route.openshift.io/v1") }}
{{- if .Values.deployment.ingress_enabled }}
---
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
apiVersion: networking.k8s.io/v1
{{- else }}
apiVersion: networking.k8s.io/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
annotations:
{{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }}
{{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }}
{{- else }}
# For ingress-nginx versions older than 0.20.0 use secure-backends.
# (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948)
# For ingress-nginx versions 0.20.0 and later use backend-protocol.
{{- if (include "kiali-server.identity.cert_file" .) }}
nginx.ingress.kubernetes.io/secure-backends: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
{{- else }}
nginx.ingress.kubernetes.io/secure-backends: "false"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
{{- end }}
{{- end }}
spec:
{{- if hasKey .Values.deployment.override_ingress_yaml "spec" }}
{{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }}
{{- else }}
rules:
- http:
paths:
- path: {{ include "kiali-server.server.web_root" . }}
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
pathType: Prefix
backend:
service:
name: {{ include "kiali-server.fullname" . }}
port:
number: {{ .Values.server.port }}
{{- else }}
backend:
serviceName: {{ include "kiali-server.fullname" . }}
servicePort: {{ .Values.server.port }}
{{- end }}
{{- if not (empty .Values.server.web_fqdn) }}
host: {{ .Values.server.web_fqdn }}
{{- end }}
{{- end }}
...
{{- end }}
{{- end }}

View File

@@ -1,17 +0,0 @@
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
{{- if .Values.kiali_route_url }}
---
apiVersion: oauth.openshift.io/v1
kind: OAuthClient
metadata:
name: {{ include "kiali-server.fullname" . }}-{{ .Release.Namespace }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
redirectURIs:
- {{ .Values.kiali_route_url }}
grantMethod: auto
allowAnyScope: true
...
{{- end }}
{{- end }}

View File

@@ -1,15 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "kiali-server.fullname" . }}-controlplane
namespace: {{ include "kiali-server.istio_namespace" . }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources:
- secrets
verbs:
- list
...

View File

@@ -1,89 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "kiali-server.fullname" . }}-viewer
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources:
- configmaps
- endpoints
- pods/log
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- namespaces
- pods
- replicationcontrollers
- services
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/portforward
verbs:
- create
- post
- apiGroups: ["extensions", "apps"]
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- apiGroups: ["batch"]
resources:
- cronjobs
- jobs
verbs:
- get
- list
- watch
- apiGroups:
- networking.istio.io
- security.istio.io
resources: ["*"]
verbs:
- get
- list
- watch
- apiGroups: ["apps.openshift.io"]
resources:
- deploymentconfigs
verbs:
- get
- list
- watch
- apiGroups: ["project.openshift.io"]
resources:
- projects
verbs:
- get
- apiGroups: ["route.openshift.io"]
resources:
- routes
verbs:
- get
- apiGroups: ["iter8.tools"]
resources:
- experiments
verbs:
- get
- list
- watch
- apiGroups: ["authentication.k8s.io"]
resources:
- tokenreviews
verbs:
- create
...

View File

@@ -1,99 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "kiali-server.fullname" . }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources:
- configmaps
- endpoints
- pods/log
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- namespaces
- pods
- replicationcontrollers
- services
verbs:
- get
- list
- watch
- patch
- apiGroups: [""]
resources:
- pods/portforward
verbs:
- create
- post
- apiGroups: ["extensions", "apps"]
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- patch
- apiGroups: ["batch"]
resources:
- cronjobs
- jobs
verbs:
- get
- list
- watch
- patch
- apiGroups:
- networking.istio.io
- security.istio.io
resources: ["*"]
verbs:
- get
- list
- watch
- create
- delete
- patch
- apiGroups: ["apps.openshift.io"]
resources:
- deploymentconfigs
verbs:
- get
- list
- watch
- patch
- apiGroups: ["project.openshift.io"]
resources:
- projects
verbs:
- get
- apiGroups: ["route.openshift.io"]
resources:
- routes
verbs:
- get
- apiGroups: ["iter8.tools"]
resources:
- experiments
verbs:
- get
- list
- watch
- create
- delete
- patch
- apiGroups: ["authentication.k8s.io"]
resources:
- tokenreviews
verbs:
- create
...

View File

@@ -1,17 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "kiali-server.fullname" . }}-controlplane
namespace: {{ include "kiali-server.istio_namespace" . }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "kiali-server.fullname" . }}-controlplane
subjects:
- kind: ServiceAccount
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
...

View File

@@ -1,20 +0,0 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "kiali-server.fullname" . }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
{{- if .Values.deployment.view_only_mode }}
name: {{ include "kiali-server.fullname" . }}-viewer
{{- else }}
name: {{ include "kiali-server.fullname" . }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
...

View File

@@ -1,30 +0,0 @@
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
{{- if .Values.deployment.ingress_enabled }}
# As of OpenShift 4.5, need to use --disable-openapi-validation when installing via Helm
---
apiVersion: route.openshift.io/v1
kind: Route
metadata:
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
{{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }}}
annotations:
{{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }}
{{- end }}
spec:
{{- if hasKey .Values.deployment.override_ingress_yaml "spec" }}
{{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }}
{{- else }}
tls:
termination: reencrypt
insecureEdgeTerminationPolicy: Redirect
to:
kind: Service
targetPort: {{ .Values.server.port }}
name: {{ include "kiali-server.fullname" . }}
{{- end }}
...
{{- end }}
{{- end }}

View File

@@ -1,45 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
annotations:
{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }}
service.beta.openshift.io/serving-cert-secret-name: {{ include "kiali-server.fullname" . }}-cert-secret
{{- end }}
{{- if and (not (empty .Values.server.web_fqdn)) (not (empty .Values.server.web_schema)) }}
{{- if empty .Values.server.web_port }}
kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}{{ default "" .Values.server.web_root }}
{{- else }}
kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}:{{ .Values.server.web_port }}{{(default "" .Values.server.web_root) }}
{{- end }}
{{- end }}
{{- if .Values.deployment.service_annotations }}
{{- toYaml .Values.deployment.service_annotations | nindent 4 }}
{{- end }}
spec:
{{- if .Values.deployment.service_type }}
type: {{ .Values.deployment.service_type }}
{{- end }}
ports:
{{- if (include "kiali-server.identity.cert_file" .) }}
- name: tcp
{{- else }}
- name: http
{{- end }}
protocol: TCP
port: {{ .Values.server.port }}
{{- if .Values.server.metrics_enabled }}
- name: http-metrics
protocol: TCP
port: {{ .Values.server.metrics_port }}
{{- end }}
selector:
{{- include "kiali-server.selectorLabels" . | nindent 4 }}
{{- if .Values.deployment.additional_service_yaml }}
{{- toYaml .Values.deployment.additional_service_yaml | nindent 2 }}
{{- end }}
...

View File

@@ -1,9 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kiali-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kiali-server.labels" . | nindent 4 }}
...

View File

@@ -1,82 +0,0 @@
# 'fullnameOverride' is deprecated. Use 'deployment.instance_name' instead.
# This is only supported for backward compatibility and will be removed in a future version.
# If 'fullnameOverride' is not "kiali" and 'deployment.instance_name' is "kiali",
# then 'deployment.instance_name' will take the value of 'fullnameOverride' value.
# Otherwise, 'fullnameOverride' is ignored and 'deployment.instance_name' is used.
fullnameOverride: "kiali"
# This is required for "openshift" auth strategy.
# You have to know ahead of time what your Route URL will be because
# right now the helm chart can't figure this out at runtime (it would
# need to wait for the Kiali Route to be deployed and for OpenShift
# to start it up). If someone knows how to update this helm chart to
# do this, a PR would be welcome.
kiali_route_url: ""
#
# Settings that mimic the Kiali CR which are placed in the ConfigMap.
# Note that only those values used by the Helm Chart will be here.
#
istio_namespace: "" # default is where Kiali is installed
auth:
openid: {}
openshift: {}
strategy: ""
deployment:
# This only limits what Kiali will attempt to see, but Kiali Service Account has permissions to see everything.
# For more control over what the Kial Service Account can see, use the Kiali Operator
accessible_namespaces:
- "**"
additional_service_yaml: {}
affinity:
node: {}
pod: {}
pod_anti: {}
hpa:
api_version: "autoscaling/v2beta2"
spec: {}
image_name: quay.io/kiali/kiali
image_pull_policy: "Always"
image_pull_secrets: []
image_version: v1.38.1
ingress_enabled: true
instance_name: "kiali"
logger:
log_format: "text"
log_level: "info"
time_field_format: "2006-01-02T15:04:05Z07:00"
sampler_rate: "1"
node_selector: {}
override_ingress_yaml:
metadata: {}
pod_annotations: {}
pod_labels: {}
priority_class_name: ""
replicas: 1
resources: {}
secret_name: "kiali"
service_annotations: {}
service_type: ""
tolerations: []
version_label: v1.38.1
view_only_mode: false
external_services:
custom_dashboards:
enabled: true
identity: {}
#cert_file:
#private_key_file:
login_token:
signing_key: ""
server:
port: 20001
metrics_enabled: true
metrics_port: 9090
web_root: ""

View File

@@ -11,7 +11,3 @@ helm dep update
# Fetch dashboards from Grafana.com and update ZDT CM
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
# Kiali
rm -rf charts/kiali-server
curl -sL https://github.com/kiali/helm-charts/blob/master/docs/kiali-server-${KIALI_VERSION}.tgz?raw=true | tar xz -C charts

View File

@@ -1,6 +1,6 @@
global:
# hub: docker.io/istio
tag: 1.13.5-distroless
tag: 1.14.3-distroless
logAsJson: true

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
version: 0.8.1
version: 0.8.2
appVersion: 1.6.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -13,19 +13,20 @@ keywords:
- fluentd
- fluent-bit
maintainers:
- name: Quarky9
- name: Stefan Reimer
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: eck-operator
version: 2.1.0
version: 2.4.0
# repository: https://helm.elastic.co
condition: eck-operator.enabled
- name: fluentd
version: 0.3.7
version: 0.3.9
condition: fluentd.enabled
- name: fluent-bit
version: 0.19.23
version: 0.20.6
condition: fluent-bit.enabled
kubeVersion: ">= 1.18.0"
kubeVersion: ">= 1.20.0"

View File

@ -1,6 +1,6 @@
# kubezero-logging
![Version: 0.8.1](https://img.shields.io/badge/Version-0.8.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
![Version: 0.8.2](https://img.shields.io/badge/Version-0.8.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
KubeZero Umbrella Chart for complete EFK stack
@ -10,18 +10,18 @@ KubeZero Umbrella Chart for complete EFK stack
| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 | | |
| Stefan Reimer | <stefan@zero-downtime.net> | |
## Requirements
Kubernetes: `>= 1.18.0`
Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| | eck-operator | 2.1.0 |
| | fluent-bit | 0.19.23 |
| | fluentd | 0.3.7 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.3 |
| | eck-operator | 2.4.0 |
| | fluent-bit | 0.20.6 |
| | fluentd | 0.3.9 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
## Changes from upstream
### ECK

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 2.1.0
appVersion: 2.4.0
description: 'A Helm chart for deploying the Elastic Cloud on Kubernetes (ECK) operator: the official Kubernetes operator for orchestrating Elasticsearch, Kibana, APM Server, Enterprise Search, and Beats.'
home: https://github.com/elastic/cloud-on-k8s
icon: https://helm.elastic.co/icons/eck.png
@ -17,4 +17,4 @@ maintainers:
name: Elastic
name: eck-operator
type: application
version: 2.1.0
version: 2.4.0

View File

@ -12,7 +12,7 @@ For more information about the ECK Operator, see:
## Requirements
- Supported Kubernetes versions are listed in the documentation: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s_supported_versions.html
- Helm >= 3.0.0
- Helm >= 3.2.0
## Usage

View File

@ -126,6 +126,22 @@ updating docs/operating-eck/eck-permissions.asciidoc file.
- subjectaccessreviews
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- elastic-operator-leader
verbs:
- get
- watch
- update
- apiGroups:
- ""
resources:

View File

@ -45,3 +45,4 @@ data:
{{- if .Values.managedNamespaces }}
namespaces: [{{ join "," .Values.managedNamespaces }}]
{{- end }}
enable-leader-election: {{ .Values.config.enableLeaderElection }}

View File

@ -31,6 +31,9 @@ spec:
spec:
terminationGracePeriodSeconds: 10
serviceAccountName: {{ include "eck-operator.serviceAccountName" . }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}

View File

@ -21,3 +21,9 @@
{{- fail "Storage class validation cannot be enabled when cluster-scoped resource creation is disabled" -}}
{{- end -}}
{{- end -}}
{{- if (not .Values.config.enableLeaderElection) -}}
{{- if gt (int .Values.replicaCount) 1 -}}
{{- fail "Leader election must be enabled with more than one replica" -}}
{{- end -}}
{{- end -}}
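The new guard above rejects multi-replica deployments that have leader election disabled. A minimal values sketch that passes validation, using the keys referenced by the template:

```yaml
replicaCount: 2
config:
  enableLeaderElection: true   # required whenever replicaCount > 1
```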

View File

@ -25,6 +25,9 @@ image:
# tag is the container image tag. If not defined, defaults to chart appVersion.
tag: null
# priorityClassName defines the PriorityClass to be used by the operator pods.
priorityClassName: ""
# imagePullSecrets defines the secrets to use when pulling the operator container image.
imagePullSecrets: []
@ -176,6 +179,9 @@ config:
# Can be disabled if cluster-wide storage class RBAC access is not available.
validateStorageClass: true
# enableLeaderElection specifies whether leader election should be enabled
enableLeaderElection: true
# Prometheus PodMonitor configuration
# Reference: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmonitor
podMonitor:

View File

@ -1,9 +1,9 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Update fluent-bit image to 1.8.15."
description: "Additional upstream config option added"
apiVersion: v1
appVersion: 1.8.15
appVersion: 1.9.7
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
@ -24,4 +24,4 @@ maintainers:
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.19.23
version: 0.20.6

View File

@ -29,4 +29,14 @@ rules:
verbs:
- use
{{- end }}
{{- if and .Values.openShift.enabled .Values.openShift.securityContextConstraints.create }}
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
resourceNames:
- {{ include "fluent-bit.fullname" . }}
verbs:
- use
{{- end }}
{{- end -}}

View File

@ -13,6 +13,10 @@ data:
{{- (tpl .Values.config.inputs $) | nindent 4 }}
{{- (tpl .Values.config.filters $) | nindent 4 }}
{{- (tpl .Values.config.outputs $) | nindent 4 }}
{{- range $key, $val := .Values.config.upstream }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}
{{- end }}
{{- range $key, $val := .Values.config.extraFiles }}
{{ $key }}: |
{{- (tpl $val $) | nindent 4 }}

View File

@ -0,0 +1,37 @@
{{- if and .Values.openShift.enabled .Values.openShift.securityContextConstraints.create }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: {{ include "fluent-bit.fullname" . }}
{{- if .Values.openShift.securityContextConstraints.annotations }}
annotations:
{{- toYaml .Values.openShift.securityContextConstraints.annotations | nindent 4 }}
{{- end }}
allowPrivilegedContainer: true
allowPrivilegeEscalation: true
allowHostDirVolumePlugin: true
defaultAllowPrivilegeEscalation: false
# forbid host namespaces
allowHostNetwork: false
allowHostIPC: false
allowHostPorts: false
allowHostPID: false
allowedCapabilities: []
forbiddenSysctls:
- "*"
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
volumes:
- configMap
- emptyDir
- hostPath
- persistentVolumeClaim
- secret
{{- end }}

View File

@ -13,6 +13,9 @@ metadata:
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if and (eq .Values.service.type "ClusterIP") (.Values.service.clusterIP) }}
clusterIP: {{ .Values.service.clusterIP }}
{{- end }}
ports:
- port: {{ .Values.service.port }}
targetPort: http

View File

@ -36,6 +36,14 @@ podSecurityPolicy:
create: false
annotations: {}
openShift:
# Enables OpenShift support
enabled: false
# Creates SCC for Fluent-bit when OpenShift support is enabled
securityContextConstraints:
create: true
annotations: {}
podSecurityContext: {}
# fsGroup: 2000
@ -72,6 +80,7 @@ service:
port: 2020
labels: {}
# nodePort: 30020
# clusterIP: 172.16.10.1
annotations: {}
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
@ -322,6 +331,17 @@ config:
Logstash_Prefix node
Retry_Limit False
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/upstream-servers
upstream: {}
# upstream.conf: |
# [UPSTREAM]
# upstream1
#
# [NODE]
# name node-1
# host 127.0.0.1
# port 43000
## https://docs.fluentbit.io/manual/pipeline/parsers
customParsers: |
[PARSER]

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: v1.12.4
appVersion: v1.14.6
description: A Helm chart for Kubernetes
home: https://www.fluentd.org/
icon: https://www.fluentd.org/images/miscellany/fluentd-logo_2x.png
@ -12,4 +12,4 @@ name: fluentd
sources:
- https://github.com/fluent/fluentd/
- https://github.com/fluent/fluentd-kubernetes-daemonset
version: 0.3.7
version: 0.3.9

View File

@ -13,6 +13,10 @@ securityContext:
{{- with .Values.terminationGracePeriodSeconds }}
terminationGracePeriodSeconds: {{ . }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- toYaml . | nindent 2 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
securityContext:
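With this change `.Values.initContainers` is rendered verbatim into the fluentd pod spec. A minimal, hypothetical sketch (the service name is illustrative):

```yaml
initContainers:
  - name: wait-for-elasticsearch        # hypothetical init container
    image: busybox:1.36
    command: ['sh', '-c', 'until nc -z elasticsearch 9200; do sleep 2; done']
```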

View File

@ -173,6 +173,8 @@ env:
envFrom: []
initContainers: []
volumes:
- name: varlog
hostPath:

View File

@ -244,7 +244,7 @@ fluent-bit:
image:
#repository: public.ecr.aws/zero-downtime/fluent-bit
tag: 1.9.3
tag: 1.9.7
serviceMonitor:
enabled: false

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
type: application
version: 0.8.0
version: 0.8.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -15,18 +15,18 @@ maintainers:
email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
version: ">= 0.1.4"
version: ">= 0.1.5"
repository: https://cdn.zero-downtime.net/charts/
- name: kube-prometheus-stack
version: 34.9.0
version: 39.9.0
# Switch back to upstream once all alerts are fixed eg. etcd gpcr
# repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter
version: 3.2.0
version: 3.4.0
repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-adapter.enabled
- name: prometheus-pushgateway
version: 1.16.1
version: 1.18.2
# Switch back to upstream once namespaces are supported
# repository: https://prometheus-community.github.io/helm-charts
condition: prometheus-pushgateway.enabled

View File

@ -1,6 +1,6 @@
# kubezero-metrics
![Version: 0.8.0](https://img.shields.io/badge/Version-0.8.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
![Version: 0.8.1](https://img.shields.io/badge/Version-0.8.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Prometheus, Grafana and Alertmanager as well as all Kubernetes integrations.
@ -18,10 +18,10 @@ Kubernetes: `>= 1.20.0`
| Repository | Name | Version |
|------------|------|---------|
| | kube-prometheus-stack | 34.9.0 |
| | prometheus-pushgateway | 1.16.1 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.4 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.2.0 |
| | kube-prometheus-stack | 39.9.0 |
| | prometheus-pushgateway | 1.18.2 |
| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 3.4.0 |
## Values

View File

@ -6,20 +6,20 @@ annotations:
url: https://github.com/prometheus-operator/kube-prometheus
artifacthub.io/operator: "true"
apiVersion: v2
appVersion: 0.55.0
appVersion: 0.58.0
dependencies:
- condition: kubeStateMetrics.enabled
name: kube-state-metrics
repository: https://prometheus-community.github.io/helm-charts
version: 4.7.*
version: 4.15.*
- condition: nodeExporter.enabled
name: prometheus-node-exporter
repository: https://prometheus-community.github.io/helm-charts
version: 3.1.*
version: 3.3.*
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
version: 6.26.*
version: 6.32.*
description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards,
and Prometheus rules combined with documentation and scripts to provide easy to
operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus
@ -34,7 +34,6 @@ kubeVersion: '>=1.16.0-0'
maintainers:
- email: andrew@quadcorps.co.uk
name: andrewgkew
- name: bismarck
- email: cedric@desaintmartin.fr
name: desaintmartin
- email: gianrubio@gmail.com
@ -52,4 +51,4 @@ sources:
- https://github.com/prometheus-community/helm-charts
- https://github.com/prometheus-operator/kube-prometheus
type: application
version: 34.9.0
version: 39.9.0

View File

@ -11,20 +11,19 @@ _Note: This chart was formerly named `prometheus-operator` chart, now renamed to
- Kubernetes 1.16+
- Helm 3+
## Get Repo Info
## Get Helm Repository Info
```console
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Install Chart
## Install Helm Chart
```console
# Helm
$ helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack
helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
_See [configuration](#configuration) below._
@ -43,11 +42,10 @@ To disable dependencies during installation, see [multiple releases](#multiple-r
_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
## Uninstall Chart
## Uninstall Helm Chart
```console
# Helm
$ helm uninstall [RELEASE_NAME]
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes components associated with the chart and deletes the release.
@ -70,8 +68,7 @@ kubectl delete crd thanosrulers.monitoring.coreos.com
## Upgrading Chart
```console
# Helm
$ helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
With Helm v3, CRDs created by this chart are not updated by default and should be manually updated.
@ -83,10 +80,71 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen
A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
### From 38.x to 39.x
This upgraded prometheus-operator to v0.58.0 and prometheus to v2.37.0
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 37.x to 38.x
Reverted one of the default metrics relabelings for cAdvisor added in 36.x, due to it breaking container_network_* and various other statistics. If you do not want this change, you will need to override `kubelet.cAdvisorMetricRelabelings`.
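For example, `kubelet.cAdvisorMetricRelabelings` accepts standard Prometheus relabel configs; a minimal sketch (the metric name below is illustrative):

```yaml
kubelet:
  cAdvisorMetricRelabelings:
    - sourceLabels: [__name__]
      regex: container_memory_failures_total   # illustrative metric to drop
      action: drop
```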
### From 36.x to 37.x
This includes some default metric relabelings for cAdvisor and apiserver metrics to reduce cardinality. If you do not want these defaults, you will need to override `kubeApiServer.metricRelabelings` and/or `kubelet.cAdvisorMetricRelabelings`.
### From 35.x to 36.x
This upgraded prometheus-operator to v0.57.0 and prometheus to v2.36.1
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 34.x to 35.x
This upgraded prometheus-operator to v0.56.0 and prometheus to v2.35.0
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 33.x to 34.x
This upgrades prometheus-operator to v0.55.0 and prometheus to v2.33.5.
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
@ -98,14 +156,16 @@ kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-oper
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 32.x to 33.x
This upgrades the node exporter Chart to v3.0.0. Please review the changes to this subchart if you make customizations to hostMountPropagation.
This upgrades the prometheus-node-exporter Chart to v3.0.0. Please review the changes to this subchart if you make customizations to hostMountPropagation.
### From 31.x to 32.x
This upgrades prometheus-operator to v0.54.0 and prometheus to v2.33.1. It also changes the default for `grafana.serviceMonitor.enabled` to `true`.
Run these commands to update the CRDs before applying the upgrade.
```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
@ -117,7 +177,6 @@ kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-oper
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```
### From 30.x to 31.x
This version removes the built-in grafana ServiceMonitor and instead relies on the ServiceMonitor of the sub-chart.
@ -145,7 +204,7 @@ If you are using PodSecurityPolicies you can enable the previous behaviour by se
### From 26.x to 27.x
This version splits Node Exporter recording and alerting rules into separate config values.
This version splits prometheus-node-exporter chart recording and alerting rules into separate config values.
Instead of `defaultRules.rules.node`, the two new variables `defaultRules.rules.nodeExporterAlerting` and `defaultRules.rules.nodeExporterRecording` are used.
Also the following `defaultRules.rules` have been removed as they had no effect: `kubeApiserverError`, `kubePrometheusNodeAlerting`, `kubernetesAbsent`, `time`.
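Under the new layout the two flags are set independently, e.g.:

```yaml
defaultRules:
  rules:
    nodeExporterAlerting: true
    nodeExporterRecording: true
    # defaultRules.rules.node no longer exists
```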
@ -173,7 +232,7 @@ kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-oper
### From 23.x to 24.x
The custom `ServiceMonitor` for the _kube-state-metrics_ & _prometheus-node-exporter_ charts have been removed in favour of the built in sub-chart `ServiceMonitor`; for both sub-charts this means that `ServiceMonitor` customisations happen via the values passed to the chart. If you haven't directly customised this behaviour then there are no changes required to upgrade, but if you have please read the following.
The custom `ServiceMonitor` for the _kube-state-metrics_ & _prometheus-node-exporter_ charts have been removed in favour of the built-in sub-chart `ServiceMonitor`; for both sub-charts this means that `ServiceMonitor` customisations happen via the values passed to the chart. If you haven't directly customised this behaviour then there are no changes required to upgrade, but if you have please read the following.
For _kube-state-metrics_ the `ServiceMonitor` customisation is now set via `kube-state-metrics.prometheus.monitor` and the `kubeStateMetrics.serviceMonitor.selfMonitor.enabled` value has moved to `kube-state-metrics.selfMonitor.enabled`.
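A sketch of the relocated settings (the `honorLabels` customisation is illustrative):

```yaml
kube-state-metrics:
  prometheus:
    monitor:
      enabled: true
      honorLabels: true   # illustrative ServiceMonitor customisation
  selfMonitor:
    enabled: false        # moved from kubeStateMetrics.serviceMonitor.selfMonitor.enabled
```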
@ -395,7 +454,7 @@ With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes
A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end-user certificates. If the certificate already exists, the hook exits.
1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set.
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations
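Assuming the stock kube-prometheus-stack values layout, the hook-based provisioning described above is driven by the `prometheusOperator.admissionWebhooks` block; a minimal sketch:

```yaml
prometheusOperator:
  admissionWebhooks:
    enabled: true
    failurePolicy: Ignore   # patched into the webhook configs by the post-install hook
    patch:
      enabled: true         # run the pre-/post-install certificate hooks
```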
@ -412,7 +471,7 @@ Because the operator can only run as a single pod, there is potential for this c
## Developing Prometheus Rules and Grafana Dashboards
This chart's Grafana Dashboards and Prometheus Rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repo](https://github.com/prometheus-operator/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them by the scripts.
This chart's Grafana Dashboards and Prometheus Rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repository](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them by the scripts.
## Further Information

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 8.4.5
appVersion: 9.0.5
description: The leading tool for querying and visualizing time series and metrics.
home: https://grafana.net
icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png
@ -19,4 +19,4 @@ name: grafana
sources:
- https://github.com/grafana/grafana
type: application
version: 6.26.0
version: 6.32.10

View File

@ -59,17 +59,17 @@ This version requires Helm >= 3.1.0.
| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` |
| `priorityClassName` | Name of Priority Class to assign pods | `nil` |
| `image.repository` | Image repository | `grafana/grafana` |
| `image.tag` | Image tag (`Must be >= 5.0.0`) | `8.2.5` |
| `image.sha` | Image sha (optional) | `2acf04c016c77ca2e89af3536367ce847ee326effb933121881c7c89781051d3` |
| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` |
| `image.sha` | Image sha (optional) | `` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Image pull secrets | `{}` |
| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` |
| `service.enabled` | Enable grafana service | `true` |
| `service.type` | Kubernetes service type | `ClusterIP` |
| `service.port` | Kubernetes port where service is exposed | `80` |
| `service.portName` | Name of the port on the service | `service` |
| `service.targetPort` | Internal port the service targets | `3000` |
| `service.nodePort` | Kubernetes service nodePort | `nil` |
| `service.annotations` | Service annotations | `{}` |
| `service.annotations` | Service annotations (can be templated) | `{}` |
| `service.labels` | Custom labels | `{}` |
| `service.clusterIP` | internal cluster service IP | `nil` |
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` |
@ -98,12 +98,12 @@ This version requires Helm >= 3.1.0.
| `persistence.enabled` | Use persistent volume to store data | `false` |
| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` |
| `persistence.size` | Size of persistent volume claim | `10Gi` |
| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` |
| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` |
| `persistence.storageClassName` | Type of persistent volume claim | `nil` |
| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` |
| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` |
| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` |
| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` |
| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` |
| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` |
| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` |
| `initChownData.enabled` | If false, don't reset data ownership at startup | true |
@ -122,7 +122,8 @@ This version requires Helm >= 3.1.0.
| `enableServiceLinks` | Inject Kubernetes services as environment variables. | `true` |
| `extraSecretMounts` | Additional grafana server secret mounts | `[]` |
| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` |
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` |
| `createConfigmap` | Enable creating the grafana configmap | `true` |
| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` |
| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` |
| `plugins` | Plugins to be loaded along with Grafana | `[]` |
| `datasources` | Configure grafana datasources (passed through tpl) | `{}` |
@ -139,8 +140,9 @@ This version requires Helm >= 3.1.0.
| `podAnnotations` | Pod annotations | `{}` |
| `podLabels` | Pod labels | `{}` |
| `podPortName` | Name of the grafana port on the pod | `grafana` |
| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` |
| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` |
| `sidecar.image.tag` | Sidecar image tag | `1.15.6` |
| `sidecar.image.tag` | Sidecar image tag | `1.19.2` |
| `sidecar.image.sha` | Sidecar image sha (optional) | `""` |
| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` |
| `sidecar.resources` | Sidecar resources | `{}` |
@ -158,7 +160,7 @@ This version requires Helm >= 3.1.0.
| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` |
| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` |
| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
@ -168,7 +170,7 @@ This version requires Helm >= 3.1.0.
| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` |
| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` |
| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` |
| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` |
| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` |
| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` |
@ -180,7 +182,7 @@ This version requires Helm >= 3.1.0.
| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` |
| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` |
| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` |
@ -239,9 +241,15 @@ This version requires Helm >= 3.1.0.
| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` |
| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` |
| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` |
| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` |
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` |
| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` |
| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` |
| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` |
| `enableKubeBackwardCompatibility` | Enable backward compatibility with Kubernetes where a pod's definition below version 1.13 doesn't have the enableServiceLinks option | `false` |
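A minimal values sketch exercising a few of the documented keys; the secret name is hypothetical:

```yaml
image:
  tag: ""                      # empty tag falls back to the chart appVersion (9.0.5)
admin:
  # hypothetical secret name; existingSecret is passed through tpl, so it can be templated
  existingSecret: '{{ .Release.Name }}-grafana-admin'
persistence:
  enabled: true
  subPath: '{{ .Release.Name }}'   # subPath can be templated as well
```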

View File

@ -0,0 +1,16 @@
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/instance: grafana-test
app.kubernetes.io/name: grafana
topologyKey: failure-domain.beta.kubernetes.io/zone
weight: 100
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: grafana-test
app.kubernetes.io/name: grafana
topologyKey: kubernetes.io/hostname

View File

@ -0,0 +1,7 @@
extraConfigmapMounts:
- name: '{{ template "grafana.fullname" . }}'
configMap: '{{ template "grafana.fullname" . }}'
mountPath: /var/lib/grafana/dashboards/test-dashboard.json
# This is not a realistic test; here we only care that extraConfigmapMounts is non-empty and points to an existing ConfigMap
subPath: grafana.ini
readOnly: true

View File

@ -0,0 +1,3 @@
persistence:
type: pvc
enabled: true

View File

@ -141,6 +141,28 @@ Return the appropriate apiVersion for ingress.
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for podSecurityPolicy.
*/}}
{{- define "grafana.podSecurityPolicy.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1") (semverCompare ">= 1.16-0" .Capabilities.KubeVersion.Version) -}}
{{- print "policy/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for podDisruptionBudget.
*/}}
{{- define "grafana.podDisruptionBudget.apiVersion" -}}
{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}}
{{- print "policy/v1" -}}
{{- else -}}
{{- print "policy/v1beta1" -}}
{{- end -}}
{{- end -}}
{{/*
Return if ingress is stable.
*/}}

View File

@ -37,7 +37,7 @@ initContainers:
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
subPath: {{ tpl .Values.persistence.subPath . }}
{{- end }}
{{- end }}
{{- if .Values.dashboards }}
@ -69,7 +69,7 @@ initContainers:
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
subPath: {{ tpl .Values.persistence.subPath . }}
{{- end }}
{{- range .Values.extraSecretMounts }}
- name: {{ .name }}
@ -149,6 +149,14 @@ initContainers:
- name: SKIP_TLS_VERIFY
value: "{{ .Values.sidecar.skipTlsVerify }}"
{{- end }}
{{- if .Values.sidecar.livenessProbe }}
livenessProbe:
{{ toYaml .Values.sidecar.livenessProbe | indent 6 }}
{{- end }}
{{- if .Values.sidecar.readinessProbe }}
readinessProbe:
{{ toYaml .Values.sidecar.readinessProbe | indent 6 }}
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
{{- if .Values.sidecar.securityContext }}
@ -164,8 +172,9 @@ initContainers:
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- $root := . }}
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
- name: {{ tpl . $root }}
{{- end}}
{{- end }}
{{- if not .Values.enableKubeBackwardCompatibility }}
@ -189,6 +198,10 @@ containers:
- name: LABEL_VALUE
value: {{ quote .Values.sidecar.dashboards.labelValue }}
{{- end }}
{{- if .Values.sidecar.logLevel }}
- name: LOG_LEVEL
value: {{ quote .Values.sidecar.logLevel }}
{{- end }}
- name: FOLDER
value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}"
- name: RESOURCE
@ -221,6 +234,14 @@ containers:
- name: WATCH_CLIENT_TIMEOUT
value: "{{ .Values.sidecar.dashboards.watchClientTimeout }}"
{{- end }}
{{- if .Values.sidecar.livenessProbe }}
livenessProbe:
{{ toYaml .Values.sidecar.livenessProbe | indent 6 }}
{{- end }}
{{- if .Values.sidecar.readinessProbe }}
readinessProbe:
{{ toYaml .Values.sidecar.readinessProbe | indent 6 }}
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
{{- if .Values.sidecar.securityContext }}
@ -271,14 +292,14 @@ containers:
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if not .Values.sidecar.datasources.skipReload }}
@ -287,6 +308,14 @@ containers:
- name: REQ_METHOD
value: POST
{{- end }}
{{- if .Values.sidecar.livenessProbe }}
livenessProbe:
{{ toYaml .Values.sidecar.livenessProbe | indent 6 }}
{{- end }}
{{- if .Values.sidecar.readinessProbe }}
readinessProbe:
{{ toYaml .Values.sidecar.readinessProbe | indent 6 }}
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
{{- if .Values.sidecar.securityContext }}
@ -334,14 +363,14 @@ containers:
- name: REQ_USERNAME
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: REQ_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if not .Values.sidecar.plugins.skipReload }}
@ -350,6 +379,14 @@ containers:
- name: REQ_METHOD
value: POST
{{- end }}
{{- if .Values.sidecar.livenessProbe }}
livenessProbe:
{{ toYaml .Values.sidecar.livenessProbe | indent 6 }}
{{- end }}
{{- if .Values.sidecar.readinessProbe }}
readinessProbe:
{{ toYaml .Values.sidecar.readinessProbe | indent 6 }}
{{- end }}
resources:
{{ toYaml .Values.sidecar.resources | indent 6 }}
{{- if .Values.sidecar.securityContext }}
@ -362,9 +399,9 @@ containers:
{{- end}}
- name: {{ .Chart.Name }}
{{- if .Values.image.sha }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}"
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}"
{{- else }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.command }}
@ -386,16 +423,17 @@ containers:
mountPath: "/etc/grafana/ldap.toml"
subPath: ldap.toml
{{- end }}
{{- $root := . }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
subPath: {{ .subPath | default "" }}
- name: {{ tpl .name $root }}
mountPath: {{ tpl .mountPath $root }}
subPath: {{ (tpl .subPath $root) | default "" }}
readOnly: {{ .readOnly }}
{{- end }}
- name: storage
mountPath: "/var/lib/grafana"
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
subPath: {{ tpl .Values.persistence.subPath . }}
{{- end }}
{{- if .Values.dashboards }}
{{- range $provider, $dashboards := .Values.dashboards }}
@ -484,14 +522,14 @@ containers:
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.userKey | default "admin-user" }}
{{- end }}
{{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }}
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }}
name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }}
key: {{ .Values.admin.passwordKey | default "admin-password" }}
{{- end }}
{{- if .Values.plugins }}
@ -561,6 +599,9 @@ containers:
{{ toYaml .Values.livenessProbe | indent 6 }}
readinessProbe:
{{ toYaml .Values.readinessProbe | indent 6 }}
{{- if .Values.lifecycleHooks }}
lifecycle: {{ tpl (.Values.lifecycleHooks | toYaml) . | nindent 6 }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 6 }}
{{- with .Values.extraContainers }}
@ -570,9 +611,10 @@ containers:
nodeSelector:
{{ toYaml . | indent 2 }}
{{- end }}
{{- $root := . }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 2 }}
{{ tpl (toYaml .) $root | indent 2 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
@ -582,10 +624,14 @@ volumes:
- name: config
configMap:
name: {{ template "grafana.fullname" . }}
{{- $root := . }}
{{- range .Values.extraConfigmapMounts }}
- name: {{ .name }}
- name: {{ tpl .name $root }}
configMap:
name: {{ .configMap }}
name: {{ tpl .configMap $root }}
{{- if .items }}
items: {{ toYaml .items | nindent 6 }}
{{- end }}
{{- end }}
{{- if .Values.dashboards }}
{{- range (keys .Values.dashboards | sortAlpha) }}
@ -617,7 +663,7 @@ volumes:
{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }}
- name: storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }}
claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }}
{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }}
# nothing
{{- else }}
@ -634,7 +680,12 @@ volumes:
{{- end -}}
{{- if .Values.sidecar.dashboards.enabled }}
- name: sc-dashboard-volume
{{- if .Values.sidecar.dashboards.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.sidecar.dashboards.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- if .Values.sidecar.dashboards.SCProvider }}
- name: sc-dashboard-provider
configMap:
@ -643,22 +694,40 @@ volumes:
{{- end }}
{{- if .Values.sidecar.datasources.enabled }}
- name: sc-datasources-volume
{{- if .Values.sidecar.datasources.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.sidecar.datasources.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- end -}}
{{- if .Values.sidecar.plugins.enabled }}
- name: sc-plugins-volume
{{- if .Values.sidecar.plugins.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.sidecar.plugins.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- end -}}
{{- if .Values.sidecar.notifiers.enabled }}
- name: sc-notifiers-volume
{{- if .Values.sidecar.notifiers.sizeLimit }}
emptyDir:
sizeLimit: {{ .Values.sidecar.notifiers.sizeLimit }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- end -}}
{{- range .Values.extraSecretMounts }}
{{- if .secretName }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
defaultMode: {{ .defaultMode }}
{{- if .items }}
items: {{ toYaml .items | nindent 6 }}
{{- end }}
{{- else if .projected }}
- name: {{ .name }}
projected: {{- toYaml .projected | nindent 6 }}

View File

@ -9,9 +9,9 @@ metadata:
{{ toYaml . | indent 4 }}
{{- end }}
name: {{ template "grafana.fullname" . }}-clusterrole
{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }}
{{- if or .Values.sidecar.dashboards.enabled (or .Values.rbac.extraClusterRoleRules (or .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled)) }}
rules:
{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}
{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]

View File

@ -1,3 +1,4 @@
{{- if .Values.createConfigmap }}
apiVersion: v1
kind: ConfigMap
metadata:
@ -14,7 +15,19 @@ data:
plugins: {{ join "," .Values.plugins }}
{{- end }}
grafana.ini: |
{{- range $elem, $elemVal := index .Values "grafana.ini" }}
{{- if not (kindIs "map" $elemVal) }}
{{- if kindIs "invalid" $elemVal }}
{{ $elem }} =
{{- else if kindIs "string" $elemVal }}
{{ $elem }} = {{ tpl $elemVal $ }}
{{- else }}
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- range $key, $value := index .Values "grafana.ini" }}
{{- if kindIs "map" $value }}
[{{ $key }}]
{{- range $elem, $elemVal := $value }}
{{- if kindIs "invalid" $elemVal }}
@ -25,6 +38,7 @@ data:
{{ $elem }} = {{ $elemVal }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.datasources }}
@ -60,7 +74,7 @@ data:
{{- end }}
{{- end }}
{{- end }}
{{ $dashboardProviders := .Values.dashboardProviders }}
{{- range $provider, $dashboards := .Values.dashboards }}
{{- range $key, $value := $dashboards }}
{{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }}
@ -74,9 +88,16 @@ data:
{{- end }}
-H "Content-Type: application/json;charset=UTF-8" \
{{ end }}
{{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \
> "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
{{- $dpPath := "" -}}
{{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers -}}
{{- if eq $kd.name $provider -}}
{{- $dpPath = $kd.options.path -}}
{{- end -}}
{{- end }}
{{- end -}}
{{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \
> "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json"
{{- end }}
{{- end -}}
{{- end }}
{{- end }}
{{- end }}
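The download script above now resolves the provider's `options.path` instead of hard-coding `/var/lib/grafana/dashboards/<provider>`. A minimal values sketch (the provider name and dashboard id are illustrative):

```yaml
dashboardProviders:
  dashboardproviders.yaml:
    apiVersion: 1
    providers:
      - name: custom                                  # matched against the provider key below
        options:
          path: /var/lib/grafana/dashboards/custom    # download target resolved by the template
dashboards:
  custom:
    node-exporter:
      gnetId: 1860      # grafana.com dashboard id (illustrative)
      revision: 29
```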

View File

@ -1,4 +1,4 @@
{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }}
{{ if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }}
apiVersion: apps/v1
kind: Deployment
metadata:

View File

@ -56,8 +56,9 @@ spec:
{{- end }}
{{- if .Values.imageRenderer.image.pullSecrets }}
imagePullSecrets:
{{- $root := . }}
{{- range .Values.imageRenderer.image.pullSecrets }}
- name: {{ . }}
- name: {{ tpl . $root }}
{{- end}}
{{- end }}
containers:
@ -105,9 +106,10 @@ spec:
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- $root := . }}
{{- with .Values.imageRenderer.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{ tpl (toYaml .) $root | indent 8 }}
{{- end }}
{{- with .Values.imageRenderer.tolerations }}
tolerations:

View File

@ -64,10 +64,7 @@ spec:
- port: {{ .Values.service.port }}
protocol: TCP
to:
- namespaceSelector:
matchLabels:
name: {{ template "grafana.namespace" . }}
podSelector:
- podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 14 }}
{{- if .Values.podLabels }}

View File

@ -14,9 +14,23 @@ metadata:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
policyTypes:
{{- if .Values.networkPolicy.ingress }}
- Ingress
{{- end }}
{{- if .Values.networkPolicy.egress.enabled }}
- Egress
{{- end }}
podSelector:
matchLabels:
{{- include "grafana.selectorLabels" . | nindent 6 }}
{{- if .Values.networkPolicy.egress.enabled }}
egress:
- ports:
{{ .Values.networkPolicy.egress.ports | toJson }}
{{- end }}
{{- if .Values.networkPolicy.ingress }}
ingress:
- ports:
- port: {{ .Values.service.targetPort }}
@ -34,4 +48,5 @@ spec:
{{- include "grafana.labels" . | nindent 14 }}
role: read
{{- end }}
{{- end }}
{{- end }}
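With the new `policyTypes` handling, ingress and egress can be toggled independently. A sketch of the corresponding values (the egress port is illustrative):

```yaml
networkPolicy:
  enabled: true
  ingress: true          # keep the ingress rule on the Grafana port
  egress:
    enabled: true
    ports:               # rendered via toJson into the egress rule
      - port: 53
        protocol: UDP
```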

View File

@ -1,5 +1,5 @@
{{- if .Values.podDisruptionBudget }}
apiVersion: policy/v1beta1
apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }}
kind: PodDisruptionBudget
metadata:
name: {{ template "grafana.fullname" . }}

View File

@ -1,5 +1,5 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
apiVersion: {{ include "grafana.podSecurityPolicy.apiVersion" . }}
kind: PodSecurityPolicy
metadata:
name: {{ template "grafana.fullname" . }}

View File

@ -10,7 +10,7 @@ metadata:
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }}
{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled (or .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)))) }}
rules:
{{- if .Values.rbac.pspEnabled }}
- apiGroups: ['extensions']
@ -18,7 +18,7 @@ rules:
verbs: ['use']
resourceNames: [{{ template "grafana.fullname" . }}]
{{- end }}
{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }}
{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled)) }}
- apiGroups: [""] # "" indicates the core API group
resources: ["configmaps", "secrets"]
verbs: ["get", "watch", "list"]

View File

@ -4,9 +4,10 @@ kind: ServiceAccount
metadata:
labels:
{{- include "grafana.labels" . | nindent 4 }}
{{- $root := . }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{ tpl (toYaml . | indent 4) $root }}
{{- end }}
name: {{ template "grafana.serviceAccountName" . }}
namespace: {{ template "grafana.namespace" . }}

View File

@ -6,6 +6,8 @@ metadata:
name: {{ template "grafana.fullname" . }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ .Values.serviceMonitor.namespace }}
{{- else }}
namespace: {{ template "grafana.namespace" . }}
{{- end }}
labels:
{{- include "grafana.labels" . | nindent 4 }}
@ -38,5 +40,5 @@ spec:
{{- include "grafana.selectorLabels" . | nindent 8 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
- {{ template "grafana.namespace" . }}
{{- end }}

View File

@ -1,4 +1,4 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}}
{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")))}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
@ -35,6 +35,7 @@ spec:
{{- end }}
spec:
{{- include "grafana.pod" . | nindent 6 }}
{{- if .Values.persistence.enabled}}
volumeClaimTemplates:
- metadata:
name: storage
@ -49,4 +50,5 @@ spec:
matchLabels:
{{ toYaml . | indent 10 }}
{{- end }}
{{- end }}
{{- end }}
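The StatefulSet is now also rendered when `useStatefulSet` is set, independent of persistence, while volumeClaimTemplates are only emitted when persistence is enabled. A minimal sketch:

```yaml
useStatefulSet: true     # force a StatefulSet even with persistence disabled
persistence:
  enabled: false         # no volumeClaimTemplates will be rendered
```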

View File

@ -7,25 +7,28 @@ metadata:
{{- include "grafana.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test-success
"helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
namespace: {{ template "grafana.namespace" . }}
spec:
serviceAccountName: {{ template "grafana.serviceAccountNameTest" . }}
{{- if .Values.testFramework.securityContext }}
securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }}
{{- end }}
{{- $root := . }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
- name: {{ tpl . $root }}
{{- end}}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 4 }}
{{- end }}
{{- $root := . }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 4 }}
{{ tpl (toYaml .) $root | indent 4 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:

View File

@ -17,6 +17,7 @@ serviceAccount:
create: true
name:
nameTest:
## Service account annotations. Can be templated.
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
autoMount: true
@ -73,13 +74,15 @@ livenessProbe:
image:
repository: grafana/grafana
tag: 8.4.5
# Overrides the Grafana image tag whose default is the chart appVersion
tag: ""
sha: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Can be templated.
##
# pullSecrets:
# - myRegistryKeySecretName
@ -99,6 +102,11 @@ securityContext:
containerSecurityContext:
{}
# Enable creating the grafana configmap
createConfigmap: true
# Extra configmaps to mount in grafana pods
# Values are templated.
extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/grafana/ssl/
@ -236,7 +244,7 @@ nodeSelector: {}
##
tolerations: []
## Affinity for pod assignment
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
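
Since affinity is now evaluated as a template, release metadata can be interpolated; an illustrative anti-affinity snippet (the instance label is assumed to match the chart's selector labels):

  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              app.kubernetes.io/instance: "{{ .Release.Name }}"
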
@ -286,7 +294,9 @@ persistence:
finalizers:
- kubernetes.io/pvc-protection
# selectorLabels: {}
## Sub-directory of the PV to mount. Can be templated.
# subPath: ""
## Name of an existing PVC. Can be templated.
# existingClaim:
## If persistence is not enabled, this allows mounting the
@ -332,6 +342,7 @@ adminUser: admin
# Use an existing secret for the admin user.
admin:
## Name of the secret. Can be templated.
existingSecret: ""
userKey: admin-user
passwordKey: admin-password
@ -458,6 +469,12 @@ extraVolumeMounts: []
# readOnly: true
# hostPath: /usr/shared/
## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
lifecycleHooks: {}
# postStart:
# exec:
# command: []
## Pass the plugins you want installed as a list.
##
plugins: []
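
A filled-in example of the new lifecycleHooks value using the standard Kubernetes lifecycle schema (the command itself is illustrative):

  lifecycleHooks:
    postStart:
      exec:
        command: ["/bin/sh", "-c", "echo Grafana is up"]
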
@ -634,7 +651,7 @@ smtp:
sidecar:
image:
repository: quay.io/kiwigrid/k8s-sidecar
tag: 1.15.6
tag: 1.19.2
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
@ -648,13 +665,17 @@ sidecar:
# skipTlsVerify Set to true to skip tls verification for kube api calls
# skipTlsVerify: true
enableUniqueFilenames: false
readinessProbe: {}
livenessProbe: {}
# Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
logLevel: INFO
dashboards:
enabled: false
SCProvider: true
# label that the configmaps with dashboards are marked with
label: grafana_dashboard
# value of label that the configmaps with dashboards are set to
labelValue: null
labelValue: ""
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
folder: /tmp/dashboards
# The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
@ -700,12 +721,14 @@ sidecar:
foldersFromFilesStructure: false
# Additional dashboard sidecar volume mounts
extraMounts: []
# Sets the size limit of the dashboard sidecar emptyDir volume
sizeLimit: {}
datasources:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource
# value of label that the configmaps with datasources are set to
labelValue: null
labelValue: ""
# If specified, the sidecar will search for datasource config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
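
The sizeLimit keys added in this hunk presumably feed the emptyDir sizeLimit of the respective sidecar volume, so a resource quantity is expected; e.g. for the dashboards sidecar (10Mi is illustrative):

  sidecar:
    dashboards:
      sizeLimit: 10Mi
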
@ -720,12 +743,14 @@ sidecar:
# Deploy the datasource sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any datasources defined at startup time.
initDatasources: false
# Sets the size limit of the datasource sidecar emptyDir volume
sizeLimit: {}
plugins:
enabled: false
# label that the configmaps with plugins are marked with
label: grafana_plugin
# value of label that the configmaps with plugins are set to
labelValue: null
labelValue: ""
# If specified, the sidecar will search for plugin config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
@ -740,6 +765,8 @@ sidecar:
# Deploy the plugin sidecar as an initContainer in addition to a container.
# This is needed if skipReload is true, to load any plugins defined at startup time.
initPlugins: false
# Sets the size limit of the plugin sidecar emptyDir volume
sizeLimit: {}
notifiers:
enabled: false
# label that the configmaps with notifiers are marked with
@ -750,6 +777,8 @@ sidecar:
searchNamespace: null
# search in configmap, secret or both
resource: both
# Sets the size limit of the notifier sidecar emptyDir volume
sizeLimit: {}
## Override the deployment namespace
##
@ -815,6 +844,20 @@ imageRenderer:
# requests:
# cpu: 50m
# memory: 50Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment (evaluated as template)
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
networkPolicy:
## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
@ -826,6 +869,10 @@ networkPolicy:
## When true, grafana will accept connections from any source
## (with the correct destination port).
##
ingress: true
## @param networkPolicy.ingress When true, enables the creation of
## an ingress network policy
##
allowExternal: true
## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
@ -841,10 +888,34 @@ networkPolicy:
## - {key: role, operator: In, values: [frontend]}
##
explicitNamespacesSelector: {}
##
##
##
##
##
##
egress:
## @param networkPolicy.egress.enabled When enabled, an egress network policy will be
## created, allowing Grafana to connect to external data sources from the Kubernetes cluster.
enabled: false
##
## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress
ports: []
## Add ports to the egress by specifying - port: <port number>
## E.g.:
## ports:
## - port: 80
## - port: 443
##
##
##
##
##
##
# Enable backward compatibility with Kubernetes versions below 1.13, which lack the enableServiceLinks option
enableKubeBackwardCompatibility: false
useStatefulSet: false
# Create dynamic manifests via values:
extraObjects: []
# - apiVersion: "kubernetes-client.io/v1"
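
extraObjects renders additional arbitrary manifests with the release; a hypothetical example adding a plain ConfigMap:

  extraObjects:
    - apiVersion: v1
      kind: ConfigMap
      metadata:
        name: grafana-extra-config  # hypothetical name
      data:
        note: "deployed alongside the chart"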

View File

@ -1,5 +1,5 @@
apiVersion: v2
appVersion: 2.4.1
appVersion: 2.5.0
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
@ -18,4 +18,4 @@ name: kube-state-metrics
sources:
- https://github.com/kubernetes/kube-state-metrics/
type: application
version: 4.7.0
version: 4.15.0

View File

@ -9,6 +9,10 @@ metadata:
namespace: {{ template "kube-state-metrics.namespace" . }}
labels:
{{- include "kube-state-metrics.labels" . | indent 4 }}
{{- if .Values.annotations }}
annotations:
{{ toYaml .Values.annotations | indent 4 }}
{{- end }}
spec:
selector:
matchLabels:
@ -30,16 +34,13 @@ spec:
hostNetwork: {{ .Values.hostNetwork }}
serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }}
{{- if .Values.securityContext.enabled }}
securityContext:
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsGroup: {{ .Values.securityContext.runAsGroup }}
runAsUser: {{ .Values.securityContext.runAsUser }}
securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
- name: {{ template "kube-state-metrics.name" . }}
{{- if .Values.autosharding.enabled }}
env:
- name: POD_NAME
@ -53,9 +54,7 @@ spec:
{{- end }}
args:
{{- if .Values.extraArgs }}
{{- range .Values.extraArgs }}
- {{ . }}
{{- end }}
{{- .Values.extraArgs | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.service.port }}
- --port={{ .Values.service.port | default 8080}}
@ -75,7 +74,9 @@ spec:
{{- if .Values.metricDenylist }}
- --metric-denylist={{ .Values.metricDenylist | join "," }}
{{- end }}
{{- if .Values.namespaces }}
{{- if .Values.releaseNamespace }}
- --namespaces={{ template "kube-state-metrics.namespace" . }}
{{- else if .Values.namespaces }}
- --namespaces={{ tpl (.Values.namespaces | join ",") $ }}
{{- end }}
{{- if .Values.namespacesDenylist }}
@ -92,12 +93,17 @@ spec:
- --telemetry-host={{ .Values.selfMonitor.telemetryHost }}
{{- end }}
- --telemetry-port={{ .Values.selfMonitor.telemetryPort | default 8081 }}
{{- if .Values.kubeconfig.enabled }}
{{- if or (.Values.kubeconfig.enabled) (.Values.volumeMounts) }}
volumeMounts:
{{- if .Values.kubeconfig.enabled }}
- name: kubeconfig
mountPath: /opt/k8s/.kube/
readOnly: true
{{- end }}
{{- if .Values.volumeMounts }}
{{ toYaml .Values.volumeMounts | indent 8 }}
{{- end }}
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
@ -143,9 +149,18 @@ spec:
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.kubeconfig.enabled}}
{{- if .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{ toYaml .Values.topologySpreadConstraints | indent 8 }}
{{- end }}
{{- if or (.Values.kubeconfig.enabled) (.Values.volumes) }}
volumes:
{{- if .Values.kubeconfig.enabled}}
- name: kubeconfig
secret:
secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig
{{- end }}
{{- if .Values.volumes }}
{{ toYaml .Values.volumes | indent 8 }}
{{- end }}
{{- end }}

View File

@ -1,5 +1,9 @@
{{- if .Values.podDisruptionBudget -}}
{{ if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}}
apiVersion: policy/v1
{{- else -}}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ template "kube-state-metrics.fullname" . }}
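
The template is gated on .Values.podDisruptionBudget itself; assuming the chart copies that value into the PDB spec, a minimal sketch:

  podDisruptionBudget:
    maxUnavailable: 1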

View File

@ -183,5 +183,8 @@ rules:
- verticalpodautoscalers
verbs: ["list", "watch"]
{{ end -}}
{{ if $.Values.rbac.extraRules }}
{{ toYaml $.Values.rbac.extraRules }}
{{ end }}
{{- end -}}
{{- end -}}

View File

@ -27,6 +27,9 @@ spec:
protocol: TCP
port: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
targetPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
{{- if .Values.selfMonitor.telemetryNodePort }}
nodePort: {{ .Values.selfMonitor.telemetryNodePort }}
{{- end }}
{{ end }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: "{{ .Values.service.loadBalancerIP }}"
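
Per the values comment further down, telemetryNodePort only takes effect with service.type NodePort; a matching values sketch (30081 is illustrative):

  service:
    type: NodePort
  selfMonitor:
    enabled: true
    telemetryNodePort: 30081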

View File

@ -40,6 +40,13 @@ spec:
relabelings:
{{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }}
{{- end }}
{{- if .Values.prometheus.monitor.scheme }}
scheme: {{ .Values.prometheus.monitor.scheme }}
{{- end }}
{{- if .Values.prometheus.monitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.prometheus.monitor.tlsConfig | nindent 8 }}
{{- end }}
{{- if .Values.selfMonitor.enabled }}
- port: metrics
{{- if .Values.prometheus.monitor.interval }}
@ -62,5 +69,12 @@ spec:
relabelings:
{{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }}
{{- end }}
{{- if .Values.prometheus.monitor.scheme }}
scheme: {{ .Values.prometheus.monitor.scheme }}
{{- end }}
{{- if .Values.prometheus.monitor.tlsConfig }}
tlsConfig:
{{- toYaml .Values.prometheus.monitor.tlsConfig | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}
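
The new scheme and tlsConfig fields pass straight into the ServiceMonitor endpoints; e.g. to scrape over TLS (insecureSkipVerify shown for illustration only):

  prometheus:
    monitor:
      enabled: true
      scheme: https
      tlsConfig:
        insecureSkipVerify: true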

View File

@ -1,8 +1,8 @@
# Default values for kube-state-metrics.
prometheusScrape: true
image:
repository: k8s.gcr.io/kube-state-metrics/kube-state-metrics
tag: v2.4.1
repository: registry.k8s.io/kube-state-metrics/kube-state-metrics
tag: v2.5.0
pullPolicy: IfNotPresent
imagePullSecrets: []
@ -50,6 +50,13 @@ rbac:
# If set to false, run without cluster-admin privileges; ONLY works if namespace is also set (if useExistingRole is set, this name is used as the ClusterRole or Role to bind to)
useClusterRole: true
# Add permissions for CustomResources' apiGroups in Role/ClusterRole. Should be used in conjunction with Custom Resource State Metrics configuration
# Example:
# - apiGroups: ["monitoring.coreos.com"]
# resources: ["prometheuses"]
# verbs: ["list", "watch"]
extraRules: []
serviceAccount:
# Specifies whether a ServiceAccount should be created, require rbac true
create: true
@ -77,6 +84,8 @@ prometheus:
honorLabels: false
metricRelabelings: []
relabelings: []
scheme: ""
tlsConfig: {}
## Specify if a Pod Security Policy for kube-state-metrics must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
@ -118,6 +127,13 @@ affinity: {}
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
## Topology spread constraints for pod assignment
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
topologySpreadConstraints: []
# Annotations to be added to the deployment/statefulset
annotations: {}
# Annotations to be added to the pod
podAnnotations: {}
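
An illustrative entry for the new topologySpreadConstraints value, following the standard pod topology spread schema (labels assumed to match the chart's):

  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: kube-state-metrics
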
@ -193,6 +209,10 @@ kubeconfig:
# base64 encoded kube-config file
secret:
# Enable only the release namespace for collecting resources. By default all namespaces are collected.
# If releaseNamespace and namespaces are both set, only releaseNamespace will be used.
releaseNamespace: false
# Comma-separated list of namespaces to be enabled for collecting resources. By default all namespaces are collected.
namespaces: ""
@ -223,7 +243,22 @@ kubeTargetVersionOverride: ""
# Enable self metrics configuration for service and Service Monitor
# Default values for telemetry configuration can be overridden
# If you set telemetryNodePort, you must also set service.type to NodePort
selfMonitor:
enabled: false
# telemetryHost: 0.0.0.0
# telemetryPort: 8081
# telemetryNodePort: 0
# volumeMounts are used to add custom volume mounts to the deployment.
# See example below
volumeMounts: []
# - mountPath: /etc/config
# name: config-volume
# volumes are used to add custom volumes to the deployment
# See example below
volumes: []
# - configMap:
# name: cm-for-volume
# name: config-volume

View File

@ -9,11 +9,10 @@ keywords:
maintainers:
- email: gianrubio@gmail.com
name: gianrubio
- name: bismarck
- email: zanhsieh@gmail.com
name: zanhsieh
name: prometheus-node-exporter
sources:
- https://github.com/prometheus/node_exporter/
type: application
version: 3.1.0
version: 3.3.1

Some files were not shown because too many files have changed in this diff.