# KubeZero/charts/kubezero-argo/values.yaml

argo-events:
  enabled: false

  configs:
    jetstream:
      # Default JetStream settings, can be overridden by an EventBus JetStream spec
      # Ref: https://docs.nats.io/running-a-nats-service/configuration#jetstream
      settings:
        # -- Maximum size of the memory storage (e.g. 1G)
        maxMemoryStore: -1
        # -- Maximum size of the file storage (e.g. 20G)
        maxFileStore: -1
      streamConfig:
        # -- Maximum number of messages before expiring the oldest message
        maxMsgs: 1000000
        # -- Maximum age of existing messages, e.g. "72h", "4h35m"
        maxAge: 72h
        # -- Total size of messages before expiring the oldest message, 0 means unlimited
        maxBytes: 1GB
        # -- Number of replicas, defaults to 3 and requires at least 3
        replicas: 1
        # -- Duplicate message tracking window (maps to the JetStream stream's duplicate window)
        duplicates: 300s
      # Supported versions of the JetStream eventbus
      # see: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml
      # do NOT use the -alpine tag as the entrypoint differs
      versions:
        - version: 2.10.11
          natsImage: nats:2.10.11-scratch
          metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0
          configReloaderImage: natsio/nats-server-config-reloader:0.14.1
          startCommand: /nats-server
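  # The defaults above can be overridden per EventBus. A minimal, hypothetical
  # sketch (names illustrative; settings/streamConfig are free-form strings in
  # the argo-events EventBus CRD):
  #   apiVersion: argoproj.io/v1alpha1
  #   kind: EventBus
  #   metadata:
  #     name: default
  #   spec:
  #     jetstream:
  #       version: 2.10.11
  #       replicas: 3
  #       streamConfig: |
  #         maxAge: 24h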
argocd-apps:
  enabled: false
  projects: {}
  applications: {}
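  # A minimal, hypothetical example of the map-style entries the argocd-apps
  # chart accepts here (project name and repo URL are illustrative only):
  # projects:
  #   my-project:
  #     namespace: argocd
  #     sourceRepos:
  #       - https://git.example.com/my-repo.git
  #     destinations:
  #       - namespace: '*'
  #         server: https://kubernetes.default.svc
  # applications:
  #   my-app:
  #     namespace: argocd
  #     project: my-project
  #     source:
  #       repoURL: https://git.example.com/my-repo.git
  #       targetRevision: HEAD
  #       path: deploy
  #     destination:
  #       server: https://kubernetes.default.svc
  #       namespace: my-app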
argo-cd:
  enabled: false

  global:
    logging:
      format: json
    image:
      repository: public.ecr.aws/zero-downtime/zdt-argocd
      tag: v2.11.5

  configs:
    styles: |
      .sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
      .sidebar__logo__text-logo { height: 0em; }
      .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }
    cm:
      ui.bannercontent: "KubeZero v1.29 - Release notes"
      ui.bannerurl: "https://kubezero.com/releases/v1.29"
      ui.bannerpermanent: "true"
      ui.bannerposition: "bottom"

      # argo-cd.server.config.url -- ArgoCD URL being exposed via Istio
      url: https://argocd.example.com

      timeout.reconciliation: 300s
      resource.customizations: |
        cert-manager.io/Certificate:
          # Lua script for customizing the health status assessment
          health.lua: |
            hs = {}
            if obj.status ~= nil then
              if obj.status.conditions ~= nil then
                for i, condition in ipairs(obj.status.conditions) do
                  if condition.type == "Ready" and condition.status == "False" then
                    hs.status = "Degraded"
                    hs.message = condition.message
                    return hs
                  end
                  if condition.type == "Ready" and condition.status == "True" then
                    hs.status = "Healthy"
                    hs.message = condition.message
                    return hs
                  end
                end
              end
            end
            hs.status = "Progressing"
            hs.message = "Waiting for certificate"
            return hs
    secret:
      createSecret: false
      # Generate a bcrypt admin password hash with:
      # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'`
      # argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
      # argocdServerAdminPassword: "ref+file://secrets.yaml#/test"
      # argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"

    ssh:
      extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8YdJ4YcOK7A0K7qOWsRjCS+wHTStXRcwBe7gjG43HPSNijiCKoGf/c+tfNsRhyouawg7Law6M6ahmS/jKWBpznRIM+OdOFVSuhnK/nr6h6wG3/ZfdLicyAPvx1/STGY/Fc6/zXA88i/9PV+g84gSVmhf3fGY92wokiASiu9DU4T9dT1gIkdyOX6fbMi1/mMKLSrHnAQcjyasYDvw9ISCJ95EoSwbj7O4c+7jo9fxYvdCfZZZAEZGozTRLAAO0AnjVcRah7bZV/jfHJuhOipV/TB7UVAhlVv1dfGV7hoTp9UKtKZFJF4cjIrSGxqQA/mdhSdLgkepK7yc4Jp2xGnaarhY29DfqsQqop+ugFpTbj7Xy5Rco07mXc6XssbAZhI1xtCOX20N4PufBuYippCK5AE6AiAyVtJmvfGQk4HP+TjOyhFo7PZm3wc9Hym7IBBVC0Sl30K8ddufkAgHwNGvvu1ZmD9ZWaMOXJDHBCZGMMr16QREZwVtZTwMEQalc7/yqmuqMhmcJIfs/GA2Lt91y+pq9C8XyeUL0VFPch0vkcLSRe3ghMZpRFJ/ht307xPcLzgTJqN6oQtNNDzSQglSEjwhge2K4GyWcIh+oGsWxWz5dHyk1iJmw90Y976BZIl/mYVgbTtZAJ81oGe/0k5rAe+LDL+Yq6tG28QFOg0QmiQ=="
    params:
      controller.status.processors: "10"
      controller.operation.processors: "5"
      server.insecure: true
      server.enable.gzip: true
  controller:
    metrics:
      enabled: false
      serviceMonitor:
        enabled: true

    resources:
      limits:
        # cpu: 500m
        memory: 2048Mi
      requests:
        cpu: 100m
        memory: 512Mi
  repoServer:
    metrics:
      enabled: false
      serviceMonitor:
        enabled: true

    volumes:
      - name: kubeconfigs
        emptyDir: {}
    volumeMounts:
      - mountPath: /home/argocd/.kube
        name: kubeconfigs

    # Allow vals to read internal secrets across all namespaces
    clusterRoleRules:
      enabled: true
      rules:
        - apiGroups: [""]
          resources: ["secrets"]
          verbs: ["get", "watch", "list"]
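    # With this RBAC in place, vals can resolve in-cluster secret references,
    # e.g. (namespace, secret, and key are hypothetical):
    #   ref+k8s://v1/Secret/argocd/my-secret/password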
    initContainers:
      - name: create-kubeconfig
        image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'
        imagePullPolicy: '{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}'
        command:
          - /usr/local/bin/sa2kubeconfig.sh
          - /home/argocd/.kube/config
        volumeMounts:
          - mountPath: /home/argocd/.kube
            name: kubeconfigs
        securityContext:
          runAsNonRoot: true
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
          seccompProfile:
            type: RuntimeDefault
          capabilities:
            drop:
              - ALL
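    # Assumption based on its name and mounts: sa2kubeconfig.sh (shipped in the
    # zdt-argocd image) writes a kubeconfig for the pod's ServiceAccount to the
    # path given above, making it available to the repo server.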
  server:
    # Rename the former https service port to grpc; works with Istio + server.insecure
    service:
      servicePortHttpsName: grpc

    metrics:
      enabled: false
      serviceMonitor:
        enabled: true

  # redis:
  #   We might want to try to keep redis close to the controller
  #   affinity:

  dex:
    enabled: false

  notifications:
    enabled: false

  # Support for Istio Ingress for ArgoCD
  istio:
    # istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
    enabled: false
    # istio.gateway -- Name of the Istio gateway to add the VirtualService to
    gateway: istio-ingress/ingressgateway
    ipBlocks: []
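  # When istio.enabled is true, the chart is expected to render a VirtualService
  # roughly like this sketch (illustrative only, not the chart's exact template;
  # host taken from configs.cm.url):
  #   apiVersion: networking.istio.io/v1beta1
  #   kind: VirtualService
  #   metadata:
  #     name: argocd
  #   spec:
  #     gateways:
  #       - istio-ingress/ingressgateway
  #     hosts:
  #       - argocd.example.com
  #     http:
  #       - route:
  #           - destination:
  #               host: argocd-server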
argocd-image-updater:
  enabled: false

  # Unify all ArgoCD pieces under the same argocd namespace
  fullnameOverride: argocd-image-updater

  config:
    argocd:
      plaintext: true

  metrics:
    enabled: false
    serviceMonitor:
      enabled: true
  authScripts:
    enabled: true
    scripts:
      ecr-login.sh: |
        #!/bin/sh
        aws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d
      ecr-public-login.sh: |
        #!/bin/sh
        aws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d
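  # The scripts above are mounted under /scripts and can be wired up as external
  # credential helpers in the image-updater registries config, e.g. (account ID
  # and region are illustrative):
  # config:
  #   registries:
  #     - name: ECR
  #       prefix: 123456789012.dkr.ecr.eu-central-1.amazonaws.com
  #       api_url: https://123456789012.dkr.ecr.eu-central-1.amazonaws.com
  #       credentials: ext:/scripts/ecr-login.sh
  #       credsexpire: 10h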
  sshConfig:
    config: |
      Host *
        PubkeyAcceptedAlgorithms +ssh-rsa
        HostkeyAlgorithms +ssh-rsa