argo-events:
  enabled: false

  configs:
    jetstream:
      # Default JetStream settings, could be overridden by EventBus JetStream spec
      # Ref: https://docs.nats.io/running-a-nats-service/configuration#jetstream
      settings:
        # -- Maximum size of the memory storage (e.g. 1G)
        maxMemoryStore: -1
        # -- Maximum size of the file storage (e.g. 20G)
        maxFileStore: -1
      streamConfig:
        # -- Maximum number of messages before expiring oldest message
        maxMsgs: 1000000
        # -- Maximum age of existing messages, e.g. "72h", "4h35m"
        maxAge: 72h
        # Total size of messages before expiring oldest message, 0 means unlimited.
        maxBytes: 1GB
        # -- Number of replicas, defaults to 3 and requires a minimum of 3
        replicas: 1
        # -- Not documented at the moment
        duplicates: 300s
      # Supported versions of the JetStream eventbus
      # see: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml
      # do NOT use the -alpine tag as the entrypoint differs
      versions:
        - version: 2.10.11
          natsImage: nats:2.10.11-scratch
          metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0
          configReloaderImage: natsio/nats-server-config-reloader:0.16.1
          startCommand: /nats-server

argocd-apps:
  enabled: false
  # An illustrative, commented-out application example is included at the end of this file
  projects: {}
  applications: {}

argo-cd:
  enabled: false

  global:
    logging:
      format: json
    image:
      repository: public.ecr.aws/zero-downtime/zdt-argocd
      tag: v2.13.0

  configs:
    styles: |
      .sidebar__logo img { content: url(https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png); }
      .sidebar__logo__text-logo { height: 0em; }
      .sidebar { background: linear-gradient(to bottom, #6A4D79, #493558, #2D1B30, #0D0711); }

    cm:
      ui.bannercontent: "KubeZero v1.30 - Release notes"
      ui.bannerurl: "https://kubezero.com/releases/v1.30"
      ui.bannerpermanent: "true"
      ui.bannerposition: "bottom"

      # argo-cd.server.config.url -- ArgoCD URL being exposed via Istio
      url: https://argocd.example.com

      timeout.reconciliation: 300s

      resource.customizations: |
        cert-manager.io/Certificate:
          # Lua script for customizing the health status assessment
          health.lua: |
            hs = {}
            if obj.status ~= nil then
              if obj.status.conditions ~= nil then
                for i, condition in ipairs(obj.status.conditions) do
                  if condition.type == "Ready" and condition.status == "False" then
                    hs.status = "Degraded"
                    hs.message = condition.message
                    return hs
                  end
                  if condition.type == "Ready" and condition.status == "True" then
                    hs.status = "Healthy"
                    hs.message = condition.message
                    return hs
                  end
                end
              end
            end
            hs.status = "Progressing"
            hs.message = "Waiting for certificate"
            return hs

    secret:
      createSecret: false
      # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/' | base64 -w0`
      # argocdServerAdminPassword: "$2a$10$ivKzaXVxMqdeDSfS3nqi1Od3iDbnL7oXrixzDfZFRHlXHnAG6LydG"
      # argocdServerAdminPassword: "ref+file://secrets.yaml#/test"
      # argocdServerAdminPasswordMtime: "2020-04-24T15:33:09BST"

    ssh:
      extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="

    params:
      controller.status.processors: "10"
      controller.operation.processors: "5"
      controller.diff.server.side: "true"

      server.insecure: true
      server.enable.gzip: true

  controller:
    metrics:
      enabled: false
      serviceMonitor:
        enabled: true

    resources:
      limits:
        # cpu: 500m
        memory: 2048Mi
      requests:
        cpu: 100m
        memory: 512Mi

  repoServer:
    metrics:
      enabled: false
      serviceMonitor:
        enabled: true

    volumes:
      - name: kubeconfigs
        emptyDir: {}

    volumeMounts:
      - mountPath: /home/argocd/.kube
        name: kubeconfigs

    # Allow vals to read internal secrets across all namespaces
    clusterRoleRules:
      enabled: true
      rules:
        - apiGroups: [""]
          resources: ["secrets"]
          verbs: ["get", "watch", "list"]

    initContainers:
      - name: create-kubeconfig
        image: '{{ default .Values.global.image.repository .Values.repoServer.image.repository }}:{{ default (include "argo-cd.defaultTag" .) .Values.repoServer.image.tag }}'
        imagePullPolicy: '{{ default .Values.global.image.imagePullPolicy .Values.repoServer.image.imagePullPolicy }}'
        command:
          - /usr/local/bin/sa2kubeconfig.sh
          - /home/argocd/.kube/config
        volumeMounts:
          - mountPath: /home/argocd/.kube
            name: kubeconfigs
        securityContext:
          runAsNonRoot: true
          readOnlyRootFilesystem: true
          allowPrivilegeEscalation: false
          seccompProfile:
            type: RuntimeDefault
          capabilities:
            drop:
              - ALL

  server:
    # Rename former https port to grpc, works with istio + insecure
    service:
      servicePortHttpsName: grpc
    metrics:
      enabled: false
      serviceMonitor:
        enabled: true

  # redis:
  #   We might want to try to keep redis close to the controller
  #   affinity:

  dex:
    enabled: false

  notifications:
    enabled: false

  # Support for Istio Ingress for ArgoCD
  istio:
    # istio.enabled -- Deploy Istio VirtualService to expose ArgoCD
    enabled: false
    # istio.gateway -- Name of the Istio gateway to add the VirtualService to
    gateway: istio-ingress/ingressgateway
    ipBlocks: []

argocd-image-updater:
  enabled: false

  # Unify all ArgoCD pieces under the same argocd namespace
  fullnameOverride: argocd-image-updater

  config:
    argocd:
      plaintext: true

  metrics:
    enabled: false
    serviceMonitor:
      enabled: true

  authScripts:
    enabled: true
    scripts:
      ecr-login.sh: |
        #!/bin/sh
        aws ecr --region $AWS_REGION get-authorization-token --output text --query 'authorizationData[].authorizationToken' | base64 -d
      ecr-public-login.sh: |
        #!/bin/sh
        aws ecr-public --region us-east-1 get-authorization-token --output text --query 'authorizationData.authorizationToken' | base64 -d

  sshConfig:
    config: |
      Host *
        PubkeyAcceptedAlgorithms +ssh-rsa
        HostkeyAlgorithms +ssh-rsa
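
# A minimal, commented-out sketch of how the ECR auth scripts above are typically wired into
# image-updater registries, assuming the upstream argocd-image-updater chart mounts authScripts
# under /scripts and accepts the standard `config.registries` format; the account ID, region
# and prefix are placeholders, not values from this chart.
#
# argocd-image-updater:
#   config:
#     registries:
#       - name: ECR
#         prefix: 123456789012.dkr.ecr.eu-central-1.amazonaws.com
#         api_url: https://123456789012.dkr.ecr.eu-central-1.amazonaws.com
#         ping: yes
#         credentials: ext:/scripts/ecr-login.sh
#         credsexpire: 10h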
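
# Likewise, a hedged example of what could populate the empty `projects`/`applications` maps
# of the argocd-apps chart further above, assuming its map-keyed application format;
# repoURL, path and namespaces are illustrative only.
#
# argocd-apps:
#   applications:
#     example-app:
#       namespace: argocd
#       project: default
#       source:
#         repoURL: https://github.com/argoproj/argocd-example-apps
#         targetRevision: HEAD
#         path: guestbook
#       destination:
#         server: https://kubernetes.default.svc
#         namespace: guestbook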