# KubeZero/charts/kubezero-network/values.yaml

metallb:
  enabled: false

  controller:
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""

  ipAddressPools: []
  #- name: my-ip-space
  #  protocol: layer2
  #  addresses:
  #  - 192.168.42.0/24
multus:
  enabled: false
  image:
    repository: ghcr.io/k8snetworkplumbingwg/multus-cni
    tag: v3.9.3

  clusterNetwork: "cilium"
  defaultNetworks: []
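  # Example only, not a chart default; assumes a NetworkAttachmentDefinition
  # named "macvlan-conf" exists in the default namespace:
  #defaultNetworks:
  #  - macvlan-conf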
  readinessindicatorfile: "/etc/cni/net.d/05-cilium.conflist"
cilium:
  enabled: false

  # Always use cached images
  image:
    useDigest: false

  resources:
    requests:
      cpu: 10m
      memory: 160Mi
    limits:
      memory: 1Gi
      # cpu: 4000m

  cni:
    binPath: "/usr/libexec/cni"
    logFile: /var/log/cilium-cni.log
    #-- Ensure this is false if multus is enabled
    exclusive: false

  cluster:
    # This should match the second octet of clusterPoolIPv4PodCIDRList
    # to prevent IP space overlap and to keep tracking easy
    # use 240 as default, less likely to clash with 1
    id: 240
    name: default

  ipam:
    operator:
      clusterPoolIPv4PodCIDRList:
        - 10.240.0.0/16
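  # Example only, not a chart default: a second cluster would keep the cluster
  # id and the second octet of the pod CIDR aligned, e.g.
  #cluster:
  #  id: 241
  #ipam:
  #  operator:
  #    clusterPoolIPv4PodCIDRList:
  #      - 10.241.0.0/16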
  # Keep it simple for now
  l7Proxy: false
  envoy:
    enabled: false

  #rollOutCiliumPods: true

  cgroup:
    autoMount:
      enabled: false
    hostRoot: "/sys/fs/cgroup"

  # we need Bidirectional mount propagation, so use the helm init-container
  #bpf:
  #  autoMount:
  #    enabled: false

  sysctlfix:
    enabled: false

  routingMode: tunnel
  tunnelProtocol: geneve

  prometheus:
    enabled: false
    serviceMonitor:
      enabled: false
    port: 9091

  operator:
    replicas: 1
    tolerations:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
      # the operator removes the taints,
      # so we need to break the chicken-and-egg cycle on a single controller
      - key: node.cilium.io/agent-not-ready
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""
    prometheus:
      enabled: false
      serviceMonitor:
        enabled: false

  hubble:
    enabled: false
    relay:
      enabled: false
    ui:
      enabled: false
    tls:
      auto:
        method: cert-manager
        certManagerIssuerRef:
          group: cert-manager.io
          kind: ClusterIssuer
          name: kubezero-local-ca-issuer
haproxy:
  enabled: false

  replicaCount: 1

  # enable the PodDisruptionBudget if replicaCount > 1
  PodDisruptionBudget:
    enable: false
    minAvailable: 1
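  # Example only: with more than one replica, enable the PDB so voluntary
  # disruptions always leave at least minAvailable pod running, e.g.
  #replicaCount: 2
  #PodDisruptionBudget:
  #  enable: true
  #  minAvailable: 1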
  containerPorts:
    http: 8080
    https: null
    prometheus: 8404
    stat: null

  serviceMonitor:
    enabled: false
    endpoints:
      - port: prometheus
        path: /metrics
        scheme: http
        interval: 30s
        params:
          no-maint:
            - empty

  config: |
    frontend fe_main
      bind :8080
      default_backend be_main

    backend be_main
      server web1 10.0.0.1:8080 check
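    # Example only, not part of the chart defaults: a backend that resolves a
    # Kubernetes Service name at runtime via the "resolvers coredns" section
    # defined in includes/global.cfg; the service name below is a placeholder.
    #backend be_svc
    #  server-template svc 1 my-svc.my-namespace.svc.cluster.local:8080 check resolvers coredns init-addr none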
  includes:
    global.cfg: |
      global
        log stdout format raw local0
        maxconn 2048

      defaults
        log global
        mode tcp
        option http-server-close
        timeout connect 10s
        timeout client 30s
        timeout client-fin 30s
        timeout server 30s
        timeout tunnel 1h

      resolvers coredns
        accepted_payload_size 4096
        parse-resolv-conf
        hold valid 10s
        hold other 10s
        hold refused 10s
        hold nx 10s
        hold timeout 10s

    prometheus.cfg: |
      frontend prometheus
        bind *:8404
        mode http
        http-request use-service prometheus-exporter if { path /metrics }
        no log
        stats enable
        stats uri /stats
        stats refresh 10s
        stats auth admin:letmein

  args:
    defaults:
      - "-f"
      - "/usr/local/etc/haproxy/includes/global.cfg"
      - "-f"
      - "/usr/local/etc/haproxy/includes/prometheus.cfg"
      - "-f"
      - "/usr/local/etc/haproxy/haproxy.cfg"

  livenessProbe:
    failureThreshold: 3
    successThreshold: 1
    initialDelaySeconds: 0
    timeoutSeconds: 1
    tcpSocket:
      port: 8404
    periodSeconds: 10

  readinessProbe:
    failureThreshold: 3
    successThreshold: 1
    initialDelaySeconds: 0
    timeoutSeconds: 1
    tcpSocket:
      port: 8404
    periodSeconds: 10

  securityContext:
    enabled: true
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    seccompProfile:
      type: RuntimeDefault

  resources:
    requests:
      cpu: 10m
      memory: 48Mi
    # limits:
    #   cpu: 250m
    #   memory: 128Mi