# kubezero/charts/kubezero-network/values.yaml
metallb:
  enabled: false
  controller:
    # Allow the controller to run on control-plane nodes (both legacy and
    # current taint keys are tolerated).
    tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""
  # Address pools MetalLB may allocate from; empty by default.
  ipAddressPools: []
  # - name: my-ip-space
  #   protocol: layer2
  #   addresses:
  #     - 192.168.42.0/24

multus:
  enabled: false
  tag: "v3.9.2"
  # clusterNetwork: "calico"
  # defaultNetworks: []
  #   - "cilium"
  # readinessindicatorfile: "/etc/cni/net.d/10-calico.conflist"

cilium:
  enabled: false

  containerRuntime:
    integration: crio

  # Until we figured out AppArmor on Alpine and Gentoo
  securityContext:
    privileged: true

  cni:
    binPath: "/usr/libexec/cni"
    # -- Ensure this is false if multus is enabled
    # exclusive: false
    # chainingMode: generic-veth

  bpf:
    hostLegacyRouting: true
    # tproxy: false

  cluster:
    # This should match the second octet of clusterPoolIPv4PodCIDRList
    # to prevent IP space overlap and easy tracking
    # use 240 as default, less likely to clash with 1, do NOT use 244 used by calico until 1.25
    id: 240
    name: default

  ipam:
    operator:
      clusterPoolIPv4PodCIDRList:
        - 10.240.0.0/16

  # Should be handled by multus
  nodePort:
    enabled: true

  # Keep it simple for now
  l7Proxy: false

  l2NeighDiscovery:
    enabled: false

  cgroup:
    autoMount:
      enabled: false
    hostRoot: "/sys/fs/cgroup"

  tunnel: geneve

  prometheus:
    enabled: false
    port: 9091

  operator:
    replicas: 1
    tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
    nodeSelector:
      node-role.kubernetes.io/control-plane: ""

  hubble:
    enabled: false

# Legacy / Testing
calico:
  enabled: false