feature: remove calico, re-enable multus, set cilium as default, addons version bumps
Parent: 7aeb0209b3
Commit: dc962dfed1
@@ -8,7 +8,8 @@ ARG KUBE_VERSION=1.24
 RUN cd /etc/apk/keys && \
     wget "https://cdn.zero-downtime.net/alpine/stefan@zero-downtime.net-61bb6bfb.rsa.pub" && \
     echo "@kubezero https://cdn.zero-downtime.net/alpine/v${ALPINE_VERSION}/kubezero" >> /etc/apk/repositories && \
-    echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
+    echo "@edge-testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
+    echo "@edge-community http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories && \
     apk upgrade -U -a --no-cache && \
     apk --no-cache add \
       jq \
@@ -21,9 +22,9 @@ RUN cd /etc/apk/keys && \
       kubeadm@kubezero~=${KUBE_VERSION} \
       kubectl@kubezero~=${KUBE_VERSION} \
       etcdhelper@kubezero \
-      etcd-ctl@testing \
-      restic@testing \
-      helm@testing
+      etcd-ctl@edge-testing \
+      restic@edge-community \
+      helm@edge-community
 
 RUN helm repo add kubezero https://cdn.zero-downtime.net/charts && \
     mkdir -p /var/lib/kubezero
@@ -8,6 +8,24 @@ import yaml
 def migrate(values):
     """Actual changes here"""
 
+    # ClusterBackup is enabled on AWS anyways, same with cluster-autoscaler
+    if "aws" in values["global"]:
+        deleteKey(values["addons"], "clusterBackup")
+        deleteKey(values["addons"], "cluster-autoscaler")
+
+    # Remove calico and multus
+    deleteKey(values["network"], "calico")
+    deleteKey(values["network"], "multus")
+
     return values
 
 
+def deleteKey(values, key):
+    """Delete key from dictionary if exists"""
+    try:
+        values.pop(key)
+    except KeyError:
+        pass
+
+    return values
+
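For reference, a minimal sketch of how this migration hook behaves; the input dict below is invented for illustration, only the key names match the code above:

    # Hypothetical values tree, not taken from a real cluster.
    values = {
        "global": {"aws": {"region": "eu-central-1"}},
        "addons": {"clusterBackup": {}, "cluster-autoscaler": {}},
        "network": {"calico": {}, "multus": {}, "cilium": {"enabled": True}},
    }

    migrated = migrate(values)
    # On AWS, clusterBackup and cluster-autoscaler are dropped from addons;
    # calico and multus are always dropped from network, cilium stays.
    assert "calico" not in migrated["network"]
    assert "cilium" in migrated["network"]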
@@ -16,9 +34,11 @@ class MyDumper(yaml.Dumper):
     """
     Required to add additional indent for arrays to match yq behaviour to reduce noise in diffs
     """
 
     def increase_indent(self, flow=False, indentless=False):
         return super(MyDumper, self).increase_indent(flow, False)
 
 
 def str_presenter(dumper, data):
     if len(data.splitlines()) > 1:  # check for multiline string
         return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
@@ -65,5 +85,5 @@ yaml.dump(
     default_flow_style=False,
     indent=2,
     sort_keys=False,
-    Dumper=MyDumper
+    Dumper=MyDumper,
 )
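As a standalone illustration of why the custom Dumper exists (sample data invented; the behaviour follows PyYAML's documented increase_indent hook):

    import yaml

    class MyDumper(yaml.Dumper):
        # Force indented (non-indentless) block sequences, matching yq's style.
        def increase_indent(self, flow=False, indentless=False):
            return super(MyDumper, self).increase_indent(flow, False)

    data = {"tolerations": [{"key": "node-role.kubernetes.io/control-plane"}]}

    # The default Dumper renders the "-" flush with "tolerations:"; MyDumper
    # indents it two spaces, so regenerated values diff cleanly against yq output.
    print(yaml.dump(data, default_flow_style=False, indent=2, sort_keys=False, Dumper=MyDumper))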
@@ -153,6 +153,7 @@ argo_used && kubectl edit app kubezero -n argocd || kubectl edit cm kubezero-val
 control_plane_upgrade "apply_network, apply_addons, apply_storage"
 
 kubectl rollout restart daemonset/cilium -n kube-system
+kubectl rollout restart daemonset/kube-multus-ds -n kube-system
 
 echo "Checking that all pods in kube-system are running ..."
 waitSystemPodsRunning
@@ -18,7 +18,7 @@ maintainers:
     email: stefan@zero-downtime.net
 dependencies:
   - name: aws-node-termination-handler
-    version: 0.18.5
+    version: 0.19.3
     # repository: https://aws.github.io/eks-charts
     condition: aws-node-termination-handler.enabled
   - name: external-dns
@@ -30,7 +30,7 @@ dependencies:
     repository: https://kubernetes.github.io/autoscaler
     condition: cluster-autoscaler.enabled
   - name: nvidia-device-plugin
-    version: 0.12.2
+    version: 0.12.3
     # https://github.com/NVIDIA/k8s-device-plugin
     repository: https://nvidia.github.io/k8s-device-plugin
     condition: nvidia-device-plugin.enabled
@@ -18,10 +18,10 @@ Kubernetes: `>= 1.24.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-|  | aws-node-termination-handler | 0.18.5 |
+|  | aws-node-termination-handler | 0.19.3 |
 | https://kubernetes-sigs.github.io/external-dns/ | external-dns | 1.11.0 |
 | https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.21.0 |
-| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.12.2 |
+| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.12.3 |
 
 # MetalLB
 
@@ -54,7 +54,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/)
 | aws-node-termination-handler.fullnameOverride | string | `"aws-node-termination-handler"` |  |
 | aws-node-termination-handler.ignoreDaemonSets | bool | `true` |  |
 | aws-node-termination-handler.jsonLogging | bool | `true` |  |
-| aws-node-termination-handler.managedAsgTag | string | `"aws-node-termination-handler/managed"` | "aws-node-termination-handler/${ClusterName}" |
+| aws-node-termination-handler.managedTag | string | `"aws-node-termination-handler/managed"` | "aws-node-termination-handler/${ClusterName}" |
 | aws-node-termination-handler.metadataTries | int | `0` |  |
 | aws-node-termination-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` |  |
 | aws-node-termination-handler.podMonitor.create | bool | `false` |  |
@ -1,5 +1,5 @@
|
||||
apiVersion: v2
|
||||
appVersion: 1.16.5
|
||||
appVersion: 1.17.3
|
||||
description: A Helm chart for the AWS Node Termination Handler.
|
||||
home: https://github.com/aws/eks-charts
|
||||
icon: https://raw.githubusercontent.com/aws/eks-charts/master/docs/logo/aws.png
|
||||
@ -22,4 +22,4 @@ sources:
|
||||
- https://github.com/aws/aws-node-termination-handler/
|
||||
- https://github.com/aws/eks-charts/
|
||||
type: application
|
||||
version: 0.18.5
|
||||
version: 0.19.3
|
||||
|
@@ -82,6 +82,7 @@ The configuration in this table applies to all AWS Node Termination Handler mode
 | `podTerminationGracePeriod` | The time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used, which defaults to 30 seconds if not specified for the pod. | `-1` |
 | `nodeTerminationGracePeriod` | Period of time in seconds given to each node to terminate gracefully. Node draining will be scheduled based on this value to optimize the amount of compute time, but still safely drain the node before an event. | `120` |
 | `emitKubernetesEvents` | If `true`, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event. More information [here](https://github.com/aws/aws-node-termination-handler/blob/main/docs/kubernetes_events.md). | `false` |
+| `completeLifecycleActionDelaySeconds` | Pause after draining the node before completing the EC2 Autoscaling lifecycle action. This may be helpful if Pods on the node have Persistent Volume Claims. | -1 |
 | `kubernetesEventsExtraAnnotations` | A comma-separated list of `key=value` extra annotations to attach to all emitted Kubernetes events (e.g. `first=annotation,sample.annotation/number=two"`). | `""` |
 | `webhookURL` | Posts event data to URL upon instance interruption action. | `""` |
 | `webhookURLSecretName` | Pass the webhook URL as a Secret using the key `webhookurl`. | `""` |
@@ -110,8 +111,10 @@ The configuration in this table applies to AWS Node Termination Handler in queue
 | `awsRegion` | If specified, use the AWS region for AWS API calls, else NTH will try to find the region through the `AWS_REGION` environment variable, IMDS, or the specified queue URL. | `""` |
 | `queueURL` | Listens for messages on the specified SQS queue URL. | `""` |
 | `workers` | The maximum amount of parallel event processors to handle concurrent events. | `10` |
-| `checkASGTagBeforeDraining` | If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. If `false`, disables calls ASG API. | `true` |
-| `managedAsgTag` | The node tag to check if `checkASGTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` |
+| `checkTagBeforeDraining` | If `true`, check that the instance is tagged with the `managedTag` before draining the node. | `true` |
+| `managedTag` | The node tag to check if `checkTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` |
+| `checkASGTagBeforeDraining` | [DEPRECATED](Use `checkTagBeforeDraining` instead) If `true`, check that the instance is tagged with the `managedAsgTag` before draining the node. If `false`, disables calls ASG API. | `true` |
+| `managedAsgTag` | [DEPRECATED](Use `managedTag` instead) The node tag to check if `checkASGTagBeforeDraining` is `true`. | `aws-node-termination-handler/managed` |
 | `useProviderId` | If `true`, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname. | `false` |
 
 ### IMDS Mode Configuration
@@ -151,6 +151,9 @@ spec:
             value: "false"
           - name: UPTIME_FROM_FILE
             value: {{ .Values.procUptimeFile | quote }}
+          {{- with .Values.extraEnv }}
+          {{- toYaml . | nindent 12 }}
+          {{- end }}
           {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }}
           ports:
           {{- if .Values.enableProbesServer }}
@@ -52,7 +52,7 @@ spec:
       {{- end }}
       containers:
         - name: aws-node-termination-handler
-          {{- with .Values.securityContext }}
+          {{- with unset .Values.securityContext "runAsUser" }}
           securityContext:
             {{- toYaml . | nindent 12 }}
           {{- end }}
@@ -149,6 +149,9 @@ spec:
             value: {{ .Values.enableRebalanceDraining | quote }}
           - name: ENABLE_SQS_TERMINATION_DRAINING
             value: "false"
+          {{- with .Values.extraEnv }}
+          {{- toYaml . | nindent 12 }}
+          {{- end }}
           {{- if or .Values.enablePrometheusServer .Values.enableProbesServer }}
           ports:
           {{- if .Values.enableProbesServer }}
@@ -82,10 +82,16 @@ spec:
             value: {{ .Values.enablePrometheusServer | quote }}
           - name: PROMETHEUS_SERVER_PORT
             value: {{ .Values.prometheusServerPort | quote }}
+          # [DEPRECATED] Use CHECK_TAG_BEFORE_DRAINING instead
           - name: CHECK_ASG_TAG_BEFORE_DRAINING
             value: {{ .Values.checkASGTagBeforeDraining | quote }}
+          - name: CHECK_TAG_BEFORE_DRAINING
+            value: {{ .Values.checkTagBeforeDraining | quote }}
+          # [DEPRECATED] Use MANAGED_TAG instead
           - name: MANAGED_ASG_TAG
             value: {{ .Values.managedAsgTag | quote }}
+          - name: MANAGED_TAG
+            value: {{ .Values.managedTag | quote }}
           - name: USE_PROVIDER_ID
             value: {{ .Values.useProviderId | quote }}
           - name: DRY_RUN
@@ -106,6 +112,8 @@ spec:
             value: {{ .Values.nodeTerminationGracePeriod | quote }}
           - name: EMIT_KUBERNETES_EVENTS
             value: {{ .Values.emitKubernetesEvents | quote }}
+          - name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
+            value: {{ .Values.completeLifecycleActionDelaySeconds | quote }}
           {{- with .Values.kubernetesEventsExtraAnnotations }}
           - name: KUBERNETES_EVENTS_EXTRA_ANNOTATIONS
             value: {{ . | quote }}
@@ -100,6 +100,9 @@ nodeTerminationGracePeriod: 120
 # emitKubernetesEvents If true, Kubernetes events will be emitted when interruption events are received and when actions are taken on Kubernetes nodes. In IMDS Processor mode a default set of annotations with all the node metadata gathered from IMDS will be attached to each event
 emitKubernetesEvents: false
 
+# completeLifecycleActionDelaySeconds will pause for the configured duration after draining the node before completing the EC2 Autoscaling lifecycle action. This may be helpful if Pods on the node have Persistent Volume Claims.
+completeLifecycleActionDelaySeconds: -1
+
 # kubernetesEventsExtraAnnotations A comma-separated list of key=value extra annotations to attach to all emitted Kubernetes events
 # Example: "first=annotation,sample.annotation/number=two"
 kubernetesEventsExtraAnnotations: ""
@@ -170,13 +173,18 @@ queueURL: ""
 # The maximum amount of parallel event processors to handle concurrent events
 workers: 10
 
-# If true, check that the instance is tagged with "aws-node-termination-handler/managed" as the key before draining the node
-# If false, disables calls to ASG API.
+# [DEPRECATED] Use checkTagBeforeDraining instead
 checkASGTagBeforeDraining: true
 
-# The tag to ensure is on a node if checkASGTagBeforeDraining is true
+# If true, check that the instance is tagged with "aws-node-termination-handler/managed" as the key before draining the node
+checkTagBeforeDraining: true
+
+# [DEPRECATED] Use managedTag instead
 managedAsgTag: "aws-node-termination-handler/managed"
+
+# The tag to ensure is on a node if checkTagBeforeDraining is true
+managedTag: "aws-node-termination-handler/managed"
 
 # If true, fetch node name through Kubernetes node spec ProviderID instead of AWS event PrivateDnsHostname.
 useProviderId: false
@@ -33,7 +33,7 @@ aws-node-termination-handler:
   #   tag: v1.14.1
 
   # -- "aws-node-termination-handler/${ClusterName}"
-  managedAsgTag: "aws-node-termination-handler/managed"
+  managedTag: "aws-node-termination-handler/managed"
 
   useProviderId: true
   enableSqsTerminationDraining: true
@@ -26,7 +26,4 @@ dependencies:
     version: 0.13.7
     repository: https://metallb.github.io/metallb
     condition: metallb.enabled
-  - name: calico
-    version: 0.2.2
-    condition: calico.enabled
-kubeVersion: ">= 1.20.0"
+kubeVersion: ">= 1.24.0"
@@ -14,11 +14,10 @@ KubeZero umbrella chart for all things network
 
 ## Requirements
 
-Kubernetes: `>= 1.20.0`
+Kubernetes: `>= 1.24.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-|  | calico | 0.2.2 |
 | https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.5 |
 | https://helm.cilium.io/ | cilium | 1.12.3 |
 | https://metallb.github.io/metallb | metallb | 0.13.7 |
@@ -27,20 +26,18 @@ Kubernetes: `>= 1.24.0`
 
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| calico.enabled | bool | `false` |  |
 | cilium.bpf.hostLegacyRouting | bool | `true` |  |
 | cilium.cgroup.autoMount.enabled | bool | `false` |  |
 | cilium.cgroup.hostRoot | string | `"/sys/fs/cgroup"` |  |
 | cilium.cluster.id | int | `240` |  |
 | cilium.cluster.name | string | `"default"` |  |
 | cilium.cni.binPath | string | `"/usr/libexec/cni"` |  |
 | cilium.cni.logFile | string | `"/var/log/cilium-cni.log"` |  |
 | cilium.containerRuntime.integration | string | `"crio"` |  |
 | cilium.enabled | bool | `false` |  |
 | cilium.hubble.enabled | bool | `false` |  |
 | cilium.ipam.operator.clusterPoolIPv4PodCIDRList[0] | string | `"10.240.0.0/16"` |  |
 | cilium.l2NeighDiscovery.enabled | bool | `false` |  |
 | cilium.l7Proxy | bool | `false` |  |
-| cilium.nodePort.enabled | bool | `true` |  |
 | cilium.operator.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` |  |
 | cilium.operator.replicas | int | `1` |  |
 | cilium.operator.tolerations[0].effect | string | `"NoSchedule"` |  |
@@ -58,5 +55,7 @@ Kubernetes: `>= 1.24.0`
 | metallb.controller.tolerations[1].key | string | `"node-role.kubernetes.io/control-plane"` |  |
 | metallb.enabled | bool | `false` |  |
 | metallb.ipAddressPools | list | `[]` |  |
+| multus.clusterNetwork | string | `"cilium"` |  |
+| multus.defaultNetworks | list | `[]` |  |
 | multus.enabled | bool | `false` |  |
 | multus.tag | string | `"v3.9.2"` |  |
@@ -1,18 +0,0 @@
apiVersion: v2
name: calico
description: KubeZero Chart for Calico
type: application
version: 0.2.2
appVersion: v3.16.10
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
  - kubezero
  - calico
maintainers:
  - name: Quarky9
dependencies:
  - name: kubezero-lib
    version: ">= 0.1.3"
    repository: https://zero-down-time.github.io/kubezero/
kubeVersion: ">= 1.20.0"
@@ -1,57 +0,0 @@
# calico

![Version: 0.2.2](https://img.shields.io/badge/Version-0.2.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v3.16.5](https://img.shields.io/badge/AppVersion-v3.16.5-informational?style=flat-square)

KubeZero Chart for Calico

**Homepage:** <https://kubezero.com>

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 |  |  |

## Requirements

Kubernetes: `>= 1.20.0`

| Repository | Name | Version |
|------------|------|---------|
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## KubeZero default configuration

## AWS
The setup is based on the upstream calico-vxlan config from
`https://docs.projectcalico.org/v3.15/manifests/calico-vxlan.yaml`

### Changes

- VxLAN set to Always to not expose cluster communication to VPC

  -> EC2 SecurityGroups still apply and only need to allow UDP 4789 for VxLAN traffic
  -> No need to disable source/destination check on EC2 instances
  -> Prepared for optional WireGuard encryption for all inter node traffic

- MTU set to 8941

- Removed migration init-container

- Disable BGB and BIRD health checks

- Set FELIX log level to warning

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| image.tag | string | `""` |  |
| loglevel | string | `"Warning"` |  |
| mtu | int | `8941` |  |
| network | string | `"vxlan"` |  |
| prometheus | bool | `false` |  |

## Resources

- Grafana Dashboard: https://grafana.com/grafana/dashboards/12175
@@ -1,42 +0,0 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}

{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}

{{ template "chart.description" . }}

{{ template "chart.homepageLine" . }}

{{ template "chart.maintainersSection" . }}

{{ template "chart.sourcesSection" . }}

{{ template "chart.requirementsSection" . }}

## KubeZero default configuration

## AWS
The setup is based on the upstream calico-vxlan config from
`https://docs.projectcalico.org/v3.15/manifests/calico-vxlan.yaml`

### Changes

- VxLAN set to Always to not expose cluster communication to VPC

  -> EC2 SecurityGroups still apply and only need to allow UDP 4789 for VxLAN traffic
  -> No need to disable source/destination check on EC2 instances
  -> Prepared for optional WireGuard encryption for all inter node traffic

- MTU set to 8941

- Removed migration init-container

- Disable BGB and BIRD health checks

- Set FELIX log level to warning

{{ template "chart.valuesSection" . }}

## Resources

- Grafana Dashboard: https://grafana.com/grafana/dashboards/12175
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,647 +0,0 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "{{ .Values.network }}"
  # Configure the MTU to use for workload interfaces and tunnels.
  # - If Wireguard is enabled, set to your network MTU - 60
  # - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
  # - Otherwise, if IPIP is enabled, set to your network MTU - 20
  # - Otherwise, if not using any encapsulation, set to your network MTU.
  veth_mtu: "{{ .Values.mtu }}"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "log_file_path": "/var/log/calico/cni/cni.log",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }

---
# Source: calico/templates/calico-kube-controllers-rbac.yaml

# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Nodes are watched to monitor for deletions.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - watch
      - list
      - get
  # Pods are queried to check for existence.
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
  # IPAM resources are manipulated when nodes are deleted.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
    verbs:
      - list
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # kube-controllers manages hostendpoints.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - hostendpoints
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # Needs access to update clusterinformations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - clusterinformations
    verbs:
      - get
      - create
      - update
  # KubeControllersConfiguration is where it gets its config
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - kubecontrollersconfigurations
    verbs:
      # read its own config
      - get
      # create a default if none exists
      - create
      # update status
      - update
      # watch for changes
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
  - kind: ServiceAccount
    name: calico-kube-controllers
    namespace: kube-system
---

---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  # Pod CIDR auto-detection on kubeadm needs access to config maps.
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
      - blockaffinities
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
  # These permissions are required for Calico CNI to perform IPAM allocations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipamconfigs
    verbs:
      - get
  # Block affinities must also be watchable by confd for route aggregation.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
    verbs:
      - watch
  # The Calico IPAM migration needs to get daemonsets. These permissions can be
  # removed if not upgrading from an installation using host-local IPAM.
  - apiGroups: ["apps"]
    resources:
      - daemonsets
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
  - kind: ServiceAccount
    name: calico-node
    namespace: kube-system

---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:{{ default .Chart.AppVersion .Values.image.tag }}
          command: ["/opt/cni/bin/install"]
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
          securityContext:
            privileged: true
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
        # to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:{{ default .Chart.AppVersion .Values.image.tag }}
          volumeMounts:
            - name: flexvol-driver-host
              mountPath: /host/driver
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:{{ default .Chart.AppVersion .Values.image.tag }}
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,kubeadm"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eth.*"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Never"
            # Enable or Disable VXLAN on the default IP pool.
            - name: CALICO_IPV4POOL_VXLAN
              value: "Always"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Disable AWS source-destination check on nodes.
            - name: FELIX_AWSSRCDSTCHECK
              value: DoNothing
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "{{ .Values.loglevel }}"
            - name: FELIX_LOGSEVERITYFILE
              value: "{{ .Values.loglevel }}"
            - name: FELIX_LOGSEVERITYSYS
              value: ""
            - name: FELIX_PROMETHEUSGOMETRICSENABLED
              value: "{{ .Values.prometheus }}"
            - name: FELIX_PROMETHEUSMETRICSENABLED
              value: "{{ .Values.prometheus }}"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 50m
          livenessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-ready
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - name: policysync
              mountPath: /var/run/nodeagent
            # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
            # parent directory.
            - name: sysfs
              mountPath: /sys/fs/
              # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.
              # If the host is known to mount that filesystem already then Bidirectional can be omitted.
              # mountPropagation: Bidirectional
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        - name: sysfs
          hostPath:
            path: /sys/fs/
            type: DirectoryOrCreate
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /usr/libexec/cni
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install Flex Volume Driver
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system

---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
        node-role.kubernetes.io/control-plane: ""
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      containers:
        - name: calico-kube-controllers
          image: calico/kube-controllers:{{ default .Chart.AppVersion .Values.image.tag }}
          env:
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: node
            - name: DATASTORE_TYPE
              value: kubernetes
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
          resources:
            requests:
              cpu: 50m

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system

---
# Source: calico/templates/calico-etcd-secrets.yaml

---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml
@@ -1,18 +0,0 @@
{{- if .Values.prometheus }}
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: calico-node
  name: calico-node
spec:
  clusterIP: None
  ports:
    - name: metrics
      port: 9091
      protocol: TCP
      targetPort: 9091
  selector:
    k8s-app: calico-node
  type: ClusterIP
{{- end }}
@@ -1,19 +0,0 @@
{{- if .Values.prometheus }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: calico-node
  labels:
    k8s-app: calico-node
    release: metrics
spec:
  jobLabel: k8s-app
  selector:
    matchLabels:
      k8s-app: calico-node
  namespaceSelector:
    matchNames:
      - kube-system
  endpoints:
    - port: metrics
{{- end }}
@@ -1,34 +0,0 @@
# Once pod is running:
# kubectl -n NAME-SPACE-TO-TEST exec -it pod/POD_NAME /bin/sh

apiVersion: apps/v1
kind: Deployment
metadata:
  name: netshoot
  namespace: kube-system
  labels:
    app: netshoot
spec:
  replicas: 2
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: netshoot
  template:
    metadata:
      labels:
        app: netshoot
    spec:
      containers:
        - name: netshoot
          image: nicolaka/netshoot
          imagePullPolicy: Always
          command:
            - /bin/sleep
          args:
            - "3600"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - topologyKey: "kubernetes.io/hostname"
@@ -1,10 +0,0 @@
image:
  tag: ""

network: vxlan

mtu: 8941

loglevel: Warning

prometheus: false
@@ -1,25 +0,0 @@
{{- if and .Values.multus.enabled .Values.calico.enabled }}
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: calico
  namespace: kube-system
spec:
  config: '{
    "type": "calico",
    "cniVersion": "0.3.1",
    "log_level": "info",
    "log_file_path": "/var/log/calico/cni/cni.log",
    "datastore_type": "kubernetes",
    "mtu": {{ .Values.calico.mtu }},
    "ipam": {
        "type": "calico-ipam"
    },
    "policy": {
        "type": "k8s"
    },
    "kubernetes": {
        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
    }
  }'
{{- end }}
@@ -20,10 +20,9 @@ multus:
   enabled: false
   tag: "v3.9.2"
 
-  #clusterNetwork: "calico"
-  #defaultNetworks: []
-  #  - "cilium"
-  #readinessindicatorfile: "/etc/cni/net.d/10-calico.conflist"
+  clusterNetwork: "cilium"
+  defaultNetworks: []
+  readinessindicatorfile: "/etc/cni/net.d/05-cilium.conf"
 
 cilium:
   enabled: false
@@ -37,9 +36,9 @@ cilium:
   cni:
     binPath: "/usr/libexec/cni"
     logFile: /var/log/cilium-cni.log
     #-- Ensure this is false if multus is enabled
-    # exclusive: false
     # chainingMode: generic-veth
+    exclusive: false
 
   bpf:
     hostLegacyRouting: true
@@ -48,7 +47,7 @@ cilium:
   cluster:
     # This should match the second octet of clusterPoolIPv4PodCIDRList
     # to prevent IP space overlap and easy tracking
-    # use 240 as default, less likely to clash with 1, do NOT use 244 used by calico until 1.25
+    # use 240 as default, less likely to clash with 1
     id: 240
     name: default
 
@@ -57,14 +56,10 @@ cilium:
       clusterPoolIPv4PodCIDRList:
         - 10.240.0.0/16
-
-  # Should be handled by multus
-  nodePort:
-    enabled: true
-
-  # Keep it simple for now
+  # nodePort:
+  #   enabled: true
   l7Proxy: false
   l2NeighDiscovery:
     enabled: false
 
   cgroup:
     autoMount:
@@ -89,7 +84,3 @@ cilium:
 
   hubble:
     enabled: false
-
-# Legacy / Testing
-calico:
-  enabled: false
@@ -42,7 +42,7 @@ aws-node-termination-handler:
   {{- if .Values.global.aws }}
   # AWS
   queueURL: "https://sqs.{{ .Values.global.aws.region }}.amazonaws.com/{{ .Values.global.aws.accountId }}/{{ .Values.global.clusterName }}_Nth"
-  managedAsgTag: "aws-node-termination-handler/{{ .Values.global.clusterName }}"
+  managedTag: "aws-node-termination-handler/{{ .Values.global.clusterName }}"
   extraEnv:
     - name: AWS_ROLE_ARN
       value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.awsNth"
@@ -80,7 +80,7 @@ external-dns:
 {{- end }}
 
 cluster-autoscaler:
-  enabled: {{ default "false" (index .Values "addons" "cluster-autoscaler" "enabled") }}
+  enabled: {{ ternary "true" "false" (or (hasKey .Values.global "aws") (index .Values "addons" "cluster-autoscaler" "enabled")) }}
 
 {{- with omit (index .Values "addons" "cluster-autoscaler") "enabled" }}
   {{- toYaml . | nindent 2 }}
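For readability, the effect of the new `ternary`/`hasKey` expression restated as a plain Python sketch (function name and dict shape are illustrative, not repo code):

    def cluster_autoscaler_enabled(values: dict) -> bool:
        # New logic: force-enable on AWS, otherwise honor the explicit flag.
        explicit = values.get("addons", {}).get("cluster-autoscaler", {}).get("enabled", False)
        return bool("aws" in values.get("global", {}) or explicit)

    assert cluster_autoscaler_enabled({"global": {"aws": {}}}) is True
    assert cluster_autoscaler_enabled({"global": {}}) is False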
@@ -1,6 +1,9 @@
 {{- define "network-values" }}
 
+# since 1.23
+multus:
+  enabled: true
+  clusterNetwork: "cilium"
 
 cilium:
   enabled: true