Latest Calico, add QUICKSTART draft

Stefan Reimer 2020-06-03 15:45:01 +01:00
parent cb1c37b992
commit 3d84ca4fd9
4 changed files with 251 additions and 6 deletions


@@ -9,4 +9,4 @@ See eg: `https://github.com/kubernetes-sigs/kustomize/issues/1351`
 
 ## Upgrade
 See: https://docs.projectcalico.org/maintenance/kubernetes-upgrade
-`curl https://docs.projectcalico.org/manifests/canal.yaml -O`
+`curl https://docs.projectcalico.org/manifests/canal.yaml -O && patch < remove-namespace.patch`
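For reference, a hedged sketch of the full upgrade flow; the final apply step is an assumption (it is not spelled out in this diff) based on the manifest being consumed through kustomize, per the issue referenced above:

```bash
# Fetch the latest upstream manifest and strip the hard-coded namespaces
curl https://docs.projectcalico.org/manifests/canal.yaml -O
patch < remove-namespace.patch

# Hypothetical apply step: render via kustomize so the namespace is set
# in one place, then apply the result
kustomize build . | kubectl apply -f -
```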

canal.yaml

@@ -150,6 +150,8 @@ spec:
     kind: GlobalNetworkPolicy
     plural: globalnetworkpolicies
     singular: globalnetworkpolicy
+    shortNames:
+      - gnp
 ---
 apiVersion: apiextensions.k8s.io/v1beta1
@@ -238,6 +240,19 @@ spec:
 ---
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
+metadata:
+  name: kubecontrollersconfigurations.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: KubeControllersConfiguration
+    plural: kubecontrollersconfigurations
+    singular: kubecontrollersconfiguration
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
 metadata:
   name: networkpolicies.crd.projectcalico.org
 spec:
@@ -267,6 +282,89 @@ spec:
 ---
 # Source: calico/templates/rbac.yaml
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: calico-kube-controllers
+rules:
+  # Nodes are watched to monitor for deletions.
+  - apiGroups: [""]
+    resources:
+      - nodes
+    verbs:
+      - watch
+      - list
+      - get
+  # Pods are queried to check for existence.
+  - apiGroups: [""]
+    resources:
+      - pods
+    verbs:
+      - get
+  # IPAM resources are manipulated when nodes are deleted.
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - ippools
+    verbs:
+      - list
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - blockaffinities
+      - ipamblocks
+      - ipamhandles
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+  # kube-controllers manages hostendpoints.
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - hostendpoints
+    verbs:
+      - get
+      - list
+      - create
+      - update
+      - delete
+  # Needs access to update clusterinformations.
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - clusterinformations
+    verbs:
+      - get
+      - create
+      - update
+  # KubeControllersConfiguration is where it gets its config
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - kubecontrollersconfigurations
+    verbs:
+      # read its own config
+      - get
+      # create a default if none exists
+      - create
+      # update status
+      - update
+      # watch for changes
+      - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: calico-kube-controllers
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+  name: calico-kube-controllers
+  namespace: kube-system
+---
 # Include a clusterrole for the calico-node DaemonSet,
 # and bind it to the calico-node serviceaccount.
 kind: ClusterRole
@@ -479,7 +577,7 @@ spec:
         # This container installs the CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: calico/cni:v3.13.3
+          image: calico/cni:v3.14.1
           command: ["/install-cni.sh"]
           env:
             # Name of the CNI config file to create.
@@ -515,7 +613,7 @@ spec:
         # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
         # to communicate with Felix over the Policy Sync API.
         - name: flexvol-driver
-          image: calico/pod2daemon-flexvol:v3.13.3
+          image: calico/pod2daemon-flexvol:v3.14.1
           volumeMounts:
           - name: flexvol-driver-host
             mountPath: /host/driver
@@ -526,7 +624,7 @@ spec:
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: calico/node:v3.13.3
+          image: calico/node:v3.14.1
           env:
             # Use Kubernetes API as the backing datastore.
             - name: DATASTORE_TYPE
@@ -683,10 +781,64 @@ metadata:
   name: canal
 ---
-# Source: calico/templates/calico-etcd-secrets.yaml
+# Source: calico/templates/calico-kube-controllers.yaml
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: calico-kube-controllers
+  labels:
+    k8s-app: calico-kube-controllers
+spec:
+  # The controllers can only have a single active instance.
+  replicas: 1
+  selector:
+    matchLabels:
+      k8s-app: calico-kube-controllers
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      name: calico-kube-controllers
+      labels:
+        k8s-app: calico-kube-controllers
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      nodeSelector:
+        kubernetes.io/os: linux
+      tolerations:
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      serviceAccountName: calico-kube-controllers
+      priorityClassName: system-cluster-critical
+      containers:
+        - name: calico-kube-controllers
+          image: calico/kube-controllers:v3.14.1
+          env:
+            # Choose which controllers to run.
+            - name: ENABLED_CONTROLLERS
+              value: node
+            - name: DATASTORE_TYPE
+              value: kubernetes
+          readinessProbe:
+            exec:
+              command:
+                - /usr/bin/check-status
+                - -r
 ---
+# Source: calico/templates/calico-kube-controllers.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-kube-controllers
+---
+# Source: calico/templates/calico-etcd-secrets.yaml
 ---
 # Source: calico/templates/calico-typha.yaml
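Since this diff introduces the calico-kube-controllers Deployment, a quick way to check it after applying the updated manifest might look like the following; this is plain kubectl against the resources added above, nothing repo-specific assumed:

```bash
# Wait for the new controllers Deployment to roll out
kubectl -n kube-system rollout status deployment/calico-kube-controllers

# 1/1 Ready means the readinessProbe (/usr/bin/check-status -r) passes
kubectl -n kube-system get pods -l k8s-app=calico-kube-controllers
```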

remove-namespace.patch

@@ -0,0 +1,50 @@
--- canal.yaml.orig	2020-06-03 15:39:41.972295775 +0100
+++ canal.yaml	2020-06-03 15:39:59.718477177 +0100
@@ -5,7 +5,6 @@
 apiVersion: v1
 metadata:
   name: canal-config
-  namespace: kube-system
 data:
   # Typha is disabled.
   typha_service_name: "none"
@@ -536,7 +535,6 @@
 apiVersion: apps/v1
 metadata:
   name: canal
-  namespace: kube-system
   labels:
     k8s-app: canal
 spec:
@@ -781,7 +779,6 @@
 kind: ServiceAccount
 metadata:
   name: canal
-  namespace: kube-system
 
 ---
 # Source: calico/templates/calico-kube-controllers.yaml
@@ -790,7 +787,6 @@
 kind: Deployment
 metadata:
   name: calico-kube-controllers
-  namespace: kube-system
   labels:
     k8s-app: calico-kube-controllers
 spec:
@@ -804,7 +800,6 @@
   template:
     metadata:
       name: calico-kube-controllers
-      namespace: kube-system
       labels:
         k8s-app: calico-kube-controllers
       annotations:
@@ -841,7 +836,6 @@
 kind: ServiceAccount
 metadata:
   name: calico-kube-controllers
-  namespace: kube-system
 
 ---
 # Source: calico/templates/calico-etcd-secrets.yaml
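This patch strips every hard-coded `namespace: kube-system` so that kustomize can own the namespace (see the kustomize issue linked in the README). A minimal sketch of how that might be wired up, assuming a kustomization.yaml sits next to canal.yaml; the file below is illustrative, not part of this commit:

```bash
# Hypothetical kustomization.yaml that re-applies the namespace centrally
cat > kustomization.yaml <<'EOF'
namespace: kube-system
resources:
  - canal.yaml
EOF

kustomize build . | kubectl apply -f -
```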

QUICKSTART.md

@@ -0,0 +1,43 @@
# Quickstart
---
# CloudBender
## Prepare Config
- check `config/kube/kube-control-plane.yaml`
- check `config/kube/kube-workers.yaml`
## Deploy Control Plane
- `cloudbender sync kube-control-plane`
## Get kubectl config
- get `admin.conf` from S3 and store it in your local `~/.kube` folder (see the sketch below)
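A hedged sketch of this step; the bucket name below is a placeholder, not a real location - substitute whatever bucket your CloudBender stack writes to:

```bash
# <cloudbender-bucket> is hypothetical - use your deployment's config bucket
aws s3 cp s3://<cloudbender-bucket>/admin.conf ~/.kube/config
chmod 600 ~/.kube/config
```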
## Verify controller nodes
- Verify all controller nodes have the expected version and are *Ready*, e.g. via `kubectl get nodes`
## Deploy Worker group
- `cloudbender sync kube-workers`
---
# KubeZero
## Prepare Config
- check `values.yaml`
## Deploy KubeZero Helm chart
`./deploy.sh`
## Verify ArgoCD
At this stage there is no support for any kind of Ingress yet. Therefore, to reach the Argo API, use port forwarding:
`kubectl port-forward svc/argocd-server -n argocd 8080:443`
Next, download the argo-cd CLI, see https://argoproj.github.io/argo-cd/cli_installation/
Finally, log in to argo-cd via `argocd login localhost:8080` using the *admin* user and the password set in values.yaml earlier.
# Demo / own apps
- Add your own application to ArgoCD via the CLI, for example:
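A concrete example, registering ArgoCD's public guestbook sample (this is the upstream argo-cd example app, not part of KubeZero):

```bash
# Register and sync ArgoCD's example guestbook application
argocd app create guestbook \
  --repo https://github.com/argoproj/argocd-example-apps.git \
  --path guestbook \
  --dest-server https://kubernetes.default.svc \
  --dest-namespace default
argocd app sync guestbook
```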