From 3d84ca4fd93445489b4a2bd9752f8d70318a2640 Mon Sep 17 00:00:00 2001 From: Stefan Reimer Date: Wed, 3 Jun 2020 15:45:01 +0100 Subject: [PATCH] Latest Calico, add QUICKSTART draft --- artifacts/kubezero-calico/README.md | 2 +- artifacts/kubezero-calico/canal.yaml | 162 +++++++++++++++++- .../kubezero-calico/remove-namespace.patch | 50 ++++++ charts/kubezero/Quickstart.md | 43 +++++ 4 files changed, 251 insertions(+), 6 deletions(-) create mode 100644 artifacts/kubezero-calico/remove-namespace.patch create mode 100644 charts/kubezero/Quickstart.md diff --git a/artifacts/kubezero-calico/README.md b/artifacts/kubezero-calico/README.md index ba64f7ec..288574d1 100644 --- a/artifacts/kubezero-calico/README.md +++ b/artifacts/kubezero-calico/README.md @@ -9,4 +9,4 @@ See eg: `https://github.com/kubernetes-sigs/kustomize/issues/1351` ## Upgrade See: https://docs.projectcalico.org/maintenance/kubernetes-upgrade -`curl https://docs.projectcalico.org/manifests/canal.yaml -O` +`curl https://docs.projectcalico.org/manifests/canal.yaml -O && patch < remove-namespace.patch` diff --git a/artifacts/kubezero-calico/canal.yaml b/artifacts/kubezero-calico/canal.yaml index ef79974e..6adfdb5a 100644 --- a/artifacts/kubezero-calico/canal.yaml +++ b/artifacts/kubezero-calico/canal.yaml @@ -150,6 +150,8 @@ spec: kind: GlobalNetworkPolicy plural: globalnetworkpolicies singular: globalnetworkpolicy + shortNames: + - gnp --- apiVersion: apiextensions.k8s.io/v1beta1 @@ -238,6 +240,19 @@ spec: --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: KubeControllersConfiguration + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: name: networkpolicies.crd.projectcalico.org spec: @@ -267,6 +282,89 @@ spec: --- # Source: calico/templates/rbac.yaml +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- # Include a clusterrole for the calico-node DaemonSet, # and bind it to the calico-node serviceaccount. kind: ClusterRole @@ -479,7 +577,7 @@ spec: # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni - image: calico/cni:v3.13.3 + image: calico/cni:v3.14.1 command: ["/install-cni.sh"] env: # Name of the CNI config file to create. @@ -515,7 +613,7 @@ spec: # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes # to communicate with Felix over the Policy Sync API. - name: flexvol-driver - image: calico/pod2daemon-flexvol:v3.13.3 + image: calico/pod2daemon-flexvol:v3.14.1 volumeMounts: - name: flexvol-driver-host mountPath: /host/driver @@ -526,7 +624,7 @@ spec: # container programs network policy and routes on each # host. - name: calico-node - image: calico/node:v3.13.3 + image: calico/node:v3.14.1 env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE @@ -683,10 +781,64 @@ metadata: name: canal --- -# Source: calico/templates/calico-etcd-secrets.yaml +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: calico/kube-controllers:v3.14.1 + env: + # Choose which controllers to run. 
+            - name: ENABLED_CONTROLLERS
+              value: node
+            - name: DATASTORE_TYPE
+              value: kubernetes
+          readinessProbe:
+            exec:
+              command:
+              - /usr/bin/check-status
+              - -r
 ---
-# Source: calico/templates/calico-kube-controllers.yaml
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-kube-controllers
+
+---
+# Source: calico/templates/calico-etcd-secrets.yaml
 ---
 # Source: calico/templates/calico-typha.yaml
diff --git a/artifacts/kubezero-calico/remove-namespace.patch b/artifacts/kubezero-calico/remove-namespace.patch
new file mode 100644
index 00000000..84b7822d
--- /dev/null
+++ b/artifacts/kubezero-calico/remove-namespace.patch
@@ -0,0 +1,50 @@
+--- canal.yaml.orig	2020-06-03 15:39:41.972295775 +0100
++++ canal.yaml	2020-06-03 15:39:59.718477177 +0100
+@@ -5,7 +5,6 @@
+ apiVersion: v1
+ metadata:
+   name: canal-config
+-  namespace: kube-system
+ data:
+   # Typha is disabled.
+   typha_service_name: "none"
+@@ -536,7 +535,6 @@
+ apiVersion: apps/v1
+ metadata:
+   name: canal
+-  namespace: kube-system
+   labels:
+     k8s-app: canal
+ spec:
+@@ -781,7 +779,6 @@
+ kind: ServiceAccount
+ metadata:
+   name: canal
+-  namespace: kube-system
+ 
+ ---
+ # Source: calico/templates/calico-kube-controllers.yaml
+@@ -790,7 +787,6 @@
+ kind: Deployment
+ metadata:
+   name: calico-kube-controllers
+-  namespace: kube-system
+   labels:
+     k8s-app: calico-kube-controllers
+ spec:
+@@ -804,7 +800,6 @@
+   template:
+     metadata:
+       name: calico-kube-controllers
+-      namespace: kube-system
+       labels:
+         k8s-app: calico-kube-controllers
+       annotations:
+@@ -841,7 +836,6 @@
+ kind: ServiceAccount
+ metadata:
+   name: calico-kube-controllers
+-  namespace: kube-system
+ 
+ ---
+ # Source: calico/templates/calico-etcd-secrets.yaml
diff --git a/charts/kubezero/Quickstart.md b/charts/kubezero/Quickstart.md
new file mode 100644
index 00000000..67f5bafa
--- /dev/null
+++ b/charts/kubezero/Quickstart.md
@@ -0,0 +1,43 @@
+# Quickstart
+---
+
+# CloudBender
+
+## Prepare Config
+- check `config/kube/kube-control-plane.yaml`
+- check `config/kube/kube-workers.yaml`
+
+
+## Deploy Control Plane
+- `cloudbender sync kube-control-plane`
+
+## Get kubectl config
+- get `admin.conf` from S3 and store it in your local `~/.kube` folder (see the example at the end of this guide)
+
+## Verify controller nodes
+- Verify all controller nodes have the expected version and are *Ready*, e.g. via `kubectl get nodes`
+
+## Deploy Worker group
+- `cloudbender sync kube-workers`
+
+
+---
+# KubeZero
+
+## Prepare Config
+- check `values.yaml`
+
+## Deploy KubeZero Helm chart
+`./deploy.sh`
+
+
+## Verify ArgoCD
+At this stage there is no support for any kind of Ingress yet. Therefore, in order to reach the ArgoCD API server, use port forwarding:
+`kubectl port-forward svc/argocd-server -n argocd 8080:443`
+
+Next we need to download the argo-cd CLI, see https://argoproj.github.io/argo-cd/cli_installation/ (an example install command is included at the end of this guide).
+
+Finally, log in to argo-cd via `argocd login localhost:8080` using the *admin* user and the password set in `values.yaml` earlier.
+
+# Demo / own apps
+- Add your own application to ArgoCD via the CLI (see the example below)
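+
+## Example: fetch the kubeconfig from S3
+A minimal sketch for the "Get kubectl config" step above; the bucket and key below are placeholders, as the actual location depends on your CloudBender configuration:
+`aws s3 cp s3://<your-cloudbender-bucket>/<cluster-path>/admin.conf ~/.kube/config`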
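+
+## Example: install the argo-cd CLI
+One possible way to install the CLI on Linux; check the CLI installation page linked above for the current release and other platforms:
+`curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64 && chmod +x /usr/local/bin/argocd`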
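+
+## Example: add an application via the argocd CLI
+A minimal sketch, assuming you are already logged in as described above; the repository URL, path and application name are placeholders for your own app:
+`argocd app create my-app --repo https://github.com/<your-org>/<your-repo>.git --path deploy --dest-server https://kubernetes.default.svc --dest-namespace default`
+
+Afterwards trigger the first sync via `argocd app sync my-app`.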