Bugfix release for Calico, README updates
This commit is contained in:
parent 64dbb4e4a6
commit afe2e4a34c
@@ -2,7 +2,7 @@ kubezero-aws-ebs-csi-driver
===========================
KubeZero Umbrella Chart for aws-ebs-csi-driver

-Current chart version is `0.2.0`
+Current chart version is `0.3.0`

Source code can be found [here](https://kubezero.com)

@@ -10,7 +10,7 @@ Source code can be found [here](https://kubezero.com)

| Repository | Name | Version |
|------------|------|---------|
-| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
+| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## IAM Role
If you use kiam or kube2iam and restrict access on nodes running this controller please adjust:
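For orientation, the adjustment hinted at above usually amounts to permitting the controller's IAM role in the namespace where it runs. A rough sketch using kiam's namespace annotation; the role pattern is made up, not taken from this chart:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: kube-system
  annotations:
    # kiam: regexp of IAM roles that pods in this namespace may assume
    # (".*ebs-csi.*" is a hypothetical pattern, adjust to your role name)
    iam.amazonaws.com/permitted: ".*ebs-csi.*"
```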
@@ -2,7 +2,7 @@ kubezero-aws-efs-csi-driver
===========================
KubeZero Umbrella Chart for aws-efs-csi-driver

-Current chart version is `0.1.0`
+Current chart version is `0.1.1`

Source code can be found [here](https://kubezero.com)

@@ -10,7 +10,7 @@ Source code can be found [here](https://kubezero.com)

| Repository | Name | Version |
|------------|------|---------|
-| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
+| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## Storage Class
Optionally creates the *efs-cs* storage class.
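For reference, a storage class for the EFS CSI driver generally looks like the sketch below; treat it as an illustration rather than this chart's actual template (only the *efs-cs* name is taken from the README above):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: efs-cs
provisioner: efs.csi.aws.com
```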
@@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-calico
description: KubeZero Umbrella Chart for Calico
type: application
-version: 0.1.8
-appVersion: 3.15
+version: 0.1.9
+appVersion: 3.15.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@@ -2,7 +2,7 @@ kubezero-calico
===============
KubeZero Umbrella Chart for Calico

-Current chart version is `0.1.7`
+Current chart version is `0.1.9`

Source code can be found [here](https://kubezero.com)

@@ -10,7 +10,7 @@ Source code can be found [here](https://kubezero.com)

| Repository | Name | Version |
|------------|------|---------|
-| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
+| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## KubeZero default configuration

@@ -322,10 +322,6 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
-{{- if .Values.migration }}
-# Only run Calico on nodes that have been migrated.
-projectcalico.org/node-network-during-migration: calico
-{{- end }}
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
@@ -345,7 +341,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
-image: calico/cni:v3.15.0
+image: calico/cni:v3.15.1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@@ -381,7 +377,7 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
-image: calico/pod2daemon-flexvol:v3.15.0
+image: calico/pod2daemon-flexvol:v3.15.1
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
@@ -392,7 +388,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
-image: calico/node:v3.15.0
+image: calico/node:v3.15.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@@ -594,7 +590,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
-image: calico/kube-controllers:v3.15.0
+image: calico/kube-controllers:v3.15.1
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
@@ -5,7 +5,7 @@ metadata:
name: calico-node
labels:
k8s-app: calico-node
-prometheus: kube-prometheus
+release: metrics
spec:
jobLabel: k8s-app
selector:
@@ -2,7 +2,7 @@ kubezero-cert-manager
=====================
KubeZero Umbrella Chart for cert-manager

-Current chart version is `0.3.5`
+Current chart version is `0.3.6`

Source code can be found [here](https://kubezero.com)

@@ -11,7 +11,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://charts.jetstack.io | cert-manager | 0.15.1 |
-| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
+| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## AWS - IAM Role
If you use kiam or kube2iam and restrict access on nodes running cert-manager please adjust:
@@ -5,7 +5,7 @@ KubeZero Umbrella Chart for Istio
Installs Istio Operator and KubeZero Istio profile


-Current chart version is `0.2.1`
+Current chart version is `0.2.3`

Source code can be found [here](https://kubezero.com)

@@ -24,7 +24,8 @@ Source code can be found [here](https://kubezero.com)
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| ingress.autoscaleEnabled | bool | `false` | |
-| ingress.private | bool | `true` | |
+| ingress.private.enabled | bool | `true` | |
+| ingress.private.nodeSelector | string | `"31080_31443_30671_30672_31224"` | |
| ingress.replicaCount | int | `2` | |
| ingress.type | string | `"NodePort"` | |
| istio-operator.hub | string | `"docker.io/istio"` | |
@@ -2,7 +2,7 @@ kubezero-kiam
=============
KubeZero Umbrella Chart for Kiam

-Current chart version is `0.2.6`
+Current chart version is `0.2.7`

Source code can be found [here](https://kubezero.com)

@@ -11,7 +11,7 @@ Source code can be found [here](https://kubezero.com)
| Repository | Name | Version |
|------------|------|---------|
| https://uswitch.github.io/kiam-helm-charts/charts/ | kiam | 5.8.1 |
-| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
+| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## KubeZero default configuration
We run agents on the controllers as well, so we force eg. ebs csi controllers and others to assume roles etc.
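To illustrate the point about agents on controllers: the kiam agent DaemonSet has to tolerate the control-plane taint to land there. A minimal values sketch; whether the upstream chart exposes exactly this `agent.tolerations` key is an assumption:

```yaml
agent:
  tolerations:
    # allow the agent onto master/controller nodes (taint key assumed)
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
```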
@@ -4,7 +4,7 @@ KubeZero Umbrella Chart for local-static-provisioner

Provides persistent volumes backed by local volumes, eg. additional SSDs or spindles.

-Current chart version is `0.0.1`
+Current chart version is `0.1.0`

Source code can be found [here](https://kubezero.com)

@@ -12,7 +12,7 @@ Source code can be found [here](https://kubezero.com)

| Repository | Name | Version |
|------------|------|---------|
-| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
+| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## KubeZero default configuration

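As a sketch of what such a local-volume class looks like (the `local-sc-xfs` name is the one the logging chart references below; the rest is the standard static-provisioner pattern, not necessarily this chart's template):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-sc-xfs
# local PVs are pre-created by the provisioner, so no dynamic provisioning
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
```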
@@ -2,7 +2,7 @@ kubezero-logging
================
KubeZero Umbrella Chart for complete EFK stack

-Current chart version is `0.0.1`
+Current chart version is `0.0.2`

Source code can be found [here](https://kubezero.com)

@@ -16,7 +16,9 @@ Source code can be found [here](https://kubezero.com)
### ECK
- Operator mapped to controller nodes

-### ElasticSearch
+### ES

+- SSL disabled ( Todo: provide cluster certs and setup Kibana/Fluentd to use https incl. client certs )
+
- Installed Plugins:
- repository-s3
@@ -24,22 +26,32 @@ Source code can be found [here](https://kubezero.com)

- [Cross AZ Zone awareness](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-advanced-node-scheduling.html#k8s-availability-zone-awareness) is implemented via nodeSets

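For illustration, zone-aware nodeSets in an ECK Elasticsearch resource look roughly like the sketch below; the cluster name, zone names and node counts are assumptions, only the `7.8.1` version comes from this chart's values:

```yaml
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: logging            # hypothetical name
spec:
  version: 7.8.1
  nodeSets:
    # one nodeSet per availability zone, pinned via a zone nodeSelector
    - name: zone-a
      count: 1
      config:
        node.attr.zone: eu-central-1a   # assumed zone
        cluster.routing.allocation.awareness.attributes: zone
      podTemplate:
        spec:
          nodeSelector:
            topology.kubernetes.io/zone: eu-central-1a
    - name: zone-b
      count: 1
      config:
        node.attr.zone: eu-central-1b   # assumed zone
        cluster.routing.allocation.awareness.attributes: zone
      podTemplate:
        spec:
          nodeSelector:
            topology.kubernetes.io/zone: eu-central-1b
```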
### Kibana

- increased timeout to ES to 3 minutes

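The timeout bump above would typically be carried in Kibana's config; a hedged sketch, since whether the chart wires it exactly this way is an assumption:

```yaml
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: logging            # hypothetical name
spec:
  version: 7.8.1
  config:
    # 3 minutes, expressed in milliseconds
    elasticsearch.requestTimeout: 180000
```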

## Manual tasks ATM

- Install index template
- setup Kibana
- create `logstash-*` Index Pattern


## Chart Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| es.replicas | int | `2` | |
| es.storage.class | string | `"local-sc-xfs"` | |
| es.storage.size | string | `"16Gi"` | |
| fullnameOverride | string | `"logging"` | |
-| kibana.replicas | int | `1` | |
-| version | string | `"7.6.0"` | |
+| es.elastic_password | string | `""` | |
+| es.nodeSets | list | `[]` | |
+| es.prometheus | bool | `false` | |
+| es.s3Snapshot.enabled | bool | `false` | |
+| es.s3Snapshot.iamrole | string | `""` | |
+| kibana.count | int | `1` | |
+| kibana.istio.enabled | bool | `false` | |
+| kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
+| kibana.istio.url | string | `""` | |
+| version | string | `"7.8.1"` | |

## Resources:

@@ -2,7 +2,7 @@ kubezero-metrics
================
KubeZero Umbrella Chart for prometheus-operator

-Current chart version is `0.1.2`
+Current chart version is `0.1.3`

Source code can be found [here](https://kubezero.com)

@@ -87,6 +87,7 @@ Source code can be found [here](https://kubezero.com)
| prometheus-operator.prometheusOperator.enabled | bool | `true` | |
| prometheus-operator.prometheusOperator.manageCrds | bool | `false` | |
| prometheus-operator.prometheusOperator.namespaces.additional[0] | string | `"kube-system"` | |
+| prometheus-operator.prometheusOperator.namespaces.additional[1] | string | `"logging"` | |
| prometheus-operator.prometheusOperator.namespaces.releaseNamespace | bool | `true` | |
| prometheus-operator.prometheusOperator.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| prometheus-operator.prometheusOperator.tlsProxy.enabled | bool | `false` | |
@@ -2,7 +2,7 @@ kubezero
========
KubeZero ArgoCD Application - Root App of Apps chart of KubeZero

-Current chart version is `0.4.1`
+Current chart version is `0.4.3`

Source code can be found [here](https://kubezero.com)

@@ -10,7 +10,7 @@ Source code can be found [here](https://kubezero.com)

| Repository | Name | Version |
|------------|------|---------|
-| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.1 |
+| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |

## Chart Values

@@ -27,5 +27,6 @@ Source code can be found [here](https://kubezero.com)
| istio.enabled | bool | `false` | |
| kiam.enabled | bool | `false` | |
| local-volume-provisioner.enabled | bool | `false` | |
| logging.enabled | bool | `false` | |
| metrics.enabled | bool | `false` | |
| platform | string | `"aws"` | |
@@ -56,7 +56,7 @@ EOF
kubectl apply -f cert-manager-backup.yaml
else
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set kiam.not_ready=true --set istio.enabled=false --set metrics.enabled=false --set logging.enabled=false > generated-values.yaml
-helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd --create-namespace -f generated-values.yaml
+helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
wait_for kubectl get Issuer -n kube-system kubezero-local-ca-issuer 2>/dev/null 1>&2
wait_for kubectl get ClusterIssuer letsencrypt-dns-prod 2>/dev/null 1>&2
kubectl wait --for=condition=Ready -n kube-system Issuer/kubezero-local-ca-issuer
@@ -64,12 +64,12 @@ EOF
fi

# Now that we have the cert-manager webhook, get the kiam certs in place but do NOT deploy kiam yet
-helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set kiam.not_ready=true --set kiam.enabled=false > generated-values.yaml
-helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd --create-namespace -f generated-values.yaml
+helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set kiam.not_ready=true --set kiam.enabled=false --set istio.enabled=false --set metrics.enabled=false --set logging.enabled=false > generated-values.yaml
+helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml

# Now lets make sure kiam is working
-helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set kiam.not_ready=true > generated-values.yaml
-helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd --create-namespace -f generated-values.yaml
+helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml --set kiam.not_ready=true --set istio.enabled=false --set metrics.enabled=false --set logging.enabled=false > generated-values.yaml
+helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
wait_for kubectl get daemonset -n kube-system kiam-agent 2>/dev/null 1>&2
kubectl rollout status daemonset -n kube-system kiam-agent

@@ -5,7 +5,6 @@ kubezero:
{{- end }}
calico:
enabled: {{ .Values.calico.enabled }}
-type: {{ default "kustomize" .Values.calico.type }}
values:
network: {{ default "vxlan" .Values.calico.network }}
mtu: {{ default "8941" .Values.calico.mtu }}
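Given the defaults templated above, a user override in values.yaml would look roughly like this; a sketch, assuming only the keys visible in the template (the `type` key is dropped by this commit, so it is omitted):

```yaml
calico:
  enabled: true
  network: vxlan   # default per the template above
  mtu: 8941        # default per the template above
```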