Update of various components, new argoless bootstrap working

This commit is contained in:
Stefan Reimer 2020-11-21 04:24:57 -08:00
parent cd0e559678
commit 35b1570d18
64 changed files with 5545 additions and 8187 deletions

View File

@ -1 +0,0 @@
../../helm-charts/charts/fluent-bit

View File

@ -1,7 +1,7 @@
apiVersion: v2
description: KubeZero ArgoCD Helm chart to install ArgoCD itself and the KubeZero ArgoCD Application
name: kubezero-argo-cd
-version: 0.6.0
+version: 0.6.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -15,6 +15,6 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: argo-cd
-version: 2.9.3
+version: 2.9.5
repository: https://argoproj.github.io/argo-helm
kubeVersion: ">= 1.17.0"

View File

@ -12,7 +12,6 @@ spec:
sourceRepos:
- '*'
-# Only permit applications to deploy to the guestbook namespace in the same cluster
destinations:
- namespace: argocd
server: https://kubernetes.default.svc

View File

@ -31,7 +31,7 @@ argo-cd:
global:
image:
-tag: v1.7.8
+tag: v1.7.10
controller:
args:

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-calico
description: KubeZero Umbrella Chart for Calico
type: application
-version: 0.2.0
-appVersion: v3.16.1
+version: 0.2.1
+appVersion: v3.16.5
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,6 +1,6 @@
# kubezero-calico
-![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v3.16.1](https://img.shields.io/badge/AppVersion-v3.16.1-informational?style=flat-square)
+![Version: 0.2.1](https://img.shields.io/badge/Version-0.2.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v3.16.5](https://img.shields.io/badge/AppVersion-v3.16.5-informational?style=flat-square)
KubeZero Umbrella Chart for Calico
@ -47,7 +47,6 @@ The setup is based on the upstream calico-vxlan config from
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| image.tag | string | `""` | |
-| installCRDs | bool | `false` | |
| loglevel | string | `"Warning"` | |
| mtu | int | `8941` | |
| network | string | `"vxlan"` | |

View File

@ -1,101 +0,0 @@
--- calico-vxlan.yaml 2020-07-03 15:32:40.740506882 +0100
+++ calico.yaml 2020-07-03 15:27:47.651499841 +0100
@@ -10,13 +10,13 @@
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
- calico_backend: "bird"
+ calico_backend: "vxlan"
# Configure the MTU to use for workload interfaces and tunnels.
# - If Wireguard is enabled, set to your network MTU - 60
# - Otherwise, if VXLAN or BPF mode is enabled, set to your network MTU - 50
# - Otherwise, if IPIP is enabled, set to your network MTU - 20
# - Otherwise, if not using any encapsulation, set to your network MTU.
- veth_mtu: "1410"
+ veth_mtu: "8941"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
@@ -3451,29 +3451,6 @@
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
- # This container performs upgrade from host-local IPAM to calico-ipam.
- # It can be deleted if this is a fresh installation, or if you have already
- # upgraded to use calico-ipam.
- - name: upgrade-ipam
- image: calico/cni:v3.15.0
- command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
- env:
- - name: KUBERNETES_NODE_NAME
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- - name: CALICO_NETWORKING_BACKEND
- valueFrom:
- configMapKeyRef:
- name: calico-config
- key: calico_backend
- volumeMounts:
- - mountPath: /var/lib/cni/networks
- name: host-local-net-dir
- - mountPath: /host/opt/cni/bin
- name: cni-bin-dir
- securityContext:
- privileged: true
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
@@ -3545,7 +3522,7 @@
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
- value: "k8s,bgp"
+ value: "k8s,kubeadm"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
@@ -3554,7 +3531,7 @@
value: "Never"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
- value: "CrossSubnet"
+ value: "Always"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
@@ -3595,9 +3572,17 @@
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
- value: "info"
+ value: "Warning"
+ - name: FELIX_LOGSEVERITYFILE
+ value: "Warning"
+ - name: FELIX_LOGSEVERITYSYS
+ value: ""
- name: FELIX_HEALTHENABLED
value: "true"
+ - name: FELIX_PROMETHEUSGOMETRICSENABLED
+ value: "false"
+ - name: FELIX_PROMETHEUSMETRICSENABLED
+ value: "true"
securityContext:
privileged: true
resources:
@@ -3608,7 +3593,6 @@
command:
- /bin/calico-node
- -felix-live
- - -bird-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
@@ -3617,7 +3601,6 @@
command:
- /bin/calico-node
- -felix-ready
- - -bird-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
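
A note on the MTU arithmetic in the patch above: with AWS jumbo frames (network MTU 9001), the stated VXLAN rule (network MTU - 50) gives 8951, while the configured veth_mtu of 8941 equals 9001 - 60, i.e. it also leaves headroom for the larger WireGuard overhead. This reading is inferred from the comments, not stated by the commit.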

File diff suppressed because it is too large

View File

@ -1,3 +1,4 @@
+---
# Source: calico/templates/kdd-crds.yaml
@ -192,6 +193,29 @@ spec:
description: Selector for the nodes that should have this peering. When
this is set, the Node field must be empty.
type: string
+password:
+description: Optional BGP password for the peerings generated by this
+BGPPeer resource.
+properties:
+secretKeyRef:
+description: Selects a key of a secret in the node pod's namespace.
+properties:
+key:
+description: The key of the secret to select from. Must be
+a valid secret key.
+type: string
+name:
+description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+TODO: Add other useful fields. apiVersion, kind, uid?'
+type: string
+optional:
+description: Specify whether the Secret or its key must be
+defined
+type: boolean
+required:
+- key
+type: object
+type: object
peerIP:
description: The IP address of the peer followed by an optional port
number to peer with. If port number is given, format should be `[<IPv6>]:port`
@ -396,6 +420,16 @@ spec:
spec:
description: FelixConfigurationSpec contains the values of the Felix configuration.
properties:
+allowIPIPPacketsFromWorkloads:
+description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
+will add a rule to drop IPIP encapsulated traffic from workloads
+[Default: false]'
+type: boolean
+allowVXLANPacketsFromWorkloads:
+description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
+will add a rule to drop VXLAN encapsulated traffic from workloads
+[Default: false]'
+type: boolean
awsSrcDstCheck:
description: 'Set source-destination-check on AWS EC2 instances. Accepted
value must be one of "DoNothing", "Enabled" or "Disabled". [Default:

View File

@ -1,6 +0,0 @@
{{- if .Values.installCRDs }}
{{- range $path, $_ := .Files.Glob "crds/*.yaml" }}
{{ $.Files.Get $path }}
---
{{- end }}
{{- end }}

View File

@ -1,5 +1,3 @@
-installCRDs: false
image:
tag: ""

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-cert-manager
description: KubeZero Umbrella Chart for cert-manager
type: application
-version: 0.4.0
+version: 0.4.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -15,6 +15,7 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: cert-manager
-version: 1.0.3
+version: 1.0.4
repository: https://charts.jetstack.io
+condition: cert-manager.enabled
kubeVersion: ">= 1.16.0"

View File

@ -1,6 +1,6 @@
# kubezero-cert-manager
-![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.4.1](https://img.shields.io/badge/Version-0.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for cert-manager
@ -18,7 +18,7 @@ Kubernetes: `>= 1.16.0`
| Repository | Name | Version |
|------------|------|---------|
-| https://charts.jetstack.io | cert-manager | 1.0.3 |
+| https://charts.jetstack.io | cert-manager | 1.0.4 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## AWS - IAM Role
@ -38,10 +38,10 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make
| cert-manager.cainjector.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| cert-manager.cainjector.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.cainjector.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| cert-manager.enabled | bool | `true` | |
| cert-manager.extraArgs[0] | string | `"--dns01-recursive-nameservers-only"` | |
| cert-manager.ingressShim.defaultIssuerKind | string | `"ClusterIssuer"` | |
| cert-manager.ingressShim.defaultIssuerName | string | `"letsencrypt-dns-prod"` | |
-| cert-manager.installCRDs | bool | `true` | |
| cert-manager.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| cert-manager.podAnnotations | object | `{}` | "iam.amazonaws.com/roleIAM:" role ARN the cert-manager might use via kiam eg."arn:aws:iam::123456789012:role/certManagerRoleArn" |
| cert-manager.prometheus.servicemonitor.enabled | bool | `false` | |
@ -51,5 +51,5 @@ If your resolvers need additional secrets like CloudFlare API tokens etc. make
| cert-manager.webhook.tolerations[0].effect | string | `"NoSchedule"` | |
| cert-manager.webhook.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| clusterIssuer | object | `{}` | |
-| localCA.enabled | bool | `true` | |
+| localCA.enabled | bool | `false` | |
| localCA.selfsigning | bool | `true` | |

View File

@ -3,11 +3,11 @@
# KubeZero / Local cluster CA
# The resources are serialized via waves in Argo
-apiVersion: cert-manager.io/v1alpha2
-kind: Issuer
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
metadata:
name: kubezero-selfsigning-issuer
-namespace: kube-system
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
annotations:
@ -15,11 +15,11 @@ metadata:
spec:
selfSigned: {}
---
-apiVersion: cert-manager.io/v1alpha2
+apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubezero-local-ca
-namespace: kube-system
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
annotations:
@ -30,6 +30,7 @@ spec:
isCA: true
issuerRef:
name: kubezero-selfsigning-issuer
+kind: ClusterIssuer
usages:
- "any"
---
@ -39,7 +40,7 @@ apiVersion: v1
kind: Secret
metadata:
name: kubezero-ca-tls
-namespace: kube-system
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
data:
@ -48,11 +49,11 @@ data:
---
{{- end }}
-apiVersion: cert-manager.io/v1alpha2
-kind: Issuer
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
metadata:
name: kubezero-local-ca-issuer
-namespace: kube-system
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
annotations:

View File

@ -1,5 +1,5 @@
{{- if .Values.clusterIssuer.name }}
-apiVersion: cert-manager.io/v1alpha2
+apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: {{ .Values.clusterIssuer.name }}

View File

@ -17,34 +17,45 @@ localCA:
# crt: <pem-crt-material>
cert-manager:
-installCRDs: true
+enabled: true
+global:
+leaderElection:
+namespace: "cert-manager"
+podAnnotations: {}
+# iam.amazonaws.com/role: ""
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
ingressShim:
defaultIssuerName: letsencrypt-dns-prod
defaultIssuerKind: ClusterIssuer
webhook:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
cainjector:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
extraArgs:
- "--dns01-recursive-nameservers-only"
# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
# - --enable-certificate-owner-ref=true
prometheus:
servicemonitor:
enabled: false
# cert-manager.podAnnotations -- "iam.amazonaws.com/roleIAM:" role ARN the cert-manager might use via kiam eg."arn:aws:iam::123456789012:role/certManagerRoleArn"
+podAnnotations: {}
+# iam.amazonaws.com/role: ""

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-istio
description: KubeZero Umbrella Chart for Istio
type: application
-version: 0.3.4
-appVersion: 1.7.3
+version: 0.4.0
+appVersion: 1.7.4
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,6 +1,6 @@
# kubezero-istio
-![Version: 0.3.4](https://img.shields.io/badge/Version-0.3.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.7.3](https://img.shields.io/badge/AppVersion-1.7.3-informational?style=flat-square)
+![Version: 0.4.0](https://img.shields.io/badge/Version-0.4.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.7.4](https://img.shields.io/badge/AppVersion-1.7.4-informational?style=flat-square)
KubeZero Umbrella Chart for Istio
@ -34,10 +34,11 @@ Kubernetes: `>= 1.16.0`
| ingress.dnsNames[0] | string | `"*"` | |
| ingress.private.enabled | bool | `true` | |
| ingress.private.nodeSelector | string | `"31080_31443_31671_31672_31224"` | |
+| ingress.public.enabled | bool | `true` | |
| ingress.replicaCount | int | `2` | |
| ingress.type | string | `"NodePort"` | |
| istio-operator.hub | string | `"docker.io/istio"` | |
-| istio-operator.tag | string | `"1.7.3"` | |
+| istio-operator.tag | string | `"1.7.4"` | |
| istiod.autoscaleEnabled | bool | `false` | |
| istiod.replicaCount | int | `1` | |

View File

@ -99,6 +99,7 @@ rules:
- events
- namespaces
- pods
+- pods/proxy
- persistentvolumeclaims
- secrets
- services

View File

@ -0,0 +1,82 @@
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1
metadata:
name: adapters.config.istio.io
labels:
app: mixer
package: adapter
istio: mixer-adapter
chart: istio
heritage: Tiller
release: istio
annotations:
"helm.sh/resource-policy": keep
spec:
group: config.istio.io
names:
kind: adapter
plural: adapters
singular: adapter
categories:
- istio-io
- policy-istio-io
scope: Namespaced
versions:
- name: v1alpha2
schema:
openAPIV3Schema:
properties:
spec:
x-kubernetes-preserve-unknown-fields: true
type: object
status:
x-kubernetes-preserve-unknown-fields: true
type: object
type: object
served: true
storage: true
subresources:
status: {}
---
kind: CustomResourceDefinition
apiVersion: apiextensions.k8s.io/v1
metadata:
name: templates.config.istio.io
labels:
app: mixer
package: template
istio: mixer-template
chart: istio
heritage: Tiller
release: istio
annotations:
"helm.sh/resource-policy": keep
spec:
group: config.istio.io
names:
kind: template
plural: templates
singular: template
categories:
- istio-io
- policy-istio-io
scope: Namespaced
versions:
- name: v1alpha2
schema:
openAPIV3Schema:
properties:
spec:
x-kubernetes-preserve-unknown-fields: true
type: object
status:
x-kubernetes-preserve-unknown-fields: true
type: object
type: object
served: true
storage: true
subresources:
status: {}
---

View File

@ -0,0 +1,74 @@
# SYNC WITH manifests/charts/istio-operator/templates
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: istiooperators.install.istio.io
labels:
release: istio
spec:
group: install.istio.io
names:
kind: IstioOperator
plural: istiooperators
singular: istiooperator
shortNames:
- iop
scope: Namespaced
versions:
- additionalPrinterColumns:
- description: Istio control plane revision
jsonPath: .spec.revision
name: Revision
type: string
- description: IOP current state
jsonPath: .status.status
type: string
name: Status
- jsonPath: .metadata.creationTimestamp
description:
"CreationTimestamp is a timestamp representing the server time when
this object was created. It is not guaranteed to be set in happens-before order
across separate operations. Clients may not set this value. It is represented
in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for
lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description:
"APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values.
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#resources"
type: string
kind:
description:
"Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase.
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
type: string
spec:
description:
"Specification of the desired state of the istio control plane resource.
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
x-kubernetes-preserve-unknown-fields: true
type: object
status:
description:
"Status describes each of istio control plane component status at the current time.
0 means NONE, 1 means UPDATING, 2 means HEALTHY, 3 means ERROR, 4 means RECONCILING.
More info: https://github.com/istio/api/blob/master/operator/v1alpha1/istio.operator.v1alpha1.pb.html &
https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
x-kubernetes-preserve-unknown-fields: true
type: object
type: object
served: true
storage: true
subresources:
status: {}
---

View File

@ -1,3 +1,4 @@
+{{- if .Values.ingress.public.enabled }}
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
@ -30,6 +31,7 @@ spec:
name: 5
int_value: 60
state: STATE_LISTENING
+{{- end }}
{{- if .Values.ingress.private.enabled }}
---

View File

@ -1,5 +1,5 @@
{{- if .Values.ingress.dnsNames }}
-apiVersion: cert-manager.io/v1alpha2
+apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: public-ingress-cert

View File

@ -1,4 +1,5 @@
-apiVersion: networking.istio.io/v1alpha3
+{{- if .Values.ingress.public.enabled }}
+apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
name: ingressgateway
@ -28,10 +29,10 @@ spec:
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
credentialName: public-ingress-cert
+{{- end }}
{{- if .Values.ingress.private.enabled }}
---
-apiVersion: networking.istio.io/v1alpha3
+apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
name: private-ingressgateway
@ -84,4 +85,16 @@ spec:
privateKey: /etc/istio/ingressgateway-certs/tls.key
serverCertificate: /etc/istio/ingressgateway-certs/tls.crt
credentialName: public-ingress-cert
+- port:
+number: 6379
+name: redis
+protocol: TCP
+hosts:
+{{- toYaml .Values.ingress.dnsNames | nindent 4 }}
+- port:
+number: 6380
+name: redis-1
+protocol: TCP
+hosts:
+{{- toYaml .Values.ingress.dnsNames | nindent 4 }}
{{- end }}

View File

@ -120,6 +120,16 @@ spec:
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 31672
{{- end }}
+- name: redis
+port: 6379
+{{- if eq .Values.ingress.type "NodePort" }}
+nodePort: 31379
+{{- end }}
+- name: redis-1
+port: 6380
+{{- if eq .Values.ingress.type "NodePort" }}
+nodePort: 31380
+{{- end }}
global:
jwtPolicy: first-party-jwt

View File

@ -8,6 +8,9 @@ metadata:
spec:
profile: empty
components:
+base:
+enabled: true
+{{- if .Values.ingress.public.enabled }}
ingressGateways:
- enabled: true
k8s:
@ -62,6 +65,7 @@ spec:
value: 90
name: istio-ingressgateway
+{{- end }}
pilot:
enabled: true
k8s:
@ -102,6 +106,7 @@ spec:
interval: 30s
time: 60s
values:
+{{- if .Values.ingress.public.enabled }}
gateways:
istio-ingressgateway:
autoscaleEnabled: {{ .Values.ingress.autoscaleEnabled }}
@ -134,6 +139,7 @@ spec:
{{- if eq .Values.ingress.type "NodePort" }}
nodePort: 30443
{{- end }}
+{{- end }}
global:
jwtPolicy: first-party-jwt
logAsJson: true

View File

@ -1,7 +1,7 @@
#!/bin/bash
set -ex
-export ISTIO_VERSION=1.7.3
+export ISTIO_VERSION=1.7.4
if [ ! -d istio-$ISTIO_VERSION ]; then
NAME="istio-$ISTIO_VERSION"
@ -10,17 +10,17 @@ if [ ! -d istio-$ISTIO_VERSION ]; then
curl -sL "$URL" | tar xz
fi
-# Now lets extract what we need
+# Get matching istioctl
+[ -x istioctl ] && [ "$(./istioctl version --remote=false)" == $ISTIO_VERSION ] || { curl -sL https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istioctl-${ISTIO_VERSION}-linux-amd64.tar.gz | tar xz; chmod +x istioctl; }
+# Now lets extract istio-operator chart
rm -rf charts/istio-operator
cp -r istio-${ISTIO_VERSION}/manifests/charts/istio-operator charts
# Apply our patch
patch -i istio-operator.patch -p0
-[ -x istioctl ] || { curl -sL https://github.com/istio/istio/releases/download/${ISTIO_VERSION}/istioctl-${ISTIO_VERSION}-linux-amd64.tar.gz | tar xz; chmod +x istioctl; }
-# Extract base / CRDs from istioctl into plain manifest to workaround chicken egg problem with CRDs
-./istioctl manifest generate --set profile=empty --set components.base.enabled=true > templates/istio-base.yaml
-# Remove double CRD
-patch -i istio-base.patch -p3
+# Extract crds
+rm -rf crds
+cp -r istio-${ISTIO_VERSION}/manifests/charts/base/crds .

View File

@ -6,6 +6,8 @@ ingress:
autoscaleEnabled: false
replicaCount: 2
type: NodePort
+public:
+enabled: true
private:
enabled: true
nodeSelector: "31080_31443_31671_31672_31224"
@ -13,5 +15,6 @@ ingress:
- "*"
istio-operator:
+operatorNamespace: istio-system
hub: docker.io/istio
-tag: 1.7.3
+tag: 1.7.4

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-kiam
description: KubeZero Umbrella Chart for Kiam
type: application
-version: 0.2.11
+version: 0.2.12
appVersion: 3.6
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@ -16,7 +16,7 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: kiam
-version: 5.8.1
+version: 5.9.0
repository: https://uswitch.github.io/kiam-helm-charts/charts/
condition: kiam.enabled
kubeVersion: ">= 1.16.0"

View File

@ -1,6 +1,6 @@
# kubezero-kiam
-![Version: 0.2.11](https://img.shields.io/badge/Version-0.2.11-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.6](https://img.shields.io/badge/AppVersion-3.6-informational?style=flat-square)
+![Version: 0.2.12](https://img.shields.io/badge/Version-0.2.12-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.6](https://img.shields.io/badge/AppVersion-3.6-informational?style=flat-square)
KubeZero Umbrella Chart for Kiam
@ -18,7 +18,7 @@ Kubernetes: `>= 1.16.0`
| Repository | Name | Version |
|------------|------|---------|
-| https://uswitch.github.io/kiam-helm-charts/charts/ | kiam | 5.8.1 |
+| https://uswitch.github.io/kiam-helm-charts/charts/ | kiam | 5.9.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## KubeZero default configuration
@ -47,8 +47,8 @@ By default all access to the meta-data service is blocked, except for:
| kiam.agent.gatewayTimeoutCreation | string | `"5s"` | |
| kiam.agent.host.interface | string | `"cali+"` | |
| kiam.agent.host.iptables | bool | `false` | |
-| kiam.agent.image.tag | string | `"v3.6"` | |
-| kiam.agent.log.level | string | `"warn"` | |
+| kiam.agent.log.level | string | `"info"` | |
+| kiam.agent.priorityClassName | string | `"system-node-critical"` | |
| kiam.agent.prometheus.servicemonitor.enabled | bool | `false` | |
| kiam.agent.prometheus.servicemonitor.interval | string | `"30s"` | |
| kiam.agent.prometheus.servicemonitor.labels.release | string | `"metrics"` | |
@ -69,9 +69,9 @@ By default all access to the meta-data service is blocked, except for:
| kiam.server.assumeRoleArn | string | `""` | kiam server IAM role to assume, required as we run the agents next to the servers normally, eg. arn:aws:iam::123456789012:role/kiam-server-role |
| kiam.server.deployment.enabled | bool | `true` | |
| kiam.server.deployment.replicas | int | `1` | |
-| kiam.server.image.tag | string | `"v3.6"` | |
-| kiam.server.log.level | string | `"warn"` | |
+| kiam.server.log.level | string | `"info"` | |
| kiam.server.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
+| kiam.server.priorityClassName | string | `"system-cluster-critical"` | |
| kiam.server.prometheus.servicemonitor.enabled | bool | `false` | |
| kiam.server.prometheus.servicemonitor.interval | string | `"30s"` | |
| kiam.server.prometheus.servicemonitor.labels.release | string | `"metrics"` | |

View File

@ -1,28 +1,32 @@
-apiVersion: cert-manager.io/v1alpha2
+apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kiam-agent
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
secretName: kiam-agent-tls
issuerRef:
name: kubezero-local-ca-issuer
+kind: ClusterIssuer
usages:
- "any"
dnsNames:
- "kiam-agent"
---
-apiVersion: cert-manager.io/v1alpha2
+apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kiam-server
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
secretName: kiam-server-tls
issuerRef:
name: kubezero-local-ca-issuer
+kind: ClusterIssuer
usages:
- "any"
dnsNames:

View File

@ -3,8 +3,6 @@ annotateKubeSystemNameSpace: false
kiam:
enabled: true
server:
-image:
-tag: "v3.6"
# kiam.server.assumeRoleArn -- kiam server IAM role to assume, required as we run the agents next to the servers normally, eg. arn:aws:iam::123456789012:role/kiam-server-role
assumeRoleArn: ''
useHostNetwork: true
@ -33,6 +31,7 @@ kiam:
effect: NoSchedule
nodeSelector:
node-role.kubernetes.io/master: ""
+priorityClassName: system-cluster-critical
prometheus:
servicemonitor:
enabled: false
@ -40,11 +39,9 @@ kiam:
labels:
release: metrics
log:
-level: warn
+level: info
agent:
-image:
-tag: "v3.6"
gatewayTimeoutCreation: "5s"
updateStrategy: RollingUpdate
# IP tables set on each node at boot, see CloudBender
@ -68,6 +65,7 @@ kiam:
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
+priorityClassName: system-node-critical
prometheus:
servicemonitor:
enabled: false
@ -75,7 +73,7 @@ kiam:
labels:
release: metrics
log:
-level: warn
+level: info
# extraEnv:
# - name: GRPC_GO_LOG_SEVERITY_LEVEL
# value: "info"

View File

@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
-version: 0.4.1
-appVersion: 1.2.1
+version: 0.5.0
+appVersion: 1.3.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -18,6 +18,10 @@ dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
+- name: eck-operator
+version: 1.3.0
+repository: https://helm.elastic.co
+condition: eck-operator.enabled
- name: fluentd
version: 2.5.1
repository: https://kubernetes-charts.storage.googleapis.com/

File diff suppressed because it is too large

View File

@ -1,6 +0,0 @@
resources:
- all-in-one.yaml
# map operator to controller nodes
patchesStrategicMerge:
- map-operator.yaml

View File

@ -1,14 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elastic-operator
spec:
template:
spec:
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule

View File

@ -1,7 +0,0 @@
#!/bin/bash
ECK_VERSION=1.2.1
curl -o all-in-one.yaml https://download.elastic.co/downloads/eck/${ECK_VERSION}/all-in-one.yaml
kubectl kustomize . > ../templates/eck-operator.yaml

File diff suppressed because it is too large

View File

@ -5,11 +5,11 @@
# This is for backwards compatibility with older zdt-logging setup
fullnameOverride: logging
-# Version for ElasticSearch and Kibana have to match so we define it at top-level
-version: 7.6.0
elastic_password: "dsfsfs" # super_secret_elastic_password
+eck-operator:
+enabled: true
es:
nodeSets:
- name: default-zone-0

View File

@ -1,8 +1,16 @@
# use this for backwards compatibility
# fullnameOverride: ""
+eck-operator:
+enabled: false
+tolerations:
+- key: node-role.kubernetes.io/master
+effect: NoSchedule
+nodeSelector:
+node-role.kubernetes.io/master: ""
# Version for ElasticSearch and Kibana have to match so we define it at top-level
-version: 7.8.1
+version: 7.10.0
elastic_password: "" # super_secret_elastic_password
@ -67,7 +75,7 @@ fluentd:
enabled: true
additionalLabels:
release: metrics
-namespace: monitoring
+# namespace: monitoring
output:
# Default should be "logging-kubezero-logging-es-http" if fullnameOverride is NOT used

View File

@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-metrics
description: KubeZero Umbrella Chart for prometheus-operator
type: application
-version: 0.2.1
+version: 0.3.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
@ -16,9 +16,10 @@ dependencies:
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: kube-prometheus-stack
-version: 10.1.3
+version: 11.1.1
repository: https://prometheus-community.github.io/helm-charts
- name: prometheus-adapter
-version: 2.7.0
+version: 2.7.1
repository: https://prometheus-community.github.io/helm-charts
+condition: prometheus-adapter.enabled
kubeVersion: ">= 1.16.0"

View File

@ -1,6 +1,6 @@
# kubezero-metrics
-![Version: 0.2.0](https://img.shields.io/badge/Version-0.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
+![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for prometheus-operator
@ -18,8 +18,8 @@ Kubernetes: `>= 1.16.0`
| Repository | Name | Version |
|------------|------|---------|
-| https://prometheus-community.github.io/helm-charts | kube-prometheus-stack | 10.0.1 |
+| https://prometheus-community.github.io/helm-charts | kube-prometheus-stack | 11.1.1 |
-| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 2.7.0 |
+| https://prometheus-community.github.io/helm-charts | prometheus-adapter | 2.7.1 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Values
@ -41,6 +41,9 @@ Kubernetes: `>= 1.16.0`
| kube-prometheus-stack.grafana.plugins[0] | string | `"grafana-piechart-panel"` | |
| kube-prometheus-stack.grafana.service.portName | string | `"http-grafana"` | |
| kube-prometheus-stack.grafana.testFramework.enabled | bool | `false` | |
+| kube-prometheus-stack.kube-state-metrics.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
+| kube-prometheus-stack.kube-state-metrics.tolerations[0].effect | string | `"NoSchedule"` | |
+| kube-prometheus-stack.kube-state-metrics.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.kubeApiServer.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.enabled | bool | `true` | |
| kube-prometheus-stack.kubeControllerManager.service.port | int | `10257` | |
@ -69,7 +72,6 @@ Kubernetes: `>= 1.16.0`
| kube-prometheus-stack.nodeExporter.serviceMonitor.relabelings[0].targetLabel | string | `"node"` | |
| kube-prometheus-stack.prometheus.enabled | bool | `true` | |
| kube-prometheus-stack.prometheus.prometheusSpec.portName | string | `"http-prometheus"` | |
-| kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.cpu | string | `"1000m"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.memory | string | `"3Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.cpu | string | `"500m"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.memory | string | `"1Gi"` | |
@ -77,17 +79,17 @@ Kubernetes: `>= 1.16.0`
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.accessModes[0] | string | `"ReadWriteOnce"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage | string | `"16Gi"` | |
| kube-prometheus-stack.prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName | string | `"ebs-sc-gp2-xfs"` | |
-| kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled | bool | `false` | |
-| kube-prometheus-stack.prometheusOperator.createCustomResource | bool | `true` | |
+| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
+| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"NoSchedule"` | |
+| kube-prometheus-stack.prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
| kube-prometheus-stack.prometheusOperator.enabled | bool | `true` | |
-| kube-prometheus-stack.prometheusOperator.manageCrds | bool | `false` | |
| kube-prometheus-stack.prometheusOperator.namespaces.additional[0] | string | `"kube-system"` | |
| kube-prometheus-stack.prometheusOperator.namespaces.additional[1] | string | `"logging"` | |
| kube-prometheus-stack.prometheusOperator.namespaces.releaseNamespace | bool | `true` | |
| kube-prometheus-stack.prometheusOperator.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
-| kube-prometheus-stack.prometheusOperator.tlsProxy.enabled | bool | `false` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].effect | string | `"NoSchedule"` | |
| kube-prometheus-stack.prometheusOperator.tolerations[0].key | string | `"node-role.kubernetes.io/master"` | |
+| prometheus-adapter.enabled | bool | `true` | |
| prometheus-adapter.nodeSelector."node-role.kubernetes.io/master" | string | `""` | |
| prometheus-adapter.prometheus.url | string | `"http://metrics-kube-prometheus-st-prometheus"` | |
| prometheus-adapter.rules.default | bool | `false` | |

View File

@ -3,6 +3,7 @@ apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: grafana
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
@ -21,6 +22,7 @@ apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: prometheus
+namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:

View File

@ -59,10 +59,6 @@ kube-prometheus-stack:
prometheusOperator:
enabled: true
-#image:
-# tag: v0.42.1
-#prometheusConfigReloaderImage:
-# tag: v0.42.1
# Run on controller nodes
tolerations:
@ -71,24 +67,20 @@ kube-prometheus-stack:
nodeSelector:
node-role.kubernetes.io/master: ""
-# Argo takes care of CRDs
-manageCrds: false
-createCustomResource: true
-# Operator has TLS support starting 0.39, but chart does not support CAConfig and operator flags yet
-# see: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/webhook.md#deploying-the-admission-webhook
-# Until then we disable them as the patching interferes with Argo anyways
-tlsProxy:
-enabled: false
-admissionWebhooks:
-enabled: false
namespaces:
releaseNamespace: true
additional:
- kube-system
- logging
+admissionWebhooks:
+patch:
+tolerations:
+- key: node-role.kubernetes.io/master
+effect: NoSchedule
+nodeSelector:
+node-role.kubernetes.io/master: ""
nodeExporter:
enabled: true
serviceMonitor:
@ -141,12 +133,21 @@ kube-prometheus-stack:
testFramework:
enabled: false
+# Assign state metrics to control plane
+kube-state-metrics:
+tolerations:
+- key: node-role.kubernetes.io/master
+effect: NoSchedule
+nodeSelector:
+node-role.kubernetes.io/master: ""
# Todo
alertmanager:
enabled: false
# Metrics adapter
prometheus-adapter:
+enabled: true
prometheus:
url: http://metrics-kube-prometheus-st-prometheus
tolerations:

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,20 @@
apiVersion: v2
name: kubezero-redis
description: KubeZero Umbrella Chart for Redis HA
type: application
version: 0.1.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- redis
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.3"
repository: https://zero-down-time.github.io/kubezero/
- name: redis
version: 12.0.0
repository: https://charts.bitnami.com/bitnami
kubeVersion: ">= 1.16.0"

View File

@ -0,0 +1,44 @@
# kubezero-redis
![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square)
KubeZero Umbrella Chart for Redis HA
**Homepage:** <https://kubezero.com>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Quarky9 | | |
## Requirements
Kubernetes: `>= 1.16.0`
| Repository | Name | Version |
|------------|------|---------|
| https://charts.bitnami.com/bitnami | redis | 12.0.0 |
| https://zero-down-time.github.io/kubezero/ | kubezero-lib | >= 0.1.3 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| istio.enabled | bool | `false` | |
| redis.cluster.slaveCount | int | `0` | |
| redis.master.persistence.enabled | bool | `false` | |
| redis.metrics.enabled | bool | `false` | |
| redis.metrics.serviceMonitor.enabled | bool | `false` | |
| redis.metrics.serviceMonitor.namespace | string | `"monitoring"` | |
| redis.metrics.serviceMonitor.selector.release | string | `"metrics"` | |
| redis.usePassword | bool | `false` | |
# Dashboards
## Redis
# Resources
- https://github.com/helm/charts/tree/master/stable/redis
- https://github.com/rustudorcalin/deploying-redis-cluster
-

View File

@ -0,0 +1,26 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
{{ template "chart.maintainersSection" . }}
{{ template "chart.sourcesSection" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
# Dashboards
https://grafana.com/grafana/dashboards/11835
## Redis
# Resources
- https://github.com/helm/charts/tree/master/stable/redis
- https://github.com/rustudorcalin/deploying-redis-cluster
-

View File

@ -0,0 +1,26 @@
{{- if .Values.istio.enabled }}
{{- if .Values.istio.ipBlocks }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
name: {{ .Release.Namespace }}-redis-deny-not-in-ipblocks
namespace: istio-system
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
selector:
matchLabels:
app: istio-private-ingressgateway
action: DENY
rules:
- from:
- source:
notIpBlocks:
{{- with .Values.istio.ipBlocks }}
{{- . | toYaml | nindent 8 }}
{{- end }}
to:
- operation:
ports: ["{{ default 6379 .Values.redis.redisPort }}"]
{{- end }}
{{- end }}
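
For reference, a values snippet that would activate the AuthorizationPolicy above (the CIDR is illustrative, not from the commit):

istio:
  enabled: true
  ipBlocks:
    - 10.0.0.0/16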

View File

@ -0,0 +1,22 @@
{{- if .Values.istio.enabled }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: redis
namespace: {{ .Release.Namespace }}
labels:
{{ include "kubezero-lib.labels" . | indent 4 }}
spec:
hosts:
- {{ .Values.istio.url }}
gateways:
- {{ .Values.istio.gateway }}
tcp:
- match:
- port: {{ default 6379 .Values.redis.redisPort }}
route:
- destination:
host: redis-headless
port:
number: {{ default 6379 .Values.redis.redisPort }}
{{- end }}

View File

@ -0,0 +1,27 @@
redis:
redisPort: 6379
cluster:
slaveCount: 0
usePassword: false
master:
persistence:
enabled: false
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
metrics:
enabled: false
serviceMonitor:
enabled: false
selector:
release: metrics
# extraArgs:
# redis.addr: "redis://localhost:6379"
istio:
enabled: false

View File

@ -44,4 +44,4 @@ Kubernetes: `>= 1.16.0`
| platform | string | `"aws"` | |
----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.4.0](https://github.com/norwoodj/helm-docs/releases/v1.4.0)
+Autogenerated from chart metadata using [helm-docs v1.2.1](https://github.com/norwoodj/helm-docs/releases/v1.2.1)

View File

@ -1,7 +1,22 @@
#!/bin/bash
set -ex
-LOCATION=${1-""}
+ACTION=$1
+ARTIFACTS=("$2")
+LOCATION=${3:-""}
+DEPLOY_DIR=$( dirname $( realpath $0 ))
+which yq || { echo "yq not found!"; exit 1; }
+TMPDIR=$(mktemp -d kubezero.XXX)
+# First lets generate kubezero.yaml
+# This will be stored as secret during the initial kubezero chart install
+helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml > $TMPDIR/kubezero.yaml
+if [ ${ARTIFACTS[0]} == "all" ]; then
+ARTIFACTS=($(yq r -p p $TMPDIR/kubezero.yaml "kubezero.*.enabled" | awk -F "." '{print $2}'))
+fi
# Update only if we use upstream # Update only if we use upstream
if [ -z "$LOCATION" ]; then if [ -z "$LOCATION" ]; then
@ -9,20 +24,27 @@ if [ -z "$LOCATION" ]; then
helm repo update helm repo update
fi fi
-DEPLOY_DIR=$( dirname $( realpath $0 ))
-which yq || { echo "yq not found!"; exit 1; }
# Waits for max 300s and retries
function wait_for() {
local TRIES=0
while true; do
-$@ && break
+eval " $@" && break
-[ $TRIES -eq 200 ] && return 1
+[ $TRIES -eq 100 ] && return 1
let TRIES=$TRIES+1
sleep 3
done
}
function chart_location() {
if [ -z "$LOCATION" ]; then
echo "$1 --repo https://zero-down-time.github.io/kubezero"
else
echo "$LOCATION/$1"
fi
}
function _helm() { function _helm() {
local action=$1 local action=$1
local chart=$2 local chart=$2
@ -30,89 +52,257 @@ function _helm() {
local namespace=$4 local namespace=$4
shift 4 shift 4
local location helm template $(chart_location $chart) --namespace $namespace --name-template $release --skip-crds $@ > $TMPDIR/helm.yaml
if [ -z "$LOCATION" ]; then if [ $action == "apply" ]; then
location="$chart --repo https://zero-down-time.github.io/kubezero" # make sure namespace exists prior to calling helm as the create-namespace options doesn't work
else kubectl get ns $namespace || kubectl create ns $namespace
location="$LOCATION/$chart"
fi fi
[ -n "$namespace" ] && kubectl get ns $namespace || kubectl create ns $namespace # If resources are out of the single $namespace, apply without restrictions
helm template $location --namespace $namespace --name-template $release $@ | kubectl $action -f - nr_ns=$(grep -e '^ namespace:' $TMPDIR/helm.yaml | sed "s/\"//g" | sort | uniq | wc -l)
if [ $nr_ns -gt 1 ]; then
kubectl $action -f $TMPDIR/helm.yaml
else
kubectl $action --namespace $namespace -f $TMPDIR/helm.yaml
fi
} }
function deploy() { function deploy() {
_helm apply $@ _helm apply $@
} }
function delete() { function delete() {
_helm delete $@ _helm delete $@
} }
function is_enabled() {
local chart=$1
enabled=$(yq r $TMPDIR/kubezero.yaml kubezero.${chart}.enabled)
if [ "$enabled" == "true" ]; then
yq r $TMPDIR/kubezero.yaml kubezero.${chart}.values > $TMPDIR/values.yaml
return 0
fi
return 1
}
##########
# Calico #
##########
function calico() {
local chart="kubezero-calico"
local release="calico"
local namespace="kube-system"
local task=$1
if [ $task == "deploy" ]; then
deploy $chart $release $namespace -f $TMPDIR/values.yaml && rc=$? || rc=$?
kubectl apply -f $TMPDIR/helm.yaml
# Don't delete the only CNI
#elif [ $task == "delete" ]; then
# delete $chart $release $namespace -f $TMPDIR/values.yaml
elif [ $task == "crds" ]; then
helm template $(chart_location $chart) --namespace $namespace --name-template $release --skip-crds > $TMPDIR/helm-no-crds.yaml
helm template $(chart_location $chart) --namespace $namespace --name-template $release --include-crds > $TMPDIR/helm-crds.yaml
diff -e $TMPDIR/helm-no-crds.yaml $TMPDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $TMPDIR/crds.yaml
kubectl apply -f $TMPDIR/crds.yaml
fi
}
################ ################
# cert-manager # # cert-manager #
################ ################
function cert-manager() {
local chart="kubezero-cert-manager"
local release="cert-manager"
local namespace="cert-manager"
# Let's start with minimal cert-manager to get the webhook in place local task=$1
deploy kubezero-cert-manager cert-manager cert-manager
echo "Waiting for cert-manager to be ready..." if [ $task == "deploy" ]; then
wait_for kubectl get deployment -n cert-manager cert-manager-webhook 2>/dev/null 1>&2 deploy $chart $release $namespace -f $TMPDIR/values.yaml && rc=$? || rc=$?
kubectl rollout status deployment -n cert-manager cert-manager-webhook
wait_for kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0" 2>/dev/null 1>&2
# Either inject cert-manager backup or bootstrap # If any error occurs, wait for initial webhook deployment and try again
if [ -f cert-manager-backup.yaml ]; then # see: https://cert-manager.io/docs/concepts/webhook/#webhook-connection-problems-shortly-after-cert-manager-installation
kubectl apply -f cert-manager-backup.yaml if [ $rc -ne 0 ]; then
else wait_for "kubectl get deployment -n $namespace cert-manager-webhook"
deploy kubezero-cert-manager cert-manager cert-manager --set localCA.enabled=true kubectl rollout status deployment -n $namespace cert-manager-webhook
wait_for kubectl get Issuer -n kube-system kubezero-local-ca-issuer 2>/dev/null 1>&2 wait_for 'kubectl get validatingwebhookconfigurations -o yaml | grep "caBundle: LS0"'
kubectl wait --for=condition=Ready -n kube-system Issuer/kubezero-local-ca-issuer deploy $chart $release $namespace -f $TMPDIR/values.yaml
fi fi
echo "KubeZero installed successfully." wait_for "kubectl get ClusterIssuer -n $namespace kubezero-local-ca-issuer"
read kubectl wait --timeout=180s --for=condition=Ready -n $namespace ClusterIssuer/kubezero-local-ca-issuer
# Remove all kubezero elif [ $task == "delete" ]; then
delete kubezero-cert-manager cert-manager cert-manager delete $chart $release $namespace -f $TMPDIR/values.yaml
kubectl delete ns $namespace
exit 0 elif [ $task == "crds" ]; then
helm template $(chart_location $chart) --namespace $namespace --name-template $release --skip-crds --set cert-manager.installCRDs=false > $TMPDIR/helm-no-crds.yaml
# Determine if we bootstrap or update helm template $(chart_location $chart) --namespace $namespace --name-template $release --include-crds --set cert-manager.installCRDs=true > $TMPDIR/helm-crds.yaml
helm list -n argocd -f kubezero -q | grep -q kubezero && rc=$? || rc=$? diff -e $TMPDIR/helm-no-crds.yaml $TMPDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $TMPDIR/crds.yaml
if [ $rc -eq 0 ]; then kubectl apply -f $TMPDIR/crds.yaml
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml > generated-values.yaml fi
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml }
else
# Make sure kube-system is allowed to kiam ########
# Kiam #
########
function kiam() {
local chart="kubezero-kiam"
local release="kiam"
local namespace="kube-system"
local task=$1
if [ $task == "deploy" ]; then
# Certs only first
deploy $chart $release $namespace --set kiam.enabled=false
kubectl wait --timeout=120s --for=condition=Ready -n kube-system Certificate/kiam-server
# Make sure kube-system and cert-manager are allowed to kiam
kubectl annotate --overwrite namespace kube-system 'iam.amazonaws.com/permitted=.*' kubectl annotate --overwrite namespace kube-system 'iam.amazonaws.com/permitted=.*'
kubectl annotate --overwrite namespace cert-manager 'iam.amazonaws.com/permitted=.*CertManagerRole.*'
# Now that we have the cert-manager webhook, get the kiam certs in place but do NOT deploy kiam yet # Get kiam rolled out and make sure it is working
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-3.yaml > generated-values.yaml deploy $chart $release $namespace -f $TMPDIR/values.yaml
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml wait_for 'kubectl get daemonset -n kube-system kiam-agent'
kubectl wait --for=condition=Ready -n kube-system certificates/kiam-server
# Now lets make sure kiam is working
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-4.yaml > generated-values.yaml
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
wait_for kubectl get daemonset -n kube-system kiam-agent 2>/dev/null 1>&2
kubectl rollout status daemonset -n kube-system kiam-agent kubectl rollout status daemonset -n kube-system kiam-agent
# Install Istio if enabled, but keep ArgoCD istio support disabled for now in case elif [ $task == "delete" ]; then
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-5.yaml > generated-values.yaml delete $chart $release $namespace -f $TMPDIR/values.yaml
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
wait_for kubectl get deployment -n istio-operator istio-operator 2>/dev/null 1>&2
kubectl rollout status deployment -n istio-operator istio-operator
# Metrics
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-6.yaml > generated-values.yaml
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
wait_for kubectl get crds servicemonitors.monitoring.coreos.com 2>/dev/null 1>&2
# Finally we could enable the actual config and deploy all
helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml > generated-values.yaml
helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
fi fi
}
#######
# EBS #
#######
function aws-ebs-csi-driver() {
local chart="kubezero-aws-ebs-csi-driver"
local release="aws-ebs-csi-driver"
local namespace="kube-system"
local task=$1
if [ $task == "deploy" ]; then
deploy $chart $release $namespace -f $TMPDIR/values.yaml
elif [ $task == "delete" ]; then
delete $chart $release $namespace -f $TMPDIR/values.yaml
fi
}
#########
# Istio #
#########
function istio() {
local chart="kubezero-istio"
local release="istio"
local namespace="istio-system"
local task=$1
if [ $task == "deploy" ]; then
deploy $chart $release $namespace -f $TMPDIR/values.yaml
elif [ $task == "delete" ]; then
for i in $(kubectl get istiooperators -A -o name); do
kubectl delete $i -n istio-system
done
delete $chart $release $namespace -f $TMPDIR/values.yaml
kubectl delete ns istio-system
elif [ $task == "crds" ]; then
helm template $(chart_location $chart) --namespace $namespace --name-template $release --skip-crds > $TMPDIR/helm-no-crds.yaml
helm template $(chart_location $chart) --namespace $namespace --name-template $release --include-crds > $TMPDIR/helm-crds.yaml
diff -e $TMPDIR/helm-no-crds.yaml $TMPDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $TMPDIR/crds.yaml
kubectl apply -f $TMPDIR/crds.yaml
fi
}
###########
# Metrics #
###########
function metrics() {
local chart="kubezero-metrics"
local release="metrics"
local namespace="monitoring"
local task=$1
if [ $task == "deploy" ]; then
deploy $chart $release $namespace -f $TMPDIR/values.yaml
elif [ $task == "delete" ]; then
delete $chart $release $namespace -f $TMPDIR/values.yaml
kubectl delete ns monitoring
elif [ $task == "crds" ]; then
helm template $(chart_location $chart) --namespace $namespace --name-template $release --skip-crds > $TMPDIR/helm-no-crds.yaml
helm template $(chart_location $chart) --namespace $namespace --name-template $release --include-crds > $TMPDIR/helm-crds.yaml
diff -e $TMPDIR/helm-no-crds.yaml $TMPDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $TMPDIR/crds.yaml
kubectl apply -f $TMPDIR/crds.yaml
fi
}
###########
# Logging #
###########
function logging() {
local chart="kubezero-logging"
local release="logging"
local namespace="logging"
local task=$1
if [ $task == "deploy" ]; then
deploy $chart $release $namespace -f $TMPDIR/values.yaml
kubectl annotate --overwrite namespace logging 'iam.amazonaws.com/permitted=.*ElasticSearchSnapshots.*'
elif [ $task == "delete" ]; then
delete $chart $release $namespace -f $TMPDIR/values.yaml
kubectl delete ns logging
# Doesnt work right now due to V2 Helm implementation of the eck-operator-crd chart
#elif [ $task == "crds" ]; then
# helm template $(chart_location $chart) --namespace $namespace --name-template $release --skip-crds > $TMPDIR/helm-no-crds.yaml
# helm template $(chart_location $chart) --namespace $namespace --name-template $release --include-crds > $TMPDIR/helm-crds.yaml
# diff -e $TMPDIR/helm-no-crds.yaml $TMPDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $TMPDIR/crds.yaml
# kubectl apply -f $TMPDIR/crds.yaml
fi
}
## MAIN ##
if [ $1 == "deploy" ]; then
for t in ${ARTIFACTS[@]}; do
is_enabled $t && $t deploy
done
elif [ $1 == "crds" ]; then
for t in ${ARTIFACTS[@]}; do
is_enabled $t && $t crds
done
# Delete in reverse order, continue even if errors
elif [ $1 == "delete" ]; then
set +e
for (( idx=${#ARTIFACTS[@]}-1 ; idx>=0 ; idx-- )) ; do
is_enabled ${ARTIFACTS[idx]} && ${ARTIFACTS[idx]} delete
done
fi
[ "$DEBUG" == "" ] && rm -rf $TMPDIR

View File

@ -2,18 +2,6 @@
 set -e

 DEPLOY_DIR=$( dirname $( realpath $0 ))
-which yq || { echo "yq not found!"; exit 1; }
-
-# Waits for max 300s and retries
-function wait_for() {
-  local TRIES=0
-  while true; do
-    $@ && break
-    [ $TRIES -eq 200 ] && return 1
-    let TRIES=$TRIES+1
-    sleep 3
-  done
-}

 helm repo add kubezero https://zero-down-time.github.io/kubezero
 helm repo update
@ -24,72 +12,6 @@ if [ $rc -eq 0 ]; then
   helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml > generated-values.yaml
   helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
 else
-  # During bootstrap we first generate a minimal values.yaml to prevent various deadlocks
-
-  # Generate ArgoCD password if not in values.yaml yet and add it
-  grep -q argocdServerAdminPassword values.yaml && rc=$? || rc=$?
-  if [ $rc -ne 0 ]; then
-    _argo_date="$(date -u --iso-8601=seconds)"
-    _argo_passwd="$($DEPLOY_DIR/argocd_password.py)"
-
-    cat <<EOF > _argocd_values.yaml
-argo-cd:
-  configs:
-    secret:
-      # ArgoCD password: ${_argo_passwd%%:*} Please move to secure location !
-      argocdServerAdminPassword: "${_argo_passwd##*:}"
-      argocdServerAdminPasswordMtime: "$_argo_date"
-EOF
-    yq merge -i --overwrite values.yaml _argocd_values.yaml && rm -f _argocd_values.yaml
-  fi
-
-  # Deploy initial argocd
-  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-1.yaml > generated-values.yaml
-  helm install -n argocd kubezero kubezero/kubezero-argo-cd --create-namespace -f generated-values.yaml
-
-  # Wait for argocd-server to be running
-  kubectl rollout status deployment -n argocd kubezero-argocd-server
-
-  # Now wait for cert-manager and the local CA to be bootstrapped
-  echo "Waiting for cert-manager to be deployed..."
-  wait_for kubectl get deployment -n cert-manager cert-manager-webhook 2>/dev/null 1>&2
-  kubectl rollout status deployment -n cert-manager cert-manager-webhook
-
-  # Either inject cert-manager backup or bootstrap
-  if [ -f cert-manager-backup.yaml ]; then
-    kubectl apply -f cert-manager-backup.yaml
-  else
-    helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-2.yaml > generated-values.yaml
-    helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
-    wait_for kubectl get Issuer -n kube-system kubezero-local-ca-issuer 2>/dev/null 1>&2
-    kubectl wait --for=condition=Ready -n kube-system Issuer/kubezero-local-ca-issuer
-  fi
-
-  # Make sure kube-system is allowed to kiam
-  kubectl annotate --overwrite namespace kube-system 'iam.amazonaws.com/permitted=.*'
-
-  # Now that we have the cert-manager webhook, get the kiam certs in place but do NOT deploy kiam yet
-  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-3.yaml > generated-values.yaml
-  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
-  kubectl wait --for=condition=Ready -n kube-system certificates/kiam-server
-
-  # Now lets make sure kiam is working
-  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-4.yaml > generated-values.yaml
-  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
-  wait_for kubectl get daemonset -n kube-system kiam-agent 2>/dev/null 1>&2
-  kubectl rollout status daemonset -n kube-system kiam-agent
-
-  # Install Istio if enabled, but keep ArgoCD istio support disabled for now in case
-  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-5.yaml > generated-values.yaml
-  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
-  wait_for kubectl get deployment -n istio-operator istio-operator 2>/dev/null 1>&2
-  kubectl rollout status deployment -n istio-operator istio-operator
-
-  # Metrics
-  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml -f $DEPLOY_DIR/values-step-6.yaml > generated-values.yaml
-  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
-  wait_for kubectl get crds servicemonitors.monitoring.coreos.com 2>/dev/null 1>&2
-
-  # Finally we could enable the actual config and deploy all
-  helm template $DEPLOY_DIR -f values.yaml -f cloudbender.yaml > generated-values.yaml
-  helm upgrade -n argocd kubezero kubezero/kubezero-argo-cd -f generated-values.yaml
+  echo "To bootstrap clusters please use bootstrap.sh !"
+  exit 1
 fi

View File

@ -134,6 +134,11 @@ kubezero:
   logging:
     enabled: {{ .Values.logging.enabled }}
     values:
+      {{- with index .Values "logging" "eck-operator" }}
+      eck-operator:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- if .Values.logging.elastic_password }}
       elastic_password: {{ .Values.logging.elastic_password }}
       {{- end }}
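With this passthrough, any eck-operator settings placed under logging in the cluster values end up under kubezero.logging.values in the rendered kubezero.yaml. A minimal, illustrative fragment appended to a cluster values file:

cat >> values.yaml <<'EOF'
logging:
  enabled: true
  eck-operator:
    enabled: true
EOF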

View File

@ -1,17 +0,0 @@
kiam:
enabled: false
ready: false
cert-manager:
ready: false
istio:
enabled: false
ready: false
metrics:
enabled: false
ready: false
logging:
enabled: false

View File

@ -1,17 +0,0 @@
kiam:
enabled: false
ready: false
cert-manager:
ready: true
istio:
enabled: false
ready: false
metrics:
enabled: false
ready: false
logging:
enabled: false

View File

@ -1,17 +0,0 @@
kiam:
certsOnly: true
ready: false
cert-manager:
ready: true
istio:
enabled: false
ready: false
metrics:
enabled: false
ready: false
logging:
enabled: false

View File

@ -1,16 +0,0 @@
kiam:
ready: false
cert-manager:
ready: true
istio:
enabled: false
ready: false
metrics:
enabled: false
ready: false
logging:
enabled: false

View File

@ -1,9 +0,0 @@
istio:
ready: false
metrics:
enabled: false
ready: false
logging:
enabled: false

View File

@ -1,6 +0,0 @@
metrics:
enabled: true
ready: false
logging:
enabled: false

View File

@ -39,12 +39,15 @@ metrics:
 logging:
   enabled: false
+  eck-operator:
+    enabled: false
   fluentd:
     enabled: false
   fluent-bit:
     enabled: false

 argo-cd:
+  enabled: false
   server: {}

 istio:
   enabled: true
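These defaults keep every optional component off, so a cluster has to opt in per component; for example (a sketch, chart path assumed):

helm template kubezero charts/kubezero \
  -f values.yaml \
  --set logging.enabled=true \
  --set argo-cd.enabled=true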