fix: Fix CNI upgrade, new ECK CRDs, new logging, Istio fixes
parent eafd8a8429
commit a48e92285d
@@ -16,7 +16,8 @@ protectKernelDefaults: {{ .Values.protectKernelDefaults }}
 # tlsCertFile: /var/lib/kubelet/pki/kubelet.crt
 # tlsPrivateKeyFile: /var/lib/kubelet/pki/kubelet.key
 tlsCipherSuites: [TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256]
-featureGates: {{ include "kubeadm.featuregates" ( dict "return" "map" ) }}
+featureGates:
+  {{- include "kubeadm.featuregates" ( dict "return" "map" ) | nindent 2 }}
 # Minimal unit is 50m per pod
 podsPerCore: 20
 # cpuCFSQuotaPeriod: 10ms
@@ -1,16 +1,16 @@
 {{- /* Feature gates for all control plane components */ -}}
-{{- define "kubeadm.featuregates" -}}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "GenericEphemeralVolume" "InTreePluginAWSUnregister" "InTreePluginAzureDiskUnregister" "InTreePluginAzureFileUnregister" "InTreePluginGCEUnregister" "InTreePluginOpenStackUnregister" }}
+{{- define "kubeadm.featuregates" }}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "GenericEphemeralVolume" "KubeletCredentialProviders"}}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
 {{- end }}
 {{- else }}
 {{- range $key := $gates }}
-{{ $key }}: true
+{{ $key }}: true
 {{- end }}
 {{- end }}
-{{- end }}
+{{- end -}}


 {{- /* Etcd default initial cluster */ -}}
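For a quick sanity check of the reworked helper, the chart can be rendered locally and the gate list inspected in both its CSV and map forms. This is only a hedged sketch — it assumes the kubeadm chart renders with its default values and that `helm` is available.

```bash
# Sketch only: render charts/kubeadm and look at how "kubeadm.featuregates" expands
# (CSV for component command-line flags, a YAML map inside the kubelet config).
helm template charts/kubeadm 2>/dev/null | grep -i -A4 'feature[-_]\?gates'
```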
charts/kubeadm/templates/credential-provider.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: kubelet.config.k8s.io/v1alpha1
kind: CredentialProviderConfig
providers:
  - name: amazon-ecr-credential-helper
    matchImages:
      - "*.dkr.ecr.*.amazonaws.com"
      - "*.dkr.ecr.*.amazonaws.cn"
      - "*.dkr.ecr-fips.*.amazonaws.com"
      - "*.dkr.ecr.us-iso-east-1.c2s.ic.gov"
      - "*.dkr.ecr.us-isob-east-1.sc2s.sgov.gov"
    defaultCacheDuration: "12h"
    apiVersion: credentialprovider.kubelet.k8s.io/v1alpha1
    args:
      - get
    #env:
    #  - name: AWS_PROFILE
    #    value: example_profile
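The new CredentialProviderConfig only takes effect once the kubelet is pointed at it via its image-credential-provider flags, together with the KubeletCredentialProviders feature gate enabled above. The snippet below is a hedged illustration; the file location and plugin directory are assumptions for this sketch, not values taken from the chart.

```bash
# Illustrative only: the two kubelet flags that consume a CredentialProviderConfig.
# Paths are assumptions; adjust to the node image in use.
KUBELET_EXTRA_ARGS="--image-credential-provider-config=/etc/kubernetes/credential-provider.yaml"
KUBELET_EXTRA_ARGS="$KUBELET_EXTRA_ARGS --image-credential-provider-bin-dir=/usr/libexec/kubelet/plugins"
echo "$KUBELET_EXTRA_ARGS"
```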
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-ci
 description: KubeZero umbrella chart for all things CI
 type: application
-version: 0.4.46
+version: 0.4.50
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -56,11 +56,13 @@ spec:
   - {{ .Values.jenkins.istio.agent.url }}
   gateways:
   - {{ .Values.jenkins.istio.agent.gateway }}
-  http:
-  - route:
-    - destination:
-        host: {{ template "kubezero-lib.fullname" (merge (dict "subchart" "jenkins") .) }}
-        port:
-          number: 50000
+  tcp:
+  - match:
+    - port: 50000
+    route:
+    - destination:
+        host: {{ template "kubezero-lib.fullname" (merge (dict "subchart" "jenkins") .) }}-agent
+        port:
+          number: 50000
 {{- end }}
 {{- end }}
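To confirm the agent port is now routed as plain TCP rather than HTTP, something like the following can be used once the chart is deployed; the namespace is an assumption for illustration.

```bash
# Hedged check: inspect the rendered VirtualService for the tcp route on port 50000.
kubectl -n jenkins get virtualservice -o yaml | grep -B2 -A8 'tcp:'
```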
@@ -115,12 +115,12 @@ jenkins:

   installPlugins:
     - kubernetes:3580.v78271e5631dc
-    - workflow-aggregator:2.6
-    - git:4.11.0
-    - configuration-as-code:1414.v878271fc496f
+    - workflow-aggregator:2.7
+    - git:4.11.1
+    - configuration-as-code:1429.v09b_044a_c93de
     - antisamy-markup-formatter:2.7
     - prometheus:2.0.11
-    - htmlpublisher:1.29
+    - htmlpublisher:1.30
     - build-discarder:60.v1747b0eb632a
     - dark-theme:156.v6cf16af6f9ef

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-logging
 description: KubeZero Umbrella Chart for complete EFK stack
 type: application
-version: 0.7.21
+version: 0.8.0
 appVersion: 1.6.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -19,7 +19,7 @@ dependencies:
     version: ">= 0.1.3"
     repository: https://cdn.zero-downtime.net/charts/
   - name: eck-operator
-    version: 1.6.0
+    version: 2.1.0
     # repository: https://helm.elastic.co
     condition: eck-operator.enabled
   - name: fluentd
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 1.6.0
+appVersion: 2.1.0
 description: 'A Helm chart for deploying the Elastic Cloud on Kubernetes (ECK) operator: the official Kubernetes operator for orchestrating Elasticsearch, Kibana, APM Server, Enterprise Search, and Beats.'
 home: https://github.com/elastic/cloud-on-k8s
 icon: https://helm.elastic.co/icons/eck.png
@@ -17,4 +17,4 @@ maintainers:
   name: Elastic
 name: eck-operator
 type: application
-version: 1.6.0
+version: 2.1.0
File diff suppressed because it is too large
@@ -48,7 +48,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
 Selector labels
 */}}
 {{- define "eck-operator.selectorLabels" -}}
-{{- if .Values.internal.manifestGen }}
+{{- if .Values.global.manifestGen }}
 control-plane: elastic-operator
 {{- else }}
 app.kubernetes.io/name: {{ include "eck-operator.name" . }}
@@ -71,8 +71,8 @@ Create the name of the service account to use
 Determine effective Kubernetes version
 */}}
 {{- define "eck-operator.effectiveKubeVersion" -}}
-{{- if .Values.internal.manifestGen -}}
-{{- semver .Values.internal.kubeVersion -}}
+{{- if .Values.global.manifestGen -}}
+{{- semver .Values.global.kubeVersion -}}
 {{- else -}}
 {{- .Capabilities.KubeVersion.Version -}}
 {{- end -}}
@@ -82,7 +82,7 @@ Determine effective Kubernetes version
 Determine the name for the webhook
 */}}
 {{- define "eck-operator.webhookName" -}}
-{{- if .Values.internal.manifestGen -}}
+{{- if .Values.global.manifestGen -}}
 elastic-webhook.k8s.elastic.co
 {{- else -}}
 {{- $name := include "eck-operator.name" . -}}
@@ -94,7 +94,7 @@ elastic-webhook.k8s.elastic.co
 Determine the name for the webhook secret
 */}}
 {{- define "eck-operator.webhookSecretName" -}}
-{{- if .Values.internal.manifestGen -}}
+{{- if .Values.global.manifestGen -}}
 elastic-webhook-server-cert
 {{- else -}}
 {{- $name := include "eck-operator.name" . -}}
@@ -106,7 +106,7 @@ elastic-webhook-server-cert
 Determine the name for the webhook service
 */}}
 {{- define "eck-operator.webhookServiceName" -}}
-{{- if .Values.internal.manifestGen -}}
+{{- if .Values.global.manifestGen -}}
 elastic-webhook-server
 {{- else -}}
 {{- $name := include "eck-operator.name" . -}}
@@ -114,56 +114,10 @@ elastic-webhook-server
 {{- end -}}
 {{- end -}}

-{{/*
-Add the webhook sideEffects field on supported Kubernetes versions
-*/}}
-{{- define "eck-operator.webhookSideEffects" -}}
-{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
-{{- $kubeVersionSupported := semverCompare ">=1.13.0-0" $kubeVersion -}}
-{{- if $kubeVersionSupported }}
-sideEffects: "None"
-{{- end }}
-{{- end }}
-
-{{/*
-Use v1 of ValidatingWebhookConfiguration on supported Kubernetes versions
-*/}}
-{{- define "eck-operator.webhookAPIVersion" -}}
-{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
-{{- $kubeVersionSupported := semverCompare ">=1.16.0-0" $kubeVersion -}}
-{{- if $kubeVersionSupported -}}
-admissionregistration.k8s.io/v1
-{{- else -}}
-admissionregistration.k8s.io/v1beta1
-{{- end -}}
-{{- end }}
-
-
-{{/*
-Define admissionReviewVersions based on Kubernetes version
-*/}}
-{{- define "eck-operator.webhookAdmissionReviewVersions" -}}
-{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
-{{- $kubeVersionSupported := semverCompare ">=1.16.0-0" $kubeVersion -}}
-{{- if $kubeVersionSupported }}
-admissionReviewVersions: [v1beta1]
-{{- end }}
-{{- end }}
-
-
-{{/*
-Define webhook match policy based on Kubernetes version
-*/}}
-{{- define "eck-operator.webhookMatchPolicy" -}}
-{{- $kubeVersion := (include "eck-operator.effectiveKubeVersion" .) -}}
-{{- $kubeVersionSupported := semverCompare ">=1.16.0-0" $kubeVersion -}}
-{{- if $kubeVersionSupported }}
-matchPolicy: Exact
-{{- end }}
-{{- end }}
-
 {{/*
 RBAC permissions
+NOTE - any changes made to RBAC permissions below require
+updating docs/operating-eck/eck-permissions.asciidoc file.
 */}}
 {{- define "eck-operator.rbacRules" -}}
 - apiGroups:
@@ -175,14 +129,20 @@
- apiGroups:
  - ""
  resources:
  - pods
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - pods
  - events
  - persistentvolumeclaims
  - secrets
  - services
  - configmaps
  - serviceaccounts
  verbs:
  - get
  - list
@@ -223,8 +183,6 @@
  - elasticsearches
  - elasticsearches/status
  - elasticsearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
-  - enterpriselicenses
-  - enterpriselicenses/status
  verbs:
  - get
  - list
@@ -232,7 +190,6 @@
  - create
  - update
  - patch
  - delete
- apiGroups:
  - kibana.k8s.elastic.co
  resources:
@@ -246,7 +203,6 @@
  - create
  - update
  - patch
  - delete
- apiGroups:
  - apm.k8s.elastic.co
  resources:
@@ -260,7 +216,6 @@
  - create
  - update
  - patch
  - delete
- apiGroups:
  - enterprisesearch.k8s.elastic.co
  resources:
@@ -274,7 +229,6 @@
  - create
  - update
  - patch
  - delete
- apiGroups:
  - beat.k8s.elastic.co
  resources:
@@ -288,7 +242,6 @@
  - create
  - update
  - patch
  - delete
- apiGroups:
  - agent.k8s.elastic.co
  resources:
@@ -302,7 +255,6 @@
  - create
  - update
  - patch
  - delete
- apiGroups:
  - maps.k8s.elastic.co
  resources:
@@ -316,7 +268,6 @@
  - create
  - update
  - patch
  - delete
{{- end -}}

{{/*
@@ -334,7 +285,6 @@ RBAC permissions on non-namespaced resources
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - mutatingwebhookconfigurations
  - validatingwebhookconfigurations
  verbs:
  - get
@@ -345,3 +295,17 @@ RBAC permissions on non-namespaced resources
  - patch
  - delete
{{- end -}}
+
+{{/*
+RBAC permissions to read node labels
+*/}}
+{{- define "eck-operator.readNodeLabelsRbacRule" -}}
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+{{- end -}}
@@ -9,6 +9,9 @@ metadata:
rules:
{{ template "eck-operator.rbacRules" . | toYaml | indent 2 }}
{{ template "eck-operator.clusterWideRbacRules" . | toYaml | indent 2 }}
+{{ if .Values.config.exposedNodeLabels }}
+{{ template "eck-operator.readNodeLabelsRbacRule" . | toYaml | indent 2 }}
+{{ end -}}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -16,10 +16,14 @@ data:
  ca-cert-rotate-before: {{ .Values.config.caRotateBefore }}
  cert-validity: {{ .Values.config.certificatesValidity }}
  cert-rotate-before: {{ .Values.config.certificatesRotateBefore }}
+  {{- if .Values.config.exposedNodeLabels }}
+  exposed-node-labels: [{{ join "," .Values.config.exposedNodeLabels }}]
+  {{- end }}
  set-default-security-context: {{ .Values.config.setDefaultSecurityContext }}
  kube-client-timeout: {{ .Values.config.kubeClientTimeout }}
  elasticsearch-client-timeout: {{ .Values.config.elasticsearchClientTimeout }}
  disable-telemetry: {{ .Values.telemetry.disabled }}
  distribution-channel: {{ .Values.telemetry.distributionChannel }}
  {{- if .Values.telemetry.interval }}
  telemetry-interval: {{ .Values.telemetry.interval }}
  {{- end }}
@@ -1,4 +1,4 @@
-{{- if (and .Values.internal.manifestGen .Values.internal.createOperatorNamespace) -}}
+{{- if (and .Values.global.manifestGen .Values.global.createOperatorNamespace) -}}
---
apiVersion: v1
kind: Namespace
@@ -46,7 +46,6 @@ spec:
          args:
            - "manager"
            - "--config=/conf/eck.yaml"
-           - "--distribution-channel={{ .Values.telemetry.distributionChannel }}"
          {{- with .Values.securityContext }}
          securityContext:
            {{- toYaml . | nindent 12 }}
@@ -1,6 +1,6 @@
{{- if .Values.webhook.enabled -}}
---
-apiVersion: {{ include "eck-operator.webhookAPIVersion" $ }}
+apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: {{ include "eck-operator.webhookName" . }}
@@ -27,9 +27,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-agent-validation-v1alpha1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - agent.k8s.elastic.co
@@ -56,9 +56,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-apm-validation-v1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - apm.k8s.elastic.co
@@ -85,9 +85,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-apm-validation-v1beta1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - apm.k8s.elastic.co
@@ -114,9 +114,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-beat-validation-v1beta1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - beat.k8s.elastic.co
@@ -143,9 +143,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-ent-validation-v1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - enterprisesearch.k8s.elastic.co
@@ -172,9 +172,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-ent-validation-v1beta1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - enterprisesearch.k8s.elastic.co
@@ -201,9 +201,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-es-validation-v1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - elasticsearch.k8s.elastic.co
@@ -230,9 +230,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-es-validation-v1beta1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - elasticsearch.k8s.elastic.co
@@ -259,9 +259,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-kb-validation-v1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - kibana.k8s.elastic.co
@@ -288,9 +288,9 @@ webhooks:
{{- toYaml . | nindent 4 }}
{{- end }}
  name: elastic-kb-validation-v1beta1.k8s.elastic.co
-{{- include "eck-operator.webhookMatchPolicy" $ | indent 2 }}
-{{- include "eck-operator.webhookAdmissionReviewVersions" $ | indent 2 }}
-{{- include "eck-operator.webhookSideEffects" $ | indent 2 }}
+  matchPolicy: Exact
+  admissionReviewVersions: [v1beta1]
+  sideEffects: None
  rules:
  - apiGroups:
    - kibana.k8s.elastic.co
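Since the version-detection helpers are gone, the rendered webhook configuration should now always carry the v1 fields. A hedged spot check is shown below; the configuration name is the chart's manifestGen default and may differ in other installs.

```bash
# Hedged check: verify matchPolicy, admissionReviewVersions and sideEffects on the live webhook.
kubectl get validatingwebhookconfiguration elastic-webhook.k8s.elastic.co -o yaml \
  | grep -E 'matchPolicy|admissionReviewVersions|sideEffects'
```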
@@ -32,7 +32,7 @@ imagePullSecrets: []
resources:
  limits:
    cpu: 1
-    memory: 512Mi
+    memory: 1Gi
  requests:
    cpu: 100m
    memory: 150Mi
@@ -122,7 +122,7 @@ kubeAPIServerIP: null
telemetry:
  # disabled determines whether the operator periodically updates ECK telemetry data for Kibana to consume.
  disabled: false
-  # distibutionChannel denotes which distribution channel was used to install the operator.
+  # distributionChannel denotes which distribution channel was used to install the operator.
  distributionChannel: "helm"

# config values for the operator.
@@ -155,8 +155,16 @@ config:
  # certificatesRotateBefore defines when to rotate a certificate that is due to expire.
  certificatesRotateBefore: 24h

+  # exposedNodeLabels is an array of regular expressions of node labels which are allowed to be copied as annotations on Elasticsearch Pods.
+  exposedNodeLabels: [ "topology.kubernetes.io/.*", "failure-domain.beta.kubernetes.io/.*" ]
+
  # setDefaultSecurityContext determines whether a default security context is set on application containers created by the operator.
-  setDefaultSecurityContext: true
+  # *note* that the default option now is "auto-detect" to attempt to set this properly automatically when both running
+  # in an openshift cluster, and a standard kubernetes cluster. Valid values are as follows:
+  # "auto-detect" : auto detect
+  # "true" : set pod security context when creating resources.
+  # "false" : do not set pod security context when creating resources.
+  setDefaultSecurityContext: "auto-detect"

  # kubeClientTimeout sets the request timeout for Kubernetes API calls made by the operator.
  kubeClientTimeout: 60s
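A hedged way to confirm the new `exposedNodeLabels` default actually reaches the operator is to look at its ConfigMap; the namespace and ConfigMap name below are assumptions based on a typical elastic-system install.

```bash
# Sketch: the exposed-node-labels entry should show up in the operator's config.
kubectl -n elastic-system get configmap elastic-operator -o yaml | grep exposed-node-labels
```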
@@ -183,7 +191,7 @@ podMonitor:
  annotations: {}

  # namespace determines in which namespace the podMonitor will be deployed.
-  # If not set the podMonitor will be created in the namespace to release is installed into
+  # If not set the podMonitor will be created in the namespace where the Helm release is installed into
  # namespace: monitoring

  # interval specifies the interval at which metrics should be scraped
@@ -199,8 +207,8 @@ podMonitor:
  podMetricsEndpointConfig: {}
  # honorTimestamps: true

-# Internal use only
-internal:
+# Globals meant for internal use only
+global:
  # manifestGen specifies whether the chart is running under manifest generator.
  # This is used for tasks specific to generating the all-in-one.yaml file.
  manifestGen: false
@@ -208,4 +216,5 @@ internal:
  # Usually we do want that to happen (e.g. all-in-one.yaml) but, sometimes we don't (e.g. E2E tests).
  createOperatorNamespace: true
  # kubeVersion is the effective Kubernetes version we target when generating the all-in-one.yaml.
-  kubeVersion: 1.12.0
+  kubeVersion: 1.16.0
+
@@ -244,7 +244,7 @@ fluent-bit:

  image:
    #repository: public.ecr.aws/zero-downtime/fluent-bit
-    tag: 1.9.2
+    tag: 1.9.3

  serviceMonitor:
    enabled: false
@@ -18,7 +18,7 @@ Kubernetes: `>= 1.20.0`

| Repository | Name | Version |
|------------|------|---------|
-| | aws-ebs-csi-driver | 2.6.5 |
+| | aws-ebs-csi-driver | 2.6.6 |
| | aws-efs-csi-driver | 2.2.3 |
| | gemini | 1.0.0 |
| | lvm-localpv | 0.9.0 |
@@ -18,7 +18,7 @@
        "subdir": "contrib/mixin"
      }
    },
-    "version": "c3c908e39a1343abbd987a8ea3cf98a7a2703385",
+    "version": "08407ff7600eb16c4445d5f21c4fafaf19412e24",
    "sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
  },
  {
@@ -38,7 +38,7 @@
        "subdir": "grafana-builder"
      }
    },
-    "version": "c26cc0815dfd32e84d1caba3d9bbb75f2b119937",
+    "version": "37f8f9d015efac5f83c3f490a52de1d686c2cdc9",
    "sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc="
  },
  {
@@ -30,3 +30,5 @@ patch -i efs.patch -p0 --no-backup-if-mismatch
# Metrics
cd jsonnet
make render
+
+helm-docs
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.22.8-3
+version: 1.22.8-4
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -35,7 +35,9 @@ spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
-    {{- toYaml .Values.kubezero.syncPolicy | nindent 4 }}
+    {{- with .Values.kubezero.syncPolicy }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}

{{- include (print $name "-argo") $ }}
{{- end }}
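With the `with` guard, `kubezero.syncPolicy` is now optional and only rendered when it is set. The snippet below sketches what such a value could look like; the field names follow the ArgoCD syncPolicy schema and the file name is arbitrary, so treat it as an illustration rather than a required configuration.

```bash
# Hedged example values: enable automated sync for the generated Applications.
cat <<'EOF' > kubezero-syncpolicy.yaml
kubezero:
  syncPolicy:
    automated:
      prune: true
      selfHeal: false
EOF
```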
@@ -71,12 +71,14 @@ gateway:

telemetry:
  enabled: {{ $.Values.metrics.enabled }}
+{{- with index .Values "istio-ingress" "certificates" }}
certificates:
-{{- range $cert := index .Values "istio-ingress" "certificates" }}
+{{- range $cert := . }}
- name: {{ $cert.name }}
  dnsNames:
{{- toYaml $cert.dnsNames | nindent 4 }}
{{- end }}
+{{- end }}
proxyProtocol: {{ default true (index .Values "istio-ingress" "proxyProtocol") }}

{{- end }}
@@ -38,12 +38,16 @@ istio-ingress:
    chart: kubezero-istio-gateway
    namespace: istio-ingress
    targetRevision: 0.8.0
+    gateway:
+      service: {}

  istio-private-ingress:
    enabled: false
    chart: kubezero-istio-gateway
    namespace: istio-ingress
    targetRevision: 0.8.0
+    gateway:
+      service: {}

  metrics:
    enabled: false
@@ -52,8 +52,12 @@ Wait each time for controller to join and all pods running.
Might take a while ...

4. Migrate ArgoCD config for the cluster
-`./migrate_argo.sh <cluster/env/kubezero/application.yaml>`
-Adjust as needed, eg. ensure eck-operator is enabled if needed.
+```cat <cluster/env/kubezero/application.yaml> | ./releases/v1.22/migrate_argo.py```
+Adjust as needed...
+
+If the ECK operator is running in your cluster, make sure to replace the CRDs *BEFORE* committing the new KubeZero config!
+```kubectl replace -f https://download.elastic.co/downloads/eck/2.1.0/crds.yaml```

git add / commit / push
Watch ArgoCD do its work.
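Taken together, the documented migration boils down to roughly the following sequence. This is a hedged sketch only; the application.yaml path is an example and the CRD replacement is only needed when ECK is in use.

```bash
# Sketch of the v1.22 config migration flow described above.
kubectl replace -f https://download.elastic.co/downloads/eck/2.1.0/crds.yaml   # only if ECK is deployed
./releases/v1.22/migrate_argo.py < cluster/env/kubezero/application.yaml > application.new.yaml
mv application.new.yaml cluster/env/kubezero/application.yaml
git add cluster/env/kubezero/application.yaml && git commit -m "Upgrade KubeZero to 1.22.8-4" && git push
```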
@@ -63,4 +67,3 @@ once all new workers joined, drain old workers one by one,
finally reset `desired` for each worker ASG which will terminate the old workers.

## Known issues
@@ -160,9 +160,9 @@ if [ "$1" == 'upgrade' ]; then

  ######################

  # Could be removed with 1.23 as we now have persistent etcd
  # Execute cluster backup to allow new controllers to join
  kubectl create job backup-cluster-now --from=cronjob/kubezero-backup -n kube-system

  # That might take a while as the backup pod needs the CNIs to come online etc.
  retry 10 30 40 kubectl wait --for=condition=complete job/backup-cluster-now -n kube-system && kubectl delete job backup-cluster-now -n kube-system

@@ -192,28 +192,31 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
    rm -rf ${HOSTFS}/var/lib/etcd/member

  else
-    retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
+    # Todo: 1.23
+    # Workaround for 1.22 as the final backup is still tagged with the previous version from the cronjob
+    #retry 10 60 30 restic restore latest --no-lock -t / --tag $VERSION
+    retry 10 60 30 restic restore latest --no-lock -t /

-    # Make last etcd snapshot available
-    cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes
+    # Make last etcd snapshot available
+    cp ${WORKDIR}/etcd_snapshot ${HOSTFS}/etc/kubernetes

-    # Put PKI in place
-    cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes
+    # Put PKI in place
+    cp -r ${WORKDIR}/pki ${HOSTFS}/etc/kubernetes

-    # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
-    cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config
+    # Always use kubeadm kubectl config to never run into chicken egg with custom auth hooks
+    cp ${WORKDIR}/admin.conf ${HOSTFS}/root/.kube/config

    # etcd needs to resync during join
-    if [[ "$1" =~ "^(restore)$" ]]; then
-      # Only restore etcd data set if none exists already
-      if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
-        etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
-          --name $ETCD_NODENAME \
-          --data-dir="${HOSTFS}/var/lib/etcd" \
-          --initial-cluster-token etcd-${CLUSTERNAME} \
-          --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
-          --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
-      fi
+    if [[ "$1" =~ "^(restore)$" ]]; then
+      # Only restore etcd data set if none exists already
+      if [ ! -d ${HOSTFS}/var/lib/etcd/member ]; then
+        etcdctl snapshot restore ${HOSTFS}/etc/kubernetes/etcd_snapshot \
+          --name $ETCD_NODENAME \
+          --data-dir="${HOSTFS}/var/lib/etcd" \
+          --initial-cluster-token etcd-${CLUSTERNAME} \
+          --initial-advertise-peer-urls https://${ETCD_NODENAME}:2380 \
+          --initial-cluster $ETCD_NODENAME=https://${ETCD_NODENAME}:2380
+      fi
    fi
  fi

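After a restore or join it is worth confirming that etcd membership looks sane before moving on. A hedged one-liner that reuses the endpoints variable the script already defines:

```bash
# Sketch: list etcd members as seen from this controller (uses the script's environment).
etcdctl member list --endpoints=$etcd_endpoints -w table
```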
@@ -247,8 +250,8 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
  # Failsafe / etcd on ephmeral: we were a member but our dataset is missing
  # -> remove former self so we can re-join
  if [ -n "$MY_ID" -a ! -d ${HOSTFS}/var/lib/etcd/member ]; then
-    # Remove former self first
-    [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
+    # Remove former self first
+    [ -n "$MY_ID" ] && retry 12 5 5 etcdctl member remove $MY_ID --endpoints=$etcd_endpoints
    MY_ID=""
  fi

@@ -315,7 +318,7 @@ elif [[ "$1" =~ "^(bootstrap|restore|join)$" ]]; then
  fi

  # install / update network and addons
-  if [[ "$1" =~ "^(bootstrap|join|restore)$" ]]; then
+  if [[ "$1" =~ "^(bootstrap)$" ]]; then
    # network
    yq eval '.network // ""' ${HOSTFS}/etc/kubernetes/kubezero.yaml > _values.yaml
    helm template $CHARTS/kubezero-network --namespace kube-system --include-crds --name-template network \
releases/v1.22/migrate_argo.py (new executable file, 96 lines)
@@ -0,0 +1,96 @@
#!/usr/bin/env python3
import sys
import argparse
import io
import ruamel.yaml


yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True
yaml.explicit_start = True
yaml.indent(mapping=2, sequence=4, offset=2)


parser = argparse.ArgumentParser(description="Migrate KubeZero ArgoCD Application values")
parser.add_argument(
    "--version",
    dest="version",
    default="1.22.8-4",
    action="store",
    required=False,
    help="Update KubeZero version",
)

args = parser.parse_args()

application = yaml.load(sys.stdin)

# Set version from cmd line
if args.version:
    application["spec"]["source"]["targetRevision"] = args.version

# Extract Helm values
values = yaml.load(application["spec"]["source"]["helm"]["values"])

### Do your thing

# New Istio Gateway charts
if "private" in values["istio-ingress"]:
    values["istio-private-ingress"] = {
        "enabled": True,
        "certificates": values["istio-ingress"]["private"]["certificates"].copy()
    }

    if "gateway" in values["istio-ingress"]["private"]:
        values["istio-private-ingress"]["gateway"] = {}

        try:
            values["istio-private-ingress"]["gateway"]["replicaCount"] = values["istio-ingress"]["private"]["gateway"]["replicaCount"]
        except KeyError:
            pass

        if "ports" in values["istio-ingress"]["private"]["gateway"]:
            values["istio-private-ingress"]["gateway"]["service"] = {}
            values["istio-private-ingress"]["gateway"]["service"]["ports"] = []
            for port in values["istio-ingress"]["private"]["gateway"]["ports"]:
                if port["name"] not in ["status-port", "http2", "https"]:
                    values["istio-private-ingress"]["gateway"]["service"]["ports"].append(port)

    values["istio-ingress"].pop("private")

if "public" in values["istio-ingress"]:
    values["istio-ingress"]["certificates"] = values["istio-ingress"]["public"]["certificates"].copy()

    if "gateway" in values["istio-ingress"]["public"]:
        values["istio-ingress"]["gateway"] = {}

        try:
            values["istio-ingress"]["gateway"]["replicaCount"] = values["istio-ingress"]["public"]["gateway"]["replicaCount"]
        except KeyError:
            pass

        if "ports" in values["istio-ingress"]["public"]["gateway"]:
            values["istio-ingress"]["gateway"]["service"] = {}
            values["istio-ingress"]["gateway"]["service"]["ports"] = []
            for port in values["istio-ingress"]["public"]["gateway"]["ports"]:
                if port["name"] not in ["status-port", "http2", "https"]:
                    values["istio-ingress"]["gateway"]["service"]["ports"].append(port)

    values["istio-ingress"].pop("public")

if "global" in values["istio-ingress"]:
    values["istio-ingress"].pop("global")

# Remove Kiam
if "kiam" in values:
    values.pop("kiam")

### End

# Merge new values
buffer = io.StringIO()
yaml.dump(values, buffer)
application["spec"]["source"]["helm"]["values"] = buffer.getvalue()

# Dump final yaml
yaml.dump(application, sys.stdout)
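A hedged usage sketch for the new helper: it reads the Application manifest from stdin and writes the migrated manifest to stdout, and it needs ruamel.yaml installed. The input path below is an example, not a fixed location.

```bash
# Rewrite an ArgoCD application manifest, keeping a backup of the original.
pip install ruamel.yaml
cp cluster/env/kubezero/application.yaml application.yaml.bak
./releases/v1.22/migrate_argo.py < application.yaml.bak > cluster/env/kubezero/application.yaml
```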
@@ -1,17 +0,0 @@
#!/bin/bash -x

YAML=$1

# Convert keys
yq eval -i '
  .spec.source.targetRevision="1.22.8-2"
' $YAML

# Extract values
yq eval '.spec.source.helm.values' $1 > _values.yaml


# merge _values.yaml back
yq eval -Pi '.spec.source.helm.values |= strload("_values.yaml")' $YAML

rm -f _values.yaml
@@ -2,11 +2,52 @@

VERSION="v1.22.8"

-[ -n "$DEBUG" ] && DEBUG=1
+[ -n "$DEBUG" ] && set -x

# unset any AWS_DEFAULT_PROFILE as it will break aws-iam-auth
unset AWS_DEFAULT_PROFILE

+echo "Deploying node upgrade daemonSet..."
+cat <<EOF | kubectl apply -f -
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: kubezero-upgrade-${VERSION//.}
+  namespace: kube-system
+  labels:
+    app: kubezero-upgrade
+spec:
+  selector:
+    matchLabels:
+      name: kubezero-upgrade-${VERSION//.}
+  template:
+    metadata:
+      labels:
+        name: kubezero-upgrade-${VERSION//.}
+    spec:
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+      containers:
+      - name: kubezero-upgrade-${VERSION//.}
+        image: busybox
+        command: ["/bin/sh"]
+        args: ["-c", "[ -d /host/opt/cni/bin ] && mkdir -p /host/usr/libexec/cni && cp /host/opt/cni/bin/* /host/usr/libexec/cni ; sleep 300" ]
+        volumeMounts:
+        - name: host
+          mountPath: /host
+      volumes:
+      - name: host
+        hostPath:
+          path: /
+          type: Directory
+EOF
+
+kubectl rollout status daemonset -n kube-system kubezero-upgrade-${VERSION//.} --timeout 300s
+kubectl delete ds kubezero-upgrade-${VERSION//.} -n kube-system
+
+
echo "Deploying cluster upgrade job ..."

cat <<EOF | kubectl apply -f -
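The DaemonSet above copies the CNI binaries from /opt/cni/bin to /usr/libexec/cni on every node before it is deleted again. A hedged way to watch it roll out and confirm it reached all nodes:

```bash
# Sketch: the upgrade DaemonSet should report the same number of desired and ready pods,
# after which each node has the CNI binaries under /usr/libexec/cni.
kubectl -n kube-system get ds | grep kubezero-upgrade
kubectl -n kube-system get pods -o wide | grep kubezero-upgrade
```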
@@ -1,22 +0,0 @@
#!/bin/bash
set -o pipefail

# Meant for testing only !!!

# This sets the Kubernetes Version in SSM
# Make sure your AWS Profile and Region points to the right direction ...

CONGLOMERATE=$1
VERSION=$2

P="/cloudbender/${CONGLOMERATE}/kubecontrol/meta/clusterversion"

export AWS_DEFAULT_OUTPUT=text

# First verify we point to an existing clusterVersion
OLD=$(aws ssm get-parameter --name $P --with-decryption --query 'Parameter.Value' | base64 -d) || \
  { echo "Cannot find an existing SSM parameter. Make sure your AWS profile and parameters are correct."; exit 1; }

echo "Current version: $OLD"
aws ssm put-parameter --name $P --type SecureString --value "$(echo "$VERSION" | base64 -w0)" --overwrite
echo "New version: $VERSION"