From 8c1f45cae130f6b33a1f6d69438260888a3a85e5 Mon Sep 17 00:00:00 2001 From: Stefan Reimer Date: Thu, 13 Aug 2020 19:44:50 +0100 Subject: [PATCH] Various logging fixes to get a first version of ES and Kibana running --- charts/kubezero-logging/Chart.yaml | 4 ++-- charts/kubezero-logging/eck/all-in-one.yaml | 12 +++++----- charts/kubezero-logging/eck/update.sh | 2 +- .../templates/eck-operator.yaml | 12 +++++----- .../templates/elasticsearch.yaml | 13 +++++++--- .../templates/es-servicemonitor.yaml | 20 ---------------- .../templates/{es-users.yaml => secrets.yaml} | 6 +++-- .../templates/servicemonitor.yaml | 24 +++++++++++++++++++ charts/kubezero-logging/values-test.yaml | 8 +++++-- charts/kubezero-logging/values.yaml | 8 +++++-- 10 files changed, 65 insertions(+), 44 deletions(-) delete mode 100644 charts/kubezero-logging/templates/es-servicemonitor.yaml rename charts/kubezero-logging/templates/{es-users.yaml => secrets.yaml} (55%) create mode 100644 charts/kubezero-logging/templates/servicemonitor.yaml diff --git a/charts/kubezero-logging/Chart.yaml b/charts/kubezero-logging/Chart.yaml index 22e5aafb..87fc2f01 100644 --- a/charts/kubezero-logging/Chart.yaml +++ b/charts/kubezero-logging/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: kubezero-logging description: KubeZero Umbrella Chart for complete EFK stack type: application -version: 0.0.1 -appVersion: 1.2.0 +version: 0.0.2 +appVersion: 1.2.1 home: https://kubezero.com icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png keywords: diff --git a/charts/kubezero-logging/eck/all-in-one.yaml b/charts/kubezero-logging/eck/all-in-one.yaml index 40cada8f..e2a15744 100644 --- a/charts/kubezero-logging/eck/all-in-one.yaml +++ b/charts/kubezero-logging/eck/all-in-one.yaml @@ -1028,8 +1028,8 @@ spec: description: Image is the Elasticsearch Docker image to deploy. 
type: string nodeSets: - description: 'NodeSets allow specifying groups of Elasticsearch nodes - sharing the same configuration and Pod templates. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-orchestration.html' + description: NodeSets allow specifying groups of Elasticsearch nodes + sharing the same configuration and Pod templates. items: description: NodeSet is the specification for a group of Elasticsearch nodes sharing the same configuration and a Pod template. @@ -1054,12 +1054,12 @@ spec: the Pods belonging to this NodeSet. type: object volumeClaimTemplates: - description: 'VolumeClaimTemplates is a list of persistent volume + description: VolumeClaimTemplates is a list of persistent volume claims to be used by each Pod in this NodeSet. Every claim in this list must have a matching volumeMount in one of the containers defined in the PodTemplate. Items defined here take precedence over any default claims added by the operator with the same - name. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html' + name. 
items: description: PersistentVolumeClaim is a user's request for and claim to a persistent volume @@ -2829,7 +2829,7 @@ spec: terminationGracePeriodSeconds: 10 serviceAccountName: elastic-operator containers: - - image: "docker.elastic.co/eck/eck-operator:1.2.0" + - image: "docker.elastic.co/eck/eck-operator:1.2.1" imagePullPolicy: IfNotPresent name: manager args: @@ -2849,7 +2849,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: OPERATOR_IMAGE - value: "docker.elastic.co/eck/eck-operator:1.2.0" + value: "docker.elastic.co/eck/eck-operator:1.2.1" - name: WEBHOOK_SECRET value: "elastic-webhook-server-cert" resources: diff --git a/charts/kubezero-logging/eck/update.sh b/charts/kubezero-logging/eck/update.sh index e67214a4..7083513a 100755 --- a/charts/kubezero-logging/eck/update.sh +++ b/charts/kubezero-logging/eck/update.sh @@ -1,6 +1,6 @@ #!/bin/bash -ECK_VERSION=1.2.0 +ECK_VERSION=1.2.1 curl -o all-in-one.yaml https://download.elastic.co/downloads/eck/${ECK_VERSION}/all-in-one.yaml diff --git a/charts/kubezero-logging/templates/eck-operator.yaml b/charts/kubezero-logging/templates/eck-operator.yaml index 82cc1375..54bb221b 100644 --- a/charts/kubezero-logging/templates/eck-operator.yaml +++ b/charts/kubezero-logging/templates/eck-operator.yaml @@ -1031,8 +1031,8 @@ spec: description: Image is the Elasticsearch Docker image to deploy. type: string nodeSets: - description: 'NodeSets allow specifying groups of Elasticsearch nodes - sharing the same configuration and Pod templates. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-orchestration.html' + description: NodeSets allow specifying groups of Elasticsearch nodes + sharing the same configuration and Pod templates. items: description: NodeSet is the specification for a group of Elasticsearch nodes sharing the same configuration and a Pod template. @@ -1057,12 +1057,12 @@ spec: the Pods belonging to this NodeSet. 
type: object volumeClaimTemplates: - description: 'VolumeClaimTemplates is a list of persistent volume + description: VolumeClaimTemplates is a list of persistent volume claims to be used by each Pod in this NodeSet. Every claim in this list must have a matching volumeMount in one of the containers defined in the PodTemplate. Items defined here take precedence over any default claims added by the operator with the same - name. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html' + name. items: description: PersistentVolumeClaim is a user's request for and claim to a persistent volume @@ -3021,10 +3021,10 @@ spec: fieldRef: fieldPath: metadata.namespace - name: OPERATOR_IMAGE - value: docker.elastic.co/eck/eck-operator:1.2.0 + value: docker.elastic.co/eck/eck-operator:1.2.1 - name: WEBHOOK_SECRET value: elastic-webhook-server-cert - image: docker.elastic.co/eck/eck-operator:1.2.0 + image: docker.elastic.co/eck/eck-operator:1.2.1 imagePullPolicy: IfNotPresent name: manager ports: diff --git a/charts/kubezero-logging/templates/elasticsearch.yaml b/charts/kubezero-logging/templates/elasticsearch.yaml index 25ad9940..94b8f1f0 100644 --- a/charts/kubezero-logging/templates/elasticsearch.yaml +++ b/charts/kubezero-logging/templates/elasticsearch.yaml @@ -25,16 +25,23 @@ spec: - {{- if $.Values.es.s3_snapshot_iamrole }} + {{- if $.Values.es.s3Snapshot.iamrole }} metadata: annotations: - iam.amazonaws.com/role: {{ $.Values.es.s3_snapshot_iamrole }} + iam.amazonaws.com/role: {{ $.Values.es.s3Snapshot.iamrole }} {{- end }} spec: + {{- if or $.Values.es.prometheus $.Values.es.s3Snapshot.enabled }} initContainers: - name: install-plugins command: - sh - -c - | - bin/elasticsearch-plugin install --batch repository-s3 && bin/elasticsearch-plugin install --batch https://github.com/vvanholl/elasticsearch-prometheus-exporter/releases/download/{{ $.Values.version }}.0/prometheus-exporter-{{ $.Values.version }}.0.zip + {{- if $.Values.es.s3Snapshot.enabled }} + bin/elasticsearch-plugin 
install --batch repository-s3 + {{- end }} + {{- if $.Values.es.prometheus }} + bin/elasticsearch-plugin install --batch https://github.com/vvanholl/elasticsearch-prometheus-exporter/releases/download/{{ $.Values.version }}.0/prometheus-exporter-{{ $.Values.version }}.0.zip + {{- end }} + {{- end }} containers: - name: elasticsearch resources: @@ -52,7 +59,7 @@ spec: - podAffinityTerm: labelSelector: matchLabels: - elasticsearch.k8s.elastic.co/cluster-name: {{ $.Values.name }} + elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }} topologyKey: kubernetes.io/hostname {{- if .zone }} nodeAffinity: diff --git a/charts/kubezero-logging/templates/es-servicemonitor.yaml b/charts/kubezero-logging/templates/es-servicemonitor.yaml deleted file mode 100644 index 3a574b21..00000000 --- a/charts/kubezero-logging/templates/es-servicemonitor.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: es-{{ .Values.name }} - namespace: {{ .Release.Namespace }} -spec: - endpoints: - - basicAuth: - username: - name: {{ .Values.name }}-es-elastic-user - key: username - password: - name: {{ .Values.name }}-es-elastic-user - key: elastic - port: http - path: /_prometheus/metrics - selector: - matchLabels: - common.k8s.elastic.co/type: elasticsearch - elasticsearch.k8s.elastic.co/cluster-name: {{ .Values.name }} diff --git a/charts/kubezero-logging/templates/es-users.yaml b/charts/kubezero-logging/templates/secrets.yaml similarity index 55% rename from charts/kubezero-logging/templates/es-users.yaml rename to charts/kubezero-logging/templates/secrets.yaml index afbf1883..eb7d3951 100644 --- a/charts/kubezero-logging/templates/es-users.yaml +++ b/charts/kubezero-logging/templates/secrets.yaml @@ -4,9 +4,11 @@ type: Opaque metadata: labels: common.k8s.elastic.co/type: elasticsearch - elasticsearch.k8s.elastic.co/cluster-name: {{ .Values.name }} - name: {{ .Values.name }}-es-elastic-user + 
elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }} +{{ include "kubezero-lib.labels" . | indent 4 }} + name: {{ template "kubezero-lib.fullname" $ }}-elastic-user namespace: {{ .Release.Namespace }} +# Initial password for the built-in elastic superuser data: username: {{ "elastic" | b64enc | quote }} elastic: {{ .Values.es.elastic_password | b64enc | quote }} diff --git a/charts/kubezero-logging/templates/servicemonitor.yaml b/charts/kubezero-logging/templates/servicemonitor.yaml new file mode 100644 index 00000000..208b31cd --- /dev/null +++ b/charts/kubezero-logging/templates/servicemonitor.yaml @@ -0,0 +1,24 @@ +{{- if .Values.es.prometheus }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kubezero-lib.fullname" . }}-es + namespace: {{ .Release.Namespace }} + labels: +{{ include "kubezero-lib.labels" . | indent 4 }} +spec: + endpoints: + - basicAuth: + username: + name: {{ template "kubezero-lib.fullname" $ }}-es-elastic-user + key: username + password: + name: {{ template "kubezero-lib.fullname" $ }}-es-elastic-user + key: elastic + port: http + path: /_prometheus/metrics + selector: + matchLabels: + common.k8s.elastic.co/type: elasticsearch + elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }} +{{- end }} diff --git a/charts/kubezero-logging/values-test.yaml b/charts/kubezero-logging/values-test.yaml index f15d69ab..b67a8f95 100644 --- a/charts/kubezero-logging/values-test.yaml +++ b/charts/kubezero-logging/values-test.yaml @@ -15,8 +15,12 @@ es: size: 16Gi class: local-sc-xfs zone: us-west-2a - s3_snapshot_iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots - elastic_password: "" # super_secret_elastic_password + s3Snapshot: + enabled: true + iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots + elastic_password: "" # super_secret_elastic_password + + prometheus: true kibana: replicas: 1 diff --git a/charts/kubezero-logging/values.yaml 
b/charts/kubezero-logging/values.yaml index 5e43797f..a0304fb2 100644 --- a/charts/kubezero-logging/values.yaml +++ b/charts/kubezero-logging/values.yaml @@ -3,7 +3,7 @@ # Declare variables to be passed into your templates. # This is for backwards compatibility with older zdt-logging setup -# fullnameOverride: logging +# fullnameOverride: "" # Version for ElasticSearch and Kibana have to match so we define it at top-level version: 7.8.1 @@ -15,9 +15,13 @@ es: # size: 16Gi # class: local-sc-xfs # zone: us-west-2a - s3_snapshot_iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots + s3Snapshot: + enabled: false + iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots elastic_password: "" # super_secret_elastic_password + prometheus: false + kibana: #replicas: 1 #servicename: kibana.example.com