Various logging fixes to get a first version of ES and Kibana running

This commit is contained in:
Stefan Reimer 2020-08-13 19:44:50 +01:00
parent f703a6d7e1
commit 7eabb00e72
10 changed files with 65 additions and 44 deletions

View File

@@ -2,8 +2,8 @@ apiVersion: v2
name: kubezero-logging
description: KubeZero Umbrella Chart for complete EFK stack
type: application
-version: 0.0.1
-appVersion: 1.2.0
+version: 0.0.2
+appVersion: 1.2.1
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -1028,8 +1028,8 @@ spec:
description: Image is the Elasticsearch Docker image to deploy.
type: string
nodeSets:
-description: 'NodeSets allow specifying groups of Elasticsearch nodes
-sharing the same configuration and Pod templates. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-orchestration.html'
+description: NodeSets allow specifying groups of Elasticsearch nodes
+sharing the same configuration and Pod templates.
items:
description: NodeSet is the specification for a group of Elasticsearch
nodes sharing the same configuration and a Pod template.
@@ -1054,12 +1054,12 @@ spec:
the Pods belonging to this NodeSet.
type: object
volumeClaimTemplates:
-description: 'VolumeClaimTemplates is a list of persistent volume
+description: VolumeClaimTemplates is a list of persistent volume
claims to be used by each Pod in this NodeSet. Every claim in
this list must have a matching volumeMount in one of the containers
defined in the PodTemplate. Items defined here take precedence
over any default claims added by the operator with the same
-name. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html'
+name.
items:
description: PersistentVolumeClaim is a user's request for and
claim to a persistent volume
@@ -2829,7 +2829,7 @@ spec:
terminationGracePeriodSeconds: 10
serviceAccountName: elastic-operator
containers:
-- image: "docker.elastic.co/eck/eck-operator:1.2.0"
+- image: "docker.elastic.co/eck/eck-operator:1.2.1"
imagePullPolicy: IfNotPresent
name: manager
args:
@@ -2849,7 +2849,7 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: OPERATOR_IMAGE
value: "docker.elastic.co/eck/eck-operator:1.2.0"
value: "docker.elastic.co/eck/eck-operator:1.2.1"
- name: WEBHOOK_SECRET
value: "elastic-webhook-server-cert"
resources:

View File

@@ -1,6 +1,6 @@
#!/bin/bash
-ECK_VERSION=1.2.0
+ECK_VERSION=1.2.1
curl -o all-in-one.yaml https://download.elastic.co/downloads/eck/${ECK_VERSION}/all-in-one.yaml

View File

@@ -1031,8 +1031,8 @@ spec:
description: Image is the Elasticsearch Docker image to deploy.
type: string
nodeSets:
-description: 'NodeSets allow specifying groups of Elasticsearch nodes
-sharing the same configuration and Pod templates. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-orchestration.html'
+description: NodeSets allow specifying groups of Elasticsearch nodes
+sharing the same configuration and Pod templates.
items:
description: NodeSet is the specification for a group of Elasticsearch
nodes sharing the same configuration and a Pod template.
@@ -1057,12 +1057,12 @@ spec:
the Pods belonging to this NodeSet.
type: object
volumeClaimTemplates:
-description: 'VolumeClaimTemplates is a list of persistent volume
+description: VolumeClaimTemplates is a list of persistent volume
claims to be used by each Pod in this NodeSet. Every claim in
this list must have a matching volumeMount in one of the containers
defined in the PodTemplate. Items defined here take precedence
over any default claims added by the operator with the same
-name. See: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-volume-claim-templates.html'
+name.
items:
description: PersistentVolumeClaim is a user's request for and
claim to a persistent volume
@@ -3021,10 +3021,10 @@ spec:
fieldRef:
fieldPath: metadata.namespace
- name: OPERATOR_IMAGE
-value: docker.elastic.co/eck/eck-operator:1.2.0
+value: docker.elastic.co/eck/eck-operator:1.2.1
- name: WEBHOOK_SECRET
value: elastic-webhook-server-cert
-image: docker.elastic.co/eck/eck-operator:1.2.0
+image: docker.elastic.co/eck/eck-operator:1.2.1
imagePullPolicy: IfNotPresent
name: manager
ports:

View File

@@ -25,16 +25,23 @@ spec:
{{- if $.Values.es.s3_snapshot_iamrole }}
metadata:
annotations:
-iam.amazonaws.com/role: {{ $.Values.es.s3_snapshot_iamrole }}
+iam.amazonaws.com/role: {{ $.Values.es.s3Snapshot.iamrole }}
{{- end }}
spec:
+{{- if or $.Values.es.prometheus $.Values.es.s3Snapshot.enabled }}
initContainers:
- name: install-plugins
command:
- sh
- -c
- |
-bin/elasticsearch-plugin install --batch repository-s3 && bin/elasticsearch-plugin install --batch https://github.com/vvanholl/elasticsearch-prometheus-exporter/releases/download/{{ $.Values.version }}.0/prometheus-exporter-{{ $.Values.version }}.0.zip
+{{- if $.Values.es.s3Snapshot.enabled }}
+bin/elasticsearch-plugin install --batch repository-s3
+{{- end }}
+{{- if $.Values.es.prometheus }}
+bin/elasticsearch-plugin install --batch https://github.com/vvanholl/elasticsearch-prometheus-exporter/releases/download/{{ $.Values.version }}.0/prometheus-exporter-{{ $.Values.version }}.0.zip
+{{- end }}
+{{- end }}
containers:
- name: elasticsearch
resources:
@@ -52,7 +59,7 @@ spec:
- podAffinityTerm:
labelSelector:
matchLabels:
-elasticsearch.k8s.elastic.co/cluster-name: {{ $.Values.name }}
+elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }}
topologyKey: kubernetes.io/hostname
{{- if .zone }}
nodeAffinity:
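
For reference: with es.s3Snapshot.enabled and es.prometheus both true and version set to 7.8.1, the initContainer above should render roughly as follows (a sketch of the expected output, not a verbatim render):

# sketch: assumes es.s3Snapshot.enabled=true, es.prometheus=true, version=7.8.1
initContainers:
- name: install-plugins
  command:
  - sh
  - -c
  - |
    bin/elasticsearch-plugin install --batch repository-s3
    bin/elasticsearch-plugin install --batch https://github.com/vvanholl/elasticsearch-prometheus-exporter/releases/download/7.8.1.0/prometheus-exporter-7.8.1.0.zip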

View File

@@ -1,20 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  name: es-{{ .Values.name }}
-  namespace: {{ .Release.Namespace }}
-spec:
-  endpoints:
-  - basicAuth:
-      username:
-        name: {{ .Values.name }}-es-elastic-user
-        key: username
-      password:
-        name: {{ .Values.name }}-es-elastic-user
-        key: elastic
-    port: http
-    path: /_prometheus/metrics
-  selector:
-    matchLabels:
-      common.k8s.elastic.co/type: elasticsearch
-      elasticsearch.k8s.elastic.co/cluster-name: {{ .Values.name }}

View File

@@ -4,9 +4,11 @@ type: Opaque
metadata:
labels:
common.k8s.elastic.co/type: elasticsearch
-elasticsearch.k8s.elastic.co/cluster-name: {{ .Values.name }}
-name: {{ .Values.name }}-es-elastic-user
+elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }}
+name: {{ template "kubezero-lib.fullname" $ }}-elastic-user
namespace: {{ .Release.Namespace }}
+labels:
+{{ include "kubezero-lib.labels" . | indent 4 }}
data:
username: {{ "elastic" | b64enc | quote }}
elastic: {{ .Values.es.elastic_password | b64enc | quote }}
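
Assuming the kubezero-lib.fullname helper resolves to "logging" (a hypothetical value), the Secret above should render roughly as:

# sketch of the rendered Secret; "logging" is a hypothetical fullname
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: logging-elastic-user
data:
  username: ZWxhc3RpYw==                    # b64enc of "elastic"
  elastic: <b64enc of es.elastic_password>  # placeholder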

View File

@@ -0,0 +1,24 @@
+{{- if .Values.es.prometheus }}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ template "kubezero-lib.fullname" . }}-es
+  namespace: {{ .Release.Namespace }}
+  labels:
+{{ include "kubezero-lib.labels" . | indent 4 }}
+spec:
+  endpoints:
+  - basicAuth:
+      username:
+        name: {{ template "kubezero-lib.fullname" $ }}-es-elastic-user
+        key: username
+      password:
+        name: {{ template "kubezero-lib.fullname" $ }}-es-elastic-user
+        key: elastic
+    port: http
+    path: /_prometheus/metrics
+  selector:
+    matchLabels:
+      common.k8s.elastic.co/type: elasticsearch
+      elasticsearch.k8s.elastic.co/cluster-name: {{ template "kubezero-lib.fullname" $ }}
+{{- end }}
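
The ServiceMonitor is gated on es.prometheus, so a minimal values override to enable scraping might be:

# hypothetical override snippet
es:
  prometheus: true

The basicAuth entries point at the <fullname>-es-elastic-user secret, which matches the naming of the credentials secret ECK itself creates for the built-in elastic user.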

View File

@@ -15,8 +15,12 @@ es:
size: 16Gi
class: local-sc-xfs
zone: us-west-2a
-s3_snapshot_iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
-elastic_password: "" # super_secret_elastic_password
+s3Snapshot:
+  enabled: true
+  iamrole: "dfsf" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
+elastic_password: "dsfsfs" # super_secret_elastic_password
+prometheus: true
kibana:
replicas: 1

View File

@@ -3,7 +3,7 @@
# Declare variables to be passed into your templates.
# This is for backwards compatibility with older zdt-logging setup
-# fullnameOverride: logging
+# fullnameOverride: ""
# Version for ElasticSearch and Kibana have to match so we define it at top-level
version: 7.8.1
@@ -15,9 +15,13 @@ es:
# size: 16Gi
# class: local-sc-xfs
# zone: us-west-2a
-s3_snapshot_iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
+s3Snapshot:
+  enabled: false
+  iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
elastic_password: "" # super_secret_elastic_password
+prometheus: false
kibana:
#replicas: 1
#servicename: kibana.example.com
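
A minimal override file exercising the new toggles might look like this (all values are placeholders):

# my-values.yaml - hypothetical override, placeholders only
es:
  s3Snapshot:
    enabled: true
    iamrole: "" # INSERT_CLOUDFORMATION_OUTPUT_ElasticSearchSnapshots
  elastic_password: "" # super_secret_elastic_password
  prometheus: true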