fix: improved CRD handling, network chart version bumps, new HAProxy support, ArgoCD auth fix

Stefan Reimer 2024-06-10 17:00:29 +00:00
parent 916c8a0c02
commit 2acfa302b4
12 changed files with 288 additions and 6 deletions

View File

@@ -109,11 +109,18 @@ function delete_ns() {
}
# Extract crds via helm calls and apply delta=crds only
# Extract crds via helm calls
function _crds() {
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --skip-crds --set ${module}.installCRDs=false -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-no-crds.yaml
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds --set ${module}.installCRDs=true -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION > $WORKDIR/helm-crds.yaml
diff -e $WORKDIR/helm-no-crds.yaml $WORKDIR/helm-crds.yaml | head -n-1 | tail -n+2 > $WORKDIR/crds.yaml
helm secrets --evaluate-templates template $(chart_location $chart) -n $namespace --name-template $module $targetRevision --include-crds --set ${module}.installCRDs=true -f $WORKDIR/values.yaml $API_VERSIONS --kube-version $KUBE_VERSION $@ | python3 -c '
#!/usr/bin/python3
import yaml
import sys
for manifest in yaml.safe_load_all(sys.stdin):
  if manifest:
    if "kind" in manifest and manifest["kind"] == "CustomResourceDefinition":
      print("---")
      print(yaml.dump(manifest))' > $WORKDIR/crds.yaml
# Only apply if there are actually any crds
if [ -s $WORKDIR/crds.yaml ]; then
@@ -134,6 +141,8 @@ import sys
for manifest in yaml.safe_load_all(sys.stdin):
  if manifest:
    if "kind" in manifest and manifest["kind"] == "CustomResourceDefinition":
      continue
    if "metadata" in manifest and "namespace" not in manifest["metadata"]:
      manifest["metadata"]["namespace"] = sys.argv[1]
    print("---")

View File

@@ -19,7 +19,7 @@ spec:
{{- toYaml (index .Values "argo-cd" "istio" "ipBlocks") | nindent 8 }}
    to:
    - operation:
        hosts: [{{ index .Values "argo-cd" "configs" "cm" "url" | quote }}]
        hosts: [{{ get (urlParse (index .Values "argo-cd" "configs" "cm" "url")) "host" }}]
    when:
    - key: connection.sni
      values:
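
The fix works because connection.sni carries a bare hostname while configs.cm.url is a full URL; quoting the raw URL, scheme and all, could never match. Sprig's urlParse reduces it to the host part, roughly equivalent to the following Python, using a hypothetical URL value:

from urllib.parse import urlparse

# Hypothetical value of .Values."argo-cd".configs.cm.url
url = "https://argocd.example.com"

# Sprig's urlParse returns a map whose "host" entry is what the
# AuthorizationPolicy now matches against connection.sni.
host = urlparse(url).netloc
print(host)  # argocd.example.com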

View File

@@ -0,0 +1,72 @@
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##################################################################################################
# httpbin service
##################################################################################################
apiVersion: v1
kind: ServiceAccount
metadata:
  name: httpbin
---
apiVersion: v1
kind: Service
metadata:
  name: httpbin
  labels:
    app: httpbin
    service: httpbin
spec:
  ports:
  - name: http
    port: 8000
    targetPort: 8080
  selector:
    app: httpbin
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpbin
spec:
  replicas: 1
  selector:
    matchLabels:
      app: httpbin
      version: v1
  template:
    metadata:
      labels:
        app: httpbin
        version: v1
    spec:
      serviceAccountName: httpbin
      containers:
      - image: docker.io/kong/httpbin
        imagePullPolicy: IfNotPresent
        name: httpbin
        # Same as found in Dockerfile's CMD but using an unprivileged port
        command:
        - gunicorn
        - -b
        - 0.0.0.0:8080
        - httpbin:app
        - -k
        - gevent
        env:
        # Tells pipenv to use a writable directory instead of $HOME
        - name: WORKON_HOME
          value: /tmp
        ports:
        - containerPort: 8080

View File

@@ -0,0 +1,15 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: httpbin
spec:
  hosts:
  - "httpbin.example.com"
  gateways:
  - istio-ingress/ingressgateway
  http:
  - route:
    - destination:
        port:
          number: 8000
        host: httpbin
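
To verify the route end to end, a request to the ingress gateway with the matching Host header should reach httpbin; a minimal smoke test, with the gateway address as a placeholder:

import requests

# Placeholder address for the istio-ingress gateway; not part of the commit.
INGRESS = "http://192.0.2.10"

# The VirtualService above matches Host "httpbin.example.com" and routes
# to the httpbin Service on port 8000.
resp = requests.get(f"{INGRESS}/get",
                    headers={"Host": "httpbin.example.com"}, timeout=5)
print(resp.status_code)  # expect 200 once the sample app is up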

View File

@@ -59,8 +59,52 @@ Kubernetes: `>= 1.26.0`
| cilium.resources.limits.memory | string | `"1024Mi"` | |
| cilium.resources.requests.cpu | string | `"10m"` | |
| cilium.resources.requests.memory | string | `"256Mi"` | |
| cilium.routingMode | string | `"tunnel"` | |
| cilium.tunnelProtocol | string | `"geneve"` | |
| haproxy.PodDisruptionBudget.enable | bool | `false` | |
| haproxy.PodDisruptionBudget.minAvailable | int | `1` | |
| haproxy.args.defaults[0] | string | `"-f"` | |
| haproxy.args.defaults[1] | string | `"/usr/local/etc/haproxy/includes/global.cfg"` | |
| haproxy.args.defaults[2] | string | `"-f"` | |
| haproxy.args.defaults[3] | string | `"/usr/local/etc/haproxy/includes/prometheus.cfg"` | |
| haproxy.args.defaults[4] | string | `"-f"` | |
| haproxy.args.defaults[5] | string | `"/usr/local/etc/haproxy/haproxy.cfg"` | |
| haproxy.config | string | `"frontend fe_main\n bind :8080\n default_backend be_main\n\nbackend be_main\n server web1 10.0.0.1:8080 check\n"` | |
| haproxy.containerPorts.http | int | `8080` | |
| haproxy.containerPorts.https | string | `nil` | |
| haproxy.containerPorts.prometheus | int | `8404` | |
| haproxy.containerPorts.stat | string | `nil` | |
| haproxy.enabled | bool | `false` | |
| haproxy.includes."global.cfg" | string | `"global\n log stdout format raw local0\n maxconn 2048\n\ndefaults\n log global\n mode tcp\n option http-server-close\n timeout connect 10s\n timeout client 30s\n timeout client-fin 30s\n timeout server 30s\n timeout tunnel 1h\n\nresolvers coredns\n accepted_payload_size 4096\n parse-resolv-conf\n hold valid 10s\n hold other 10s\n hold refused 10s\n hold nx 10s\n hold timeout 10s\n"` | |
| haproxy.includes."prometheus.cfg" | string | `"frontend prometheus\n bind *:8404\n mode http\n http-request use-service prometheus-exporter if { path /metrics }\n no log\n stats enable\n stats uri /stats\n stats refresh 10s\n stats auth admin:letmein\n"` | |
| haproxy.livenessProbe.failureThreshold | int | `3` | |
| haproxy.livenessProbe.initialDelaySeconds | int | `0` | |
| haproxy.livenessProbe.periodSeconds | int | `10` | |
| haproxy.livenessProbe.successThreshold | int | `1` | |
| haproxy.livenessProbe.tcpSocket.port | int | `8404` | |
| haproxy.livenessProbe.timeoutSeconds | int | `1` | |
| haproxy.readinessProbe.failureThreshold | int | `3` | |
| haproxy.readinessProbe.initialDelaySeconds | int | `0` | |
| haproxy.readinessProbe.periodSeconds | int | `10` | |
| haproxy.readinessProbe.successThreshold | int | `1` | |
| haproxy.readinessProbe.tcpSocket.port | int | `8404` | |
| haproxy.readinessProbe.timeoutSeconds | int | `1` | |
| haproxy.replicaCount | int | `1` | |
| haproxy.resources.requests.cpu | string | `"10m"` | |
| haproxy.resources.requests.memory | string | `"48Mi"` | |
| haproxy.securityContext.allowPrivilegeEscalation | bool | `false` | |
| haproxy.securityContext.capabilities.drop[0] | string | `"ALL"` | |
| haproxy.securityContext.enabled | bool | `true` | |
| haproxy.securityContext.runAsGroup | int | `1000` | |
| haproxy.securityContext.runAsNonRoot | bool | `true` | |
| haproxy.securityContext.runAsUser | int | `1000` | |
| haproxy.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
| haproxy.serviceMonitor.enabled | bool | `false` | |
| haproxy.serviceMonitor.endpoints[0].interval | string | `"30s"` | |
| haproxy.serviceMonitor.endpoints[0].params.no-maint[0] | string | `"empty"` | |
| haproxy.serviceMonitor.endpoints[0].path | string | `"/metrics"` | |
| haproxy.serviceMonitor.endpoints[0].port | string | `"prometheus"` | |
| haproxy.serviceMonitor.endpoints[0].scheme | string | `"http"` | |
| metallb.controller.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | |
| metallb.controller.tolerations[0].effect | string | `"NoSchedule"` | |
| metallb.controller.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | |

View File

@@ -0,0 +1,10 @@
configmap: haproxy-grafana-dashboards
gzip: true
condition: 'index .Values.haproxy.serviceMonitor.enabled'
folder: KubeZero
dashboards:
- name: HAProxy
  url: https://grafana.com/api/dashboards/12693/revisions/8/download
  tags:
  - haproxy
  - network

File diff suppressed because one or more lines are too long

View File

@@ -1,3 +1,4 @@
{{- if .Values.multus.enabled }}
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -42,3 +43,4 @@ spec:
                config:
                  description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
                  type: string
{{- end }}

View File

@@ -7,6 +7,7 @@ set -ex
update_helm
# Create ZDT dashboard configmap
../kubezero-metrics/sync_grafana_dashboards.py dashboards.yaml templates/grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py cilium-dashboards.yaml templates/cilium-grafana-dashboards.yaml
../kubezero-metrics/sync_grafana_dashboards.py haproxy-dashboards.yaml templates/haproxy-grafana-dashboards.yaml
update_docs

View File

@@ -113,3 +113,117 @@ cilium:

haproxy:
  enabled: false
  replicaCount: 1

  # enable pdb if replica > 1
  PodDisruptionBudget:
    enable: false
    minAvailable: 1

  containerPorts:
    http: 8080
    https: null
    prometheus: 8404
    stat: null

  serviceMonitor:
    enabled: false
    endpoints:
    - port: prometheus
      path: /metrics
      scheme: http
      interval: 30s
      params:
        no-maint:
        - empty

  config: |
    frontend fe_main
      bind :8080
      default_backend be_main

    backend be_main
      server web1 10.0.0.1:8080 check

  includes:
    global.cfg: |
      global
        log stdout format raw local0
        maxconn 2048

      defaults
        log global
        mode tcp
        option http-server-close
        timeout connect 10s
        timeout client 30s
        timeout client-fin 30s
        timeout server 30s
        timeout tunnel 1h

      resolvers coredns
        accepted_payload_size 4096
        parse-resolv-conf
        hold valid 10s
        hold other 10s
        hold refused 10s
        hold nx 10s
        hold timeout 10s

    prometheus.cfg: |
      frontend prometheus
        bind *:8404
        mode http
        http-request use-service prometheus-exporter if { path /metrics }
        no log
        stats enable
        stats uri /stats
        stats refresh 10s
        stats auth admin:letmein

  args:
    defaults:
    - "-f"
    - "/usr/local/etc/haproxy/includes/global.cfg"
    - "-f"
    - "/usr/local/etc/haproxy/includes/prometheus.cfg"
    - "-f"
    - "/usr/local/etc/haproxy/haproxy.cfg"

  livenessProbe:
    failureThreshold: 3
    successThreshold: 1
    initialDelaySeconds: 0
    timeoutSeconds: 1
    tcpSocket:
      port: 8404
    periodSeconds: 10

  readinessProbe:
    failureThreshold: 3
    successThreshold: 1
    initialDelaySeconds: 0
    timeoutSeconds: 1
    tcpSocket:
      port: 8404
    periodSeconds: 10

  securityContext:
    enabled: true
    runAsNonRoot: true
    runAsUser: 1000
    runAsGroup: 1000
    allowPrivilegeEscalation: false
    capabilities:
      drop:
      - ALL
    seccompProfile:
      type: RuntimeDefault

  resources:
    requests:
      cpu: 10m
      memory: 48Mi
    # limits:
    #   cpu: 250m
    #   memory: 128Mi
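
The prometheus.cfg include is what both probes and the ServiceMonitor rely on: HAProxy's built-in exporter answers on port 8404 under /metrics. A quick scrape check, assuming the pod is port-forwarded to localhost (hypothetical):

import requests

# Hypothetical: kubectl port-forward <haproxy-pod> 8404:8404 is running.
metrics = requests.get("http://localhost:8404/metrics", timeout=5).text

# haproxy_process_build_info is emitted by HAProxy's bundled exporter,
# so its presence confirms the prometheus-exporter service is wired up.
print("haproxy_process_build_info" in metrics)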