Fix: minimal ES version in logging, clustered control plane upgrade, tooling cleanup

Stefan Reimer 2025-03-04 11:47:19 +00:00
parent 9fc9843283
commit 3304363986
13 changed files with 51 additions and 32 deletions

View File

@@ -4,10 +4,10 @@
 set -x
 ARTIFACTS=($(echo $1 | tr "," "\n"))
-ACTION=$2
+ACTION="${2:-apply}"
+ARGOCD="${3:-False}"
 LOCAL_DEV=1
-ARGOCD="False"
 #VERSION="latest"
 KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
@@ -85,7 +85,7 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $
 # Root KubeZero apply directly and exit
 if [ ${ARTIFACTS[0]} == "kubezero" ]; then
-  kubectl replace -f $WORKDIR/kubezero/templates
+  kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
   exit $?
 # "catch all" apply all enabled modules
@@ -100,7 +100,7 @@ if [ "$ACTION" == "delete" ]; then
     _helm delete ${ARTIFACTS[idx]} || true
   done
 else
-  if [ "$ACTION" == "" -o "$ACTION" == "crds" ]; then
+  if [ "$ACTION" == "apply" -o "$ACTION" == "crds" ]; then
     for t in ${ARTIFACTS[@]}; do
       _helm crds $t || true
     done
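
With ACTION now defaulting to "apply" and ARGOCD promoted from a hardcoded variable to an optional third positional argument, invocations look like this (the script's name is not shown in this diff, so the path below is a placeholder):

    # apply a single module; ACTION defaults to "apply", ARGOCD to "False"
    ./dev-apply.sh logging
    # delete several modules at once (the comma-separated list becomes the ARTIFACTS array)
    ./dev-apply.sh logging,metrics delete
    # apply CRDs only, claiming the ArgoCD field manager via field_manager()
    ./dev-apply.sh logging crds True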

View File

@@ -104,9 +104,9 @@ pre_kubeadm() {
 # Shared steps after calling kubeadm
 post_kubeadm() {
-  # KubeZero resources
+  # KubeZero resources - will never be applied by ArgoCD
   for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
-    kubectl apply -f $f $LOG
+    kubectl apply -f $f --server-side --force-conflicts $LOG
   done
 }
@@ -115,9 +115,13 @@ post_kubeadm() {
 control_plane_upgrade() {
   CMD=$1
+  ARGOCD=$(argo_used)
   render_kubeadm upgrade
   if [[ "$CMD" =~ ^(cluster)$ ]]; then
+    pre_control_plane_upgrade_cluster
     # get current values, argo app over cm
     get_kubezero_values $ARGOCD
@@ -133,7 +137,7 @@ control_plane_upgrade() {
     kubectl get application kubezero -n argocd -o yaml | \
       yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
       > $WORKDIR/new-argocd-app.yaml
-    kubectl replace -f $WORKDIR/new-argocd-app.yaml
+    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
   fi
   pre_kubeadm
@@ -147,13 +151,19 @@ control_plane_upgrade() {
     # install re-certed kubectl config for root
     cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
+    post_control_plane_upgrade_cluster
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."
   elif [[ "$CMD" =~ ^(final)$ ]]; then
+    pre_cluster_upgrade_final
     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
     #_kubeadm upgrade apply phase addon all $KUBE_VERSION
     _kubeadm upgrade apply $KUBE_VERSION
+    post_cluster_upgrade_final
     echo "Upgraded kubeadm addons."
   fi
@@ -394,17 +404,10 @@ for t in $@; do
   join) control_plane_node join;;
   restore) control_plane_node restore;;
   kubeadm_upgrade)
-    ARGOCD=$(argo_used)
-    # call hooks
-    pre_control_plane_upgrade_cluster
     control_plane_upgrade cluster
-    post_control_plane_upgrade_cluster
     ;;
   finalize_cluster_upgrade)
-    ARGOCD=$(argo_used)
-    pre_cluster_upgrade_final
     control_plane_upgrade final
-    post_cluster_upgrade_final
     ;;
   apply_*)
     ARGOCD=$(argo_used)
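
Taken together, these hunks move the ArgoCD lookup and the pre/post hooks out of the dispatch case statement and into control_plane_upgrade itself, so each hook runs exactly once per upgrade phase regardless of the entry point. A condensed sketch of the resulting function, using only names that appear in the diff:

    control_plane_upgrade() {
      CMD=$1
      ARGOCD=$(argo_used)              # resolved once, inside the function
      render_kubeadm upgrade
      if [[ "$CMD" =~ ^(cluster)$ ]]; then
        pre_control_plane_upgrade_cluster
        # ... kubeadm control plane upgrade steps ...
        post_control_plane_upgrade_cluster
      elif [[ "$CMD" =~ ^(final)$ ]]; then
        pre_cluster_upgrade_final
        _kubeadm upgrade apply $KUBE_VERSION
        post_cluster_upgrade_final
      fi
    }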

View File

@@ -34,6 +34,15 @@ function argo_used() {
 }
+
+function field_manager() {
+  if [ "$1" == "True" ]; then
+    echo "--field-manager argo-controller"
+  else
+    echo ""
+  fi
+}
 # get kubezero-values from ArgoCD if available or use in-cluster CM
 function get_kubezero_values() {
   local argo=${1:-"False"}
@@ -114,7 +123,7 @@ function argo_app_synced() {
 function create_ns() {
   local namespace=$1
   if [ "$namespace" != "kube-system" ]; then
-    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
+    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace $(field_manager $ARGOCD)
   fi
 }
@@ -144,7 +153,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
   # Only apply if there are actually any crds
   if [ -s $WORKDIR/crds.yaml ]; then
     [ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
-    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
+    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts $(field_manager $ARGOCD)
   fi
 }
@@ -204,7 +213,7 @@ function _helm() {
   declare -F ${module}-pre && ${module}-pre
   render
-  kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
+  kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
   # Optional post hook
   declare -F ${module}-post && ${module}-post
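
The new field_manager helper centralizes one decision: when ArgoCD manages the cluster, every imperative kubectl call claims the field manager argo-controller, so its server-side-apply field ownership lines up with ArgoCD's instead of producing spurious conflicts. The expansion is straightforward:

    field_manager True     # prints: --field-manager argo-controller
    field_manager False    # prints nothing
    # typical call site, as used throughout this commit:
    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts $(field_manager $ARGOCD)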

charts/kubeadm/TODO (new file, 6 lines)
View File

@@ -0,0 +1,6 @@
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_256_GCM_SHA384"}
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_128_GCM_SHA256"}
+"RuntimeConfig from runtime service failed","err":"rpc error: code = Unimplemented desc = unknown method RuntimeConfig for service runtime.v1.RuntimeService"}
+"CRI implementation should be updated to support RuntimeConfig when KubeletCgroupDriverFromCRI feature gate has been enabled. Falling back to using cgroupDriver from kubelet config."

View File

@@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta4
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
-#  NonGracefulFailover: true
+featureGates:
+  ControlPlaneKubeletLocalMode: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16
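
ControlPlaneKubeletLocalMode is the kubeadm feature gate that makes the kubelet on each control plane node talk to its local kube-apiserver rather than the shared controlPlaneEndpoint; during a rolling upgrade of a clustered control plane this keeps each kubelet on an API server of a compatible version, which is the "clustered control plane upgrade" fix from the commit subject. One way to verify the gate once the change is rolled out:

    kubectl -n kube-system get cm kubeadm-config -o yaml | grep -A1 featureGates
    # featureGates:
    #   ControlPlaneKubeletLocalMode: true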

View File

@@ -12,14 +12,12 @@ spec:
   hosts:
   - {{ .Values.gitea.istio.url }}
   http:
-  {{- if .Values.gitea.istio.authProvider }}
-  # https://github.com/go-gitea/gitea/issues/13606
+  {{- if .Values.gitea.istio.blockApi }}
   - match:
     - uri:
-        regex: ^/user/login.*
-    redirect:
-      uri: /user/oauth2/{{ .Values.gitea.istio.authProvider }}
-      redirectCode: 302
+        prefix: /api
+    directResponse:
+      status: 401
   {{- end }}
   - route:
     - destination:
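
The old authProvider block rewrote login URLs to an OAuth2 path; it is replaced by an optional API lockout: with blockApi enabled, the Istio gateway answers anything under /api with a 401 directResponse before the request ever reaches gitea. A quick smoke test (git.example.com is the placeholder URL from the values file below):

    curl -s -o /dev/null -w '%{http_code}\n' https://git.example.com/api/v1/version
    # 401 - directResponse from the gateway
    curl -s -o /dev/null -w '%{http_code}\n' https://git.example.com/
    # 200 - still routed to the gitea service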

View File

@@ -87,6 +87,7 @@ gitea:
     enabled: false
     gateway: istio-ingress/private-ingressgateway
     url: git.example.com
+    blockApi: false
 jenkins:

View File

@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-logging
 description: KubeZero Umbrella Chart for complete EFK stack
 type: application
-version: 0.8.13
+version: 0.8.14
 appVersion: 1.6.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png

View File

@@ -1,6 +1,6 @@
 # kubezero-logging
-![Version: 0.8.13](https://img.shields.io/badge/Version-0.8.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
+![Version: 0.8.14](https://img.shields.io/badge/Version-0.8.14-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square)
 KubeZero Umbrella Chart for complete EFK stack
@@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0`
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
 | https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
@@ -135,7 +135,7 @@ Kubernetes: `>= 1.26.0`
 | kibana.istio.enabled | bool | `false` | |
 | kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
 | kibana.istio.url | string | `""` | |
-| version | string | `"7.17.3"` | |
+| version | string | `"7.17.7"` | |
 ## Resources:

View File

@@ -2,7 +2,7 @@
 # fullnameOverride: ""
 # Version for ElasticSearch and Kibana have to match so we define it at top-level
-version: 7.17.3
+version: 7.17.7
 elastic_password: "" # super_secret_elastic_password

View File

@@ -42,6 +42,8 @@ external-dns:
   - "--aws-zone-type=public"
   - "--aws-zones-cache-duration=1h"
   env:
+  - name: AWS_REGION
+    value: {{ .Values.global.aws.region }}
   - name: AWS_ROLE_ARN
     value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.externalDNS"
   - name: AWS_WEB_IDENTITY_TOKEN_FILE
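
Setting AWS_REGION explicitly lets the AWS SDK inside external-dns resolve its region from the environment instead of probing instance metadata. Rendered with illustrative values (accountId, region, and clusterName below are placeholders), the env block becomes:

    env:
    - name: AWS_REGION
      value: eu-central-1
    - name: AWS_ROLE_ARN
      value: "arn:aws:iam::123456789012:role/eu-central-1.mycluster.externalDNS"
    - name: AWS_WEB_IDENTITY_TOKEN_FILE
      # ... unchanged, truncated in this diff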

View File

@@ -114,7 +114,7 @@ metrics:
 logging:
   enabled: false
   namespace: logging
-  targetRevision: 0.8.13
+  targetRevision: 0.8.14
 argo:
   enabled: false