Compare commits
11 commits: renovate/k... ... main

SHA1
545a7fd8b1
56a2926917
b8114bd053
53f940a54c
58780f1e0e
4c10271ec6
5246f57329
5bc6e6e435
cbcaec807a
bfafccaf32
3304363986
@@ -4,10 +4,10 @@
 set -x
 
 ARTIFACTS=($(echo $1 | tr "," "\n"))
-ACTION=$2
+ACTION="${2:-apply}"
+ARGOCD="${3:-False}"
 
 LOCAL_DEV=1
-ARGOCD="False"
 
 #VERSION="latest"
 KUBE_VERSION="$(kubectl version -o json | jq -r .serverVersion.gitVersion)"
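The two `+` lines above rely on bash default-value parameter expansion, which makes the second and third arguments optional. A minimal standalone sketch of that behaviour (the script name and argument values are illustrative, not taken from the repo):

    #!/bin/bash
    # ${N:-default} substitutes "default" when positional argument N is unset or empty
    ACTION="${2:-apply}"
    ARGOCD="${3:-False}"
    echo "action=$ACTION argocd=$ARGOCD"
    # ./demo.sh metrics            -> action=apply argocd=False
    # ./demo.sh metrics crds True  -> action=crds  argocd=True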
@@ -85,7 +85,7 @@ helm template $CHARTS/kubezero -f $WORKDIR/kubezero-values.yaml --kube-version $
 
 # Root KubeZero apply directly and exit
 if [ ${ARTIFACTS[0]} == "kubezero" ]; then
-  kubectl replace -f $WORKDIR/kubezero/templates
+  kubectl replace -f $WORKDIR/kubezero/templates $(field_manager $ARGOCD)
   exit $?
 
 # "catch all" apply all enabled modules
@@ -100,12 +100,12 @@ if [ "$ACTION" == "delete" ]; then
     _helm delete ${ARTIFACTS[idx]} || true
   done
 else
-  if [ "$ACTION" == "" -o "$ACTION" == "crds" ]; then
+  if [ "$ACTION" == "apply" -o "$ACTION" == "crds" ]; then
     for t in ${ARTIFACTS[@]}; do
       _helm crds $t || true
     done
   fi
   for t in ${ARTIFACTS[@]}; do
-    _helm apply $t || true
+    _helm $ACTION $t || true
   done
 fi
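Read together with the new `apply` default in the first hunk, the dispatch above now treats a missing action as `apply`. A hedged sketch of the resulting call pattern (the script name `apply.sh` and the artifact names are hypothetical):

    # hypothetical invocations of the dev apply script shown in these hunks
    ./apply.sh cert-manager                # ACTION=apply: _helm crds, then _helm apply, per artifact
    ./apply.sh cert-manager,istio delete   # ACTION=delete: _helm delete per artifact
    ./apply.sh istio crds True             # ACTION=crds, ARGOCD=True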
@@ -104,9 +104,9 @@ pre_kubeadm() {
 
 # Shared steps after calling kubeadm
 post_kubeadm() {
-  # KubeZero resources
+  # KubeZero resources - will never be applied by ArgoCD
   for f in ${WORKDIR}/kubeadm/templates/resources/*.yaml; do
-    kubectl apply -f $f $LOG
+    kubectl apply -f $f --server-side --force-conflicts $LOG
   done
 }
 
@@ -115,9 +115,13 @@ post_kubeadm() {
 control_plane_upgrade() {
   CMD=$1
 
+  ARGOCD=$(argo_used)
+
   render_kubeadm upgrade
 
   if [[ "$CMD" =~ ^(cluster)$ ]]; then
+    pre_control_plane_upgrade_cluster
+
     # get current values, argo app over cm
     get_kubezero_values $ARGOCD
 
@@ -133,7 +137,7 @@ control_plane_upgrade() {
     kubectl get application kubezero -n argocd -o yaml | \
       yq ".spec.source.helm.valuesObject |= load(\"$WORKDIR/kubezero-values.yaml\") | .spec.source.targetRevision = strenv(kubezero_chart_version)" \
       > $WORKDIR/new-argocd-app.yaml
-    kubectl replace -f $WORKDIR/new-argocd-app.yaml
+    kubectl replace -f $WORKDIR/new-argocd-app.yaml $(field_manager $ARGOCD)
   fi
 
   pre_kubeadm
@@ -147,13 +151,19 @@ control_plane_upgrade() {
     # install re-certed kubectl config for root
     cp ${HOSTFS}/etc/kubernetes/super-admin.conf ${HOSTFS}/root/.kube/config
 
+    post_control_plane_upgrade_cluster
+
     echo "Successfully upgraded KubeZero control plane to $KUBE_VERSION using kubeadm."
 
   elif [[ "$CMD" =~ ^(final)$ ]]; then
+    pre_cluster_upgrade_final
+
     # Finally upgrade addons last, with 1.32 we can ONLY call addon phase
     #_kubeadm upgrade apply phase addon all $KUBE_VERSION
     _kubeadm upgrade apply $KUBE_VERSION
 
+    post_cluster_upgrade_final
+
     echo "Upgraded kubeadm addons."
   fi
 
@@ -318,7 +328,10 @@ apply_module() {
   done
 
   for t in $MODULES; do
-    _helm apply $t
+    #_helm apply $t
+
+    # During 1.31 we change the ArgoCD tracking so replace
+    _helm replace $t
   done
 
   echo "Applied KubeZero modules: $MODULES"
@@ -394,17 +407,10 @@ for t in $@; do
   join) control_plane_node join;;
   restore) control_plane_node restore;;
   kubeadm_upgrade)
-    ARGOCD=$(argo_used)
-    # call hooks
-    pre_control_plane_upgrade_cluster
     control_plane_upgrade cluster
-    post_control_plane_upgrade_cluster
     ;;
   finalize_cluster_upgrade)
-    ARGOCD=$(argo_used)
-    pre_cluster_upgrade_final
     control_plane_upgrade final
-    post_cluster_upgrade_final
     ;;
   apply_*)
     ARGOCD=$(argo_used)
@@ -2,11 +2,10 @@
 
 # Simulate well-known CRDs being available
 API_VERSIONS="-a monitoring.coreos.com/v1 -a snapshot.storage.k8s.io/v1 -a policy/v1/PodDisruptionBudget -a apiregistration.k8s.io/v1"
+LOCAL_DEV=${LOCAL_DEV:-""}
 
 export HELM_SECRETS_BACKEND="vals"
 
-LOCAL_DEV=${LOCAL_DEV:-""}
-
 # Waits for max 300s and retries
 function wait_for() {
   local TRIES=0
@@ -34,6 +33,32 @@ function argo_used() {
 }
 
 
+function field_manager() {
+  local argo=${1:-"False"}
+
+  if [ "$argo" == "True" ]; then
+    echo "--field-manager argo-controller"
+  else
+    echo ""
+  fi
+}
+
+
+function get_kubezero_secret() {
+  export _key="$1"
+
+  kubectl get secrets -n kubezero kubezero-secrets -o yaml | yq '.data.[env(_key)]' | base64 -d -w0
+}
+
+
+function set_kubezero_secret() {
+  local key="$1"
+  local val="$2"
+
+  kubectl patch secret -n kubezero kubezero-secrets --patch="{\"data\": { \"$key\": \"$(echo -n $val |base64 -w0)\" }}"
+}
+
+
 # get kubezero-values from ArgoCD if available or use in-cluster CM
 function get_kubezero_values() {
   local argo=${1:-"False"}
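The helpers added above are thin wrappers around kubectl and yq. A minimal usage sketch, assuming the functions are sourced from this library (the secret key and manifest names are made up):

    ARGOCD=$(argo_used)                                        # "True" or "False"
    kubectl apply -f manifest.yaml $(field_manager $ARGOCD)    # adds --field-manager argo-controller only when ArgoCD is in use

    set_kubezero_secret example.adminPassword "s3cr3t"         # base64-encodes the value and patches kubezero-secrets
    get_kubezero_secret example.adminPassword                  # prints the decoded value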
@@ -96,25 +121,12 @@ function waitSystemPodsRunning() {
   done
 }
 
-function argo_app_synced() {
-  APP=$1
-
-  # Ensure we are synced otherwise bail out
-  status=$(kubectl get application $APP -n argocd -o yaml | yq .status.sync.status)
-  if [ "$status" != "Synced" ]; then
-    echo "ArgoCD Application $APP not 'Synced'!"
-    return 1
-  fi
-
-  return 0
-}
-
 
 # make sure namespace exists prior to calling helm as the create-namespace options doesn't work
 function create_ns() {
   local namespace=$1
   if [ "$namespace" != "kube-system" ]; then
-    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace
+    kubectl get ns $namespace > /dev/null || kubectl create ns $namespace $(field_manager $ARGOCD)
   fi
 }
 
@@ -144,7 +156,7 @@ for manifest in yaml.safe_load_all(sys.stdin):
   # Only apply if there are actually any crds
   if [ -s $WORKDIR/crds.yaml ]; then
    [ -n "$DEBUG" ] && cat $WORKDIR/crds.yaml
-    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts
+    kubectl apply -f $WORKDIR/crds.yaml --server-side --force-conflicts $(field_manager $ARGOCD)
   fi
 }
 
@@ -193,7 +205,7 @@ function _helm() {
     # Allow custom CRD handling
     declare -F ${module}-crds && ${module}-crds || _crds
 
-  elif [ $action == "apply" ]; then
+  elif [ $action == "apply" -o $action == "replace" ]; then
     echo "using values to $action of module $module: "
     cat $WORKDIR/values.yaml
 
@@ -204,7 +216,8 @@ function _helm() {
     declare -F ${module}-pre && ${module}-pre
 
     render
-    kubectl $action -f $WORKDIR/helm.yaml --server-side --force-conflicts && rc=$? || rc=$?
+    [ $action == "apply" ] && kubectl apply -f $WORKDIR/helm.yaml --server-side --force-conflicts $(field_manager $ARGOCD) && rc=$? || rc=$?
+    [ $action == "replace" ] && kubectl replace -f $WORKDIR/helm.yaml $(field_manager $ARGOCD) && rc=$? || rc=$?
 
     # Optional post hook
     declare -F ${module}-post && ${module}-post
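The `&& rc=$? || rc=$?` tail kept on both new kubectl lines is the usual idiom for capturing an exit code without aborting a `set -e` script. A standalone sketch (the commands are illustrative):

    set -e
    false && rc=$? || rc=$?    # rc=1, execution continues despite set -e
    echo "kubectl-style step returned $rc"
    true && rc=$? || rc=$?     # rc=0
    echo "next step returned $rc"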
@@ -17,22 +17,36 @@ failureModeDeny: false
 # - slow: 1 req/s over a minute per sourceIP
 descriptors:
   ingress:
-  - key: speed
-    value: slow
+  - key: sourceIp
+    value: sixtyPerMinute
     descriptors:
     - key: remote_address
       rate_limit:
         unit: minute
         requests_per_unit: 60
+  - key: sourceIp
+    value: tenPerSecond
+    descriptors:
+    - key: remote_address
+      rate_limit:
+        unit: second
+        requests_per_unit: 10
 
   privateIngress:
-  - key: speed
-    value: slow
+  - key: sourceIp
+    value: sixtyPerMinute
     descriptors:
     - key: remote_address
       rate_limit:
         unit: minute
         requests_per_unit: 60
+  - key: sourceIp
+    value: tenPerSecond
+    descriptors:
+    - key: remote_address
+      rate_limit:
+        unit: second
+        requests_per_unit: 10
 
 metrics:
   enabled: false
charts/kubeadm/TODO (new file, 6 lines)
@@ -0,0 +1,6 @@
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_256_GCM_SHA384"}
+"Use of insecure cipher detected.","v":0,"cipher":"TLS_RSA_WITH_AES_128_GCM_SHA256"}
+
+
+"RuntimeConfig from runtime service failed","err":"rpc error: code = Unimplemented desc = unknown method RuntimeConfig for service runtime.v1.RuntimeService"}
+"CRI implementation should be updated to support RuntimeConfig when KubeletCgroupDriverFromCRI feature gate has been enabled. Falling back to using cgroupDriver from kubelet config."
@@ -2,8 +2,8 @@ apiVersion: kubeadm.k8s.io/v1beta4
 kind: ClusterConfiguration
 kubernetesVersion: {{ .Chart.Version }}
 clusterName: {{ .Values.global.clusterName }}
-#featureGates:
-#  NonGracefulFailover: true
+featureGates:
+  ControlPlaneKubeletLocalMode: true
 controlPlaneEndpoint: {{ .Values.api.endpoint }}
 networking:
   podSubnet: 10.244.0.0/16
@@ -3,7 +3,7 @@
 {{- /* v1.28: PodAndContainerStatsFromCRI still not working */ -}}
 {{- /* v1.28: UnknownVersionInteroperabilityProxy requires StorageVersionAPI which is still alpha in 1.30 */ -}}
 {{- define "kubeadm.featuregates" }}
-{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList"}}
+{{- $gates := list "CustomCPUCFSQuotaPeriod" "AuthorizeWithSelectors" "AuthorizeNodeWithSelectors" "ConsistentListFromCache" "VolumeAttributesClass" "WatchList" }}
 {{- if eq .return "csv" }}
 {{- range $key := $gates }}
 {{- $key }}=true,
@@ -1,7 +1,7 @@
 apiVersion: v2
 description: KubeZero Argo - Events, Workflow, CD
 name: kubezero-argo
-version: 0.2.8
+version: 0.2.9
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -22,7 +22,7 @@ dependencies:
     repository: https://argoproj.github.io/argo-helm
     condition: argo-events.enabled
   - name: argo-cd
-    version: 7.8.2
+    version: 7.8.9
     repository: https://argoproj.github.io/argo-helm
     condition: argo-cd.enabled
   - name: argocd-apps
@@ -106,9 +106,6 @@ argo-cd:
     extraHosts: "git.zero-downtime.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7UgK7Z4dDcuIW1uMOsuwhrqdkJCvYG/ZjHtLM7WaKFxVRnzNnNkQJNncWIGNDUQ1xxrbsoSNRZDtk0NlOjNtx2aApSWl4iWghkpXELvsZtOZ7I9FSC/E6ImLC3KWfK7P0mhZaF6kHPfpu8Y6pjUyLBTpV1AaVwr0I8onyqGazJOVotTFaBFEi/sT0O2FUk7agwZYfj61w3JGOy3c+fmBcK3lXf/QM90tosOpJNuJ7n5Vk5FDDLkl9rO4XR/+mXHFvITiWb8F5C50YAwjYcy36yWSSryUAAHAuqpgotwh65vSG6fZvFhmEwO2BrCkOV5+k8iRfhy/yZODJzZ5V/5cbMbdZrY6lm/p5/S1wv8BEyPekBGdseqQjEO0IQiQHcMrfgTrrQ7ndbZzVZRByZI+wbGFkBCzNSJcNsoiHjs2EblxYyuW0qUvvrBxLnySvaxyPm4BOukSAZAOEaUrajpQlnHdnY1CGcgbwxw0LNv3euKQ3tDJSUlKO0Wd8d85PRv1THW4Ui9Lhsmv+BPA2vJZDOkx/n0oyPFAB0oyd5JNM38eFxLCmPC2OE63gDP+WmzVO61YCVTnvhpQjEOLawEWVFsk0y25R5z5BboDqJaOFnZF6i517O96cn17z3Ls4hxw3+0rlKczYRoyfUHs7KQENa4mY8YlJweNTBgld//RMUQ=="
 
     params:
-      controller.status.processors: 8
-      controller.operation.processors: 4
-      controller.kubectl.parallelism.limit: 8
       controller.resource.health.persist: "false"
       controller.diff.server.side: "true"
       controller.sync.timeout.seconds: 1800
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-ci
 description: KubeZero umbrella chart for all things CI
 type: application
-version: 0.8.20
+version: 0.8.21
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -18,19 +18,19 @@ dependencies:
     version: 0.2.1
     repository: https://cdn.zero-downtime.net/charts/
   - name: gitea
-    version: 10.6.0
+    version: 11.0.0
     repository: https://dl.gitea.io/charts/
     condition: gitea.enabled
   - name: jenkins
-    version: 5.8.16
+    version: 5.8.18
     repository: https://charts.jenkins.io
     condition: jenkins.enabled
   - name: trivy
-    version: 0.11.1
+    version: 0.12.0
     repository: https://aquasecurity.github.io/helm-charts/
     condition: trivy.enabled
   - name: renovate
-    version: 39.180.2
+    version: 39.200.0
     repository: https://docs.renovatebot.com/helm-charts
     condition: renovate.enabled
 kubeVersion: ">= 1.25.0"
@@ -1,6 +1,6 @@
 # kubezero-ci
 
- 
+ 
 
 KubeZero umbrella chart for all things CI
 
@@ -18,11 +18,11 @@ Kubernetes: `>= 1.25.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://aquasecurity.github.io/helm-charts/ | trivy | 0.11.1 |
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.1.6 |
-| https://charts.jenkins.io | jenkins | 5.8.16 |
-| https://dl.gitea.io/charts/ | gitea | 10.6.0 |
-| https://docs.renovatebot.com/helm-charts | renovate | 39.180.2 |
+| https://aquasecurity.github.io/helm-charts/ | trivy | 0.12.0 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
+| https://charts.jenkins.io | jenkins | 5.8.18 |
+| https://dl.gitea.io/charts/ | gitea | 11.0.0 |
+| https://docs.renovatebot.com/helm-charts | renovate | 39.200.0 |
 
 # Jenkins
 - default build retention 10 builds, 32days
@@ -68,7 +68,8 @@ Kubernetes: `>= 1.25.0`
 | gitea.gitea.metrics.enabled | bool | `false` | |
 | gitea.gitea.metrics.serviceMonitor.enabled | bool | `true` | |
 | gitea.image.rootless | bool | `true` | |
-| gitea.image.tag | string | `"1.23.4"` | |
+| gitea.image.tag | string | `"1.23.5"` | |
+| gitea.istio.blockApi | bool | `false` | |
 | gitea.istio.enabled | bool | `false` | |
 | gitea.istio.gateway | string | `"istio-ingress/private-ingressgateway"` | |
 | gitea.istio.url | string | `"git.example.com"` | |
@@ -83,6 +84,7 @@ Kubernetes: `>= 1.25.0`
 | gitea.resources.requests.memory | string | `"320Mi"` | |
 | gitea.securityContext.allowPrivilegeEscalation | bool | `false` | |
 | gitea.securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| gitea.service.http.port | int | `80` | |
 | gitea.strategy.type | string | `"Recreate"` | |
 | gitea.test.enabled | bool | `false` | |
 | jenkins.agent.annotations."cluster-autoscaler.kubernetes.io/safe-to-evict" | string | `"false"` | |
@@ -156,7 +158,7 @@ Kubernetes: `>= 1.25.0`
 | jenkins.serviceAccountAgent.create | bool | `true` | |
 | jenkins.serviceAccountAgent.name | string | `"jenkins-podman-aws"` | |
 | renovate.cronjob.concurrencyPolicy | string | `"Forbid"` | |
-| renovate.cronjob.jobBackoffLimit | int | `3` | |
+| renovate.cronjob.jobBackoffLimit | int | `2` | |
 | renovate.cronjob.schedule | string | `"0 3 * * *"` | |
 | renovate.cronjob.successfulJobsHistoryLimit | int | `1` | |
 | renovate.enabled | bool | `false` | |
@@ -12,6 +12,14 @@ Use the following links to reference issues, PRs, and commits prior to v2.6.0.
 The changelog until v1.5.7 was auto-generated based on git commits.
 Those entries include a reference to the git commit to be able to get more details.
 
+## 5.8.18
+
+Update `jenkins/jenkins` to version `2.492.2-jdk17`
+
+## 5.8.17
+
+Update `kubernetes` to version `4314.v5b_846cf499eb_`
+
 ## 5.8.16
 
 Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`
@@ -1,10 +1,10 @@
 annotations:
   artifacthub.io/category: integration-delivery
   artifacthub.io/changes: |
-    - Update `docker.io/kiwigrid/k8s-sidecar` to version `1.30.1`
+    - Update `jenkins/jenkins` to version `2.492.2-jdk17`
   artifacthub.io/images: |
     - name: jenkins
-      image: docker.io/jenkins/jenkins:2.492.1-jdk17
+      image: docker.io/jenkins/jenkins:2.492.2-jdk17
     - name: k8s-sidecar
       image: docker.io/kiwigrid/k8s-sidecar:1.30.1
     - name: inbound-agent
@@ -18,7 +18,7 @@ annotations:
     - name: support
       url: https://github.com/jenkinsci/helm-charts/issues
 apiVersion: v2
-appVersion: 2.492.1
+appVersion: 2.492.2
 description: 'Jenkins - Build great things at any scale! As the leading open source
   automation server, Jenkins provides over 2000 plugins to support building, deploying
   and automating any project. '
@@ -46,4 +46,4 @@ sources:
   - https://github.com/maorfr/kube-tasks
   - https://github.com/jenkinsci/configuration-as-code-plugin
 type: application
-version: 5.8.16
+version: 5.8.18
@@ -165,7 +165,7 @@ The following tables list the configurable parameters of the Jenkins chart and t
 | [controller.initializeOnce](./values.yaml#L424) | bool | Initialize only on first installation. Ensures plugins do not get updated inadvertently. Requires `persistence.enabled` to be set to `true` | `false` |
 | [controller.installLatestPlugins](./values.yaml#L413) | bool | Download the minimum required version or latest version of all dependencies | `true` |
 | [controller.installLatestSpecifiedPlugins](./values.yaml#L416) | bool | Set to true to download the latest version of any plugin that is requested to have the latest version | `false` |
-| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4313.va_9b_4fe2a_0e34","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
+| [controller.installPlugins](./values.yaml#L405) | list | List of Jenkins plugins to install. If you don't want to install plugins, set it to `false` | `["kubernetes:4314.v5b_846cf499eb_","workflow-aggregator:600.vb_57cdd26fdd7","git:5.7.0","configuration-as-code:1932.v75cb_b_f1b_698d"]` |
 | [controller.javaOpts](./values.yaml#L162) | string | Append to `JAVA_OPTS` env var | `nil` |
 | [controller.jenkinsAdminEmail](./values.yaml#L96) | string | Email address for the administrator of the Jenkins instance | `nil` |
 | [controller.jenkinsHome](./values.yaml#L101) | string | Custom Jenkins home path | `"/var/jenkins_home"` |
@@ -403,7 +403,7 @@ controller:
   # Plugins will be installed during Jenkins controller start
   # -- List of Jenkins plugins to install. If you don't want to install plugins, set it to `false`
   installPlugins:
-    - kubernetes:4313.va_9b_4fe2a_0e34
+    - kubernetes:4314.v5b_846cf499eb_
     - workflow-aggregator:600.vb_57cdd26fdd7
     - git:5.7.0
     - configuration-as-code:1932.v75cb_b_f1b_698d
@@ -1,4 +1,5 @@
-{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks }}
+{{- if and .Values.gitea.enabled .Values.gitea.istio.enabled .Values.gitea.istio.ipBlocks .Values.gitea.istio.blockApi }}
+# Limit access to /api
 apiVersion: security.istio.io/v1beta1
 kind: AuthorizationPolicy
 metadata:
@@ -19,6 +20,7 @@ spec:
   to:
   - operation:
       hosts: ["{{ .Values.gitea.istio.url }}"]
+      paths: [ "/api/*" ]
   when:
   - key: connection.sni
     values:
@@ -12,16 +12,15 @@ spec:
   hosts:
   - {{ .Values.gitea.istio.url }}
   http:
-{{- if .Values.gitea.istio.authProvider }}
-  # https://github.com/go-gitea/gitea/issues/13606
-  - match:
+  - name: api
+    match:
     - uri:
-        regex: ^/user/login.*
-    redirect:
-      uri: /user/oauth2/{{ .Values.gitea.istio.authProvider }}
-      redirectCode: 302
-{{- end }}
-  - route:
+        prefix: /api/
+    route:
     - destination:
         host: gitea-http
+  - name: notApi
+    route:
+    - destination:
+        host: gitea-http
   tcp:
@@ -2,7 +2,7 @@ gitea:
   enabled: false
 
   image:
-    tag: 1.23.4
+    tag: 1.23.5
     rootless: true
 
   repliaCount: 1
@@ -87,6 +87,7 @@ gitea:
     enabled: false
     gateway: istio-ingress/private-ingressgateway
     url: git.example.com
+    blockApi: false
 
 
 jenkins:
@@ -41,6 +41,7 @@ Kubernetes: `>= 1.30.0-0`
 | gateway.service.externalTrafficPolicy | string | `"Local"` | |
 | gateway.service.type | string | `"NodePort"` | |
 | gateway.terminationGracePeriodSeconds | int | `120` | |
+| hardening.preserveExternalRequestId | bool | `false` | |
 | hardening.rejectUnderscoresHeaders | bool | `true` | |
 | hardening.unescapeSlashes | bool | `true` | |
 | proxyProtocol | bool | `true` | |
@@ -32,6 +32,7 @@ spec:
           use_remote_address: true
           normalize_path: true
           merge_slashes: true
+          preserve_external_request_id: {{ .Values.hardening.preserveExternalRequestId }}
         {{- if .Values.hardening.unescapeSlashes }}
           path_with_escaped_slashes_action: UNESCAPE_AND_REDIRECT
         {{- end }}
@@ -43,3 +43,4 @@ proxyProtocol: true
 hardening:
   rejectUnderscoresHeaders: true
   unescapeSlashes: true
+  preserveExternalRequestId: false
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-istio
 description: KubeZero Umbrella Chart for Istio
 type: application
-version: 0.24.4
+version: 0.24.3
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
@@ -20,10 +20,10 @@ dependencies:
     repository: https://cdn.zero-downtime.net/charts/
     condition: envoy-ratelimit.enabled
   - name: base
-    version: 1.25.0
+    version: 1.24.3
     repository: https://istio-release.storage.googleapis.com/charts
   - name: istiod
-    version: 1.25.0
+    version: 1.24.3
     repository: https://istio-release.storage.googleapis.com/charts
   - name: kiali-server
     version: "2.6.0"
@@ -30,17 +30,7 @@ Kubernetes: `>= 1.30.0-0`
 
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| envoy-ratelimit.descriptors.ingress[0].key | string | `"remote_address"` | |
-| envoy-ratelimit.descriptors.ingress[0].rate_limit.requests_per_unit | int | `10` | |
-| envoy-ratelimit.descriptors.ingress[0].rate_limit.unit | string | `"second"` | |
-| envoy-ratelimit.descriptors.privateIngress[0].key | string | `"remote_address"` | |
-| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.requests_per_unit | int | `10` | |
-| envoy-ratelimit.descriptors.privateIngress[0].rate_limit.unit | string | `"second"` | |
 | envoy-ratelimit.enabled | bool | `false` | |
-| envoy-ratelimit.failureModeDeny | bool | `false` | |
-| envoy-ratelimit.localCacheSize | int | `1048576` | |
-| envoy-ratelimit.log.format | string | `"json"` | |
-| envoy-ratelimit.log.level | string | `"warn"` | |
 | global.defaultPodDisruptionBudget.enabled | bool | `false` | |
 | global.logAsJson | bool | `true` | |
 | global.variant | string | `"distroless"` | |
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero-logging
 description: KubeZero Umbrella Chart for complete EFK stack
 type: application
-version: 0.8.13
+version: 0.8.14
 appVersion: 1.6.0
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
@@ -1,6 +1,6 @@
 # kubezero-logging
 
-  
+  
 
 KubeZero Umbrella Chart for complete EFK stack
 
@@ -18,7 +18,7 @@ Kubernetes: `>= 1.26.0`
 
 | Repository | Name | Version |
 |------------|------|---------|
-| https://cdn.zero-downtime.net/charts/ | kubezero-lib | >= 0.1.6 |
+| https://cdn.zero-downtime.net/charts/ | kubezero-lib | 0.2.1 |
 | https://fluent.github.io/helm-charts | fluent-bit | 0.47.10 |
 | https://fluent.github.io/helm-charts | fluentd | 0.5.2 |
 
@@ -135,7 +135,7 @@ Kubernetes: `>= 1.26.0`
 | kibana.istio.enabled | bool | `false` | |
 | kibana.istio.gateway | string | `"istio-system/ingressgateway"` | |
 | kibana.istio.url | string | `""` | |
-| version | string | `"7.17.3"` | |
+| version | string | `"7.17.7"` | |
 
 ## Resources:
 
@@ -2,7 +2,7 @@
 # fullnameOverride: ""
 
 # Version for ElasticSearch and Kibana have to match so we define it at top-level
-version: 7.17.3
+version: 7.17.7
 
 elastic_password: "" # super_secret_elastic_password
 
@@ -62,12 +62,8 @@ kube-prometheus-stack:
         memory: 128Mi
 
   admissionWebhooks:
-    patch:
-      tolerations:
-      - key: node-role.kubernetes.io/control-plane
-        effect: NoSchedule
-      nodeSelector:
-        node-role.kubernetes.io/control-plane: ""
     certManager:
       enabled: true
 
   nodeExporter:
     enabled: true
@@ -9,6 +9,10 @@ metadata:
   namespace: argocd
   labels:
     {{- include "kubezero-lib.labels" . | nindent 4 }}
+  {{- with ( index .Values $name "annotations" ) }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
   {{- if not ( index .Values $name "retain" ) }}
   finalizers:
     - resources-finalizer.argocd.argoproj.io
@@ -42,6 +42,8 @@ external-dns:
     - "--aws-zone-type=public"
     - "--aws-zones-cache-duration=1h"
   env:
+    - name: AWS_REGION
+      value: {{ .Values.global.aws.region }}
     - name: AWS_ROLE_ARN
       value: "arn:aws:iam::{{ .Values.global.aws.accountId }}:role/{{ .Values.global.aws.region }}.{{ .Values.global.clusterName }}.externalDNS"
     - name: AWS_WEB_IDENTITY_TOKEN_FILE
@@ -2,10 +2,22 @@
 
 argo-cd:
   enabled: {{ default "false" (index .Values "argo" "argo-cd" "enabled") }}
-{{- with index .Values "argo" "argo-cd" "configs" }}
+
   configs:
+    {{- with index .Values "argo" "argo-cd" "configs" }}
     {{- toYaml . | nindent 4 }}
     {{- end }}
+
+    params:
+      {{- if not $.Values.global.highAvailable }}
+      # Reduce load on API server on single node control plane
+      controller.status.processors: 2
+      controller.operation.processors: 1
+      controller.kubectl.parallelism.limit: 1
+      {{- else }}
+      controller.status.processors: 8
+      controller.operation.processors: 4
+      controller.kubectl.parallelism.limit: 4
+      {{- end }}
 
   controller:
     metrics:
@@ -114,7 +114,9 @@ metrics:
 logging:
   enabled: false
   namespace: logging
-  targetRevision: 0.8.13
+  targetRevision: 0.8.14
+  annotations:
+    argocd.argoproj.io/compare-options: ServerSideDiff=false
 
 argo:
   enabled: false