feat: Remove TC, add Jenkins to CI, bugfixes for 1.21

Stefan Reimer 2021-12-19 23:18:01 +01:00
parent 47fc751819
commit 826b4d356e
21 changed files with 85 additions and 373 deletions

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero-addons
description: KubeZero umbrella chart for various optional cluster addons
type: application
-version: 0.2.2
+version: 0.2.3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@ -1,4 +1,4 @@
{{- if .Values.clusterBackup.enabled }} {{- if and .Values.clusterBackup.enabled .Values.clusterBackup.repository }}
apiVersion: v1 apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
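
For illustration only (the values below are placeholders, not part of this commit): with the tightened condition above, the backup Secret is rendered only when a restic repository is actually configured, e.g.

clusterBackup:
  enabled: true
  repository: "s3:https://s3.amazonaws.com/example-bucket/k8s"   # placeholder repository URL
  password: "change-me"                                          # placeholder restic password

Leaving repository empty now keeps the chart from creating a Secret with blank credentials.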

View File

@@ -2,17 +2,17 @@ apiVersion: v2
name: kubezero-ci
description: KubeZero umbrella chart for all things CI
type: application
-version: 0.2.5
+version: 0.3.0
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- jenkins
- goCD
-- teamcity
- gitea
maintainers:
-- name: Quarky9
+- name: Stefan Reimer
+  email: stefan@zero-downtime.net
dependencies:
- name: kubezero-lib
  version: ">= 0.1.4"
@@ -25,8 +25,9 @@ dependencies:
  version: 4.1.1
  repository: https://dl.gitea.io/charts/
  condition: gitea.enabled
-- name: teamcity
-  version: 0.1.0
-  condition: teamcity.enabled
+- name: jenkins
+  version: 3.9.4
+  repository: https://charts.jenkins.io
+  condition: jenkins.enabled
-kubeVersion: ">= 1.18.0"
+kubeVersion: ">= 1.20.0"
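
A minimal usage sketch (illustrative override, not taken from the diff): the Jenkins chart 3.9.4 from charts.jenkins.io is only deployed when its condition flag is set in the umbrella chart's values, e.g.

jenkins:
  enabled: true   # satisfies the "jenkins.enabled" condition declared above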

View File

@@ -1,18 +0,0 @@
apiVersion: v2
name: teamcity
description: A Helm chart to install a simple TeamCity installation
type: application
version: 0.1.0
appVersion: "2021.2"
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:
- kubezero
- teamcity
maintainers:
- name: Quarky9
dependencies:
- name: kubezero-lib
version: ">= 0.1.4"
repository: https://cdn.zero-downtime.net/charts/
kubeVersion: ">= 1.18.0"

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "teamcity.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "teamcity.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "teamcity.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "teamcity.labels" -}}
helm.sh/chart: {{ include "teamcity.chart" . }}
{{ include "teamcity.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "teamcity.selectorLabels" -}}
app.kubernetes.io/name: {{ include "teamcity.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "teamcity.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "teamcity.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,50 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "teamcity.fullname" . }}-agent
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.agentReplicaCount }}
selector:
matchLabels:
app.kubernetes.io/instance: teamcity-agent
app.kubernetes.io/name: teamcity
template:
metadata:
labels:
app.kubernetes.io/instance: teamcity-agent
app.kubernetes.io/name: teamcity
spec:
# serviceAccountName: {{ include "teamcity.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: "{{ .Chart.Name }}-agent"
securityContext:
{{- toYaml .Values.securityContext | nindent 10 }}
image: "{{ .Values.image.agent.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: SERVER_URL
value: "{{ include "teamcity.fullname" . }}:8111"
resources:
{{- toYaml .Values.resources | nindent 10 }}
volumeMounts:
- name: teamcity-config
mountPath: /data/teamcity_agent/conf
volumes:
- name: teamcity-config
emptyDir: {}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end -}}

View File

@@ -1,18 +0,0 @@
{{- if .Values.istio.enabled }}
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: {{ include "kubezero-lib.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
gateways:
- {{ .Values.istio.gateway }}
hosts:
- {{ .Values.istio.url }}
http:
- route:
- destination:
host: {{ include "teamcity.fullname" . }}
{{- end }}

View File

@@ -1,28 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "teamcity.fullname" . }}-manage-agents
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["list", "get"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "create", "list", "delete"]
- apiGroups: ["extensions", "apps"]
resources: ["deployments"]
verbs: ["list", "get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "teamcity.fullname" . }}-manage-agents
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "teamcity.fullname" . }}-manage-agents
subjects:
- kind: ServiceAccount
name: {{ include "teamcity.serviceAccountName" . }}
{{- end }}

View File

@@ -1,33 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "teamcity.fullname" . }}-data
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
{{- if .Values.pvc.data.storageClass }}
storageClassName: {{ .Values.pvc.data.storageClass }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.pvc.data.storageSize }}
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "teamcity.fullname" . }}-logs
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
{{- if .Values.pvc.logs.storageClass }}
storageClassName: {{ .Values.pvc.logs.storageClass }}
{{- end }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.pvc.logs.storageSize }}

View File

@@ -1,63 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "teamcity.fullname" . }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
{{- include "kubezero-lib.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "kubezero-lib.labels" . | nindent 8 }}
spec:
serviceAccountName: {{ include "teamcity.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 10 }}
image: "{{ .Values.image.server.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: app
containerPort: 8111
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: teamcity-data
mountPath: /data/teamcity_server/datadir
- name: teamcity-logs
mountPath: /opt/teamcity/logs
volumes:
- name: teamcity-data
{{- if .Values.pvc.data }}
persistentVolumeClaim:
claimName: {{ include "teamcity.fullname" . }}-data
{{- else }}
emptyDir: {}
{{- end }}
- name: teamcity-logs
{{- if .Values.pvc.logs }}
persistentVolumeClaim:
claimName: {{ include "teamcity.fullname" . }}-logs
{{- else }}
emptyDir: {}
{{- end -}}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end -}}

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: "{{ include "teamcity.fullname" . }}"
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
protocol: TCP
targetPort: app
name: app
selector:
{{- include "kubezero-lib.selectorLabels" . | nindent 4 }}
sessionAffinity: None

View File

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "teamcity.serviceAccountName" . }}
labels:
{{- include "kubezero-lib.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -1,57 +0,0 @@
# Default values for teamcity.
agentReplicaCount: 0
image:
server:
repository: jetbrains/teamcity-server
agent:
repository: jetbrains/teamcity-agent
tag: ""
pullPolicy: IfNotPresent
pvc:
data:
#storageClass: default
storageSize: 4Gi
logs:
#storageClass: default
storageSize: 1Gi
serviceAccount:
create: true
annotations: {}
name: ""
rbac:
create: true
podAnnotations: {}
podSecurityContext:
fsGroup: 1000
securityContext: {}
service:
type: ClusterIP
port: 8111
istio:
enabled: false
gateway: istio-ingress/private-ingressgateway
url: "" # tc.example.com
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -0,0 +1,18 @@
{{- if and .Values.jenkins.enabled .Values.jenkins.istio.enabled .Values.jenkins.istio.allowBlocks }}
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: {{ .Release.Name }}-jenkins-allowlist
  namespace: istio-ingress
spec:
  selector:
    matchLabels:
      app: istio-ingressgateway
  rules:
  - from:
    - source:
        ipBlocks: {{ .Values.jenkins.istio.allowBlocks | toYaml | nindent 8 }}
    to:
    - operation:
        hosts: [{{ .Values.jenkins.istio.url }}]
{{- end }}
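
A hedged sketch of values that would activate this AuthorizationPolicy (the CIDR ranges and hostname are placeholders):

jenkins:
  enabled: true
  istio:
    enabled: true
    url: jenkins.example.com
    allowBlocks:
    - 10.0.0.0/8      # placeholder VPN/office range
    - 192.0.2.0/24    # placeholder

The rendered policy then limits requests for that hostname at the istio-ingressgateway to the listed source IP blocks.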

View File

@@ -0,0 +1,18 @@
{{- if and .Values.jenkins.enabled .Values.jenkins.istio.enabled }}
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: {{ .Release.Name }}-jenkins
  namespace: {{ template "jenkins.namespace" . }}
spec:
  hosts:
  - {{ .Values.jenkins.istio.url }}
  gateways:
  - {{ .Values.jenkins.istio.gateway }}
  http:
  - route:
    - destination:
        host: {{ .Release.Name }}-jenkins
        port:
          number: 8080
{{- end }}

View File

@@ -70,5 +70,17 @@ gitea:
jenkins:
  enabled: false
-teamcity:
-  enabled: false
+  controller:
+    tagLabel: alpine
+    disableRememberMe: true
+    prometheus:
+      enabled: false
+    testEnabled: false
+  persistence:
+    size: "2Gi"
+  istio:
+    enabled: false
+    gateway: istio-ingress/private-ingressgateway
+    url: jenkins.example.com
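
An illustrative per-cluster override (the hostname is a placeholder) that turns Jenkins on and exposes it through the private Istio gateway defaulted above:

jenkins:
  enabled: true
  istio:
    enabled: true
    gateway: istio-ingress/private-ingressgateway
    url: jenkins.mycorp.example.com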

View File

@@ -2,7 +2,7 @@ apiVersion: v2
name: kubezero
description: KubeZero - Root App of Apps chart
type: application
-version: 1.21.8
+version: 1.21.8-3
home: https://kubezero.com
icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
keywords:

View File

@@ -1,7 +1,22 @@
{{- define "addons-values" }}
-{{- with index .Values "addons" "metallb" }}
-metallb:
+{{- with .Values.addons.clusterBackup }}
+clusterBackup:
+{{- toYaml . | nindent 2 }}
+{{- end }}
+{{- with index .Values "addons" "aws-node-termination-handler" }}
+aws-node-termination-handler:
+{{- toYaml . | nindent 2 }}
+{{- end }}
+{{- with .Values.addons.fuseDevicePlugin }}
+fuseDevicePlugin:
+{{- toYaml . | nindent 2 }}
+{{- end }}
+{{- with .Values.addons.k8sEcrLoginRenew }}
+k8sEcrLoginRenew:
{{- toYaml . | nindent 2 }}
{{- end }}
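
A quick illustration of what the extended helper emits (hypothetical cluster values; the consuming side is assumed to be the root app-of-apps and is not shown in this commit): given

addons:
  clusterBackup:
    enabled: true
  fuseDevicePlugin:
    enabled: false

the "addons-values" template renders

clusterBackup:
  enabled: true
fuseDevicePlugin:
  enabled: false

which can then be handed to the kubezero-addons chart as its values.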

View File

@@ -8,7 +8,7 @@ HighAvailableControlplane: false
addons:
  enabled: false
-  targetRevision: 0.1.0
+  targetRevision: 0.2.3
network:
  enabled: false
@@ -28,6 +28,10 @@ kiam:
storage:
  enabled: false
  targetRevision: 0.4.3
+  aws-ebs-csi-driver:
+    enabled: false
+  aws-efs-csi-driver:
+    enabled: false
istio:
  enabled: false
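
For context, an illustrative values override (not part of the diff) that flips the new EBS CSI flag on for a cluster:

storage:
  enabled: true
  aws-ebs-csi-driver:
    enabled: true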

View File

@@ -199,6 +199,7 @@ elif [[ "$1" == 'node-upgrade' ]]; then
  if [ -n "$restic_repo" ]; then
    yq -i eval '
      .addons.clusterBackup.enabled = "true" | .addons.clusterBackup.repository = strenv(restic_repo) | .addons.clusterBackup.password = strenv(restic_pw)
+     | .addons.clusterBackup.image.tag = strenv(KUBE_VERSION)
      | .addons.clusterBackup.extraEnv[0].name = "AWS_DEFAULT_REGION" | .addons.clusterBackup.extraEnv[0].value = strenv(REGION)
    ' ${HOSTFS}/etc/kubernetes/kubezero.yaml
  fi
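
A sketch of the effect (all concrete values are placeholders standing in for the environment variables): after this yq edit, kubezero.yaml on the node would contain something like

addons:
  clusterBackup:
    enabled: "true"
    repository: "s3:https://s3.amazonaws.com/example-bucket/backup"   # from $restic_repo
    password: "example-restic-password"                               # from $restic_pw
    image:
      tag: v1.21.8                                                    # from $KUBE_VERSION
    extraEnv:
    - name: AWS_DEFAULT_REGION
      value: eu-central-1                                             # from $REGION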

View File

@@ -18,7 +18,7 @@ kubectl delete deployment efs-csi-controller -n kube-system
kubectl delete daemonSet efs-csi-node -n kube-system

# Remove calico Servicemonitor in case still around
-# kubectl delete servicemonitor calico-node -n kube-system
+kubectl delete servicemonitor calico-node -n kube-system

# Upgrade Prometheus stack, requires state metrics to be removed first
kubectl delete deployment metrics-kube-state-metrics -n monitoring