diff --git a/admin/dev_apply.sh b/admin/dev_apply.sh index 13b9492a..38731d64 100755 --- a/admin/dev_apply.sh +++ b/admin/dev_apply.sh @@ -4,7 +4,7 @@ set -x #VERSION="latest" -KUBE_VERSION="v1.27.7" +KUBE_VERSION="v1.27.8" WORKDIR=$(mktemp -p /tmp -d kubezero.XXX) SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) diff --git a/charts/kubeadm/Chart.yaml b/charts/kubeadm/Chart.yaml index c2622344..f7a04c12 100644 --- a/charts/kubeadm/Chart.yaml +++ b/charts/kubeadm/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: kubeadm description: KubeZero Kubeadm cluster config type: application -version: 1.27.7 +version: 1.27.8 home: https://kubezero.com icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png keywords: diff --git a/charts/kubeadm/templates/apiserver/audit-webhook.yaml b/charts/kubeadm/templates/apiserver/audit-webhook.yaml index c6024128..126a8289 100644 --- a/charts/kubeadm/templates/apiserver/audit-webhook.yaml +++ b/charts/kubeadm/templates/apiserver/audit-webhook.yaml @@ -3,7 +3,7 @@ kind: Config clusters: - name: falco cluster: - server: http://falco-control-plane-k8saudit-webhook:9765/k8s-audit + server: http://falco-k8saudit-webhook:9765/k8s-audit contexts: - context: cluster: falco diff --git a/charts/kubezero-addons/Chart.yaml b/charts/kubezero-addons/Chart.yaml index e5a9c210..6e17073f 100644 --- a/charts/kubezero-addons/Chart.yaml +++ b/charts/kubezero-addons/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: kubezero-addons description: KubeZero umbrella chart for various optional cluster addons type: application -version: 0.8.3 -appVersion: v1.26 +version: 0.8.4 +appVersion: v1.27 home: https://kubezero.com icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png keywords: @@ -15,7 +15,6 @@ keywords: - sealed-secrets - external-dns - aws-node-termination-handler - - falco maintainers: - name: Stefan Reimer email: stefan@zero-downtime.net @@ -45,9 +44,4 @@ dependencies: version: 1.5.0 repository: https://twin.github.io/helm-charts condition: aws-eks-asg-rolling-update-handler.enabled - - name: falco - version: 3.8.4 - repository: https://falcosecurity.github.io/charts - condition: falco-control-plane.enabled - alias: falco-control-plane kubeVersion: ">= 1.26.0" diff --git a/charts/kubezero-addons/README.md b/charts/kubezero-addons/README.md index 23394be7..fff4a5d4 100644 --- a/charts/kubezero-addons/README.md +++ b/charts/kubezero-addons/README.md @@ -1,6 +1,6 @@ # kubezero-addons -![Version: 0.8.2](https://img.shields.io/badge/Version-0.8.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.26](https://img.shields.io/badge/AppVersion-v1.26-informational?style=flat-square) +![Version: 0.8.4](https://img.shields.io/badge/Version-0.8.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.27](https://img.shields.io/badge/AppVersion-v1.27-informational?style=flat-square) KubeZero umbrella chart for various optional cluster addons @@ -18,12 +18,11 @@ Kubernetes: `>= 1.26.0` | Repository | Name | Version | |------------|------|---------| -| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.13.0 | -| https://falcosecurity.github.io/charts | falco-control-plane(falco) | 3.7.1 | +| https://bitnami-labs.github.io/sealed-secrets | sealed-secrets | 2.13.2 | | https://kubernetes-sigs.github.io/external-dns/ | external-dns | 
1.13.1 | -| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.29.3 | -| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.14.1 | -| https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.4.0 | +| https://kubernetes.github.io/autoscaler | cluster-autoscaler | 9.29.5 | +| https://nvidia.github.io/k8s-device-plugin | nvidia-device-plugin | 0.14.2 | +| https://twin.github.io/helm-charts | aws-eks-asg-rolling-update-handler | 1.5.0 | | oci://public.ecr.aws/aws-ec2/helm | aws-node-termination-handler | 0.22.0 | # MetalLB @@ -42,6 +41,8 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/) | Key | Type | Default | Description | |-----|------|---------|-------------| +| aws-eks-asg-rolling-update-handler.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| aws-eks-asg-rolling-update-handler.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | | aws-eks-asg-rolling-update-handler.enabled | bool | `false` | | | aws-eks-asg-rolling-update-handler.environmentVars[0].name | string | `"CLUSTER_NAME"` | | | aws-eks-asg-rolling-update-handler.environmentVars[0].value | string | `""` | | @@ -62,11 +63,14 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/) | aws-eks-asg-rolling-update-handler.environmentVars[8].name | string | `"AWS_STS_REGIONAL_ENDPOINTS"` | | | aws-eks-asg-rolling-update-handler.environmentVars[8].value | string | `"regional"` | | | aws-eks-asg-rolling-update-handler.image.repository | string | `"twinproduction/aws-eks-asg-rolling-update-handler"` | | -| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.8.1"` | | +| aws-eks-asg-rolling-update-handler.image.tag | string | `"v1.8.2"` | | | aws-eks-asg-rolling-update-handler.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | | aws-eks-asg-rolling-update-handler.resources.limits.memory | string | `"128Mi"` | | | aws-eks-asg-rolling-update-handler.resources.requests.cpu | string | `"10m"` | | | aws-eks-asg-rolling-update-handler.resources.requests.memory | string | `"32Mi"` | | +| aws-eks-asg-rolling-update-handler.securityContext.runAsNonRoot | bool | `true` | | +| aws-eks-asg-rolling-update-handler.securityContext.runAsUser | int | `1001` | | +| aws-eks-asg-rolling-update-handler.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | | aws-eks-asg-rolling-update-handler.tolerations[0].effect | string | `"NoSchedule"` | | | aws-eks-asg-rolling-update-handler.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | aws-node-termination-handler.checkASGTagBeforeDraining | bool | `false` | | @@ -129,32 +133,6 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/) | external-dns.tolerations[0].effect | string | `"NoSchedule"` | | | external-dns.tolerations[0].key | string | `"node-role.kubernetes.io/control-plane"` | | | external-dns.triggerLoopOnEvent | bool | `true` | | -| falco-control-plane.collectors | object | `{"enabled":false}` | Disable the collectors, no syscall events to enrich with metadata. | -| falco-control-plane.controller | object | `{"deployment":{"replicas":1},"kind":"deployment"}` | Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurabale. | -| falco-control-plane.controller.deployment.replicas | int | `1` | Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing. 
For more info check the section on Plugins in the README.md file. | -| falco-control-plane.driver | object | `{"enabled":false}` | Disable the drivers since we want to deploy only the k8saudit plugin. | -| falco-control-plane.enabled | bool | `false` | | -| falco-control-plane.falco.load_plugins[0] | string | `"k8saudit"` | | -| falco-control-plane.falco.load_plugins[1] | string | `"json"` | | -| falco-control-plane.falco.plugins[0].init_config.maxEventBytes | int | `1048576` | | -| falco-control-plane.falco.plugins[0].library_path | string | `"libk8saudit.so"` | | -| falco-control-plane.falco.plugins[0].name | string | `"k8saudit"` | | -| falco-control-plane.falco.plugins[0].open_params | string | `"http://:9765/k8s-audit"` | | -| falco-control-plane.falco.plugins[1].init_config | string | `""` | | -| falco-control-plane.falco.plugins[1].library_path | string | `"libjson.so"` | | -| falco-control-plane.falco.plugins[1].name | string | `"json"` | | -| falco-control-plane.falco.rules_file[0] | string | `"/etc/falco/k8s_audit_rules.yaml"` | | -| falco-control-plane.falco.rules_file[1] | string | `"/etc/falco/rules.d"` | | -| falco-control-plane.falcoctl.artifact.follow.enabled | bool | `true` | Enable the sidecar container. We do not support it yet for plugins. It is used only for rules feed such as k8saudit-rules rules. | -| falco-control-plane.falcoctl.artifact.install.enabled | bool | `true` | Enable the init container. We do not recommend installing (or following) plugins for security reasons since they are executable objects. | -| falco-control-plane.falcoctl.config.artifact.follow.refs | list | `["k8saudit-rules:0.6"]` | List of artifacts to be followed by the falcoctl sidecar container. Only rulesfiles, we do no recommend plugins for security reasonts since they are executable objects. | -| falco-control-plane.falcoctl.config.artifact.install.refs | list | `["k8saudit-rules:0.6"]` | List of artifacts to be installed by the falcoctl init container. Only rulesfiles, we do no recommend plugins for security reasonts since they are executable objects. | -| falco-control-plane.falcoctl.config.artifact.install.resolveDeps | bool | `false` | Do not resolve the depenencies for artifacts. By default is true, but for our use case we disable it. 
| -| falco-control-plane.fullnameOverride | string | `"falco-control-plane"` | | -| falco-control-plane.nodeSelector."node-role.kubernetes.io/control-plane" | string | `""` | | -| falco-control-plane.services[0].name | string | `"k8saudit-webhook"` | | -| falco-control-plane.services[0].ports[0].port | int | `9765` | | -| falco-control-plane.services[0].ports[0].protocol | string | `"TCP"` | | | forseti.aws.iamRoleArn | string | `""` | "arn:aws:iam::${AWS::AccountId}:role/${AWS::Region}.${ClusterName}.kubezeroForseti" | | forseti.aws.region | string | `""` | | | forseti.enabled | bool | `false` | | @@ -162,7 +140,7 @@ Device plugin for [AWS Neuron](https://aws.amazon.com/machine-learning/neuron/) | forseti.image.tag | string | `"v0.1.2"` | | | fuseDevicePlugin.enabled | bool | `false` | | | fuseDevicePlugin.image.name | string | `"public.ecr.aws/zero-downtime/fuse-device-plugin"` | | -| fuseDevicePlugin.image.tag | string | `"1.2.0"` | | +| fuseDevicePlugin.image.tag | string | `"v1.2.0"` | | | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node.kubernetes.io/instance-type"` | | | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | | | nvidia-device-plugin.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"g5.xlarge"` | | diff --git a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/Chart.yaml b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/Chart.yaml index 5581bdc5..152dbcc9 100644 --- a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/Chart.yaml +++ b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/Chart.yaml @@ -5,4 +5,4 @@ home: https://github.com/TwiN/aws-eks-asg-rolling-update-handler maintainers: - name: TwiN name: aws-eks-asg-rolling-update-handler -version: 1.4.0 +version: 1.5.0 diff --git a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/README.md b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/README.md index 9fecbedf..3e55f192 100644 --- a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/README.md +++ b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/README.md @@ -12,3 +12,5 @@ The following table lists the configurable parameters of the aws-eks-asg-rolling | resources | CPU/memory resource requests/limits | no | `{}` | | podAnnotations | Annotations to add to the aws-eks-asg-rolling-update-handler pod configuration | no | `{}` | | podLabels | Labels to add to the aws-eks-asg-rolling-update-handler pod configuration | no | `{}` | +| securityContext | Pod security context | no | `{}` | +| containerSecurityContext | Container security context | no | `{}` | diff --git a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml index e742082b..d460861e 100644 --- a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml +++ b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/templates/deployment.yaml @@ -17,12 +17,16 @@ spec: {{ include "aws-eks-asg-rolling-update-handler.labels" . | indent 8 }} {{- with .Values.podLabels }} {{- toYaml . 
| nindent 8 }} - {{- end }} + {{- end }} annotations: {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} spec: + {{- if .Values.securityContext }} + securityContext: + {{ toYaml .Values.securityContext | nindent 8 | trim }} + {{- end }} automountServiceAccountToken: true serviceAccountName: {{ template "aws-eks-asg-rolling-update-handler.serviceAccountName" . }} restartPolicy: Always @@ -31,6 +35,10 @@ spec: - name: {{ template "aws-eks-asg-rolling-update-handler.name" . }} image: {{ .Values.image.repository }}:{{ .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.containerSecurityContext }} + securityContext: + {{ toYaml .Values.containerSecurityContext | nindent 12 | trim }} + {{- end }} env: {{- toYaml .Values.environmentVars | nindent 12 }} {{- with .Values.resources }} diff --git a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/values.yaml b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/values.yaml index d2747ffa..d5c049d3 100644 --- a/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/values.yaml +++ b/charts/kubezero-addons/charts/aws-eks-asg-rolling-update-handler/values.yaml @@ -37,3 +37,15 @@ serviceAccount: create: true #name: aws-eks-asg-rolling-update-handler annotations: {} + +securityContext: {} + # runAsNonRoot: true + # runAsUser: 1001 + # seccompProfile: +# type: RuntimeDefault + +containerSecurityContext: {} + # allowPrivilegeEscalation: false + # capabilities: + # drop: +# - ALL diff --git a/charts/kubezero-addons/values.yaml b/charts/kubezero-addons/values.yaml index ca5e10f7..5942df77 100644 --- a/charts/kubezero-addons/values.yaml +++ b/charts/kubezero-addons/values.yaml @@ -54,7 +54,7 @@ aws-eks-asg-rolling-update-handler: enabled: false image: repository: twinproduction/aws-eks-asg-rolling-update-handler - tag: v1.8.1 + tag: v1.8.2 environmentVars: - name: CLUSTER_NAME @@ -77,6 +77,18 @@ aws-eks-asg-rolling-update-handler: - name: AWS_STS_REGIONAL_ENDPOINTS value: "regional" + securityContext: + runAsNonRoot: true + runAsUser: 1001 + seccompProfile: + type: RuntimeDefault + + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + resources: requests: cpu: 10m @@ -189,7 +201,7 @@ cluster-autoscaler: image: repository: registry.k8s.io/autoscaling/cluster-autoscaler - tag: v1.26.4 + tag: v1.27.3 autoDiscovery: clusterName: "" @@ -259,72 +271,3 @@ external-dns: #- istio-gateway provider: inmemory - -falco-control-plane: - enabled: false - - fullnameOverride: falco-control-plane - - # -- Disable the drivers since we want to deploy only the k8saudit plugin. - driver: - enabled: false - - # -- Disable the collectors, no syscall events to enrich with metadata. - collectors: - enabled: false - - nodeSelector: - node-role.kubernetes.io/control-plane: "" - - # -- Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurabale. - controller: - kind: deployment - deployment: - # -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing. - # For more info check the section on Plugins in the README.md file. - replicas: 1 - - - falcoctl: - artifact: - install: - # -- Enable the init container. We do not recommend installing (or following) plugins for security reasons since they are executable objects. - enabled: true - follow: - # -- Enable the sidecar container. We do not support it yet for plugins. 
It is used only for rules feed such as k8saudit-rules rules. - enabled: true - config: - artifact: - install: - # -- Do not resolve the depenencies for artifacts. By default is true, but for our use case we disable it. - resolveDeps: false - # -- List of artifacts to be installed by the falcoctl init container. - # Only rulesfiles, we do no recommend plugins for security reasonts since they are executable objects. - refs: [k8saudit-rules:0.6] - follow: - # -- List of artifacts to be followed by the falcoctl sidecar container. - # Only rulesfiles, we do no recommend plugins for security reasonts since they are executable objects. - refs: [k8saudit-rules:0.6] - - services: - - name: k8saudit-webhook - ports: - - port: 9765 # See plugin open_params - protocol: TCP - - falco: - rules_file: - - /etc/falco/k8s_audit_rules.yaml - - /etc/falco/rules.d - plugins: - - name: k8saudit - library_path: libk8saudit.so - init_config: - maxEventBytes: 1048576 - # sslCertificate: /etc/falco/falco.pem - open_params: "http://:9765/k8s-audit" - - name: json - library_path: libjson.so - init_config: "" - # Plugins that Falco will load. Note: the same plugins are installed by the falcoctl-artifact-install init container. - load_plugins: [k8saudit, json] diff --git a/charts/kubezero-falco/Chart.yaml b/charts/kubezero-falco/Chart.yaml new file mode 100644 index 00000000..488b94de --- /dev/null +++ b/charts/kubezero-falco/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: kubezero-falco +description: Falco Container Security and Audit components +type: application +version: 0.1.0 +home: https://kubezero.com +icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png +keywords: + - kubezero + - falco +maintainers: + - name: Stefan Reimer + email: stefan@zero-downtime.net +dependencies: + - name: kubezero-lib + version: ">= 0.1.6" + repository: https://cdn.zero-downtime.net/charts/ + - name: falco + version: 3.8.4 + repository: https://falcosecurity.github.io/charts + condition: k8saudit.enabled + alias: k8saudit +kubeVersion: ">= 1.26.0" diff --git a/charts/kubezero-falco/files/rules/k8s_audit_rules.yaml b/charts/kubezero-falco/files/rules/k8s_audit_rules.yaml new file mode 100644 index 00000000..bd0675d9 --- /dev/null +++ b/charts/kubezero-falco/files/rules/k8s_audit_rules.yaml @@ -0,0 +1,762 @@ +# SPDX-License-Identifier: Apache-2.0 +# +# Copyright (C) 2023 The Falco Authors. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +- required_engine_version: 15 + +- required_plugin_versions: + - name: k8saudit + version: 0.6.0 + alternatives: + - name: k8saudit-eks + version: 0.2.0 + - name: json + version: 0.7.0 + +# Like always_true/always_false, but works with k8s audit events +- macro: k8s_audit_always_true + condition: (jevt.rawtime exists) + +- macro: k8s_audit_never_true + condition: (jevt.rawtime=0) + +# Generally only consider audit events once the response has completed +- list: k8s_audit_stages + items: ["ResponseComplete"] + +# Generally exclude users starting with "system:" +- macro: non_system_user + condition: (not ka.user.name startswith "system:") + +# This macro selects the set of Audit Events used by the below rules. +- macro: kevt + condition: (jevt.value[/stage] in (k8s_audit_stages)) + +- macro: kevt_started + condition: (jevt.value[/stage]=ResponseStarted) + +# If you wish to restrict activity to a specific set of users, override/append to this list. +# users created by kops are included +- list: vertical_pod_autoscaler_users + items: ["vpa-recommender", "vpa-updater"] + +- list: allowed_k8s_users + items: [ + "minikube", "minikube-user", "kubelet", "kops", "admin", "kube", "kube-proxy", "kube-apiserver-healthcheck", + "kubernetes-admin", + vertical_pod_autoscaler_users, + cluster-autoscaler, + "system:addon-manager", + "cloud-controller-manager", + "system:kube-controller-manager" + ] + +- list: eks_allowed_k8s_users + items: [ + "eks:node-manager", + "eks:certificate-controller", + "eks:fargate-scheduler", + "eks:k8s-metrics", + "eks:authenticator", + "eks:cluster-event-watcher", + "eks:nodewatcher", + "eks:pod-identity-mutating-webhook", + "eks:cloud-controller-manager", + "eks:vpc-resource-controller", + "eks:addon-manager", + ] +- +- rule: Disallowed K8s User + desc: Detect any k8s operation by users outside of an allowed set of users. + condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) and not ka.user.name in (eks_allowed_k8s_users) + output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# In a local/user rules file, you could override this macro to +# explicitly enumerate the container images that you want to run in +# your environment. In this main falco rules file, there isn't any way +# to know all the containers that can run, so any container is +# allowed, by using the always_true macro. 
In the overridden macro, the condition +# would look something like (ka.req.pod.containers.image.repository in (my-repo/my-image)) +- macro: allowed_k8s_containers + condition: (k8s_audit_always_true) + +- macro: response_successful + condition: (ka.response.code startswith 2) + +- macro: kget + condition: ka.verb=get + +- macro: kcreate + condition: ka.verb=create + +- macro: kmodify + condition: (ka.verb in (create,update,patch)) + +- macro: kdelete + condition: ka.verb=delete + +- macro: pod + condition: ka.target.resource=pods and not ka.target.subresource exists + +- macro: pod_subresource + condition: ka.target.resource=pods and ka.target.subresource exists + +- macro: deployment + condition: ka.target.resource=deployments + +- macro: service + condition: ka.target.resource=services + +- macro: configmap + condition: ka.target.resource=configmaps + +- macro: namespace + condition: ka.target.resource=namespaces + +- macro: serviceaccount + condition: ka.target.resource=serviceaccounts + +- macro: clusterrole + condition: ka.target.resource=clusterroles + +- macro: clusterrolebinding + condition: ka.target.resource=clusterrolebindings + +- macro: role + condition: ka.target.resource=roles + +- macro: secret + condition: ka.target.resource=secrets + +- macro: health_endpoint + condition: ka.uri=/healthz or ka.uri startswith /healthz? + +- macro: live_endpoint + condition: ka.uri=/livez or ka.uri startswith /livez? + +- macro: ready_endpoint + condition: ka.uri=/readyz or ka.uri startswith /readyz? + +- rule: Create Disallowed Pod + desc: > + Detect an attempt to start a pod with a container image outside of a list of allowed images. + condition: kevt and pod and kcreate and not allowed_k8s_containers + output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- rule: Create Privileged Pod + desc: > + Detect an attempt to start a pod with a privileged container + condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (falco_privileged_images) + output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- macro: sensitive_vol_mount + condition: > + (ka.req.pod.volumes.hostpath intersects (/proc, /var/run/docker.sock, /, /etc, /root, /var/run/crio/crio.sock, /run/containerd/containerd.sock, /home/admin, /var/lib/kubelet, /var/lib/kubelet/pki, /etc/kubernetes, /etc/kubernetes/manifests)) + +- rule: Create Sensitive Mount Pod + desc: > + Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc). + Exceptions are made for known trusted images. 
+ condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (falco_sensitive_mount_images) + output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace resource=%ka.target.resource images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes]) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# These container images are allowed to run with hostnetwork=true +# TODO: Remove k8s.gcr.io reference after 01/Dec/2023 +- list: falco_hostnetwork_images + items: [ + gcr.io/google-containers/prometheus-to-sd, + gcr.io/projectcalico-org/typha, + gcr.io/projectcalico-org/node, + gke.gcr.io/gke-metadata-server, + gke.gcr.io/kube-proxy, + gke.gcr.io/netd-amd64, + k8s.gcr.io/ip-masq-agent-amd64, + k8s.gcr.io/prometheus-to-sd, + registry.k8s.io/ip-masq-agent-amd64, + registry.k8s.io/prometheus-to-sd + ] + +# Corresponds to K8s CIS Benchmark 1.7.4 +- rule: Create HostNetwork Pod + desc: Detect an attempt to start a pod using the host network. + condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostnetwork_images) + output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- list: falco_hostpid_images + items: [] + +- rule: Create HostPid Pod + desc: Detect an attempt to start a pod using the host pid namespace. + condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostpid_images) + output: Pod started using host pid namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- list: falco_hostipc_images + items: [] + +- rule: Create HostIPC Pod + desc: Detect an attempt to start a pod using the host ipc namespace. 
+ condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (falco_hostipc_images) + output: Pod started using host ipc namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- macro: user_known_node_port_service + condition: (k8s_audit_never_true) + +- rule: Create NodePort Service + desc: > + Detect an attempt to start a service with a NodePort service type + condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service + output: NodePort Service Created (user=%ka.user.name service=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ports=%ka.req.service.ports) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- macro: contains_private_credentials + condition: > + (ka.req.configmap.obj contains "aws_access_key_id" or + ka.req.configmap.obj contains "aws-access-key-id" or + ka.req.configmap.obj contains "aws_s3_access_key_id" or + ka.req.configmap.obj contains "aws-s3-access-key-id" or + ka.req.configmap.obj contains "password" or + ka.req.configmap.obj contains "passphrase") + +- rule: Create/Modify Configmap With Private Credentials + desc: > + Detect creating/modifying a configmap containing a private credential (aws key, password, etc.) + condition: kevt and configmap and kmodify and contains_private_credentials + output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb resource=%ka.target.resource configmap=%ka.req.configmap.name config=%ka.req.configmap.obj) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# Corresponds to K8s CIS Benchmark, 1.1.1. +- rule: Anonymous Request Allowed + desc: > + Detect any request made by the anonymous user that was allowed + condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint and not live_endpoint and not ready_endpoint + output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# Roughly corresponds to K8s CIS Benchmark, 1.1.12. In this case, +# notifies an attempt to exec/attach to a privileged container. + +# Ideally, we'd add a more stringent rule that detects attaches/execs +# to a privileged pod, but that requires the engine for k8s audit +# events to be stateful, so it could know if a container named in an +# attach request was created privileged or not. For now, we have a +# less severe rule that detects attaches/execs to any pod. +# +# For the same reason, you can't use things like image names/prefixes, +# as the event that creates the pod (which has the images) is a +# separate event than the actual exec/attach to the pod. 
+ +- macro: user_known_exec_pod_activities + condition: (k8s_audit_never_true) + +- rule: Attach/Exec Pod + desc: > + Detect any attempt to attach/exec to a pod + condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities + output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command]) + priority: NOTICE + source: k8s_audit + tags: [k8s] + +- macro: user_known_pod_debug_activities + condition: (k8s_audit_never_true) + +# Only works when feature gate EphemeralContainers is enabled +- rule: EphemeralContainers Created + desc: > + Detect any ephemeral container created + condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities + output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image]) + priority: NOTICE + source: k8s_audit + tags: [k8s] + +# In a local/user rules fie, you can append to this list to add additional allowed namespaces +- list: allowed_namespaces + items: [kube-system, kube-public, default] + +- rule: Create Disallowed Namespace + desc: Detect any attempt to create a namespace outside of a set of known namespaces + condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces) + output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name resource=%ka.target.resource) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# Only defined for backwards compatibility. Use the more specific +# user_allowed_kube_namespace_image_list instead. +- list: user_trusted_image_list + items: [] + +- list: user_allowed_kube_namespace_image_list + items: [user_trusted_image_list] + +# Only defined for backwards compatibility. Use the more specific +# allowed_kube_namespace_image_list instead. 
+- list: k8s_image_list + items: [] + +# TODO: Remove k8s.gcr.io reference after 01/Dec/2023 +- list: allowed_kube_namespace_image_list + items: [ + gcr.io/google-containers/prometheus-to-sd, + gcr.io/projectcalico-org/node, + gke.gcr.io/addon-resizer, + gke.gcr.io/heapster, + gke.gcr.io/gke-metadata-server, + k8s.gcr.io/ip-masq-agent-amd64, + k8s.gcr.io/kube-apiserver, + registry.k8s.io/ip-masq-agent-amd64, + registry.k8s.io/kube-apiserver, + gke.gcr.io/kube-proxy, + gke.gcr.io/netd-amd64, + gke.gcr.io/watcher-daemonset, + k8s.gcr.io/addon-resizer, + k8s.gcr.io/prometheus-to-sd, + k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64, + k8s.gcr.io/k8s-dns-kube-dns-amd64, + k8s.gcr.io/k8s-dns-sidecar-amd64, + k8s.gcr.io/metrics-server-amd64, + registry.k8s.io/addon-resizer, + registry.k8s.io/prometheus-to-sd, + registry.k8s.io/k8s-dns-dnsmasq-nanny-amd64, + registry.k8s.io/k8s-dns-kube-dns-amd64, + registry.k8s.io/k8s-dns-sidecar-amd64, + registry.k8s.io/metrics-server-amd64, + kope/kube-apiserver-healthcheck, + k8s_image_list + ] + +- macro: allowed_kube_namespace_pods + condition: (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or + ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list)) + +# Detect any new pod created in the kube-system namespace +- rule: Pod Created in Kube Namespace + desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces + condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods + output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- list: user_known_sa_list + items: [] + +- list: known_sa_list + items: [ + coredns, + coredns-autoscaler, + cronjob-controller, + daemon-set-controller, + deployment-controller, + disruption-controller, + endpoint-controller, + endpointslice-controller, + endpointslicemirroring-controller, + generic-garbage-collector, + horizontal-pod-autoscaler, + job-controller, + namespace-controller, + node-controller, + persistent-volume-binder, + pod-garbage-collector, + pv-protection-controller, + pvc-protection-controller, + replicaset-controller, + resourcequota-controller, + root-ca-cert-publisher, + service-account-controller, + statefulset-controller + ] + +- macro: trusted_sa + condition: (ka.target.name in (known_sa_list, user_known_sa_list)) + +# Detect creating a service account in the kube-system/kube-public namespace +- rule: Service Account Created in Kube Namespace + desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces + condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa + output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# Detect any modify/delete to any ClusterRole starting with +# "system:". "system:coredns" is excluded as changes are expected in +# normal operation. 
+- rule: System ClusterRole Modified/Deleted + desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system + condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and + not ka.target.name in (system:coredns, system:managed-certificate-controller) + output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.verb) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# Detect any attempt to create a ClusterRoleBinding to the cluster-admin user +# (expand this to any built-in cluster role that does "sensitive" things) +- rule: Attach to cluster-admin Role + desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user + condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin + output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- rule: ClusterRole With Wildcard Created + desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs + condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*")) + output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- macro: writable_verbs + condition: > + (ka.req.role.rules.verbs intersects (create, update, patch, delete, deletecollection)) + +- rule: ClusterRole With Write Privileges Created + desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions + condition: kevt and (role or clusterrole) and kcreate and writable_verbs + output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules) + priority: NOTICE + source: k8s_audit + tags: [k8s] + +- rule: ClusterRole With Pod Exec Created + desc: Detect any attempt to create a Role/ClusterRole that can exec to pods + condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec") + output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# The rules below this point are less discriminatory and generally +# represent a stream of activity for a cluster. If you wish to disable +# these events, modify the following macro. 
+- macro: consider_activity_events + condition: (k8s_audit_always_true) + +- macro: kactivity + condition: (kevt and consider_activity_events) + +- rule: K8s Deployment Created + desc: Detect any attempt to create a deployment + condition: (kactivity and kcreate and deployment and response_successful) + output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Deployment Deleted + desc: Detect any attempt to delete a deployment + condition: (kactivity and kdelete and deployment and response_successful) + output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Service Created + desc: Detect any attempt to create a service + condition: (kactivity and kcreate and service and response_successful) + output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Service Deleted + desc: Detect any attempt to delete a service + condition: (kactivity and kdelete and service and response_successful) + output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s ConfigMap Created + desc: Detect any attempt to create a configmap + condition: (kactivity and kcreate and configmap and response_successful) + output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s ConfigMap Deleted + desc: Detect any attempt to delete a configmap + condition: (kactivity and kdelete and configmap and response_successful) + output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Namespace Created + desc: Detect any attempt to create a namespace + condition: (kactivity and kcreate and namespace and response_successful) + output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Namespace Deleted + desc: Detect any attempt to delete a namespace + condition: (kactivity and non_system_user and kdelete and namespace and response_successful) + output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Serviceaccount Created + desc: Detect any attempt to create a service account + condition: (kactivity and kcreate and 
serviceaccount and response_successful) + output: K8s Serviceaccount Created (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Serviceaccount Deleted + desc: Detect any attempt to delete a service account + condition: (kactivity and kdelete and serviceaccount and response_successful) + output: K8s Serviceaccount Deleted (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Role/Clusterrole Created + desc: Detect any attempt to create a cluster role/role + condition: (kactivity and kcreate and (clusterrole or role) and response_successful) + output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Role/Clusterrole Deleted + desc: Detect any attempt to delete a cluster role/role + condition: (kactivity and kdelete and (clusterrole or role) and response_successful) + output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Role/Clusterrolebinding Created + desc: Detect any attempt to create a clusterrolebinding + condition: (kactivity and kcreate and clusterrolebinding and response_successful) + output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name resource=%ka.target.resource subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Role/Clusterrolebinding Deleted + desc: Detect any attempt to delete a clusterrolebinding + condition: (kactivity and kdelete and clusterrolebinding and response_successful) + output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Secret Created + desc: Detect any attempt to create a secret. Service account tokens are excluded. + condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful) + output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Secret Deleted + desc: Detect any attempt to delete a secret. Service account tokens are excluded. 
+ condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful) + output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: INFO + source: k8s_audit + tags: [k8s] + +- rule: K8s Secret Get Successfully + desc: > + Detect any attempt to get a secret. Service account tokens are excluded. + condition: > + secret and kget + and kactivity + and response_successful + output: K8s Secret Get Successfully (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: ERROR + source: k8s_audit + tags: [k8s] + +- rule: K8s Secret Get Unsuccessfully Tried + desc: > + Detect an unsuccessful attempt to get the secret. Service account tokens are excluded. + condition: > + secret and kget + and kactivity + and not response_successful + output: K8s Secret Get Unsuccessfully Tried (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason) + priority: WARNING + source: k8s_audit + tags: [k8s] + +# This rule generally matches all events, and as a result is disabled +# by default. If you wish to enable these events, modify the +# following macro. +# condition: (jevt.rawtime exists) +- macro: consider_all_events + condition: (k8s_audit_never_true) + +- macro: kall + condition: (kevt and consider_all_events) + +- rule: All K8s Audit Events + desc: Match all K8s Audit Events + condition: kall + output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj) + priority: DEBUG + source: k8s_audit + tags: [k8s] + + +# This macro disables following rule, change to k8s_audit_never_true to enable it +- macro: allowed_full_admin_users + condition: (k8s_audit_always_true) + +# This list includes some of the default user names for an administrator in several K8s installations +- list: full_admin_k8s_users + items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"] + +# This rules detect an operation triggered by an user name that is +# included in the list of those that are default administrators upon +# cluster creation. This may signify a permission setting too broader. +# As we can't check for role of the user on a general ka.* event, this +# may or may not be an administrator. Customize the full_admin_k8s_users +# list to your needs, and activate at your discretion. + +# # How to test: +# # Execute any kubectl command connected using default cluster user, as: +# kubectl create namespace rule-test + +- rule: Full K8s Administrative Access + desc: Detect any k8s operation by a user name that may be an administrator with full access. 
+ condition: > + kevt + and non_system_user + and ka.user.name in (full_admin_k8s_users) + and not allowed_full_admin_users + output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code) + priority: WARNING + source: k8s_audit + tags: [k8s] + +- macro: ingress + condition: ka.target.resource=ingresses + +- macro: ingress_tls + condition: (jevt.value[/requestObject/spec/tls] exists) + +# # How to test: +# # Create an ingress.yaml file with content: +# apiVersion: networking.k8s.io/v1beta1 +# kind: Ingress +# metadata: +# name: test-ingress +# annotations: +# nginx.ingress.kubernetes.io/rewrite-target: / +# spec: +# rules: +# - http: +# paths: +# - path: /testpath +# backend: +# serviceName: test +# servicePort: 80 +# # Execute: kubectl apply -f ingress.yaml + +- rule: Ingress Object without TLS Certificate Created + desc: Detect any attempt to create an ingress without TLS certification. + condition: > + (kactivity and kcreate and ingress and response_successful and not ingress_tls) + output: > + K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name + namespace=%ka.target.namespace resource=%ka.target.resource) + source: k8s_audit + priority: WARNING + tags: [k8s, network] + +- macro: node + condition: ka.target.resource=nodes + +- macro: allow_all_k8s_nodes + condition: (k8s_audit_always_true) + +- list: allowed_k8s_nodes + items: [] + +# # How to test: +# # Create a Falco monitored cluster with Kops +# # Increase the number of minimum nodes with: +# kops edit ig nodes +# kops apply --yes + +- rule: Untrusted Node Successfully Joined the Cluster + desc: > + Detect a node successfully joined the cluster outside of the list of allowed nodes. + condition: > + kevt and node + and kcreate + and response_successful + and not allow_all_k8s_nodes + and not ka.target.name in (allowed_k8s_nodes) + output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name resource=%ka.target.resource) + priority: ERROR + source: k8s_audit + tags: [k8s] + +- rule: Untrusted Node Unsuccessfully Tried to Join the Cluster + desc: > + Detect an unsuccessful attempt to join the cluster for a node not in the list of allowed nodes. + condition: > + kevt and node + and kcreate + and not response_successful + and not allow_all_k8s_nodes + and not ka.target.name in (allowed_k8s_nodes) + output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason resource=%ka.target.resource) + priority: WARNING + source: k8s_audit + tags: [k8s] diff --git a/charts/kubezero-falco/templates/k8saudit-rules-cm.yaml b/charts/kubezero-falco/templates/k8saudit-rules-cm.yaml new file mode 100644 index 00000000..cde4473a --- /dev/null +++ b/charts/kubezero-falco/templates/k8saudit-rules-cm.yaml @@ -0,0 +1,10 @@ +{{- if .Values.k8saudit.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: falco-k8saudit-rules + labels: + {{- include "falco.labels" . | nindent 4 }} +data: + {{- (.Files.Glob "files/rules/**.yaml").AsConfig | nindent 2 }} +{{- end }} diff --git a/charts/kubezero-falco/update.sh b/charts/kubezero-falco/update.sh new file mode 100755 index 00000000..f8ffc8df --- /dev/null +++ b/charts/kubezero-falco/update.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -ex + +. 
../../scripts/lib-update.sh
+
+#login_ecr_public
+update_helm
+
+# update falco rules
+wget -qO files/rules/k8s_audit_rules.yaml https://raw.githubusercontent.com/falcosecurity/plugins/master/plugins/k8saudit/rules/k8s_audit_rules.yaml
+
+update_docs
diff --git a/charts/kubezero-falco/values.yaml b/charts/kubezero-falco/values.yaml
new file mode 100644
index 00000000..b07ca66b
--- /dev/null
+++ b/charts/kubezero-falco/values.yaml
@@ -0,0 +1,76 @@
+k8saudit:
+  enabled: false
+
+  fullnameOverride: falco-k8saudit
+
+  # -- Disable the drivers since we want to deploy only the k8saudit plugin.
+  driver:
+    enabled: false
+
+  # -- Disable the collectors, no syscall events to enrich with metadata.
+  collectors:
+    enabled: false
+
+  # falcoctl disabled so we can reduce resources quite a bit
+  resources:
+    requests:
+      cpu: 100m
+      memory: 256Mi
+    limits:
+      cpu: 1000m
+      memory: 512Mi
+
+  nodeSelector:
+    node-role.kubernetes.io/control-plane: ""
+
+  # -- Deploy Falco as a deployment. One instance of Falco is enough. Anyway the number of replicas is configurable.
+  controller:
+    kind: deployment
+    deployment:
+      # -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing.
+      # For more info check the section on Plugins in the README.md file.
+      replicas: 1
+
+  # This provides k8s-audit rules via custom CM
+  mounts:
+    volumeMounts:
+      - mountPath: /etc/falco/rules.d
+        name: rules-volume
+    volumes:
+      - name: rules-volume
+        configMap:
+          name: falco-k8saudit-rules
+
+  falcoctl:
+    artifact:
+      install:
+        enabled: false
+      follow:
+        enabled: false
+
+  services:
+    - name: webhook
+      ports:
+        - port: 9765 # See plugin open_params
+          protocol: TCP
+
+  falco:
+    rules_file:
+      - /etc/falco/rules.d
+    plugins:
+      - name: k8saudit
+        library_path: libk8saudit.so
+        init_config:
+          maxEventSize: 1048576
+        open_params: "http://:9765/k8s-audit"
+      - name: json
+        library_path: libjson.so
+        init_config: ""
+    # Plugins that Falco will load. Note: the same plugins are installed by the falcoctl-artifact-install init container.
+    load_plugins: [k8saudit, json]
+
+    json_output: true
+    buffered_outputs: true
+    log_syslog: false
+    syslog_output:
+      enabled: false
diff --git a/charts/kubezero/Chart.yaml b/charts/kubezero/Chart.yaml
index d1be8a81..d170de2e 100644
--- a/charts/kubezero/Chart.yaml
+++ b/charts/kubezero/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: kubezero
 description: KubeZero - Root App of Apps chart
 type: application
-version: 1.27.7
+version: 1.27.8
 home: https://kubezero.com
 icon: https://cdn.zero-downtime.net/assets/kubezero/logo-small-64.png
 keywords:
diff --git a/charts/kubezero/templates/falco.yaml b/charts/kubezero/templates/falco.yaml
new file mode 100644
index 00000000..437ad8d9
--- /dev/null
+++ b/charts/kubezero/templates/falco.yaml
@@ -0,0 +1,15 @@
+{{- define "falco-values" }}
+
+{{- with .Values.falco.k8saudit }}
+k8saudit:
+  {{- toYaml . | nindent 2 }}
+{{- end }}
+
+{{- end }}
+
+
+{{- define "falco-argo" }}
+
+{{- end }}
+
+{{ include "kubezero-app.app" . }}
diff --git a/charts/kubezero/values.yaml b/charts/kubezero/values.yaml
index 4d3eabc9..f1a5d36b 100644
--- a/charts/kubezero/values.yaml
+++ b/charts/kubezero/values.yaml
@@ -10,7 +10,7 @@ global:
 
 addons:
   enabled: true
-  targetRevision: 0.8.2
+  targetRevision: 0.8.4
   external-dns:
     enabled: false
   forseti:
@@ -25,8 +25,6 @@ addons:
     enabled: false
   aws-eks-asg-rolling-update-handler:
     enabled: false
-  falco-control-plane:
-    enabled: false
 
 network:
   enabled: true
@@ -77,6 +75,12 @@ istio-private-ingress:
   gateway:
     service: {}
 
+falco:
+  enabled: false
+  k8saudit:
+    enabled: false
+  targetRevision: 0.1.0
+
 telemetry:
   enabled: false
   namespace: telemetry
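
For reference, with this release the Falco audit components are enabled from the cluster values rather than through kubezero-addons. A minimal sketch of such an override, based on the defaults added to charts/kubezero/values.yaml above (key names assumed to follow the usual KubeZero module layout):

# cluster values override (sketch): turn on the kubezero-falco app and its k8saudit subchart
falco:
  enabled: true
  k8saudit:
    enabled: true

With k8saudit enabled, the fullnameOverride (falco-k8saudit) plus the webhook service entry yield a falco-k8saudit-webhook service on port 9765, which matches the endpoint the kube-apiserver audit webhook config in charts/kubeadm/templates/apiserver/audit-webhook.yaml now points at, and the open_params of the k8saudit plugin.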