# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2023 The Falco Authors.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

- required_engine_version: 15

- required_plugin_versions:
  - name: k8saudit
    version: 0.7.0
    alternatives:
      - name: k8saudit-eks
        version: 0.4.0
      - name: k8saudit-gke
        version: 0.1.0
  - name: json
    version: 0.7.0

# Like always_true/always_false, but works with k8s audit events
- macro: k8s_audit_always_true
  condition: (jevt.rawtime exists)

- macro: k8s_audit_never_true
  condition: (jevt.rawtime=0)

# Generally only consider audit events once the response has completed
- list: k8s_audit_stages
  items: ["ResponseComplete"]

# Generally exclude users starting with "system:"
- macro: non_system_user
  condition: (not ka.user.name startswith "system:")

# This macro selects the set of Audit Events used by the below rules.
- macro: kevt
  condition: (jevt.value[/stage] in (k8s_audit_stages))

- macro: kevt_started
  condition: (jevt.value[/stage]=ResponseStarted)

# If you wish to restrict activity to a specific set of users, override/append to this list.
# Users created by kops are included.
- list: vertical_pod_autoscaler_users
  items: ["vpa-recommender", "vpa-updater"]

- list: allowed_k8s_users
  items: [
    "minikube", "minikube-user", "kubelet", "kops", "admin", "kube", "kube-proxy", "kube-apiserver-healthcheck",
    "kubernetes-admin",
    vertical_pod_autoscaler_users,
    cluster-autoscaler,
    "system:addon-manager",
    "cloud-controller-manager",
    "system:kube-controller-manager"
    ]

- list: eks_allowed_k8s_users
  items: [
    "eks:node-manager",
    "eks:certificate-controller",
    "eks:fargate-scheduler",
    "eks:k8s-metrics",
    "eks:authenticator",
    "eks:cluster-event-watcher",
    "eks:nodewatcher",
    "eks:pod-identity-mutating-webhook",
    "eks:cloud-controller-manager",
    "eks:vpc-resource-controller",
    "eks:addon-manager"
    ]

- list: k8s_audit_sensitive_mount_images
  items: [
    falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
    docker.io/sysdig/sysdig, sysdig/sysdig,
    gcr.io/google_containers/hyperkube,
    gcr.io/google_containers/kube-proxy, docker.io/calico/node,
    docker.io/rook/toolbox, docker.io/cloudnativelabs/kube-router, docker.io/consul,
    docker.io/datadog/docker-dd-agent, docker.io/datadog/agent, docker.io/docker/ucp-agent, docker.io/gliderlabs/logspout,
    docker.io/netdata/netdata, docker.io/google/cadvisor, docker.io/prom/node-exporter,
    amazon/amazon-ecs-agent, prom/node-exporter, amazon/cloudwatch-agent
    ]

- list: k8s_audit_privileged_images
  items: [
    falcosecurity/falco, docker.io/falcosecurity/falco, public.ecr.aws/falcosecurity/falco,
    docker.io/calico/node, calico/node,
    docker.io/cloudnativelabs/kube-router,
    docker.io/docker/ucp-agent,
    docker.io/mesosphere/mesos-slave,
    docker.io/rook/toolbox,
    docker.io/sysdig/sysdig,
    gcr.io/google_containers/kube-proxy,
    gcr.io/google-containers/startup-script,
    gcr.io/projectcalico-org/node,
    gke.gcr.io/kube-proxy,
    gke.gcr.io/gke-metadata-server,
    gke.gcr.io/netd-amd64,
    gke.gcr.io/watcher-daemonset,
    gcr.io/google-containers/prometheus-to-sd,
    registry.k8s.io/ip-masq-agent-amd64,
    registry.k8s.io/kube-proxy,
    registry.k8s.io/prometheus-to-sd,
    quay.io/calico/node,
    sysdig/sysdig,
    registry.k8s.io/dns/k8s-dns-node-cache,
    mcr.microsoft.com/oss/kubernetes/kube-proxy
    ]

- rule: Disallowed K8s User
  desc: Detect any k8s operation by users outside of an allowed set of users.
  condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) and not ka.user.name in (eks_allowed_k8s_users)
  output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
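# To extend the allowed set without editing this file, append to the list
# from a local/user rules file. A minimal sketch; "ci-deployer" is a
# placeholder user name, not something this ruleset defines:
#
# - list: allowed_k8s_users
#   items: [ci-deployer]
#   append: true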
# In a local/user rules file, you could override this macro to
# explicitly enumerate the container images that you want to run in
# your environment. In this main falco rules file, there isn't any way
# to know all the containers that can run, so any container is
# allowed, by using the always_true macro. In the overridden macro, the
# condition would look something like
# (ka.req.pod.containers.image.repository in (my-repo/my-image))
- macro: allowed_k8s_containers
  condition: (k8s_audit_always_true)
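# For example, a local/user rules file loaded after this one could redefine
# the macro as below. This is only a sketch; the repository names are
# placeholders for the images you actually run:
#
# - macro: allowed_k8s_containers
#   condition: (ka.req.pod.containers.image.repository in (my-registry/falco, my-registry/nginx))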
- macro: response_successful
  condition: (ka.response.code startswith 2)

- macro: kget
  condition: ka.verb=get

- macro: kcreate
  condition: ka.verb=create

- macro: kmodify
  condition: (ka.verb in (create,update,patch))

- macro: kdelete
  condition: ka.verb=delete

- macro: pod
  condition: ka.target.resource=pods and not ka.target.subresource exists

- macro: pod_subresource
  condition: ka.target.resource=pods and ka.target.subresource exists

- macro: deployment
  condition: ka.target.resource=deployments

- macro: service
  condition: ka.target.resource=services

- macro: configmap
  condition: ka.target.resource=configmaps

- macro: namespace
  condition: ka.target.resource=namespaces

- macro: serviceaccount
  condition: ka.target.resource=serviceaccounts

- macro: clusterrole
  condition: ka.target.resource=clusterroles

- macro: clusterrolebinding
  condition: ka.target.resource=clusterrolebindings

- macro: role
  condition: ka.target.resource=roles

- macro: secret
  condition: ka.target.resource=secrets

- macro: health_endpoint
  condition: ka.uri=/healthz or ka.uri startswith /healthz?

- macro: live_endpoint
  condition: ka.uri=/livez or ka.uri startswith /livez?

- macro: ready_endpoint
  condition: ka.uri=/readyz or ka.uri startswith /readyz?

- rule: Create Disallowed Pod
  desc: >
    Detect an attempt to start a pod with a container image outside of a list of allowed images.
  condition: kevt and pod and kcreate and not allowed_k8s_containers
  output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- rule: Create Privileged Pod
  desc: >
    Detect an attempt to start a pod with a privileged container.
  condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_privileged_images)
  output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- macro: sensitive_vol_mount
  condition: >
    (ka.req.pod.volumes.hostpath intersects (/proc, /var/run/docker.sock, /, /etc, /root, /var/run/crio/crio.sock, /run/containerd/containerd.sock, /home/admin, /var/lib/kubelet, /var/lib/kubelet/pki, /etc/kubernetes, /etc/kubernetes/manifests))

- rule: Create Sensitive Mount Pod
  desc: >
    Detect an attempt to start a pod with a volume from a sensitive host directory (i.e. /proc).
    Exceptions are made for known trusted images.
  condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (k8s_audit_sensitive_mount_images)
  output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace resource=%ka.target.resource images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# These container images are allowed to run with hostnetwork=true
# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
- list: k8s_audit_hostnetwork_images
  items: [
    gcr.io/google-containers/prometheus-to-sd,
    gcr.io/projectcalico-org/typha,
    gcr.io/projectcalico-org/node,
    gke.gcr.io/gke-metadata-server,
    gke.gcr.io/kube-proxy,
    gke.gcr.io/netd-amd64,
    registry.k8s.io/ip-masq-agent-amd64,
    registry.k8s.io/prometheus-to-sd
    ]

# Corresponds to K8s CIS Benchmark 1.7.4
- rule: Create HostNetwork Pod
  desc: Detect an attempt to start a pod using the host network.
  condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostnetwork_images)
  output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
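# How to test (one possible way; the pod name and the nginx image below are
# arbitrary examples, not part of this ruleset):
#
# kubectl apply -f - <<EOF
# apiVersion: v1
# kind: Pod
# metadata:
#   name: hostnetwork-test
# spec:
#   hostNetwork: true
#   containers:
#     - name: test
#       image: nginx
# EOF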
- list: k8s_audit_hostpid_images
  items: []

- rule: Create HostPid Pod
  desc: Detect an attempt to start a pod using the host pid namespace.
  condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostpid_images)
  output: Pod started using host pid namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- list: k8s_audit_hostipc_images
  items: []

- rule: Create HostIPC Pod
  desc: Detect an attempt to start a pod using the host ipc namespace.
  condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostipc_images)
  output: Pod started using host ipc namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- macro: user_known_node_port_service
  condition: (k8s_audit_never_true)

- rule: Create NodePort Service
  desc: >
    Detect an attempt to start a service with a NodePort service type.
  condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service
  output: NodePort Service Created (user=%ka.user.name service=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ports=%ka.req.service.ports)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
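# How to test (one possible way; the service name is an arbitrary example):
#
# kubectl create service nodeport nodeport-test --tcp=80:80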
- macro: contains_private_credentials
  condition: >
    (ka.req.configmap.obj contains "aws_access_key_id" or
     ka.req.configmap.obj contains "aws-access-key-id" or
     ka.req.configmap.obj contains "aws_s3_access_key_id" or
     ka.req.configmap.obj contains "aws-s3-access-key-id" or
     ka.req.configmap.obj contains "password" or
     ka.req.configmap.obj contains "passphrase")

- rule: Create/Modify Configmap With Private Credentials
  desc: >
    Detect creating/modifying a configmap containing a private credential (aws key, password, etc.)
  condition: kevt and configmap and kmodify and contains_private_credentials
  output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb resource=%ka.target.resource configmap=%ka.req.configmap.name config=%ka.req.configmap.obj)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
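# How to test (one possible way; the configmap name and the value are
# arbitrary examples; it matches because the key contains "password"):
#
# kubectl create configmap credentials-test --from-literal=password=s3cr3t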
# Corresponds to K8s CIS Benchmark, 1.1.1.
- rule: Anonymous Request Allowed
  desc: >
    Detect any request made by the anonymous user that was allowed.
  condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint and not live_endpoint and not ready_endpoint
  output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# Roughly corresponds to K8s CIS Benchmark, 1.1.12. In this case,
# notifies an attempt to exec/attach to a privileged container.
#
# Ideally, we'd add a more stringent rule that detects attaches/execs
# to a privileged pod, but that requires the engine for k8s audit
# events to be stateful, so it could know if a container named in an
# attach request was created privileged or not. For now, we have a
# less severe rule that detects attaches/execs to any pod.
#
# For the same reason, you can't use things like image names/prefixes,
# as the event that creates the pod (which has the images) is a
# separate event from the actual exec/attach to the pod.

- macro: user_known_exec_pod_activities
  condition: (k8s_audit_never_true)

- rule: Attach/Exec Pod
  desc: >
    Detect any attempt to attach/exec to a pod.
  condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities
  output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command])
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]
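# How to test (one possible way; "some-pod" stands for any pod already
# running in the cluster):
#
# kubectl exec -it some-pod -- /bin/sh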
- macro: user_known_portforward_activities
  condition: (k8s_audit_never_true)

- rule: port-forward
  desc: >
    Detect any attempt to port-forward to a pod.
  condition: ka.target.subresource in (portforward) and not user_known_portforward_activities
  output: Portforward to pod (user=%ka.user.name pod=%ka.target.name ns=%ka.target.namespace action=%ka.target.subresource)
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]

- macro: user_known_pod_debug_activities
  condition: (k8s_audit_never_true)

# Only works when the EphemeralContainers feature gate is enabled
- rule: EphemeralContainers Created
  desc: >
    Detect any ephemeral container created.
  condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities
  output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image])
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]

# In a local/user rules file, you can append to this list to add additional allowed namespaces
- list: allowed_namespaces
  items: [kube-system, kube-public, default]

- rule: Create Disallowed Namespace
  desc: Detect any attempt to create a namespace outside of a set of known namespaces.
  condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces)
  output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name resource=%ka.target.resource)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# Only defined for backwards compatibility. Use the more specific
# user_allowed_kube_namespace_image_list instead.
- list: user_trusted_image_list
  items: []

- list: user_allowed_kube_namespace_image_list
  items: [user_trusted_image_list]

# Only defined for backwards compatibility. Use the more specific
# allowed_kube_namespace_image_list instead.
- list: k8s_image_list
  items: []

# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
- list: allowed_kube_namespace_image_list
  items: [
    gcr.io/google-containers/prometheus-to-sd,
    gcr.io/projectcalico-org/node,
    gke.gcr.io/addon-resizer,
    gke.gcr.io/heapster,
    gke.gcr.io/gke-metadata-server,
    registry.k8s.io/ip-masq-agent-amd64,
    registry.k8s.io/kube-apiserver,
    gke.gcr.io/kube-proxy,
    gke.gcr.io/netd-amd64,
    gke.gcr.io/watcher-daemonset,
    registry.k8s.io/addon-resizer,
    registry.k8s.io/prometheus-to-sd,
    registry.k8s.io/k8s-dns-dnsmasq-nanny-amd64,
    registry.k8s.io/k8s-dns-kube-dns-amd64,
    registry.k8s.io/k8s-dns-sidecar-amd64,
    registry.k8s.io/metrics-server-amd64,
    kope/kube-apiserver-healthcheck,
    k8s_image_list
    ]

- macro: allowed_kube_namespace_pods
  condition: (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or
              ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list))

# Detect any new pod created in the kube-system namespace
- rule: Pod Created in Kube Namespace
  desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces.
  condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods
  output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- list: user_known_sa_list
  items: []

- list: known_sa_list
  items: [
    coredns,
    coredns-autoscaler,
    cronjob-controller,
    daemon-set-controller,
    deployment-controller,
    disruption-controller,
    endpoint-controller,
    endpointslice-controller,
    endpointslicemirroring-controller,
    generic-garbage-collector,
    horizontal-pod-autoscaler,
    job-controller,
    namespace-controller,
    node-controller,
    persistent-volume-binder,
    pod-garbage-collector,
    pv-protection-controller,
    pvc-protection-controller,
    replicaset-controller,
    resourcequota-controller,
    root-ca-cert-publisher,
    service-account-controller,
    statefulset-controller
    ]

- macro: trusted_sa
  condition: (ka.target.name in (known_sa_list, user_known_sa_list))

# Detect creating a service account in the kube-system/kube-public namespace
- rule: Service Account Created in Kube Namespace
  desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces.
  condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa
  output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# Detect any modify/delete to any ClusterRole starting with
# "system:". "system:coredns" is excluded as changes are expected in
# normal operation.
- rule: System ClusterRole Modified/Deleted
  desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system.
  condition: kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and
             not ka.target.name in (system:coredns, system:managed-certificate-controller)
  output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.verb)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

# Detect any attempt to create a ClusterRoleBinding to the cluster-admin role
# (expand this to any built-in cluster role that does "sensitive" things)
- rule: Attach to cluster-admin Role
  desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin role.
  condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin
  output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
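# How to test (one possible way; the binding name and the default service
# account are arbitrary examples):
#
# kubectl create clusterrolebinding cluster-admin-test --clusterrole=cluster-admin --serviceaccount=default:default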
- rule: ClusterRole With Wildcard Created
  desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs.
  condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*"))
  output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- macro: writable_verbs
  condition: >
    (ka.req.role.rules.verbs intersects (create, update, patch, delete, deletecollection))

- rule: ClusterRole With Write Privileges Created
  desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions.
  condition: kevt and (role or clusterrole) and kcreate and writable_verbs
  output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]

- rule: ClusterRole With Pod Exec Created
  desc: Detect any attempt to create a Role/ClusterRole that can exec to pods.
  condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec")
  output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
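# How to test (one possible way; the role name is an arbitrary example;
# because the verb is create, this also matches the write-privileges rule
# above):
#
# kubectl create clusterrole pod-exec-test --verb=create --resource=pods/exec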
# The rules below this point are less discriminatory and generally
# represent a stream of activity for a cluster. If you wish to disable
# these events, modify the following macro.
- macro: consider_activity_events
  condition: (k8s_audit_always_true)

- macro: kactivity
  condition: (kevt and consider_activity_events)

- rule: K8s Deployment Created
  desc: Detect any attempt to create a deployment.
  condition: (kactivity and kcreate and deployment and response_successful)
  output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Deployment Deleted
  desc: Detect any attempt to delete a deployment.
  condition: (kactivity and kdelete and deployment and response_successful)
  output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Service Created
  desc: Detect any attempt to create a service.
  condition: (kactivity and kcreate and service and response_successful)
  output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Service Deleted
  desc: Detect any attempt to delete a service.
  condition: (kactivity and kdelete and service and response_successful)
  output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s ConfigMap Created
  desc: Detect any attempt to create a configmap.
  condition: (kactivity and kcreate and configmap and response_successful)
  output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s ConfigMap Deleted
  desc: Detect any attempt to delete a configmap.
  condition: (kactivity and kdelete and configmap and response_successful)
  output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Namespace Created
  desc: Detect any attempt to create a namespace.
  condition: (kactivity and kcreate and namespace and response_successful)
  output: K8s Namespace Created (user=%ka.user.name namespace=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Namespace Deleted
  desc: Detect any attempt to delete a namespace.
  condition: (kactivity and non_system_user and kdelete and namespace and response_successful)
  output: K8s Namespace Deleted (user=%ka.user.name namespace=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Serviceaccount Created
  desc: Detect any attempt to create a service account.
  condition: (kactivity and kcreate and serviceaccount and response_successful)
  output: K8s Serviceaccount Created (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Serviceaccount Deleted
  desc: Detect any attempt to delete a service account.
  condition: (kactivity and kdelete and serviceaccount and response_successful)
  output: K8s Serviceaccount Deleted (user=%ka.user.name serviceaccount=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrole Created
  desc: Detect any attempt to create a cluster role/role.
  condition: (kactivity and kcreate and (clusterrole or role) and response_successful)
  output: K8s Cluster Role Created (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrole Deleted
  desc: Detect any attempt to delete a cluster role/role.
  condition: (kactivity and kdelete and (clusterrole or role) and response_successful)
  output: K8s Cluster Role Deleted (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrolebinding Created
  desc: Detect any attempt to create a clusterrolebinding.
  condition: (kactivity and kcreate and clusterrolebinding and response_successful)
  output: K8s Cluster Role Binding Created (user=%ka.user.name binding=%ka.target.name resource=%ka.target.resource subjects=%ka.req.binding.subjects role=%ka.req.binding.role resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Role/Clusterrolebinding Deleted
  desc: Detect any attempt to delete a clusterrolebinding.
  condition: (kactivity and kdelete and clusterrolebinding and response_successful)
  output: K8s Cluster Role Binding Deleted (user=%ka.user.name binding=%ka.target.name resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Created
  desc: Detect any attempt to create a secret. Service account tokens are excluded.
  condition: (kactivity and kcreate and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
  output: K8s Secret Created (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Deleted
  desc: Detect any attempt to delete a secret. Service account tokens are excluded.
  condition: (kactivity and kdelete and secret and ka.target.namespace!=kube-system and non_system_user and response_successful)
  output: K8s Secret Deleted (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Get Successfully
  desc: >
    Detect any attempt to get a secret. Service account tokens are excluded.
  condition: >
    secret and kget and kactivity and response_successful
  output: K8s Secret Get Successfully (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: ERROR
  source: k8s_audit
  tags: [k8s]

- rule: K8s Secret Get Unsuccessfully Tried
  desc: >
    Detect an unsuccessful attempt to get a secret. Service account tokens are excluded.
  condition: >
    secret and kget and kactivity and not response_successful
  output: K8s Secret Get Unsuccessfully Tried (user=%ka.user.name secret=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
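# How to test the secret rules above (one possible way; "my-secret" is an
# arbitrary example name). A successful read matches K8s Secret Get
# Successfully; reading a nonexistent secret matches K8s Secret Get
# Unsuccessfully Tried:
#
# kubectl get secret my-secret -o yaml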
# This rule generally matches all events, and as a result is disabled
# by default. If you wish to enable these events, modify the
# following macro so that its condition is, e.g.:
#   condition: (jevt.rawtime exists)
- macro: consider_all_events
  condition: (k8s_audit_never_true)

- macro: kall
  condition: (kevt and consider_all_events)

- rule: All K8s Audit Events
  desc: Match all K8s Audit Events
  condition: kall
  output: K8s Audit Event received (user=%ka.user.name verb=%ka.verb uri=%ka.uri obj=%jevt.obj)
  priority: DEBUG
  source: k8s_audit
  tags: [k8s]

# This macro disables the following rule; change it to k8s_audit_never_true to enable it
- macro: allowed_full_admin_users
  condition: (k8s_audit_always_true)

# This list includes some of the default user names for an administrator in several K8s installations
- list: full_admin_k8s_users
  items: ["admin", "kubernetes-admin", "kubernetes-admin@kubernetes", "kubernetes-admin@cluster.local", "minikube-user"]

# This rule detects an operation triggered by a user name that is
# included in the list of those that are default administrators upon
# cluster creation. This may signify a permission setting that is too
# broad. As we can't check the role of the user on a general ka.* event,
# this may or may not be an administrator. Customize the
# full_admin_k8s_users list to your needs, and activate at your discretion.
#
# How to test:
#
# Execute any kubectl command connected as the default cluster user, e.g.:
# kubectl create namespace rule-test
- rule: Full K8s Administrative Access
  desc: Detect any k8s operation by a user name that may be an administrator with full access.
  condition: >
    kevt
    and non_system_user
    and ka.user.name in (full_admin_k8s_users)
    and not allowed_full_admin_users
  output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]

- macro: ingress
  condition: ka.target.resource=ingresses

- macro: ingress_tls
  condition: (jevt.value[/requestObject/spec/tls] exists)

# How to test:
#
# Create an ingress.yaml file with content:
#   apiVersion: networking.k8s.io/v1beta1
#   kind: Ingress
#   metadata:
#     name: test-ingress
#     annotations:
#       nginx.ingress.kubernetes.io/rewrite-target: /
#   spec:
#     rules:
#       - http:
#           paths:
#             - path: /testpath
#               backend:
#                 serviceName: test
#                 servicePort: 80
#
# Execute: kubectl apply -f ingress.yaml
- rule: Ingress Object without TLS Certificate Created
  desc: Detect any attempt to create an ingress without a TLS certificate.
  condition: >
    (kactivity and kcreate and ingress and response_successful and not ingress_tls)
  output: >
    K8s Ingress Without TLS Cert Created (user=%ka.user.name ingress=%ka.target.name
    namespace=%ka.target.namespace resource=%ka.target.resource)
  source: k8s_audit
  priority: WARNING
  tags: [k8s, network]
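# Note: the networking.k8s.io/v1beta1 Ingress API in the test manifest above
# was removed in Kubernetes 1.22. On newer clusters, an equivalent
# networking.k8s.io/v1 manifest would be ("test" and /testpath are the same
# placeholders as above):
#
#   apiVersion: networking.k8s.io/v1
#   kind: Ingress
#   metadata:
#     name: test-ingress
#     annotations:
#       nginx.ingress.kubernetes.io/rewrite-target: /
#   spec:
#     rules:
#       - http:
#           paths:
#             - path: /testpath
#               pathType: Prefix
#               backend:
#                 service:
#                   name: test
#                   port:
#                     number: 80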
- macro: node
  condition: ka.target.resource=nodes

- macro: allow_all_k8s_nodes
  condition: (k8s_audit_always_true)

- list: allowed_k8s_nodes
  items: []

# How to test:
#
# Create a Falco-monitored cluster with kops.
# Increase the minimum number of nodes with:
#   kops edit ig nodes
#   kops update cluster --yes
- rule: Untrusted Node Successfully Joined the Cluster
  desc: >
    Detect a node successfully joining the cluster outside of the list of allowed nodes.
  condition: >
    kevt and node and kcreate and response_successful and not allow_all_k8s_nodes and not ka.target.name in (allowed_k8s_nodes)
  output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name resource=%ka.target.resource)
  priority: ERROR
  source: k8s_audit
  tags: [k8s]

- rule: Untrusted Node Unsuccessfully Tried to Join the Cluster
  desc: >
    Detect an unsuccessful attempt to join the cluster by a node not in the list of allowed nodes.
  condition: >
    kevt and node and kcreate and not response_successful and not allow_all_k8s_nodes and not ka.target.name in (allowed_k8s_nodes)
  output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason resource=%ka.target.resource)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
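# To make the node rules above effective, enumerate your expected nodes in a
# local/user rules file and disable the allow-all macro. A minimal sketch;
# the node name is a placeholder:
#
# - list: allowed_k8s_nodes
#   items: [ip-10-0-0-1.ec2.internal]
#   append: true
#
# - macro: allow_all_k8s_nodes
#   condition: (k8s_audit_never_true)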