  desc: Detect any k8s operation by users outside of an allowed set of users.
  condition: kevt and non_system_user and not ka.user.name in (allowed_k8s_users) and not ka.user.name in (eks_allowed_k8s_users)
  output: K8s Operation performed by user not in allowed list of users (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
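# A minimal sketch of whitelisting additional operators from a local/user rules
# file, assuming the allowed_k8s_users list is defined earlier in this file.
# The user name below is only an illustrative placeholder.
# - list: allowed_k8s_users
#   items: ["jane.doe@example.com"]
#   append: true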
# In a local/user rules file, you could override this macro to
# explicitly enumerate the container images that you want to run in
# your environment. In this main falco rules file, there isn't any way
# to know all the containers that can run, so any container is
# allowed, by using the always_true macro. In the overridden macro, the condition
# would look something like (ka.req.pod.containers.image.repository in (my-repo/my-image))
- macro: allowed_k8s_containers
  condition: (k8s_audit_always_true)
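# For example, a local/user rules file could override the macro above to pin
# the allowed images; the repository names below are placeholders.
# - macro: allowed_k8s_containers
#   condition: (ka.req.pod.containers.image.repository in (my-repo/my-image, my-repo/other-image))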
- macro: response_successful
  condition: (ka.response.code startswith 2)
- macro: kget
  condition: ka.verb=get
- macro: kcreate
  condition: ka.verb=create
- macro: kmodify
  condition: (ka.verb in (create,update,patch))
- macro: kdelete
  condition: ka.verb=delete
- macro: pod
  condition: ka.target.resource=pods and not ka.target.subresource exists
- macro: pod_subresource
  condition: ka.target.resource=pods and ka.target.subresource exists
- macro: deployment
  condition: ka.target.resource=deployments
- macro: service
  condition: ka.target.resource=services
- macro: configmap
  condition: ka.target.resource=configmaps
- macro: namespace
  condition: ka.target.resource=namespaces
- macro: serviceaccount
  condition: ka.target.resource=serviceaccounts
- macro: clusterrole
  condition: ka.target.resource=clusterroles
- macro: clusterrolebinding
  condition: ka.target.resource=clusterrolebindings
- macro: role
  condition: ka.target.resource=roles
- macro: secret
  condition: ka.target.resource=secrets
- macro: health_endpoint
  condition: ka.uri=/healthz or ka.uri startswith /healthz?
- macro: live_endpoint
  condition: ka.uri=/livez or ka.uri startswith /livez?
- macro: ready_endpoint
  condition: ka.uri=/readyz or ka.uri startswith /readyz?
- rule: Create Disallowed Pod
  desc: >
    Detect an attempt to start a pod with a container image outside of a list of allowed images.
  condition: kevt and pod and kcreate and not allowed_k8s_containers
  output: Pod started with container not in allowed list (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- rule: Create Privileged Pod
  desc: >
    Detect an attempt to start a pod with a privileged container
  condition: kevt and pod and kcreate and ka.req.pod.containers.privileged intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_privileged_images)
  output: Pod started with privileged container (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- rule: Create Sensitive Mount Pod
  desc: >
    Detect an attempt to start a pod with a volume mounted from a sensitive host directory (e.g. /proc)
  condition: kevt and pod and kcreate and sensitive_vol_mount and not ka.req.pod.containers.image.repository in (k8s_audit_sensitive_mount_images)
  output: Pod started with sensitive mount (user=%ka.user.name pod=%ka.resp.name ns=%ka.target.namespace resource=%ka.target.resource images=%ka.req.pod.containers.image volumes=%jevt.value[/requestObject/spec/volumes])
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
# These container images are allowed to run with hostnetwork=true
# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
- rule: Create HostNetwork Pod
  desc: Detect an attempt to start a pod using the host network
  condition: kevt and pod and kcreate and ka.req.pod.host_network intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostnetwork_images)
  output: Pod started using host network (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- rule: Create HostPid Pod
  desc: Detect an attempt to start a pod using the host pid namespace
  condition: kevt and pod and kcreate and ka.req.pod.host_pid intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostpid_images)
  output: Pod started using host pid namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- rule: Create HostIPC Pod
  desc: Detect an attempt to start a pod using the host ipc namespace
  condition: kevt and pod and kcreate and ka.req.pod.host_ipc intersects (true) and not ka.req.pod.containers.image.repository in (k8s_audit_hostipc_images)
  output: Pod started using host ipc namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- macro: user_known_node_port_service
  condition: (k8s_audit_never_true)
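# If certain NodePort services are expected in the cluster, this macro could be
# overridden in a local/user rules file to match them; the service name below
# is only an illustrative placeholder.
# - macro: user_known_node_port_service
#   condition: (ka.target.name in (my-ingress-nodeport))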
- rule: Create NodePort Service
  desc: >
    Detect an attempt to start a service with a NodePort service type
  condition: kevt and service and kcreate and ka.req.service.type=NodePort and not user_known_node_port_service
  output: NodePort Service Created (user=%ka.user.name service=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ports=%ka.req.service.ports)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- macro: contains_private_credentials
  condition: >
    (ka.req.configmap.obj contains "aws_access_key_id" or
     ka.req.configmap.obj contains "aws-access-key-id" or
     ka.req.configmap.obj contains "aws_s3_access_key_id" or
     ka.req.configmap.obj contains "aws-s3-access-key-id" or
     ka.req.configmap.obj contains "password" or
     ka.req.configmap.obj contains "passphrase")
- rule: Create/Modify Configmap With Private Credentials
  desc: >
    Detect creating/modifying a configmap containing a private credential (aws key, password, etc.)
  condition: kevt and configmap and kmodify and contains_private_credentials
  output: K8s configmap with private credential (user=%ka.user.name verb=%ka.verb resource=%ka.target.resource configmap=%ka.req.configmap.name config=%ka.req.configmap.obj)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
# Corresponds to K8s CIS Benchmark, 1.1.1.
- rule: Anonymous Request Allowed
  desc: >
    Detect any request made by the anonymous user that was allowed
  condition: kevt and ka.user.name=system:anonymous and ka.auth.decision="allow" and not health_endpoint and not live_endpoint and not ready_endpoint
  output: Request by anonymous user allowed (user=%ka.user.name verb=%ka.verb uri=%ka.uri reason=%ka.auth.reason)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
# Roughly corresponds to K8s CIS Benchmark, 1.1.12. In this case, the rule
# notifies on an attempt to exec/attach to a privileged container.
# Ideally, we'd add a more stringent rule that detects attaches/execs
# to a privileged pod, but that requires the engine for k8s audit
# events to be stateful, so it could know if a container named in an
# attach request was created privileged or not. For now, we have a
# less severe rule that detects attaches/execs to any pod.
#
# For the same reason, you can't use things like image names/prefixes,
# as the event that creates the pod (which has the images) is a
# separate event from the actual exec/attach to the pod.
- macro: user_known_exec_pod_activities
  condition: (k8s_audit_never_true)
- rule: Attach/Exec Pod
  desc: >
    Detect any attempt to attach/exec to a pod
  condition: kevt_started and pod_subresource and kcreate and ka.target.subresource in (exec,attach) and not user_known_exec_pod_activities
  output: Attach/Exec to pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.target.subresource command=%ka.uri.param[command])
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]
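# Expected exec/attach activity (e.g. from a CI/CD or debugging tool) could be
# excluded by overriding the user_known_exec_pod_activities macro in a
# local/user rules file; the user name below is only an illustrative placeholder.
# - macro: user_known_exec_pod_activities
#   condition: (ka.user.name in ("ci-cd-deployer@example.com"))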
# Only works when feature gate EphemeralContainers is enabled
- rule: EphemeralContainers Created
  desc: >
    Detect any ephemeral container created
  condition: kevt and pod_subresource and kmodify and ka.target.subresource in (ephemeralcontainers) and not user_known_pod_debug_activities
  output: Ephemeral container is created in pod (user=%ka.user.name pod=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace ephemeral_container_name=%jevt.value[/requestObject/ephemeralContainers/0/name] ephemeral_container_image=%jevt.value[/requestObject/ephemeralContainers/0/image])
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]
# In a local/user rules file, you can append to this list to add additional allowed namespaces (see the commented example below)
- list: allowed_namespaces
  items: [kube-system, kube-public, default]
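# For example, in a local/user rules file (the namespace name below is a placeholder):
# - list: allowed_namespaces
#   items: [my-team-namespace]
#   append: true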
- rule: Create Disallowed Namespace
  desc: Detect any attempt to create a namespace outside of a set of known namespaces
  condition: kevt and namespace and kcreate and not ka.target.name in (allowed_namespaces)
  output: Disallowed namespace created (user=%ka.user.name ns=%ka.target.name resource=%ka.target.resource)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
# Only defined for backwards compatibility. Use the more specific
# user_allowed_kube_namespace_image_list instead.
- list: user_trusted_image_list
  items: []
- list: user_allowed_kube_namespace_image_list
  items: [user_trusted_image_list]
# Only defined for backwards compatibility. Use the more specific
# allowed_kube_namespace_image_list instead.
- list: k8s_image_list
  items: []
# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
- list: allowed_kube_namespace_image_list
  items: [
    gcr.io/google-containers/prometheus-to-sd,
    gcr.io/projectcalico-org/node,
    gke.gcr.io/addon-resizer,
    gke.gcr.io/heapster,
    gke.gcr.io/gke-metadata-server,
    registry.k8s.io/ip-masq-agent-amd64,
    registry.k8s.io/kube-apiserver,
    gke.gcr.io/kube-proxy,
    gke.gcr.io/netd-amd64,
    gke.gcr.io/watcher-daemonset,
    registry.k8s.io/addon-resizer,
    registry.k8s.io/prometheus-to-sd,
    registry.k8s.io/k8s-dns-dnsmasq-nanny-amd64,
    registry.k8s.io/k8s-dns-kube-dns-amd64,
    registry.k8s.io/k8s-dns-sidecar-amd64,
    registry.k8s.io/metrics-server-amd64,
    kope/kube-apiserver-healthcheck,
    k8s_image_list
  ]
- macro: allowed_kube_namespace_pods
  condition: >
    (ka.req.pod.containers.image.repository in (user_allowed_kube_namespace_image_list) or
     ka.req.pod.containers.image.repository in (allowed_kube_namespace_image_list))
# Detect any new pod created in the kube-system namespace
- rule: Pod Created in Kube Namespace
  desc: Detect any attempt to create a pod in the kube-system or kube-public namespaces
  condition: kevt and pod and kcreate and ka.target.namespace in (kube-system, kube-public) and not allowed_kube_namespace_pods
  output: Pod created in kube namespace (user=%ka.user.name pod=%ka.resp.name resource=%ka.target.resource ns=%ka.target.namespace images=%ka.req.pod.containers.image)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- list: user_known_sa_list
  items: []
- list: known_sa_list
  items: [
    coredns,
    coredns-autoscaler,
    cronjob-controller,
    daemon-set-controller,
    deployment-controller,
    disruption-controller,
    endpoint-controller,
    endpointslice-controller,
    endpointslicemirroring-controller,
    generic-garbage-collector,
    horizontal-pod-autoscaler,
    job-controller,
    namespace-controller,
    node-controller,
    persistent-volume-binder,
    pod-garbage-collector,
    pv-protection-controller,
    pvc-protection-controller,
    replicaset-controller,
    resourcequota-controller,
    root-ca-cert-publisher,
    service-account-controller,
    statefulset-controller
  ]
- macro: trusted_sa
  condition: (ka.target.name in (known_sa_list, user_known_sa_list))
# Detect creating a service account in the kube-system/kube-public namespace
- rule: Service Account Created in Kube Namespace
  desc: Detect any attempt to create a serviceaccount in the kube-system or kube-public namespaces
  condition: kevt and serviceaccount and kcreate and ka.target.namespace in (kube-system, kube-public) and response_successful and not trusted_sa
  output: Service account created in kube namespace (user=%ka.user.name serviceaccount=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
# Detect any modify/delete to any ClusterRole starting with
# "system:". "system:coredns" is excluded as changes are expected in
# normal operation.
- rule: System ClusterRole Modified/Deleted
  desc: Detect any attempt to modify/delete a ClusterRole/Role starting with system
  condition: >
    kevt and (role or clusterrole) and (kmodify or kdelete) and (ka.target.name startswith "system:") and
    not ka.target.name in (system:coredns, system:managed-certificate-controller)
  output: System ClusterRole/Role modified or deleted (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource ns=%ka.target.namespace action=%ka.verb)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
# Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
# (expand this to any built-in cluster role that does "sensitive" things)
- rule: Attach to cluster-admin Role
  desc: Detect any attempt to create a ClusterRoleBinding to the cluster-admin user
  condition: kevt and clusterrolebinding and kcreate and ka.req.binding.role=cluster-admin
  output: Cluster Role Binding to cluster-admin role (user=%ka.user.name subject=%ka.req.binding.subjects)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- rule: ClusterRole With Wildcard Created
  desc: Detect any attempt to create a Role/ClusterRole with wildcard resources or verbs
  condition: kevt and (role or clusterrole) and kcreate and (ka.req.role.rules.resources intersects ("*") or ka.req.role.rules.verbs intersects ("*"))
  output: Created Role/ClusterRole with wildcard (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- rule: ClusterRole With Write Privileges Created
  desc: Detect any attempt to create a Role/ClusterRole that can perform write-related actions
  condition: kevt and (role or clusterrole) and kcreate and writable_verbs
  output: Created Role/ClusterRole with write privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: NOTICE
  source: k8s_audit
  tags: [k8s]
- rule: ClusterRole With Pod Exec Created
  desc: Detect any attempt to create a Role/ClusterRole that can exec to pods
  condition: kevt and (role or clusterrole) and kcreate and ka.req.role.rules.resources intersects ("pods/exec")
  output: Created Role/ClusterRole with pod exec privileges (user=%ka.user.name role=%ka.target.name resource=%ka.target.resource rules=%ka.req.role.rules)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
# The rules below this point are less discriminatory and generally
# represent a stream of activity for a cluster. If you wish to disable
# these events, modify the following macro.
- macro: consider_activity_events
  condition: (k8s_audit_always_true)
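# For example, a local/user rules file could disable the activity rules below
# by overriding the macro above:
# - macro: consider_activity_events
#   condition: (k8s_audit_never_true)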
- macro: kactivity
  condition: (kevt and consider_activity_events)
- rule: K8s Deployment Created
  desc: Detect any attempt to create a deployment
  condition: (kactivity and kcreate and deployment and response_successful)
  output: K8s Deployment Created (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]
- rule: K8s Deployment Deleted
  desc: Detect any attempt to delete a deployment
  condition: (kactivity and kdelete and deployment and response_successful)
  output: K8s Deployment Deleted (user=%ka.user.name deployment=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]
- rule: K8s Service Created
  desc: Detect any attempt to create a service
  condition: (kactivity and kcreate and service and response_successful)
  output: K8s Service Created (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]
- rule: K8s Service Deleted
  desc: Detect any attempt to delete a service
  condition: (kactivity and kdelete and service and response_successful)
  output: K8s Service Deleted (user=%ka.user.name service=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]
- rule: K8s ConfigMap Created
  desc: Detect any attempt to create a configmap
  condition: (kactivity and kcreate and configmap and response_successful)
  output: K8s ConfigMap Created (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]
- rule: K8s ConfigMap Deleted
  desc: Detect any attempt to delete a configmap
  condition: (kactivity and kdelete and configmap and response_successful)
  output: K8s ConfigMap Deleted (user=%ka.user.name configmap=%ka.target.name ns=%ka.target.namespace resource=%ka.target.resource resp=%ka.response.code decision=%ka.auth.decision reason=%ka.auth.reason)
  priority: INFO
  source: k8s_audit
  tags: [k8s]
# This rule detects an operation triggered by a user name that is
# included in the list of those that are default administrators upon
# cluster creation. This may signify a permission setting that is too broad.
# As we can't check the role of the user on a general ka.* event, this
# may or may not be an administrator. Customize the full_admin_k8s_users
# list to your needs, and activate at your discretion.
# How to test:
# Execute any kubectl command while connected as the default cluster user, e.g.:
# kubectl create namespace rule-test
- rule: Full K8s Administrative Access
  desc: Detect any k8s operation by a user name that may be an administrator with full access.
  condition: >
    kevt
    and non_system_user
    and ka.user.name in (full_admin_k8s_users)
    and not allowed_full_admin_users
  output: K8s Operation performed by full admin user (user=%ka.user.name target=%ka.target.name/%ka.target.resource verb=%ka.verb uri=%ka.uri resp=%ka.response.code)
  priority: WARNING
  source: k8s_audit
  tags: [k8s]
- rule: Untrusted Node Successfully Joined the Cluster
  desc: >
    Detect a node successfully joining the cluster outside of the list of allowed nodes.
  condition: >
    kevt and node
    and kcreate
    and response_successful
    and not allow_all_k8s_nodes
    and not ka.target.name in (allowed_k8s_nodes)
  output: Node not in allowed list successfully joined the cluster (user=%ka.user.name node=%ka.target.name resource=%ka.target.resource)
  priority: ERROR
  source: k8s_audit
  tags: [k8s]
- rule: Untrusted Node Unsuccessfully Tried to Join the Cluster
  desc: >
    Detect an unsuccessful attempt to join the cluster for a node not in the list of allowed nodes.
  condition: >
    kevt and node
    and kcreate
    and not response_successful
    and not allow_all_k8s_nodes
    and not ka.target.name in (allowed_k8s_nodes)
  output: Node not in allowed list tried unsuccessfully to join the cluster (user=%ka.user.name node=%ka.target.name reason=%ka.response.reason resource=%ka.target.resource)