From 6a8c1cf72391cf2cc405bea556fc0d3be75cd7ff Mon Sep 17 00:00:00 2001 From: Stefan Reimer Date: Mon, 24 Jun 2024 13:03:39 +0000 Subject: [PATCH] feat: Migrate falco to use modern eBPF, latest version --- kubezero/falco-kernel/APKBUILD | 71 -- kubezero/falco/APKBUILD | 13 +- kubezero/falco/falco.yaml | 1053 ------------------- kubezero/falco/falco_rules.yaml | 1251 ----------------------- kubezero/falco/zdt_falco.yaml | 1053 ------------------- kubezero/falco/zdt_falco_rules.yaml | 1251 ----------------------- kubezero/falcoctl/APKBUILD | 4 +- kubezero/zdt-base/APKBUILD | 4 +- kubezero/zdt-base/zdt-base.post-install | 3 - 9 files changed, 10 insertions(+), 4693 deletions(-) delete mode 100644 kubezero/falco-kernel/APKBUILD delete mode 100644 kubezero/falco/falco.yaml delete mode 100644 kubezero/falco/falco_rules.yaml delete mode 100644 kubezero/falco/zdt_falco.yaml delete mode 100644 kubezero/falco/zdt_falco_rules.yaml diff --git a/kubezero/falco-kernel/APKBUILD b/kubezero/falco-kernel/APKBUILD deleted file mode 100644 index e9a4f17..0000000 --- a/kubezero/falco-kernel/APKBUILD +++ /dev/null @@ -1,71 +0,0 @@ -# Contributor: Stefan Reimer -# Maintainer: Stefan Reimer -_flavor=lts -_extra_flavors=virt - -pkgver=0.37.1 -pkgrel=0 -pkgname=falco-kernel-$_flavor -pkgdesc="Falco kernel module" -url="https://github.com/falcosecurity/falco" -arch="x86_64 aarch64" -license="AGPL-3.0" -makedepends="cmake linux-$_flavor-dev linux-headers" - # protobuf-dev jq-dev openssl-dev curl-dev c-ares-dev grpc-dev yaml-dev yaml-cpp-dev jsoncpp-dev re2-dev" - # perl autoconf elfutils-dev libtool argp-standalone musl-fts-dev musl-libintl musl-obstack-dev" -options="!check" - -source=" - falco-$pkgver.tar.gz::https://github.com/falcosecurity/falco/archive/refs/tags/$pkgver.tar.gz - " -builddir="$srcdir/falco-$pkgver" - -for f in $_extra_flavors; do - makedepends="$makedepends linux-$f-dev" - subpackages="$subpackages falco-kernel-$f:_extra" -done - -build() { - for flavor in $_flavor $_extra_flavors; do - mkdir -p $srcdir/falco-$pkgver/build-$flavor - - # Hack running the build inside a container other uname -r returns host kernel - KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-"$flavor")) - - cd $srcdir/falco-$pkgver/build-$flavor - cmake .. 
\ - -DCMAKE_BUILD_TYPE=Release \ - -DFALCO_VERSION=$pkgver \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DUSE_BUNDLED_DEPS=On \ - -DMUSL_OPTIMIZED_BUILD=On - - KERNELDIR=/lib/modules/$KERNEL_VERSION/build make driver - done -} - -_package() { - local flavor=$1 - local _out=$2 - - KERNEL_VERSION=$(basename $(ls -d /lib/modules/*-"$flavor")) - depends="linux-$flavor~$(echo $KERNEL_VERSION | sed -e 's/-.*$//')" - - cd $srcdir/falco-$pkgver/build-$flavor - mkdir -p "$_out"/lib/modules/$KERNEL_VERSION/kernel - gzip -9 -c driver/falco.ko > "$_out"/lib/modules/$KERNEL_VERSION/kernel/falco.ko.gz -} - -package() { - _package $_flavor $pkgdir -} - -_extra() { - flavor=${subpkgname##*-} - - _package $flavor $subpkgdir -} - -sha512sums=" -257d526c4d3eadbe2c79852221fdb8076f94e421aa66753628770ae7384137b4672064cbe1ba0a4d88d14e8a7d08e2521d5bd82a312c4b1442d8ea6fbbbb2f28 falco-0.37.1.tar.gz -" diff --git a/kubezero/falco/APKBUILD b/kubezero/falco/APKBUILD index 00511e0..e1b7164 100644 --- a/kubezero/falco/APKBUILD +++ b/kubezero/falco/APKBUILD @@ -1,13 +1,13 @@ # Contributor: Stefan Reimer # Maintainer: Stefan Reimer pkgname=falco -pkgver=0.37.1 +pkgver=0.38.1 pkgrel=0 pkgdesc="Falco is the open source solution for runtime security for hosts, containers, Kubernetes and the cloud" url="https://github.com/falcosecurity/falco" arch="x86_64 aarch64" license="AGPL-3.0" -makedepends="cmake linux-headers bash perl autoconf elfutils-dev libtool argp-standalone +makedepends="cmake clang bpftool linux-headers bash perl autoconf elfutils-dev libtool argp-standalone musl-fts-dev musl-libintl musl-legacy-error @@ -20,7 +20,6 @@ makedepends="cmake linux-headers bash perl autoconf elfutils-dev libtool argp-st # yaml-cpp-dev # " options="!check" -#depends="falco-kernel~$pkgver" # Original config # https://raw.githubusercontent.com/falcosecurity/rules/main/rules/falco_rules.yaml @@ -51,10 +50,10 @@ build() { -DMUSL_OPTIMIZED_BUILD=On \ -DBUILD_DRIVER=Off \ -DBUILD_BPF=Off \ - -DBUILD_LIBSCAP_MODERN_BPF=Off \ + -DBUILD_LIBSCAP_MODERN_BPF=On \ .. - make falco || bash + make falco } package() { @@ -65,7 +64,7 @@ package() { cd $pkgdir/etc/falco patch --no-backup-if-mismatch -i $srcdir/falco.patch patch --no-backup-if-mismatch -i $srcdir/rules.patch - + # We dont build anything on targets so remove sources rm -rf $pkgdir/usr/src rm -rf $pkgdir/usr/lib @@ -73,7 +72,7 @@ package() { } sha512sums=" -257d526c4d3eadbe2c79852221fdb8076f94e421aa66753628770ae7384137b4672064cbe1ba0a4d88d14e8a7d08e2521d5bd82a312c4b1442d8ea6fbbbb2f28 falco-0.37.1.tar.gz +f76b228328a3cf29f5795f7239393d7d05101f488e6ff09f5434237e906ec04a0139a5c91089c36cf3d01058584773b8fe0b1742e760a3e4953237fbc49e834f falco-0.38.1.tar.gz b152fcf6cd81895efa37797ab7ff1aac7350b5f51f2648aa9e3cce9d5ece55791ddf82c396e9da216293e2379a785a294cc972f28a91162dc5bc88ab09e1ab08 falco.patch 487b8b64d2399fd7b706be29e3722983bcdfde3ab5cf0f78b2e9fe1055a4ad958976f591e739491e25a06d7cdf6894c1e153e892a87b83c7a962e23c9a104528 rules.patch " diff --git a/kubezero/falco/falco.yaml b/kubezero/falco/falco.yaml deleted file mode 100644 index 7e446b4..0000000 --- a/kubezero/falco/falco.yaml +++ /dev/null @@ -1,1053 +0,0 @@ -# -# Copyright (C) 2023 The Falco Authors. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -################ -# Config index # -################ - -# Here is an index of the configuration categories to help you navigate -# the Falco configuration file: -# -# (Falco command-line arguments) -# (Falco environment variables) -# Falco rules files -# rules_file -# Falco plugins -# load_plugins -# plugins -# Falco config files -# watch_config_files -# Falco outputs settings -# time_format_iso_8601 -# priority -# json_output -# json_include_output_property -# json_include_tags_property -# buffered_outputs -# outputs (throttling) -# Falco outputs channels -# stdout_output -# syslog_output -# file_output -# http_output -# program_output -# grpc_output -# Falco exposed services -# grpc -# webserver -# Falco logging / alerting / metrics related to software functioning (basic) -# log_stderr -# log_syslog -# log_level -# libs_logger -# Falco logging / alerting / metrics related to software functioning (advanced) -# output_timeout -# syscall_event_timeouts -# syscall_event_drops -# metrics -# Falco performance tuning (advanced) -# syscall_buf_size_preset -# syscall_drop_failed_exit -# base_syscalls -# modern_bpf.cpus_for_each_syscall_buffer -# Falco cloud orchestration systems integration -# metadata_download -# (Guidance for Kubernetes container engine command-line args settings) - - -################################ -# Falco command-line arguments # -################################ - -# To explore the latest command-line arguments supported by Falco for additional -# configuration, you can run `falco --help` in your terminal. You can also pass -# configuration options from this config file as command-line arguments by using -# the `-o` flag followed by the option name and value. In the following example, -# three config options (`json_output`, `log_level`, and -# `modern_bpf.cpus_for_each_syscall_buffer`) are passed as command-line -# arguments with their corresponding values: falco -o "json_output=true" -# -o "log_level=debug" -o "modern_bpf.cpus_for_each_syscall_buffer=4" -# Please note that command-line arguments take precedence over the options -# specified in this config file. - - -############################### -# Falco environment variables # -############################### - -# Customize Falco settings using environment variables: -# -# - "HOST_ROOT": Specifies the prefix to the underlying host `/proc` filesystem -# when deploying Falco over a container with read-only host mounts instead of -# directly on the host. Defaults to "/host". -# - "FALCO_BPF_PROBE": Specify a custom path to the BPF object code file (`bpf` -# driver). This is not needed for the modern_bpf driver. -# - "FALCO_HOSTNAME": Customize the hostname output field logged by Falco by -# setting the "FALCO_HOSTNAME" environment variable. -# - "FALCO_CGROUP_MEM_PATH": Specifies the file path holding the container -# memory usage metric for the `metrics` feature. Defaults to -# "/sys/fs/cgroup/memory/memory.usage_in_bytes" (Kubernetes). 
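-#
-# As a minimal sketch (hostname and paths are illustrative), these variables
-# are typically exported in the container or service environment before
-# launching Falco:
-#
-# $ HOST_ROOT=/host FALCO_HOSTNAME=my-node-01 falco -c /etc/falco/falco.yaml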
- - -##################### -# Falco rules files # -##################### - -# [Stable] `rules_file` -# -# Falco rules can be specified using files or directories, which are loaded at -# startup. The name "rules_file" is maintained for backwards compatibility. If -# the entry is a file, it will be read directly. If the entry is a directory, -# all files within that directory will be read in alphabetical order. -# -# The falco_rules.yaml file ships with the Falco package and is overridden with -# every new software version. falco_rules.local.yaml is only created if it -# doesn't already exist. -# -# To customize the set of rules, you can add your modifications to any file. -# It's important to note that the files or directories are read in the order -# specified here. In addition, rules are loaded by Falco in the order they -# appear within each rule file. -# -# If you have any customizations intended to override a previous configuration, -# make sure they appear in later files to take precedence. On the other hand, if -# the conditions of rules with the same event type(s) have the potential to -# overshadow each other, ensure that the more important rule appears first. This -# is because rules are evaluated on a "first match wins" basis, where the first -# rule that matches the conditions will be applied, and subsequent rules will -# not be evaluated for the same event type. -# -# By arranging the order of files and rules thoughtfully, you can ensure that -# desired customizations and rule behaviors are prioritized and applied as -# intended. -rules_file: - - /etc/falco/falco_rules.yaml - - /etc/falco/falco_rules.local.yaml - - /etc/falco/rules.d - - -################# -# Falco plugins # -################# - -# [Stable] `load_plugins` and `plugins` -# -# --- [Description] -# -# Falco plugins enable integration with other services in your ecosystem. -# They allow Falco to extend its functionality and leverage data sources such as -# Kubernetes audit logs or AWS CloudTrail logs. This enables Falco to perform -# fast on-host detections beyond syscalls and container events. The plugin -# system will continue to evolve with more specialized functionality in future -# releases. -# -# Please refer to the plugins repo at -# https://github.com/falcosecurity/plugins/blob/master/plugins/ for detailed -# documentation on the available plugins. This repository provides comprehensive -# information about each plugin and how to utilize them with Falco. -# -# Please note that if your intention is to enrich Falco syscall logs with fields -# such as `k8s.ns.name`, `k8s.pod.name`, and `k8s.pod.*`, you do not need to use -# the `k8saudit` plugin nor the `-k`/`-K` Kubernetes metadata enrichment. This -# information is automatically extracted from the container runtime socket. The -# `k8saudit` plugin is specifically designed to integrate with Kubernetes audit -# logs and is not required for basic enrichment of syscall logs with -# Kubernetes-related fields. -# -# --- [Usage] -# -# Disabled by default, indicated by an empty `load_plugins` list. Each plugin meant -# to be enabled needs to be listed as explicit list item. -# -# For example, if you want to use the `k8saudit` plugin, -# ensure it is configured appropriately and then change this to: -# load_plugins: [k8saudit, json] -load_plugins: [] - -# Customize subsettings for each enabled plugin. These settings will only be -# applied when the corresponding plugin is enabled using the `load_plugins` -# option. 
-plugins: - - name: k8saudit - library_path: libk8saudit.so - init_config: - # maxEventSize: 262144 - # webhookMaxBatchSize: 12582912 - # sslCertificate: /etc/falco/falco.pem - open_params: "http://:9765/k8s-audit" - - name: cloudtrail - library_path: libcloudtrail.so - # see docs for init_config and open_params: - # https://github.com/falcosecurity/plugins/blob/master/plugins/cloudtrail/README.md - - name: json - library_path: libjson.so - - -###################### -# Falco config files # -###################### - -# [Stable] `watch_config_files` -# -# Falco monitors configuration and rule files for changes and automatically -# reloads itself to apply the updated configuration when any modifications are -# detected. This feature is particularly useful when you want to make real-time -# changes to the configuration or rules of Falco without interrupting its -# operation or losing its state. For more information about Falco's state -# engine, please refer to the `base_syscalls` section. -watch_config_files: true - - -########################## -# Falco outputs settings # -########################## - -# [Stable] `time_format_iso_8601` -# -# When enabled, Falco will display log and output messages with times in the ISO -# 8601 format. By default, times are shown in the local time zone determined by -# the /etc/localtime configuration. -time_format_iso_8601: false - -# [Stable] `priority` -# -# Any rule with a priority level more severe than or equal to the specified -# minimum level will be loaded and run by Falco. This allows you to filter and -# control the rules based on their severity, ensuring that only rules of a -# certain priority or higher are active and evaluated by Falco. Supported -# levels: "emergency", "alert", "critical", "error", "warning", "notice", -# "info", "debug" -priority: debug - -# [Stable] `json_output` -# -# When enabled, Falco will output alert messages and rules file -# loading/validation results in JSON format, making it easier for downstream -# programs to process and consume the data. By default, this option is disabled. -json_output: false - -# [Stable] `json_include_output_property` -# -# When using JSON output in Falco, you have the option to include the "output" -# property itself in the generated JSON output. The "output" property provides -# additional information about the purpose of the rule. To reduce the logging -# volume, it is recommended to turn it off if it's not necessary for your use -# case. -json_include_output_property: true - -# [Stable] `json_include_tags_property` -# -# When using JSON output in Falco, you have the option to include the "tags" -# field of the rules in the generated JSON output. The "tags" field provides -# additional metadata associated with the rule. To reduce the logging volume, -# if the tags associated with the rule are not needed for your use case or can -# be added at a later stage, it is recommended to turn it off. -json_include_tags_property: true - -# [Stable] `buffered_outputs` -# -# Enabling buffering for the output queue can offer performance optimization, -# efficient resource usage, and smoother data flow, resulting in a more reliable -# output mechanism. By default, buffering is disabled (false). -buffered_outputs: false - -# [Stable] `outputs` -# -# A throttling mechanism, implemented as a token bucket, can be used to control -# the rate of Falco outputs. Each event source has its own rate limiter, -# ensuring that alerts from one source do not affect the throttling of others. 
-# The following options control the mechanism: -# - rate: the number of tokens (i.e. right to send a notification) gained per -# second. When 0, the throttling mechanism is disabled. Defaults to 0. -# - max_burst: the maximum number of tokens outstanding. Defaults to 1000. -# -# For example, setting the rate to 1 allows Falco to send up to 1000 -# notifications initially, followed by 1 notification per second. The burst -# capacity is fully restored after 1000 seconds of no activity. -# -# Throttling can be useful in various scenarios, such as preventing notification -# floods, managing system load, controlling event processing, or complying with -# rate limits imposed by external systems or APIs. It allows for better resource -# utilization, avoids overwhelming downstream systems, and helps maintain a -# balanced and controlled flow of notifications. -# -# With the default settings, the throttling mechanism is disabled. -outputs: - rate: 0 - max_burst: 1000 - - -########################## -# Falco outputs channels # -########################## - -# Falco supports various output channels, such as syslog, stdout, file, gRPC, -# webhook, and more. You can enable or disable these channels as needed to -# control where Falco alerts and log messages are directed. This flexibility -# allows seamless integration with your preferred logging and alerting systems. -# Multiple outputs can be enabled simultaneously. - -# [Stable] `stdout_output` -# -# Redirect logs to standard output. -stdout_output: - enabled: true - -# [Stable] `syslog_output` -# -# Send logs to syslog. -syslog_output: - enabled: true - -# [Stable] `file_output` -# -# When appending Falco alerts to a file, each new alert will be added to a new -# line. It's important to note that Falco does not perform log rotation for this -# file. If the `keep_alive` option is set to `true`, the file will be opened once -# and continuously written to, else the file will be reopened for each output -# message. Furthermore, the file will be closed and reopened if Falco receives -# the SIGUSR1 signal. -file_output: - enabled: false - keep_alive: false - filename: ./events.txt - -# [Stable] `http_output` -# -# Send logs to an HTTP endpoint or webhook. -# -# When using falcosidekick, it is necessary to set `json_output` to true, which is -# conveniently done automatically for you when using `falcosidekick.enabled=true`. -http_output: - enabled: false - url: http://some.url - user_agent: "falcosecurity/falco" - # Tell Falco to not verify the remote server. - insecure: false - # Path to the CA certificate that can verify the remote server. - ca_cert: "" - # Path to a specific file that will be used as the CA certificate store. - ca_bundle: "" - # Path to a folder that will be used as the CA certificate store. CA certificate need to be - # stored as indivitual PEM files in this directory. - ca_path: "/etc/ssl/certs" - -# [Stable] `program_output` -# -# Redirect the output to another program or command. -# -# Possible additional things you might want to do with program output: -# - send to a slack webhook: -# program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX" -# - logging (alternate method than syslog): -# program: logger -t falco-test -# - send over a network connection: -# program: nc host.example.com 80 -# If `keep_alive` is set to `true`, the program will be started once and -# continuously written to, with each output message on its own line. 
If -# `keep_alive` is set to `false`, the program will be re-spawned for each output -# message. Furthermore, the program will be re-spawned if Falco receives -# the SIGUSR1 signal. -program_output: - enabled: false - keep_alive: false - program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX" - -# [Stable] `grpc_output` -# -# Use gRPC as an output service. -# -# gRPC is a modern and high-performance framework for remote procedure calls -# (RPC). It utilizes protocol buffers for efficient data serialization. The gRPC -# output in Falco provides a modern and efficient way to integrate with other -# systems. By default the setting is turned off. Enabling this option stores -# output events in memory until they are consumed by a gRPC client. Ensure that -# you have a consumer for the output events or leave it disabled. -grpc_output: - enabled: false - - -########################## -# Falco exposed services # -########################## - -# [Stable] `grpc` -# -# Falco provides support for running a gRPC server using two main binding types: -# 1. Over the network with mandatory mutual TLS authentication (mTLS), which -# ensures secure communication -# 2. Local Unix socket binding with no authentication. By default, the -# gRPCserver in Falco is turned off with no enabled services (see -# `grpc_output`setting). -# -# To configure the gRPC server in Falco, you can make the following changes to -# the options: -# -# - Uncomment the relevant configuration options related to the gRPC server. -# - Update the paths of the generated certificates for mutual TLS authentication -# if you choose to use mTLS. -# - Specify the address to bind and expose the gRPC server. -# - Adjust the threadiness configuration to control the number of threads and -# contexts used by the server. -# -# Keep in mind that if any issues arise while creating the gRPC server, the -# information will be logged, but it will not stop the main Falco daemon. - -# gRPC server using mTLS -# grpc: -# enabled: true -# bind_address: "0.0.0.0:5060" -# # When the `threadiness` value is set to 0, Falco will automatically determine -# # the appropriate number of threads based on the number of online cores in the system. -# threadiness: 0 -# private_key: "/etc/falco/certs/server.key" -# cert_chain: "/etc/falco/certs/server.crt" -# root_certs: "/etc/falco/certs/ca.crt" - -# gRPC server using a local unix socket -grpc: - enabled: false - bind_address: "unix:///run/falco/falco.sock" - # When the `threadiness` value is set to 0, Falco will automatically determine - # the appropriate number of threads based on the number of online cores in the system. - threadiness: 0 - -# [Stable] `webserver` -# -# Falco supports an embedded webserver that runs within the Falco process, -# providing a lightweight and efficient way to expose web-based functionalities -# without the need for an external web server. The following endpoints are -# exposed: -# - /healthz: designed to be used for checking the health and availability of -# the Falco application (the name of the endpoint is configurable). -# - /versions: responds with a JSON object containing the version numbers of the -# internal Falco components (similar output as `falco --version -o -# json_output=true`). -# -# Please note that the /versions endpoint is particularly useful for other Falco -# services, such as `falcoctl`, to retrieve information about a running Falco -# instance. 
If you plan to use `falcoctl` locally or with Kubernetes, make sure -# the Falco webserver is enabled. -# -# The behavior of the webserver can be controlled with the following options, -# which are enabled by default: -# -# The `ssl_certificate` option specifies a combined SSL certificate and -# corresponding key that are contained in a single file. You can generate a -# key/cert as follows: -# -# $ openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out -# certificate.pem $ cat certificate.pem key.pem > falco.pem $ sudo cp falco.pem -# /etc/falco/falco.pem -webserver: - enabled: true - # When the `threadiness` value is set to 0, Falco will automatically determine - # the appropriate number of threads based on the number of online cores in the system. - threadiness: 0 - listen_port: 8765 - k8s_healthz_endpoint: /healthz - ssl_enabled: false - ssl_certificate: /etc/falco/falco.pem - - -############################################################################## -# Falco logging / alerting / metrics related to software functioning (basic) # -############################################################################## - -# [Stable] `log_stderr` and `log_syslog` -# -# Falco's logs related to the functioning of the software, which are not related -# to Falco alert outputs but rather its lifecycle, settings and potential -# errors, can be directed to stderr and/or syslog. -log_stderr: true -log_syslog: true - -# [Stable] `log_level` -# -# The `log_level` setting determines the minimum log level to include in Falco's -# logs related to the functioning of the software. This setting is separate from -# the `priority` field of rules and specifically controls the log level of -# Falco's operational logging. By specifying a log level, you can control the -# verbosity of Falco's operational logs. Only logs of a certain severity level -# or higher will be emitted. Supported levels: "emergency", "alert", "critical", -# "error", "warning", "notice", "info", "debug". -log_level: info - -# [Stable] `libs_logger` -# -# The `libs_logger` setting in Falco determines the minimum log level to include -# in the logs related to the functioning of the software of the underlying -# `libs` library, which Falco utilizes. This setting is independent of the -# `priority` field of rules and the `log_level` setting that controls Falco's -# operational logs. It allows you to specify the desired log level for the `libs` -# library specifically, providing more granular control over the logging -# behavior of the underlying components used by Falco. Only logs of a certain -# severity level or higher will be emitted. Supported levels: "emergency", -# "alert", "critical", "error", "warning", "notice", "info", "debug". It is not -# recommended for production use. -libs_logger: - enabled: false - severity: debug - - -################################################################################# -# Falco logging / alerting / metrics related to software functioning (advanced) # -################################################################################# - -# [Stable] `output_timeout` -# -# Generates Falco operational logs when `log_level=notice` at minimum -# -# A timeout error occurs when a process or operation takes longer to complete -# than the allowed or expected time limit. In the context of Falco, an output -# timeout error refers to the situation where an output channel fails to deliver -# an alert within a specified deadline. 
Various reasons, such as network issues, -# resource constraints, or performance bottlenecks can cause timeouts. -# -# The `output_timeout` parameter specifies the duration, in milliseconds, to -# wait before considering the deadline exceeded. By default, the timeout is set -# to 2000ms (2 seconds), meaning that the consumer of Falco outputs can block -# the Falco output channel for up to 2 seconds without triggering a timeout -# error. -# -# Falco actively monitors the performance of output channels. With this setting -# the timeout error can be logged, but please note that this requires setting -# Falco's operational logs `log_level` to a minimum of `notice`. -# -# It's important to note that Falco outputs will not be discarded from the -# output queue. This means that if an output channel becomes blocked -# indefinitely, it indicates a potential issue that needs to be addressed by the -# user. -output_timeout: 2000 - -# [Stable] `syscall_event_timeouts` -# -# Generates Falco operational logs when `log_level=notice` at minimum -# -# Falco utilizes a shared buffer between the kernel and userspace to receive -# events, such as system call information, in userspace. However, there may be -# cases where timeouts occur in the underlying libraries due to issues in -# reading events or the need to skip a particular event. While it is uncommon -# for Falco to experience consecutive event timeouts, it has the capability to -# detect such situations. You can configure the maximum number of consecutive -# timeouts without an event after which Falco will generate an alert, but please -# note that this requires setting Falco's operational logs `log_level` to a -# minimum of `notice`. The default value is set to 1000 consecutive timeouts -# without receiving any events. The mapping of this value to a time interval -# depends on the CPU frequency. -syscall_event_timeouts: - max_consecutives: 1000 - -# [Stable] `syscall_event_drops` -# -# Generates "Falco internal: syscall event drop" rule output when `priority=debug` at minimum -# -# --- [Description] -# -# Falco uses a shared buffer between the kernel and userspace to pass system -# call information. When Falco detects that this buffer is full and system calls -# have been dropped, it can take one or more of the following actions: -# - ignore: do nothing (default when list of actions is empty) -# - log: log a DEBUG message noting that the buffer was full -# - alert: emit a Falco alert noting that the buffer was full -# - exit: exit Falco with a non-zero rc -# -# Notice it is not possible to ignore and log/alert messages at the same time. -# -# The rate at which log/alert messages are emitted is governed by a token -# bucket. The rate corresponds to one message every 30 seconds with a burst of -# one message (by default). -# -# The messages are emitted when the percentage of dropped system calls with -# respect the number of events in the last second is greater than the given -# threshold (a double in the range [0, 1]). If you want to be alerted on any -# drops, set the threshold to 0. -# -# For debugging/testing it is possible to simulate the drops using the -# `simulate_drops: true`. In this case the threshold does not apply. -# -# --- [Usage] -# -# Enabled by default, but requires Falco rules config `priority` set to `debug`. -# Emits a Falco rule named "Falco internal: syscall event drop" as many times in -# a given time period as dictated by the settings. Statistics here reflect the -# delta in a 1s time period. 
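-#
-# For instance, to be alerted on any drop at all, a sketch using command-line
-# overrides (assuming dotted-key `-o` overrides as shown earlier in this file;
-# values are illustrative):
-#
-# $ falco -o "syscall_event_drops.threshold=0" -o "priority=debug"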
-# -# If instead you prefer periodic metrics of monotonic counters at a regular -# interval, which include syscall drop statistics and additional metrics, -# explore the `metrics` configuration option. -syscall_event_drops: - threshold: .1 - actions: - - log - - alert - rate: .03333 - max_burst: 1 - simulate_drops: false - -# [Experimental] `metrics` -# -# Generates "Falco internal: metrics snapshot" rule output when `priority=info` at minimum -# By selecting `output_file`, equivalent JSON output will be appended to a file. -# -# periodic metric snapshots (including stats and resource utilization) captured -# at regular intervals -# -# --- [Description] -# -# Consider these key points about the `metrics` feature in Falco: -# -# - It introduces a redesigned stats/metrics system. -# - Native support for resource utilization metrics and specialized performance -# metrics. -# - Metrics are emitted as monotonic counters at predefined intervals -# (snapshots). -# - All metrics are consolidated into a single log message, adhering to the -# established rules schema and naming conventions. -# - Additional info fields complement the metrics and facilitate customized -# statistical analyses and correlations. -# - The metrics framework is designed for easy future extension. -# -# The `metrics` feature follows a specific schema and field naming convention. -# All metrics are collected as subfields under the `output_fields` key, similar -# to regular Falco rules. Each metric field name adheres to the grammar used in -# Falco rules. There are two new field classes introduced: `falco.` and `scap.`. -# The `falco.` class represents userspace counters, statistics, resource -# utilization, or useful information fields. The `scap.` class represents -# counters and statistics mostly obtained from Falco's kernel instrumentation -# before events are sent to userspace, but can include scap userspace stats as -# well. -# -# It's important to note that the output fields and their names can be subject -# to change until the metrics feature reaches a stable release. -# In addition, the majority of fields represent an instant snapshot, with the -# exception of event rates per second and drop percentage stats. These values -# are computed based on the delta between two snapshots. -# -# To customize the hostname in Falco, you can set the environment variable -# `FALCO_HOSTNAME` to your desired hostname. This is particularly useful in -# Kubernetes deployments where the hostname can be set to the pod name. -# -# --- [Usage] -# -# `enabled`: Disabled by default. -# -# `interval`: The stats interval in Falco follows the time duration definitions -# used by Prometheus. -# https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations -# -# Time durations are specified as a number, followed immediately by one of the -# following units: -# -# ms - millisecond -# s - second -# m - minute -# h - hour -# d - day - assuming a day has always 24h -# w - week - assuming a week has always 7d -# y - year - assuming a year has always 365d -# -# Example of a valid time duration: 1h30m20s10ms -# -# A minimum interval of 100ms is enforced for metric collection. However, for -# production environments, we recommend selecting one of the following intervals -# for optimal monitoring: -# -# 15m -# 30m -# 1h -# 4h -# 6h -# -# `output_rule`: To enable seamless metrics and performance monitoring, we -# recommend emitting metrics as the rule "Falco internal: metrics snapshot". 
-# This option is particularly useful when Falco logs are preserved in a data -# lake. Please note that to use this option, the Falco rules config `priority` -# must be set to `info` at a minimum. -# -# `output_file`: Append stats to a `jsonl` file. Use with caution in production -# as Falco does not automatically rotate the file. It can be used in combination -# with `output_rule`. -# -# `resource_utilization_enabled`: Emit CPU and memory usage metrics. CPU usage -# is reported as a percentage of one CPU and can be normalized to the total -# number of CPUs to determine overall usage. Memory metrics are provided in raw -# units (`kb` for `RSS`, `PSS` and `VSZ` or `bytes` for `container_memory_used`) -# and can be uniformly converted to megabytes (MB) using the -# `convert_memory_to_mb` functionality. In environments such as Kubernetes, it -# is crucial to track Falco's container memory usage. To customize the path of -# the memory metric file, you can create an environment variable named -# `FALCO_CGROUP_MEM_PATH` and set it to the desired file path. By default, Falco -# uses the file `/sys/fs/cgroup/memory/memory.usage_in_bytes` to monitor -# container memory usage, which aligns with Kubernetes' -# `container_memory_working_set_bytes` metric. -# -# `kernel_event_counters_enabled`: Emit kernel side event and drop counters, as -# an alternative to `syscall_event_drops`, but with some differences. These -# counters reflect monotonic values since Falco's start and are exported at a -# constant stats interval. -# -# `libbpf_stats_enabled`: Exposes statistics similar to `bpftool prog show`, -# providing information such as the number of invocations of each BPF program -# attached by Falco and the time spent in each program measured in nanoseconds. -# To enable this feature, the kernel must be >= 5.1, and the kernel -# configuration `/proc/sys/kernel/bpf_stats_enabled` must be set. This option, -# or an equivalent statistics feature, is not available for non `*bpf*` drivers. -# Additionally, please be aware that the current implementation of `libbpf` does -# not support granularity of statistics at the bpf tail call level. -# -# `include_empty_values`: When the option is set to true, fields with an empty -# numeric value will be included in the output. However, this rule does not -# apply to high-level fields such as `n_evts` or `n_drops`; they will always be -# included in the output even if their value is empty. This option can be -# beneficial for exploring the data schema and ensuring that fields with empty -# values are included in the output. -# -# todo: prometheus export option -# todo: syscall_counters_enabled option -metrics: - enabled: false - interval: 1h - output_rule: true - # output_file: /tmp/falco_stats.jsonl - resource_utilization_enabled: true - kernel_event_counters_enabled: true - libbpf_stats_enabled: true - convert_memory_to_mb: true - include_empty_values: false - - -####################################### -# Falco performance tuning (advanced) # -####################################### - -# [Stable] `syscall_buf_size_preset` -# -# --- [Description] -# -# The syscall buffer index determines the size of the shared space between Falco -# and its drivers. This shared space serves as a temporary storage for syscall -# events, allowing them to be transferred from the kernel to the userspace -# efficiently. The buffer size for each online CPU is determined by the buffer -# index, and each CPU has its own dedicated buffer. 
Adjusting this index allows -# you to control the overall size of the syscall buffers. -# -# --- [Usage] -# -# The index 0 is reserved, and each subsequent index corresponds to an -# increasing size in bytes. For example, index 1 corresponds to a size of 1 MB, -# index 2 corresponds to 2 MB, and so on: -# -# [(*), 1 MB, 2 MB, 4 MB, 8 MB, 16 MB, 32 MB, 64 MB, 128 MB, 256 MB, 512 MB] -# ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ -# | | | | | | | | | | | -# 0 1 2 3 4 5 6 7 8 9 10 -# -# -# The buffer dimensions in bytes are determined by the following requirements: -# (1) a power of 2. -# (2) a multiple of your system_page_dimension. -# (3) greater than `2 * (system_page_dimension). -# -# The buffer size constraints may limit the usability of certain indexes. Let's -# consider an example to illustrate this: -# -# If your system has a page size of 1 MB, the first available buffer size would -# be 4 MB because 2 MB is exactly equal to 2 * (system_page_size), which is not -# sufficient as we require more than 2 * (system_page_size). In this example, it -# is evident that if the page size is 1 MB, the first index that can be used is 3. -# -# However, in most cases, these constraints do not pose a limitation, and all -# indexes from 1 to 10 can be used. You can check your system's page size using -# the Falco `--page-size` command-line option. -# -# --- [Suggestions] -# -# The buffer size was previously fixed at 8 MB (index 4). You now have the -# option to adjust the size based on your needs. Increasing the size, such as to -# 16 MB (index 5), can reduce syscall drops in heavy production systems, but may -# impact performance. Decreasing the size can speed up the system but may -# increase syscall drops. It's important to note that the buffer size is mapped -# twice in the process' virtual memory, so a buffer of 8 MB will result in a 16 -# MB area in virtual memory. Use this parameter with caution and only modify it -# if the default size is not suitable for your use case. -syscall_buf_size_preset: 4 - -# [Experimental] `syscall_drop_failed_exit` -# -# Enabling this option in Falco allows it to drop failed system call exit events -# in the kernel driver before pushing them onto the ring buffer. This -# optimization can result in lower CPU usage and more efficient utilization of -# the ring buffer, potentially reducing the number of event losses. However, it -# is important to note that enabling this option also means sacrificing some -# visibility into the system. -syscall_drop_failed_exit: false - -# [Experimental] `base_syscalls`, use with caution, read carefully -# -# --- [Description] -# -# This option configures the set of syscalls that Falco traces. -# -# --- [Falco's State Engine] -# -# Falco requires a set of syscalls to build up state in userspace. For example, -# when spawning a new process or network connection, multiple syscalls are -# involved. Furthermore, properties of a process during its lifetime can be -# modified by syscalls. Falco accounts for this by enabling the collection of -# additional syscalls than the ones defined in the rules and by managing a smart -# process cache table in userspace. Processes are purged from this table when a -# process exits. 
-# -# By default, with -# ``` -# base_syscalls.custom_set = [] -# base_syscalls.repair = false -# ``` -# Falco enables tracing for a syscall set gathered: (1) from (enabled) Falco -# rules (2) from a static, more verbose set defined in -# `libsinsp::events::sinsp_state_sc_set` in -# libs/userspace/libsinsp/events/sinsp_events_ppm_sc.cpp This allows Falco to -# successfully build up it's state engine and life-cycle management. -# -# If the default behavior described above does not fit the user's use case for -# Falco, the `base_syscalls` option allows for finer end-user control of -# syscalls traced by Falco. -# -# --- [base_syscalls.custom_set] -# -# CAUTION: Misconfiguration of this setting may result in incomplete Falco event -# logs or Falco being unable to trace events entirely. -# -# `base_syscalls.custom_set` allows the user to explicitly define an additional -# set of syscalls to be traced in addition to the syscalls from each enabled -# Falco rule. -# -# This is useful in lowering CPU utilization and further tailoring Falco to -# specific environments according to your threat model and budget constraints. -# -# --- [base_syscalls.repair] -# -# `base_syscalls.repair` is an alternative to Falco's default state engine -# enforcement. When enabled, this option is designed to (1) ensure that Falco's -# state engine is correctly and successfully built-up (2) be the most system -# resource-friendly by activating the least number of additional syscalls -# (outside of those enabled for enabled rules) -# -# Setting `base_syscalls.repair` to `true` allows Falco to automatically -# configure what is described in the [Suggestions] section below. -# -# `base_syscalls.repair` can be enabled with an empty custom set, meaning with -# the following, -# ``` -# base_syscalls.custom_set = [] -# base_syscalls.repair = true -# ``` -# Falco enables tracing for a syscall set gathered: (1) from (enabled) Falco -# rules (2) from minimal set of additional syscalls needed to "repair" the -# state engine and properly log event conditions specified in enabled Falco -# rules -# -# --- [Usage] -# -# List of system calls names (), negative ("!") -# notation supported. -# -# Example: base_syscalls.custom_set: [, , -# "!"] base_syscalls.repair: -# -# We recommend to only exclude syscalls, e.g. "!mprotect" if you need a fast -# deployment update (overriding rules), else remove unwanted syscalls from the -# Falco rules. -# -# Passing `-o "log_level=debug" -o "log_stderr=true" --dry-run` to Falco's cmd -# args will print the final set of syscalls to STDOUT. -# -# --- [Suggestions] -# -# NOTE: setting `base_syscalls.repair: true` automates the following suggestions -# for you. -# -# These suggestions are subject to change as Falco and its state engine evolve. -# -# For execve* events: Some Falco fields for an execve* syscall are retrieved -# from the associated `clone`, `clone3`, `fork`, `vfork` syscalls when spawning -# a new process. The `close` syscall is used to purge file descriptors from -# Falco's internal thread / process cache table and is necessary for rules -# relating to file descriptors (e.g. open, openat, openat2, socket, connect, -# accept, accept4 ... and many more) -# -# Consider enabling the following syscalls in `base_syscalls.custom_set` for -# process rules: [clone, clone3, fork, vfork, execve, execveat, close] -# -# For networking related events: While you can log `connect` or `accept*` -# syscalls without the socket syscall, the log will not contain the ip tuples. 
-# Additionally, for `listen` and `accept*` syscalls, the `bind` syscall is also -# necessary. -# -# We recommend the following as the minimum set for networking-related rules: -# [clone, clone3, fork, vfork, execve, execveat, close, socket, bind, -# getsockopt] -# -# Lastly, for tracking the correct `uid`, `gid` or `sid`, `pgid` of a process -# when the running process opens a file or makes a network connection, consider -# adding the following to the above recommended syscall sets: ... setresuid, -# setsid, setuid, setgid, setpgid, setresgid, setsid, capset, chdir, chroot, -# fchdir ... -base_syscalls: - custom_set: [] - repair: false - -# [Stable] `modern_bpf.cpus_for_each_syscall_buffer`, modern_bpf only -# -# --- [Description] -# -# The modern_bpf driver in Falco utilizes the new BPF ring buffer, which has a -# different memory footprint compared to the current BPF driver that uses the -# perf buffer. The Falco core maintainers have discussed the differences and -# their implications, particularly in Kubernetes environments where limits need -# to be carefully set to avoid interference with the Falco daemonset deployment -# from the OOM killer. Based on guidance received from the kernel mailing list, -# it is recommended to assign multiple CPUs to one buffer instead of allocating -# a buffer for each CPU individually. This helps optimize resource allocation -# and prevent potential issues related to memory usage. -# -# This is an index that controls how many CPUs you want to assign to a single -# syscall buffer (ring buffer). By default, for modern_bpf every syscall buffer -# is associated to 2 CPUs, so the mapping is 1:2. The modern BPF probe allows -# you to choose different mappings, for example, changing the value to `1` -# results in a 1:1 mapping and would mean one syscall buffer for each CPU (this -# is the default for the `bpf` driver). -# -# --- [Usage] -# -# You can choose an index from 0 to MAX_NUMBER_ONLINE_CPUs to set the dimension -# of the syscall buffers. The value 0 represents a single buffer shared among -# all online CPUs. It serves as a flexible option when the exact number of -# online CPUs is unknown. Here's an example to illustrate this: -# -# Consider a system with 7 online CPUs: -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# -# - `1` means a syscall buffer for each CPU so 7 buffers -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# | | | | | | | -# BUFFERs 0 1 2 3 4 5 6 -# -# - `2` (Default value) means a syscall buffer for each CPU pair, so 4 buffers -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# | | | | | | | -# BUFFERs 0 0 1 1 2 2 3 -# -# Please note that in this example, there are 4 buffers in total. Three of the -# buffers are associated with pairs of CPUs, while the last buffer is mapped to -# a single CPU. This arrangement is necessary because we have an odd number of -# CPUs. -# -# - `0` or `MAX_NUMBER_ONLINE_CPUs` mean a syscall buffer shared between all -# CPUs, so 1 buffer -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# | | | | | | | -# BUFFERs 0 0 0 0 0 0 0 -# -# Moreover, you have the option to combine this parameter with -# `syscall_buf_size_preset` index. For instance, you can create a large shared -# syscall buffer of 512 MB (using syscall_buf_size_preset=10) that is -# allocated among all the online CPUs. 
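-#
-# A sketch of that combination using command-line overrides (values are
-# illustrative):
-#
-# $ falco -o "syscall_buf_size_preset=10" -o "modern_bpf.cpus_for_each_syscall_buffer=0"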
-# -# --- [Suggestions] -# -# The default choice of index 2 (one syscall buffer for each CPU pair) was made -# because the modern bpf probe utilizes a different memory allocation strategy -# compared to the other two drivers (bpf and kernel module). However, you have -# the flexibility to experiment and find the optimal configuration for your -# system. -# -# When considering a fixed syscall_buf_size_preset and a fixed buffer dimension: -# - Increasing this configs value results in lower number of buffers and you can -# speed up your system and reduce memory usage -# - However, using too few buffers may increase contention in the kernel, -# leading to a slowdown. -# -# If you have low event throughputs and minimal drops, reducing the number of -# buffers (higher `cpus_for_each_syscall_buffer`) can lower the memory footprint. -modern_bpf: - cpus_for_each_syscall_buffer: 2 - - -################################################# -# Falco cloud orchestration systems integration # -################################################# - -# [Stable] `metadata_download` -# -# When connected to an orchestrator like Kubernetes, Falco has the capability to -# collect metadata and enrich system call events with contextual data. The -# parameters mentioned here control the downloading process of this metadata. -# -# Please note that support for Mesos is deprecated, so these parameters -# currently apply only to Kubernetes. When using Falco with Kubernetes, you can -# enable this functionality by using the `-k` or `-K` command-line flag. -# -# However, it's worth mentioning that for important Kubernetes metadata fields -# such as namespace or pod name, these fields are automatically extracted from -# the container runtime, providing the necessary enrichment for common use cases -# of syscall-based threat detection. -# -# In summary, the `-k` flag is typically not required for most scenarios involving -# Kubernetes workload owner enrichment. The `-k` flag is primarily used when -# additional metadata is required beyond the standard fields, catering to more -# specific use cases, see https://falco.org/docs/reference/rules/supported-fields/#field-class-k8s. -metadata_download: - max_mb: 100 - chunk_wait_us: 1000 - watch_freq_sec: 1 - -# [Stable] Guidance for Kubernetes container engine command-line args settings -# -# Modern cloud environments, particularly Kubernetes, heavily rely on -# containerized workload deployments. When capturing events with Falco, it -# becomes essential to identify the owner of the workload for which events are -# being captured, such as syscall events. Falco integrates with the container -# runtime to enrich its events with container information, including fields like -# `container.image.repository`, `container.image.tag`, ... , `k8s.ns.name`, -# `k8s.pod.name`, `k8s.pod.*` in the Falco output (Falco retrieves Kubernetes -# namespace and pod name directly from the container runtime, see -# https://falco.org/docs/reference/rules/supported-fields/#field-class-container). -# -# Furthermore, Falco exposes container events themselves as a data source for -# alerting. To achieve this integration with the container runtime, Falco -# requires access to the runtime socket. By default, for Kubernetes, Falco -# attempts to connect to the following sockets: -# "/run/containerd/containerd.sock", "/run/crio/crio.sock", -# "/run/k3s/containerd/containerd.sock". If you have a custom path, you can use -# the `--cri` option to specify the correct location. 
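-#
-# For example, with a hypothetical custom socket path:
-#
-# $ falco --cri /run/custom/containerd.sock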
-# -# In some cases, you may encounter empty fields for container metadata. To -# address this, you can explore the `--disable-cri-async` option, which disables -# asynchronous fetching if the fetch operation is not completing quickly enough. -# -# To get more information on these command-line arguments, you can run `falco -# --help` in your terminal to view their current descriptions. -# -# !!! The options mentioned here are not available in the falco.yaml -# configuration file. Instead, they can can be used as a command-line argument -# when running the Falco command. diff --git a/kubezero/falco/falco_rules.yaml b/kubezero/falco/falco_rules.yaml deleted file mode 100644 index c3738b0..0000000 --- a/kubezero/falco/falco_rules.yaml +++ /dev/null @@ -1,1251 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# -# Copyright (C) 2023 The Falco Authors. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Information about rules tags and fields can be found here: https://falco.org/docs/rules/#tags-for-current-falco-ruleset -# The initial item in the `tags` fields reflects the maturity level of the rules introduced upon the proposal https://github.com/falcosecurity/rules/blob/main/proposals/20230605-rules-adoption-management-maturity-framework.md -# `tags` fields also include information about the type of workload inspection (host and/or container), and Mitre Attack killchain phases and Mitre TTP code(s) -# Mitre Attack References: -# [1] https://attack.mitre.org/tactics/enterprise/ -# [2] https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json - -# Starting with version 8, the Falco engine supports exceptions. -# However the Falco rules file does not use them by default. -- required_engine_version: 0.26.0 - -# Currently disabled as read/write are ignored syscalls. The nearly -# similar open_write/open_read check for files being opened for -# reading/writing. 
-# - macro: write -# condition: (syscall.type=write and fd.type in (file, directory)) -# - macro: read -# condition: (syscall.type=read and evt.dir=> and fd.type in (file, directory)) - -- macro: open_write - condition: (evt.type in (open,openat,openat2) and evt.is_open_write=true and fd.typechar='f' and fd.num>=0) - -- macro: open_read - condition: (evt.type in (open,openat,openat2) and evt.is_open_read=true and fd.typechar='f' and fd.num>=0) - -# Failed file open attempts, useful to detect threat actors making mistakes -# https://man7.org/linux/man-pages/man3/errno.3.html -# evt.res=ENOENT - No such file or directory -# evt.res=EACCESS - Permission denied -- macro: open_file_failed - condition: (evt.type in (open,openat,openat2) and fd.typechar='f' and fd.num=-1 and evt.res startswith E) - -# This macro `never_true` is used as placeholder for tuning negative logical sub-expressions, for example -# - macro: allowed_ssh_hosts -# condition: (never_true) -# can be used in a rules' expression with double negation `and not allowed_ssh_hosts` which effectively evaluates -# to true and does nothing, the perfect empty template for `logical` cases as opposed to list templates. -# When tuning the rule you can override the macro with something useful, e.g. -# - macro: allowed_ssh_hosts -# condition: (evt.hostname contains xyz) -- macro: never_true - condition: (evt.num=0) - -# This macro `always_true` is the flip side of the macro `never_true` and currently is commented out as -# it is not used. You can use it as placeholder for a positive logical sub-expression tuning template -# macro, e.g. `and custom_procs`, where -# - macro: custom_procs -# condition: (always_true) -# later you can customize, override the macros to something like -# - macro: custom_procs -# condition: (proc.name in (custom1, custom2, custom3)) -# - macro: always_true -# condition: (evt.num>=0) - -# In some cases, such as dropped system call events, information about -# the process name may be missing. For some rules that really depend -# on the identity of the process performing an action such as opening -# a file, etc., we require that the process name be known. 
-- macro: proc_name_exists - condition: (proc.name!="") - -- macro: spawned_process - condition: (evt.type in (execve, execveat) and evt.dir=<) - -- macro: create_symlink - condition: (evt.type in (symlink, symlinkat) and evt.dir=<) - -- macro: create_hardlink - condition: (evt.type in (link, linkat) and evt.dir=<) - -- macro: kernel_module_load - condition: (evt.type in (init_module, finit_module) and evt.dir=<) - -- macro: dup - condition: (evt.type in (dup, dup2, dup3)) - -# File categories -- macro: etc_dir - condition: (fd.name startswith /etc/) - -- list: shell_binaries - items: [ash, bash, csh, ksh, sh, tcsh, zsh, dash] - -- macro: shell_procs - condition: (proc.name in (shell_binaries)) - -# dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: login_binaries - items: [ - login, systemd, '"(systemd)"', systemd-logind, su, - nologin, faillog, lastlog, newgrp, sg - ] - -# dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: passwd_binaries - items: [ - shadowconfig, grpck, pwunconv, grpconv, pwck, - groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod, - groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh, - gpasswd, chfn, expiry, passwd, vigr, cpgr, adduser, addgroup, deluser, delgroup - ] - -# repoquery -l shadow-utils | grep bin | xargs ls -ld | grep -v '^d' | -# awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: shadowutils_binaries - items: [ - chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, - groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, - newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd - ] - -- list: http_server_binaries - items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2] - -- list: db_server_binaries - items: [mysqld, postgres, sqlplus] - -- list: postgres_mgmt_binaries - items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] - -- list: nosql_server_binaries - items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] - -- list: gitlab_binaries - items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] - -- macro: server_procs - condition: (proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd)) - -# The explicit quotes are needed to avoid the - characters being -# interpreted by the filter expression. -- list: rpm_binaries - items: [dnf, dnf-automatic, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, rhsmcertd, subscription-ma, - repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, - abrt-action-sav, rpmdb_stat, microdnf, rhn_check, yumdb] - -- list: deb_binaries - items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, - frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, - apt-listchanges, unattended-upgr, apt-add-reposit, apt-cache, apt.systemd.dai - ] -- list: python_package_managers - items: [pip, pip3, conda] - -# The truncated dpkg-preconfigu is intentional, process names are -# truncated at the falcosecurity-libs level. 
-- list: package_mgmt_binaries - items: [rpm_binaries, deb_binaries, update-alternat, gem, npm, python_package_managers, sane-utils.post, alternatives, chef-client, apk, snapd] - -- macro: run_by_package_mgmt_binaries - condition: (proc.aname in (package_mgmt_binaries, needrestart)) - -# A canonical set of processes that run other programs with different -# privileges or as a different user. -- list: userexec_binaries - items: [sudo, su, suexec, critical-stack, dzdo] - -- list: user_mgmt_binaries - items: [login_binaries, passwd_binaries, shadowutils_binaries] - -- list: hids_binaries - items: [aide, aide.wrapper, update-aide.con, logcheck, syslog-summary, osqueryd, ossec-syscheckd] - -- list: vpn_binaries - items: [openvpn] - -- list: nomachine_binaries - items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] - -- list: mail_binaries - items: [ - sendmail, sendmail-msp, postfix, procmail, exim4, - pickup, showq, mailq, dovecot, imap-login, imap, - mailmng-core, pop3-login, dovecot-lda, pop3 - ] - -- list: mail_config_binaries - items: [ - update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4, - update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config., - postfix.config, postfix-script, postconf - ] - -- list: sensitive_file_names - items: [/etc/shadow, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf] - -- list: sensitive_directory_names - items: [/, /etc, /etc/, /root, /root/] - -- macro: sensitive_files - condition: > - ((fd.name startswith /etc and fd.name in (sensitive_file_names)) or - fd.directory in (/etc/sudoers.d, /etc/pam.d)) - -# Indicates that the process is new. Currently detected using time -# since process was started, using a threshold of 5 seconds. -- macro: proc_is_new - condition: (proc.duration <= 5000000000) - -# Use this to test whether the event occurred within a container. -# When displaying container information in the output field, use -# %container.info, without any leading term (file=%fd.name -# %container.info user=%user.name user_loginuid=%user.loginuid, and not file=%fd.name -# container=%container.info user=%user.name user_loginuid=%user.loginuid). The output will change -# based on the context and whether or not -pk/-pm/-pc was specified on -# the command line. -- macro: container - condition: (container.id != host) - -- macro: interactive - condition: > - ((proc.aname=sshd and proc.name != sshd) or - proc.name=systemd-logind or proc.name=login) - -- list: cron_binaries - items: [anacron, cron, crond, crontab] - -# https://github.com/liske/needrestart -- list: needrestart_binaries - items: [needrestart, 10-dpkg, 20-rpm, 30-pacman] - -# Possible scripts run by sshkit -- list: sshkit_script_binaries - items: [10_etc_sudoers., 10_passwd_group] - -# System users that should never log into a system. Consider adding your own -# service users (e.g. 'apache' or 'mysqld') here. -- macro: system_users - condition: (user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data)) - -- macro: ansible_running_python - condition: (proc.name in (python, pypy, python3) and proc.cmdline contains ansible) - -# Qualys seems to run a variety of shell subprocesses, at various -# levels. This checks at a few levels without the cost of a full -# proc.aname, which traverses the full parent hierarchy. 
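-# A simpler but costlier equivalent would match against the full hierarchy in
-# one go, using the unindexed proc.aname mentioned above. A sketch (the macro
-# name is hypothetical):
-# - macro: run_by_qualys_any_depth
-#   condition: (proc.aname=qualys-cloud-ag)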
-- macro: run_by_qualys - condition: > - (proc.pname=qualys-cloud-ag or - proc.aname[2]=qualys-cloud-ag or - proc.aname[3]=qualys-cloud-ag or - proc.aname[4]=qualys-cloud-ag) - -- macro: run_by_google_accounts_daemon - condition: > - (proc.aname[1] startswith google_accounts or - proc.aname[2] startswith google_accounts or - proc.aname[3] startswith google_accounts) - -# Chef is similar. -- macro: run_by_chef - condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or - proc.aname[2]=chef-client or proc.aname[3]=chef-client or - proc.name=chef-client) - -# Also handles running semi-indirectly via scl -- macro: run_by_foreman - condition: > - (user.name=foreman and - ((proc.pname in (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or - (proc.pname=scl and proc.aname[2] in (tfm-rake,tfm-ruby)))) - -- macro: python_mesos_marathon_scripting - condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") - -- macro: splunk_running_forwarder - condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c /opt/splunkforwarder") - -- macro: perl_running_plesk - condition: (proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or - proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") - -- macro: perl_running_updmap - condition: (proc.cmdline startswith "perl /usr/bin/updmap") - -- macro: perl_running_centrifydc - condition: (proc.cmdline startswith "perl /usr/share/centrifydc") - -- macro: runuser_reading_pam - condition: (proc.name=runuser and fd.directory=/etc/pam.d) - -# CIS Linux Benchmark program -- macro: linux_bench_reading_etc_shadow - condition: ((proc.aname[2]=linux-bench and - proc.name in (awk,cut,grep)) and - (fd.name=/etc/shadow or - fd.directory=/etc/pam.d)) - -- macro: veritas_driver_script - condition: (proc.cmdline startswith "perl /opt/VRTSsfmh/bin/mh_driver.pl") - -- macro: user_ssh_directory - condition: (fd.name contains '/.ssh/' and fd.name glob '/home/*/.ssh/*') - -- macro: directory_traversal - condition: (fd.nameraw contains '../' and fd.nameraw glob '*../*../*') - -# ****************************************************************************** -# * "Directory traversal monitored file read" requires FALCO_ENGINE_VERSION 13 * -# ****************************************************************************** -- rule: Directory traversal monitored file read - desc: > - Web applications can be vulnerable to directory traversal attacks that allow accessing files outside of the web app's root directory - (e.g. Arbitrary File Read bugs). System directories like /etc are typically accessed via absolute paths. Access patterns outside of this - (here path traversal) can be regarded as suspicious. This rule includes failed file open attempts. 
- condition: > - (open_read or open_file_failed) - and (etc_dir or user_ssh_directory or - fd.name startswith /root/.ssh or - fd.name contains "id_rsa") - and directory_traversal - and not proc.pname in (shell_binaries) - enabled: true - output: Read monitored file via directory traversal (file=%fd.name fileraw=%fd.nameraw gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- macro: cmp_cp_by_passwd - condition: (proc.name in (cmp, cp) and proc.pname in (passwd, run-parts)) - -- macro: user_known_read_sensitive_files_activities - condition: (never_true) - -- rule: Read sensitive file trusted after startup - desc: > - An attempt to read any sensitive file (e.g. files containing user/password/authentication - information) by a trusted program after startup. Trusted programs might read these files - at startup to load initial state, but not afterwards. Can be customized as needed. - In modern containerized cloud infrastructures, accessing traditional Linux sensitive files - might be less relevant, yet it remains valuable for baseline detections. While we provide additional - rules for SSH or cloud vendor-specific credentials, you can significantly enhance your security - program by crafting custom rules for critical application credentials unique to your environment. - condition: > - open_read - and sensitive_files - and server_procs - and not proc_is_new - and proc.name!="sshd" - and not user_known_read_sensitive_files_activities - output: Sensitive file opened for reading by trusted program after startup (file=%fd.name pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- list: read_sensitive_file_binaries - items: [ - iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, - vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, - pam-auth-update, pam-config, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport, - scxcimservera, adclient, rtvscand, cockpit-session, userhelper, ossec-syscheckd - ] - -# Add conditions to this macro (probably in a separate file, -# overwriting this macro) to allow for specific combinations of -# programs accessing sensitive files. -# fluentd_writing_conf_files is a good example to follow, as it -# specifies both the program doing the writing as well as the specific -# files it is allowed to modify. -# -# In this file, it just takes one of the macros in the base rule -# and repeats it. 
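-# A local override following that pattern might look like the sketch below,
-# where my_backup_tool is an illustrative process name, not a shipped default:
-# - macro: user_read_sensitive_file_conditions
-#   condition: (cmp_cp_by_passwd or (proc.name=my_backup_tool and fd.name=/etc/shadow))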
-- macro: user_read_sensitive_file_conditions - condition: cmp_cp_by_passwd - -- list: read_sensitive_file_images - items: [] - -- macro: user_read_sensitive_file_containers - condition: (container and container.image.repository in (read_sensitive_file_images)) - -# This macro detects man-db postinst, see https://salsa.debian.org/debian/man-db/-/blob/master/debian/postinst -# The rule "Read sensitive file untrusted" use this macro to avoid FPs. -- macro: mandb_postinst - condition: > - (proc.name=perl and proc.args startswith "-e" and - proc.args contains "@pwd = getpwnam(" and - proc.args contains "exec " and - proc.args contains "/usr/bin/mandb") - -- rule: Read sensitive file untrusted - desc: > - An attempt to read any sensitive file (e.g. files containing user/password/authentication - information). Exceptions are made for known trusted programs. Can be customized as needed. - In modern containerized cloud infrastructures, accessing traditional Linux sensitive files - might be less relevant, yet it remains valuable for baseline detections. While we provide additional - rules for SSH or cloud vendor-specific credentials, you can significantly enhance your security - program by crafting custom rules for critical application credentials unique to your environment. - condition: > - open_read - and sensitive_files - and proc_name_exists - and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries, - cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries, - vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries, - in.proftpd, mandb, salt-call, salt-minion, postgres_mgmt_binaries, - google_oslogin_ - ) - and not cmp_cp_by_passwd - and not ansible_running_python - and not run_by_qualys - and not run_by_chef - and not run_by_google_accounts_daemon - and not user_read_sensitive_file_conditions - and not mandb_postinst - and not perl_running_plesk - and not perl_running_updmap - and not veritas_driver_script - and not perl_running_centrifydc - and not runuser_reading_pam - and not linux_bench_reading_etc_shadow - and not user_known_read_sensitive_files_activities - and not user_read_sensitive_file_containers - output: Sensitive file opened for reading by non-trusted program (file=%fd.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- macro: postgres_running_wal_e - condition: (proc.pname=postgres and (proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e" or proc.cmdline startswith "sh -c envdir \"/run/etc/wal-e.d/env\" wal-g wal-push")) - -- macro: redis_running_prepost_scripts - condition: (proc.aname[2]=redis-server and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d")) - -- macro: rabbitmq_running_scripts - condition: > - (proc.pname=beam.smp and - (proc.cmdline startswith "sh -c exec ps" or - proc.cmdline startswith "sh -c exec inet_gethost" or - proc.cmdline= "sh -s unix:cmd" or - proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1")) - -- macro: rabbitmqctl_running_scripts - condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ") - -- macro: run_by_appdynamics - condition: 
(proc.pexe endswith java and proc.pcmdline contains " -jar -Dappdynamics")
-
-# The binaries in this list and their descendants are *not* allowed to
-# spawn shells. This includes the binaries spawning shells directly as
-# well as indirectly. For example, apache -> php/perl for
-# mod_{php,perl} -> some shell is also not allowed, because the shell
-# has apache as an ancestor.
-- list: protected_shell_spawning_binaries
-  items: [
-    http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries,
-    fluentd, flanneld, splunkd, consul, smbd, runsv, PM2
-  ]
-
-- macro: parent_java_running_zookeeper
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.apache.zookeeper.server)
-
-- macro: parent_java_running_kafka
-  condition: (proc.pexe endswith java and proc.pcmdline contains kafka.Kafka)
-
-- macro: parent_java_running_elasticsearch
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch)
-
-- macro: parent_java_running_activemq
-  condition: (proc.pexe endswith java and proc.pcmdline contains activemq.jar)
-
-- macro: parent_java_running_cassandra
-  condition: (proc.pexe endswith java and (proc.pcmdline contains "-Dcassandra.config.loader" or proc.pcmdline contains org.apache.cassandra.service.CassandraDaemon))
-
-- macro: parent_java_running_jboss_wildfly
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.jboss)
-
-- macro: parent_java_running_glassfish
-  condition: (proc.pexe endswith java and proc.pcmdline contains com.sun.enterprise.glassfish)
-
-- macro: parent_java_running_hadoop
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.apache.hadoop)
-
-- macro: parent_java_running_datastax
-  condition: (proc.pexe endswith java and proc.pcmdline contains com.datastax)
-
-- macro: nginx_starting_nginx
-  condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf")
-
-- macro: nginx_running_aws_s3_cp
-  condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp")
-
-- macro: consul_running_net_scripts
-  condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc"))
-
-- macro: consul_running_alert_checks
-  condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts")
-
-- macro: serf_script
-  condition: (proc.cmdline startswith "sh -c serf")
-
-- macro: check_process_status
-  condition: (proc.cmdline startswith "sh -c kill -0 ")
-
-# In some cases, you may want to consider node processes run directly
-# in containers as protected shell spawners. Examples include using
-# pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct
-# entrypoint of the container, and when the node app is a long-lived
-# server using something like express.
-#
-# However, there are other uses of node related to build pipelines for
-# which node is not really a server but instead a general scripting
-# tool. In these cases, shells are very likely, and you don't want to
-# consider node processes protected shell spawners.
-#
-# We have to choose one of these cases, so we consider node processes
-# as unprotected by default. If you want to consider any node process
-# run in a container as a protected shell spawner, override the below
-# macro to remove the "never_true" clause, which allows it to take effect.
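-# That is, redefine it in a local rules file without the guard; a sketch:
-# - macro: possibly_node_in_container
-#   condition: (proc.pname=node and proc.aname[3]=docker-containe)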
-- macro: possibly_node_in_container - condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe)) - -# Similarly, you may want to consider any shell spawned by apache -# tomcat as suspect. The famous apache struts attack (CVE-2017-5638) -# could be exploited to do things like spawn shells. -# -# However, many applications *do* use tomcat to run arbitrary shells, -# as a part of build pipelines, etc. -# -# Like for node, we make this case opt-in. -- macro: possibly_parent_java_running_tomcat - condition: (never_true and proc.pexe endswith java and proc.pcmdline contains org.apache.catalina.startup.Bootstrap) - -- macro: protected_shell_spawner - condition: > - (proc.aname in (protected_shell_spawning_binaries) - or parent_java_running_zookeeper - or parent_java_running_kafka - or parent_java_running_elasticsearch - or parent_java_running_activemq - or parent_java_running_cassandra - or parent_java_running_jboss_wildfly - or parent_java_running_glassfish - or parent_java_running_hadoop - or parent_java_running_datastax - or possibly_parent_java_running_tomcat - or possibly_node_in_container) - -- list: mesos_shell_binaries - items: [mesos-docker-ex, mesos-slave, mesos-health-ch] - -# Note that runsv is both in protected_shell_spawner and the -# exclusions by pname. This means that runsv can itself spawn shells -# (the ./run and ./finish scripts), but the processes runsv can not -# spawn shells. -- rule: Run shell untrusted - desc: > - An attempt to spawn a shell below a non-shell application. The non-shell applications that are monitored are - defined in the protected_shell_spawner macro, with protected_shell_spawning_binaries being the list you can - easily customize. For Java parent processes, please note that Java often has a custom process name. Therefore, - rely more on proc.exe to define Java applications. This rule can be noisier, as you can see in the exhaustive - existing tuning. However, given it is very behavior-driven and broad, it is universally relevant to catch - general Remote Code Execution (RCE). Allocate time to tune this rule for your use cases and reduce noise. - Tuning suggestions include looking at the duration of the parent process (proc.ppid.duration) to define your - long-running app processes. Checking for newer fields such as proc.vpgid.name and proc.vpgid.exe instead of the - direct parent process being a non-shell application could make the rule more robust. 
- condition: > - spawned_process - and shell_procs - and proc.pname exists - and protected_shell_spawner - and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, - needrestart_binaries, - mesos_shell_binaries, - erl_child_setup, exechealthz, - PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, - lb-controller, nvidia-installe, runsv, statsite, erlexec, calico-node, - "puma reactor") - and not proc.cmdline in (known_shell_spawn_cmdlines) - and not proc.aname in (unicorn_launche) - and not consul_running_net_scripts - and not consul_running_alert_checks - and not nginx_starting_nginx - and not nginx_running_aws_s3_cp - and not run_by_package_mgmt_binaries - and not serf_script - and not check_process_status - and not run_by_foreman - and not python_mesos_marathon_scripting - and not splunk_running_forwarder - and not postgres_running_wal_e - and not redis_running_prepost_scripts - and not rabbitmq_running_scripts - and not rabbitmqctl_running_scripts - and not run_by_appdynamics - and not user_shell_container_exclusions - output: Shell spawned by untrusted binary (parent_exe=%proc.pexe parent_exepath=%proc.pexepath pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] aname[4]=%proc.aname[4] aname[5]=%proc.aname[5] aname[6]=%proc.aname[6] aname[7]=%proc.aname[7] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, host, container, process, shell, mitre_execution, T1059.004] - -# These images are allowed both to run with --privileged and to mount -# sensitive paths from the host filesystem. -# -# NOTE: This list is only provided for backwards compatibility with -# older local falco rules files that may have been appending to -# trusted_images. To make customizations, it's better to add images to -# either privileged_images or falco_sensitive_mount_images. 
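-# For example, a local rules file could extend falco_privileged_images; the
-# image name below is illustrative, and the `override` syntax assumes a rules
-# schema recent enough to support it (Falco 0.36 or newer):
-# - list: falco_privileged_images
-#   override:
-#     items: append
-#   items: [registry.example.com/infra/privileged-agent]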
-- list: trusted_images - items: [] - -- list: sematext_images - items: [docker.io/sematext/sematext-agent-docker, docker.io/sematext/agent, docker.io/sematext/logagent, - registry.access.redhat.com/sematext/sematext-agent-docker, - registry.access.redhat.com/sematext/agent, - registry.access.redhat.com/sematext/logagent] - -# Falco containers -- list: falco_containers - items: - - falcosecurity/falco - - docker.io/falcosecurity/falco - - public.ecr.aws/falcosecurity/falco - -# Falco no driver containers -- list: falco_no_driver_containers - items: - - falcosecurity/falco-no-driver - - docker.io/falcosecurity/falco-no-driver - - public.ecr.aws/falcosecurity/falco-no-driver - -# These container images are allowed to run with --privileged and full set of capabilities -# TODO: Remove k8s.gcr.io reference after 01/Dec/2023 -- list: falco_privileged_images - items: [ - falco_containers, - docker.io/calico/node, - calico/node, - docker.io/cloudnativelabs/kube-router, - docker.io/docker/ucp-agent, - docker.io/mesosphere/mesos-slave, - docker.io/rook/toolbox, - docker.io/sysdig/sysdig, - gcr.io/google_containers/kube-proxy, - gcr.io/google-containers/startup-script, - gcr.io/projectcalico-org/node, - gke.gcr.io/kube-proxy, - gke.gcr.io/gke-metadata-server, - gke.gcr.io/netd-amd64, - gke.gcr.io/watcher-daemonset, - gcr.io/google-containers/prometheus-to-sd, - k8s.gcr.io/ip-masq-agent-amd64, - k8s.gcr.io/kube-proxy, - k8s.gcr.io/prometheus-to-sd, - registry.k8s.io/ip-masq-agent-amd64, - registry.k8s.io/kube-proxy, - registry.k8s.io/prometheus-to-sd, - quay.io/calico/node, - sysdig/sysdig, - sematext_images, - k8s.gcr.io/dns/k8s-dns-node-cache, - registry.k8s.io/dns/k8s-dns-node-cache, - mcr.microsoft.com/oss/kubernetes/kube-proxy - ] - -# The steps libcontainer performs to set up the root program for a container are: -# - clone + exec self to a program runc:[0:PARENT] -# - clone a program runc:[1:CHILD] which sets up all the namespaces -# - clone a second program runc:[2:INIT] + exec to the root program. -# The parent of runc:[2:INIT] is runc:0:PARENT] -# As soon as 1:CHILD is created, 0:PARENT exits, so there's a race -# where at the time 2:INIT execs the root program, 0:PARENT might have -# already exited, or might still be around. So we handle both. -# We also let runc:[1:CHILD] count as the parent process, which can occur -# when we lose events and lose track of state. -- macro: container_entrypoint - condition: (not proc.pname exists or proc.pname in (runc:[0:PARENT], runc:[1:CHILD], runc, docker-runc, exe, docker-runc-cur, containerd-shim, systemd, crio)) - -- macro: user_known_system_user_login - condition: (never_true) - -# Anything run interactively by root -# - condition: evt.type != switch and user.name = root and proc.name != sshd and interactive -# output: "Interactive root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)" -# priority: WARNING -- rule: System user interactive - desc: > - System (e.g. non-login) users spawning new processes. Can add custom service users (e.g. apache or mysqld). - 'Interactive' is defined as new processes as descendants of an ssh session or login process. Consider further tuning - by only looking at processes in a terminal / tty (proc.tty != 0). A newer field proc.is_vpgid_leader could be of help - to distinguish if the process was "directly" executed, for instance, in a tty, or executed as a descendant process in the - same process group, which, for example, is the case when subprocesses are spawned from a script. 
Consider this rule - as a great template rule to monitor interactive accesses to your systems more broadly. However, such a custom rule would be - unique to your environment. The rule "Terminal shell in container" that fires when using "kubectl exec" is more Kubernetes - relevant, whereas this one could be more interesting for the underlying host. - condition: > - spawned_process - and system_users - and interactive - and not user_known_system_user_login - output: System user ran an interactive command (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: INFO - tags: [maturity_stable, host, container, users, mitre_execution, T1059, NIST_800-53_AC-2] - -# In some cases, a shell is expected to be run in a container. For example, configuration -# management software may do this, which is expected. -- macro: user_expected_terminal_shell_in_container_conditions - condition: (never_true) - -- rule: Terminal shell in container - desc: > - A shell was used as the entrypoint/exec point into a container with an attached terminal. Parent process may have - legitimately already exited and be null (read container_entrypoint macro). Common when using "kubectl exec" in Kubernetes. - Correlate with k8saudit exec logs if possible to find user or serviceaccount token used (fuzzy correlation by namespace and pod name). - Rather than considering it a standalone rule, it may be best used as generic auditing rule while examining other triggered - rules in this container/tty. - condition: > - spawned_process - and container - and shell_procs - and proc.tty != 0 - and container_entrypoint - and not user_expected_terminal_shell_in_container_conditions - output: A shell was spawned in a container with an attached terminal (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, container, shell, mitre_execution, T1059] - -# For some container types (mesos), there isn't a container image to -# work with, and the container name is autogenerated, so there isn't -# any stable aspect of the software to work with. In this case, we -# fall back to allowing certain command lines. 
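-# When allowing an extra command line below, keep the double quoting: the
-# outer single quotes are YAML quoting, while the inner double quotes turn the
-# spaced command line into a single value for the exact proc.cmdline match.
-# An illustrative (not shipped) entry:
-#    '"sh -c my-entrypoint --probe"'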
-- list: known_shell_spawn_cmdlines
-  items: [
-    '"sh -c uname -p 2> /dev/null"',
-    '"sh -c uname -s 2>&1"',
-    '"sh -c uname -r 2>&1"',
-    '"sh -c uname -v 2>&1"',
-    '"sh -c uname -a 2>&1"',
-    '"sh -c ruby -v 2>&1"',
-    '"sh -c getconf CLK_TCK"',
-    '"sh -c getconf PAGESIZE"',
-    '"sh -c LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null"',
-    '"sh -c LANG=C /sbin/ldconfig -p 2>/dev/null"',
-    '"sh -c /sbin/ldconfig -p 2>/dev/null"',
-    '"sh -c stty -a 2>/dev/null"',
-    '"sh -c stty -a < /dev/tty"',
-    '"sh -c stty -g < /dev/tty"',
-    '"sh -c node index.js"',
-    '"sh -c node index"',
-    '"sh -c node ./src/start.js"',
-    '"sh -c node app.js"',
-    '"sh -c node -e \"require(''nan'')\""',
-    '"sh -c node -e \"require(''nan'')\")"',
-    '"sh -c node $NODE_DEBUG_OPTION index.js "',
-    '"sh -c crontab -l 2"',
-    '"sh -c lsb_release -a"',
-    '"sh -c lsb_release -is 2>/dev/null"',
-    '"sh -c whoami"',
-    '"sh -c node_modules/.bin/bower-installer"',
-    '"sh -c /bin/hostname -f 2> /dev/null"',
-    '"sh -c locale -a"',
-    '"sh -c -t -i"',
-    '"sh -c openssl version"',
-    '"bash -c id -Gn kafadmin"',
-    '"sh -c /bin/sh -c ''date +%%s''"',
-    '"sh -c /usr/share/lighttpd/create-mime.conf.pl"'
-  ]
-
-# This list allows for easy additions to the set of commands allowed
-# to run shells in containers without having to copy and override the
-# entire run shell in container macro. Once
-# https://github.com/falcosecurity/falco/issues/255 is fixed this will be a
-# bit easier, as someone could append to any of the existing lists.
-- list: user_known_shell_spawn_binaries
-  items: []
-
-# This macro allows for easy additions to the set of commands allowed
-# to run shells in containers without having to override the entire
-# rule. Its default value is an expression that is always false, which
-# becomes true when the "not ..." in the rule is applied.
-- macro: user_shell_container_exclusions
-  condition: (never_true)
-
-# Containers from IBM Cloud
-- list: ibm_cloud_containers
-  items:
-    - icr.io/ext/sysdig/agent
-    - registry.ng.bluemix.net/armada-master/metrics-server-amd64
-    - registry.ng.bluemix.net/armada-master/olm
-
-# In a local/user rules file, list the namespace or container images that are
-# allowed to contact the K8s API Server from within a container. This
-# might cover cases where the K8s infrastructure itself is running
-# within a container.
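-# A minimal local override sketch for the namespace case, using the
-# user_known_contact_k8s_api_server_activities hook consumed by the rule
-# below (the namespace name is illustrative):
-# - macro: user_known_contact_k8s_api_server_activities
-#   condition: (k8s.ns.name = "monitoring")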
-# TODO: Remove k8s.gcr.io reference after 01/Dec/2023 -- macro: k8s_containers - condition: > - (container.image.repository in (gcr.io/google_containers/hyperkube-amd64, - gcr.io/google_containers/kube2sky, - docker.io/sysdig/sysdig, sysdig/sysdig, - fluent/fluentd-kubernetes-daemonset, prom/prometheus, - falco_containers, - falco_no_driver_containers, - ibm_cloud_containers, - velero/velero, - quay.io/jetstack/cert-manager-cainjector, weaveworks/kured, - quay.io/prometheus-operator/prometheus-operator, k8s.gcr.io/ingress-nginx/kube-webhook-certgen, - registry.k8s.io/ingress-nginx/kube-webhook-certgen, quay.io/spotahome/redis-operator, - registry.opensource.zalan.do/acid/postgres-operator, registry.opensource.zalan.do/acid/postgres-operator-ui, - rabbitmqoperator/cluster-operator, quay.io/kubecost1/kubecost-cost-model, - docker.io/bitnami/prometheus, docker.io/bitnami/kube-state-metrics, mcr.microsoft.com/oss/azure/aad-pod-identity/nmi) - or (k8s.ns.name = "kube-system")) - -- macro: k8s_api_server - condition: (fd.sip.name="kubernetes.default.svc.cluster.local") - -- macro: user_known_contact_k8s_api_server_activities - condition: (never_true) - -- rule: Contact K8S API Server From Container - desc: > - Detect attempts to communicate with the K8S API Server from a container by non-profiled users. Kubernetes APIs play a - pivotal role in configuring the cluster management lifecycle. Detecting potential unauthorized access to the API server - is of utmost importance. Audit your complete infrastructure and pinpoint any potential machines from which the API server - might be accessible based on your network layout. If Falco can't operate on all these machines, consider analyzing the - Kubernetes audit logs (typically drained from control nodes, and Falco offers a k8saudit plugin) as an additional data - source for detections within the control plane. - condition: > - evt.type=connect and evt.dir=< - and (fd.typechar=4 or fd.typechar=6) - and container - and k8s_api_server - and not k8s_containers - and not user_known_contact_k8s_api_server_activities - output: Unexpected connection to K8s API Server from container (connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, container, network, k8s, mitre_discovery, T1565] - -- rule: Netcat Remote Code Execution in Container - desc: > - Netcat Program runs inside container that allows remote code execution and may be utilized - as a part of a variety of reverse shell payload https://github.com/swisskyrepo/PayloadsAllTheThings/. - These programs are of higher relevance as they are commonly installed on UNIX-like operating systems. - Can fire in combination with the "Redirect STDOUT/STDIN to Network Connection in Container" - rule as it utilizes a different evt.type. 
- condition: > - spawned_process - and container - and ((proc.name = "nc" and (proc.cmdline contains " -e" or - proc.cmdline contains " -c")) or - (proc.name = "ncat" and (proc.args contains "--sh-exec" or - proc.args contains "--exec" or proc.args contains "-e " or - proc.args contains "-c " or proc.args contains "--lua-exec")) - ) - output: Netcat runs inside container that allows remote code execution (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, container, network, process, mitre_execution, T1059] - -- list: grep_binaries - items: [grep, egrep, fgrep] - -- macro: grep_commands - condition: (proc.name in (grep_binaries)) - -# a less restrictive search for things that might be passwords/ssh/user etc. -- macro: grep_more - condition: (never_true) - -- macro: private_key_or_password - condition: > - (proc.args icontains "BEGIN PRIVATE" or - proc.args icontains "BEGIN OPENSSH PRIVATE" or - proc.args icontains "BEGIN RSA PRIVATE" or - proc.args icontains "BEGIN DSA PRIVATE" or - proc.args icontains "BEGIN EC PRIVATE" or - (grep_more and - (proc.args icontains " pass " or - proc.args icontains " ssh " or - proc.args icontains " user ")) - ) - -- rule: Search Private Keys or Passwords - desc: > - Detect attempts to search for private keys or passwords using the grep or find command. This is often seen with - unsophisticated attackers, as there are many ways to access files using bash built-ins that could go unnoticed. - Regardless, this serves as a solid baseline detection that can be tailored to cover these gaps while maintaining - an acceptable noise level. - condition: > - spawned_process - and ((grep_commands and private_key_or_password) or - (proc.name = "find" and (proc.args contains "id_rsa" or - proc.args contains "id_dsa" or - proc.args contains "id_ed25519" or - proc.args contains "id_ecdsa" - ) - )) - output: Grep private keys or passwords activities found (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: - WARNING - tags: [maturity_stable, host, container, process, filesystem, mitre_credential_access, T1552.001] - -- list: log_directories - items: [/var/log, /dev/log] - -- list: log_files - items: [syslog, auth.log, secure, kern.log, cron, user.log, dpkg.log, last.log, yum.log, access_log, mysql.log, mysqld.log] - -- macro: access_log_files - condition: (fd.directory in (log_directories) or fd.filename in (log_files)) - -# a placeholder for whitelist log files that could be cleared. 
Recommend the macro as (fd.name startswith "/var/log/app1*") -- macro: allowed_clear_log_files - condition: (never_true) - -- macro: trusted_logging_images - condition: (container.image.repository endswith "splunk/fluentd-hec" or - container.image.repository endswith "fluent/fluentd-kubernetes-daemonset" or - container.image.repository endswith "openshift3/ose-logging-fluentd" or - container.image.repository endswith "containernetworking/azure-npm") - -- macro: containerd_activities - condition: (proc.name=containerd and (fd.name startswith "/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/" or - fd.name startswith "/var/lib/containerd/tmpmounts/")) - -- rule: Clear Log Activities - desc: > - Detect clearing of critical access log files, typically done to erase evidence that could be attributed to an adversary's - actions. To effectively customize and operationalize this detection, check for potentially missing log file destinations - relevant to your environment, and adjust the profiled containers you wish not to be alerted on. - condition: > - open_write - and access_log_files - and evt.arg.flags contains "O_TRUNC" - and not containerd_activities - and not trusted_logging_images - and not allowed_clear_log_files - output: Log files were tampered (file=%fd.name evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: - WARNING - tags: [maturity_stable, host, container, filesystem, mitre_defense_evasion, T1070, NIST_800-53_AU-10] - -- list: data_remove_commands - items: [shred, mkfs, mke2fs] - -- macro: clear_data_procs - condition: (proc.name in (data_remove_commands)) - -- macro: user_known_remove_data_activities - condition: (never_true) - -- rule: Remove Bulk Data from Disk - desc: > - Detect a process running to clear bulk data from disk with the intention to destroy data, possibly interrupting availability - to systems. Profile your environment and use user_known_remove_data_activities to tune this rule. - condition: > - spawned_process - and clear_data_procs - and not user_known_remove_data_activities - output: Bulk data has been removed from disk (file=%fd.name evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: - WARNING - tags: [maturity_stable, host, container, process, filesystem, mitre_impact, T1485] - -- rule: Create Symlink Over Sensitive Files - desc: > - Detect symlinks created over a curated list of sensitive files or subdirectories under /etc/ or - root directories. Can be customized as needed. Refer to further and equivalent guidance within the - rule "Read sensitive file untrusted". 
- condition: > - create_symlink - and (evt.arg.target in (sensitive_file_names) or evt.arg.target in (sensitive_directory_names)) - output: Symlinks created over sensitive files (target=%evt.arg.target linkpath=%evt.arg.linkpath evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- rule: Create Hardlink Over Sensitive Files - desc: > - Detect hardlink created over a curated list of sensitive files or subdirectories under /etc/ or - root directories. Can be customized as needed. Refer to further and equivalent guidance within the - rule "Read sensitive file untrusted". - condition: > - create_hardlink - and (evt.arg.oldpath in (sensitive_file_names)) - output: Hardlinks created over sensitive files (target=%evt.arg.target linkpath=%evt.arg.linkpath evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- list: user_known_packet_socket_binaries - items: [] - -- rule: Packet socket created in container - desc: > - Detect new packet socket at the device driver (OSI Layer 2) level in a container. Packet socket could be used for ARP Spoofing - and privilege escalation (CVE-2020-14386) by an attacker. Noise can be reduced by using the user_known_packet_socket_binaries - template list. - condition: > - evt.type=socket - and container - and evt.arg[0] contains AF_PACKET - and not proc.name in (user_known_packet_socket_binaries) - output: Packet socket was created in a container (socket_info=%evt.args connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, container, network, mitre_credential_access, T1557.002] - -- macro: user_known_stand_streams_redirect_activities - condition: (never_true) - -# As of engine version 20 this rule can be improved by using the fd.types[] -# field so it only triggers once when all three of std{out,err,in} are -# redirected. -# -# - list: ip_sockets -# items: ["ipv4", "ipv6"] -# -# - rule: Redirect STDOUT/STDIN to Network Connection in Container once -# condition: dup and container and evt.rawres in (0, 1, 2) and fd.type in (ip_sockets) and fd.types[0] in (ip_sockets) and fd.types[1] in (ip_sockets) and fd.types[2] in (ip_sockets) and not user_known_stand_streams_redirect_activities -# -# The following rule has not been changed by default as existing users could be -# relying on the rule triggering when any of std{out,err,in} are redirected. -- rule: Redirect STDOUT/STDIN to Network Connection in Container - desc: > - Detect redirection of stdout/stdin to a network connection within a container, achieved by utilizing a - variant of the dup syscall (potential reverse shell or remote code execution - https://github.com/swisskyrepo/PayloadsAllTheThings/). 
This detection is behavior-based and may generate
-    noise in the system, and can be adjusted using the user_known_stand_streams_redirect_activities template
-    macro. Tuning can be performed similarly to existing detections based on process lineage or container images,
-    and/or it can be limited to interactive tty (tty != 0).
-  condition: >
-    dup
-    and container
-    and evt.rawres in (0, 1, 2)
-    and fd.type in ("ipv4", "ipv6")
-    and not user_known_stand_streams_redirect_activities
-  output: Redirect stdout/stdin to network connection (gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] fd.sip=%fd.sip connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: NOTICE
-  tags: [maturity_stable, container, network, process, mitre_execution, T1059]
-
-- list: allowed_container_images_loading_kernel_module
-  items: []
-
-- rule: Linux Kernel Module Injection Detected
-  desc: >
-    Detect injection of Linux kernel modules from containers using insmod or modprobe with the init_module and
-    finit_module syscalls, given the precondition of sys_module effective capabilities. Profile the environment
-    and consider allowed_container_images_loading_kernel_module to reduce noise and account for legitimate cases.
-  condition: >
-    kernel_module_load
-    and container
-    and thread.cap_effective icontains sys_module
-    and not container.image.repository in (allowed_container_images_loading_kernel_module)
-  output: Linux Kernel Module injection from container (parent_exepath=%proc.pexepath gparent=%proc.aname[2] gexepath=%proc.aexepath[2] module=%proc.args res=%evt.res evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: WARNING
-  tags: [maturity_stable, host, container, process, mitre_persistence, TA0003]
-
-- rule: Debugfs Launched in Privileged Container
-  desc: >
-    Detect the file system debugger debugfs launched inside a privileged container which might lead to container escape.
-    This rule has a narrower scope.
-  condition: >
-    spawned_process
-    and container
-    and container.privileged=true
-    and proc.name=debugfs
-  output: Debugfs launched in a privileged container (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: WARNING
-  tags: [maturity_stable, container, cis, process, mitre_privilege_escalation, T1611]
-
-- rule: Detect release_agent File Container Escapes
-  desc: >
-    Detect an attempt to exploit a container escape using the release_agent file.
-    By running a container with certain capabilities, a privileged user can modify the
-    release_agent file and escape from the container.
- condition: > - open_write - and container - and fd.name endswith release_agent - and (user.uid=0 or thread.cap_effective contains CAP_DAC_OVERRIDE) - and thread.cap_effective contains CAP_SYS_ADMIN - output: Detect an attempt to exploit a container escape using release_agent file (file=%fd.name cap_effective=%thread.cap_effective evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: CRITICAL - tags: [maturity_stable, container, process, mitre_privilege_escalation, T1611] - -- list: docker_binaries - items: [docker, dockerd, containerd-shim, "runc:[1:CHILD]", pause, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current, dockerd-current] - -- list: known_ptrace_binaries - items: [] - -- macro: known_ptrace_procs - condition: (proc.name in (known_ptrace_binaries)) - -- macro: ptrace_attach_or_injection - condition: > - (evt.type=ptrace and evt.dir=> and - (evt.arg.request contains PTRACE_POKETEXT or - evt.arg.request contains PTRACE_POKEDATA or - evt.arg.request contains PTRACE_ATTACH or - evt.arg.request contains PTRACE_SEIZE or - evt.arg.request contains PTRACE_SETREGS)) - -- rule: PTRACE attached to process - desc: > - Detect an attempt to inject potentially malicious code into a process using PTRACE in order to evade - process-based defenses or elevate privileges. Common anti-patterns are debuggers. Additionally, profiling - your environment via the known_ptrace_procs template macro can reduce noise. - A successful ptrace syscall generates multiple logs at once. - condition: > - ptrace_attach_or_injection - and proc_name_exists - and not known_ptrace_procs - output: Detected ptrace PTRACE_ATTACH attempt (proc_pcmdline=%proc.pcmdline evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, process, mitre_privilege_escalation, T1055.008] - -- rule: PTRACE anti-debug attempt - desc: > - Detect usage of the PTRACE system call with the PTRACE_TRACEME argument, indicating a program actively attempting - to avoid debuggers attaching to the process. This behavior is typically indicative of malware activity. - Read more about PTRACE in the "PTRACE attached to process" rule. - condition: > - evt.type=ptrace and evt.dir=> - and evt.arg.request contains PTRACE_TRACEME - and proc_name_exists - output: Detected potential PTRACE_TRACEME anti-debug attempt (proc_pcmdline=%proc.pcmdline evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, host, container, process, mitre_defense_evasion, T1622] - -- macro: private_aws_credentials - condition: > - (proc.args icontains "aws_access_key_id" or - proc.args icontains "aws_secret_access_key" or - proc.args icontains "aws_session_token" or - proc.args icontains "accesskeyid" or - proc.args icontains "secretaccesskey") - -- rule: Find AWS Credentials - desc: > - Detect attempts to search for private keys or passwords using the grep or find command, particularly targeting standard - AWS credential locations. 
This is often seen with unsophisticated attackers, as there are many ways to access files - using bash built-ins that could go unnoticed. Regardless, this serves as a solid baseline detection that can be tailored - to cover these gaps while maintaining an acceptable noise level. This rule complements the rule "Search Private Keys or Passwords". - condition: > - spawned_process - and ((grep_commands and private_aws_credentials) or - (proc.name = "find" and proc.args endswith ".aws/credentials")) - output: Detected AWS credentials search activity (proc_pcmdline=%proc.pcmdline proc_cwd=%proc.cwd group_gid=%group.gid group_name=%group.name user_loginname=%user.loginname evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, process, aws, mitre_credential_access, T1552] - -- rule: Execution from /dev/shm - desc: > - This rule detects file execution in the /dev/shm directory, a tactic often used by threat actors to store their readable, writable, and - occasionally executable files. /dev/shm acts as a link to the host or other containers, creating vulnerabilities for their compromise - as well. Notably, /dev/shm remains unchanged even after a container restart. Consider this rule alongside the newer - "Drop and execute new binary in container" rule. - condition: > - spawned_process - and (proc.exe startswith "/dev/shm/" or - (proc.cwd startswith "/dev/shm/" and proc.exe startswith "./" ) or - (shell_procs and proc.args startswith "-c /dev/shm") or - (shell_procs and proc.args startswith "-i /dev/shm") or - (shell_procs and proc.args startswith "/dev/shm") or - (proc.cwd startswith "/dev/shm/" and proc.args startswith "./" )) - and not container.image.repository in (falco_privileged_images, trusted_images) - output: File execution detected from /dev/shm (evt_res=%evt.res file=%fd.name proc_cwd=%proc.cwd proc_pcmdline=%proc.pcmdline user_loginname=%user.loginname group_gid=%group.gid group_name=%group.name evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, mitre_execution, T1059.004] - -# List of allowed container images that are known to execute binaries not part of their base image. -- list: known_drop_and_execute_containers - items: [] - -- rule: Drop and execute new binary in container - desc: > - Detect if an executable not belonging to the base image of a container is being executed. - The drop and execute pattern can be observed very often after an attacker gained an initial foothold. - is_exe_upper_layer filter field only applies for container runtimes that use overlayfs as union mount filesystem. - Adopters can utilize the provided template list known_drop_and_execute_containers containing allowed container - images known to execute binaries not included in their base image. Alternatively, you could exclude non-production - namespaces in Kubernetes settings by adjusting the rule further. This helps reduce noise by applying application - and environment-specific knowledge to this rule. Common anti-patterns include administrators or SREs performing - ad-hoc debugging. 
- condition: > - spawned_process - and container - and proc.is_exe_upper_layer=true - and not container.image.repository in (known_drop_and_execute_containers) - output: Executing binary not part of base image (proc_exe=%proc.exe proc_sname=%proc.sname gparent=%proc.aname[2] proc_exe_ino_ctime=%proc.exe_ino.ctime proc_exe_ino_mtime=%proc.exe_ino.mtime proc_exe_ino_ctime_duration_proc_start=%proc.exe_ino.ctime_duration_proc_start proc_cwd=%proc.cwd container_start_ts=%container.start_ts evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: CRITICAL - tags: [maturity_stable, container, process, mitre_persistence, TA0003, PCI_DSS_11.5.1] - -# RFC1918 addresses were assigned for private network usage -- list: rfc_1918_addresses - items: ['"10.0.0.0/8"', '"172.16.0.0/12"', '"192.168.0.0/16"'] - -- macro: outbound - condition: > - (((evt.type = connect and evt.dir=<) or - (evt.type in (sendto,sendmsg) and evt.dir=< and - fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and - (fd.typechar = 4 or fd.typechar = 6) and - (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8" and not fd.snet in (rfc_1918_addresses)) and - (evt.rawres >= 0 or evt.res = EINPROGRESS)) - -- list: ssh_non_standard_ports - items: [80, 8080, 88, 443, 8443, 53, 4444] - -- macro: ssh_non_standard_ports_network - condition: (fd.sport in (ssh_non_standard_ports)) - -- rule: Disallowed SSH Connection Non Standard Port - desc: > - Detect any new outbound SSH connection from the host or container using a non-standard port. This rule holds the potential - to detect a family of reverse shells that cause the victim machine to connect back out over SSH, with STDIN piped from - the SSH connection to a shell's STDIN, and STDOUT of the shell piped back over SSH. Such an attack can be launched against - any app that is vulnerable to command injection. The upstream rule only covers a limited selection of non-standard ports. - We suggest adding more ports, potentially incorporating ranges based on your environment's knowledge and custom SSH port - configurations. This rule can complement the "Redirect STDOUT/STDIN to Network Connection in Container" or - "Disallowed SSH Connection" rule. - condition: > - outbound - and proc.exe endswith ssh - and fd.l4proto=tcp - and ssh_non_standard_ports_network - output: Disallowed SSH Connection (connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, host, container, network, process, mitre_execution, T1059] - -- list: known_memfd_execution_binaries - items: [] - -- macro: known_memfd_execution_processes - condition: (proc.name in (known_memfd_execution_binaries)) - -- rule: Fileless execution via memfd_create - desc: > - Detect if a binary is executed from memory using the memfd_create technique. This is a well-known defense evasion - technique for executing malware on a victim machine without storing the payload on disk and to avoid leaving traces - about what has been executed. 
Adopters can whitelist processes that may use fileless execution for benign purposes - by adding items to the list known_memfd_execution_processes. - condition: > - spawned_process - and proc.is_exe_from_memfd=true - and not known_memfd_execution_processes - output: Fileless execution via memfd_create (container_start_ts=%container.start_ts proc_cwd=%proc.cwd evt_res=%evt.res proc_sname=%proc.sname gparent=%proc.aname[2] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: CRITICAL - tags: [maturity_stable, host, container, process, mitre_defense_evasion, T1620] diff --git a/kubezero/falco/zdt_falco.yaml b/kubezero/falco/zdt_falco.yaml deleted file mode 100644 index 404b79f..0000000 --- a/kubezero/falco/zdt_falco.yaml +++ /dev/null @@ -1,1053 +0,0 @@ -# -# Copyright (C) 2023 The Falco Authors. -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -################ -# Config index # -################ - -# Here is an index of the configuration categories to help you navigate -# the Falco configuration file: -# -# (Falco command-line arguments) -# (Falco environment variables) -# Falco rules files -# rules_file -# Falco plugins -# load_plugins -# plugins -# Falco config files -# watch_config_files -# Falco outputs settings -# time_format_iso_8601 -# priority -# json_output -# json_include_output_property -# json_include_tags_property -# buffered_outputs -# outputs (throttling) -# Falco outputs channels -# stdout_output -# syslog_output -# file_output -# http_output -# program_output -# grpc_output -# Falco exposed services -# grpc -# webserver -# Falco logging / alerting / metrics related to software functioning (basic) -# log_stderr -# log_syslog -# log_level -# libs_logger -# Falco logging / alerting / metrics related to software functioning (advanced) -# output_timeout -# syscall_event_timeouts -# syscall_event_drops -# metrics -# Falco performance tuning (advanced) -# syscall_buf_size_preset -# syscall_drop_failed_exit -# base_syscalls -# modern_bpf.cpus_for_each_syscall_buffer -# Falco cloud orchestration systems integration -# metadata_download -# (Guidance for Kubernetes container engine command-line args settings) - - -################################ -# Falco command-line arguments # -################################ - -# To explore the latest command-line arguments supported by Falco for additional -# configuration, you can run `falco --help` in your terminal. You can also pass -# configuration options from this config file as command-line arguments by using -# the `-o` flag followed by the option name and value. 
In the following example, -# three config options (`json_output`, `log_level`, and -# `modern_bpf.cpus_for_each_syscall_buffer`) are passed as command-line -# arguments with their corresponding values: falco -o "json_output=true" -# -o "log_level=debug" -o "modern_bpf.cpus_for_each_syscall_buffer=4" -# Please note that command-line arguments take precedence over the options -# specified in this config file. - - -############################### -# Falco environment variables # -############################### - -# Customize Falco settings using environment variables: -# -# - "HOST_ROOT": Specifies the prefix to the underlying host `/proc` filesystem -# when deploying Falco over a container with read-only host mounts instead of -# directly on the host. Defaults to "/host". -# - "FALCO_BPF_PROBE": Specify a custom path to the BPF object code file (`bpf` -# driver). This is not needed for the modern_bpf driver. -# - "FALCO_HOSTNAME": Customize the hostname output field logged by Falco by -# setting the "FALCO_HOSTNAME" environment variable. -# - "FALCO_CGROUP_MEM_PATH": Specifies the file path holding the container -# memory usage metric for the `metrics` feature. Defaults to -# "/sys/fs/cgroup/memory/memory.usage_in_bytes" (Kubernetes). - - -##################### -# Falco rules files # -##################### - -# [Stable] `rules_file` -# -# Falco rules can be specified using files or directories, which are loaded at -# startup. The name "rules_file" is maintained for backwards compatibility. If -# the entry is a file, it will be read directly. If the entry is a directory, -# all files within that directory will be read in alphabetical order. -# -# The falco_rules.yaml file ships with the Falco package and is overridden with -# every new software version. falco_rules.local.yaml is only created if it -# doesn't already exist. -# -# To customize the set of rules, you can add your modifications to any file. -# It's important to note that the files or directories are read in the order -# specified here. In addition, rules are loaded by Falco in the order they -# appear within each rule file. -# -# If you have any customizations intended to override a previous configuration, -# make sure they appear in later files to take precedence. On the other hand, if -# the conditions of rules with the same event type(s) have the potential to -# overshadow each other, ensure that the more important rule appears first. This -# is because rules are evaluated on a "first match wins" basis, where the first -# rule that matches the conditions will be applied, and subsequent rules will -# not be evaluated for the same event type. -# -# By arranging the order of files and rules thoughtfully, you can ensure that -# desired customizations and rule behaviors are prioritized and applied as -# intended. -rules_file: - - /etc/falco/falco_rules.yaml - - /etc/falco/falco_rules.local.yaml - - /etc/falco/rules.d - - -################# -# Falco plugins # -################# - -# [Stable] `load_plugins` and `plugins` -# -# --- [Description] -# -# Falco plugins enable integration with other services in your ecosystem. -# They allow Falco to extend its functionality and leverage data sources such as -# Kubernetes audit logs or AWS CloudTrail logs. This enables Falco to perform -# fast on-host detections beyond syscalls and container events. The plugin -# system will continue to evolve with more specialized functionality in future -# releases. 
-# -# Please refer to the plugins repo at -# https://github.com/falcosecurity/plugins/blob/master/plugins/ for detailed -# documentation on the available plugins. This repository provides comprehensive -# information about each plugin and how to utilize them with Falco. -# -# Please note that if your intention is to enrich Falco syscall logs with fields -# such as `k8s.ns.name`, `k8s.pod.name`, and `k8s.pod.*`, you do not need to use -# the `k8saudit` plugin nor the `-k`/`-K` Kubernetes metadata enrichment. This -# information is automatically extracted from the container runtime socket. The -# `k8saudit` plugin is specifically designed to integrate with Kubernetes audit -# logs and is not required for basic enrichment of syscall logs with -# Kubernetes-related fields. -# -# --- [Usage] -# -# Disabled by default, indicated by an empty `load_plugins` list. Each plugin meant -# to be enabled needs to be listed as explicit list item. -# -# For example, if you want to use the `k8saudit` plugin, -# ensure it is configured appropriately and then change this to: -# load_plugins: [k8saudit, json] -load_plugins: [] - -# Customize subsettings for each enabled plugin. These settings will only be -# applied when the corresponding plugin is enabled using the `load_plugins` -# option. -plugins: - - name: k8saudit - library_path: libk8saudit.so - init_config: - # maxEventSize: 262144 - # webhookMaxBatchSize: 12582912 - # sslCertificate: /etc/falco/falco.pem - open_params: "http://:9765/k8s-audit" - - name: cloudtrail - library_path: libcloudtrail.so - # see docs for init_config and open_params: - # https://github.com/falcosecurity/plugins/blob/master/plugins/cloudtrail/README.md - - name: json - library_path: libjson.so - - -###################### -# Falco config files # -###################### - -# [Stable] `watch_config_files` -# -# Falco monitors configuration and rule files for changes and automatically -# reloads itself to apply the updated configuration when any modifications are -# detected. This feature is particularly useful when you want to make real-time -# changes to the configuration or rules of Falco without interrupting its -# operation or losing its state. For more information about Falco's state -# engine, please refer to the `base_syscalls` section. -watch_config_files: true - - -########################## -# Falco outputs settings # -########################## - -# [Stable] `time_format_iso_8601` -# -# When enabled, Falco will display log and output messages with times in the ISO -# 8601 format. By default, times are shown in the local time zone determined by -# the /etc/localtime configuration. -time_format_iso_8601: false - -# [Stable] `priority` -# -# Any rule with a priority level more severe than or equal to the specified -# minimum level will be loaded and run by Falco. This allows you to filter and -# control the rules based on their severity, ensuring that only rules of a -# certain priority or higher are active and evaluated by Falco. Supported -# levels: "emergency", "alert", "critical", "error", "warning", "notice", -# "info", "debug" -priority: debug - -# [Stable] `json_output` -# -# When enabled, Falco will output alert messages and rules file -# loading/validation results in JSON format, making it easier for downstream -# programs to process and consume the data. By default, this option is disabled. 
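-#
-# As a rough illustration only (abbreviated, and not an authoritative schema),
-# an alert emitted with JSON output enabled looks roughly like:
-# {"output":"...","priority":"Warning","rule":"...","source":"syscall","tags":[...],"time":"...","output_fields":{...}}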
-json_output: true - -# [Stable] `json_include_output_property` -# -# When using JSON output in Falco, you have the option to include the "output" -# property itself in the generated JSON output. The "output" property provides -# additional information about the purpose of the rule. To reduce the logging -# volume, it is recommended to turn it off if it's not necessary for your use -# case. -json_include_output_property: true - -# [Stable] `json_include_tags_property` -# -# When using JSON output in Falco, you have the option to include the "tags" -# field of the rules in the generated JSON output. The "tags" field provides -# additional metadata associated with the rule. To reduce the logging volume, -# if the tags associated with the rule are not needed for your use case or can -# be added at a later stage, it is recommended to turn it off. -json_include_tags_property: true - -# [Stable] `buffered_outputs` -# -# Enabling buffering for the output queue can offer performance optimization, -# efficient resource usage, and smoother data flow, resulting in a more reliable -# output mechanism. By default, buffering is disabled (false). -buffered_outputs: true - -# [Stable] `outputs` -# -# A throttling mechanism, implemented as a token bucket, can be used to control -# the rate of Falco outputs. Each event source has its own rate limiter, -# ensuring that alerts from one source do not affect the throttling of others. -# The following options control the mechanism: -# - rate: the number of tokens (i.e. right to send a notification) gained per -# second. When 0, the throttling mechanism is disabled. Defaults to 0. -# - max_burst: the maximum number of tokens outstanding. Defaults to 1000. -# -# For example, setting the rate to 1 allows Falco to send up to 1000 -# notifications initially, followed by 1 notification per second. The burst -# capacity is fully restored after 1000 seconds of no activity. -# -# Throttling can be useful in various scenarios, such as preventing notification -# floods, managing system load, controlling event processing, or complying with -# rate limits imposed by external systems or APIs. It allows for better resource -# utilization, avoids overwhelming downstream systems, and helps maintain a -# balanced and controlled flow of notifications. -# -# With the default settings, the throttling mechanism is disabled. -outputs: - rate: 0 - max_burst: 1000 - - -########################## -# Falco outputs channels # -########################## - -# Falco supports various output channels, such as syslog, stdout, file, gRPC, -# webhook, and more. You can enable or disable these channels as needed to -# control where Falco alerts and log messages are directed. This flexibility -# allows seamless integration with your preferred logging and alerting systems. -# Multiple outputs can be enabled simultaneously. - -# [Stable] `stdout_output` -# -# Redirect logs to standard output. -stdout_output: - enabled: true - -# [Stable] `syslog_output` -# -# Send logs to syslog. -syslog_output: - enabled: true - -# [Stable] `file_output` -# -# When appending Falco alerts to a file, each new alert will be added to a new -# line. It's important to note that Falco does not perform log rotation for this -# file. If the `keep_alive` option is set to `true`, the file will be opened once -# and continuously written to, else the file will be reopened for each output -# message. Furthermore, the file will be closed and reopened if Falco receives -# the SIGUSR1 signal. 
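-#
-# Since Falco does not rotate this file itself, you may want to pair it with an
-# external helper; a minimal logrotate sketch (the path is an assumption, match
-# it to the `filename` option below):
-# /var/log/falco/events.txt {
-#   daily
-#   rotate 7
-#   copytruncate
-# }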
-file_output:
-  enabled: false
-  keep_alive: false
-  filename: ./events.txt
-
-# [Stable] `http_output`
-#
-# Send logs to an HTTP endpoint or webhook.
-#
-# When using falcosidekick, it is necessary to set `json_output` to true, which is
-# conveniently done automatically for you when using `falcosidekick.enabled=true`.
-http_output:
-  enabled: false
-  url: http://some.url
-  user_agent: "falcosecurity/falco"
-  # Tell Falco to not verify the remote server.
-  insecure: false
-  # Path to the CA certificate that can verify the remote server.
-  ca_cert: ""
-  # Path to a specific file that will be used as the CA certificate store.
-  ca_bundle: ""
-  # Path to a folder that will be used as the CA certificate store. CA certificates need to be
-  # stored as individual PEM files in this directory.
-  ca_path: "/etc/ssl/certs"
-
-# [Stable] `program_output`
-#
-# Redirect the output to another program or command.
-#
-# Possible additional things you might want to do with program output:
-#   - send to a slack webhook:
-#     program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX"
-#   - logging (alternative method to syslog):
-#     program: logger -t falco-test
-#   - send over a network connection:
-#     program: nc host.example.com 80
-# If `keep_alive` is set to `true`, the program will be started once and
-# continuously written to, with each output message on its own line. If
-# `keep_alive` is set to `false`, the program will be re-spawned for each output
-# message. Furthermore, the program will be re-spawned if Falco receives
-# the SIGUSR1 signal.
-program_output:
-  enabled: false
-  keep_alive: false
-  program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX"
-
-# [Stable] `grpc_output`
-#
-# Use gRPC as an output service.
-#
-# gRPC is a modern and high-performance framework for remote procedure calls
-# (RPC). It utilizes protocol buffers for efficient data serialization. The gRPC
-# output in Falco provides a modern and efficient way to integrate with other
-# systems. By default, the setting is turned off. Enabling this option stores
-# output events in memory until they are consumed by a gRPC client. Ensure that
-# you have a consumer for the output events or leave it disabled.
-grpc_output:
-  enabled: false
-
-
-##########################
-# Falco exposed services #
-##########################
-
-# [Stable] `grpc`
-#
-# Falco provides support for running a gRPC server using two main binding types:
-# 1. Over the network with mandatory mutual TLS authentication (mTLS), which
-#    ensures secure communication.
-# 2. Local Unix socket binding with no authentication. By default, the
-#    gRPC server in Falco is turned off with no enabled services (see the
-#    `grpc_output` setting).
-#
-# To configure the gRPC server in Falco, you can make the following changes to
-# the options:
-#
-# - Uncomment the relevant configuration options related to the gRPC server.
-# - Update the paths of the generated certificates for mutual TLS authentication
-#   if you choose to use mTLS.
-# - Specify the address to bind and expose the gRPC server.
-# - Adjust the threadiness configuration to control the number of threads and
-#   contexts used by the server.
-#
-# Keep in mind that if any issues arise while creating the gRPC server, the
-# information will be logged, but it will not stop the main Falco daemon.
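-#
-# Once the unix-socket server below and `grpc_output` are enabled, any gRPC
-# client can consume the event stream; a sketch using the third-party grpcurl
-# tool (assumes grpcurl is installed and the server exposes reflection):
-# grpcurl -plaintext -unix /run/falco/falco.sock list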
-
-# gRPC server using mTLS
-# grpc:
-#   enabled: true
-#   bind_address: "0.0.0.0:5060"
-#   # When the `threadiness` value is set to 0, Falco will automatically determine
-#   # the appropriate number of threads based on the number of online cores in the system.
-#   threadiness: 0
-#   private_key: "/etc/falco/certs/server.key"
-#   cert_chain: "/etc/falco/certs/server.crt"
-#   root_certs: "/etc/falco/certs/ca.crt"
-
-# gRPC server using a local unix socket
-grpc:
-  enabled: false
-  bind_address: "unix:///run/falco/falco.sock"
-  # When the `threadiness` value is set to 0, Falco will automatically determine
-  # the appropriate number of threads based on the number of online cores in the system.
-  threadiness: 0
-
-# [Stable] `webserver`
-#
-# Falco supports an embedded webserver that runs within the Falco process,
-# providing a lightweight and efficient way to expose web-based functionalities
-# without the need for an external web server. The following endpoints are
-# exposed:
-# - /healthz: designed to be used for checking the health and availability of
-#   the Falco application (the name of the endpoint is configurable).
-# - /versions: responds with a JSON object containing the version numbers of the
-#   internal Falco components (similar output as `falco --version -o
-#   json_output=true`).
-#
-# Please note that the /versions endpoint is particularly useful for other Falco
-# services, such as `falcoctl`, to retrieve information about a running Falco
-# instance. If you plan to use `falcoctl` locally or with Kubernetes, make sure
-# the Falco webserver is enabled.
-#
-# The behavior of the webserver can be controlled with the following options,
-# which are enabled by default:
-#
-# The `ssl_certificate` option specifies a combined SSL certificate and
-# corresponding key that are contained in a single file. You can generate a
-# key/cert as follows:
-#
-# $ openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem
-# $ cat certificate.pem key.pem > falco.pem
-# $ sudo cp falco.pem /etc/falco/falco.pem
-webserver:
-  enabled: true
-  # When the `threadiness` value is set to 0, Falco will automatically determine
-  # the appropriate number of threads based on the number of online cores in the system.
-  threadiness: 0
-  listen_port: 8765
-  k8s_healthz_endpoint: /healthz
-  ssl_enabled: false
-  ssl_certificate: /etc/falco/falco.pem
-
-
-##############################################################################
-# Falco logging / alerting / metrics related to software functioning (basic) #
-##############################################################################
-
-# [Stable] `log_stderr` and `log_syslog`
-#
-# Falco's logs related to the functioning of the software, which are not related
-# to Falco alert outputs but rather its lifecycle, settings and potential
-# errors, can be directed to stderr and/or syslog.
-log_stderr: true
-log_syslog: true
-
-# [Stable] `log_level`
-#
-# The `log_level` setting determines the minimum log level to include in Falco's
-# logs related to the functioning of the software. This setting is separate from
-# the `priority` field of rules and specifically controls the log level of
-# Falco's operational logging. By specifying a log level, you can control the
-# verbosity of Falco's operational logs. Only logs of a certain severity level
-# or higher will be emitted. Supported levels: "emergency", "alert", "critical",
-# "error", "warning", "notice", "info", "debug".
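-#
-# For example, while troubleshooting you can raise the verbosity temporarily
-# from the command line instead of editing this file:
-# falco -o "log_level=debug" -o "log_stderr=true"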
-log_level: info - -# [Stable] `libs_logger` -# -# The `libs_logger` setting in Falco determines the minimum log level to include -# in the logs related to the functioning of the software of the underlying -# `libs` library, which Falco utilizes. This setting is independent of the -# `priority` field of rules and the `log_level` setting that controls Falco's -# operational logs. It allows you to specify the desired log level for the `libs` -# library specifically, providing more granular control over the logging -# behavior of the underlying components used by Falco. Only logs of a certain -# severity level or higher will be emitted. Supported levels: "emergency", -# "alert", "critical", "error", "warning", "notice", "info", "debug". It is not -# recommended for production use. -libs_logger: - enabled: false - severity: debug - - -################################################################################# -# Falco logging / alerting / metrics related to software functioning (advanced) # -################################################################################# - -# [Stable] `output_timeout` -# -# Generates Falco operational logs when `log_level=notice` at minimum -# -# A timeout error occurs when a process or operation takes longer to complete -# than the allowed or expected time limit. In the context of Falco, an output -# timeout error refers to the situation where an output channel fails to deliver -# an alert within a specified deadline. Various reasons, such as network issues, -# resource constraints, or performance bottlenecks can cause timeouts. -# -# The `output_timeout` parameter specifies the duration, in milliseconds, to -# wait before considering the deadline exceeded. By default, the timeout is set -# to 2000ms (2 seconds), meaning that the consumer of Falco outputs can block -# the Falco output channel for up to 2 seconds without triggering a timeout -# error. -# -# Falco actively monitors the performance of output channels. With this setting -# the timeout error can be logged, but please note that this requires setting -# Falco's operational logs `log_level` to a minimum of `notice`. -# -# It's important to note that Falco outputs will not be discarded from the -# output queue. This means that if an output channel becomes blocked -# indefinitely, it indicates a potential issue that needs to be addressed by the -# user. -output_timeout: 2000 - -# [Stable] `syscall_event_timeouts` -# -# Generates Falco operational logs when `log_level=notice` at minimum -# -# Falco utilizes a shared buffer between the kernel and userspace to receive -# events, such as system call information, in userspace. However, there may be -# cases where timeouts occur in the underlying libraries due to issues in -# reading events or the need to skip a particular event. While it is uncommon -# for Falco to experience consecutive event timeouts, it has the capability to -# detect such situations. You can configure the maximum number of consecutive -# timeouts without an event after which Falco will generate an alert, but please -# note that this requires setting Falco's operational logs `log_level` to a -# minimum of `notice`. The default value is set to 1000 consecutive timeouts -# without receiving any events. The mapping of this value to a time interval -# depends on the CPU frequency. 
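-#
-# For example, to be notified sooner about prolonged periods without events,
-# the threshold could be lowered (the value here is illustrative only):
-# syscall_event_timeouts:
-#   max_consecutives: 500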
-syscall_event_timeouts:
-  max_consecutives: 1000
-
-# [Stable] `syscall_event_drops`
-#
-# Generates "Falco internal: syscall event drop" rule output when `priority=debug` at minimum
-#
-# --- [Description]
-#
-# Falco uses a shared buffer between the kernel and userspace to pass system
-# call information. When Falco detects that this buffer is full and system calls
-# have been dropped, it can take one or more of the following actions:
-#   - ignore: do nothing (default when list of actions is empty)
-#   - log: log a DEBUG message noting that the buffer was full
-#   - alert: emit a Falco alert noting that the buffer was full
-#   - exit: exit Falco with a non-zero rc
-#
-# Note that it is not possible to both ignore and log/alert messages at the
-# same time.
-#
-# The rate at which log/alert messages are emitted is governed by a token
-# bucket. The rate corresponds to one message every 30 seconds with a burst of
-# one message (by default).
-#
-# The messages are emitted when the percentage of dropped system calls with
-# respect to the number of events in the last second is greater than the given
-# threshold (a double in the range [0, 1]). If you want to be alerted on any
-# drops, set the threshold to 0.
-#
-# For debugging/testing it is possible to simulate the drops using
-# `simulate_drops: true`. In this case the threshold does not apply.
-#
-# --- [Usage]
-#
-# Enabled by default, but requires Falco rules config `priority` set to `debug`.
-# Emits a Falco rule named "Falco internal: syscall event drop" as many times in
-# a given time period as dictated by the settings. Statistics here reflect the
-# delta in a 1s time period.
-#
-# If instead you prefer periodic metrics of monotonic counters at a regular
-# interval, which include syscall drop statistics and additional metrics,
-# explore the `metrics` configuration option.
-syscall_event_drops:
-  threshold: .1
-  actions:
-  - log
-  - alert
-  rate: .03333
-  max_burst: 1
-  simulate_drops: false
-
-# [Experimental] `metrics`
-#
-# Generates "Falco internal: metrics snapshot" rule output when `priority=info` at minimum
-# By selecting `output_file`, equivalent JSON output will be appended to a file.
-#
-# periodic metric snapshots (including stats and resource utilization) captured
-# at regular intervals
-#
-# --- [Description]
-#
-# Consider these key points about the `metrics` feature in Falco:
-#
-# - It introduces a redesigned stats/metrics system.
-# - Native support for resource utilization metrics and specialized performance
-#   metrics.
-# - Metrics are emitted as monotonic counters at predefined intervals
-#   (snapshots).
-# - All metrics are consolidated into a single log message, adhering to the
-#   established rules schema and naming conventions.
-# - Additional info fields complement the metrics and facilitate customized
-#   statistical analyses and correlations.
-# - The metrics framework is designed for easy future extension.
-#
-# The `metrics` feature follows a specific schema and field naming convention.
-# All metrics are collected as subfields under the `output_fields` key, similar
-# to regular Falco rules. Each metric field name adheres to the grammar used in
-# Falco rules. There are two new field classes introduced: `falco.` and `scap.`.
-# The `falco.` class represents userspace counters, statistics, resource
-# utilization, or useful information fields.
The `scap.` class represents -# counters and statistics mostly obtained from Falco's kernel instrumentation -# before events are sent to userspace, but can include scap userspace stats as -# well. -# -# It's important to note that the output fields and their names can be subject -# to change until the metrics feature reaches a stable release. -# In addition, the majority of fields represent an instant snapshot, with the -# exception of event rates per second and drop percentage stats. These values -# are computed based on the delta between two snapshots. -# -# To customize the hostname in Falco, you can set the environment variable -# `FALCO_HOSTNAME` to your desired hostname. This is particularly useful in -# Kubernetes deployments where the hostname can be set to the pod name. -# -# --- [Usage] -# -# `enabled`: Disabled by default. -# -# `interval`: The stats interval in Falco follows the time duration definitions -# used by Prometheus. -# https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations -# -# Time durations are specified as a number, followed immediately by one of the -# following units: -# -# ms - millisecond -# s - second -# m - minute -# h - hour -# d - day - assuming a day has always 24h -# w - week - assuming a week has always 7d -# y - year - assuming a year has always 365d -# -# Example of a valid time duration: 1h30m20s10ms -# -# A minimum interval of 100ms is enforced for metric collection. However, for -# production environments, we recommend selecting one of the following intervals -# for optimal monitoring: -# -# 15m -# 30m -# 1h -# 4h -# 6h -# -# `output_rule`: To enable seamless metrics and performance monitoring, we -# recommend emitting metrics as the rule "Falco internal: metrics snapshot". -# This option is particularly useful when Falco logs are preserved in a data -# lake. Please note that to use this option, the Falco rules config `priority` -# must be set to `info` at a minimum. -# -# `output_file`: Append stats to a `jsonl` file. Use with caution in production -# as Falco does not automatically rotate the file. It can be used in combination -# with `output_rule`. -# -# `resource_utilization_enabled`: Emit CPU and memory usage metrics. CPU usage -# is reported as a percentage of one CPU and can be normalized to the total -# number of CPUs to determine overall usage. Memory metrics are provided in raw -# units (`kb` for `RSS`, `PSS` and `VSZ` or `bytes` for `container_memory_used`) -# and can be uniformly converted to megabytes (MB) using the -# `convert_memory_to_mb` functionality. In environments such as Kubernetes, it -# is crucial to track Falco's container memory usage. To customize the path of -# the memory metric file, you can create an environment variable named -# `FALCO_CGROUP_MEM_PATH` and set it to the desired file path. By default, Falco -# uses the file `/sys/fs/cgroup/memory/memory.usage_in_bytes` to monitor -# container memory usage, which aligns with Kubernetes' -# `container_memory_working_set_bytes` metric. -# -# `kernel_event_counters_enabled`: Emit kernel side event and drop counters, as -# an alternative to `syscall_event_drops`, but with some differences. These -# counters reflect monotonic values since Falco's start and are exported at a -# constant stats interval. -# -# `libbpf_stats_enabled`: Exposes statistics similar to `bpftool prog show`, -# providing information such as the number of invocations of each BPF program -# attached by Falco and the time spent in each program measured in nanoseconds. 
-# To enable this feature, the kernel must be >= 5.1, and the kernel -# configuration `/proc/sys/kernel/bpf_stats_enabled` must be set. This option, -# or an equivalent statistics feature, is not available for non `*bpf*` drivers. -# Additionally, please be aware that the current implementation of `libbpf` does -# not support granularity of statistics at the bpf tail call level. -# -# `include_empty_values`: When the option is set to true, fields with an empty -# numeric value will be included in the output. However, this rule does not -# apply to high-level fields such as `n_evts` or `n_drops`; they will always be -# included in the output even if their value is empty. This option can be -# beneficial for exploring the data schema and ensuring that fields with empty -# values are included in the output. -# -# todo: prometheus export option -# todo: syscall_counters_enabled option -metrics: - enabled: false - interval: 1h - output_rule: true - # output_file: /tmp/falco_stats.jsonl - resource_utilization_enabled: true - kernel_event_counters_enabled: true - libbpf_stats_enabled: true - convert_memory_to_mb: true - include_empty_values: false - - -####################################### -# Falco performance tuning (advanced) # -####################################### - -# [Stable] `syscall_buf_size_preset` -# -# --- [Description] -# -# The syscall buffer index determines the size of the shared space between Falco -# and its drivers. This shared space serves as a temporary storage for syscall -# events, allowing them to be transferred from the kernel to the userspace -# efficiently. The buffer size for each online CPU is determined by the buffer -# index, and each CPU has its own dedicated buffer. Adjusting this index allows -# you to control the overall size of the syscall buffers. -# -# --- [Usage] -# -# The index 0 is reserved, and each subsequent index corresponds to an -# increasing size in bytes. For example, index 1 corresponds to a size of 1 MB, -# index 2 corresponds to 2 MB, and so on: -# -# [(*), 1 MB, 2 MB, 4 MB, 8 MB, 16 MB, 32 MB, 64 MB, 128 MB, 256 MB, 512 MB] -# ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ -# | | | | | | | | | | | -# 0 1 2 3 4 5 6 7 8 9 10 -# -# -# The buffer dimensions in bytes are determined by the following requirements: -# (1) a power of 2. -# (2) a multiple of your system_page_dimension. -# (3) greater than `2 * (system_page_dimension). -# -# The buffer size constraints may limit the usability of certain indexes. Let's -# consider an example to illustrate this: -# -# If your system has a page size of 1 MB, the first available buffer size would -# be 4 MB because 2 MB is exactly equal to 2 * (system_page_size), which is not -# sufficient as we require more than 2 * (system_page_size). In this example, it -# is evident that if the page size is 1 MB, the first index that can be used is 3. -# -# However, in most cases, these constraints do not pose a limitation, and all -# indexes from 1 to 10 can be used. You can check your system's page size using -# the Falco `--page-size` command-line option. -# -# --- [Suggestions] -# -# The buffer size was previously fixed at 8 MB (index 4). You now have the -# option to adjust the size based on your needs. Increasing the size, such as to -# 16 MB (index 5), can reduce syscall drops in heavy production systems, but may -# impact performance. Decreasing the size can speed up the system but may -# increase syscall drops. 
It's important to note that the buffer size is mapped
-# twice in the process' virtual memory, so a buffer of 8 MB will result in a 16
-# MB area in virtual memory. Use this parameter with caution and only modify it
-# if the default size is not suitable for your use case.
-syscall_buf_size_preset: 4
-
-# [Experimental] `syscall_drop_failed_exit`
-#
-# Enabling this option in Falco allows it to drop failed system call exit events
-# in the kernel driver before pushing them onto the ring buffer. This
-# optimization can result in lower CPU usage and more efficient utilization of
-# the ring buffer, potentially reducing the number of event losses. However, it
-# is important to note that enabling this option also means sacrificing some
-# visibility into the system.
-syscall_drop_failed_exit: false
-
-# [Experimental] `base_syscalls`, use with caution, read carefully
-#
-# --- [Description]
-#
-# This option configures the set of syscalls that Falco traces.
-#
-# --- [Falco's State Engine]
-#
-# Falco requires a set of syscalls to build up state in userspace. For example,
-# when spawning a new process or network connection, multiple syscalls are
-# involved. Furthermore, properties of a process during its lifetime can be
-# modified by syscalls. Falco accounts for this by enabling the collection of
-# additional syscalls beyond the ones defined in the rules and by managing a smart
-# process cache table in userspace. Processes are purged from this table when a
-# process exits.
-#
-# By default, with
-# ```
-# base_syscalls.custom_set = []
-# base_syscalls.repair = false
-# ```
-# Falco enables tracing for a syscall set gathered: (1) from (enabled) Falco
-# rules (2) from a static, more verbose set defined in
-# `libsinsp::events::sinsp_state_sc_set` in
-# libs/userspace/libsinsp/events/sinsp_events_ppm_sc.cpp. This allows Falco to
-# successfully build up its state engine and life-cycle management.
-#
-# If the default behavior described above does not fit the user's use case for
-# Falco, the `base_syscalls` option allows for finer end-user control of
-# syscalls traced by Falco.
-#
-# --- [base_syscalls.custom_set]
-#
-# CAUTION: Misconfiguration of this setting may result in incomplete Falco event
-# logs or Falco being unable to trace events entirely.
-#
-# `base_syscalls.custom_set` allows the user to explicitly define an additional
-# set of syscalls to be traced in addition to the syscalls from each enabled
-# Falco rule.
-#
-# This is useful in lowering CPU utilization and further tailoring Falco to
-# specific environments according to your threat model and budget constraints.
-#
-# --- [base_syscalls.repair]
-#
-# `base_syscalls.repair` is an alternative to Falco's default state engine
-# enforcement. When enabled, this option is designed to (1) ensure that Falco's
-# state engine is correctly and successfully built up (2) be the most system
-# resource-friendly by activating the least number of additional syscalls
-# (outside of those enabled for enabled rules)
-#
-# Setting `base_syscalls.repair` to `true` allows Falco to automatically
-# configure what is described in the [Suggestions] section below.
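-#
-# As an illustrative sketch (the set is taken from the [Suggestions] below), an
-# explicit minimal custom set for process-related rules could look like:
-# ```
-# base_syscalls:
-#   custom_set: [clone, clone3, fork, vfork, execve, execveat, close]
-#   repair: false
-# ```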
-#
-# `base_syscalls.repair` can be enabled with an empty custom set, meaning with
-# the following,
-# ```
-# base_syscalls.custom_set = []
-# base_syscalls.repair = true
-# ```
-# Falco enables tracing for a syscall set gathered: (1) from (enabled) Falco
-# rules (2) from a minimal set of additional syscalls needed to "repair" the
-# state engine and properly log event conditions specified in enabled Falco
-# rules
-#
-# --- [Usage]
-#
-# List of system call names (<syscall_name>), negative ("!")
-# notation supported.
-#
-# Example: base_syscalls.custom_set: [<syscall_name>, <syscall_name>,
-# "!<syscall_name>"] base_syscalls.repair: <true|false>
-#
-# We recommend only excluding syscalls (e.g. "!mprotect") if you need a fast
-# deployment update (overriding rules), else remove unwanted syscalls from the
-# Falco rules.
-#
-# Passing `-o "log_level=debug" -o "log_stderr=true" --dry-run` to Falco's cmd
-# args will print the final set of syscalls to STDOUT.
-#
-# --- [Suggestions]
-#
-# NOTE: setting `base_syscalls.repair: true` automates the following suggestions
-# for you.
-#
-# These suggestions are subject to change as Falco and its state engine evolve.
-#
-# For execve* events: Some Falco fields for an execve* syscall are retrieved
-# from the associated `clone`, `clone3`, `fork`, `vfork` syscalls when spawning
-# a new process. The `close` syscall is used to purge file descriptors from
-# Falco's internal thread / process cache table and is necessary for rules
-# relating to file descriptors (e.g. open, openat, openat2, socket, connect,
-# accept, accept4 ... and many more)
-#
-# Consider enabling the following syscalls in `base_syscalls.custom_set` for
-# process rules: [clone, clone3, fork, vfork, execve, execveat, close]
-#
-# For networking related events: While you can log `connect` or `accept*`
-# syscalls without the socket syscall, the log will not contain the ip tuples.
-# Additionally, for `listen` and `accept*` syscalls, the `bind` syscall is also
-# necessary.
-#
-# We recommend the following as the minimum set for networking-related rules:
-# [clone, clone3, fork, vfork, execve, execveat, close, socket, bind,
-# getsockopt]
-#
-# Lastly, for tracking the correct `uid`, `gid` or `sid`, `pgid` of a process
-# when the running process opens a file or makes a network connection, consider
-# adding the following to the above recommended syscall sets: ... setresuid,
-# setsid, setuid, setgid, setpgid, setresgid, capset, chdir, chroot,
-# fchdir ...
-base_syscalls:
-  custom_set: []
-  repair: false
-
-# [Stable] `modern_bpf.cpus_for_each_syscall_buffer`, modern_bpf only
-#
-# --- [Description]
-#
-# The modern_bpf driver in Falco utilizes the new BPF ring buffer, which has a
-# different memory footprint compared to the current BPF driver that uses the
-# perf buffer. The Falco core maintainers have discussed the differences and
-# their implications, particularly in Kubernetes environments where limits need
-# to be carefully set to avoid interference with the Falco daemonset deployment
-# from the OOM killer. Based on guidance received from the kernel mailing list,
-# it is recommended to assign multiple CPUs to one buffer instead of allocating
-# a buffer for each CPU individually. This helps optimize resource allocation
-# and prevent potential issues related to memory usage.
-#
-# This is an index that controls how many CPUs you want to assign to a single
-# syscall buffer (ring buffer). By default, for modern_bpf every syscall buffer
-# is associated to 2 CPUs, so the mapping is 1:2.
The modern BPF probe allows -# you to choose different mappings, for example, changing the value to `1` -# results in a 1:1 mapping and would mean one syscall buffer for each CPU (this -# is the default for the `bpf` driver). -# -# --- [Usage] -# -# You can choose an index from 0 to MAX_NUMBER_ONLINE_CPUs to set the dimension -# of the syscall buffers. The value 0 represents a single buffer shared among -# all online CPUs. It serves as a flexible option when the exact number of -# online CPUs is unknown. Here's an example to illustrate this: -# -# Consider a system with 7 online CPUs: -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# -# - `1` means a syscall buffer for each CPU so 7 buffers -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# | | | | | | | -# BUFFERs 0 1 2 3 4 5 6 -# -# - `2` (Default value) means a syscall buffer for each CPU pair, so 4 buffers -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# | | | | | | | -# BUFFERs 0 0 1 1 2 2 3 -# -# Please note that in this example, there are 4 buffers in total. Three of the -# buffers are associated with pairs of CPUs, while the last buffer is mapped to -# a single CPU. This arrangement is necessary because we have an odd number of -# CPUs. -# -# - `0` or `MAX_NUMBER_ONLINE_CPUs` mean a syscall buffer shared between all -# CPUs, so 1 buffer -# -# CPUs 0 X 2 3 X X 6 7 8 9 (X means offline CPU) -# | | | | | | | -# BUFFERs 0 0 0 0 0 0 0 -# -# Moreover, you have the option to combine this parameter with -# `syscall_buf_size_preset` index. For instance, you can create a large shared -# syscall buffer of 512 MB (using syscall_buf_size_preset=10) that is -# allocated among all the online CPUs. -# -# --- [Suggestions] -# -# The default choice of index 2 (one syscall buffer for each CPU pair) was made -# because the modern bpf probe utilizes a different memory allocation strategy -# compared to the other two drivers (bpf and kernel module). However, you have -# the flexibility to experiment and find the optimal configuration for your -# system. -# -# When considering a fixed syscall_buf_size_preset and a fixed buffer dimension: -# - Increasing this configs value results in lower number of buffers and you can -# speed up your system and reduce memory usage -# - However, using too few buffers may increase contention in the kernel, -# leading to a slowdown. -# -# If you have low event throughputs and minimal drops, reducing the number of -# buffers (higher `cpus_for_each_syscall_buffer`) can lower the memory footprint. -modern_bpf: - cpus_for_each_syscall_buffer: 2 - - -################################################# -# Falco cloud orchestration systems integration # -################################################# - -# [Stable] `metadata_download` -# -# When connected to an orchestrator like Kubernetes, Falco has the capability to -# collect metadata and enrich system call events with contextual data. The -# parameters mentioned here control the downloading process of this metadata. -# -# Please note that support for Mesos is deprecated, so these parameters -# currently apply only to Kubernetes. When using Falco with Kubernetes, you can -# enable this functionality by using the `-k` or `-K` command-line flag. -# -# However, it's worth mentioning that for important Kubernetes metadata fields -# such as namespace or pod name, these fields are automatically extracted from -# the container runtime, providing the necessary enrichment for common use cases -# of syscall-based threat detection. 
-#
-# In summary, the `-k` flag is typically not required for most scenarios involving
-# Kubernetes workload owner enrichment. The `-k` flag is primarily used when
-# additional metadata is required beyond the standard fields, catering to more
-# specific use cases, see https://falco.org/docs/reference/rules/supported-fields/#field-class-k8s.
-metadata_download:
-  max_mb: 100
-  chunk_wait_us: 1000
-  watch_freq_sec: 1
-
-# [Stable] Guidance for Kubernetes container engine command-line args settings
-#
-# Modern cloud environments, particularly Kubernetes, heavily rely on
-# containerized workload deployments. When capturing events with Falco, it
-# becomes essential to identify the owner of the workload for which events are
-# being captured, such as syscall events. Falco integrates with the container
-# runtime to enrich its events with container information, including fields like
-# `container.image.repository`, `container.image.tag`, ... , `k8s.ns.name`,
-# `k8s.pod.name`, `k8s.pod.*` in the Falco output (Falco retrieves Kubernetes
-# namespace and pod name directly from the container runtime, see
-# https://falco.org/docs/reference/rules/supported-fields/#field-class-container).
-#
-# Furthermore, Falco exposes container events themselves as a data source for
-# alerting. To achieve this integration with the container runtime, Falco
-# requires access to the runtime socket. By default, for Kubernetes, Falco
-# attempts to connect to the following sockets:
-# "/run/containerd/containerd.sock", "/run/crio/crio.sock",
-# "/run/k3s/containerd/containerd.sock". If you have a custom path, you can use
-# the `--cri` option to specify the correct location.
-#
-# In some cases, you may encounter empty fields for container metadata. To
-# address this, you can explore the `--disable-cri-async` option, which disables
-# asynchronous fetching if the fetch operation is not completing quickly enough.
-#
-# To get more information on these command-line arguments, you can run `falco
-# --help` in your terminal to view their current descriptions.
-#
-# !!! The options mentioned here are not available in the falco.yaml
-# configuration file. Instead, they can be used as command-line arguments
-# when running the Falco command.
diff --git a/kubezero/falco/zdt_falco_rules.yaml b/kubezero/falco/zdt_falco_rules.yaml
deleted file mode 100644
index 350693c..0000000
--- a/kubezero/falco/zdt_falco_rules.yaml
+++ /dev/null
@@ -1,1251 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-#
-# Copyright (C) 2023 The Falco Authors.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# - -# Information about rules tags and fields can be found here: https://falco.org/docs/rules/#tags-for-current-falco-ruleset -# The initial item in the `tags` fields reflects the maturity level of the rules introduced upon the proposal https://github.com/falcosecurity/rules/blob/main/proposals/20230605-rules-adoption-management-maturity-framework.md -# `tags` fields also include information about the type of workload inspection (host and/or container), and Mitre Attack killchain phases and Mitre TTP code(s) -# Mitre Attack References: -# [1] https://attack.mitre.org/tactics/enterprise/ -# [2] https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json - -# Starting with version 8, the Falco engine supports exceptions. -# However the Falco rules file does not use them by default. -- required_engine_version: 0.26.0 - -# Currently disabled as read/write are ignored syscalls. The nearly -# similar open_write/open_read check for files being opened for -# reading/writing. -# - macro: write -# condition: (syscall.type=write and fd.type in (file, directory)) -# - macro: read -# condition: (syscall.type=read and evt.dir=> and fd.type in (file, directory)) - -- macro: open_write - condition: (evt.type in (open,openat,openat2) and evt.is_open_write=true and fd.typechar='f' and fd.num>=0) - -- macro: open_read - condition: (evt.type in (open,openat,openat2) and evt.is_open_read=true and fd.typechar='f' and fd.num>=0) - -# Failed file open attempts, useful to detect threat actors making mistakes -# https://man7.org/linux/man-pages/man3/errno.3.html -# evt.res=ENOENT - No such file or directory -# evt.res=EACCESS - Permission denied -- macro: open_file_failed - condition: (evt.type in (open,openat,openat2) and fd.typechar='f' and fd.num=-1 and evt.res startswith E) - -# This macro `never_true` is used as placeholder for tuning negative logical sub-expressions, for example -# - macro: allowed_ssh_hosts -# condition: (never_true) -# can be used in a rules' expression with double negation `and not allowed_ssh_hosts` which effectively evaluates -# to true and does nothing, the perfect empty template for `logical` cases as opposed to list templates. -# When tuning the rule you can override the macro with something useful, e.g. -# - macro: allowed_ssh_hosts -# condition: (evt.hostname contains xyz) -- macro: never_true - condition: (evt.num=0) - -# This macro `always_true` is the flip side of the macro `never_true` and currently is commented out as -# it is not used. You can use it as placeholder for a positive logical sub-expression tuning template -# macro, e.g. `and custom_procs`, where -# - macro: custom_procs -# condition: (always_true) -# later you can customize, override the macros to something like -# - macro: custom_procs -# condition: (proc.name in (custom1, custom2, custom3)) -# - macro: always_true -# condition: (evt.num>=0) - -# In some cases, such as dropped system call events, information about -# the process name may be missing. For some rules that really depend -# on the identity of the process performing an action such as opening -# a file, etc., we require that the process name be known. 
-- macro: proc_name_exists - condition: (proc.name!="") - -- macro: spawned_process - condition: (evt.type in (execve, execveat) and evt.dir=<) - -- macro: create_symlink - condition: (evt.type in (symlink, symlinkat) and evt.dir=<) - -- macro: create_hardlink - condition: (evt.type in (link, linkat) and evt.dir=<) - -- macro: kernel_module_load - condition: (evt.type in (init_module, finit_module) and evt.dir=<) - -- macro: dup - condition: (evt.type in (dup, dup2, dup3)) - -# File categories -- macro: etc_dir - condition: (fd.name startswith /etc/) - -- list: shell_binaries - items: [ash, bash, csh, ksh, sh, tcsh, zsh, dash] - -- macro: shell_procs - condition: (proc.name in (shell_binaries)) - -# dpkg -L login | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: login_binaries - items: [ - login, systemd, '"(systemd)"', systemd-logind, su, - nologin, faillog, lastlog, newgrp, sg - ] - -# dpkg -L passwd | grep bin | xargs ls -ld | grep -v '^d' | awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: passwd_binaries - items: [ - shadowconfig, grpck, pwunconv, grpconv, pwck, - groupmod, vipw, pwconv, useradd, newusers, cppw, chpasswd, usermod, - groupadd, groupdel, grpunconv, chgpasswd, userdel, chage, chsh, - gpasswd, chfn, expiry, passwd, vigr, cpgr, adduser, addgroup, deluser, delgroup - ] - -# repoquery -l shadow-utils | grep bin | xargs ls -ld | grep -v '^d' | -# awk '{print $9}' | xargs -L 1 basename | tr "\\n" "," -- list: shadowutils_binaries - items: [ - chage, gpasswd, lastlog, newgrp, sg, adduser, deluser, chpasswd, - groupadd, groupdel, addgroup, delgroup, groupmems, groupmod, grpck, grpconv, grpunconv, - newusers, pwck, pwconv, pwunconv, useradd, userdel, usermod, vigr, vipw, unix_chkpwd - ] - -- list: http_server_binaries - items: [nginx, httpd, httpd-foregroun, lighttpd, apache, apache2] - -- list: db_server_binaries - items: [mysqld, postgres, sqlplus] - -- list: postgres_mgmt_binaries - items: [pg_dumpall, pg_ctl, pg_lsclusters, pg_ctlcluster] - -- list: nosql_server_binaries - items: [couchdb, memcached, redis-server, rabbitmq-server, mongod] - -- list: gitlab_binaries - items: [gitlab-shell, gitlab-mon, gitlab-runner-b, git] - -- macro: server_procs - condition: (proc.name in (http_server_binaries, db_server_binaries, docker_binaries, sshd)) - -# The explicit quotes are needed to avoid the - characters being -# interpreted by the filter expression. -- list: rpm_binaries - items: [dnf, dnf-automatic, rpm, rpmkey, yum, '"75-system-updat"', rhsmcertd-worke, rhsmcertd, subscription-ma, - repoquery, rpmkeys, rpmq, yum-cron, yum-config-mana, yum-debug-dump, - abrt-action-sav, rpmdb_stat, microdnf, rhn_check, yumdb] - -- list: deb_binaries - items: [dpkg, dpkg-preconfigu, dpkg-reconfigur, dpkg-divert, apt, apt-get, aptitude, - frontend, preinst, add-apt-reposit, apt-auto-remova, apt-key, - apt-listchanges, unattended-upgr, apt-add-reposit, apt-cache, apt.systemd.dai - ] -- list: python_package_managers - items: [pip, pip3, conda] - -# The truncated dpkg-preconfigu is intentional, process names are -# truncated at the falcosecurity-libs level. 
-- list: package_mgmt_binaries - items: [rpm_binaries, deb_binaries, update-alternat, gem, npm, python_package_managers, sane-utils.post, alternatives, chef-client, apk, snapd] - -- macro: run_by_package_mgmt_binaries - condition: (proc.aname in (package_mgmt_binaries, needrestart)) - -# A canonical set of processes that run other programs with different -# privileges or as a different user. -- list: userexec_binaries - items: [doas, sudo, su, suexec, critical-stack, dzdo] - -- list: user_mgmt_binaries - items: [login_binaries, passwd_binaries, shadowutils_binaries] - -- list: hids_binaries - items: [aide, aide.wrapper, update-aide.con, logcheck, syslog-summary, osqueryd, ossec-syscheckd] - -- list: vpn_binaries - items: [openvpn] - -- list: nomachine_binaries - items: [nxexec, nxnode.bin, nxserver.bin, nxclient.bin] - -- list: mail_binaries - items: [ - sendmail, sendmail-msp, postfix, procmail, exim4, - pickup, showq, mailq, dovecot, imap-login, imap, - mailmng-core, pop3-login, dovecot-lda, pop3 - ] - -- list: mail_config_binaries - items: [ - update_conf, parse_mc, makemap_hash, newaliases, update_mk, update_tlsm4, - update_db, update_mc, ssmtp.postinst, mailq, postalias, postfix.config., - postfix.config, postfix-script, postconf - ] - -- list: sensitive_file_names - items: [/etc/shadow, /etc/doas.d/doas.conf, /etc/sudoers, /etc/pam.conf, /etc/security/pwquality.conf] - -- list: sensitive_directory_names - items: [/, /etc, /etc/, /root, /root/] - -- macro: sensitive_files - condition: > - ((fd.name startswith /etc and fd.name in (sensitive_file_names)) or - fd.directory in (/etc/sudoers.d, /etc/pam.d, /etc/doas.d)) - -# Indicates that the process is new. Currently detected using time -# since process was started, using a threshold of 5 seconds. -- macro: proc_is_new - condition: (proc.duration <= 5000000000) - -# Use this to test whether the event occurred within a container. -# When displaying container information in the output field, use -# %container.info, without any leading term (file=%fd.name -# %container.info user=%user.name user_loginuid=%user.loginuid, and not file=%fd.name -# container=%container.info user=%user.name user_loginuid=%user.loginuid). The output will change -# based on the context and whether or not -pk/-pm/-pc was specified on -# the command line. -- macro: container - condition: (container.id != host) - -- macro: interactive - condition: > - ((proc.aname=sshd and proc.name != sshd) or - proc.name=systemd-logind or proc.name=login) - -- list: cron_binaries - items: [anacron, cron, crond, crontab] - -# https://github.com/liske/needrestart -- list: needrestart_binaries - items: [needrestart, 10-dpkg, 20-rpm, 30-pacman] - -# Possible scripts run by sshkit -- list: sshkit_script_binaries - items: [10_etc_sudoers., 10_passwd_group] - -# System users that should never log into a system. Consider adding your own -# service users (e.g. 'apache' or 'mysqld') here. -- macro: system_users - condition: (user.name in (bin, daemon, games, lp, mail, nobody, sshd, sync, uucp, www-data)) - -- macro: ansible_running_python - condition: (proc.name in (python, pypy, python3) and proc.cmdline contains ansible) - -# Qualys seems to run a variety of shell subprocesses, at various -# levels. This checks at a few levels without the cost of a full -# proc.aname, which traverses the full parent hierarchy. 
-- macro: run_by_qualys - condition: > - (proc.pname=qualys-cloud-ag or - proc.aname[2]=qualys-cloud-ag or - proc.aname[3]=qualys-cloud-ag or - proc.aname[4]=qualys-cloud-ag) - -- macro: run_by_google_accounts_daemon - condition: > - (proc.aname[1] startswith google_accounts or - proc.aname[2] startswith google_accounts or - proc.aname[3] startswith google_accounts) - -# Chef is similar. -- macro: run_by_chef - condition: (proc.aname[2]=chef_command_wr or proc.aname[3]=chef_command_wr or - proc.aname[2]=chef-client or proc.aname[3]=chef-client or - proc.name=chef-client) - -# Also handles running semi-indirectly via scl -- macro: run_by_foreman - condition: > - (user.name=foreman and - ((proc.pname in (rake, ruby, scl) and proc.aname[5] in (tfm-rake,tfm-ruby)) or - (proc.pname=scl and proc.aname[2] in (tfm-rake,tfm-ruby)))) - -- macro: python_mesos_marathon_scripting - condition: (proc.pcmdline startswith "python3 /marathon-lb/marathon_lb.py") - -- macro: splunk_running_forwarder - condition: (proc.pname=splunkd and proc.cmdline startswith "sh -c /opt/splunkforwarder") - -- macro: perl_running_plesk - condition: (proc.cmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager" or - proc.pcmdline startswith "perl /opt/psa/admin/bin/plesk_agent_manager") - -- macro: perl_running_updmap - condition: (proc.cmdline startswith "perl /usr/bin/updmap") - -- macro: perl_running_centrifydc - condition: (proc.cmdline startswith "perl /usr/share/centrifydc") - -- macro: runuser_reading_pam - condition: (proc.name=runuser and fd.directory=/etc/pam.d) - -# CIS Linux Benchmark program -- macro: linux_bench_reading_etc_shadow - condition: ((proc.aname[2]=linux-bench and - proc.name in (awk,cut,grep)) and - (fd.name=/etc/shadow or - fd.directory=/etc/pam.d)) - -- macro: veritas_driver_script - condition: (proc.cmdline startswith "perl /opt/VRTSsfmh/bin/mh_driver.pl") - -- macro: user_ssh_directory - condition: (fd.name contains '/.ssh/' and fd.name glob '/home/*/.ssh/*') - -- macro: directory_traversal - condition: (fd.nameraw contains '../' and fd.nameraw glob '*../*../*') - -# ****************************************************************************** -# * "Directory traversal monitored file read" requires FALCO_ENGINE_VERSION 13 * -# ****************************************************************************** -- rule: Directory traversal monitored file read - desc: > - Web applications can be vulnerable to directory traversal attacks that allow accessing files outside of the web app's root directory - (e.g. Arbitrary File Read bugs). System directories like /etc are typically accessed via absolute paths. Access patterns outside of this - (here path traversal) can be regarded as suspicious. This rule includes failed file open attempts. 
- condition: > - (open_read or open_file_failed) - and (etc_dir or user_ssh_directory or - fd.name startswith /root/.ssh or - fd.name contains "id_rsa") - and directory_traversal - and not proc.pname in (shell_binaries) - enabled: true - output: Read monitored file via directory traversal (file=%fd.name fileraw=%fd.nameraw gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- macro: cmp_cp_by_passwd - condition: (proc.name in (cmp, cp) and proc.pname in (passwd, run-parts)) - -- macro: user_known_read_sensitive_files_activities - condition: (never_true) - -- rule: Read sensitive file trusted after startup - desc: > - An attempt to read any sensitive file (e.g. files containing user/password/authentication - information) by a trusted program after startup. Trusted programs might read these files - at startup to load initial state, but not afterwards. Can be customized as needed. - In modern containerized cloud infrastructures, accessing traditional Linux sensitive files - might be less relevant, yet it remains valuable for baseline detections. While we provide additional - rules for SSH or cloud vendor-specific credentials, you can significantly enhance your security - program by crafting custom rules for critical application credentials unique to your environment. - condition: > - open_read - and sensitive_files - and server_procs - and not proc_is_new - and proc.name!="sshd" - and not user_known_read_sensitive_files_activities - output: Sensitive file opened for reading by trusted program after startup (file=%fd.name pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- list: read_sensitive_file_binaries - items: [ - iptables, ps, lsb_release, check-new-relea, dumpe2fs, accounts-daemon, sshd, - vsftpd, systemd, mysql_install_d, psql, screen, debconf-show, sa-update, - pam-auth-update, pam-config, /usr/sbin/spamd, polkit-agent-he, lsattr, file, sosreport, - scxcimservera, adclient, rtvscand, cockpit-session, userhelper, ossec-syscheckd - ] - -# Add conditions to this macro (probably in a separate file, -# overwriting this macro) to allow for specific combinations of -# programs accessing sensitive files. -# fluentd_writing_conf_files is a good example to follow, as it -# specifies both the program doing the writing as well as the specific -# files it is allowed to modify. -# -# In this file, it just takes one of the macros in the base rule -# and repeats it. 
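-#
-# For instance, a hypothetical override (names purely illustrative) that permits
-# a specific backup agent to read /etc/shadow could look like:
-# - macro: user_read_sensitive_file_conditions
-#   condition: (proc.name=backup-agent and fd.name=/etc/shadow)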
-- macro: user_read_sensitive_file_conditions
-  condition: cmp_cp_by_passwd
-
-- list: read_sensitive_file_images
-  items: []
-
-- macro: user_read_sensitive_file_containers
-  condition: (container and container.image.repository in (read_sensitive_file_images))
-
-# This macro detects man-db postinst, see https://salsa.debian.org/debian/man-db/-/blob/master/debian/postinst
-# The rule "Read sensitive file untrusted" uses this macro to avoid FPs.
-- macro: mandb_postinst
-  condition: >
-    (proc.name=perl and proc.args startswith "-e" and
-    proc.args contains "@pwd = getpwnam(" and
-    proc.args contains "exec " and
-    proc.args contains "/usr/bin/mandb")
-
-- rule: Read sensitive file untrusted
-  desc: >
-    An attempt to read any sensitive file (e.g. files containing user/password/authentication
-    information). Exceptions are made for known trusted programs. Can be customized as needed.
-    In modern containerized cloud infrastructures, accessing traditional Linux sensitive files
-    might be less relevant, yet it remains valuable for baseline detections. While we provide additional
-    rules for SSH or cloud vendor-specific credentials, you can significantly enhance your security
-    program by crafting custom rules for critical application credentials unique to your environment.
-  condition: >
-    open_read
-    and sensitive_files
-    and proc_name_exists
-    and not proc.name in (user_mgmt_binaries, userexec_binaries, package_mgmt_binaries,
-     cron_binaries, read_sensitive_file_binaries, shell_binaries, hids_binaries,
-     vpn_binaries, mail_config_binaries, nomachine_binaries, sshkit_script_binaries,
-     in.proftpd, mandb, salt-call, salt-minion, postgres_mgmt_binaries,
-     google_oslogin_
-     )
-    and not cmp_cp_by_passwd
-    and not ansible_running_python
-    and not run_by_qualys
-    and not run_by_chef
-    and not run_by_google_accounts_daemon
-    and not user_read_sensitive_file_conditions
-    and not mandb_postinst
-    and not perl_running_plesk
-    and not perl_running_updmap
-    and not veritas_driver_script
-    and not perl_running_centrifydc
-    and not runuser_reading_pam
-    and not linux_bench_reading_etc_shadow
-    and not user_known_read_sensitive_files_activities
-    and not user_read_sensitive_file_containers
-  output: Sensitive file opened for reading by non-trusted program (file=%fd.name gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: WARNING
-  tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555]
-
-- macro: postgres_running_wal_e
-  condition: (proc.pname=postgres and (proc.cmdline startswith "sh -c envdir /etc/wal-e.d/env /usr/local/bin/wal-e" or proc.cmdline startswith "sh -c envdir \"/run/etc/wal-e.d/env\" wal-g wal-push"))
-
-- macro: redis_running_prepost_scripts
-  condition: (proc.aname[2]=redis-server and (proc.cmdline contains "redis-server.post-up.d" or proc.cmdline contains "redis-server.pre-up.d"))
-
-- macro: rabbitmq_running_scripts
-  condition: >
-    (proc.pname=beam.smp and
-    (proc.cmdline startswith "sh -c exec ps" or
-    proc.cmdline startswith "sh -c exec inet_gethost" or
-    proc.cmdline= "sh -s unix:cmd" or
-    proc.cmdline= "sh -c exec /bin/sh -s unix:cmd 2>&1"))
-
-- macro: rabbitmqctl_running_scripts
-  condition: (proc.aname[2]=rabbitmqctl and proc.cmdline startswith "sh -c ")
-
-- macro: run_by_appdynamics
-  condition: (proc.pexe endswith java and proc.pcmdline contains " -jar -Dappdynamics")
-
-# The binaries in this list and their descendants are *not* allowed
-# to spawn shells. This includes the binaries spawning shells directly as
-# well as indirectly. For example, apache -> php/perl for
-# mod_{php,perl} -> some shell is also not allowed, because the shell
-# has apache as an ancestor.
-- list: protected_shell_spawning_binaries
-  items: [
-    http_server_binaries, db_server_binaries, nosql_server_binaries, mail_binaries,
-    fluentd, flanneld, splunkd, consul, smbd, runsv, PM2
-    ]
-
-- macro: parent_java_running_zookeeper
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.apache.zookeeper.server)
-
-- macro: parent_java_running_kafka
-  condition: (proc.pexe endswith java and proc.pcmdline contains kafka.Kafka)
-
-- macro: parent_java_running_elasticsearch
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.elasticsearch.bootstrap.Elasticsearch)
-
-- macro: parent_java_running_activemq
-  condition: (proc.pexe endswith java and proc.pcmdline contains activemq.jar)
-
-- macro: parent_java_running_cassandra
-  condition: (proc.pexe endswith java and (proc.pcmdline contains "-Dcassandra.config.loader" or proc.pcmdline contains org.apache.cassandra.service.CassandraDaemon))
-
-- macro: parent_java_running_jboss_wildfly
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.jboss)
-
-- macro: parent_java_running_glassfish
-  condition: (proc.pexe endswith java and proc.pcmdline contains com.sun.enterprise.glassfish)
-
-- macro: parent_java_running_hadoop
-  condition: (proc.pexe endswith java and proc.pcmdline contains org.apache.hadoop)
-
-- macro: parent_java_running_datastax
-  condition: (proc.pexe endswith java and proc.pcmdline contains com.datastax)
-
-- macro: nginx_starting_nginx
-  condition: (proc.pname=nginx and proc.cmdline contains "/usr/sbin/nginx -c /etc/nginx/nginx.conf")
-
-- macro: nginx_running_aws_s3_cp
-  condition: (proc.pname=nginx and proc.cmdline startswith "sh -c /usr/local/bin/aws s3 cp")
-
-- macro: consul_running_net_scripts
-  condition: (proc.pname=consul and (proc.cmdline startswith "sh -c curl" or proc.cmdline startswith "sh -c nc"))
-
-- macro: consul_running_alert_checks
-  condition: (proc.pname=consul and proc.cmdline startswith "sh -c /bin/consul-alerts")
-
-- macro: serf_script
-  condition: (proc.cmdline startswith "sh -c serf")
-
-- macro: check_process_status
-  condition: (proc.cmdline startswith "sh -c kill -0 ")
-
-# In some cases, you may want to consider node processes run directly
-# in containers as protected shell spawners. Examples include using
-# pm2-docker or pm2 start some-app.js --no-daemon-mode as the direct
-# entrypoint of the container, and when the node app is a long-lived
-# server using something like express.
-#
-# However, there are other uses of node related to build pipelines for
-# which node is not really a server but instead a general scripting
-# tool. In these cases, shells are very likely and in these cases you
-# don't want to consider node processes protected shell spawners.
-#
-# We have to choose one of these cases, so we consider node processes
-# as unprotected by default. If you want to consider any node process
-# run in a container as a protected shell spawner, override the below
-# macro to remove the "never_true" clause, which allows it to take effect.
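For instance, a local/user rules file could redefine it with the guard removed; the stock definition, still guarded by "never_true", follows below. A minimal sketch, assuming a same-named macro in a later-loaded rules file replaces the default:

- macro: possibly_node_in_container
  condition: (proc.pname=node and proc.aname[3]=docker-containe)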
-- macro: possibly_node_in_container
-  condition: (never_true and (proc.pname=node and proc.aname[3]=docker-containe))
-
-# Similarly, you may want to consider any shell spawned by apache
-# tomcat as suspect. The famous apache struts attack (CVE-2017-5638)
-# could be exploited to do things like spawn shells.
-#
-# However, many applications *do* use tomcat to run arbitrary shells,
-# as a part of build pipelines, etc.
-#
-# Like for node, we make this case opt-in.
-- macro: possibly_parent_java_running_tomcat
-  condition: (never_true and proc.pexe endswith java and proc.pcmdline contains org.apache.catalina.startup.Bootstrap)
-
-- macro: protected_shell_spawner
-  condition: >
-    (proc.aname in (protected_shell_spawning_binaries)
-    or parent_java_running_zookeeper
-    or parent_java_running_kafka
-    or parent_java_running_elasticsearch
-    or parent_java_running_activemq
-    or parent_java_running_cassandra
-    or parent_java_running_jboss_wildfly
-    or parent_java_running_glassfish
-    or parent_java_running_hadoop
-    or parent_java_running_datastax
-    or possibly_parent_java_running_tomcat
-    or possibly_node_in_container)
-
-- list: mesos_shell_binaries
-  items: [mesos-docker-ex, mesos-slave, mesos-health-ch]
-
-# Note that runsv is both in protected_shell_spawner and the
-# exclusions by pname. This means that runsv can itself spawn shells
-# (the ./run and ./finish scripts), but the processes runsv starts
-# can not spawn shells.
-- rule: Run shell untrusted
-  desc: >
-    An attempt to spawn a shell below a non-shell application. The non-shell applications that are monitored are
-    defined in the protected_shell_spawner macro, with protected_shell_spawning_binaries being the list you can
-    easily customize. For Java parent processes, please note that Java often has a custom process name. Therefore,
-    rely more on proc.exe to define Java applications. This rule can be noisier, as you can see in the exhaustive
-    existing tuning. However, given it is very behavior-driven and broad, it is universally relevant to catch
-    general Remote Code Execution (RCE). Allocate time to tune this rule for your use cases and reduce noise.
-    Tuning suggestions include looking at the duration of the parent process (proc.ppid.duration) to define your
-    long-running app processes. Checking for newer fields such as proc.vpgid.name and proc.vpgid.exe instead of the
-    direct parent process being a non-shell application could make the rule more robust.
- condition: > - spawned_process - and shell_procs - and proc.pname exists - and protected_shell_spawner - and not proc.pname in (shell_binaries, gitlab_binaries, cron_binaries, user_known_shell_spawn_binaries, - needrestart_binaries, - mesos_shell_binaries, - erl_child_setup, exechealthz, - PM2, PassengerWatchd, c_rehash, svlogd, logrotate, hhvm, serf, - lb-controller, nvidia-installe, runsv, statsite, erlexec, calico-node, - "puma reactor") - and not proc.cmdline in (known_shell_spawn_cmdlines) - and not proc.aname in (unicorn_launche) - and not consul_running_net_scripts - and not consul_running_alert_checks - and not nginx_starting_nginx - and not nginx_running_aws_s3_cp - and not run_by_package_mgmt_binaries - and not serf_script - and not check_process_status - and not run_by_foreman - and not python_mesos_marathon_scripting - and not splunk_running_forwarder - and not postgres_running_wal_e - and not redis_running_prepost_scripts - and not rabbitmq_running_scripts - and not rabbitmqctl_running_scripts - and not run_by_appdynamics - and not user_shell_container_exclusions - output: Shell spawned by untrusted binary (parent_exe=%proc.pexe parent_exepath=%proc.pexepath pcmdline=%proc.pcmdline gparent=%proc.aname[2] ggparent=%proc.aname[3] aname[4]=%proc.aname[4] aname[5]=%proc.aname[5] aname[6]=%proc.aname[6] aname[7]=%proc.aname[7] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, host, container, process, shell, mitre_execution, T1059.004] - -# These images are allowed both to run with --privileged and to mount -# sensitive paths from the host filesystem. -# -# NOTE: This list is only provided for backwards compatibility with -# older local falco rules files that may have been appending to -# trusted_images. To make customizations, it's better to add images to -# either privileged_images or falco_sensitive_mount_images. 
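For instance, a local/user rules file might append to the falco_privileged_images list defined further below. A minimal sketch; the image name is a hypothetical placeholder, and append: true is the long-standing list-extension mechanism (recent Falco versions also offer an equivalent override syntax):

- list: falco_privileged_images
  append: true
  items: [docker.io/example/privileged-agent]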
-- list: trusted_images
-  items: []
-
-- list: sematext_images
-  items: [docker.io/sematext/sematext-agent-docker, docker.io/sematext/agent, docker.io/sematext/logagent,
-    registry.access.redhat.com/sematext/sematext-agent-docker,
-    registry.access.redhat.com/sematext/agent,
-    registry.access.redhat.com/sematext/logagent]
-
-# Falco containers
-- list: falco_containers
-  items:
-    - falcosecurity/falco
-    - docker.io/falcosecurity/falco
-    - public.ecr.aws/falcosecurity/falco
-
-# Falco no driver containers
-- list: falco_no_driver_containers
-  items:
-    - falcosecurity/falco-no-driver
-    - docker.io/falcosecurity/falco-no-driver
-    - public.ecr.aws/falcosecurity/falco-no-driver
-
-# These container images are allowed to run with --privileged and full set of capabilities
-# TODO: Remove k8s.gcr.io reference after 01/Dec/2023
-- list: falco_privileged_images
-  items: [
-    falco_containers,
-    docker.io/calico/node,
-    calico/node,
-    docker.io/cloudnativelabs/kube-router,
-    docker.io/docker/ucp-agent,
-    docker.io/mesosphere/mesos-slave,
-    docker.io/rook/toolbox,
-    docker.io/sysdig/sysdig,
-    gcr.io/google_containers/kube-proxy,
-    gcr.io/google-containers/startup-script,
-    gcr.io/projectcalico-org/node,
-    gke.gcr.io/kube-proxy,
-    gke.gcr.io/gke-metadata-server,
-    gke.gcr.io/netd-amd64,
-    gke.gcr.io/watcher-daemonset,
-    gcr.io/google-containers/prometheus-to-sd,
-    k8s.gcr.io/ip-masq-agent-amd64,
-    k8s.gcr.io/kube-proxy,
-    k8s.gcr.io/prometheus-to-sd,
-    registry.k8s.io/ip-masq-agent-amd64,
-    registry.k8s.io/kube-proxy,
-    registry.k8s.io/prometheus-to-sd,
-    quay.io/calico/node,
-    sysdig/sysdig,
-    sematext_images,
-    k8s.gcr.io/dns/k8s-dns-node-cache,
-    registry.k8s.io/dns/k8s-dns-node-cache,
-    mcr.microsoft.com/oss/kubernetes/kube-proxy
-    ]
-
-# The steps libcontainer performs to set up the root program for a container are:
-# - clone + exec self to a program runc:[0:PARENT]
-# - clone a program runc:[1:CHILD] which sets up all the namespaces
-# - clone a second program runc:[2:INIT] + exec to the root program.
-# The parent of runc:[2:INIT] is runc:[0:PARENT]
-# As soon as 1:CHILD is created, 0:PARENT exits, so there's a race
-# where at the time 2:INIT execs the root program, 0:PARENT might have
-# already exited, or might still be around. So we handle both.
-# We also let runc:[1:CHILD] count as the parent process, which can occur
-# when we lose events and lose track of state.
-- macro: container_entrypoint
-  condition: (not proc.pname exists or proc.pname in (runc:[0:PARENT], runc:[1:CHILD], runc, docker-runc, exe, docker-runc-cur, containerd-shim, systemd, crio))
-
-- macro: user_known_system_user_login
-  condition: (never_true)
-
-# Anything run interactively by root
-# - condition: evt.type != switch and user.name = root and proc.name != sshd and interactive
-#   output: "Interactive root (%user.name %proc.name %evt.dir %evt.type %evt.args %fd.name)"
-#   priority: WARNING
-- rule: System user interactive
-  desc: >
-    System (e.g. non-login) users spawning new processes. Can add custom service users (e.g. apache or mysqld).
-    'Interactive' is defined as new processes as descendants of an ssh session or login process. Consider further tuning
-    by only looking at processes in a terminal / tty (proc.tty != 0). A newer field proc.is_vpgid_leader could be of help
-    to distinguish if the process was "directly" executed, for instance, in a tty, or executed as a descendant process in the
-    same process group, which, for example, is the case when subprocesses are spawned from a script. Consider this rule
-    as a great template rule to monitor interactive accesses to your systems more broadly. However, such a custom rule would be
-    unique to your environment. The rule "Terminal shell in container" that fires when using "kubectl exec" is more Kubernetes
-    relevant, whereas this one could be more interesting for the underlying host.
-  condition: >
-    spawned_process
-    and system_users
-    and interactive
-    and not user_known_system_user_login
-  output: System user ran an interactive command (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: INFO
-  tags: [maturity_stable, host, container, users, mitre_execution, T1059, NIST_800-53_AC-2]
-
-# In some cases, a shell is expected to be run in a container. For example, configuration
-# management software may do this, which is expected.
-- macro: user_expected_terminal_shell_in_container_conditions
-  condition: (never_true)
-
-- rule: Terminal shell in container
-  desc: >
-    A shell was used as the entrypoint/exec point into a container with an attached terminal. Parent process may have
-    legitimately already exited and be null (read container_entrypoint macro). Common when using "kubectl exec" in Kubernetes.
-    Correlate with k8saudit exec logs if possible to find user or serviceaccount token used (fuzzy correlation by namespace and pod name).
-    Rather than considering it a standalone rule, it may be best used as a generic auditing rule while examining other triggered
-    rules in this container/tty.
-  condition: >
-    spawned_process
-    and container
-    and shell_procs
-    and proc.tty != 0
-    and container_entrypoint
-    and not user_expected_terminal_shell_in_container_conditions
-  output: A shell was spawned in a container with an attached terminal (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: NOTICE
-  tags: [maturity_stable, container, shell, mitre_execution, T1059]
-
-# For some container types (mesos), there isn't a container image to
-# work with, and the container name is autogenerated, so there isn't
-# any stable aspect of the software to work with. In this case, we
-# fall back to allowing certain command lines.
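As with the other template lists, adopters would typically extend the list that follows from a local/user rules file rather than editing it in place. A minimal sketch; the health-check command line is a hypothetical placeholder:

- list: known_shell_spawn_cmdlines
  append: true
  items: ['"sh -c /opt/myapp/healthcheck.sh"']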
-- list: known_shell_spawn_cmdlines
-  items: [
-    '"sh -c uname -p 2> /dev/null"',
-    '"sh -c uname -s 2>&1"',
-    '"sh -c uname -r 2>&1"',
-    '"sh -c uname -v 2>&1"',
-    '"sh -c uname -a 2>&1"',
-    '"sh -c ruby -v 2>&1"',
-    '"sh -c getconf CLK_TCK"',
-    '"sh -c getconf PAGESIZE"',
-    '"sh -c LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null"',
-    '"sh -c LANG=C /sbin/ldconfig -p 2>/dev/null"',
-    '"sh -c /sbin/ldconfig -p 2>/dev/null"',
-    '"sh -c stty -a 2>/dev/null"',
-    '"sh -c stty -a < /dev/tty"',
-    '"sh -c stty -g < /dev/tty"',
-    '"sh -c node index.js"',
-    '"sh -c node index"',
-    '"sh -c node ./src/start.js"',
-    '"sh -c node app.js"',
-    '"sh -c node -e \"require(''nan'')\""',
-    '"sh -c node -e \"require(''nan'')\")"',
-    '"sh -c node $NODE_DEBUG_OPTION index.js "',
-    '"sh -c crontab -l 2"',
-    '"sh -c lsb_release -a"',
-    '"sh -c lsb_release -is 2>/dev/null"',
-    '"sh -c whoami"',
-    '"sh -c node_modules/.bin/bower-installer"',
-    '"sh -c /bin/hostname -f 2> /dev/null"',
-    '"sh -c locale -a"',
-    '"sh -c -t -i"',
-    '"sh -c openssl version"',
-    '"bash -c id -Gn kafadmin"',
-    '"sh -c /bin/sh -c ''date +%%s''"',
-    '"sh -c /usr/share/lighttpd/create-mime.conf.pl"'
-    ]
-
-# This list allows for easy additions to the set of commands allowed
-# to run shells in containers without having to copy and override the
-# entire run shell in container macro. Once
-# https://github.com/falcosecurity/falco/issues/255 is fixed this will be a
-# bit easier, as someone could append to any of the existing lists.
-- list: user_known_shell_spawn_binaries
-  items: []
-
-# This macro allows for easy additions to the set of commands allowed
-# to run shells in containers without having to override the entire
-# rule. Its default value is an expression that is always false, which
-# becomes true when the "not ..." in the rule is applied.
-- macro: user_shell_container_exclusions
-  condition: (never_true)
-
-# Containers from IBM Cloud
-- list: ibm_cloud_containers
-  items:
-    - icr.io/ext/sysdig/agent
-    - registry.ng.bluemix.net/armada-master/metrics-server-amd64
-    - registry.ng.bluemix.net/armada-master/olm
-
-# In a local/user rules file, list the namespace or container images that are
-# allowed to contact the K8s API Server from within a container. This
-# might cover cases where the K8s infrastructure itself is running
-# within a container.
-# TODO: Remove k8s.gcr.io reference after 01/Dec/2023 -- macro: k8s_containers - condition: > - (container.image.repository in (gcr.io/google_containers/hyperkube-amd64, - gcr.io/google_containers/kube2sky, - docker.io/sysdig/sysdig, sysdig/sysdig, - fluent/fluentd-kubernetes-daemonset, prom/prometheus, - falco_containers, - falco_no_driver_containers, - ibm_cloud_containers, - velero/velero, - quay.io/jetstack/cert-manager-cainjector, weaveworks/kured, - quay.io/prometheus-operator/prometheus-operator, k8s.gcr.io/ingress-nginx/kube-webhook-certgen, - registry.k8s.io/ingress-nginx/kube-webhook-certgen, quay.io/spotahome/redis-operator, - registry.opensource.zalan.do/acid/postgres-operator, registry.opensource.zalan.do/acid/postgres-operator-ui, - rabbitmqoperator/cluster-operator, quay.io/kubecost1/kubecost-cost-model, - docker.io/bitnami/prometheus, docker.io/bitnami/kube-state-metrics, mcr.microsoft.com/oss/azure/aad-pod-identity/nmi) - or (k8s.ns.name = "kube-system")) - -- macro: k8s_api_server - condition: (fd.sip.name="kubernetes.default.svc.cluster.local") - -- macro: user_known_contact_k8s_api_server_activities - condition: (never_true) - -- rule: Contact K8S API Server From Container - desc: > - Detect attempts to communicate with the K8S API Server from a container by non-profiled users. Kubernetes APIs play a - pivotal role in configuring the cluster management lifecycle. Detecting potential unauthorized access to the API server - is of utmost importance. Audit your complete infrastructure and pinpoint any potential machines from which the API server - might be accessible based on your network layout. If Falco can't operate on all these machines, consider analyzing the - Kubernetes audit logs (typically drained from control nodes, and Falco offers a k8saudit plugin) as an additional data - source for detections within the control plane. - condition: > - evt.type=connect and evt.dir=< - and (fd.typechar=4 or fd.typechar=6) - and container - and k8s_api_server - and not k8s_containers - and not user_known_contact_k8s_api_server_activities - output: Unexpected connection to K8s API Server from container (connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, container, network, k8s, mitre_discovery, T1565] - -- rule: Netcat Remote Code Execution in Container - desc: > - Netcat Program runs inside container that allows remote code execution and may be utilized - as a part of a variety of reverse shell payload https://github.com/swisskyrepo/PayloadsAllTheThings/. - These programs are of higher relevance as they are commonly installed on UNIX-like operating systems. - Can fire in combination with the "Redirect STDOUT/STDIN to Network Connection in Container" - rule as it utilizes a different evt.type. 
- condition: > - spawned_process - and container - and ((proc.name = "nc" and (proc.cmdline contains " -e" or - proc.cmdline contains " -c")) or - (proc.name = "ncat" and (proc.args contains "--sh-exec" or - proc.args contains "--exec" or proc.args contains "-e " or - proc.args contains "-c " or proc.args contains "--lua-exec")) - ) - output: Netcat runs inside container that allows remote code execution (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, container, network, process, mitre_execution, T1059] - -- list: grep_binaries - items: [grep, egrep, fgrep] - -- macro: grep_commands - condition: (proc.name in (grep_binaries)) - -# a less restrictive search for things that might be passwords/ssh/user etc. -- macro: grep_more - condition: (never_true) - -- macro: private_key_or_password - condition: > - (proc.args icontains "BEGIN PRIVATE" or - proc.args icontains "BEGIN OPENSSH PRIVATE" or - proc.args icontains "BEGIN RSA PRIVATE" or - proc.args icontains "BEGIN DSA PRIVATE" or - proc.args icontains "BEGIN EC PRIVATE" or - (grep_more and - (proc.args icontains " pass " or - proc.args icontains " ssh " or - proc.args icontains " user ")) - ) - -- rule: Search Private Keys or Passwords - desc: > - Detect attempts to search for private keys or passwords using the grep or find command. This is often seen with - unsophisticated attackers, as there are many ways to access files using bash built-ins that could go unnoticed. - Regardless, this serves as a solid baseline detection that can be tailored to cover these gaps while maintaining - an acceptable noise level. - condition: > - spawned_process - and ((grep_commands and private_key_or_password) or - (proc.name = "find" and (proc.args contains "id_rsa" or - proc.args contains "id_dsa" or - proc.args contains "id_ed25519" or - proc.args contains "id_ecdsa" - ) - )) - output: Grep private keys or passwords activities found (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: - WARNING - tags: [maturity_stable, host, container, process, filesystem, mitre_credential_access, T1552.001] - -- list: log_directories - items: [/var/log, /dev/log] - -- list: log_files - items: [syslog, auth.log, secure, kern.log, cron, user.log, dpkg.log, last.log, yum.log, access_log, mysql.log, mysqld.log] - -- macro: access_log_files - condition: (fd.directory in (log_directories) or fd.filename in (log_files)) - -# a placeholder for whitelist log files that could be cleared. 
Recommend the macro as (fd.name startswith "/var/log/app1*") -- macro: allowed_clear_log_files - condition: (never_true) - -- macro: trusted_logging_images - condition: (container.image.repository endswith "splunk/fluentd-hec" or - container.image.repository endswith "fluent/fluentd-kubernetes-daemonset" or - container.image.repository endswith "openshift3/ose-logging-fluentd" or - container.image.repository endswith "containernetworking/azure-npm") - -- macro: containerd_activities - condition: (proc.name=containerd and (fd.name startswith "/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/" or - fd.name startswith "/var/lib/containerd/tmpmounts/")) - -- rule: Clear Log Activities - desc: > - Detect clearing of critical access log files, typically done to erase evidence that could be attributed to an adversary's - actions. To effectively customize and operationalize this detection, check for potentially missing log file destinations - relevant to your environment, and adjust the profiled containers you wish not to be alerted on. - condition: > - open_write - and access_log_files - and evt.arg.flags contains "O_TRUNC" - and not containerd_activities - and not trusted_logging_images - and not allowed_clear_log_files - output: Log files were tampered (file=%fd.name evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: - WARNING - tags: [maturity_stable, host, container, filesystem, mitre_defense_evasion, T1070, NIST_800-53_AU-10] - -- list: data_remove_commands - items: [shred, mkfs, mke2fs] - -- macro: clear_data_procs - condition: (proc.name in (data_remove_commands)) - -- macro: user_known_remove_data_activities - condition: (never_true) - -- rule: Remove Bulk Data from Disk - desc: > - Detect a process running to clear bulk data from disk with the intention to destroy data, possibly interrupting availability - to systems. Profile your environment and use user_known_remove_data_activities to tune this rule. - condition: > - spawned_process - and clear_data_procs - and not user_known_remove_data_activities - output: Bulk data has been removed from disk (file=%fd.name evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: - WARNING - tags: [maturity_stable, host, container, process, filesystem, mitre_impact, T1485] - -- rule: Create Symlink Over Sensitive Files - desc: > - Detect symlinks created over a curated list of sensitive files or subdirectories under /etc/ or - root directories. Can be customized as needed. Refer to further and equivalent guidance within the - rule "Read sensitive file untrusted". 
- condition: > - create_symlink - and (evt.arg.target in (sensitive_file_names) or evt.arg.target in (sensitive_directory_names)) - output: Symlinks created over sensitive files (target=%evt.arg.target linkpath=%evt.arg.linkpath evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- rule: Create Hardlink Over Sensitive Files - desc: > - Detect hardlink created over a curated list of sensitive files or subdirectories under /etc/ or - root directories. Can be customized as needed. Refer to further and equivalent guidance within the - rule "Read sensitive file untrusted". - condition: > - create_hardlink - and (evt.arg.oldpath in (sensitive_file_names)) - output: Hardlinks created over sensitive files (target=%evt.arg.target linkpath=%evt.arg.linkpath evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, filesystem, mitre_credential_access, T1555] - -- list: user_known_packet_socket_binaries - items: [] - -- rule: Packet socket created in container - desc: > - Detect new packet socket at the device driver (OSI Layer 2) level in a container. Packet socket could be used for ARP Spoofing - and privilege escalation (CVE-2020-14386) by an attacker. Noise can be reduced by using the user_known_packet_socket_binaries - template list. - condition: > - evt.type=socket - and container - and evt.arg[0] contains AF_PACKET - and not proc.name in (user_known_packet_socket_binaries) - output: Packet socket was created in a container (socket_info=%evt.args connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, container, network, mitre_credential_access, T1557.002] - -- macro: user_known_stand_streams_redirect_activities - condition: (never_true) - -# As of engine version 20 this rule can be improved by using the fd.types[] -# field so it only triggers once when all three of std{out,err,in} are -# redirected. -# -# - list: ip_sockets -# items: ["ipv4", "ipv6"] -# -# - rule: Redirect STDOUT/STDIN to Network Connection in Container once -# condition: dup and container and evt.rawres in (0, 1, 2) and fd.type in (ip_sockets) and fd.types[0] in (ip_sockets) and fd.types[1] in (ip_sockets) and fd.types[2] in (ip_sockets) and not user_known_stand_streams_redirect_activities -# -# The following rule has not been changed by default as existing users could be -# relying on the rule triggering when any of std{out,err,in} are redirected. -- rule: Redirect STDOUT/STDIN to Network Connection in Container - desc: > - Detect redirection of stdout/stdin to a network connection within a container, achieved by utilizing a - variant of the dup syscall (potential reverse shell or remote code execution - https://github.com/swisskyrepo/PayloadsAllTheThings/). 
-    This detection is behavior-based and may generate
-    noise in the system, and can be adjusted using the user_known_stand_streams_redirect_activities template
-    macro. Tuning can be performed similarly to existing detections based on process lineage or container images,
-    and/or it can be limited to interactive tty (tty != 0).
-  condition: >
-    dup
-    and container
-    and evt.rawres in (0, 1, 2)
-    and fd.type in ("ipv4", "ipv6")
-    and not user_known_stand_streams_redirect_activities
-  output: Redirect stdout/stdin to network connection (gparent=%proc.aname[2] ggparent=%proc.aname[3] gggparent=%proc.aname[4] fd.sip=%fd.sip connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: NOTICE
-  tags: [maturity_stable, container, network, process, mitre_execution, T1059]
-
-- list: allowed_container_images_loading_kernel_module
-  items: []
-
-- rule: Linux Kernel Module Injection Detected
-  desc: >
-    Inject Linux Kernel Modules from containers using insmod or modprobe with init_module and finit_module
-    syscalls, given the precondition of sys_module effective capabilities. Profile the environment and consider
-    allowed_container_images_loading_kernel_module to reduce noise and account for legitimate cases.
-  condition: >
-    kernel_module_load
-    and container
-    and thread.cap_effective icontains sys_module
-    and not container.image.repository in (allowed_container_images_loading_kernel_module)
-  output: Linux Kernel Module injection from container (parent_exepath=%proc.pexepath gparent=%proc.aname[2] gexepath=%proc.aexepath[2] module=%proc.args res=%evt.res evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: WARNING
-  tags: [maturity_stable, host, container, process, mitre_persistence, TA0003]
-
-- rule: Debugfs Launched in Privileged Container
-  desc: >
-    Detect file system debugger debugfs launched inside a privileged container which might lead to container escape.
-    This rule has a narrower scope.
-  condition: >
-    spawned_process
-    and container
-    and container.privileged=true
-    and proc.name=debugfs
-  output: Debugfs launched in a privileged container (evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info)
-  priority: WARNING
-  tags: [maturity_stable, container, cis, process, mitre_privilege_escalation, T1611]
-
-- rule: Detect release_agent File Container Escapes
-  desc: >
-    Detect an attempt to exploit a container escape using the release_agent file.
-    By running a container with certain capabilities, a privileged user can modify the
-    release_agent file and escape from the container.
- condition: > - open_write - and container - and fd.name endswith release_agent - and (user.uid=0 or thread.cap_effective contains CAP_DAC_OVERRIDE) - and thread.cap_effective contains CAP_SYS_ADMIN - output: Detect an attempt to exploit a container escape using release_agent file (file=%fd.name cap_effective=%thread.cap_effective evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: CRITICAL - tags: [maturity_stable, container, process, mitre_privilege_escalation, T1611] - -- list: docker_binaries - items: [docker, dockerd, containerd-shim, "runc:[1:CHILD]", pause, exe, docker-compose, docker-entrypoi, docker-runc-cur, docker-current, dockerd-current] - -- list: known_ptrace_binaries - items: [] - -- macro: known_ptrace_procs - condition: (proc.name in (known_ptrace_binaries)) - -- macro: ptrace_attach_or_injection - condition: > - (evt.type=ptrace and evt.dir=> and - (evt.arg.request contains PTRACE_POKETEXT or - evt.arg.request contains PTRACE_POKEDATA or - evt.arg.request contains PTRACE_ATTACH or - evt.arg.request contains PTRACE_SEIZE or - evt.arg.request contains PTRACE_SETREGS)) - -- rule: PTRACE attached to process - desc: > - Detect an attempt to inject potentially malicious code into a process using PTRACE in order to evade - process-based defenses or elevate privileges. Common anti-patterns are debuggers. Additionally, profiling - your environment via the known_ptrace_procs template macro can reduce noise. - A successful ptrace syscall generates multiple logs at once. - condition: > - ptrace_attach_or_injection - and proc_name_exists - and not known_ptrace_procs - output: Detected ptrace PTRACE_ATTACH attempt (proc_pcmdline=%proc.pcmdline evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, process, mitre_privilege_escalation, T1055.008] - -- rule: PTRACE anti-debug attempt - desc: > - Detect usage of the PTRACE system call with the PTRACE_TRACEME argument, indicating a program actively attempting - to avoid debuggers attaching to the process. This behavior is typically indicative of malware activity. - Read more about PTRACE in the "PTRACE attached to process" rule. - condition: > - evt.type=ptrace and evt.dir=> - and evt.arg.request contains PTRACE_TRACEME - and proc_name_exists - output: Detected potential PTRACE_TRACEME anti-debug attempt (proc_pcmdline=%proc.pcmdline evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, host, container, process, mitre_defense_evasion, T1622] - -- macro: private_aws_credentials - condition: > - (proc.args icontains "aws_access_key_id" or - proc.args icontains "aws_secret_access_key" or - proc.args icontains "aws_session_token" or - proc.args icontains "accesskeyid" or - proc.args icontains "secretaccesskey") - -- rule: Find AWS Credentials - desc: > - Detect attempts to search for private keys or passwords using the grep or find command, particularly targeting standard - AWS credential locations. 
This is often seen with unsophisticated attackers, as there are many ways to access files - using bash built-ins that could go unnoticed. Regardless, this serves as a solid baseline detection that can be tailored - to cover these gaps while maintaining an acceptable noise level. This rule complements the rule "Search Private Keys or Passwords". - condition: > - spawned_process - and ((grep_commands and private_aws_credentials) or - (proc.name = "find" and proc.args endswith ".aws/credentials")) - output: Detected AWS credentials search activity (proc_pcmdline=%proc.pcmdline proc_cwd=%proc.cwd group_gid=%group.gid group_name=%group.name user_loginname=%user.loginname evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, process, aws, mitre_credential_access, T1552] - -- rule: Execution from /dev/shm - desc: > - This rule detects file execution in the /dev/shm directory, a tactic often used by threat actors to store their readable, writable, and - occasionally executable files. /dev/shm acts as a link to the host or other containers, creating vulnerabilities for their compromise - as well. Notably, /dev/shm remains unchanged even after a container restart. Consider this rule alongside the newer - "Drop and execute new binary in container" rule. - condition: > - spawned_process - and (proc.exe startswith "/dev/shm/" or - (proc.cwd startswith "/dev/shm/" and proc.exe startswith "./" ) or - (shell_procs and proc.args startswith "-c /dev/shm") or - (shell_procs and proc.args startswith "-i /dev/shm") or - (shell_procs and proc.args startswith "/dev/shm") or - (proc.cwd startswith "/dev/shm/" and proc.args startswith "./" )) - and not container.image.repository in (falco_privileged_images, trusted_images) - output: File execution detected from /dev/shm (evt_res=%evt.res file=%fd.name proc_cwd=%proc.cwd proc_pcmdline=%proc.pcmdline user_loginname=%user.loginname group_gid=%group.gid group_name=%group.name evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: WARNING - tags: [maturity_stable, host, container, mitre_execution, T1059.004] - -# List of allowed container images that are known to execute binaries not part of their base image. -- list: known_drop_and_execute_containers - items: [] - -- rule: Drop and execute new binary in container - desc: > - Detect if an executable not belonging to the base image of a container is being executed. - The drop and execute pattern can be observed very often after an attacker gained an initial foothold. - is_exe_upper_layer filter field only applies for container runtimes that use overlayfs as union mount filesystem. - Adopters can utilize the provided template list known_drop_and_execute_containers containing allowed container - images known to execute binaries not included in their base image. Alternatively, you could exclude non-production - namespaces in Kubernetes settings by adjusting the rule further. This helps reduce noise by applying application - and environment-specific knowledge to this rule. Common anti-patterns include administrators or SREs performing - ad-hoc debugging. 
- condition: > - spawned_process - and container - and proc.is_exe_upper_layer=true - and not container.image.repository in (known_drop_and_execute_containers) - output: Executing binary not part of base image (proc_exe=%proc.exe proc_sname=%proc.sname gparent=%proc.aname[2] proc_exe_ino_ctime=%proc.exe_ino.ctime proc_exe_ino_mtime=%proc.exe_ino.mtime proc_exe_ino_ctime_duration_proc_start=%proc.exe_ino.ctime_duration_proc_start proc_cwd=%proc.cwd container_start_ts=%container.start_ts evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: CRITICAL - tags: [maturity_stable, container, process, mitre_persistence, TA0003, PCI_DSS_11.5.1] - -# RFC1918 addresses were assigned for private network usage -- list: rfc_1918_addresses - items: ['"10.0.0.0/8"', '"172.16.0.0/12"', '"192.168.0.0/16"'] - -- macro: outbound - condition: > - (((evt.type = connect and evt.dir=<) or - (evt.type in (sendto,sendmsg) and evt.dir=< and - fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and - (fd.typechar = 4 or fd.typechar = 6) and - (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8" and not fd.snet in (rfc_1918_addresses)) and - (evt.rawres >= 0 or evt.res = EINPROGRESS)) - -- list: ssh_non_standard_ports - items: [80, 8080, 88, 443, 8443, 53, 4444] - -- macro: ssh_non_standard_ports_network - condition: (fd.sport in (ssh_non_standard_ports)) - -- rule: Disallowed SSH Connection Non Standard Port - desc: > - Detect any new outbound SSH connection from the host or container using a non-standard port. This rule holds the potential - to detect a family of reverse shells that cause the victim machine to connect back out over SSH, with STDIN piped from - the SSH connection to a shell's STDIN, and STDOUT of the shell piped back over SSH. Such an attack can be launched against - any app that is vulnerable to command injection. The upstream rule only covers a limited selection of non-standard ports. - We suggest adding more ports, potentially incorporating ranges based on your environment's knowledge and custom SSH port - configurations. This rule can complement the "Redirect STDOUT/STDIN to Network Connection in Container" or - "Disallowed SSH Connection" rule. - condition: > - outbound - and proc.exe endswith ssh - and fd.l4proto=tcp - and ssh_non_standard_ports_network - output: Disallowed SSH Connection (connection=%fd.name lport=%fd.lport rport=%fd.rport fd_type=%fd.type fd_proto=fd.l4proto evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: NOTICE - tags: [maturity_stable, host, container, network, process, mitre_execution, T1059] - -- list: known_memfd_execution_binaries - items: [] - -- macro: known_memfd_execution_processes - condition: (proc.name in (known_memfd_execution_binaries)) - -- rule: Fileless execution via memfd_create - desc: > - Detect if a binary is executed from memory using the memfd_create technique. This is a well-known defense evasion - technique for executing malware on a victim machine without storing the payload on disk and to avoid leaving traces - about what has been executed. 
Adopters can whitelist processes that may use fileless execution for benign purposes - by adding items to the list known_memfd_execution_processes. - condition: > - spawned_process - and proc.is_exe_from_memfd=true - and not known_memfd_execution_processes - output: Fileless execution via memfd_create (container_start_ts=%container.start_ts proc_cwd=%proc.cwd evt_res=%evt.res proc_sname=%proc.sname gparent=%proc.aname[2] evt_type=%evt.type user=%user.name user_uid=%user.uid user_loginuid=%user.loginuid process=%proc.name proc_exepath=%proc.exepath parent=%proc.pname command=%proc.cmdline terminal=%proc.tty exe_flags=%evt.arg.flags %container.info) - priority: CRITICAL - tags: [maturity_stable, host, container, process, mitre_defense_evasion, T1620] diff --git a/kubezero/falcoctl/APKBUILD b/kubezero/falcoctl/APKBUILD index 3b2c4f5..fec9afb 100644 --- a/kubezero/falcoctl/APKBUILD +++ b/kubezero/falcoctl/APKBUILD @@ -1,7 +1,7 @@ # Contributor: Stefan Reimer # Maintainer: Stefan Reimer pkgname=falcoctl -pkgver=0.7.3 +pkgver=0.8.0 pkgrel=0 pkgdesc="The official CLI tool for working with Falco and its ecosystem components." url="https://github.com/falcosecurity/falcoctl" @@ -33,5 +33,5 @@ package() { } sha512sums=" -61e539322c91125569c432ea1fc98c84b928795089829a062e6b5c74c7d1223cd71e557b7a8972ba7c6d1b534d1b87da254ee01e12c14038ced5a8f85a22a623 falcoctl-0.7.3.tar.gz +e62b59339ed1005bfcb9e59242bc187e8c9505173fc2c506f8990abf905062aaccdcc465fd01ffeec90886af1f4afea8448c3f128c84b18b145ffdf0a0f90dbf falcoctl-0.8.0.tar.gz " diff --git a/kubezero/zdt-base/APKBUILD b/kubezero/zdt-base/APKBUILD index d61b9e8..0d5d955 100644 --- a/kubezero/zdt-base/APKBUILD +++ b/kubezero/zdt-base/APKBUILD @@ -111,7 +111,7 @@ nocloud() { } sha512sums=" -c1808572d074e1a91e0efc3c31462f6035159338843e51fbccca5102b2923506ce60ba9e1ef00b2fbb134da7a33f55af364e1bff15c272eb7f4ebc6035f33887 common.sh +36469bda1c6620547b8365610f8631142f42fae2a01408a622ba6ae6f85b45f2b5d6c785aa4d84895da6d91657061ab787beeb35c4883e2d3ba19d9a2841496f common.sh cf8b75a81bb35e853761d21b15b5b109f15350c54daaf66d2912541a20f758c3ca237d58932e5608d2d3867fe15a07ebd694fd1c313a8290d15afc2b27a575dd boot.sh eb7d5b6f92f500dbaba04a915cdd8d66e90456ca86bed86b3a9243f0c25577a9aa42c2ba28c3cad9dda6e6f2d14363411d78eff35656c7c60a6a8646f43dcba5 cloudbender-early.init cac71c605324ad8e60b72f54b8c39ee0924205fcd1f072af9df92b0e8216bcde887ffec677eb2f0eacce3df430f31d5b5609e997d85f14389ee099fbde3c478f cloudbender.init @@ -123,7 +123,7 @@ b86dec8c059642309b2f583191457b7fac7264b75dc5f4a06ad641de6b76589c0571b8b72b515195 484bdcf001b71ce5feed26935db437c613c059790b99f3f5a3e788b129f3e22ba096843585309993446a88c0ab5d60fd0fa530ef3cfb6de1fd34ffc828172329 syslog-ng.logrotate.conf e86eed7dd2f4507b04050b869927b471e8de26bc7d97e7064850478323380a0580a92de302509901ea531d6e3fa79afcbf24997ef13cd0496bb3ee719ad674ee syslog-ng.apparmor cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e cloudbender.stop -b93cec571afe5128ab4d7c3998b3dc48753897f37169a111f606a48d1982e6ffce52a4ac9568a6a062f621148fb652049b84926a40a62d89be3786e6836261e6 cloudbender.start +f106f3e9befdeaad6beef4bada0c774eb7745568b8d29eb86970ac9ea73d1aaac080676d399a11d462973d10e1aef08125bf78d7a362db47a53a2ba06df7d9b4 cloudbender.start f8c052c7ec12c71937c7b8bc05d8374c588f345e303b30eda9c8612dff8f8f34a87a433648a3e9b85b278196ece198533b29680a303ff6478171d43f8e095189 dhcpcd-mtu.hook 
e00a8f296c76446fe1241bf804c0108f47a2676f377a413ee9fede0943362a6582cad30fe13edd93f3d0daab0e2d7696553fb9458dca62adc05572dce339021a monitrc c955dabe692c0a4a2fa2b09ab9096f6b14e83064b34ae8d22697096daf6551f00b590d837787d66ea1d0030a7cc30bef583cc4c936c980465663e73aec5fa2dc monit_alert.sh.aws diff --git a/kubezero/zdt-base/zdt-base.post-install b/kubezero/zdt-base/zdt-base.post-install index 3f27f5c..f7cda20 100644 --- a/kubezero/zdt-base/zdt-base.post-install +++ b/kubezero/zdt-base/zdt-base.post-install @@ -19,9 +19,6 @@ sed -i -e 's/^[\s#]*FAST_STARTUP=.*/FAST_STARTUP=yes/' /etc/conf.d/chronyd #sed -i -e 's/^[\s#]*rc_parallel=.*/rc_parallel="YES"/' /etc/rc.conf #echo 'enable parallel openRC' -# load falco kernel module at boot -grep -q falco /etc/modules || echo falco >> /etc/modules - # Setup syslog-ng json logging and apparmor tweaks cp /lib/zdt/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf cp /lib/zdt/syslog-ng.logrotate.conf /etc/logrotate.d/syslog-ng
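Since the post-install hook above no longer loads a falco kernel module at boot, driver selection is handled entirely in Falco's own configuration. A minimal sketch of the relevant falco.yaml stanza, assuming Falco 0.38's engine option (accepted kinds include kmod, ebpf, and modern_ebpf):

# falco.yaml: select the CO-RE eBPF driver instead of the kernel module
engine:
  kind: modern_ebpf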