# charts/falco/values.home.yaml
# Default values for Falco.
###############################
# General deployment settings #
###############################
image:
# -- The image pull policy.
pullPolicy: IfNotPresent
# -- The image registry to pull from.
registry: docker.io
# -- The image repository to pull from
repository: falcosecurity/falco-no-driver
# -- The image tag to pull. Overrides the image tag whose default is the chart appVersion.
tag: ""
# -- Secrets containing credentials when pulling from private/secure registries.
imagePullSecrets: []
# -- Put here the new name if you want to override the release name used for Falco components.
nameOverride: ""
# -- Same as nameOverride but for the fullname.
fullnameOverride: ""
# -- Override the deployment namespace
namespaceOverride: ""
# -- Add additional pod annotations
podAnnotations: {}
serviceAccount:
# -- Specifies whether a service account should be created.
create: false
# -- Annotations to add to the service account.
annotations: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# -- Add additional pod labels
podLabels: {}
# -- Set pod priorityClassName
podPriorityClassName:
# -- Set securityContext for the pods
# These security settings are overridden by the ones specified for the specific
# containers when there is overlap.
podSecurityContext: {}
# Note that `containerSecurityContext`:
# - will not apply to init containers, if any;
# - takes precedence over other automatic configurations (see below).
#
# Based on the `driver` configuration the auto generated settings are:
# 1) driver.enabled = false:
# securityContext: {}
#
# 2) driver.enabled = true and (driver.kind = kmod || driver.kind = modern_ebpf):
# securityContext:
# privileged: true
#
# 3) driver.enabled = true and driver.kind = ebpf:
# securityContext:
# privileged: true
#
# 4) driver.enabled = true and driver.kind = ebpf and driver.ebpf.leastPrivileged = true
# securityContext:
# capabilities:
# add:
# - BPF
# - SYS_RESOURCE
# - PERFMON
# - SYS_PTRACE
#
# -- Set securityContext for the Falco container. For more info see the "falco.securityContext" helper in "pod-template.tpl".
containerSecurityContext: {}
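# For example, a minimal sketch of an override matching case 4 above (ebpf driver
# with driver.ebpf.leastPrivileged=true); the capability list is copied from the comment above:
# containerSecurityContext:
#   capabilities:
#     add: [BPF, SYS_RESOURCE, PERFMON, SYS_PTRACE]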
scc:
# -- Create OpenShift's Security Context Constraint.
create: true
resources:
# -- Although the resources needed depend on the actual workload, we provide
# sane defaults. If you have more questions or concerns, please refer
# to the #falco Slack channel for more info.
requests:
cpu: 100m
memory: 512Mi
# -- Maximum amount of resources that the Falco container can use.
# If you are enabling more than one source in Falco, consider increasing
# the CPU limits.
limits:
cpu: 1000m
memory: 1024Mi
# -- Selectors used to deploy Falco on a given node/nodes.
nodeSelector: {}
# -- Affinity constraint for pods' scheduling.
affinity: {}
# -- Tolerations to allow Falco to run on Kubernetes masters.
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
# -- Parameters used by the liveness and readiness probes.
healthChecks:
livenessProbe:
# -- Tells the kubelet that it should wait X seconds before performing the first probe.
initialDelaySeconds: 60
# -- Number of seconds after which the probe times out.
timeoutSeconds: 5
# -- Specifies that the kubelet should perform the check every x seconds.
periodSeconds: 15
readinessProbe:
# -- Tells the kubelet that it should wait X seconds before performing the first probe.
initialDelaySeconds: 30
# -- Number of seconds after which the probe times out.
timeoutSeconds: 5
# -- Specifies that the kubelet should perform the check every x seconds.
periodSeconds: 15
# -- Attach the Falco process to a tty inside the container. Needed to flush Falco logs as soon as they are emitted.
# Set it to "true" when you need the Falco logs to be immediately displayed.
tty: false
#########################
# Scenario requirements #
#########################
# Sensors dislocation configuration (scenario requirement)
controller:
# Available options: deployment, daemonset.
kind: daemonset
# Annotations to add to the daemonset or deployment
annotations: {}
daemonset:
updateStrategy:
# You can also customize maxUnavailable or minReadySeconds if you
# need it
# -- Perform rolling updates by default in the DaemonSet agent
# ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
type: RollingUpdate
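# For example, a sketch limiting how many pods can be updated at once (the value is illustrative):
# updateStrategy:
#   type: RollingUpdate
#   rollingUpdate:
#     maxUnavailable: 1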
deployment:
# -- Number of replicas when installing Falco using a deployment. Change it if you really know what you are doing.
# For more info check the section on Plugins in the README.md file.
replicas: 1
# -- Number of old revisions to retain to allow rollback (if not set, the default Kubernetes value of 10 is used).
# revisionHistoryLimit: 1
# -- Network services configuration (scenario requirement)
# Add here your services to be deployed together with Falco.
services:
# Example configuration for the "k8sauditlog" plugin
# - name: k8saudit-webhook
# type: NodePort
# ports:
# - port: 9765 # See plugin open_params
# nodePort: 30007
# protocol: TCP
# File access configuration (scenario requirement)
mounts:
# -- A list of volumes you want to add to the Falco pods.
volumes: []
# -- A list of volume mounts you want to add to the Falco containers.
volumeMounts: []
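# For example, a sketch mounting an extra host path into the Falco containers
# (volume name and paths are illustrative, not chart defaults):
# volumes:
#   - name: extra-certs
#     hostPath:
#       path: /etc/falco/certs
# volumeMounts:
#   - name: extra-certs
#     mountPath: /etc/falco/certs
#     readOnly: true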
# -- By default, `/proc` from the host is only mounted into the Falco pod when `driver.enabled` is set to `true`. This flag allows overriding that behaviour for edge cases where `/proc` is needed but the syscall data source is not enabled at the same time (e.g. for specific plugins).
enforceProcMount: false
# Driver settings (scenario requirement)
driver:
# -- Set it to false if you want to deploy Falco without the drivers.
# Always set it to false when using Falco with plugins.
enabled: true
# -- kind tells Falco which driver to use. Available options: kmod (kernel driver), ebpf (eBPF probe), modern_ebpf (modern eBPF probe).
kind: modern_ebpf
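# For example, the driver kind can also be set at install/upgrade time
# (a sketch, assuming the falcosecurity chart repository has been added):
# helm upgrade --install falco falcosecurity/falco --namespace falco --set driver.kind=ebpf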
# -- kmod holds the configuration for the kernel module.
kmod:
# -- bufSizePreset determines the size of the shared space between Falco and its drivers.
# This shared space serves as a temporary storage for syscall events.
bufSizePreset: 4
# -- dropFailedExit if set true drops failed system call exit events before pushing them to userspace.
dropFailedExit: false
# -- Configuration section for ebpf driver.
ebpf:
# -- Path where the eBPF probe is located. It comes in handy when the probe has been installed on the nodes using tools other than the init
# container deployed with the chart.
path: "${HOME}/.falco/falco-bpf.o"
# -- Needed to enable eBPF JIT at runtime for performance reasons.
# Can be skipped if eBPF JIT is enabled from outside the container
hostNetwork: false
# -- Constrain Falco with capabilities instead of running a privileged container.
# Ensure the eBPF driver is enabled (i.e., setting the `driver.kind` option to `ebpf`).
# Capabilities used: {CAP_SYS_RESOURCE, CAP_SYS_ADMIN, CAP_SYS_PTRACE}.
# On kernel versions >= 5.8 'CAP_PERFMON' and 'CAP_BPF' could replace 'CAP_SYS_ADMIN' but please pay attention to the 'kernel.perf_event_paranoid' value on your system.
# Usually 'kernel.perf_event_paranoid>2' means that you cannot use 'CAP_PERFMON' and you should fall back to 'CAP_SYS_ADMIN', but the behavior changes across different distros.
# Read more on that here: https://falco.org/docs/event-sources/kernel/#least-privileged-mode-1
leastPrivileged: false
# -- bufSizePreset determines the size of the shared space between Falco and its drivers.
# This shared space serves as a temporary storage for syscall events.
bufSizePreset: 4
# -- dropFailedExit if set true drops failed system call exit events before pushing them to userspace.
dropFailedExit: false
modernEbpf:
# -- Constrain Falco with capabilities instead of running a privileged container.
# Ensure the modern eBPF driver is enabled (i.e., setting the `driver.kind` option to `modern_ebpf`).
# Capabilities used: {CAP_SYS_RESOURCE, CAP_BPF, CAP_PERFMON, CAP_SYS_PTRACE}.
# Read more on that here: https://falco.org/docs/event-sources/kernel/#least-privileged-mode-2
leastPrivileged: false
# -- bufSizePreset determines the size of the shared space between Falco and its drivers.
# This shared space serves as a temporary storage for syscall events.
bufSizePreset: 4
# -- dropFailedExit if set true drops failed system call exit events before pushing them to userspace.
dropFailedExit: false
# -- cpusForEachBuffer controls how many CPUs are assigned to a single syscall buffer.
cpusForEachBuffer: 2
# -- gVisor configuration. Based on your system you need to set the appropriate values.
# Please remember to add pod tolerations and affinities in order to schedule the Falco pods on the gVisor-enabled nodes.
gvisor:
# -- Runsc container runtime configuration. Falco needs to interact with it in order to intercept the activity of the sandboxed pods.
runsc:
# -- Absolute path of the `runsc` binary in the k8s nodes.
path: /home/containerd/usr/local/sbin
# -- Absolute path of the root directory of the `runsc` container runtime. It is of vital importance for Falco since `runsc` stores the information about the workloads it handles there;
root: /run/containerd/runsc
# -- Absolute path of the `runsc` configuration file, used by Falco to set its configuration and make `gVisor` aware of its presence.
config: /run/containerd/runsc/config.toml
# -- Configuration for the Falco init container.
loader:
# -- Enable/disable the init container.
enabled: true
initContainer:
image:
# -- The image pull policy.
pullPolicy: IfNotPresent
# -- The image registry to pull from.
registry: docker.io
# -- The image repository to pull from.
repository: falcosecurity/falco-driver-loader
# -- Overrides the image tag whose default is the chart appVersion.
tag: ""
# -- Extra environment variables that will be passed to the Falco driver loader init container.
env: []
# -- Arguments to pass to the Falco driver loader init container.
args: []
# -- Resources requests and limits for the Falco driver loader init container.
resources: {}
# -- Security context for the Falco driver loader init container. Overrides the default security context. If driver.kind == "kmod" you must at least set `privileged: true`.
securityContext: {}
# Collectors for data enrichment (scenario requirement)
collectors:
# -- Enable/disable all the metadata collectors.
enabled: true
docker:
# -- Enable Docker support.
enabled: true
# -- The path of the Docker daemon socket.
socket: /var/run/docker.sock
containerd:
# -- Enable ContainerD support.
enabled: true
# -- The path of the ContainerD socket.
socket: /run/containerd/containerd.sock
crio:
# -- Enable CRI-O support.
enabled: true
# -- The path of the CRI-O socket.
socket: /run/crio/crio.sock
# -- kubernetes holds the configuration for the kubernetes collector. Starting from version 0.37.0 of Falco, the legacy
# kubernetes client has been removed. A new standalone component named k8s-metacollector and a Falco plugin have been developed
# to solve the issues that were present in the old implementation. More info here: https://github.com/falcosecurity/falco/issues/2973
kubernetes:
# -- enabled specifies whether the Kubernetes metadata should be collected using the k8smeta plugin and the k8s-metacollector component.
# It will deploy the k8s-metacollector external component that fetches Kubernetes metadata and pushes them to Falco instances.
# For more info see:
# https://github.com/falcosecurity/k8s-metacollector
# https://github.com/falcosecurity/charts/tree/master/charts/k8s-metacollector
# When this option is disabled, Falco falls back to the container annotations to grab the metadata.
# In such a case, only the ID, name, namespace, labels of the pod will be available.
enabled: true
# -- pluginRef is the OCI reference for the k8smeta plugin. It could be a full reference such as:
# "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0". Or just name + tag: k8smeta:0.1.0.
pluginRef: "ghcr.io/falcosecurity/plugins/plugin/k8smeta:0.1.0"
# -- collectorHostname is the address of the k8s-metacollector. When not specified it will be set to match
# the k8s-metacollector service, e.g. falco-k8smetacollector.falco.svc. If for any reason you need to override
# it, make sure to set here the address of the k8s-metacollector.
# It is used by the k8smeta plugin to connect to the k8s-metacollector.
collectorHostname: ""
# -- collectorPort designates the port on which the k8s-metacollector gRPC service listens. If not specified
# the value of the port named `broker-grpc` in k8s-metacollector.service.ports is used. The default value is 45000.
# It is used by the k8smeta plugin to connect to the k8s-metacollector.
collectorPort: ""
############################
# Extras and customization #
############################
extra:
# -- Extra environment variables that will be passed to the Falco containers.
env: []
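# For example, a sketch setting FALCO_HOSTNAME (see the `metrics` section below) to the pod name:
# env:
#   - name: FALCO_HOSTNAME
#     valueFrom:
#       fieldRef:
#         fieldPath: metadata.name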
# -- Extra command-line arguments.
args: []
# -- Additional initContainers for Falco pods.
initContainers: []
# -- Certificates used by the webserver and gRPC server.
# Paste the certificate content, use helm with --set-file,
# or use an existing secret containing key, crt, ca as well as the pem bundle.
certs:
# -- Existing secret containing key, crt and ca as well as the pem bundle.
existingSecret: ""
server:
# -- Key used by gRPC and webserver.
key: ""
# -- Certificate used by gRPC and webserver.
crt: ""
ca:
# -- CA certificate used by gRPC, webserver and AuditSink validation.
crt: ""
existingClientSecret: ""
client:
# -- Key used by http mTLS client.
key: ""
# -- Certificate used by http mTLS client.
crt: ""
# -- Third party rules enabled for Falco. More info on the dedicated section in README.md file.
customRules:
{}
# Although Falco comes with a nice default rule set for detecting weird
# behavior in containers, our users are going to customize the run-time
# security rule sets or policies for the specific container images and
# applications they run. This feature can be handled in this section.
#
# Example:
#
# rules-traefik.yaml: |-
# [ rule body ]
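#
# A slightly fuller sketch of what an entry could look like (the rule body is
# illustrative only, not part of the shipped rule set):
#
# customRules:
#   rules-example.yaml: |-
#     - rule: Shell spawned in a container (example)
#       desc: Illustrative custom rule
#       condition: spawned_process and container and proc.name in (bash, sh)
#       output: Shell started in container (user=%user.name container=%container.name image=%container.image.repository)
#       priority: WARNING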
########################
# Falco integrations #
########################
# -- For configuration values, see https://github.com/falcosecurity/charts/blob/master/charts/falcosidekick/values.yaml
falcosidekick:
# -- Enable falcosidekick deployment.
enabled: true
# -- Enable usage of full FQDN of falcosidekick service (useful when a Proxy is used).
fullfqdn: false
# -- Listen port. Default value: 2801
listenPort: ""
####################
# falcoctl config #
####################
falcoctl:
image:
# -- The image pull policy.
pullPolicy: IfNotPresent
# -- The image registry to pull from.
registry: docker.io
# -- The image repository to pull from.
repository: falcosecurity/falcoctl
# -- The image tag to pull.
tag: "0.7.2"
artifact:
# -- Runs "falcoctl artifact install" command as an init container. It is used to install artfacts before
# Falco starts. It provides them to Falco by using an emptyDir volume.
install:
enabled: true
# -- Extra environment variables that will be passed to the falcoctl-artifact-install init container.
env: []
# -- Arguments to pass to the falcoctl-artifact-install init container.
args: ["--log-format=json"]
# -- Resources requests and limits for the falcoctl-artifact-install init container.
resources: {}
# -- Security context for the falcoctl init container.
securityContext: {}
# -- A list of volume mounts you want to add to the falcoctl-artifact-install init container.
mounts:
volumeMounts: []
# -- Runs "falcoctl artifact follow" command as a sidecar container. It is used to automatically check for
# updates given a list of artifacts. If an update is found it downloads and installs it in a shared folder (emptyDir)
# that is accessible by Falco. Rulesfiles are automatically detected and loaded by Falco once they are installed in the
# correct folder by falcoctl. To prevent new versions of artifacts from breaking Falco, the tool checks if it is compatible
# with the running version of Falco before installing it.
follow:
enabled: true
# -- Extra environment variables that will be passed to the falcoctl-artifact-follow sidecar container.
env: []
# -- Arguments to pass to the falcoctl-artifact-follow sidecar container.
args: ["--log-format=json"]
# -- Resources requests and limits for the falcoctl-artifact-follow sidecar container.
resources: {}
# -- Security context for the falcoctl-artifact-follow sidecar container.
securityContext: {}
# -- A list of volume mounts you want to add to the falcoctl-artifact-follow sidecar container.
mounts:
volumeMounts: []
# -- Configuration file of the falcoctl tool. It is saved in a configmap and mounted on the falcoctl containers.
config:
# -- List of indexes that falcoctl downloads and uses to locate and download artifacts. For more info see:
# https://github.com/falcosecurity/falcoctl/blob/main/proposals/20220916-rules-and-plugin-distribution.md#index-file-overview
indexes:
- name: falcosecurity
url: https://falcosecurity.github.io/falcoctl/index.yaml
# -- Configuration used by the artifact commands.
artifact:
# -- List of artifact types that falcoctl will handle. If a configured ref resolves to an artifact whose type is not contained
# in the list, falcoctl will refuse to download and install that artifact.
allowedTypes:
- rulesfile
- plugin
install:
# -- Resolve the dependencies for artifacts.
resolveDeps: true
# -- List of artifacts to be installed by the falcoctl init container.
refs: [falco-rules:3]
# -- Directory where the rulesfiles are saved. The path is relative to the container, which in this case is an emptyDir
# mounted also by the Falco pod.
rulesfilesDir: /rulesfiles
# -- Same as the one above but for the plugins.
pluginsDir: /plugins
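# For example, a sketch that also installs the k8saudit plugin and its rules
# (refs are illustrative; remember to list the plugin under falco.load_plugins as well):
# refs: [falco-rules:3, k8saudit, k8saudit-rules]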
follow:
# -- List of artifacts to be followed by the falcoctl sidecar container.
refs: [falco-rules:3]
# -- How often the tool checks for new versions of the followed artifacts.
every: 6h
# -- HTTP endpoint that serves the api versions of the Falco instance. It is used to check if the new versions are compatible
# with the running Falco instance.
falcoversions: http://localhost:8765/versions
# -- See the fields of the artifact.install section.
rulesfilesDir: /rulesfiles
# -- See the fields of the artifact.install section.
pluginsDir: /plugins
######################
# falco.yaml config #
######################
falco:
#####################
# Falco rules files #
#####################
# [Stable] `rules_file`
#
# Falco rules can be specified using files or directories, which are loaded at
# startup. The name "rules_file" is maintained for backwards compatibility. If
# the entry is a file, it will be read directly. If the entry is a directory,
# all files within that directory will be read in alphabetical order.
#
# The falco_rules.yaml file ships with the Falco package and is overridden with
# every new software version. falco_rules.local.yaml is only created if it
# doesn't already exist.
#
# To customize the set of rules, you can add your modifications to any file.
# It's important to note that the files or directories are read in the order
# specified here. In addition, rules are loaded by Falco in the order they
# appear within each rule file.
#
# If you have any customizations intended to override a previous configuration,
# make sure they appear in later files to take precedence. On the other hand, if
# the conditions of rules with the same event type(s) have the potential to
# overshadow each other, ensure that the more important rule appears first. This
# is because rules are evaluated on a "first match wins" basis, where the first
# rule that matches the conditions will be applied, and subsequent rules will
# not be evaluated for the same event type.
#
# By arranging the order of files and rules thoughtfully, you can ensure that
# desired customizations and rule behaviors are prioritized and applied as
# intended.
# -- The location of the rules files that will be consumed by Falco.
rules_file:
- /etc/falco/falco_rules.yaml
- /etc/falco/falco_rules.local.yaml
- /etc/falco/rules.d
# [Experimental] `rule_matching`
#
# - Falco has to be performant when evaluating rules against events. To quickly
# understand which rules could trigger on a specific event, Falco maintains
# buckets of rules sharing the same event type in a map. Then, the lookup
# in each bucket is performed through linear search. The `rule_matching`
# configuration key's values are:
# - "first": when evaluating conditions of rules in a bucket, Falco will stop
# to evaluate rules if it finds a matching rules. Since rules are stored
# in buckets in the order they are defined in the rules files, this option
# could prevent other rules to trigger even if their condition is met, causing
# a shadowing problem.
# - "all": with this value Falco will continue evaluating all the rules
# stored in the bucket, so that multiple rules could be triggered upon one
# event.
rule_matching: first
# [Experimental] `outputs_queue`
#
# -- Falco utilizes tbb::concurrent_bounded_queue for handling outputs, and this parameter
# allows you to customize the queue capacity. Please refer to the official documentation:
# https://oneapi-src.github.io/oneTBB/main/tbb_userguide/Concurrent_Queue_Classes.html.
# On a healthy system with optimized Falco rules, the queue should not fill up.
# If it does, it is most likely happening due to the entire event flow being too slow,
# indicating that the server is under heavy load.
#
# `capacity`: the maximum number of items allowed in the queue is determined by this value.
# Setting the value to 0 (which is the default) is equivalent to keeping the queue unbounded.
# In other words, when this configuration is set to 0, the number of allowed items is
# effectively set to the largest possible long value, disabling this setting.
#
# In the case of an unbounded queue, if the available memory on the system is consumed,
# the Falco process would be OOM killed. When using this option and setting the capacity,
# the current event would be dropped, and the event loop would continue. This behavior mirrors
# kernel-side event drops when the buffer between kernel space and user space is full.
outputs_queue:
capacity: 0
#################
# Falco plugins #
#################
# [Stable] `load_plugins` and `plugins`
#
# --- [Description]
#
# Falco plugins enable integration with other services in your ecosystem.
# They allow Falco to extend its functionality and leverage data sources such as
# Kubernetes audit logs or AWS CloudTrail logs. This enables Falco to perform
# fast on-host detections beyond syscalls and container events. The plugin
# system will continue to evolve with more specialized functionality in future
# releases.
#
# Please refer to the plugins repo at
# https://github.com/falcosecurity/plugins/blob/master/plugins/ for detailed
# documentation on the available plugins. This repository provides comprehensive
# information about each plugin and how to utilize them with Falco.
#
# Please note that if your intention is to enrich Falco syscall logs with fields
# such as `k8s.ns.name`, `k8s.pod.name`, and `k8s.pod.*`, you do not need to use
# the `k8saudit` plugin. This information is automatically extracted from the
# container runtime socket. The `k8saudit` plugin is specifically designed to
# integrate with Kubernetes audit logs and is not required for basic enrichment
# of syscall logs with Kubernetes-related fields.
#
# --- [Usage]
#
# Disabled by default, indicated by an empty `load_plugins` list. Each plugin meant
# to be enabled needs to be listed as an explicit list item.
#
# For example, if you want to use the `k8saudit` plugin,
# ensure it is configured appropriately and then change this to:
# load_plugins: [k8saudit, json]
# -- Add here all plugins and their configuration. Please
# consult the plugins documentation for more info. Remember to add the plugin names to
# "load_plugins: []" in order to load them in Falco.
load_plugins: []
# -- Customize subsettings for each enabled plugin. These settings will only be
# applied when the corresponding plugin is enabled using the `load_plugins`
# option.
plugins:
- name: k8saudit
library_path: libk8saudit.so
init_config:
# maxEventSize: 262144
# webhookMaxBatchSize: 12582912
# sslCertificate: /etc/falco/falco.pem
open_params: "http://:9765/k8s-audit"
- name: cloudtrail
library_path: libcloudtrail.so
# see docs for init_config and open_params:
# https://github.com/falcosecurity/plugins/blob/master/plugins/cloudtrail/README.md
- name: json
library_path: libjson.so
init_config: ""
######################
# Falco config files #
######################
# [Stable] `watch_config_files`
#
# Falco monitors configuration and rule files for changes and automatically
# reloads itself to apply the updated configuration when any modifications are
# detected. This feature is particularly useful when you want to make real-time
# changes to the configuration or rules of Falco without interrupting its
# operation or losing its state. For more information about Falco's state
# engine, please refer to the `base_syscalls` section.
# -- Watch config file and rules files for modification.
# When a file is modified, Falco will propagate new config,
# by reloading itself.
watch_config_files: true
##########################
# Falco outputs settings #
##########################
# [Stable] `time_format_iso_8601`
#
# -- When enabled, Falco will display log and output messages with times in the ISO
# 8601 format. By default, times are shown in the local time zone determined by
# the /etc/localtime configuration.
time_format_iso_8601: false
# [Stable] `priority`
#
# -- Any rule with a priority level more severe than or equal to the specified
# minimum level will be loaded and run by Falco. This allows you to filter and
# control the rules based on their severity, ensuring that only rules of a
# certain priority or higher are active and evaluated by Falco. Supported
# levels: "emergency", "alert", "critical", "error", "warning", "notice",
# "info", "debug"
priority: debug
# [Stable] `json_output`
#
# -- When enabled, Falco will output alert messages and rules file
# loading/validation results in JSON format, making it easier for downstream
# programs to process and consume the data. By default, this option is disabled.
json_output: false
# [Stable] `json_include_output_property`
#
# -- When using JSON output in Falco, you have the option to include the "output"
# property itself in the generated JSON output. The "output" property provides
# additional information about the purpose of the rule. To reduce the logging
# volume, it is recommended to turn it off if it's not necessary for your use
# case.
json_include_output_property: true
# [Stable] `json_include_tags_property`
#
# -- When using JSON output in Falco, you have the option to include the "tags"
# field of the rules in the generated JSON output. The "tags" field provides
# additional metadata associated with the rule. To reduce the logging volume,
# if the tags associated with the rule are not needed for your use case or can
# be added at a later stage, it is recommended to turn it off.
json_include_tags_property: true
# [Stable] `buffered_outputs`
#
# -- Enabling buffering for the output queue can offer performance optimization,
# efficient resource usage, and smoother data flow, resulting in a more reliable
# output mechanism. By default, buffering is disabled (false).
buffered_outputs: false
# [Stable] `outputs`
#
# -- A throttling mechanism, implemented as a token bucket, can be used to control
# the rate of Falco outputs. Each event source has its own rate limiter,
# ensuring that alerts from one source do not affect the throttling of others.
# The following options control the mechanism:
# - rate: the number of tokens (i.e. right to send a notification) gained per
# second. When 0, the throttling mechanism is disabled. Defaults to 0.
# - max_burst: the maximum number of tokens outstanding. Defaults to 1000.
#
# For example, setting the rate to 1 allows Falco to send up to 1000
# notifications initially, followed by 1 notification per second. The burst
# capacity is fully restored after 1000 seconds of no activity.
#
# Throttling can be useful in various scenarios, such as preventing notification
# floods, managing system load, controlling event processing, or complying with
# rate limits imposed by external systems or APIs. It allows for better resource
# utilization, avoids overwhelming downstream systems, and helps maintain a
# balanced and controlled flow of notifications.
#
# With the default settings, the throttling mechanism is disabled.
outputs:
rate: 0
max_burst: 1000
##########################
# Falco outputs channels #
##########################
# Falco supports various output channels, such as syslog, stdout, file, gRPC,
# webhook, and more. You can enable or disable these channels as needed to
# control where Falco alerts and log messages are directed. This flexibility
# allows seamless integration with your preferred logging and alerting systems.
# Multiple outputs can be enabled simultaneously.
# [Stable] `stdout_output`
#
# -- Redirect logs to standard output.
stdout_output:
enabled: true
# [Stable] `syslog_output`
#
# -- Send logs to syslog.
syslog_output:
enabled: true
# [Stable] `file_output`
#
# -- When appending Falco alerts to a file, each new alert will be added to a new
# line. It's important to note that Falco does not perform log rotation for this
# file. If the `keep_alive` option is set to `true`, the file will be opened once
# and continuously written to, else the file will be reopened for each output
# message. Furthermore, the file will be closed and reopened if Falco receives
# the SIGUSR1 signal.
file_output:
enabled: false
keep_alive: false
filename: ./events.txt
# [Stable] `http_output`
#
# -- Send logs to an HTTP endpoint or webhook.
http_output:
enabled: false
url: ""
user_agent: "falcosecurity/falco"
# -- Tell Falco to not verify the remote server.
insecure: false
# -- Path to the CA certificate that can verify the remote server.
ca_cert: ""
# -- Path to a specific file that will be used as the CA certificate store.
ca_bundle: ""
# -- Path to a folder that will be used as the CA certificate store. CA certificates need to be
# stored as individual PEM files in this directory.
ca_path: "/etc/falco/certs/"
# -- Tell Falco to use mTLS
mtls: false
# -- Path to the client cert.
client_cert: "/etc/falco/certs/client/client.crt"
# -- Path to the client key.
client_key: "/etc/falco/certs/client/client.key"
# -- Whether to echo server answers to stdout
echo: false
# -- compress_uploads: whether to compress data sent to the HTTP endpoint.
compress_uploads: false
# -- keep_alive: whether to keep the connection alive.
keep_alive: false
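# For example, a minimal sketch forwarding alerts to an HTTP collector (the URL is illustrative):
# http_output:
#   enabled: true
#   url: "http://some.url/some/path/"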
# [Stable] `program_output`
#
# -- Redirect the output to another program or command.
#
# Possible additional things you might want to do with program output:
# - send to a slack webhook:
# program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX"
# - logging (alternate method than syslog):
# program: logger -t falco-test
# - send over a network connection:
# program: nc host.example.com 80
# If `keep_alive` is set to `true`, the program will be started once and
# continuously written to, with each output message on its own line. If
# `keep_alive` is set to `false`, the program will be re-spawned for each output
# message. Furthermore, the program will be re-spawned if Falco receives
# the SIGUSR1 signal.
program_output:
enabled: false
keep_alive: false
program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.slack.com/services/XXX"
# [Stable] `grpc_output`
#
# -- Use gRPC as an output service.
#
# gRPC is a modern and high-performance framework for remote procedure calls
# (RPC). It utilizes protocol buffers for efficient data serialization. The gRPC
# output in Falco provides a modern and efficient way to integrate with other
# systems. By default the setting is turned off. Enabling this option stores
# output events in memory until they are consumed by a gRPC client. Ensure that
# you have a consumer for the output events or leave it disabled.
grpc_output:
enabled: false
##########################
# Falco exposed services #
##########################
# [Stable] `grpc`
#
# Falco provides support for running a gRPC server using two main binding types:
# 1. Over the network with mandatory mutual TLS authentication (mTLS), which
# ensures secure communication
# 2. Local Unix socket binding with no authentication. By default, the
# gRPC server in Falco is turned off with no enabled services (see the
# `grpc_output` setting).
#
# To configure the gRPC server in Falco, you can make the following changes to
# the options:
#
# - Uncomment the relevant configuration options related to the gRPC server.
# - Update the paths of the generated certificates for mutual TLS authentication
# if you choose to use mTLS.
# - Specify the address to bind and expose the gRPC server.
# - Adjust the threadiness configuration to control the number of threads and
# contexts used by the server.
#
# Keep in mind that if any issues arise while creating the gRPC server, the
# information will be logged, but it will not stop the main Falco daemon.
# gRPC server using mTLS
# grpc:
# enabled: true
# bind_address: "0.0.0.0:5060"
# # When the `threadiness` value is set to 0, Falco will automatically determine
# # the appropriate number of threads based on the number of online cores in the system.
# threadiness: 0
# private_key: "/etc/falco/certs/server.key"
# cert_chain: "/etc/falco/certs/server.crt"
# root_certs: "/etc/falco/certs/ca.crt"
# -- gRPC server using a local unix socket
grpc:
enabled: false
bind_address: "unix:///run/falco/falco.sock"
# -- When the `threadiness` value is set to 0, Falco will automatically determine
# the appropriate number of threads based on the number of online cores in the system.
threadiness: 0
# [Stable] `webserver`
#
# -- Falco supports an embedded webserver that runs within the Falco process,
# providing a lightweight and efficient way to expose web-based functionalities
# without the need for an external web server. The following endpoints are
# exposed:
# - /healthz: designed to be used for checking the health and availability of
# the Falco application (the name of the endpoint is configurable).
# - /versions: responds with a JSON object containing the version numbers of the
# internal Falco components (similar output as `falco --version -o
# json_output=true`).
#
# Please note that the /versions endpoint is particularly useful for other Falco
# services, such as `falcoctl`, to retrieve information about a running Falco
# instance. If you plan to use `falcoctl` locally or with Kubernetes, make sure
# the Falco webserver is enabled.
#
# The behavior of the webserver can be controlled with the following options,
# which are enabled by default:
#
# The `ssl_certificate` option specifies a combined SSL certificate and
# corresponding key that are contained in a single file. You can generate a
# key/cert as follows:
#
# $ openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem
# $ cat certificate.pem key.pem > falco.pem
# $ sudo cp falco.pem /etc/falco/falco.pem
webserver:
enabled: true
# When the `threadiness` value is set to 0, Falco will automatically determine
# the appropriate number of threads based on the number of online cores in the system.
threadiness: 0
listen_port: 8765
k8s_healthz_endpoint: /healthz
ssl_enabled: false
ssl_certificate: /etc/falco/falco.pem
prometheus_metrics_enabled: true
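# With prometheus_metrics_enabled set to true, the webserver also serves Prometheus metrics;
# a quick sketch to verify from inside the pod (port taken from listen_port above):
# curl http://localhost:8765/metrics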
##############################################################################
# Falco logging / alerting / metrics related to software functioning (basic) #
##############################################################################
# [Stable] `log_stderr` and `log_syslog`
#
# Falco's logs related to the functioning of the software, which are not related
# to Falco alert outputs but rather its lifecycle, settings and potential
# errors, can be directed to stderr and/or syslog.
# -- Send information logs to stderr. Note these are *not* security
# notification logs! These are just Falco lifecycle (and possibly error) logs.
log_stderr: true
# -- Send information logs to syslog. Note these are *not* security
# notification logs! These are just Falco lifecycle (and possibly error) logs.
log_syslog: true
# [Stable] `log_level`
#
# -- The `log_level` setting determines the minimum log level to include in Falco's
# logs related to the functioning of the software. This setting is separate from
# the `priority` field of rules and specifically controls the log level of
# Falco's operational logging. By specifying a log level, you can control the
# verbosity of Falco's operational logs. Only logs of a certain severity level
# or higher will be emitted. Supported levels: "emergency", "alert", "critical",
# "error", "warning", "notice", "info", "debug".
log_level: info
# [Stable] `libs_logger`
#
# -- The `libs_logger` setting in Falco determines the minimum log level to include
# in the logs related to the functioning of the software of the underlying
# `libs` library, which Falco utilizes. This setting is independent of the
# `priority` field of rules and the `log_level` setting that controls Falco's
# operational logs. It allows you to specify the desired log level for the `libs`
# library specifically, providing more granular control over the logging
# behavior of the underlying components used by Falco. Only logs of a certain
# severity level or higher will be emitted. Supported levels: "emergency",
# "alert", "critical", "error", "warning", "notice", "info", "debug". It is not
# recommended for production use.
libs_logger:
enabled: false
severity: debug
#################################################################################
# Falco logging / alerting / metrics related to software functioning (advanced) #
#################################################################################
# [Stable] `output_timeout`
#
# Generates Falco operational logs when `log_level=notice` at minimum
#
# A timeout error occurs when a process or operation takes longer to complete
# than the allowed or expected time limit. In the context of Falco, an output
# timeout error refers to the situation where an output channel fails to deliver
# an alert within a specified deadline. Various reasons, such as network issues,
# resource constraints, or performance bottlenecks can cause timeouts.
#
# -- The `output_timeout` parameter specifies the duration, in milliseconds, to
# wait before considering the deadline exceeded. By default, the timeout is set
# to 2000ms (2 seconds), meaning that the consumer of Falco outputs can block
# the Falco output channel for up to 2 seconds without triggering a timeout
# error.
#
# Falco actively monitors the performance of output channels. With this setting
# the timeout error can be logged, but please note that this requires setting
# Falco's operational logs `log_level` to a minimum of `notice`.
#
# It's important to note that Falco outputs will not be discarded from the
# output queue. This means that if an output channel becomes blocked
# indefinitely, it indicates a potential issue that needs to be addressed by the
# user.
output_timeout: 2000
# [Stable] `syscall_event_timeouts`
#
# -- Generates Falco operational logs when `log_level=notice` at minimum
#
# Falco utilizes a shared buffer between the kernel and userspace to receive
# events, such as system call information, in userspace. However, there may be
# cases where timeouts occur in the underlying libraries due to issues in
# reading events or the need to skip a particular event. While it is uncommon
# for Falco to experience consecutive event timeouts, it has the capability to
# detect such situations. You can configure the maximum number of consecutive
# timeouts without an event after which Falco will generate an alert, but please
# note that this requires setting Falco's operational logs `log_level` to a
# minimum of `notice`. The default value is set to 1000 consecutive timeouts
# without receiving any events. The mapping of this value to a time interval
# depends on the CPU frequency.
syscall_event_timeouts:
max_consecutives: 1000
# [Stable] `syscall_event_drops`
#
# Generates "Falco internal: syscall event drop" rule output when `priority=debug` at minimum
#
# --- [Description]
#
# Falco uses a shared buffer between the kernel and userspace to pass system
# call information. When Falco detects that this buffer is full and system calls
# have been dropped, it can take one or more of the following actions:
# - ignore: do nothing (default when list of actions is empty)
# - log: log a DEBUG message noting that the buffer was full
# - alert: emit a Falco alert noting that the buffer was full
# - exit: exit Falco with a non-zero rc
#
# Note that it is not possible to combine the ignore action with log/alert at the same time.
#
# The rate at which log/alert messages are emitted is governed by a token
# bucket. The rate corresponds to one message every 30 seconds with a burst of
# one message (by default).
#
# The messages are emitted when the percentage of dropped system calls with
# respect to the number of events in the last second is greater than the given
# threshold (a double in the range [0, 1]). If you want to be alerted on any
# drops, set the threshold to 0.
#
# For debugging/testing it is possible to simulate the drops using the
# `simulate_drops: true` option. In this case the threshold does not apply.
#
# --- [Usage]
#
# Enabled by default, but requires Falco rules config `priority` set to `debug`.
# Emits a Falco rule named "Falco internal: syscall event drop" as many times in
# a given time period as dictated by the settings. Statistics here reflect the
# delta in a 1s time period.
#
# If instead you prefer periodic metrics of monotonic counters at a regular
# interval, which include syscall drop statistics and additional metrics,
# explore the `metrics` configuration option.
# -- For debugging/testing it is possible to simulate the drops using
# the `simulate_drops: true` option. In this case the threshold does not apply.
syscall_event_drops:
# -- The messages are emitted when the percentage of dropped system calls
# with respect to the number of events in the last second
# is greater than the given threshold (a double in the range [0, 1]).
threshold: .1
# -- Actions to be taken when system calls were dropped from the circular buffer.
actions:
- log
- alert
# -- Rate at which log/alert messages are emitted.
rate: .03333
# -- Max burst of messages emitted.
max_burst: 1
# -- Flag to enable drops for debug purposes.
simulate_drops: false
# [Experimental] `metrics`
#
# -- Generates "Falco internal: metrics snapshot" rule output when `priority=info` at minimum
#
# periodic metric snapshots (including stats and resource utilization) captured
# at regular intervals
#
# --- [Description]
#
# Consider these key points about the `metrics` feature in Falco:
#
# - It introduces a redesigned stats/metrics system.
# - Native support for resource utilization metrics and specialized performance
# metrics.
# - Metrics are emitted as monotonic counters at predefined intervals
# (snapshots).
# - All metrics are consolidated into a single log message, adhering to the
# established rules schema and naming conventions.
# - Additional info fields complement the metrics and facilitate customized
# statistical analyses and correlations.
# - The metrics framework is designed for easy future extension.
#
# The `metrics` feature follows a specific schema and field naming convention.
# All metrics are collected as subfields under the `output_fields` key, similar
# to regular Falco rules. Each metric field name adheres to the grammar used in
# Falco rules. There are two new field classes introduced: `falco.` and `scap.`.
# The `falco.` class represents userspace counters, statistics, resource
# utilization, or useful information fields. The `scap.` class represents
# counters and statistics mostly obtained from Falco's kernel instrumentation
# before events are sent to userspace, but can include scap userspace stats as
# well.
#
# It's important to note that the output fields and their names can be subject
# to change until the metrics feature reaches a stable release.
#
# To customize the hostname in Falco, you can set the environment variable
# `FALCO_HOSTNAME` to your desired hostname. This is particularly useful in
# Kubernetes deployments where the hostname can be set to the pod name.
#
# --- [Usage]
#
# `enabled`: Disabled by default.
#
# `interval`: The stats interval in Falco follows the time duration definitions
# used by Prometheus.
# https://prometheus.io/docs/prometheus/latest/querying/basics/#time-durations
#
# Time durations are specified as a number, followed immediately by one of the
# following units:
#
# ms - millisecond
# s - second
# m - minute
# h - hour
# d - day - assuming a day has always 24h
# w - week - assuming a week has always 7d
# y - year - assuming a year has always 365d
#
# Example of a valid time duration: 1h30m20s10ms
#
# A minimum interval of 100ms is enforced for metric collection. However, for
# production environments, we recommend selecting one of the following intervals
# for optimal monitoring:
#
# 15m
# 30m
# 1h
# 4h
# 6h
#
# `output_rule`: To enable seamless metrics and performance monitoring, we
# recommend emitting metrics as the rule "Falco internal: metrics snapshot".
# This option is particularly useful when Falco logs are preserved in a data
# lake. Please note that to use this option, the Falco rules config `priority`
# must be set to `info` at a minimum.
#
# `output_file`: Append stats to a `jsonl` file. Use with caution in production
# as Falco does not automatically rotate the file.
#
# `resource_utilization_enabled`: Emit CPU and memory usage metrics. CPU usage
# is reported as a percentage of one CPU and can be normalized to the total
# number of CPUs to determine overall usage. Memory metrics are provided in raw
# units (`kb` for `RSS`, `PSS` and `VSZ` or `bytes` for `container_memory_used`)
# and can be uniformly converted to megabytes (MB) using the
# `convert_memory_to_mb` functionality. In environments such as Kubernetes when
# deployed as daemonset, it is crucial to track Falco's container memory usage.
# To customize the path of the memory metric file, you can create an environment
# variable named `FALCO_CGROUP_MEM_PATH` and set it to the desired file path. By
# default, Falco uses the file `/sys/fs/cgroup/memory/memory.usage_in_bytes` to
# monitor container memory usage, which aligns with Kubernetes'
# `container_memory_working_set_bytes` metric. Finally, we emit the overall host
# CPU and memory usages, along with the total number of processes and open file
# descriptors (fds) on the host, obtained from the proc file system unrelated to
# Falco's monitoring. These metrics help assess Falco's usage in relation to the
# server's workload intensity.
#
# `state_counters_enabled`: Emit counters related to Falco's state engine, including
# added, removed threads or file descriptors (fds), and failed lookup, store, or
# retrieve actions in relation to Falco's underlying process cache table (threadtable).
# We also log the number of currently cached containers if applicable.
#
# `kernel_event_counters_enabled`: Emit kernel side event and drop counters, as
# an alternative to `syscall_event_drops`, but with some differences. These
# counters reflect monotonic values since Falco's start and are exported at a
# constant stats interval.
#
# `libbpf_stats_enabled`: Exposes statistics similar to `bpftool prog show`,
# providing information such as the number of invocations of each BPF program
# attached by Falco and the time spent in each program measured in nanoseconds.
# To enable this feature, the kernel must be >= 5.1, and the kernel
# configuration `/proc/sys/kernel/bpf_stats_enabled` must be set. This option,
# or an equivalent statistics feature, is not available for non `*bpf*` drivers.
# Additionally, please be aware that the current implementation of `libbpf` does
# not support granularity of statistics at the bpf tail call level.
#
# `include_empty_values`: When the option is set to true, fields with an empty
# numeric value will be included in the output. However, this rule does not
# apply to high-level fields such as `n_evts` or `n_drops`; they will always be
# included in the output even if their value is empty. This option can be
# beneficial for exploring the data schema and ensuring that fields with empty
# values are included in the output.
# todo: prometheus export option
# todo: syscall_counters_enabled option
metrics:
enabled: true
interval: 1h
output_rule: true
# output_file: /tmp/falco_stats.jsonl
resource_utilization_enabled: true
state_counters_enabled: true
kernel_event_counters_enabled: true
libbpf_stats_enabled: true
convert_memory_to_mb: true
include_empty_values: false
#######################################
# Falco performance tuning (advanced) #
#######################################
# [Experimental] `base_syscalls`, use with caution, read carefully
#
# --- [Description]
#
# -- This option configures the set of syscalls that Falco traces.
#
# --- [Falco's State Engine]
#
# Falco requires a set of syscalls to build up state in userspace. For example,
# when spawning a new process or network connection, multiple syscalls are
# involved. Furthermore, properties of a process during its lifetime can be
# modified by syscalls. Falco accounts for this by enabling the collection of
# additional syscalls than the ones defined in the rules and by managing a smart
# process cache table in userspace. Processes are purged from this table when a
# process exits.
#
# By default, with
# ```
# base_syscalls.custom_set = []
# base_syscalls.repair = false
# ```
# Falco enables tracing for a syscall set gathered: (1) from (enabled) Falco
# rules (2) from a static, more verbose set defined in
# `libsinsp::events::sinsp_state_sc_set` in
# libs/userspace/libsinsp/events/sinsp_events_ppm_sc.cpp. This allows Falco to
# successfully build up its state engine and life-cycle management.
#
# If the default behavior described above does not fit the user's use case for
# Falco, the `base_syscalls` option allows for finer end-user control of
# syscalls traced by Falco.
#
# --- [base_syscalls.custom_set]
#
# CAUTION: Misconfiguration of this setting may result in incomplete Falco event
# logs or Falco being unable to trace events entirely.
#
# `base_syscalls.custom_set` allows the user to explicitly define an additional
# set of syscalls to be traced in addition to the syscalls from each enabled
# Falco rule.
#
# This is useful in lowering CPU utilization and further tailoring Falco to
# specific environments according to your threat model and budget constraints.
#
# --- [base_syscalls.repair]
#
# `base_syscalls.repair` is an alternative to Falco's default state engine
# enforcement. When enabled, this option is designed to (1) ensure that Falco's
# state engine is correctly and successfully built-up (2) be the most system
# resource-friendly by activating the least number of additional syscalls
# (outside of those enabled for enabled rules)
#
# Setting `base_syscalls.repair` to `true` allows Falco to automatically
# configure what is described in the [Suggestions] section below.
#
# `base_syscalls.repair` can be enabled with an empty custom set, meaning with
# the following,
# ```
# base_syscalls.custom_set = []
# base_syscalls.repair = true
# ```
# Falco enables tracing for a syscall set gathered: (1) from (enabled) Falco
# rules (2) from minimal set of additional syscalls needed to "repair" the
# state engine and properly log event conditions specified in enabled Falco
# rules
#
# --- [Usage]
#
# List of system calls names (<syscall-name>), negative ("!<syscall-name>")
# notation supported.
#
# Example:
# base_syscalls.custom_set: [<syscall-name>, <syscall-name>, "!<syscall-name>"]
# base_syscalls.repair: <bool>
#
# We recommend only excluding syscalls, e.g. "!mprotect", if you need a fast
# deployment update (overriding rules), else remove unwanted syscalls from the
# Falco rules.
#
# Passing `-o "log_level=debug" -o "log_stderr=true" --dry-run` to Falco's cmd
# args will print the final set of syscalls to STDOUT.
#
# --- [Suggestions]
#
# NOTE: setting `base_syscalls.repair: true` automates the following suggestions
# for you.
#
# These suggestions are subject to change as Falco and its state engine evolve.
#
# For execve* events: Some Falco fields for an execve* syscall are retrieved
# from the associated `clone`, `clone3`, `fork`, `vfork` syscalls when spawning
# a new process. The `close` syscall is used to purge file descriptors from
# Falco's internal thread / process cache table and is necessary for rules
# relating to file descriptors (e.g. open, openat, openat2, socket, connect,
# accept, accept4 ... and many more)
#
# Consider enabling the following syscalls in `base_syscalls.custom_set` for
# process rules: [clone, clone3, fork, vfork, execve, execveat, close]
#
# For networking related events: While you can log `connect` or `accept*`
# syscalls without the socket syscall, the log will not contain the ip tuples.
# Additionally, for `listen` and `accept*` syscalls, the `bind` syscall is also
# necessary.
#
# We recommend the following as the minimum set for networking-related rules:
# [clone, clone3, fork, vfork, execve, execveat, close, socket, bind,
# getsockopt]
#
# Lastly, for tracking the correct `uid`, `gid` or `sid`, `pgid` of a process
# when the running process opens a file or makes a network connection, consider
# adding the following to the above recommended syscall sets: ... setresuid,
# setsid, setuid, setgid, setpgid, setresgid, setsid, capset, chdir, chroot,
# fchdir ...
base_syscalls:
custom_set: []
repair: false
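# For example, a sketch of the "fast deployment update" case mentioned above: exclude mprotect
# and let the repair logic add whatever else the state engine needs:
# base_syscalls:
#   custom_set: ["!mprotect"]
#   repair: true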
#################################################
# Falco cloud orchestration systems integration #
#################################################
# [Stable] Guidance for Kubernetes container engine command-line args settings
#
# Modern cloud environments, particularly Kubernetes, heavily rely on
# containerized workload deployments. When capturing events with Falco, it
# becomes essential to identify the owner of the workload for which events are
# being captured, such as syscall events. Falco integrates with the container
# runtime to enrich its events with container information, including fields like
# `container.image.repository`, `container.image.tag`, ... , `k8s.ns.name`,
# `k8s.pod.name`, `k8s.pod.*` in the Falco output (Falco retrieves Kubernetes
# namespace and pod name directly from the container runtime, see
# https://falco.org/docs/reference/rules/supported-fields/#field-class-container).
#
# Furthermore, Falco exposes container events themselves as a data source for
# alerting. To achieve this integration with the container runtime, Falco
# requires access to the runtime socket. By default, for Kubernetes, Falco
# attempts to connect to the following sockets:
# "/run/containerd/containerd.sock", "/run/crio/crio.sock",
# "/run/k3s/containerd/containerd.sock". If you have a custom path, you can use
# the `--cri` option to specify the correct location.
#
# In some cases, you may encounter empty fields for container metadata. To
# address this, you can explore the `--disable-cri-async` option, which disables
# asynchronous fetching if the fetch operation is not completing quickly enough.
#
# To get more information on these command-line arguments, you can run `falco
# --help` in your terminal to view their current descriptions.
#
# !!! The options mentioned here are not available in the falco.yaml
# configuration file. Instead, they can be used as command-line arguments
# when running the Falco command.