IMPORTANT: No additional bug fixes or documentation updates
will be released for this version. For the latest information, see the
current release documentation.
auditbeat.reference.yml
The following reference file is available with your Auditbeat installation. It
shows all non-deprecated Auditbeat options. You can copy from this file and paste
configurations into the auditbeat.yml
file to customize it.
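For example, to monitor additional directories with the file_integrity module, you can lift its options from the reference file into auditbeat.yml and adjust the values. A minimal sketch (the paths and hash type below are illustrative choices, not defaults):

auditbeat.modules:
- module: file_integrity
  # Directories to monitor; adjust to the hosts you care about.
  paths:
  - /bin
  - /etc
  # sha1 is the default; sha256 is chosen here only for illustration.
  hash_types: [sha256]
  # Also watch subdirectories (disabled by default).
  recursive: true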
The reference file is located in the same directory as the
auditbeat.yml
file. To locate the file, see Directory layout.
The contents of the file are included here for your convenience.
########################## Auditbeat Configuration #############################

# This is a reference configuration file documenting all non-deprecated options
# in comments. For a shorter configuration example that contains only the most
# common options, please see auditbeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/auditbeat/index.html

#============================ Config Reloading ================================

# Config reloading allows you to dynamically load modules. Each file that is
# monitored must contain one or more modules as a list.
auditbeat.config.modules:

  # Glob pattern for configuration reloading
  path: ${path.config}/modules.d/*.yml

  # Period on which files under path should be checked for changes
  reload.period: 10s

  # Set to true to enable config reloading
  reload.enabled: false

# Maximum amount of time to randomly delay the start of a dataset. Use 0 to
# disable startup delay.
auditbeat.max_start_delay: 10s

#========================== Modules configuration =============================
auditbeat.modules:

# The auditd module collects events from the audit framework in the Linux
# kernel. You need to specify audit rules for the events that you want to audit.
- module: auditd
  resolve_ids: true
  failure_mode: silent
  backlog_limit: 8196
  rate_limit: 0
  include_raw_message: false
  include_warnings: false

  # Set to true to publish fields with null values in events.
  #keep_null: false

  # Load audit rules from separate files. Same format as audit.rules(7).
  audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ]
  audit_rules: |
    ## Define audit rules here.
    ## Create file watches (-w) or syscall audits (-a or -A). Uncomment these
    ## examples or add your own rules.

    ## If you are on a 64 bit platform, everything should be running
    ## in 64 bit mode. This rule will detect any use of the 32 bit syscalls
    ## because this might be a sign of someone exploiting a hole in the 32
    ## bit API.
    #-a always,exit -F arch=b32 -S all -F key=32bit-abi

    ## Executions.
    #-a always,exit -F arch=b64 -S execve,execveat -k exec

    ## External access (warning: these can be expensive to audit).
    #-a always,exit -F arch=b64 -S accept,bind,connect -F key=external-access

    ## Identity changes.
    #-w /etc/group -p wa -k identity
    #-w /etc/passwd -p wa -k identity
    #-w /etc/gshadow -p wa -k identity

    ## Unauthorized access attempts.
    #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EACCES -k access
    #-a always,exit -F arch=b64 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access

# The file integrity module sends events when files are changed (created,
# updated, deleted). The events contain file metadata and hashes.
- module: file_integrity
  paths:
  - /bin
  - /usr/bin
  - /sbin
  - /usr/sbin
  - /etc

  # List of regular expressions to filter out notifications for unwanted files.
  # Wrap in single quotes to work around YAML escaping rules. By default no files
  # are ignored.
  exclude_files:
  - '(?i)\.sw[nop]$'
  - '~$'
  - '/\.git($|/)'

  # List of regular expressions used to explicitly include files. When configured,
  # Auditbeat will ignore files unless they match a pattern.
  #include_files:
  #- '/\.ssh($|/)'

  # Scan over the configured file paths at startup and send events for new or
  # modified files since the last time Auditbeat was running.
  scan_at_start: true

  # Average scan rate. This throttles the amount of CPU and I/O that Auditbeat
  # consumes at startup while scanning. Default is "50 MiB".
  scan_rate_per_sec: 50 MiB

  # Limit on the size of files that will be hashed. Default is "100 MiB".
  max_file_size: 100 MiB

  # Hash types to compute when the file changes. Supported types are
  # blake2b_256, blake2b_384, blake2b_512, md5, sha1, sha224, sha256, sha384,
  # sha512, sha512_224, sha512_256, sha3_224, sha3_256, sha3_384, sha3_512, and xxh64.
  # Default is sha1.
  hash_types: [sha1]

  # Detect changes to files included in subdirectories. Disabled by default.
  recursive: false

  # Set to true to publish fields with null values in events.
  #keep_null: false

#================================ General ======================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
#  env: staging

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false

# Internal queue configuration for buffering events to be published.
#queue:
  # Queue type by name (default 'mem')
  # The memory queue will present all available events (up to the outputs
  # bulk_max_size) to the output, the moment the output is ready to serve
  # another batch of events.
  #mem:
    # Max number of events the queue can buffer.
    #events: 4096

    # Hints the minimum number of events stored in the queue,
    # before providing a batch of events to the outputs.
    # The default value is set to 2048.
    # A value of 0 ensures events are immediately available
    # to be sent to the outputs.
    #flush.min_events: 2048

    # Maximum duration after which events are available to the outputs,
    # if the number of events stored in the queue is < `flush.min_events`.
    #flush.timeout: 1s

  # The spool queue will store events in a local spool file, before
  # forwarding the events to the outputs.
  #
  # Beta: spooling to disk is currently a beta feature. Use with care.
  #
  # The spool file is a circular buffer, which blocks once the file/buffer is full.
  # Events are put into a write buffer and flushed once the write buffer
  # is full or the flush_timeout is triggered.
  # Once ACKed by the output, events are removed immediately from the queue,
  # making space for new events to be persisted.
  #spool:
    # The file namespace configures the file path and the file creation settings.
    # Once the file exists, the `size`, `page_size` and `prealloc` settings
    # will have no more effect.
    #file:
      # Location of spool file. The default value is ${path.data}/spool.dat.
      #path: "${path.data}/spool.dat"

      # Configure file permissions if file is created. The default value is 0600.
      #permissions: 0600

      # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB.
      #size: 100MiB

      # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB.
      #page_size: 4KiB

      # If prealloc is set, the required space for the file is reserved using
      # truncate. The default value is true.
      #prealloc: true

    # Spool writer settings
    # Events are serialized into a write buffer. The write buffer is flushed if:
    # - The buffer limit has been reached.
    # - The configured limit of buffered events is reached.
    # - The flush timeout is triggered.
    #write:
      # Sets the write buffer size.
      #buffer_size: 1MiB

      # Maximum duration after which events are flushed if the write buffer
      # is not full yet. The default value is 1s.
      #flush.timeout: 1s

      # Number of maximum buffered events. The write buffer is flushed once the
      # limit is reached.
      #flush.events: 16384

      # Configure the on-disk event encoding. The encoding can be changed
      # between restarts.
      # Valid encodings are: json, ubjson, and cbor.
      #codec: cbor

    #read:
      # Reader flush timeout, waiting for more events to become available, so
      # as to fill a complete batch as required by the outputs.
      # If flush_timeout is 0, all available events are forwarded to the
      # outputs immediately.
      # The default value is 0s.
      #flush.timeout: 0s

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

#================================ Processors ===================================

# Processors are used to reduce the number of fields in the exported event or to
# enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the initial
# event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields,
# decode_json_fields, and add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
#processors:
#- include_fields:
#    fields: ["cpu"]
#- drop_fields:
#    fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
#    when:
#      equals:
#        http.code: 200
#
# The following example renames the field a to b:
#
#processors:
#- rename:
#    fields:
#    - from: "a"
#      to: "b"
#
# The following example tokenizes the string into fields:
#
#processors:
#- dissect:
#    tokenizer: "%{key1} - %{key2}"
#    field: "message"
#    target_prefix: "dissect"
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, DigitalOcean,
# Tencent Cloud, and Alibaba Cloud.
#
#processors:
#- add_cloud_metadata: ~
#
# The following example enriches each event with the machine's local time zone
# offset from UTC.
#
#processors:
#- add_locale:
#    format: offset
#
# The following example enriches each event with docker metadata; it matches
# given fields to an existing container id and adds info from that container:
#
#processors:
#- add_docker_metadata:
#    host: "unix:///var/run/docker.sock"
#    match_fields: ["system.process.cgroup.id"]
#    match_pids: ["process.pid", "process.ppid"]
#    match_source: true
#    match_source_index: 4
#    match_short_id: false
#    cleanup_timeout: 60
#    labels.dedot: false
#    # To connect to Docker over TLS you must specify a client and CA certificate.
#    #ssl:
#    #  certificate_authority: "/etc/pki/root/ca.pem"
#    #  certificate:           "/etc/pki/client/cert.pem"
#    #  key:                   "/etc/pki/client/cert.key"
#
# The following example enriches each event with docker metadata; it matches
# container id from log path available in `source` field (by default it expects
# it to be /var/lib/docker/containers/*/*.log).
#
#processors:
#- add_docker_metadata: ~
#
# The following example enriches each event with host metadata.
#
#processors:
#- add_host_metadata:
#    netinfo.enabled: false
#
# The following example enriches each event with process metadata using
# process IDs included in the event.
#
#processors:
#- add_process_metadata:
#    match_pids: ["system.process.ppid"]
#    target: system.process.parent
#
# The following example decodes fields containing JSON strings
# and replaces the strings with valid JSON objects.
#
#processors:
#- decode_json_fields:
#    fields: ["field1", "field2", ...]
#    process_array: false
#    max_depth: 1
#    target: ""
#    overwrite_keys: false
#
#processors:
#- decompress_gzip_field:
#    from: "field1"
#    to: "field2"
#    ignore_missing: false
#    fail_on_error: true
#
# The following example copies the value of message to message_copied
#
#processors:
#- copy_fields:
#    fields:
#    - from: message
#      to: message_copied
#    fail_on_error: true
#    ignore_missing: false
#
# The following example truncates the value of message to 1024 bytes
#
#processors:
#- truncate_fields:
#    fields:
#    - message
#    max_bytes: 1024
#    fail_on_error: false
#    ignore_missing: true
#
# The following example preserves the raw message under event.original
#
#processors:
#- copy_fields:
#    fields:
#    - from: message
#      to: event.original
#    fail_on_error: false
#    ignore_missing: true
#- truncate_fields:
#    fields:
#    - event.original
#    max_bytes: 1024
#    fail_on_error: false
#    ignore_missing: true

#============================= Elastic Cloud ==================================

# These settings simplify using Auditbeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs ======================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output -------------------------------
output.elasticsearch:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Configure escaping HTML symbols in strings.
  #escape_html: false

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the URL with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Number of workers per Elasticsearch host.
  #worker: 1

  # Optional index name. The default is "auditbeat" plus date
  # and generates [auditbeat-]YYYY.MM.DD keys.
  # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
  #index: "auditbeat-%{[agent.version]}-%{+yyyy.MM.dd}"

  # Optional ingest node pipeline. By default no pipeline will be used.
  #pipeline: ""

  # Optional HTTP path
  #path: "/elasticsearch"

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server URL
  #proxy_url: http://proxy:3128

  # Whether to disable proxy settings for outgoing connections. If true, this
  # takes precedence over both the proxy_url field and any environment settings
  # (HTTP_PROXY, HTTPS_PROXY). The default is false.
  #proxy_disable: false

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # The number of seconds to wait before trying to reconnect to Elasticsearch
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Elasticsearch after a network error. The default is 60s.
  #backoff.max: 60s

  # Configure HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#----------------------------- Logstash output ---------------------------------
#output.logstash:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Number of workers per Logstash host.
  #worker: 1

  # Set gzip compression level.
  #compression_level: 3

  # Configure escaping HTML symbols in strings.
  #escape_html: false

  # Optional maximum time to live for a connection to Logstash, after which the
  # connection will be re-established. A value of `0s` (the default) will
  # disable this feature.
  #
  # Not yet supported for async connections (i.e. with the "pipelining" option set)
  #ttl: 30s

  # Optionally load-balance events between Logstash hosts. Default is false.
  #loadbalance: false

  # Number of batches to be sent asynchronously to Logstash while processing
  # new batches.
  #pipelining: 2

  # If enabled, only a subset of events in a batch of events is transferred per
  # transaction. The number of events to be sent increases up to `bulk_max_size`
  # if no error is encountered.
  #slow_start: false

  # The number of seconds to wait before trying to reconnect to Logstash
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Logstash after a network error. The default is 60s.
  #backoff.max: 60s

  # Optional index name. The default index name is set to auditbeat
  # in all lowercase.
  #index: 'auditbeat'

  # SOCKS5 proxy server URL
  #proxy_url: socks5://user:password@socks5-server:2233

  # Resolve names locally when using a proxy server. Defaults to false.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting
  # and retry until all events are published. Set max_retries to a value less
  # than 0 to retry until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Logstash request. The
  # default is 2048.
  #bulk_max_size: 2048

  # The number of seconds to wait for responses from the Logstash server before
  # timing out. The default is 30s.
  #timeout: 30s

#------------------------------- Kafka output ----------------------------------
#output.kafka:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Kafka broker addresses from which to fetch the cluster metadata.
  # The cluster metadata contain the actual Kafka brokers events are published
  # to.
  #hosts: ["localhost:9092"]

  # The Kafka topic used for produced events. The setting can be a format string
  # using any event field. To set the topic from document type use `%{[type]}`.
  #topic: beats

  # The Kafka event key setting. Use format string to create a unique event key.
  # By default no event key will be generated.
  #key: ''

  # The Kafka event partitioning strategy. Default hashing strategy is `hash`
  # using the `output.kafka.key` setting or randomly distributes events if
  # `output.kafka.key` is not configured.
  #partition.hash:
    # If enabled, events will only be published to partitions with reachable
    # leaders. Default is false.
    #reachable_only: false

    # Configure alternative event field names used to compute the hash value.
    # If empty `output.kafka.key` setting will be used.
    # Default value is empty list.
    #hash: []

  # Authentication details. Password is required if username is set.
  #username: ''
  #password: ''

  # Kafka version Auditbeat is assumed to run against. Defaults to "1.0.0".
  #version: '1.0.0'

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false

  # Metadata update configuration. Metadata contains leader information
  # used to decide which broker to use when publishing.
  #metadata:
    # Max metadata request retry attempts when cluster is in middle of leader
    # election. Defaults to 3 retries.
    #retry.max: 3

    # Wait time between retries during leader elections. Default is 250ms.
    #retry.backoff: 250ms

    # Refresh metadata interval. Defaults to every 10 minutes.
    #refresh_frequency: 10m

    # Strategy for fetching the topics metadata from the broker. Default is false.
    #full: false

  # The number of concurrent load-balanced Kafka output workers.
  #worker: 1

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Kafka request. The default
  # is 2048.
  #bulk_max_size: 2048

  # Duration to wait before sending bulk Kafka request. 0 is no delay. The default
  # is 0.
  #bulk_flush_frequency: 0s

  # The number of seconds to wait for responses from the Kafka brokers before
  # timing out. The default is 30s.
  #timeout: 30s

  # The maximum duration a broker will wait for number of required ACKs. The
  # default is 10s.
  #broker_timeout: 10s

  # The number of messages buffered for each Kafka broker. The default is 256.
  #channel_buffer_size: 256

  # The keep-alive period for an active network connection. If 0s, keep-alives
  # are disabled. The default is 0 seconds.
  #keep_alive: 0

  # Sets the output compression codec. Must be one of none, snappy, and gzip. The
  # default is gzip.
  #compression: gzip

  # Set the compression level. Currently only gzip provides a compression level
  # between 0 and 9. The default value is chosen by the compression algorithm.
  #compression_level: 4

  # The maximum permitted size of JSON-encoded messages. Bigger messages will be
  # dropped. The default value is 1000000 (bytes). This value should be equal to
  # or less than the broker's message.max.bytes.
  #max_message_bytes: 1000000

  # The ACK reliability level required from broker. 0=no response, 1=wait for
  # local commit, -1=wait for all replicas to commit. The default is 1. Note:
  # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
  # on error.
  #required_acks: 1

  # The configurable ClientID used for logging, debugging, and auditing
  # purposes. The default is "beats".
  #client_id: beats

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Configure SSL verification mode.
  # If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#------------------------------- Redis output ----------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false

  # The list of Redis servers to connect to. If load-balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
  #hosts: ["localhost:6379"]

  # The name of the Redis list or channel the events are published to. The
  # default is auditbeat.
  #key: auditbeat

  # The password to authenticate to Redis with. The default is no authentication.
  #password:

  # The Redis database number where the events are published. The default is 0.
  #db: 0

  # The Redis data type to use for publishing events. If the data type is list,
  # the Redis RPUSH command is used. If the data type is channel, the Redis
  # PUBLISH command is used. The default value is list.
  #datatype: list

  # The number of workers to use for each host configured to publish events to
  # Redis. Use this setting along with the loadbalance option. For example, if
  # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
  # host).
  #worker: 1

  # If set to true and multiple hosts or workers are configured, the output
  # plugin load balances published events onto all Redis hosts. If set to false,
  # the output plugin sends all events to only one host (determined at random)
  # and will switch to another host if the currently selected one becomes
  # unreachable. The default value is true.
  #loadbalance: true

  # The Redis connection timeout in seconds. The default is 5 seconds.
  #timeout: 5s

  # The number of times to retry publishing an event after a publishing failure.
  # After the specified number of retries, the events are typically dropped.
  # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  # all events are published. Set max_retries to a value less than 0 to retry
  # until all events are published. The default is 3.
  #max_retries: 3

  # The number of seconds to wait before trying to reconnect to Redis
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Redis after a network error. The default is 60s.
  #backoff.max: 60s

  # The maximum number of events to bulk in a single Redis request or pipeline.
  # The default is 2048.
  #bulk_max_size: 2048

  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
  # value must be a URL with a scheme of socks5://.
  #proxy_url:

  # This option determines whether Redis hostnames are resolved locally when
  # using a proxy. The default value is false, which means that name resolution
  # occurs on the proxy server.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

#------------------------------- File output -----------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false

  # Path to the directory where to save the generated files. The option is
  # mandatory.
  #path: "/tmp/auditbeat"

  # Name of the generated files. The default is `auditbeat` and it generates
  # files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc.
  #filename: auditbeat

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every Auditbeat restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first. The
  # default is 7 files.
  #number_of_files: 7

  # Permissions to use for file creation. The default is 0600.
  #permissions: 0600

#----------------------------- Console output ---------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Configure JSON encoding
  #codec.json:
    # Pretty-print JSON event
    #pretty: false

    # Configure escaping HTML symbols in strings.
    #escape_html: false

#================================= Paths ======================================

# The home path for the Auditbeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the Auditbeat installation.
# This is the default base path for configuration files, including the main YAML
# configuration file and the Elasticsearch template file. If not set by a CLI
# flag or in the configuration file, the default for the configuration path is
# the home path.
#path.config: ${path.home}

# The data path for the Auditbeat installation. This is the default base path
# for all the files in which Auditbeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for an Auditbeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs

#================================ Keystore ==========================================

# Location of the Keystore containing the keys and their sensitive values.
#keystore.path: "${path.config}/beats.keystore"

#============================== Dashboards =====================================

# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The directory from where to read the dashboards. The default is the `kibana`
# folder in the home path.
#setup.dashboards.directory: ${path.home}/kibana

# The URL from where to download the dashboards archive. It is used instead of
# the directory if it has a value.
#setup.dashboards.url:

# The file archive (zip file) from where to read the dashboards. It is used instead
# of the directory when it has a value.
#setup.dashboards.file:

# In case the archive contains the dashboards from multiple Beats, this lets you
# select which one to load. You can load all the dashboards in the archive by
# setting this to the empty string.
#setup.dashboards.beat: auditbeat

# The name of the Kibana index to use for setting the configuration. Default is ".kibana"
#setup.dashboards.kibana_index: .kibana

# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#setup.dashboards.index:

# Always use the Kibana API for loading the dashboards instead of autodetecting
# how to install the dashboards by first querying Elasticsearch.
#setup.dashboards.always_kibana: false

# If true and Kibana is not reachable at the time when dashboards are loaded,
# it will retry to reconnect to Kibana instead of exiting with an error.
#setup.dashboards.retry.enabled: false

# Duration interval between Kibana connection retries.
#setup.dashboards.retry.interval: 1s

# Maximum number of retries before exiting with an error, 0 for unlimited retrying.
#setup.dashboards.retry.maximum: 0

#============================== Template =====================================

# A template is used to set the mapping in Elasticsearch.
# By default template loading is enabled and the template is loaded.
# These settings can be adjusted to load your own template or overwrite existing ones.

# Set to false to disable template loading.
#setup.template.enabled: true

# Template name. By default the template name is "auditbeat-%{[agent.version]}".
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
#setup.template.name: "auditbeat-%{[agent.version]}"

# Template pattern. By default the template pattern is "auditbeat-%{[agent.version]}-*" to apply to the default index settings.
# The first part is the version of the beat and then -* is used to match all daily indices.
# The template name and pattern have to be set in case the Elasticsearch index pattern is modified.
#setup.template.pattern: "auditbeat-%{[agent.version]}-*"

# Path to fields.yml file to generate the template
#setup.template.fields: "${path.config}/fields.yml"

# A list of fields to be added to the template and Kibana index pattern. Also
# specify setup.template.overwrite: true to overwrite the existing template.
# This setting is experimental.
#setup.template.append_fields:
#- name: field_name
#  type: field_type

# Enable JSON template loading. If this is enabled, the fields.yml is ignored.
#setup.template.json.enabled: false

# Path to the JSON template file
#setup.template.json.path: "${path.config}/template.json"

# Name under which the template is stored in Elasticsearch
#setup.template.json.name: ""

# Overwrite existing template
#setup.template.overwrite: false

# Elasticsearch template settings
setup.template.settings:

  # A dictionary of settings to place into the settings.index dictionary
  # of the Elasticsearch template. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
  #index:
    #number_of_shards: 1
    #codec: best_compression
    #number_of_routing_shards: 30

  # A dictionary of settings for the _source field. For more details, please check
  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
  #_source:
    #enabled: false

#============================== Setup ILM =====================================

# Configure index lifecycle management (ILM). These settings create a write
# alias and add additional settings to the index template. When ILM is enabled,
# output.elasticsearch.index is ignored, and the write alias is used to set the
# index name.

# Enable ILM support. Valid values are true, false, and auto. When set to auto
# (the default), the Beat uses index lifecycle management when it connects to a
# cluster that supports ILM; otherwise, it creates daily indices.
#setup.ilm.enabled: auto

# Set the prefix used in the index lifecycle write alias name. The default alias
# name is 'auditbeat-%{[agent.version]}'.
#setup.ilm.rollover_alias: "auditbeat"

# Set the rollover index pattern. The default is "%{now/d}-000001".
#setup.ilm.pattern: "{now/d}-000001"

# Set the lifecycle policy name. The default policy name is
# 'auditbeat-%{[agent.version]}'.
#setup.ilm.policy_name: "mypolicy"

# The path to a JSON file that contains a lifecycle policy configuration. Used
# to load your own lifecycle policy.
#setup.ilm.policy_file:

# Disable the check for an existing lifecycle policy. The default is false. If
# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy
# can be installed.
#setup.ilm.check_exists: false

# Overwrite the lifecycle policy at startup. The default is false.
#setup.ilm.overwrite: false

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

  # Optional HTTP path
  #path: ""

  # Use SSL settings for HTTPS. Default is true.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. The default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

#================================ Logging ======================================

# There are four options for the log output: file, stderr, syslog, eventlog.
# The file output is the default.

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: info

# Enable debug output for selected components. To enable all selectors use ["*"].
# Other available selectors are "beat", "publish", "service".
# Multiple selectors can be chained.
#logging.selectors: [ ]

# Send all logging output to stderr. The default is false.
#logging.to_stderr: false

# Send all logging output to syslog. The default is false.
#logging.to_syslog: false

# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false

# If enabled, Auditbeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  #path: /var/log/auditbeat

  # The name of the files where the logs are written to.
  #name: auditbeat

  # Configure log file size limit. If the limit is reached, the log file will be
  # automatically rotated.
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7

  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600

  # Enable log file rotation on time intervals in addition to size-based rotation.
  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
  # reported by the local system clock. All other intervals are calculated from the
  # Unix epoch. Defaults to disabled.
  #interval: 0

  # Rotate existing logs on startup rather than appending to the existing
  # file. Defaults to true.
  # rotateonstartup: true

# Set to true to log messages in JSON format.
#logging.json: false

#============================== X-Pack Monitoring ===============================
# Auditbeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires X-Pack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Auditbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "beats_system"
  #password: "changeme"

  # Dictionary of HTTP parameters to pass within the URL with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server URL
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is attempted. If
  # the indexing operation doesn't succeed after this many retries, the events are
  # dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  # The default is 50.
  #bulk_max_size: 50

  # The number of seconds to wait before trying to reconnect to Elasticsearch
  # after a network error. After waiting backoff.init seconds, the Beat
  # tries to reconnect. If the attempt fails, the backoff timer is increased
  # exponentially up to backoff.max. After a successful connection, the backoff
  # timer is reset. The default is 1s.
  #backoff.init: 1s

  # The maximum number of seconds to wait before attempting to connect to
  # Elasticsearch after a network error. The default is 60s.
  #backoff.max: 60s

  # Configure HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL-based connections are
  # susceptible to man-in-the-middle attacks.
  # Use only for testing. Default is `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions from 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. The default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client certificate key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the certificate key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE-based cipher suites
  #ssl.curve_types: []

  # Configure what types of renegotiation are supported. Valid options are
  # never, once, and freely. Default is never.
  #ssl.renegotiation: never

  #metrics.period: 10s
  #state.period: 1m

#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
# append ?pretty to the URL.

# Defines if the HTTP endpoint is enabled.
#http.enabled: false

# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost

# Port on which the HTTP endpoint will bind. Default is 5066.
#http.port: 5066

# Define which user should own the named pipe.
#http.named_pipe.user:

# Define the permissions that should be applied to the named pipe. Use the Security
# Descriptor Definition Language (SDDL) to define the permissions. This option cannot be used with
# `http.user`.
#http.named_pipe.security_descriptor:

#============================= Process Security ================================

# Enable or disable seccomp system call filtering on Linux. Default is enabled.
#seccomp.enabled: true

#================================= Migration ==================================

# This allows enabling 6.7 migration aliases.
#migration.6_to_7.enabled: false
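Taken together, a minimal working auditbeat.yml assembled from the options documented above might look like the following sketch. The host, audit rule, and paths are placeholders to adapt to your environment, not recommended values:

auditbeat.modules:
- module: auditd
  audit_rules: |
    ## Example rule: watch identity files (see the auditd module section above).
    -w /etc/passwd -p wa -k identity
- module: file_integrity
  paths:
  - /bin
  - /etc

output.elasticsearch:
  # Point this at your own Elasticsearch cluster.
  hosts: ["localhost:9200"]

logging.level: info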