# securityonion/salt/filebeat/etc/filebeat.yml
{%- if grains.role == 'so-heavynode' %}
{%- set MANAGER = salt['grains.get']('host', '') %}
{%- else %}
{%- set MANAGER = salt['grains.get']('master') %}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
{%- set HOSTNAME = salt['grains.get']('host', '') %}
{%- set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
{%- set WAZUHENABLED = salt['pillar.get']('global:wazuh', '0') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{%- set FBMEMEVENTS = salt['pillar.get']('filebeat:mem_events', 2048) -%}
{%- set FBMEMFLUSHMINEVENTS = salt['pillar.get']('filebeat:mem_flush_min_events', 2048) -%}
{%- set FBLSWORKERS = salt['pillar.get']('filebeat:ls_workers', 1) -%}
{%- set FBLSBULKMAXSIZE = salt['pillar.get']('filebeat:ls_bulk_max_size', 2048) -%}
{%- set FBLOGGINGLEVEL = salt['pillar.get']('filebeat:logging:level', 'warning') -%}
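# The tunables above come from the filebeat pillar. A minimal pillar sketch using
# only the keys referenced by the pillar.get calls above (values are illustrative
# examples, not required defaults):
#
#   filebeat:
#     mem_events: 4096
#     mem_flush_min_events: 2048
#     ls_workers: 2
#     ls_bulk_max_size: 2048
#     logging:
#       level: info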
name: {{ HOSTNAME }}
#================================ Logging ======================================
# There are four options for the log output: file, stderr, syslog, eventlog
# The file output is the default.
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
logging.level: {{ FBLOGGINGLEVEL }}
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]
# Send all logging output to syslog. The default is false.
#logging.to_syslog: false
# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false
# If enabled, filebeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true
# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s
# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs directory
  # under the home path (the binary location).
  path: /usr/share/filebeat/logs
  # The name of the files where the logs are written to.
  name: filebeat.log
  # Configure log file size limit. If limit is reached, log file will be
  # automatically rotated
  rotateeverybytes: 10485760 # = 10MB
  # Number of rotated log files to keep. Oldest files will be deleted first.
  keepfiles: 7
  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600
# Set to true to log messages in json format.
#logging.json: false
#========================== Modules configuration ============================
filebeat.config.modules:
  enabled: true
  path: ${path.config}/modules.d/*.yml
filebeat.modules:
#=========================== Filebeat prospectors =============================
# List of prospectors to fetch data.
filebeat.inputs:
#------------------------------ Log prospector --------------------------------
- type: udp
  enabled: true
  host: "0.0.0.0:514"
  fields:
    module: syslog
    dataset: syslog
  pipeline: "syslog"
  index: "so-syslog"
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
- type: tcp
  enabled: true
  host: "0.0.0.0:514"
  fields:
    module: syslog
    dataset: syslog
  pipeline: "syslog"
  index: "so-syslog"
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
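# The two inputs above listen on 514/udp and 514/tcp and ship anything received
# with module=syslog and dataset=syslog through the "syslog" ingest pipeline into
# the so-syslog index. One way to exercise the UDP listener from another host
# (a hypothetical client command, not part of this config):
#   logger --server <this-node> --port 514 --udp "filebeat syslog input test"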
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: log
  paths:
    - /logs/logscan/alerts.log
  fields:
    module: logscan
    dataset: alert
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: true
  close_removed: false
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %}
{%- if ZEEKVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %}
- type: log
  paths:
    - /nsm/zeek/logs/current/{{ LOGNAME }}.log
  fields:
    module: zeek
    dataset: {{ LOGNAME }}
    category: network
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: true
  close_removed: false
- type: log
  paths:
    - /nsm/import/*/zeek/logs/{{ LOGNAME }}.log
  fields:
    module: zeek
    dataset: {{ LOGNAME }}
    category: network
    imported: true
  processors:
    - add_tags:
        tags: ["import"]
    - dissect:
        tokenizer: "/nsm/import/%{import.id}/zeek/logs/%{import.file}"
        field: "log.file.path"
        target_prefix: ""
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- endfor %}
{%- endif %}
- type: log
  paths:
    - /nsm/suricata/eve*.json
  fields:
    module: suricata
    dataset: common
    category: network
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: false
  close_removed: false
- type: log
  paths:
    - /nsm/import/*/suricata/eve*.json
  fields:
    module: suricata
    dataset: common
    category: network
    imported: true
  processors:
    - add_tags:
        tags: ["import"]
    - dissect:
        tokenizer: "/nsm/import/%{import.id}/suricata/%{import.file}"
        field: "log.file.path"
        target_prefix: ""
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: false
  close_removed: false
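# Worked example for the dissect processors above (illustrative path): given a
# log.file.path of /nsm/import/1a2b3c/suricata/eve-2021-08-05.json, the tokenizer
# "/nsm/import/%{import.id}/suricata/%{import.file}" produces import.id: 1a2b3c
# and import.file: eve-2021-08-05.json, and add_tags marks the event with "import".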
{%- if STRELKAENABLED == 1 %}
- type: log
  paths:
    - /nsm/strelka/log/strelka.log
  fields:
    module: strelka
    category: file
    dataset: file
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- endif %}
{%- endif %}
{%- if WAZUHENABLED == 1 %}
- type: log
  paths:
    - /wazuh/archives/archives.json
  fields:
    module: ossec
    category: host
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  pipeline: "ossec"
  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- endif %}
{%- if FLEETMANAGER or FLEETNODE %}
- type: log
  paths:
    - /nsm/osquery/fleet/result.log
  fields:
    module: osquery
    dataset: query_result
    category: host
  processors:
    - drop_fields:
        fields: ["source", "prospector", "input", "offset", "beat"]
  fields_under_root: true
  clean_removed: false
  close_removed: false
{%- endif %}
{%- if INPUTS %}
# USER PILLAR DEFINED INPUTS
{{ INPUTS | yaml(False) }}
{%- endif %}
{% if OUTPUT -%}
# USER PILLAR DEFINED OUTPUT
{%- set types = OUTPUT.keys() | list %}
{%- set type = types[0] %}
output.{{ type }}:
{%- for i in OUTPUT[type].items() %}
  {{ i[0] }}: {{ i[1] }}
{%- endfor %}
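# Sketch of what the loop above renders, assuming the pillar supplies
# OUTPUT = {'kafka': {'hosts': '["broker:9092"]', 'topic': 'filebeat'}}
# (output type, keys, and values are illustrative; the pillar location backing
# OUTPUT is not shown in this file):
#
#   output.kafka:
#     hosts: ["broker:9092"]
#     topic: filebeat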
{%- else %}
#----------------------------- Elasticsearch/Logstash output ---------------------------------
{%- if grains['role'] in ["so-eval", "so-import"] %}
output.elasticsearch:
  enabled: true
  hosts: ["https://{{ MANAGER }}:9200"]
{%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
  username: "{{ ES_USER }}"
  password: "{{ ES_PASS }}"
{%- endif %}
  ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
  pipelines:
    - pipeline: "%{[module]}.%{[dataset]}"
  indices:
    - index: "so-import"
      when.contains:
        tags: "import"
    - index: "so-zeek"
      when.contains:
        module: "zeek"
    - index: "so-ids"
      when.contains:
        module: "suricata"
    - index: "so-ossec"
      when.contains:
        module: "ossec"
    - index: "so-osquery"
      when.contains:
        module: "osquery"
    - index: "so-strelka"
      when.contains:
        module: "strelka"
    - index: "so-logscan"
      when.contains:
        module: "logscan"
setup.template.enabled: false
{%- else %}
output.logstash:
  # Boolean flag to enable or disable the output module.
  enabled: true
  # The Logstash hosts
  hosts: ["{{ MANAGER }}:5644"]
  # Number of workers per Logstash host.
  worker: {{ FBLSWORKERS }}
  # Number of records to send to Logstash input at a time
  bulk_max_size: {{ FBLSBULKMAXSIZE }}
  # Set gzip compression level.
  #compression_level: 3
  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  #ssl.enabled: true
  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  ssl.verification_mode: full
  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
  # Certificate for SSL client authentication
  ssl.certificate: "/usr/share/filebeat/filebeat.crt"
  # Client Certificate Key
  ssl.key: "/usr/share/filebeat/filebeat.key"
setup.template.enabled: false
# A dictionary of settings to place into the settings.index dictionary
# of the Elasticsearch template. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
#index:
#  number_of_shards: 1
#  codec: best_compression
#  number_of_routing_shards: 30
# A dictionary of settings for the _source field. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
#_source:
#  enabled: false
{%- endif %}
{% endif %}
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
#setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Optional HTTP Path
#path: ""
# Use SSL settings for HTTPS. Default is true.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
#============================== Xpack Monitoring =====================================
# filebeat can export internal metrics to a central Elasticsearch monitoring cluster.
# This requires xpack monitoring to be enabled in Elasticsearch.
# The reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line, and leave the rest commented out.
#xpack.monitoring.elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify an additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
#hosts: ["localhost:9200"]
# Set gzip compression level.
#compression_level: 0
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "beats_system"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the url with index operations.
#parameters:
#param1: value1
#param2: value2
# Custom HTTP headers to add to each request
#headers:
# X-My-Header: Contents of the header
# Proxy server url
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure http request timeout before failing a request to Elasticsearch.
#timeout: 90
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be accessed through http://localhost:5066/stats . For pretty JSON output
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
http.enabled: true
# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
http.host: 0.0.0.0
# Port on which the HTTP endpoint will bind. Default is 5066.
http.port: 5066
queue.mem.events: {{ FBMEMEVENTS }}
queue.mem.flush.min_events: {{ FBMEMFLUSHMINEVENTS }}
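# With the pillar defaults templated above (2048 for both settings), the in-memory
# queue holds up to 2048 events and a batch is published once 2048 events are
# buffered or Filebeat's default flush timeout expires; raising filebeat:mem_events
# trades memory for larger, less frequent batches.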