Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)

Merge remote-tracking branch 'origin/reyesj2/kafka' into kaffytaffy

@@ -65,7 +65,8 @@ container_list() {
       "so-strelka-manager"
       "so-suricata"
       "so-telegraf"
       "so-zeek"
+      "so-kafka"
     )
   else
     TRUSTED_CONTAINERS=(

@@ -1,101 +1,109 @@
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 
 {% set kafka_ips_logstash = [] %}
 {% set kafka_ips_kraft = [] %}
 {% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %}
 {% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %}
 {% set kafka_ip = GLOBALS.node_ip %}
 
 {% set nodes = salt['pillar.get']('kafka:nodes', {}) %}
 {% set combined = [] %}
 {% for hostname, data in nodes.items() %}
-{% do combined.append(data.nodeid ~ "@" ~ hostname) %}
+{% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %}
 {% endfor %}
 {% set kraft_controller_quorum_voters = ','.join(combined) %}
 
 {# Create list for kafka <-> logstash/searchnode communcations #}
 {% for node, node_data in kafkanodes.items() %}
 {% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %}
 {% endfor %}
 {% set kafka_server_list = "','".join(kafka_ips_logstash) %}
 
 {# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #}
 {% for node, node_data in kafkanodes.items() %}
 {% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %}
 {% endfor %}
 {% set kraft_server_list = "','".join(kafka_ips_kraft) %}
 
 
 include:
   - ssl
 
 kafka_group:
   group.present:
     - name: kafka
     - gid: 960
 
 kafka:
   user.present:
     - uid: 960
     - gid: 960
 
 {# Future tools to query kafka directly / show consumer groups
 kafka_sbin_tools:
   file.recurse:
     - name: /usr/sbin
     - source: salt://kafka/tools/sbin
     - user: 960
     - group: 960
     - file_mode: 755 #}
 
+kafka_sbin_jinja_tools:
+  file.recurse:
+    - name: /usr/sbin
+    - source: salt://kafka/tools/sbin_jinja
+    - user: 960
+    - group: 960
+    - file_mode: 755
+    - template: jinja
+
 kakfa_log_dir:
   file.directory:
     - name: /opt/so/log/kafka
     - user: 960
     - group: 960
     - makedirs: True
 
 kafka_data_dir:
   file.directory:
     - name: /nsm/kafka/data
     - user: 960
     - group: 960
     - makedirs: True
 
-{# When docker container is created an added to registry. Update so-kafka-generate-keystore script #}
 kafka_keystore_script:
   cmd.script:
     - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore
-    - tempalte: jinja
+    - template: jinja
     - cwd: /opt/so
     - defaults:
         GLOBALS: {{ GLOBALS }}
 
 kafka_kraft_server_properties:
   file.managed:
     - source: salt://kafka/etc/server.properties.jinja
     - name: /opt/so/conf/kafka/server.properties
     - template: jinja
     - defaults:
         kafka_nodeid: {{ kafka_nodeid }}
         kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }}
         kafka_ip: {{ kafka_ip }}
     - user: 960
     - group: 960
     - makedirs: True
     - show_changes: False
 
 {% else %}
 
 {{sls}}_state_not_allowed:
   test.fail_without_changes:
     - name: {{sls}}_state_not_allowed
 
 {% endif %}
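
Note on the change above: KRaft quorum voter entries must take the form nodeid@host:port, so appending ":9093" to each nodeid@hostname pair is what makes the rendered controller.quorum.voters string valid. A minimal sketch of what the Jinja above would produce, assuming a hypothetical two-node kafka:nodes pillar (hostnames, node IDs and IPs below are invented for illustration and are not from this commit):

    # Hypothetical pillar:
    #   kafka:
    #     nodes:
    #       kafkanode1: {nodeid: 1, ip: 192.168.1.11}
    #       kafkanode2: {nodeid: 2, ip: 192.168.1.12}
    #
    # The loops would then render roughly:
    #   kraft_controller_quorum_voters -> 1@kafkanode1:9093,2@kafkanode2:9093
    #   kafka_server_list              -> 192.168.1.11:9092','192.168.1.12:9092
    #
    # After a highstate, the values that actually landed in the managed file can be checked with:
    grep -E '^(node\.id|controller\.quorum\.voters|listeners)=' /opt/so/conf/kafka/server.properties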

@@ -1,46 +1,47 @@
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'docker/docker.map.jinja' import DOCKER %}
+{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %}
 
 include:
   - kafka.sostatus
   - kafka.config
   - kafka.storage
 
 so-kafka:
   docker_container.running:
-    - image: so-kafka
+    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
     - hostname: so-kafka
     - name: so-kafka
     - networks:
       - sobridge:
         - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
     - user: kafka
     - port_bindings:
       {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
       - {{ BINDING }}
       {% endfor %}
     - binds:
       - /etc/pki/kafka.jks:/etc/pki/kafka.jks
       - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts
       - /nsm/kafka/data/:/nsm/kafka/data/:rw
       - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties
 
 delete_so-kafka_so-status.disabled:
   file.uncomment:
     - name: /opt/so/conf/so-status/so-status.conf
     - regex: ^so-kafka$
 
 {% else %}
 
 {{sls}}_state_not_allowed:
   test.fail_without_changes:
     - name: {{sls}}_state_not_allowed
 
 {% endif %}
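
With the image now pulled from the manager's registry rather than a bare so-kafka tag, the running container can be checked against the pinned image and the expected bind mounts. This is a hedged sketch (it assumes a standard install where docker and so-status are available on the node) and is not part of the commit:

    # so-status should list the service once so-status.conf is uncommented by the state above
    sudo so-status | grep so-kafka
    # confirm the container runs the registry-tagged image, e.g. <registry_host>:5000/<repo>/so-kafka:<version>
    sudo docker inspect so-kafka --format '{{ .Config.Image }}'
    # list the bind mounts (keystore, cacerts, data dir, server.properties)
    sudo docker inspect so-kafka --format '{{ range .Mounts }}{{ .Source }} -> {{ .Destination }}{{ "\n" }}{{ end }}'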

@@ -1,123 +1,123 @@
 # This configuration file is intended for use in KRaft mode, where
 # Apache ZooKeeper is not present. See config/kraft/README.md for details.
 #
 
 ############################# Server Basics #############################
 
 # The role of this server. Setting this puts us in KRaft mode
 process.roles=broker,controller
 
 # The node id associated with this instance's roles
 node.id={{ kafka_nodeid }}
 
 # The connect string for the controller quorum
 controller.quorum.voters={{ kraft_controller_quorum_voters }}
 
 ############################# Socket Server Settings #############################
 
 # The address the socket server listens on.
 # Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum.
 # If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(),
 # with PLAINTEXT listener name, and port 9092.
 # FORMAT:
 # listeners = listener_name://host_name:port
 # EXAMPLE:
 # listeners = PLAINTEXT://your.host.name:9092
 listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093
 
 # Name of listener used for communication between brokers.
 inter.broker.listener.name=BROKER
 
 # Listener name, hostname and port the broker will advertise to clients.
 # If not set, it uses the value for "listeners".
 advertised.listeners=BROKER://{{ kafka_ip }}:9092
 
 # A comma-separated list of the names of the listeners used by the controller.
 # If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol
 # This is required if running in KRaft mode.
 controller.listener.names=CONTROLLER
 
 # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
 listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL
 
 #SSL configuration
 ssl.keystore.location=/etc/pki/kafka.jks
-ssl.keystore.pasword=changeit
+ssl.keystore.password=changeit
 ssl.keystore.type=JKS
 ssl.truststore.location=/etc/pki/java/sos/cacerts
 ssl.truststore.password=changeit
 
 # The number of threads that the server uses for receiving requests from the network and sending responses to the network
 num.network.threads=3
 
 # The number of threads that the server uses for processing requests, which may include disk I/O
 num.io.threads=8
 
 # The send buffer (SO_SNDBUF) used by the socket server
 socket.send.buffer.bytes=102400
 
 # The receive buffer (SO_RCVBUF) used by the socket server
 socket.receive.buffer.bytes=102400
 
 # The maximum size of a request that the socket server will accept (protection against OOM)
 socket.request.max.bytes=104857600
 
 
 ############################# Log Basics #############################
 
 # A comma separated list of directories under which to store log files
 log.dirs=/nsm/kafka/data
 
 # The default number of log partitions per topic. More partitions allow greater
 # parallelism for consumption, but this will also result in more files across
 # the brokers.
 num.partitions=1
 
 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
 # This value is recommended to be increased for installations with data dirs located in RAID array.
 num.recovery.threads.per.data.dir=1
 
 ############################# Internal Topic Settings #############################
 # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
 # For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
 offsets.topic.replication.factor=1
 transaction.state.log.replication.factor=1
 transaction.state.log.min.isr=1
 
 ############################# Log Flush Policy #############################
 
 # Messages are immediately written to the filesystem but by default we only fsync() to sync
 # the OS cache lazily. The following configurations control the flush of data to disk.
 # There are a few important trade-offs here:
 # 1. Durability: Unflushed data may be lost if you are not using replication.
 # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
 # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
 # The settings below allow one to configure the flush policy to flush data after a period of time or
 # every N messages (or both). This can be done globally and overridden on a per-topic basis.
 
 # The number of messages to accept before forcing a flush of data to disk
 #log.flush.interval.messages=10000
 
 # The maximum amount of time a message can sit in a log before we force a flush
 #log.flush.interval.ms=1000
 
 ############################# Log Retention Policy #############################
 
 # The following configurations control the disposal of log segments. The policy can
 # be set to delete segments after a period of time, or after a given size has accumulated.
 # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
 # from the end of the log.
 
 # The minimum age of a log file to be eligible for deletion due to age
 log.retention.hours=168
 
 # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
 # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
 #log.retention.bytes=1073741824
 
 # The maximum size of a log segment file. When this size is reached a new log segment will be created.
 log.segment.bytes=1073741824
 
 # The interval at which log segments are checked to see if they can be deleted according
 # to the retention policies
 log.retention.check.interval.ms=300000
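
The only functional change in this template is the ssl.keystore.pasword -> ssl.keystore.password fix; with the misspelled key, the keystore password would effectively be unset and the SSL listeners would likely fail to come up. Since both BROKER (9092) and CONTROLLER (9093) listeners are mapped to SSL, a hedged way to confirm they are actually serving TLS after deployment (the IP is illustrative and openssl is assumed to be available on the node doing the check):

    # Both listeners should complete a TLS handshake and present the node certificate
    openssl s_client -connect 192.168.1.11:9092 -brief </dev/null
    openssl s_client -connect 192.168.1.11:9093 -brief </dev/null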

@@ -1,9 +1,9 @@
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
 {# Create map.jinja to enable / disable kafka from UI #}
 {# Temporarily just enable kafka #}
 include:
   - kafka.enabled

@@ -1,21 +1,21 @@
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls.split('.')[0] in allowed_states %}
 
 append_so-kafka_so-status.conf:
   file.append:
     - name: /opt/so/conf/so-status/so-status.conf
     - text: so-kafka
     - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf
 
 {% else %}
 
 {{sls}}_state_not_allowed:
   test.fail_without_changes:
     - name: {{sls}}_state_not_allowed
 
 {% endif %}

@@ -1,31 +1,31 @@
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%}
 
 {# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
 {% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
 {% else %}
 kafka_storage_init:
   cmd.run:
     - name: |
-        docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh so-kafka format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/server.properties
+        docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties
 kafka_rm_kafkainit:
   cmd.run:
     - name: |
         docker rm so-kafkainit
 {% endif %}
 
 
 {% else %}
 
 {{sls}}_state_not_allowed:
   test.fail_without_changes:
     - name: {{sls}}_state_not_allowed
 
 {% endif %}
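
Besides switching to the registry image, the format command now points -c at /kafka/config/kraft/newserver.properties, the path that is actually bind-mounted into the init container. The format step only runs when /nsm/kafka/data/meta.properties is absent, so a sketch for confirming the data directory was initialized against the pillar's cluster id (commands assume a completed highstate on the node):

    # meta.properties is written by kafka-storage.sh format and records the cluster id and node id
    sudo cat /nsm/kafka/data/meta.properties
    # should match the cluster.id recorded above
    sudo salt-call pillar.get secrets:kafka_cluster_id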

@@ -8,7 +8,7 @@
 . /usr/sbin/so-common
 
 if [ ! -f /etc/pki/kafka.jks ]; then
-  docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool so-kafka -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srsstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -alias kafkastore -noprompt
+  docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt
   docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks
   docker rm so-kafka-keystore
 else
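
Beyond the registry image, two keytool arguments were corrected here: -srsstorepass became -srcstorepass (the misspelled flag would likely make keytool prompt for or reject the source store password), and the -alias kafkastore argument was dropped so the -importkeystore run is no longer tied to a single entry name. A hedged verification sketch, run on the host if keytool happens to be installed there (otherwise it can be run through the so-kafka image the same way the script does; the store password comes from the command above):

    # List entries in the generated JKS keystore; expect the key material converted from /etc/pki/kafka.p12
    sudo keytool -list -keystore /etc/pki/kafka.jks -storetype JKS -storepass changeit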

@@ -12,7 +12,7 @@
 
 include:
   - ssl
-{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
+{% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %}
   - elasticsearch
 {% endif %}
 

@@ -1,22 +1,22 @@
 #!/bin/bash
 
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
 local_salt_dir=/opt/so/saltstack/local
 
 if [[ -f /usr/sbin/so-common ]]; then
   source /usr/sbin/so-common
 else
   source $(dirname $0)/../../../common/tools/sbin/so-common
 fi
 
 if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then
   kafka_cluster_id=$(get_random_value 22)
   echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls
 else
   echo 'kafka_cluster_id exists'
   salt-call pillar.get secrets
 fi
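
This helper generates the KRaft cluster id once and persists it in the local secrets pillar, which is what the storage state later reads back via secrets:kafka_cluster_id; get_random_value is presumably provided by so-common, which the script sources. A sketch of the resulting pillar line and an idempotency check (the id shown is made up for illustration):

    # /opt/so/saltstack/local/pillar/secrets.sls gains a line similar to:
    #   kafka_cluster_id: 0MC6jCIsQGaVJifBTtfsyg
    # Re-running the script is a no-op thanks to the grep guard above:
    sudo grep kafka_cluster_id /opt/so/saltstack/local/pillar/secrets.sls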

@@ -629,15 +629,6 @@ if ! [[ -f $install_opt_file ]]; then
     set_minion_info
     whiptail_end_settings
 
-  elif [[ $is_kafka ]]; then
-    info "Setting up as node type Kafka broker"
-    #check_requirements "kafka"
-    networking_needful
-    collect_mngr_hostname
-    add_mngr_ip_to_hosts
-    check_manager_connection
-    set_minion_info
-    whiptail_end_settings
   fi
 
   if [[ $waitforstate ]]; then