From 26abe9067154676df31060bec3b3981ca5af0e05 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 12:19:46 -0400 Subject: [PATCH 1/6] Removed duplicate kafka setup Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- setup/so-setup | 9 --------- 1 file changed, 9 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index a50fea19d..191b25ef2 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -629,15 +629,6 @@ if ! [[ -f $install_opt_file ]]; then set_minion_info whiptail_end_settings - elif [[ $is_kafka ]]; then - info "Setting up as node type Kafka broker" - #check_requirements "kafka" - networking_needful - collect_mngr_hostname - add_mngr_ip_to_hosts - check_manager_connection - set_minion_info - whiptail_end_settings fi if [[ $waitforstate ]]; then From 1b49c8540e466f9b9c602b77245dac58cc5609e4 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:32:15 -0400 Subject: [PATCH 2/6] Fix kafka keystore script Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 208 +++++++++--------- .../sbin_jinja/so-kafka-generate-keystore | 2 +- 2 files changed, 109 insertions(+), 101 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 8caaa01cd..ddf2777a1 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -1,101 +1,109 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} - -{% set kafka_ips_logstash = [] %} -{% set kafka_ips_kraft = [] %} -{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} -{% set kafka_ip = GLOBALS.node_ip %} - -{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set combined = [] %} -{% for hostname, data in nodes.items() %} - {% do combined.append(data.nodeid ~ "@" ~ hostname) %} -{% endfor %} -{% set kraft_controller_quorum_voters = ','.join(combined) %} - -{# Create list for kafka <-> logstash/searchnode communcations #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} -{% endfor %} -{% set kafka_server_list = "','".join(kafka_ips_logstash) %} - -{# Create a list for kraft controller <-> kraft controller communications. 
Used for Kafka metadata management #}
-{% for node, node_data in kafkanodes.items() %}
-{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %}
-{% endfor %}
-{% set kraft_server_list = "','".join(kafka_ips_kraft) %}
-
-
-include:
-  - ssl
-
-kafka_group:
-  group.present:
-    - name: kafka
-    - gid: 960
-
-kafka:
-  user.present:
-    - uid: 960
-    - gid: 960
-
-{# Future tools to query kafka directly / show consumer groups
-kafka_sbin_tools:
-  file.recurse:
-    - name: /usr/sbin
-    - source: salt://kafka/tools/sbin
-    - user: 960
-    - group: 960
-    - file_mode: 755 #}
-
-kakfa_log_dir:
-  file.directory:
-    - name: /opt/so/log/kafka
-    - user: 960
-    - group: 960
-    - makedirs: True
-
-kafka_data_dir:
-  file.directory:
-    - name: /nsm/kafka/data
-    - user: 960
-    - group: 960
-    - makedirs: True
-
-{# When docker container is created an added to registry. Update so-kafka-generate-keystore script #}
-kafka_keystore_script:
-  cmd.script:
-    - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore
-    - tempalte: jinja
-    - cwd: /opt/so
-    - defaults:
-        GLOBALS: {{ GLOBALS }}
-
-kafka_kraft_server_properties:
-  file.managed:
-    - source: salt://kafka/etc/server.properties.jinja
-    - name: /opt/so/conf/kafka/server.properties
-    - template: jinja
-    - defaults:
-        kafka_nodeid: {{ kafka_nodeid }}
-        kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }}
-        kafka_ip: {{ kafka_ip }}
-    - user: 960
-    - group: 960
-    - makedirs: True
-    - show_changes: False
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+
+{% set kafka_ips_logstash = [] %}
+{% set kafka_ips_kraft = [] %}
+{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %}
+{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %}
+{% set kafka_ip = GLOBALS.node_ip %}
+
+{% set nodes = salt['pillar.get']('kafka:nodes', {}) %}
+{% set combined = [] %}
+{% for hostname, data in nodes.items() %}
+  {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %}
+{% endfor %}
+{% set kraft_controller_quorum_voters = ','.join(combined) %}
+
+{# Create list for kafka <-> logstash/searchnode communications #}
+{% for node, node_data in kafkanodes.items() %}
+{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %}
+{% endfor %}
+{% set kafka_server_list = "','".join(kafka_ips_logstash) %}
+
+{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #}
+{% for node, node_data in kafkanodes.items() %}
+{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %}
+{% endfor %}
+{% set kraft_server_list = "','".join(kafka_ips_kraft) %}
+
+
+include:
+  - ssl
+
+kafka_group:
+  group.present:
+    - name: kafka
+    - gid: 960
+
+kafka:
+  user.present:
+    - uid: 960
+    - gid: 960
+
+{# Future tools to query kafka directly / show consumer groups
+kafka_sbin_tools:
+  file.recurse:
+    - name: /usr/sbin
+    - source: salt://kafka/tools/sbin
+    - user: 960
+    - group: 960
+    - file_mode: 755 #}
+
+kafka_sbin_jinja_tools:
+  file.recurse:
+    - name: /usr/sbin
+    - source: salt://kafka/tools/sbin_jinja
+    - user: 960
+    - group: 960
+    - file_mode: 755
+    - template: jinja
+
+kafka_log_dir:
+  file.directory:
+    - name: /opt/so/log/kafka
+    - user: 960
+    - group: 960
+    - makedirs: True
+
+kafka_data_dir:
+  file.directory:
+    - name: /nsm/kafka/data
+    - user: 960
+    - group: 960
+    - makedirs: True
+
+kafka_keystore_script:
+  cmd.script:
+    - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore
+    - template: jinja
+    - cwd: /opt/so
+    - defaults:
+        GLOBALS: {{ GLOBALS }}
+
+kafka_kraft_server_properties:
+  file.managed:
+    - source: salt://kafka/etc/server.properties.jinja
+    - name: /opt/so/conf/kafka/server.properties
+    - template: jinja
+    - defaults:
+        kafka_nodeid: {{ kafka_nodeid }}
+        kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }}
+        kafka_ip: {{ kafka_ip }}
+    - user: 960
+    - group: 960
+    - makedirs: True
+    - show_changes: False
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
{% endif %}
\ No newline at end of file
diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore
index 69bb6ad87..1809c7a93 100644
--- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore
+++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore
@@ -8,7 +8,7 @@
 . /usr/sbin/so-common
 
 if [ ! -f /etc/pki/kafka.jks ]; then
-  docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool so-kafka -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srsstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -alias kafkastore -noprompt
+  docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt
   docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks
   docker rm so-kafka-keystore
 else

From b032eed22a94a80a62e83f4dfe79914523a7024c Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Tue, 2 Apr 2024 16:34:06 -0400
Subject: [PATCH 3/6] Update kafka to use manager docker registry

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/common/tools/sbin/so-image-common |  3 +-
 salt/kafka/enabled.sls                 | 91 +++++++++++++-------------
 2 files changed, 48 insertions(+), 46 deletions(-)

diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common
index 7900b3c52..d322c8e9b 100755
--- a/salt/common/tools/sbin/so-image-common
+++ b/salt/common/tools/sbin/so-image-common
@@ -67,7 +67,8 @@ container_list() {
         "so-strelka-manager"
         "so-suricata"
         "so-telegraf"
-        "so-zeek"
+        "so-zeek"
+        "so-kafka"
       )
   else
     TRUSTED_CONTAINERS=(
diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls
index 1bf7dcf8b..31d375e23 100644
--- a/salt/kafka/enabled.sls
+++ b/salt/kafka/enabled.sls
@@ -1,46 +1,47 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% from 'docker/docker.map.jinja' import DOCKER %}
-
-include:
-  - kafka.sostatus
-  - kafka.config
-  - kafka.storage
-
-so-kafka:
-  docker_container.running:
-    - image: so-kafka
-    - hostname: so-kafka
-    - name: so-kafka
-    - networks:
-      - sobridge:
-        - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
-    - user: kafka
-    - port_bindings:
-      {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
-      - {{ BINDING }}
-      {% endfor %}
-    - binds:
-      - /etc/pki/kafka.jks:/etc/pki/kafka.jks
-      - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts
-      - /nsm/kafka/data/:/nsm/kafka/data/:rw
-      - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties
-
-delete_so-kafka_so-status.disabled:
-  file.uncomment:
-    - name: /opt/so/conf/so-status/so-status.conf
-    - regex: ^so-kafka$
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} + +include: + - kafka.sostatus + - kafka.config + - kafka.storage + +so-kafka: + docker_container.running: + - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} + - hostname: so-kafka + - name: so-kafka + - networks: + - sobridge: + - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} + - user: kafka + - port_bindings: + {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} + - {{ BINDING }} + {% endfor %} + - binds: + - /etc/pki/kafka.jks:/etc/pki/kafka.jks + - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts + - /nsm/kafka/data/:/nsm/kafka/data/:rw + - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + +delete_so-kafka_so-status.disabled: + file.uncomment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-kafka$ + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file From 643d4831c10b09714021ae97e19e0ea679433b94 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:35:14 -0400 Subject: [PATCH 4/6] CRLF -> LF Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/init.sls | 18 +++++----- salt/kafka/sostatus.sls | 40 ++++++++++----------- salt/manager/tools/sbin/so-kafka-clusterid | 42 +++++++++++----------- 3 files changed, 50 insertions(+), 50 deletions(-) diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index 653cd4b88..903c66867 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -1,9 +1,9 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{# Create map.jinja to enable / disable kafka from UI #} -{# Temporarily just enable kafka #} -include: - - kafka.enabled +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{# Create map.jinja to enable / disable kafka from UI #} +{# Temporarily just enable kafka #} +include: + - kafka.enabled diff --git a/salt/kafka/sostatus.sls b/salt/kafka/sostatus.sls index 4c7519964..37c868a46 100644 --- a/salt/kafka/sostatus.sls +++ b/salt/kafka/sostatus.sls @@ -1,21 +1,21 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
-
-append_so-kafka_so-status.conf:
-  file.append:
-    - name: /opt/so/conf/so-status/so-status.conf
-    - text: so-kafka
-    - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
+
+append_so-kafka_so-status.conf:
+  file.append:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - text: so-kafka
+    - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
{% endif %}
\ No newline at end of file
diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid
index 64833a0d2..719973247 100644
--- a/salt/manager/tools/sbin/so-kafka-clusterid
+++ b/salt/manager/tools/sbin/so-kafka-clusterid
@@ -1,22 +1,22 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-local_salt_dir=/opt/so/saltstack/local
-
-if [[ -f /usr/sbin/so-common ]]; then
-  source /usr/sbin/so-common
-else
-  source $(dirname $0)/../../../common/tools/sbin/so-common
-fi
-
-if ! grep -q "^  kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then
-  kafka_cluster_id=$(get_random_value 22)
-  echo '  kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls
-else
-  echo 'kafka_cluster_id exists'
-  salt-call pillar.get secrets
+#!/bin/bash
+
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+local_salt_dir=/opt/so/saltstack/local
+
+if [[ -f /usr/sbin/so-common ]]; then
+  source /usr/sbin/so-common
+else
+  source $(dirname $0)/../../../common/tools/sbin/so-common
+fi
+
+if ! grep -q "^  kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then
+  kafka_cluster_id=$(get_random_value 22)
+  echo '  kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls
+else
+  echo 'kafka_cluster_id exists'
+  salt-call pillar.get secrets
 fi
\ No newline at end of file

From 7f5741c43b9eac3f0409a1eaa7dbe44fc70140d2 Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Tue, 2 Apr 2024 16:36:22 -0400
Subject: [PATCH 5/6] Fix kafka storage setup

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/kafka/storage.sls | 60 +++++++++++++++++++++---------------------
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls
index dc114ef4f..778c054e2 100644
--- a/salt/kafka/storage.sls
+++ b/salt/kafka/storage.sls
@@ -1,31 +1,31 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%}
-
-{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
-{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
-{% else %}
-kafka_storage_init:
-  cmd.run:
-    - name: |
-        docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh so-kafka format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/server.properties
-kafka_rm_kafkainit:
-  cmd.run:
-    - name: |
-        docker rm so-kafkainit
-{% endif %}
-
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%}
+
+{# Initialize kafka storage if it doesn't already exist.
Just looking for meta.properties in /nsm/kafka/data #} +{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} +{% else %} +kafka_storage_init: + cmd.run: + - name: | + docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties +kafka_rm_kafkainit: + cmd.run: + - name: | + docker rm so-kafkainit +{% endif %} + + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file From 82830c81733a925bf9c6c4748946d53ba1039358 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:37:39 -0400 Subject: [PATCH 6/6] Fix typos and fix error related to elasticsearch saltstate being called from logstash state. Logstash will be removed from kafkanodes in future Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/etc/server.properties.jinja | 244 ++++++++++++------------- salt/logstash/config.sls | 2 +- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index ad5ac67a9..eb60eda60 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,123 +1,123 @@ -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. Setting this puts us in KRaft mode -process.roles=broker,controller - -# The node id associated with this instance's roles -node.id={{ kafka_nodeid }} - -# The connect string for the controller quorum -controller.quorum.voters={{ kraft_controller_quorum_voters }} - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. -# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. -# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), -# with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 - -# Name of listener used for communication between brokers. -inter.broker.listener.name=BROKER - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -advertised.listeners=BROKER://{{ kafka_ip }}:9092 - -# A comma-separated list of the names of the listeners used by the controller. -# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol -# This is required if running in KRaft mode. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL - -#SSL configuration -ssl.keystore.location=/etc/pki/kafka.jks -ssl.keystore.pasword=changeit -ssl.keystore.type=JKS -ssl.truststore.location=/etc/pki/java/sos/cacerts -ssl.truststore.password=changeit - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/nsm/kafka/data - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. 
Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id={{ kafka_nodeid }} + +# The connect string for the controller quorum +controller.quorum.voters={{ kraft_controller_quorum_voters }} + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. +# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. +# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), +# with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + +# Name of listener used for communication between brokers. +inter.broker.listener.name=BROKER + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". +advertised.listeners=BROKER://{{ kafka_ip }}:9092 + +# A comma-separated list of the names of the listeners used by the controller. +# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol +# This is required if running in KRaft mode. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL + +#SSL configuration +ssl.keystore.location=/etc/pki/kafka.jks +ssl.keystore.password=changeit +ssl.keystore.type=JKS +ssl.truststore.location=/etc/pki/java/sos/cacerts +ssl.truststore.password=changeit + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/nsm/kafka/data + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. 
+num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies log.retention.check.interval.ms=300000 \ No newline at end of file diff --git a/salt/logstash/config.sls b/salt/logstash/config.sls index 8a59c83b7..402d1ef20 100644 --- a/salt/logstash/config.sls +++ b/salt/logstash/config.sls @@ -12,7 +12,7 @@ include: - ssl - {% if GLOBALS.role not in ['so-receiver','so-fleet'] %} + {% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %} - elasticsearch {% endif %}
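
A quick way to sanity-check these changes after they are applied, sketched under a few assumptions: the so-kafka container name, the /kafka/bin and /etc/pki paths, and the "changeit" passwords come from the patches above; kafka-metadata-quorum.sh ships with Kafka 3.3+ and is assumed to be present in the so-kafka image; <broker-ip> is a placeholder for the node's BROKER listener address; and /opt/so/conf/kafka/client.properties is a hypothetical client config carrying the SSL truststore/keystore settings, needed because both listeners are mapped to SSL.

    # Verify the PKCS12 -> JKS conversion performed by so-kafka-generate-keystore
    keytool -list -keystore /etc/pki/kafka.jks -storepass changeit

    # Describe the KRaft metadata quorum from the running broker; client.properties
    # is a hypothetical SSL client config, not a file created by these patches
    docker exec so-kafka /kafka/bin/kafka-metadata-quorum.sh \
      --bootstrap-server <broker-ip>:9092 \
      --command-config /opt/so/conf/kafka/client.properties \
      describe --status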